Linux kernel mirror (for testing)
git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
1// SPDX-License-Identifier: GPL-2.0
2/*
3 * Copyright (C) 2010 Kent Overstreet <kent.overstreet@gmail.com>
4 *
5 * Uses a block device as cache for other block devices; optimized for SSDs.
6 * All allocation is done in buckets, which should match the erase block size
7 * of the device.
8 *
9 * Buckets containing cached data are kept on a heap sorted by priority;
10 * bucket priority is increased on cache hit, and periodically all the buckets
11 * on the heap have their priority scaled down. This currently is just used as
12 * an LRU but in the future should allow for more intelligent heuristics.
13 *
14 * Buckets have an 8 bit counter; freeing is accomplished by incrementing the
15 * counter. Garbage collection is used to remove stale pointers.
16 *
17 * Indexing is done via a btree; nodes are not necessarily fully sorted, rather
18 * as keys are inserted we only sort the pages that have not yet been written.
19 * When garbage collection is run, we resort the entire node.
20 *
21 * All configuration is done via sysfs; see Documentation/admin-guide/bcache.rst.
22 */
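
/*
 * A rough sketch of the generation scheme described above (illustrative,
 * not a definition from this file): each pointer records the bucket's
 * generation at the time it was created, and a bucket is "freed" simply
 * by incrementing bucket->gen, which makes every pointer still carrying
 * the old generation stale (see ptr_stale()); garbage collection later
 * walks the btree and drops those stale pointers.
 */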
23
24#include "bcache.h"
25#include "btree.h"
26#include "debug.h"
27#include "extents.h"
28
29#include <linux/slab.h>
30#include <linux/bitops.h>
31#include <linux/hash.h>
32#include <linux/kthread.h>
33#include <linux/prefetch.h>
34#include <linux/random.h>
35#include <linux/rcupdate.h>
36#include <linux/sched/clock.h>
37#include <linux/rculist.h>
38
39#include <trace/events/bcache.h>
40
41/*
42 * Todo:
43 * register_bcache: Return errors out to userspace correctly
44 *
45 * Writeback: don't undirty key until after a cache flush
46 *
47 * Create an iterator for key pointers
48 *
49 * On btree write error, mark bucket such that it won't be freed from the cache
50 *
51 * Journalling:
52 * Check for bad keys in replay
53 * Propagate barriers
54 * Refcount journal entries in journal_replay
55 *
56 * Garbage collection:
57 * Finish incremental gc
58 * Gc should free old UUIDs, data for invalid UUIDs
59 *
60 * Provide a way to list backing device UUIDs we have data cached for, and
61 * probably how long it's been since we've seen them, and a way to invalidate
62 * dirty data for devices that will never be attached again
63 *
64 * Keep 1 min/5 min/15 min statistics of how busy a block device has been, so
65 * that based on that and how much dirty data we have we can keep writeback
66 * from being starved
67 *
68 * Add a tracepoint or somesuch to watch for writeback starvation
69 *
70 * When btree depth > 1 and splitting an interior node, we have to make sure
71 * alloc_bucket() cannot fail. This should be true but is not completely
72 * obvious.
73 *
74 * Plugging?
75 *
76 * If data write is less than hard sector size of ssd, round up offset in open
77 * bucket to the next whole sector
78 *
79 * Superblock needs to be fleshed out for multiple cache devices
80 *
81 * Add a sysfs tunable for the number of writeback IOs in flight
82 *
83 * Add a sysfs tunable for the number of open data buckets
84 *
85 * IO tracking: Can we track when one process is doing io on behalf of another?
86 * IO tracking: Don't use just an average, weigh more recent stuff higher
87 *
88 * Test module load/unload
89 */
90
91#define MAX_NEED_GC 64
92#define MAX_SAVE_PRIO 72
93#define MAX_GC_TIMES 100
94#define MIN_GC_NODES 100
95#define GC_SLEEP_MS 100
96
97#define PTR_DIRTY_BIT (((uint64_t) 1 << 36))
98
99#define PTR_HASH(c, k) \
100 (((k)->ptr[0] >> c->bucket_bits) | PTR_GEN(k, 0))
101
102#define insert_lock(s, b) ((b)->level <= (s)->lock)
103
104/*
105 * These macros are for recursing down the btree - they handle the details of
106 * locking and looking up nodes in the cache for you. They're best treated as
107 * mere syntax when reading code that uses them.
108 *
109 * op->lock determines whether we take a read or a write lock at a given depth.
110 * If you've got a read lock and find that you need a write lock (i.e. you're
111 * going to have to split), set op->lock and return -EINTR; btree_root() will
112 * call you again and you'll have the correct lock.
113 */
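
/*
 * An illustrative example of the op->lock convention (not taken from
 * this file): a pure lookup can run with op->lock == -1 so every level
 * is only read locked, while an insert into a leaf uses op->lock == 0
 * so level-0 nodes are write locked; if a traversal then discovers it
 * must split an interior node, it raises op->lock and returns -EINTR,
 * and btree_root() restarts the walk holding the stronger locks.
 */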
114
115/**
116 * btree - recurse down the btree on a specified key
117 * @fn: function to call, which will be passed the child node
118 * @key: key to recurse on
119 * @b: parent btree node
120 * @op: pointer to struct btree_op
121 */
122#define btree(fn, key, b, op, ...) \
123({ \
124 int _r, l = (b)->level - 1; \
125 bool _w = l <= (op)->lock; \
126 struct btree *_child = bch_btree_node_get((b)->c, op, key, l, \
127 _w, b); \
128 if (!IS_ERR(_child)) { \
129 _r = bch_btree_ ## fn(_child, op, ##__VA_ARGS__); \
130 rw_unlock(_w, _child); \
131 } else \
132 _r = PTR_ERR(_child); \
133 _r; \
134})
135
136/**
137 * btree_root - call a function on the root of the btree
138 * @fn: function to call, which will be passed the child node
139 * @c: cache set
140 * @op: pointer to struct btree_op
141 */
142#define btree_root(fn, c, op, ...) \
143({ \
144 int _r = -EINTR; \
145 do { \
146 struct btree *_b = (c)->root; \
147 bool _w = insert_lock(op, _b); \
148 rw_lock(_w, _b, _b->level); \
149 if (_b == (c)->root && \
150 _w == insert_lock(op, _b)) { \
151 _r = bch_btree_ ## fn(_b, op, ##__VA_ARGS__); \
152 } \
153 rw_unlock(_w, _b); \
154 bch_cannibalize_unlock(c); \
155 if (_r == -EINTR) \
156 schedule(); \
157 } while (_r == -EINTR); \
158 \
159 finish_wait(&(c)->btree_cache_wait, &(op)->wait); \
160 _r; \
161})
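
/*
 * Typical use of the two macros above (illustrative): garbage collection
 * calls btree_root(gc_root, c, &op, &writes, &stats), which runs
 * bch_btree_gc_root() on the root node and retries while it returns
 * -EINTR, and bch_btree_check_recurse() descends into child nodes via
 * btree(check_recurse, p, b, op), which looks the child up in the cache
 * and locks it for the callee.
 */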
162
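/*
 * write_block() points at the first block of @b that has not yet been
 * written out - i.e. where the currently open bset lives, or where the
 * next one will be started by bch_bset_init_next().
 */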
163static inline struct bset *write_block(struct btree *b)
164{
165 return ((void *) btree_bset_first(b)) + b->written * block_bytes(b->c);
166}
167
168static void bch_btree_init_next(struct btree *b)
169{
170 /* If not a leaf node, always sort */
171 if (b->level && b->keys.nsets)
172 bch_btree_sort(&b->keys, &b->c->sort);
173 else
174 bch_btree_sort_lazy(&b->keys, &b->c->sort);
175
176 if (b->written < btree_blocks(b))
177 bch_bset_init_next(&b->keys, write_block(b),
178 bset_magic(&b->c->sb));
179
180}
181
182/* Btree key manipulation */
183
184void bkey_put(struct cache_set *c, struct bkey *k)
185{
186 unsigned int i;
187
188 for (i = 0; i < KEY_PTRS(k); i++)
189 if (ptr_available(c, k, i))
190 atomic_dec_bug(&PTR_BUCKET(c, k, i)->pin);
191}
192
193/* Btree IO */
194
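/*
 * Note: the checksum below skips the first 8 bytes of the bset (the csum
 * field itself) and seeds the crc with the node's own pointer, so the
 * checksum is tied to the bucket the node was written to.
 */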
195static uint64_t btree_csum_set(struct btree *b, struct bset *i)
196{
197 uint64_t crc = b->key.ptr[0];
198 void *data = (void *) i + 8, *end = bset_bkey_last(i);
199
200 crc = bch_crc64_update(crc, data, end - data);
201 return crc ^ 0xffffffffffffffffULL;
202}
203
204void bch_btree_node_read_done(struct btree *b)
205{
206 const char *err = "bad btree header";
207 struct bset *i = btree_bset_first(b);
208 struct btree_iter *iter;
209
210 /*
211 * c->fill_iter can allocate an iterator with more memory space
212 * than static MAX_BSETS.
213 * See the comment around cache_set->fill_iter.
214 */
215 iter = mempool_alloc(&b->c->fill_iter, GFP_NOIO);
216 iter->size = b->c->sb.bucket_size / b->c->sb.block_size;
217 iter->used = 0;
218
219#ifdef CONFIG_BCACHE_DEBUG
220 iter->b = &b->keys;
221#endif
222
223 if (!i->seq)
224 goto err;
225
226 for (;
227 b->written < btree_blocks(b) && i->seq == b->keys.set[0].data->seq;
228 i = write_block(b)) {
229 err = "unsupported bset version";
230 if (i->version > BCACHE_BSET_VERSION)
231 goto err;
232
233 err = "bad btree header";
234 if (b->written + set_blocks(i, block_bytes(b->c)) >
235 btree_blocks(b))
236 goto err;
237
238 err = "bad magic";
239 if (i->magic != bset_magic(&b->c->sb))
240 goto err;
241
242 err = "bad checksum";
243 switch (i->version) {
244 case 0:
245 if (i->csum != csum_set(i))
246 goto err;
247 break;
248 case BCACHE_BSET_VERSION:
249 if (i->csum != btree_csum_set(b, i))
250 goto err;
251 break;
252 }
253
254 err = "empty set";
255 if (i != b->keys.set[0].data && !i->keys)
256 goto err;
257
258 bch_btree_iter_push(iter, i->start, bset_bkey_last(i));
259
260 b->written += set_blocks(i, block_bytes(b->c));
261 }
262
263 err = "corrupted btree";
264 for (i = write_block(b);
265 bset_sector_offset(&b->keys, i) < KEY_SIZE(&b->key);
266 i = ((void *) i) + block_bytes(b->c))
267 if (i->seq == b->keys.set[0].data->seq)
268 goto err;
269
270 bch_btree_sort_and_fix_extents(&b->keys, iter, &b->c->sort);
271
272 i = b->keys.set[0].data;
273 err = "short btree key";
274 if (b->keys.set[0].size &&
275 bkey_cmp(&b->key, &b->keys.set[0].end) < 0)
276 goto err;
277
278 if (b->written < btree_blocks(b))
279 bch_bset_init_next(&b->keys, write_block(b),
280 bset_magic(&b->c->sb));
281out:
282 mempool_free(iter, &b->c->fill_iter);
283 return;
284err:
285 set_btree_node_io_error(b);
286 bch_cache_set_error(b->c, "%s at bucket %zu, block %u, %u keys",
287 err, PTR_BUCKET_NR(b->c, &b->key, 0),
288 bset_block_offset(b, i), i->keys);
289 goto out;
290}
291
292static void btree_node_read_endio(struct bio *bio)
293{
294 struct closure *cl = bio->bi_private;
295
296 closure_put(cl);
297}
298
299static void bch_btree_node_read(struct btree *b)
300{
301 uint64_t start_time = local_clock();
302 struct closure cl;
303 struct bio *bio;
304
305 trace_bcache_btree_read(b);
306
307 closure_init_stack(&cl);
308
309 bio = bch_bbio_alloc(b->c);
310 bio->bi_iter.bi_size = KEY_SIZE(&b->key) << 9;
311 bio->bi_end_io = btree_node_read_endio;
312 bio->bi_private = &cl;
313 bio->bi_opf = REQ_OP_READ | REQ_META;
314
315 bch_bio_map(bio, b->keys.set[0].data);
316
317 bch_submit_bbio(bio, b->c, &b->key, 0);
318 closure_sync(&cl);
319
320 if (bio->bi_status)
321 set_btree_node_io_error(b);
322
323 bch_bbio_free(bio, b->c);
324
325 if (btree_node_io_error(b))
326 goto err;
327
328 bch_btree_node_read_done(b);
329 bch_time_stats_update(&b->c->btree_read_time, start_time);
330
331 return;
332err:
333 bch_cache_set_error(b->c, "io error reading bucket %zu",
334 PTR_BUCKET_NR(b->c, &b->key, 0));
335}
336
337static void btree_complete_write(struct btree *b, struct btree_write *w)
338{
339 if (w->prio_blocked &&
340 !atomic_sub_return(w->prio_blocked, &b->c->prio_blocked))
341 wake_up_allocators(b->c);
342
343 if (w->journal) {
344 atomic_dec_bug(w->journal);
345 __closure_wake_up(&b->c->journal.wait);
346 }
347
348 w->prio_blocked = 0;
349 w->journal = NULL;
350}
351
352static void btree_node_write_unlock(struct closure *cl)
353{
354 struct btree *b = container_of(cl, struct btree, io);
355
356 up(&b->io_mutex);
357}
358
359static void __btree_node_write_done(struct closure *cl)
360{
361 struct btree *b = container_of(cl, struct btree, io);
362 struct btree_write *w = btree_prev_write(b);
363
364 bch_bbio_free(b->bio, b->c);
365 b->bio = NULL;
366 btree_complete_write(b, w);
367
368 if (btree_node_dirty(b))
369 schedule_delayed_work(&b->work, 30 * HZ);
370
371 closure_return_with_destructor(cl, btree_node_write_unlock);
372}
373
374static void btree_node_write_done(struct closure *cl)
375{
376 struct btree *b = container_of(cl, struct btree, io);
377
378 bio_free_pages(b->bio);
379 __btree_node_write_done(cl);
380}
381
382static void btree_node_write_endio(struct bio *bio)
383{
384 struct closure *cl = bio->bi_private;
385 struct btree *b = container_of(cl, struct btree, io);
386
387 if (bio->bi_status)
388 set_btree_node_io_error(b);
389
390 bch_bbio_count_io_errors(b->c, bio, bio->bi_status, "writing btree");
391 closure_put(cl);
392}
393
394static void do_btree_node_write(struct btree *b)
395{
396 struct closure *cl = &b->io;
397 struct bset *i = btree_bset_last(b);
398 BKEY_PADDED(key) k;
399
400 i->version = BCACHE_BSET_VERSION;
401 i->csum = btree_csum_set(b, i);
402
403 BUG_ON(b->bio);
404 b->bio = bch_bbio_alloc(b->c);
405
406 b->bio->bi_end_io = btree_node_write_endio;
407 b->bio->bi_private = cl;
408 b->bio->bi_iter.bi_size = roundup(set_bytes(i), block_bytes(b->c));
409 b->bio->bi_opf = REQ_OP_WRITE | REQ_META | REQ_FUA;
410 bch_bio_map(b->bio, i);
411
412 /*
413 * If we're appending to a leaf node, we don't technically need FUA -
414 * this write just needs to be persisted before the next journal write,
415 * which will be marked FLUSH|FUA.
416 *
417 * Similarly if we're writing a new btree root - the pointer is going to
418 * be in the next journal entry.
419 *
420 * But if we're writing a new btree node (that isn't a root) or
421 * appending to a non leaf btree node, we need either FUA or a flush
422 * when we write the parent with the new pointer. FUA is cheaper than a
423 * flush, and writes appending to leaf nodes aren't blocking anything so
424 * just make all btree node writes FUA to keep things sane.
425 */
426
427 bkey_copy(&k.key, &b->key);
428 SET_PTR_OFFSET(&k.key, 0, PTR_OFFSET(&k.key, 0) +
429 bset_sector_offset(&b->keys, i));
430
431 if (!bch_bio_alloc_pages(b->bio, __GFP_NOWARN|GFP_NOWAIT)) {
432 int j;
433 struct bio_vec *bv;
434 void *base = (void *) ((unsigned long) i & ~(PAGE_SIZE - 1));
435
436 bio_for_each_segment_all(bv, b->bio, j)
437 memcpy(page_address(bv->bv_page),
438 base + j * PAGE_SIZE, PAGE_SIZE);
439
440 bch_submit_bbio(b->bio, b->c, &k.key, 0);
441
442 continue_at(cl, btree_node_write_done, NULL);
443 } else {
444 /*
445 * No problem for multipage bvec since the bio is
446 * just allocated
447 */
448 b->bio->bi_vcnt = 0;
449 bch_bio_map(b->bio, i);
450
451 bch_submit_bbio(b->bio, b->c, &k.key, 0);
452
453 closure_sync(cl);
454 continue_at_nobarrier(cl, __btree_node_write_done, NULL);
455 }
456}
457
458void __bch_btree_node_write(struct btree *b, struct closure *parent)
459{
460 struct bset *i = btree_bset_last(b);
461
462 lockdep_assert_held(&b->write_lock);
463
464 trace_bcache_btree_write(b);
465
466 BUG_ON(current->bio_list);
467 BUG_ON(b->written >= btree_blocks(b));
468 BUG_ON(b->written && !i->keys);
469 BUG_ON(btree_bset_first(b)->seq != i->seq);
470 bch_check_keys(&b->keys, "writing");
471
472 cancel_delayed_work(&b->work);
473
474 /* If caller isn't waiting for write, parent refcount is cache set */
475 down(&b->io_mutex);
476 closure_init(&b->io, parent ?: &b->c->cl);
477
478 clear_bit(BTREE_NODE_dirty, &b->flags);
479 change_bit(BTREE_NODE_write_idx, &b->flags);
480
481 do_btree_node_write(b);
482
483 atomic_long_add(set_blocks(i, block_bytes(b->c)) * b->c->sb.block_size,
484 &PTR_CACHE(b->c, &b->key, 0)->btree_sectors_written);
485
486 b->written += set_blocks(i, block_bytes(b->c));
487}
488
489void bch_btree_node_write(struct btree *b, struct closure *parent)
490{
491 unsigned int nsets = b->keys.nsets;
492
493 lockdep_assert_held(&b->lock);
494
495 __bch_btree_node_write(b, parent);
496
497 /*
498 * do verify if there was more than one set initially (i.e. we did a
499 * sort) and we sorted down to a single set:
500 */
501 if (nsets && !b->keys.nsets)
502 bch_btree_verify(b);
503
504 bch_btree_init_next(b);
505}
506
507static void bch_btree_node_write_sync(struct btree *b)
508{
509 struct closure cl;
510
511 closure_init_stack(&cl);
512
513 mutex_lock(&b->write_lock);
514 bch_btree_node_write(b, &cl);
515 mutex_unlock(&b->write_lock);
516
517 closure_sync(&cl);
518}
519
520static void btree_node_write_work(struct work_struct *w)
521{
522 struct btree *b = container_of(to_delayed_work(w), struct btree, work);
523
524 mutex_lock(&b->write_lock);
525 if (btree_node_dirty(b))
526 __bch_btree_node_write(b, NULL);
527 mutex_unlock(&b->write_lock);
528}
529
530static void bch_btree_leaf_dirty(struct btree *b, atomic_t *journal_ref)
531{
532 struct bset *i = btree_bset_last(b);
533 struct btree_write *w = btree_current_write(b);
534
535 lockdep_assert_held(&b->write_lock);
536
537 BUG_ON(!b->written);
538 BUG_ON(!i->keys);
539
540 if (!btree_node_dirty(b))
541 schedule_delayed_work(&b->work, 30 * HZ);
542
543 set_btree_node_dirty(b);
544
545 if (journal_ref) {
546 if (w->journal &&
547 journal_pin_cmp(b->c, w->journal, journal_ref)) {
548 atomic_dec_bug(w->journal);
549 w->journal = NULL;
550 }
551
552 if (!w->journal) {
553 w->journal = journal_ref;
554 atomic_inc(w->journal);
555 }
556 }
557
558 /* Force write if set is too big */
559 if (set_bytes(i) > PAGE_SIZE - 48 &&
560 !current->bio_list)
561 bch_btree_node_write(b, NULL);
562}
563
564/*
565 * Btree in memory cache - allocation/freeing
566 * mca -> memory cache
567 */
568
569#define mca_reserve(c) (((c->root && c->root->level) \
570 ? c->root->level : 1) * 8 + 16)
571#define mca_can_free(c) \
572 max_t(int, 0, c->btree_cache_used - mca_reserve(c))
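
/*
 * Worked example of the reserve above: with a btree of depth 2
 * (c->root->level == 2) mca_reserve() is 2 * 8 + 16 = 32 nodes, and
 * mca_can_free() only reports nodes beyond that reserve as reclaimable
 * by the shrinker.
 */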
573
574static void mca_data_free(struct btree *b)
575{
576 BUG_ON(b->io_mutex.count != 1);
577
578 bch_btree_keys_free(&b->keys);
579
580 b->c->btree_cache_used--;
581 list_move(&b->list, &b->c->btree_cache_freed);
582}
583
584static void mca_bucket_free(struct btree *b)
585{
586 BUG_ON(btree_node_dirty(b));
587
588 b->key.ptr[0] = 0;
589 hlist_del_init_rcu(&b->hash);
590 list_move(&b->list, &b->c->btree_cache_freeable);
591}
592
593static unsigned int btree_order(struct bkey *k)
594{
595 return ilog2(KEY_SIZE(k) / PAGE_SECTORS ?: 1);
596}
597
598static void mca_data_alloc(struct btree *b, struct bkey *k, gfp_t gfp)
599{
600 if (!bch_btree_keys_alloc(&b->keys,
601 max_t(unsigned int,
602 ilog2(b->c->btree_pages),
603 btree_order(k)),
604 gfp)) {
605 b->c->btree_cache_used++;
606 list_move(&b->list, &b->c->btree_cache);
607 } else {
608 list_move(&b->list, &b->c->btree_cache_freed);
609 }
610}
611
612static struct btree *mca_bucket_alloc(struct cache_set *c,
613 struct bkey *k, gfp_t gfp)
614{
615 struct btree *b = kzalloc(sizeof(struct btree), gfp);
616
617 if (!b)
618 return NULL;
619
620 init_rwsem(&b->lock);
621 lockdep_set_novalidate_class(&b->lock);
622 mutex_init(&b->write_lock);
623 lockdep_set_novalidate_class(&b->write_lock);
624 INIT_LIST_HEAD(&b->list);
625 INIT_DELAYED_WORK(&b->work, btree_node_write_work);
626 b->c = c;
627 sema_init(&b->io_mutex, 1);
628
629 mca_data_alloc(b, k, gfp);
630 return b;
631}
632
633static int mca_reap(struct btree *b, unsigned int min_order, bool flush)
634{
635 struct closure cl;
636
637 closure_init_stack(&cl);
638 lockdep_assert_held(&b->c->bucket_lock);
639
640 if (!down_write_trylock(&b->lock))
641 return -ENOMEM;
642
643 BUG_ON(btree_node_dirty(b) && !b->keys.set[0].data);
644
645 if (b->keys.page_order < min_order)
646 goto out_unlock;
647
648 if (!flush) {
649 if (btree_node_dirty(b))
650 goto out_unlock;
651
652 if (down_trylock(&b->io_mutex))
653 goto out_unlock;
654 up(&b->io_mutex);
655 }
656
657 mutex_lock(&b->write_lock);
658 if (btree_node_dirty(b))
659 __bch_btree_node_write(b, &cl);
660 mutex_unlock(&b->write_lock);
661
662 closure_sync(&cl);
663
664 /* wait for any in flight btree write */
665 down(&b->io_mutex);
666 up(&b->io_mutex);
667
668 return 0;
669out_unlock:
670 rw_unlock(true, b);
671 return -ENOMEM;
672}
673
674static unsigned long bch_mca_scan(struct shrinker *shrink,
675 struct shrink_control *sc)
676{
677 struct cache_set *c = container_of(shrink, struct cache_set, shrink);
678 struct btree *b, *t;
679 unsigned long i, nr = sc->nr_to_scan;
680 unsigned long freed = 0;
681 unsigned int btree_cache_used;
682
683 if (c->shrinker_disabled)
684 return SHRINK_STOP;
685
686 if (c->btree_cache_alloc_lock)
687 return SHRINK_STOP;
688
689 /* Return -1 if we can't do anything right now */
690 if (sc->gfp_mask & __GFP_IO)
691 mutex_lock(&c->bucket_lock);
692 else if (!mutex_trylock(&c->bucket_lock))
693 return -1;
694
695 /*
696 * It's _really_ critical that we don't free too many btree nodes - we
697 * have to always leave ourselves a reserve. The reserve is how we
698 * guarantee that allocating memory for a new btree node can always
699 * succeed, so that inserting keys into the btree can always succeed and
700 * IO can always make forward progress:
701 */
702 nr /= c->btree_pages;
703 nr = min_t(unsigned long, nr, mca_can_free(c));
704
705 i = 0;
706 btree_cache_used = c->btree_cache_used;
707 list_for_each_entry_safe(b, t, &c->btree_cache_freeable, list) {
708 if (nr <= 0)
709 goto out;
710
711 if (++i > 3 &&
712 !mca_reap(b, 0, false)) {
713 mca_data_free(b);
714 rw_unlock(true, b);
715 freed++;
716 }
717 nr--;
718 }
719
720 for (; (nr--) && i < btree_cache_used; i++) {
721 if (list_empty(&c->btree_cache))
722 goto out;
723
724 b = list_first_entry(&c->btree_cache, struct btree, list);
725 list_rotate_left(&c->btree_cache);
726
727 if (!b->accessed &&
728 !mca_reap(b, 0, false)) {
729 mca_bucket_free(b);
730 mca_data_free(b);
731 rw_unlock(true, b);
732 freed++;
733 } else
734 b->accessed = 0;
735 }
736out:
737 mutex_unlock(&c->bucket_lock);
738 return freed * c->btree_pages;
739}
740
741static unsigned long bch_mca_count(struct shrinker *shrink,
742 struct shrink_control *sc)
743{
744 struct cache_set *c = container_of(shrink, struct cache_set, shrink);
745
746 if (c->shrinker_disabled)
747 return 0;
748
749 if (c->btree_cache_alloc_lock)
750 return 0;
751
752 return mca_can_free(c) * c->btree_pages;
753}
754
755void bch_btree_cache_free(struct cache_set *c)
756{
757 struct btree *b;
758 struct closure cl;
759
760 closure_init_stack(&cl);
761
762 if (c->shrink.list.next)
763 unregister_shrinker(&c->shrink);
764
765 mutex_lock(&c->bucket_lock);
766
767#ifdef CONFIG_BCACHE_DEBUG
768 if (c->verify_data)
769 list_move(&c->verify_data->list, &c->btree_cache);
770
771 free_pages((unsigned long) c->verify_ondisk, ilog2(bucket_pages(c)));
772#endif
773
774 list_splice(&c->btree_cache_freeable,
775 &c->btree_cache);
776
777 while (!list_empty(&c->btree_cache)) {
778 b = list_first_entry(&c->btree_cache, struct btree, list);
779
780 if (btree_node_dirty(b))
781 btree_complete_write(b, btree_current_write(b));
782 clear_bit(BTREE_NODE_dirty, &b->flags);
783
784 mca_data_free(b);
785 }
786
787 while (!list_empty(&c->btree_cache_freed)) {
788 b = list_first_entry(&c->btree_cache_freed,
789 struct btree, list);
790 list_del(&b->list);
791 cancel_delayed_work_sync(&b->work);
792 kfree(b);
793 }
794
795 mutex_unlock(&c->bucket_lock);
796}
797
798int bch_btree_cache_alloc(struct cache_set *c)
799{
800 unsigned int i;
801
802 for (i = 0; i < mca_reserve(c); i++)
803 if (!mca_bucket_alloc(c, &ZERO_KEY, GFP_KERNEL))
804 return -ENOMEM;
805
806 list_splice_init(&c->btree_cache,
807 &c->btree_cache_freeable);
808
809#ifdef CONFIG_BCACHE_DEBUG
810 mutex_init(&c->verify_lock);
811
812 c->verify_ondisk = (void *)
813 __get_free_pages(GFP_KERNEL, ilog2(bucket_pages(c)));
814
815 c->verify_data = mca_bucket_alloc(c, &ZERO_KEY, GFP_KERNEL);
816
817 if (c->verify_data &&
818 c->verify_data->keys.set->data)
819 list_del_init(&c->verify_data->list);
820 else
821 c->verify_data = NULL;
822#endif
823
824 c->shrink.count_objects = bch_mca_count;
825 c->shrink.scan_objects = bch_mca_scan;
826 c->shrink.seeks = 4;
827 c->shrink.batch = c->btree_pages * 2;
828
829 if (register_shrinker(&c->shrink))
830 pr_warn("bcache: %s: could not register shrinker",
831 __func__);
832
833 return 0;
834}
835
836/* Btree in memory cache - hash table */
837
838static struct hlist_head *mca_hash(struct cache_set *c, struct bkey *k)
839{
840 return &c->bucket_hash[hash_32(PTR_HASH(c, k), BUCKET_HASH_BITS)];
841}
842
843static struct btree *mca_find(struct cache_set *c, struct bkey *k)
844{
845 struct btree *b;
846
847 rcu_read_lock();
848 hlist_for_each_entry_rcu(b, mca_hash(c, k), hash)
849 if (PTR_HASH(c, &b->key) == PTR_HASH(c, k))
850 goto out;
851 b = NULL;
852out:
853 rcu_read_unlock();
854 return b;
855}
856
857static int mca_cannibalize_lock(struct cache_set *c, struct btree_op *op)
858{
859 struct task_struct *old;
860
861 old = cmpxchg(&c->btree_cache_alloc_lock, NULL, current);
862 if (old && old != current) {
863 if (op)
864 prepare_to_wait(&c->btree_cache_wait, &op->wait,
865 TASK_UNINTERRUPTIBLE);
866 return -EINTR;
867 }
868
869 return 0;
870}
871
872static struct btree *mca_cannibalize(struct cache_set *c, struct btree_op *op,
873 struct bkey *k)
874{
875 struct btree *b;
876
877 trace_bcache_btree_cache_cannibalize(c);
878
879 if (mca_cannibalize_lock(c, op))
880 return ERR_PTR(-EINTR);
881
882 list_for_each_entry_reverse(b, &c->btree_cache, list)
883 if (!mca_reap(b, btree_order(k), false))
884 return b;
885
886 list_for_each_entry_reverse(b, &c->btree_cache, list)
887 if (!mca_reap(b, btree_order(k), true))
888 return b;
889
890 WARN(1, "btree cache cannibalize failed\n");
891 return ERR_PTR(-ENOMEM);
892}
893
894/*
895 * We can only have one thread cannibalizing other cached btree nodes at a time,
896 * or we'll deadlock. We use an open coded mutex to ensure that, which
897 * mca_cannibalize_lock() takes. This means every time we unlock the root of
898 * the btree, we need to release this lock if we have it held.
899 */
900static void bch_cannibalize_unlock(struct cache_set *c)
901{
902 if (c->btree_cache_alloc_lock == current) {
903 c->btree_cache_alloc_lock = NULL;
904 wake_up(&c->btree_cache_wait);
905 }
906}
907
908static struct btree *mca_alloc(struct cache_set *c, struct btree_op *op,
909 struct bkey *k, int level)
910{
911 struct btree *b;
912
913 BUG_ON(current->bio_list);
914
915 lockdep_assert_held(&c->bucket_lock);
916
917 if (mca_find(c, k))
918 return NULL;
919
920 /* btree_free() doesn't free memory; it sticks the node on the end of
921 * the list. Check if there's any freed nodes there:
922 */
923 list_for_each_entry(b, &c->btree_cache_freeable, list)
924 if (!mca_reap(b, btree_order(k), false))
925 goto out;
926
927 /* We never free struct btree itself, just the memory that holds the on
928 * disk node. Check the freed list before allocating a new one:
929 */
930 list_for_each_entry(b, &c->btree_cache_freed, list)
931 if (!mca_reap(b, 0, false)) {
932 mca_data_alloc(b, k, __GFP_NOWARN|GFP_NOIO);
933 if (!b->keys.set[0].data)
934 goto err;
935 else
936 goto out;
937 }
938
939 b = mca_bucket_alloc(c, k, __GFP_NOWARN|GFP_NOIO);
940 if (!b)
941 goto err;
942
943 BUG_ON(!down_write_trylock(&b->lock));
944 if (!b->keys.set->data)
945 goto err;
946out:
947 BUG_ON(b->io_mutex.count != 1);
948
949 bkey_copy(&b->key, k);
950 list_move(&b->list, &c->btree_cache);
951 hlist_del_init_rcu(&b->hash);
952 hlist_add_head_rcu(&b->hash, mca_hash(c, k));
953
954 lock_set_subclass(&b->lock.dep_map, level + 1, _THIS_IP_);
955 b->parent = (void *) ~0UL;
956 b->flags = 0;
957 b->written = 0;
958 b->level = level;
959
960 if (!b->level)
961 bch_btree_keys_init(&b->keys, &bch_extent_keys_ops,
962 &b->c->expensive_debug_checks);
963 else
964 bch_btree_keys_init(&b->keys, &bch_btree_keys_ops,
965 &b->c->expensive_debug_checks);
966
967 return b;
968err:
969 if (b)
970 rw_unlock(true, b);
971
972 b = mca_cannibalize(c, op, k);
973 if (!IS_ERR(b))
974 goto out;
975
976 return b;
977}
978
979/*
980 * bch_btree_node_get - find a btree node in the cache and lock it, reading it
981 * in from disk if necessary.
982 *
983 * If IO is necessary and running under generic_make_request, returns -EAGAIN.
984 *
985 * The btree node will have either a read or a write lock held, depending on
986 * level and op->lock.
987 */
988struct btree *bch_btree_node_get(struct cache_set *c, struct btree_op *op,
989 struct bkey *k, int level, bool write,
990 struct btree *parent)
991{
992 int i = 0;
993 struct btree *b;
994
995 BUG_ON(level < 0);
996retry:
997 b = mca_find(c, k);
998
999 if (!b) {
1000 if (current->bio_list)
1001 return ERR_PTR(-EAGAIN);
1002
1003 mutex_lock(&c->bucket_lock);
1004 b = mca_alloc(c, op, k, level);
1005 mutex_unlock(&c->bucket_lock);
1006
1007 if (!b)
1008 goto retry;
1009 if (IS_ERR(b))
1010 return b;
1011
1012 bch_btree_node_read(b);
1013
1014 if (!write)
1015 downgrade_write(&b->lock);
1016 } else {
1017 rw_lock(write, b, level);
1018 if (PTR_HASH(c, &b->key) != PTR_HASH(c, k)) {
1019 rw_unlock(write, b);
1020 goto retry;
1021 }
1022 BUG_ON(b->level != level);
1023 }
1024
1025 if (btree_node_io_error(b)) {
1026 rw_unlock(write, b);
1027 return ERR_PTR(-EIO);
1028 }
1029
1030 BUG_ON(!b->written);
1031
1032 b->parent = parent;
1033 b->accessed = 1;
1034
1035 for (; i <= b->keys.nsets && b->keys.set[i].size; i++) {
1036 prefetch(b->keys.set[i].tree);
1037 prefetch(b->keys.set[i].data);
1038 }
1039
1040 for (; i <= b->keys.nsets; i++)
1041 prefetch(b->keys.set[i].data);
1042
1043 return b;
1044}
1045
1046static void btree_node_prefetch(struct btree *parent, struct bkey *k)
1047{
1048 struct btree *b;
1049
1050 mutex_lock(&parent->c->bucket_lock);
1051 b = mca_alloc(parent->c, NULL, k, parent->level - 1);
1052 mutex_unlock(&parent->c->bucket_lock);
1053
1054 if (!IS_ERR_OR_NULL(b)) {
1055 b->parent = parent;
1056 bch_btree_node_read(b);
1057 rw_unlock(true, b);
1058 }
1059}
1060
1061/* Btree alloc */
1062
1063static void btree_node_free(struct btree *b)
1064{
1065 trace_bcache_btree_node_free(b);
1066
1067 BUG_ON(b == b->c->root);
1068
1069 mutex_lock(&b->write_lock);
1070
1071 if (btree_node_dirty(b))
1072 btree_complete_write(b, btree_current_write(b));
1073 clear_bit(BTREE_NODE_dirty, &b->flags);
1074
1075 mutex_unlock(&b->write_lock);
1076
1077 cancel_delayed_work(&b->work);
1078
1079 mutex_lock(&b->c->bucket_lock);
1080 bch_bucket_free(b->c, &b->key);
1081 mca_bucket_free(b);
1082 mutex_unlock(&b->c->bucket_lock);
1083}
1084
1085struct btree *__bch_btree_node_alloc(struct cache_set *c, struct btree_op *op,
1086 int level, bool wait,
1087 struct btree *parent)
1088{
1089 BKEY_PADDED(key) k;
1090 struct btree *b = ERR_PTR(-EAGAIN);
1091
1092 mutex_lock(&c->bucket_lock);
1093retry:
1094 if (__bch_bucket_alloc_set(c, RESERVE_BTREE, &k.key, 1, wait))
1095 goto err;
1096
1097 bkey_put(c, &k.key);
1098 SET_KEY_SIZE(&k.key, c->btree_pages * PAGE_SECTORS);
1099
1100 b = mca_alloc(c, op, &k.key, level);
1101 if (IS_ERR(b))
1102 goto err_free;
1103
1104 if (!b) {
1105 cache_bug(c,
1106 "Tried to allocate bucket that was in btree cache");
1107 goto retry;
1108 }
1109
1110 b->accessed = 1;
1111 b->parent = parent;
1112 bch_bset_init_next(&b->keys, b->keys.set->data, bset_magic(&b->c->sb));
1113
1114 mutex_unlock(&c->bucket_lock);
1115
1116 trace_bcache_btree_node_alloc(b);
1117 return b;
1118err_free:
1119 bch_bucket_free(c, &k.key);
1120err:
1121 mutex_unlock(&c->bucket_lock);
1122
1123 trace_bcache_btree_node_alloc_fail(c);
1124 return b;
1125}
1126
1127static struct btree *bch_btree_node_alloc(struct cache_set *c,
1128 struct btree_op *op, int level,
1129 struct btree *parent)
1130{
1131 return __bch_btree_node_alloc(c, op, level, op != NULL, parent);
1132}
1133
1134static struct btree *btree_node_alloc_replacement(struct btree *b,
1135 struct btree_op *op)
1136{
1137 struct btree *n = bch_btree_node_alloc(b->c, op, b->level, b->parent);
1138
1139 if (!IS_ERR_OR_NULL(n)) {
1140 mutex_lock(&n->write_lock);
1141 bch_btree_sort_into(&b->keys, &n->keys, &b->c->sort);
1142 bkey_copy_key(&n->key, &b->key);
1143 mutex_unlock(&n->write_lock);
1144 }
1145
1146 return n;
1147}
1148
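/*
 * Build the key that marks @b as freed: keep its pointers, zero out the
 * key field (ZERO_KEY) and bump each pointer's generation via
 * bch_inc_gen(), so the old pointers to those buckets become stale; the
 * caller then inserts this key into the parent node.
 */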
1149static void make_btree_freeing_key(struct btree *b, struct bkey *k)
1150{
1151 unsigned int i;
1152
1153 mutex_lock(&b->c->bucket_lock);
1154
1155 atomic_inc(&b->c->prio_blocked);
1156
1157 bkey_copy(k, &b->key);
1158 bkey_copy_key(k, &ZERO_KEY);
1159
1160 for (i = 0; i < KEY_PTRS(k); i++)
1161 SET_PTR_GEN(k, i,
1162 bch_inc_gen(PTR_CACHE(b->c, &b->key, i),
1163 PTR_BUCKET(b->c, &b->key, i)));
1164
1165 mutex_unlock(&b->c->bucket_lock);
1166}
1167
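/*
 * Rough intuition for the reserve below (a sketch, not an exact proof):
 * an insert into @b can cascade splits up towards the root, and each
 * level in between may need fresh buckets for the new nodes, hence a
 * reserve that scales as (root->level - b->level) * 2 + 1 per cache.
 */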
1168static int btree_check_reserve(struct btree *b, struct btree_op *op)
1169{
1170 struct cache_set *c = b->c;
1171 struct cache *ca;
1172 unsigned int i, reserve = (c->root->level - b->level) * 2 + 1;
1173
1174 mutex_lock(&c->bucket_lock);
1175
1176 for_each_cache(ca, c, i)
1177 if (fifo_used(&ca->free[RESERVE_BTREE]) < reserve) {
1178 if (op)
1179 prepare_to_wait(&c->btree_cache_wait, &op->wait,
1180 TASK_UNINTERRUPTIBLE);
1181 mutex_unlock(&c->bucket_lock);
1182 return -EINTR;
1183 }
1184
1185 mutex_unlock(&c->bucket_lock);
1186
1187 return mca_cannibalize_lock(b->c, op);
1188}
1189
1190/* Garbage collection */
1191
1192static uint8_t __bch_btree_mark_key(struct cache_set *c, int level,
1193 struct bkey *k)
1194{
1195 uint8_t stale = 0;
1196 unsigned int i;
1197 struct bucket *g;
1198
1199 /*
1200 * ptr_invalid() can't return true for the keys that mark btree nodes as
1201 * freed, but since ptr_bad() returns true we'll never actually use them
1202 * for anything and thus we don't want to mark their pointers here
1203 */
1204 if (!bkey_cmp(k, &ZERO_KEY))
1205 return stale;
1206
1207 for (i = 0; i < KEY_PTRS(k); i++) {
1208 if (!ptr_available(c, k, i))
1209 continue;
1210
1211 g = PTR_BUCKET(c, k, i);
1212
1213 if (gen_after(g->last_gc, PTR_GEN(k, i)))
1214 g->last_gc = PTR_GEN(k, i);
1215
1216 if (ptr_stale(c, k, i)) {
1217 stale = max(stale, ptr_stale(c, k, i));
1218 continue;
1219 }
1220
1221 cache_bug_on(GC_MARK(g) &&
1222 (GC_MARK(g) == GC_MARK_METADATA) != (level != 0),
1223 c, "inconsistent ptrs: mark = %llu, level = %i",
1224 GC_MARK(g), level);
1225
1226 if (level)
1227 SET_GC_MARK(g, GC_MARK_METADATA);
1228 else if (KEY_DIRTY(k))
1229 SET_GC_MARK(g, GC_MARK_DIRTY);
1230 else if (!GC_MARK(g))
1231 SET_GC_MARK(g, GC_MARK_RECLAIMABLE);
1232
1233 /* guard against overflow */
1234 SET_GC_SECTORS_USED(g, min_t(unsigned int,
1235 GC_SECTORS_USED(g) + KEY_SIZE(k),
1236 MAX_GC_SECTORS_USED));
1237
1238 BUG_ON(!GC_SECTORS_USED(g));
1239 }
1240
1241 return stale;
1242}
1243
1244#define btree_mark_key(b, k) __bch_btree_mark_key(b->c, b->level, k)
1245
1246void bch_initial_mark_key(struct cache_set *c, int level, struct bkey *k)
1247{
1248 unsigned int i;
1249
1250 for (i = 0; i < KEY_PTRS(k); i++)
1251 if (ptr_available(c, k, i) &&
1252 !ptr_stale(c, k, i)) {
1253 struct bucket *b = PTR_BUCKET(c, k, i);
1254
1255 b->gen = PTR_GEN(k, i);
1256
1257 if (level && bkey_cmp(k, &ZERO_KEY))
1258 b->prio = BTREE_PRIO;
1259 else if (!level && b->prio == BTREE_PRIO)
1260 b->prio = INITIAL_PRIO;
1261 }
1262
1263 __bch_btree_mark_key(c, level, k);
1264}
1265
1266void bch_update_bucket_in_use(struct cache_set *c, struct gc_stat *stats)
1267{
1268 stats->in_use = (c->nbuckets - c->avail_nbuckets) * 100 / c->nbuckets;
1269}
1270
1271static bool btree_gc_mark_node(struct btree *b, struct gc_stat *gc)
1272{
1273 uint8_t stale = 0;
1274 unsigned int keys = 0, good_keys = 0;
1275 struct bkey *k;
1276 struct btree_iter iter;
1277 struct bset_tree *t;
1278
1279 gc->nodes++;
1280
1281 for_each_key_filter(&b->keys, k, &iter, bch_ptr_invalid) {
1282 stale = max(stale, btree_mark_key(b, k));
1283 keys++;
1284
1285 if (bch_ptr_bad(&b->keys, k))
1286 continue;
1287
1288 gc->key_bytes += bkey_u64s(k);
1289 gc->nkeys++;
1290 good_keys++;
1291
1292 gc->data += KEY_SIZE(k);
1293 }
1294
1295 for (t = b->keys.set; t <= &b->keys.set[b->keys.nsets]; t++)
1296 btree_bug_on(t->size &&
1297 bset_written(&b->keys, t) &&
1298 bkey_cmp(&b->key, &t->end) < 0,
1299 b, "found short btree key in gc");
1300
1301 if (b->c->gc_always_rewrite)
1302 return true;
1303
1304 if (stale > 10)
1305 return true;
1306
1307 if ((keys - good_keys) * 2 > keys)
1308 return true;
1309
1310 return false;
1311}
1312
1313#define GC_MERGE_NODES 4U
1314
1315struct gc_merge_info {
1316 struct btree *b;
1317 unsigned int keys;
1318};
1319
1320static int bch_btree_insert_node(struct btree *b, struct btree_op *op,
1321 struct keylist *insert_keys,
1322 atomic_t *journal_ref,
1323 struct bkey *replace_key);
1324
1325static int btree_gc_coalesce(struct btree *b, struct btree_op *op,
1326 struct gc_stat *gc, struct gc_merge_info *r)
1327{
1328 unsigned int i, nodes = 0, keys = 0, blocks;
1329 struct btree *new_nodes[GC_MERGE_NODES];
1330 struct keylist keylist;
1331 struct closure cl;
1332 struct bkey *k;
1333
1334 bch_keylist_init(&keylist);
1335
1336 if (btree_check_reserve(b, NULL))
1337 return 0;
1338
1339 memset(new_nodes, 0, sizeof(new_nodes));
1340 closure_init_stack(&cl);
1341
1342 while (nodes < GC_MERGE_NODES && !IS_ERR_OR_NULL(r[nodes].b))
1343 keys += r[nodes++].keys;
1344
1345 blocks = btree_default_blocks(b->c) * 2 / 3;
1346
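	/*
	 * Only coalesce if all the keys from these nodes would fit into one
	 * node fewer, with each new node at most roughly 2/3 full, so the
	 * merged nodes still have room for future insertions.
	 */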
1347 if (nodes < 2 ||
1348 __set_blocks(b->keys.set[0].data, keys,
1349 block_bytes(b->c)) > blocks * (nodes - 1))
1350 return 0;
1351
1352 for (i = 0; i < nodes; i++) {
1353 new_nodes[i] = btree_node_alloc_replacement(r[i].b, NULL);
1354 if (IS_ERR_OR_NULL(new_nodes[i]))
1355 goto out_nocoalesce;
1356 }
1357
1358 /*
1359 * We have to check the reserve here, after we've allocated our new
1360 * nodes, to make sure the insert below will succeed - we also check
1361 * before as an optimization to potentially avoid a bunch of expensive
1362 * allocs/sorts
1363 */
1364 if (btree_check_reserve(b, NULL))
1365 goto out_nocoalesce;
1366
1367 for (i = 0; i < nodes; i++)
1368 mutex_lock(&new_nodes[i]->write_lock);
1369
1370 for (i = nodes - 1; i > 0; --i) {
1371 struct bset *n1 = btree_bset_first(new_nodes[i]);
1372 struct bset *n2 = btree_bset_first(new_nodes[i - 1]);
1373 struct bkey *k, *last = NULL;
1374
1375 keys = 0;
1376
1377 if (i > 1) {
1378 for (k = n2->start;
1379 k < bset_bkey_last(n2);
1380 k = bkey_next(k)) {
1381 if (__set_blocks(n1, n1->keys + keys +
1382 bkey_u64s(k),
1383 block_bytes(b->c)) > blocks)
1384 break;
1385
1386 last = k;
1387 keys += bkey_u64s(k);
1388 }
1389 } else {
1390 /*
1391 * Last node we're not getting rid of - we're getting
1392 * rid of the node at r[0]. Have to try and fit all of
1393 * the remaining keys into this node; we can't ensure
1394 * they will always fit due to rounding and variable
1395 * length keys (shouldn't be possible in practice,
1396 * though)
1397 */
1398 if (__set_blocks(n1, n1->keys + n2->keys,
1399 block_bytes(b->c)) >
1400 btree_blocks(new_nodes[i]))
1401 goto out_nocoalesce;
1402
1403 keys = n2->keys;
1404 /* Take the key of the node we're getting rid of */
1405 last = &r->b->key;
1406 }
1407
1408 BUG_ON(__set_blocks(n1, n1->keys + keys, block_bytes(b->c)) >
1409 btree_blocks(new_nodes[i]));
1410
1411 if (last)
1412 bkey_copy_key(&new_nodes[i]->key, last);
1413
1414 memcpy(bset_bkey_last(n1),
1415 n2->start,
1416 (void *) bset_bkey_idx(n2, keys) - (void *) n2->start);
1417
1418 n1->keys += keys;
1419 r[i].keys = n1->keys;
1420
1421 memmove(n2->start,
1422 bset_bkey_idx(n2, keys),
1423 (void *) bset_bkey_last(n2) -
1424 (void *) bset_bkey_idx(n2, keys));
1425
1426 n2->keys -= keys;
1427
1428 if (__bch_keylist_realloc(&keylist,
1429 bkey_u64s(&new_nodes[i]->key)))
1430 goto out_nocoalesce;
1431
1432 bch_btree_node_write(new_nodes[i], &cl);
1433 bch_keylist_add(&keylist, &new_nodes[i]->key);
1434 }
1435
1436 for (i = 0; i < nodes; i++)
1437 mutex_unlock(&new_nodes[i]->write_lock);
1438
1439 closure_sync(&cl);
1440
1441 /* We emptied out this node */
1442 BUG_ON(btree_bset_first(new_nodes[0])->keys);
1443 btree_node_free(new_nodes[0]);
1444 rw_unlock(true, new_nodes[0]);
1445 new_nodes[0] = NULL;
1446
1447 for (i = 0; i < nodes; i++) {
1448 if (__bch_keylist_realloc(&keylist, bkey_u64s(&r[i].b->key)))
1449 goto out_nocoalesce;
1450
1451 make_btree_freeing_key(r[i].b, keylist.top);
1452 bch_keylist_push(&keylist);
1453 }
1454
1455 bch_btree_insert_node(b, op, &keylist, NULL, NULL);
1456 BUG_ON(!bch_keylist_empty(&keylist));
1457
1458 for (i = 0; i < nodes; i++) {
1459 btree_node_free(r[i].b);
1460 rw_unlock(true, r[i].b);
1461
1462 r[i].b = new_nodes[i];
1463 }
1464
1465 memmove(r, r + 1, sizeof(r[0]) * (nodes - 1));
1466 r[nodes - 1].b = ERR_PTR(-EINTR);
1467
1468 trace_bcache_btree_gc_coalesce(nodes);
1469 gc->nodes--;
1470
1471 bch_keylist_free(&keylist);
1472
1473 /* Invalidated our iterator */
1474 return -EINTR;
1475
1476out_nocoalesce:
1477 closure_sync(&cl);
1478 bch_keylist_free(&keylist);
1479
1480 while ((k = bch_keylist_pop(&keylist)))
1481 if (!bkey_cmp(k, &ZERO_KEY))
1482 atomic_dec(&b->c->prio_blocked);
1483
1484 for (i = 0; i < nodes; i++)
1485 if (!IS_ERR_OR_NULL(new_nodes[i])) {
1486 btree_node_free(new_nodes[i]);
1487 rw_unlock(true, new_nodes[i]);
1488 }
1489 return 0;
1490}
1491
1492static int btree_gc_rewrite_node(struct btree *b, struct btree_op *op,
1493 struct btree *replace)
1494{
1495 struct keylist keys;
1496 struct btree *n;
1497
1498 if (btree_check_reserve(b, NULL))
1499 return 0;
1500
1501 n = btree_node_alloc_replacement(replace, NULL);
1502
1503 /* recheck reserve after allocating replacement node */
1504 if (btree_check_reserve(b, NULL)) {
1505 btree_node_free(n);
1506 rw_unlock(true, n);
1507 return 0;
1508 }
1509
1510 bch_btree_node_write_sync(n);
1511
1512 bch_keylist_init(&keys);
1513 bch_keylist_add(&keys, &n->key);
1514
1515 make_btree_freeing_key(replace, keys.top);
1516 bch_keylist_push(&keys);
1517
1518 bch_btree_insert_node(b, op, &keys, NULL, NULL);
1519 BUG_ON(!bch_keylist_empty(&keys));
1520
1521 btree_node_free(replace);
1522 rw_unlock(true, n);
1523
1524 /* Invalidated our iterator */
1525 return -EINTR;
1526}
1527
1528static unsigned int btree_gc_count_keys(struct btree *b)
1529{
1530 struct bkey *k;
1531 struct btree_iter iter;
1532 unsigned int ret = 0;
1533
1534 for_each_key_filter(&b->keys, k, &iter, bch_ptr_bad)
1535 ret += bkey_u64s(k);
1536
1537 return ret;
1538}
1539
1540static size_t btree_gc_min_nodes(struct cache_set *c)
1541{
1542 size_t min_nodes;
1543
1544 /*
1545 * Incremental GC pauses for 100ms whenever front-side I/O
1546 * arrives. If GC only processed a constant number (100) of
1547 * nodes each time, then with many btree nodes GC would take
1548 * a very long time, and the front-side I/Os would run out
1549 * of buckets (since no new bucket can be allocated during
1550 * GC) and be blocked again.
1551 * So GC should not process a constant number of nodes, but
1552 * a number that varies with the total number of btree nodes:
1553 * the work is divided into a constant number (MAX_GC_TIMES,
1554 * i.e. 100) of passes, so with many btree nodes GC processes
1555 * more nodes each time, and fewer otherwise, but never less
1556 * than MIN_GC_NODES.
1557 */
1558 min_nodes = c->gc_stats.nodes / MAX_GC_TIMES;
1559 if (min_nodes < MIN_GC_NODES)
1560 min_nodes = MIN_GC_NODES;
1561
1562 return min_nodes;
1563}
1564
1565
1566static int btree_gc_recurse(struct btree *b, struct btree_op *op,
1567 struct closure *writes, struct gc_stat *gc)
1568{
1569 int ret = 0;
1570 bool should_rewrite;
1571 struct bkey *k;
1572 struct btree_iter iter;
1573 struct gc_merge_info r[GC_MERGE_NODES];
1574 struct gc_merge_info *i, *last = r + ARRAY_SIZE(r) - 1;
1575
1576 bch_btree_iter_init(&b->keys, &iter, &b->c->gc_done);
1577
1578 for (i = r; i < r + ARRAY_SIZE(r); i++)
1579 i->b = ERR_PTR(-EINTR);
1580
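	/*
	 * r[] is a sliding window over the GC_MERGE_NODES most recently
	 * visited children: r[0] holds the node just read, and "last"
	 * (r[GC_MERGE_NODES - 1]) holds the oldest, which gets marked,
	 * possibly rewritten or recursed into, and then slides out as the
	 * window shifts with the memmove() below.
	 */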
1581 while (1) {
1582 k = bch_btree_iter_next_filter(&iter, &b->keys, bch_ptr_bad);
1583 if (k) {
1584 r->b = bch_btree_node_get(b->c, op, k, b->level - 1,
1585 true, b);
1586 if (IS_ERR(r->b)) {
1587 ret = PTR_ERR(r->b);
1588 break;
1589 }
1590
1591 r->keys = btree_gc_count_keys(r->b);
1592
1593 ret = btree_gc_coalesce(b, op, gc, r);
1594 if (ret)
1595 break;
1596 }
1597
1598 if (!last->b)
1599 break;
1600
1601 if (!IS_ERR(last->b)) {
1602 should_rewrite = btree_gc_mark_node(last->b, gc);
1603 if (should_rewrite) {
1604 ret = btree_gc_rewrite_node(b, op, last->b);
1605 if (ret)
1606 break;
1607 }
1608
1609 if (last->b->level) {
1610 ret = btree_gc_recurse(last->b, op, writes, gc);
1611 if (ret)
1612 break;
1613 }
1614
1615 bkey_copy_key(&b->c->gc_done, &last->b->key);
1616
1617 /*
1618 * Must flush leaf nodes before gc ends, since replace
1619 * operations aren't journalled
1620 */
1621 mutex_lock(&last->b->write_lock);
1622 if (btree_node_dirty(last->b))
1623 bch_btree_node_write(last->b, writes);
1624 mutex_unlock(&last->b->write_lock);
1625 rw_unlock(true, last->b);
1626 }
1627
1628 memmove(r + 1, r, sizeof(r[0]) * (GC_MERGE_NODES - 1));
1629 r->b = NULL;
1630
1631 if (atomic_read(&b->c->search_inflight) &&
1632 gc->nodes >= gc->nodes_pre + btree_gc_min_nodes(b->c)) {
1633 gc->nodes_pre = gc->nodes;
1634 ret = -EAGAIN;
1635 break;
1636 }
1637
1638 if (need_resched()) {
1639 ret = -EAGAIN;
1640 break;
1641 }
1642 }
1643
1644 for (i = r; i < r + ARRAY_SIZE(r); i++)
1645 if (!IS_ERR_OR_NULL(i->b)) {
1646 mutex_lock(&i->b->write_lock);
1647 if (btree_node_dirty(i->b))
1648 bch_btree_node_write(i->b, writes);
1649 mutex_unlock(&i->b->write_lock);
1650 rw_unlock(true, i->b);
1651 }
1652
1653 return ret;
1654}
1655
1656static int bch_btree_gc_root(struct btree *b, struct btree_op *op,
1657 struct closure *writes, struct gc_stat *gc)
1658{
1659 struct btree *n = NULL;
1660 int ret = 0;
1661 bool should_rewrite;
1662
1663 should_rewrite = btree_gc_mark_node(b, gc);
1664 if (should_rewrite) {
1665 n = btree_node_alloc_replacement(b, NULL);
1666
1667 if (!IS_ERR_OR_NULL(n)) {
1668 bch_btree_node_write_sync(n);
1669
1670 bch_btree_set_root(n);
1671 btree_node_free(b);
1672 rw_unlock(true, n);
1673
1674 return -EINTR;
1675 }
1676 }
1677
1678 __bch_btree_mark_key(b->c, b->level + 1, &b->key);
1679
1680 if (b->level) {
1681 ret = btree_gc_recurse(b, op, writes, gc);
1682 if (ret)
1683 return ret;
1684 }
1685
1686 bkey_copy_key(&b->c->gc_done, &b->key);
1687
1688 return ret;
1689}
1690
1691static void btree_gc_start(struct cache_set *c)
1692{
1693 struct cache *ca;
1694 struct bucket *b;
1695 unsigned int i;
1696
1697 if (!c->gc_mark_valid)
1698 return;
1699
1700 mutex_lock(&c->bucket_lock);
1701
1702 c->gc_mark_valid = 0;
1703 c->gc_done = ZERO_KEY;
1704
1705 for_each_cache(ca, c, i)
1706 for_each_bucket(b, ca) {
1707 b->last_gc = b->gen;
1708 if (!atomic_read(&b->pin)) {
1709 SET_GC_MARK(b, 0);
1710 SET_GC_SECTORS_USED(b, 0);
1711 }
1712 }
1713
1714 mutex_unlock(&c->bucket_lock);
1715}
1716
1717static void bch_btree_gc_finish(struct cache_set *c)
1718{
1719 struct bucket *b;
1720 struct cache *ca;
1721 unsigned int i;
1722
1723 mutex_lock(&c->bucket_lock);
1724
1725 set_gc_sectors(c);
1726 c->gc_mark_valid = 1;
1727 c->need_gc = 0;
1728
1729 for (i = 0; i < KEY_PTRS(&c->uuid_bucket); i++)
1730 SET_GC_MARK(PTR_BUCKET(c, &c->uuid_bucket, i),
1731 GC_MARK_METADATA);
1732
1733 /* don't reclaim buckets to which writeback keys point */
1734 rcu_read_lock();
1735 for (i = 0; i < c->devices_max_used; i++) {
1736 struct bcache_device *d = c->devices[i];
1737 struct cached_dev *dc;
1738 struct keybuf_key *w, *n;
1739 unsigned int j;
1740
1741 if (!d || UUID_FLASH_ONLY(&c->uuids[i]))
1742 continue;
1743 dc = container_of(d, struct cached_dev, disk);
1744
1745 spin_lock(&dc->writeback_keys.lock);
1746 rbtree_postorder_for_each_entry_safe(w, n,
1747 &dc->writeback_keys.keys, node)
1748 for (j = 0; j < KEY_PTRS(&w->key); j++)
1749 SET_GC_MARK(PTR_BUCKET(c, &w->key, j),
1750 GC_MARK_DIRTY);
1751 spin_unlock(&dc->writeback_keys.lock);
1752 }
1753 rcu_read_unlock();
1754
1755 c->avail_nbuckets = 0;
1756 for_each_cache(ca, c, i) {
1757 uint64_t *i;
1758
1759 ca->invalidate_needs_gc = 0;
1760
1761 for (i = ca->sb.d; i < ca->sb.d + ca->sb.keys; i++)
1762 SET_GC_MARK(ca->buckets + *i, GC_MARK_METADATA);
1763
1764 for (i = ca->prio_buckets;
1765 i < ca->prio_buckets + prio_buckets(ca) * 2; i++)
1766 SET_GC_MARK(ca->buckets + *i, GC_MARK_METADATA);
1767
1768 for_each_bucket(b, ca) {
1769 c->need_gc = max(c->need_gc, bucket_gc_gen(b));
1770
1771 if (atomic_read(&b->pin))
1772 continue;
1773
1774 BUG_ON(!GC_MARK(b) && GC_SECTORS_USED(b));
1775
1776 if (!GC_MARK(b) || GC_MARK(b) == GC_MARK_RECLAIMABLE)
1777 c->avail_nbuckets++;
1778 }
1779 }
1780
1781 mutex_unlock(&c->bucket_lock);
1782}
1783
1784static void bch_btree_gc(struct cache_set *c)
1785{
1786 int ret;
1787 struct gc_stat stats;
1788 struct closure writes;
1789 struct btree_op op;
1790 uint64_t start_time = local_clock();
1791
1792 trace_bcache_gc_start(c);
1793
1794 memset(&stats, 0, sizeof(struct gc_stat));
1795 closure_init_stack(&writes);
1796 bch_btree_op_init(&op, SHRT_MAX);
1797
1798 btree_gc_start(c);
1799
1800 /* if CACHE_SET_IO_DISABLE set, gc thread should stop too */
1801 do {
1802 ret = btree_root(gc_root, c, &op, &writes, &stats);
1803 closure_sync(&writes);
1804 cond_resched();
1805
1806 if (ret == -EAGAIN)
1807 schedule_timeout_interruptible(msecs_to_jiffies
1808 (GC_SLEEP_MS));
1809 else if (ret)
1810 pr_warn("gc failed!");
1811 } while (ret && !test_bit(CACHE_SET_IO_DISABLE, &c->flags));
1812
1813 bch_btree_gc_finish(c);
1814 wake_up_allocators(c);
1815
1816 bch_time_stats_update(&c->btree_gc_time, start_time);
1817
1818 stats.key_bytes *= sizeof(uint64_t);
1819 stats.data <<= 9;
1820 bch_update_bucket_in_use(c, &stats);
1821 memcpy(&c->gc_stats, &stats, sizeof(struct gc_stat));
1822
1823 trace_bcache_gc_end(c);
1824
1825 bch_moving_gc(c);
1826}
1827
1828static bool gc_should_run(struct cache_set *c)
1829{
1830 struct cache *ca;
1831 unsigned int i;
1832
1833 for_each_cache(ca, c, i)
1834 if (ca->invalidate_needs_gc)
1835 return true;
1836
1837 if (atomic_read(&c->sectors_to_gc) < 0)
1838 return true;
1839
1840 return false;
1841}
1842
1843static int bch_gc_thread(void *arg)
1844{
1845 struct cache_set *c = arg;
1846
1847 while (1) {
1848 wait_event_interruptible(c->gc_wait,
1849 kthread_should_stop() ||
1850 test_bit(CACHE_SET_IO_DISABLE, &c->flags) ||
1851 gc_should_run(c));
1852
1853 if (kthread_should_stop() ||
1854 test_bit(CACHE_SET_IO_DISABLE, &c->flags))
1855 break;
1856
1857 set_gc_sectors(c);
1858 bch_btree_gc(c);
1859 }
1860
1861 wait_for_kthread_stop();
1862 return 0;
1863}
1864
1865int bch_gc_thread_start(struct cache_set *c)
1866{
1867 c->gc_thread = kthread_run(bch_gc_thread, c, "bcache_gc");
1868 return PTR_ERR_OR_ZERO(c->gc_thread);
1869}
1870
1871/* Initial partial gc */
1872
1873static int bch_btree_check_recurse(struct btree *b, struct btree_op *op)
1874{
1875 int ret = 0;
1876 struct bkey *k, *p = NULL;
1877 struct btree_iter iter;
1878
1879 for_each_key_filter(&b->keys, k, &iter, bch_ptr_invalid)
1880 bch_initial_mark_key(b->c, b->level, k);
1881
1882 bch_initial_mark_key(b->c, b->level + 1, &b->key);
1883
1884 if (b->level) {
1885 bch_btree_iter_init(&b->keys, &iter, NULL);
1886
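		/*
		 * Walk the keys with one key of lookahead: the child for the
		 * current key k is pulled into the btree cache first, and we
		 * then recurse into the child for the previous key p.
		 */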
1887 do {
1888 k = bch_btree_iter_next_filter(&iter, &b->keys,
1889 bch_ptr_bad);
1890 if (k) {
1891 btree_node_prefetch(b, k);
1892 /*
1893 * initialize c->gc_stats.nodes
1894 * for incremental GC
1895 */
1896 b->c->gc_stats.nodes++;
1897 }
1898
1899 if (p)
1900 ret = btree(check_recurse, p, b, op);
1901
1902 p = k;
1903 } while (p && !ret);
1904 }
1905
1906 return ret;
1907}
1908
1909int bch_btree_check(struct cache_set *c)
1910{
1911 struct btree_op op;
1912
1913 bch_btree_op_init(&op, SHRT_MAX);
1914
1915 return btree_root(check_recurse, c, &op);
1916}
1917
1918void bch_initial_gc_finish(struct cache_set *c)
1919{
1920 struct cache *ca;
1921 struct bucket *b;
1922 unsigned int i;
1923
1924 bch_btree_gc_finish(c);
1925
1926 mutex_lock(&c->bucket_lock);
1927
1928 /*
1929 * We need to put some unused buckets directly on the prio freelist in
1930 * order to get the allocator thread started - it needs freed buckets in
1931 * order to rewrite the prios and gens, and it needs to rewrite prios
1932 * and gens in order to free buckets.
1933 *
1934 * This is only safe for buckets that have no live data in them, which
1935 * there should always be some of.
1936 */
1937 for_each_cache(ca, c, i) {
1938 for_each_bucket(b, ca) {
1939 if (fifo_full(&ca->free[RESERVE_PRIO]) &&
1940 fifo_full(&ca->free[RESERVE_BTREE]))
1941 break;
1942
1943 if (bch_can_invalidate_bucket(ca, b) &&
1944 !GC_MARK(b)) {
1945 __bch_invalidate_one_bucket(ca, b);
1946 if (!fifo_push(&ca->free[RESERVE_PRIO],
1947 b - ca->buckets))
1948 fifo_push(&ca->free[RESERVE_BTREE],
1949 b - ca->buckets);
1950 }
1951 }
1952 }
1953
1954 mutex_unlock(&c->bucket_lock);
1955}
1956
1957/* Btree insertion */
1958
1959static bool btree_insert_key(struct btree *b, struct bkey *k,
1960 struct bkey *replace_key)
1961{
1962 unsigned int status;
1963
1964 BUG_ON(bkey_cmp(k, &b->key) > 0);
1965
1966 status = bch_btree_insert_key(&b->keys, k, replace_key);
1967 if (status != BTREE_INSERT_STATUS_NO_INSERT) {
1968 bch_check_keys(&b->keys, "%u for %s", status,
1969 replace_key ? "replace" : "insert");
1970
1971 trace_bcache_btree_insert_key(b, k, replace_key != NULL,
1972 status);
1973 return true;
1974 } else
1975 return false;
1976}
1977
1978static size_t insert_u64s_remaining(struct btree *b)
1979{
1980 long ret = bch_btree_keys_u64s_remaining(&b->keys);
1981
1982 /*
1983 * Might land in the middle of an existing extent and have to split it
1984 */
1985 if (b->keys.ops->is_extents)
1986 ret -= KEY_MAX_U64S;
1987
1988 return max(ret, 0L);
1989}
1990
1991static bool bch_btree_insert_keys(struct btree *b, struct btree_op *op,
1992 struct keylist *insert_keys,
1993 struct bkey *replace_key)
1994{
1995 bool ret = false;
1996 int oldsize = bch_count_data(&b->keys);
1997
1998 while (!bch_keylist_empty(insert_keys)) {
1999 struct bkey *k = insert_keys->keys;
2000
2001 if (bkey_u64s(k) > insert_u64s_remaining(b))
2002 break;
2003
2004 if (bkey_cmp(k, &b->key) <= 0) {
2005 if (!b->level)
2006 bkey_put(b->c, k);
2007
2008 ret |= btree_insert_key(b, k, replace_key);
2009 bch_keylist_pop_front(insert_keys);
2010 } else if (bkey_cmp(&START_KEY(k), &b->key) < 0) {
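			/*
			 * The extent straddles the right edge of this node:
			 * insert the part that belongs here (cut back to
			 * b->key) and leave the rest on the keylist for the
			 * next node.
			 */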
2011 BKEY_PADDED(key) temp;
2012 bkey_copy(&temp.key, insert_keys->keys);
2013
2014 bch_cut_back(&b->key, &temp.key);
2015 bch_cut_front(&b->key, insert_keys->keys);
2016
2017 ret |= btree_insert_key(b, &temp.key, replace_key);
2018 break;
2019 } else {
2020 break;
2021 }
2022 }
2023
2024 if (!ret)
2025 op->insert_collision = true;
2026
2027 BUG_ON(!bch_keylist_empty(insert_keys) && b->level);
2028
2029 BUG_ON(bch_count_data(&b->keys) < oldsize);
2030 return ret;
2031}
2032
2033static int btree_split(struct btree *b, struct btree_op *op,
2034 struct keylist *insert_keys,
2035 struct bkey *replace_key)
2036{
2037 bool split;
2038 struct btree *n1, *n2 = NULL, *n3 = NULL;
2039 uint64_t start_time = local_clock();
2040 struct closure cl;
2041 struct keylist parent_keys;
2042
2043 closure_init_stack(&cl);
2044 bch_keylist_init(&parent_keys);
2045
2046 if (btree_check_reserve(b, op)) {
2047 if (!b->level)
2048 return -EINTR;
2049 else
2050 WARN(1, "insufficient reserve for split\n");
2051 }
2052
2053 n1 = btree_node_alloc_replacement(b, op);
2054 if (IS_ERR(n1))
2055 goto err;
2056
2057 split = set_blocks(btree_bset_first(n1),
2058 block_bytes(n1->c)) > (btree_blocks(b) * 4) / 5;
2059
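	/*
	 * Split only if the combined keys would fill more than ~4/5 of a
	 * node; otherwise just compact everything into the single
	 * replacement node n1.
	 */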
2060 if (split) {
2061 unsigned int keys = 0;
2062
2063 trace_bcache_btree_node_split(b, btree_bset_first(n1)->keys);
2064
2065 n2 = bch_btree_node_alloc(b->c, op, b->level, b->parent);
2066 if (IS_ERR(n2))
2067 goto err_free1;
2068
2069 if (!b->parent) {
2070 n3 = bch_btree_node_alloc(b->c, op, b->level + 1, NULL);
2071 if (IS_ERR(n3))
2072 goto err_free2;
2073 }
2074
2075 mutex_lock(&n1->write_lock);
2076 mutex_lock(&n2->write_lock);
2077
2078 bch_btree_insert_keys(n1, op, insert_keys, replace_key);
2079
2080 /*
2081 * Has to be a linear search because we don't have an auxiliary
2082 * search tree yet
2083 */
2084
2085 while (keys < (btree_bset_first(n1)->keys * 3) / 5)
2086 keys += bkey_u64s(bset_bkey_idx(btree_bset_first(n1),
2087 keys));
2088
2089 bkey_copy_key(&n1->key,
2090 bset_bkey_idx(btree_bset_first(n1), keys));
2091 keys += bkey_u64s(bset_bkey_idx(btree_bset_first(n1), keys));
2092
2093 btree_bset_first(n2)->keys = btree_bset_first(n1)->keys - keys;
2094 btree_bset_first(n1)->keys = keys;
2095
2096 memcpy(btree_bset_first(n2)->start,
2097 bset_bkey_last(btree_bset_first(n1)),
2098 btree_bset_first(n2)->keys * sizeof(uint64_t));
2099
2100 bkey_copy_key(&n2->key, &b->key);
2101
2102 bch_keylist_add(&parent_keys, &n2->key);
2103 bch_btree_node_write(n2, &cl);
2104 mutex_unlock(&n2->write_lock);
2105 rw_unlock(true, n2);
2106 } else {
2107 trace_bcache_btree_node_compact(b, btree_bset_first(n1)->keys);
2108
2109 mutex_lock(&n1->write_lock);
2110 bch_btree_insert_keys(n1, op, insert_keys, replace_key);
2111 }
2112
2113 bch_keylist_add(&parent_keys, &n1->key);
2114 bch_btree_node_write(n1, &cl);
2115 mutex_unlock(&n1->write_lock);
2116
2117 if (n3) {
2118 /* Depth increases, make a new root */
2119 mutex_lock(&n3->write_lock);
2120 bkey_copy_key(&n3->key, &MAX_KEY);
2121 bch_btree_insert_keys(n3, op, &parent_keys, NULL);
2122 bch_btree_node_write(n3, &cl);
2123 mutex_unlock(&n3->write_lock);
2124
2125 closure_sync(&cl);
2126 bch_btree_set_root(n3);
2127 rw_unlock(true, n3);
2128 } else if (!b->parent) {
2129 /* Root filled up but didn't need to be split */
2130 closure_sync(&cl);
2131 bch_btree_set_root(n1);
2132 } else {
2133 /* Split a non root node */
2134 closure_sync(&cl);
2135 make_btree_freeing_key(b, parent_keys.top);
2136 bch_keylist_push(&parent_keys);
2137
2138 bch_btree_insert_node(b->parent, op, &parent_keys, NULL, NULL);
2139 BUG_ON(!bch_keylist_empty(&parent_keys));
2140 }
2141
2142 btree_node_free(b);
2143 rw_unlock(true, n1);
2144
2145 bch_time_stats_update(&b->c->btree_split_time, start_time);
2146
2147 return 0;
2148err_free2:
2149 bkey_put(b->c, &n2->key);
2150 btree_node_free(n2);
2151 rw_unlock(true, n2);
2152err_free1:
2153 bkey_put(b->c, &n1->key);
2154 btree_node_free(n1);
2155 rw_unlock(true, n1);
2156err:
2157 WARN(1, "bcache: btree split failed (level %u)", b->level);
2158
2159 if (n3 == ERR_PTR(-EAGAIN) ||
2160 n2 == ERR_PTR(-EAGAIN) ||
2161 n1 == ERR_PTR(-EAGAIN))
2162 return -EAGAIN;
2163
2164 return -ENOMEM;
2165}
2166
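/*
 * Insert keys into one btree node, falling back to btree_split() when they
 * no longer fit.  Returns 0 on success; -EAGAIN if current->bio_list is set
 * and the split cannot be done from this context; -EINTR if the caller must
 * retry with op->lock raised so that the root (and thus the parent) is write
 * locked.
 */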
static int bch_btree_insert_node(struct btree *b, struct btree_op *op,
				 struct keylist *insert_keys,
				 atomic_t *journal_ref,
				 struct bkey *replace_key)
{
	struct closure cl;

	BUG_ON(b->level && replace_key);

	closure_init_stack(&cl);

	mutex_lock(&b->write_lock);

	if (write_block(b) != btree_bset_last(b) &&
	    b->keys.last_set_unwritten)
		bch_btree_init_next(b); /* just wrote a set */

	if (bch_keylist_nkeys(insert_keys) > insert_u64s_remaining(b)) {
		mutex_unlock(&b->write_lock);
		goto split;
	}

	BUG_ON(write_block(b) != btree_bset_last(b));

	if (bch_btree_insert_keys(b, op, insert_keys, replace_key)) {
		if (!b->level)
			bch_btree_leaf_dirty(b, journal_ref);
		else
			bch_btree_node_write(b, &cl);
	}

	mutex_unlock(&b->write_lock);

	/* wait for btree node write if necessary, after unlock */
	closure_sync(&cl);

	return 0;
split:
	if (current->bio_list) {
		op->lock = b->c->root->level + 1;
		return -EAGAIN;
	} else if (op->lock <= b->c->root->level) {
		op->lock = b->c->root->level + 1;
		return -EINTR;
	} else {
		/* Invalidated all iterators */
		int ret = btree_split(b, op, insert_keys, replace_key);

		if (bch_keylist_empty(insert_keys))
			return 0;
		else if (!ret)
			return -EINTR;
		return ret;
	}
}

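/*
 * Insert a placeholder "check" key: it gets a single randomly generated
 * pointer on the reserved PTR_CHECK_DEV device, so it never refers to real
 * data.  If the caller only held a read lock (op->lock == -1), the node lock
 * is upgraded first and we bail out with -EINTR if the node changed
 * underneath us.
 */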
int bch_btree_insert_check_key(struct btree *b, struct btree_op *op,
			       struct bkey *check_key)
{
	int ret = -EINTR;
	uint64_t btree_ptr = b->key.ptr[0];
	unsigned long seq = b->seq;
	struct keylist insert;
	bool upgrade = op->lock == -1;

	bch_keylist_init(&insert);

	if (upgrade) {
		rw_unlock(false, b);
		rw_lock(true, b, b->level);

		if (b->key.ptr[0] != btree_ptr ||
		    b->seq != seq + 1) {
			op->lock = b->level;
			goto out;
		}
	}

	SET_KEY_PTRS(check_key, 1);
	get_random_bytes(&check_key->ptr[0], sizeof(uint64_t));

	SET_PTR_DEV(check_key, 0, PTR_CHECK_DEV);

	bch_keylist_add(&insert, check_key);

	ret = bch_btree_insert_node(b, op, &insert, NULL, NULL);

	BUG_ON(!ret && !bch_keylist_empty(&insert));
out:
	if (upgrade)
		downgrade_write(&b->lock);
	return ret;
}

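/*
 * bch_btree_insert() below walks the leaf nodes with
 * bch_btree_map_leaf_nodes(); btree_insert_op bundles its arguments so that
 * btree_insert_fn() can hand them to bch_btree_insert_node() for each leaf.
 */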
struct btree_insert_op {
	struct btree_op op;
	struct keylist *keys;
	atomic_t *journal_ref;
	struct bkey *replace_key;
};

static int btree_insert_fn(struct btree_op *b_op, struct btree *b)
{
	struct btree_insert_op *op = container_of(b_op,
					struct btree_insert_op, op);

	int ret = bch_btree_insert_node(b, &op->op, op->keys,
					op->journal_ref, op->replace_key);
	if (ret && !bch_keylist_empty(op->keys))
		return ret;
	else
		return MAP_DONE;
}

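/*
 * Insert a list of keys into the btree, restarting the leaf-node walk from
 * the first remaining key until the keylist is drained.  On error the
 * remaining keys are dropped and their pointers released; -ESRCH is returned
 * if a replace_key insert lost a race (insert_collision).
 */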
int bch_btree_insert(struct cache_set *c, struct keylist *keys,
		     atomic_t *journal_ref, struct bkey *replace_key)
{
	struct btree_insert_op op;
	int ret = 0;

	BUG_ON(current->bio_list);
	BUG_ON(bch_keylist_empty(keys));

	bch_btree_op_init(&op.op, 0);
	op.keys = keys;
	op.journal_ref = journal_ref;
	op.replace_key = replace_key;

	while (!ret && !bch_keylist_empty(keys)) {
		op.op.lock = 0;
		ret = bch_btree_map_leaf_nodes(&op.op, c,
					       &START_KEY(keys->keys),
					       btree_insert_fn);
	}

	if (ret) {
		struct bkey *k;

		pr_err("error %i", ret);

		while ((k = bch_keylist_pop(keys)))
			bkey_put(c, k);
	} else if (op.op.insert_collision)
		ret = -ESRCH;

	return ret;
}

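/*
 * Point the cache set at a new root node.  The new root must already be
 * written out and its buckets must carry BTREE_PRIO; the change is made
 * durable by writing a journal entry (bch_journal_meta) before returning.
 */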
void bch_btree_set_root(struct btree *b)
{
	unsigned int i;
	struct closure cl;

	closure_init_stack(&cl);

	trace_bcache_btree_set_root(b);

	BUG_ON(!b->written);

	for (i = 0; i < KEY_PTRS(&b->key); i++)
		BUG_ON(PTR_BUCKET(b->c, &b->key, i)->prio != BTREE_PRIO);

	mutex_lock(&b->c->bucket_lock);
	list_del_init(&b->list);
	mutex_unlock(&b->c->bucket_lock);

	b->c->root = b;

	bch_journal_meta(b->c, &cl);
	closure_sync(&cl);
}

/* Map across nodes or keys */

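/*
 * Recursively walk the btree starting at @from, calling @fn on each node.
 * With MAP_ALL_NODES, interior nodes are visited as well as leaves; the walk
 * stops as soon as @fn returns anything other than MAP_CONTINUE.
 */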
static int bch_btree_map_nodes_recurse(struct btree *b, struct btree_op *op,
				       struct bkey *from,
				       btree_map_nodes_fn *fn, int flags)
{
	int ret = MAP_CONTINUE;

	if (b->level) {
		struct bkey *k;
		struct btree_iter iter;

		bch_btree_iter_init(&b->keys, &iter, from);

		while ((k = bch_btree_iter_next_filter(&iter, &b->keys,
						       bch_ptr_bad))) {
			ret = btree(map_nodes_recurse, k, b,
				    op, from, fn, flags);
			from = NULL;

			if (ret != MAP_CONTINUE)
				return ret;
		}
	}

	if (!b->level || flags == MAP_ALL_NODES)
		ret = fn(op, b);

	return ret;
}

int __bch_btree_map_nodes(struct btree_op *op, struct cache_set *c,
			  struct bkey *from, btree_map_nodes_fn *fn, int flags)
{
	return btree_root(map_nodes_recurse, c, op, from, fn, flags);
}

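/*
 * As above, but call @fn on each key in the leaf nodes rather than on whole
 * nodes.  With MAP_END_KEY, @fn is additionally called once per leaf with a
 * zero-size key at the node boundary.
 */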
static int bch_btree_map_keys_recurse(struct btree *b, struct btree_op *op,
				      struct bkey *from, btree_map_keys_fn *fn,
				      int flags)
{
	int ret = MAP_CONTINUE;
	struct bkey *k;
	struct btree_iter iter;

	bch_btree_iter_init(&b->keys, &iter, from);

	while ((k = bch_btree_iter_next_filter(&iter, &b->keys, bch_ptr_bad))) {
		ret = !b->level
			? fn(op, b, k)
			: btree(map_keys_recurse, k, b, op, from, fn, flags);
		from = NULL;

		if (ret != MAP_CONTINUE)
			return ret;
	}

	if (!b->level && (flags & MAP_END_KEY))
		ret = fn(op, b, &KEY(KEY_INODE(&b->key),
				     KEY_OFFSET(&b->key), 0));

	return ret;
}

int bch_btree_map_keys(struct btree_op *op, struct cache_set *c,
		       struct bkey *from, btree_map_keys_fn *fn, int flags)
{
	return btree_root(map_keys_recurse, c, op, from, fn, flags);
}

/* Keybuf code */

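/*
 * A keybuf holds a window of keys (e.g. dirty keys for writeback) in an
 * rbtree protected by buf->lock.  For insertion and lookup, keys whose
 * extents overlap compare equal, so only non-overlapping keys end up in the
 * tree.
 */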
static inline int keybuf_cmp(struct keybuf_key *l, struct keybuf_key *r)
{
	/* Overlapping keys compare equal */
	if (bkey_cmp(&l->key, &START_KEY(&r->key)) <= 0)
		return -1;
	if (bkey_cmp(&START_KEY(&l->key), &r->key) >= 0)
		return 1;
	return 0;
}

static inline int keybuf_nonoverlapping_cmp(struct keybuf_key *l,
					    struct keybuf_key *r)
{
	return clamp_t(int64_t, bkey_cmp(&l->key, &r->key), -1, 1);
}

struct refill {
	struct btree_op op;
	unsigned int nr_found;
	struct keybuf *buf;
	struct bkey *end;
	keybuf_pred_fn *pred;
};

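/*
 * Map function used by bch_refill_keybuf(): for every key up to refill->end
 * that matches the predicate, allocate a keybuf_key from the freelist and
 * add it to the rbtree.  Returns MAP_DONE once the end key is passed or the
 * freelist runs dry; buf->last_scanned records how far the scan got.
 */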
static int refill_keybuf_fn(struct btree_op *op, struct btree *b,
			    struct bkey *k)
{
	struct refill *refill = container_of(op, struct refill, op);
	struct keybuf *buf = refill->buf;
	int ret = MAP_CONTINUE;

	if (bkey_cmp(k, refill->end) > 0) {
		ret = MAP_DONE;
		goto out;
	}

	if (!KEY_SIZE(k)) /* end key */
		goto out;

	if (refill->pred(buf, k)) {
		struct keybuf_key *w;

		spin_lock(&buf->lock);

		w = array_alloc(&buf->freelist);
		if (!w) {
			spin_unlock(&buf->lock);
			return MAP_DONE;
		}

		w->private = NULL;
		bkey_copy(&w->key, k);

		if (RB_INSERT(&buf->keys, w, node, keybuf_cmp))
			array_free(&buf->freelist, w);
		else
			refill->nr_found++;

		if (array_freelist_empty(&buf->freelist))
			ret = MAP_DONE;

		spin_unlock(&buf->lock);
	}
out:
	buf->last_scanned = *k;
	return ret;
}

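/*
 * Scan the btree from buf->last_scanned towards @end and refill the keybuf
 * with keys matching @pred.  Afterwards buf->start/buf->end are updated to
 * cover the keys currently held (or set to MAX_KEY when empty), so that
 * bch_keybuf_check_overlapping() can cheaply reject non-overlapping ranges.
 */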
void bch_refill_keybuf(struct cache_set *c, struct keybuf *buf,
		       struct bkey *end, keybuf_pred_fn *pred)
{
	struct bkey start = buf->last_scanned;
	struct refill refill;

	cond_resched();

	bch_btree_op_init(&refill.op, -1);
	refill.nr_found = 0;
	refill.buf = buf;
	refill.end = end;
	refill.pred = pred;

	bch_btree_map_keys(&refill.op, c, &buf->last_scanned,
			   refill_keybuf_fn, MAP_END_KEY);

	trace_bcache_keyscan(refill.nr_found,
			     KEY_INODE(&start), KEY_OFFSET(&start),
			     KEY_INODE(&buf->last_scanned),
			     KEY_OFFSET(&buf->last_scanned));

	spin_lock(&buf->lock);

	if (!RB_EMPTY_ROOT(&buf->keys)) {
		struct keybuf_key *w;

		w = RB_FIRST(&buf->keys, struct keybuf_key, node);
		buf->start = START_KEY(&w->key);

		w = RB_LAST(&buf->keys, struct keybuf_key, node);
		buf->end = w->key;
	} else {
		buf->start = MAX_KEY;
		buf->end = MAX_KEY;
	}

	spin_unlock(&buf->lock);
}

static void __bch_keybuf_del(struct keybuf *buf, struct keybuf_key *w)
{
	rb_erase(&w->node, &buf->keys);
	array_free(&buf->freelist, w);
}

void bch_keybuf_del(struct keybuf *buf, struct keybuf_key *w)
{
	spin_lock(&buf->lock);
	__bch_keybuf_del(buf, w);
	spin_unlock(&buf->lock);
}

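/*
 * Return true if any buffered key in [@start, @end) is still in use
 * (w->private set); keys in that range that are not in use are dropped from
 * the buffer.  Callers check this before overwriting an extent so they don't
 * race with, e.g., writeback working from a stale key.
 */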
bool bch_keybuf_check_overlapping(struct keybuf *buf, struct bkey *start,
				  struct bkey *end)
{
	bool ret = false;
	struct keybuf_key *p, *w, s;

	s.key = *start;

	if (bkey_cmp(end, &buf->start) <= 0 ||
	    bkey_cmp(start, &buf->end) >= 0)
		return false;

	spin_lock(&buf->lock);
	w = RB_GREATER(&buf->keys, s, node, keybuf_nonoverlapping_cmp);

	while (w && bkey_cmp(&START_KEY(&w->key), end) < 0) {
		p = w;
		w = RB_NEXT(w, node);

		if (p->private)
			ret = true;
		else
			__bch_keybuf_del(buf, p);
	}

	spin_unlock(&buf->lock);
	return ret;
}

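/*
 * Hand out the first key in the buffer that nobody else is working on,
 * claiming it by setting w->private to a non-NULL sentinel.  Returns NULL
 * when every buffered key is already claimed or the buffer is empty.
 */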
struct keybuf_key *bch_keybuf_next(struct keybuf *buf)
{
	struct keybuf_key *w;

	spin_lock(&buf->lock);

	w = RB_FIRST(&buf->keys, struct keybuf_key, node);

	while (w && w->private)
		w = RB_NEXT(w, node);

	if (w)
		w->private = ERR_PTR(-EINTR);

	spin_unlock(&buf->lock);
	return w;
}

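/*
 * Like bch_keybuf_next(), but refill the buffer from the btree when it runs
 * empty, until either a key is found or the scan reaches @end.
 */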
struct keybuf_key *bch_keybuf_next_rescan(struct cache_set *c,
					  struct keybuf *buf,
					  struct bkey *end,
					  keybuf_pred_fn *pred)
{
	struct keybuf_key *ret;

	while (1) {
		ret = bch_keybuf_next(buf);
		if (ret)
			break;

		if (bkey_cmp(&buf->last_scanned, end) >= 0) {
			pr_debug("scan finished");
			break;
		}

		bch_refill_keybuf(c, buf, end, pred);
	}

	return ret;
}

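/*
 * Initialise a keybuf for use: empty rbtree, empty window, lock and freelist
 * allocator ready to go.
 */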
void bch_keybuf_init(struct keybuf *buf)
{
	buf->last_scanned = MAX_KEY;
	buf->keys = RB_ROOT;

	spin_lock_init(&buf->lock);
	array_allocator_init(&buf->freelist);
}