// SPDX-License-Identifier: GPL-2.0
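/*
 * Journal space accounting and reclaim: track how much free space remains in
 * the journal, discard buckets that are no longer dirty, and flush journal
 * pins (btree nodes, key cache entries) so old journal entries can be
 * dropped.
 */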

#include "bcachefs.h"
#include "btree_key_cache.h"
#include "btree_update.h"
#include "btree_write_buffer.h"
#include "buckets.h"
#include "errcode.h"
#include "error.h"
#include "journal.h"
#include "journal_io.h"
#include "journal_reclaim.h"
#include "replicas.h"
#include "sb-members.h"
#include "trace.h"

#include <linux/kthread.h>
#include <linux/sched/mm.h>

/* Free space calculations: */

static unsigned journal_space_from(struct journal_device *ja,
				   enum journal_space_from from)
{
	switch (from) {
	case journal_space_discarded:
		return ja->discard_idx;
	case journal_space_clean_ondisk:
		return ja->dirty_idx_ondisk;
	case journal_space_clean:
		return ja->dirty_idx;
	default:
		BUG();
	}
}

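/*
 * Return the number of journal buckets available on @ja, counting from the
 * index that corresponds to the given reclaim stage (discarded, clean on
 * disk, or clean in memory):
 */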
unsigned bch2_journal_dev_buckets_available(struct journal *j,
					    struct journal_device *ja,
					    enum journal_space_from from)
{
	if (!ja->nr)
		return 0;

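	/*
	 * Buckets form a ring: what's free is the number of slots strictly
	 * between cur_idx (the bucket we're currently writing to) and the
	 * index corresponding to @from:
	 */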
	unsigned available = (journal_space_from(ja, from) -
			      ja->cur_idx - 1 + ja->nr) % ja->nr;

	/*
	 * Don't use the last bucket unless writing the new last_seq
	 * will make another bucket available:
	 */
	if (available && ja->dirty_idx_ondisk == ja->dirty_idx)
		--available;

	return available;
}

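/*
 * Pick the journal watermark: run at BCH_WATERMARK_reclaim, which throttles
 * new journal reservations, when we're low on clean space, low on pin fifo
 * entries, or the btree write buffer is full; otherwise run at
 * BCH_WATERMARK_stripe:
 */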
void bch2_journal_set_watermark(struct journal *j)
{
	struct bch_fs *c = container_of(j, struct bch_fs, journal);
	bool low_on_space = j->space[journal_space_clean].total * 4 <=
		j->space[journal_space_total].total;
	bool low_on_pin = fifo_free(&j->pin) < j->pin.size / 4;
	bool low_on_wb = bch2_btree_write_buffer_must_wait(c);
	unsigned watermark = low_on_space || low_on_pin || low_on_wb
		? BCH_WATERMARK_reclaim
		: BCH_WATERMARK_stripe;

	if (track_event_change(&c->times[BCH_TIME_blocked_journal_low_on_space], low_on_space) ||
	    track_event_change(&c->times[BCH_TIME_blocked_journal_low_on_pin], low_on_pin) ||
	    track_event_change(&c->times[BCH_TIME_blocked_write_buffer_full], low_on_wb))
		trace_and_count(c, journal_full, c);

	mod_bit(JOURNAL_space_low, &j->flags, low_on_space || low_on_pin);

	swap(watermark, j->watermark);
	if (watermark > j->watermark)
		journal_wake(j);
}

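/*
 * Compute how much space is available on @ca for the next journal entry, and
 * in total, at the given reclaim stage:
 */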
static struct journal_space
journal_dev_space_available(struct journal *j, struct bch_dev *ca,
			    enum journal_space_from from)
{
	struct journal_device *ja = &ca->journal;
	unsigned sectors, buckets, unwritten;
	u64 seq;

	if (from == journal_space_total)
		return (struct journal_space) {
			.next_entry	= ca->mi.bucket_size,
			.total		= ca->mi.bucket_size * ja->nr,
		};

	buckets = bch2_journal_dev_buckets_available(j, ja, from);
	sectors = ja->sectors_free;

	/*
	 * Note that we don't allocate the space for a journal entry
	 * until we write it out - thus, account for it here:
	 */
	for (seq = journal_last_unwritten_seq(j);
	     seq <= journal_cur_seq(j);
	     seq++) {
		unwritten = j->buf[seq & JOURNAL_BUF_MASK].sectors;

		if (!unwritten)
			continue;

		/* entry won't fit on this device, skip: */
		if (unwritten > ca->mi.bucket_size)
			continue;

		if (unwritten >= sectors) {
			if (!buckets) {
				sectors = 0;
				break;
			}

			buckets--;
			sectors = ca->mi.bucket_size;
		}

		sectors -= unwritten;
	}

	if (sectors < ca->mi.bucket_size && buckets) {
		buckets--;
		sectors = ca->mi.bucket_size;
	}

	return (struct journal_space) {
		.next_entry	= sectors,
		.total		= sectors + buckets * ca->mi.bucket_size,
	};
}

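/*
 * Aggregate per-device space into one figure: sort devices by available
 * space, then take the smallest of the @nr_devs_want largest, since a journal
 * entry has to be written to that many devices:
 */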
static struct journal_space __journal_space_available(struct journal *j, unsigned nr_devs_want,
						       enum journal_space_from from)
{
	struct bch_fs *c = container_of(j, struct bch_fs, journal);
	unsigned pos, nr_devs = 0;
	struct journal_space space, dev_space[BCH_SB_MEMBERS_MAX];
	unsigned min_bucket_size = U32_MAX;

	BUG_ON(nr_devs_want > ARRAY_SIZE(dev_space));

	rcu_read_lock();
	for_each_member_device_rcu(c, ca, &c->rw_devs[BCH_DATA_journal]) {
		if (!ca->journal.nr ||
		    !ca->mi.durability)
			continue;

		min_bucket_size = min(min_bucket_size, ca->mi.bucket_size);

		space = journal_dev_space_available(j, ca, from);
		if (!space.next_entry)
			continue;

		for (pos = 0; pos < nr_devs; pos++)
			if (space.total > dev_space[pos].total)
				break;

		array_insert_item(dev_space, nr_devs, pos, space);
	}
	rcu_read_unlock();

	if (nr_devs < nr_devs_want)
		return (struct journal_space) { 0, 0 };

	/*
	 * We sorted largest to smallest, and we want the smallest out of the
	 * @nr_devs_want largest devices:
	 */
	space = dev_space[nr_devs_want - 1];
	space.next_entry = min(space.next_entry, min_bucket_size);
	return space;
}

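/*
 * Recompute j->space[] and the amount of space available for the next journal
 * entry; must be called with j->lock held:
 */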
void bch2_journal_space_available(struct journal *j)
{
	struct bch_fs *c = container_of(j, struct bch_fs, journal);
	unsigned clean, clean_ondisk, total;
	unsigned max_entry_size	= min(j->buf[0].buf_size >> 9,
				      j->buf[1].buf_size >> 9);
	unsigned nr_online = 0, nr_devs_want;
	bool can_discard = false;
	int ret = 0;

	lockdep_assert_held(&j->lock);

	rcu_read_lock();
	for_each_member_device_rcu(c, ca, &c->rw_devs[BCH_DATA_journal]) {
		struct journal_device *ja = &ca->journal;

		if (!ja->nr)
			continue;

		while (ja->dirty_idx != ja->cur_idx &&
		       ja->bucket_seq[ja->dirty_idx] < journal_last_seq(j))
			ja->dirty_idx = (ja->dirty_idx + 1) % ja->nr;

		while (ja->dirty_idx_ondisk != ja->dirty_idx &&
		       ja->bucket_seq[ja->dirty_idx_ondisk] < j->last_seq_ondisk)
			ja->dirty_idx_ondisk = (ja->dirty_idx_ondisk + 1) % ja->nr;

		if (ja->discard_idx != ja->dirty_idx_ondisk)
			can_discard = true;

		max_entry_size = min_t(unsigned, max_entry_size, ca->mi.bucket_size);
		nr_online++;
	}
	rcu_read_unlock();

	j->can_discard = can_discard;

	if (nr_online < metadata_replicas_required(c)) {
		struct printbuf buf = PRINTBUF;
		buf.atomic++;
		prt_printf(&buf, "insufficient writeable journal devices available: have %u, need %u\n"
			   "rw journal devs:", nr_online, metadata_replicas_required(c));

		rcu_read_lock();
		for_each_member_device_rcu(c, ca, &c->rw_devs[BCH_DATA_journal])
			prt_printf(&buf, " %s", ca->name);
		rcu_read_unlock();

		bch_err(c, "%s", buf.buf);
		printbuf_exit(&buf);
		ret = -BCH_ERR_insufficient_journal_devices;
		goto out;
	}

	nr_devs_want = min_t(unsigned, nr_online, c->opts.metadata_replicas);

	for (unsigned i = 0; i < journal_space_nr; i++)
		j->space[i] = __journal_space_available(j, nr_devs_want, i);

	clean_ondisk	= j->space[journal_space_clean_ondisk].total;
	clean		= j->space[journal_space_clean].total;
	total		= j->space[journal_space_total].total;

	if (!j->space[journal_space_discarded].next_entry)
		ret = -BCH_ERR_journal_full;

	if ((j->space[journal_space_clean_ondisk].next_entry <
	     j->space[journal_space_clean_ondisk].total) &&
	    (clean - clean_ondisk <= total / 8) &&
	    (clean_ondisk * 2 > clean))
		set_bit(JOURNAL_may_skip_flush, &j->flags);
	else
		clear_bit(JOURNAL_may_skip_flush, &j->flags);

	bch2_journal_set_watermark(j);
out:
	j->cur_entry_sectors	= !ret
		? round_down(j->space[journal_space_discarded].next_entry,
			     block_sectors(c))
		: 0;
	j->cur_entry_error	= ret;

	if (!ret)
		journal_wake(j);
}

/* Discards - last part of journal reclaim: */

static bool should_discard_bucket(struct journal *j, struct journal_device *ja)
{
	bool ret;

	spin_lock(&j->lock);
	ret = ja->discard_idx != ja->dirty_idx_ondisk;
	spin_unlock(&j->lock);

	return ret;
}

/*
 * Advance ja->discard_idx as long as it points to buckets that are no longer
 * dirty, issuing discards if necessary:
 */
void bch2_journal_do_discards(struct journal *j)
{
	struct bch_fs *c = container_of(j, struct bch_fs, journal);

	mutex_lock(&j->discard_lock);

	for_each_rw_member(c, ca) {
		struct journal_device *ja = &ca->journal;

		while (should_discard_bucket(j, ja)) {
			if (!c->opts.nochanges &&
			    ca->mi.discard &&
			    bdev_max_discard_sectors(ca->disk_sb.bdev))
				blkdev_issue_discard(ca->disk_sb.bdev,
					bucket_to_sector(ca,
						ja->buckets[ja->discard_idx]),
					ca->mi.bucket_size, GFP_NOFS);

			spin_lock(&j->lock);
			ja->discard_idx = (ja->discard_idx + 1) % ja->nr;

			bch2_journal_space_available(j);
			spin_unlock(&j->lock);
		}
	}

	mutex_unlock(&j->discard_lock);
}

/*
 * Journal entry pinning - machinery for holding a reference on a given journal
 * entry, holding it open to ensure it gets replayed during recovery:
 */

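/*
 * Advance the front of the pin fifo past entries whose pin count has reached
 * zero, making their journal space reclaimable; caller must hold j->lock:
 */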
void bch2_journal_reclaim_fast(struct journal *j)
{
	bool popped = false;

	lockdep_assert_held(&j->lock);

	/*
	 * Unpin journal entries whose reference counts reached zero, meaning
	 * all btree nodes got written out
	 */
	while (!fifo_empty(&j->pin) &&
	       j->pin.front <= j->seq_ondisk &&
	       !atomic_read(&fifo_peek_front(&j->pin).count)) {
		j->pin.front++;
		popped = true;
	}

	if (popped) {
		bch2_journal_space_available(j);
		__closure_wake_up(&j->reclaim_flush_wait);
	}
}

bool __bch2_journal_pin_put(struct journal *j, u64 seq)
{
	struct journal_entry_pin_list *pin_list = journal_seq_pin(j, seq);

	return atomic_dec_and_test(&pin_list->count);
}

void bch2_journal_pin_put(struct journal *j, u64 seq)
{
	if (__bch2_journal_pin_put(j, seq)) {
		spin_lock(&j->lock);
		bch2_journal_reclaim_fast(j);
		spin_unlock(&j->lock);
	}
}

static inline bool __journal_pin_drop(struct journal *j,
				      struct journal_entry_pin *pin)
{
	struct journal_entry_pin_list *pin_list;

	if (!journal_pin_active(pin))
		return false;

	if (j->flush_in_progress == pin)
		j->flush_in_progress_dropped = true;

	pin_list = journal_seq_pin(j, pin->seq);
	pin->seq = 0;
	list_del_init(&pin->list);

	if (j->reclaim_flush_wait.list.first)
		__closure_wake_up(&j->reclaim_flush_wait);

	/*
	 * Unpinning a journal entry may make journal_next_bucket() succeed, if
	 * writing a new last_seq will now make another bucket available:
	 */
	return atomic_dec_and_test(&pin_list->count) &&
		pin_list == &fifo_peek_front(&j->pin);
}

void bch2_journal_pin_drop(struct journal *j,
			   struct journal_entry_pin *pin)
{
	spin_lock(&j->lock);
	if (__journal_pin_drop(j, pin))
		bch2_journal_reclaim_fast(j);
	spin_unlock(&j->lock);
}

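/*
 * Classify a pin by its flush callback: btree node pins are bucketed by the
 * node's level, key cache pins get their own class, and everything else is
 * JOURNAL_PIN_TYPE_other:
 */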
static enum journal_pin_type journal_pin_type(struct journal_entry_pin *pin,
					      journal_pin_flush_fn fn)
{
	if (fn == bch2_btree_node_flush0 ||
	    fn == bch2_btree_node_flush1) {
		unsigned idx = fn == bch2_btree_node_flush1;
		struct btree *b = container_of(pin, struct btree, writes[idx].journal);

		return JOURNAL_PIN_TYPE_btree0 - b->c.level;
	} else if (fn == bch2_btree_key_cache_journal_flush)
		return JOURNAL_PIN_TYPE_key_cache;
	else
		return JOURNAL_PIN_TYPE_other;
}

static inline void bch2_journal_pin_set_locked(struct journal *j, u64 seq,
					       struct journal_entry_pin *pin,
					       journal_pin_flush_fn flush_fn,
					       enum journal_pin_type type)
{
	struct journal_entry_pin_list *pin_list = journal_seq_pin(j, seq);

	/*
	 * flush_fn is how we identify journal pins in debugfs, so must always
	 * exist, even if it doesn't do anything:
	 */
	BUG_ON(!flush_fn);

	atomic_inc(&pin_list->count);
	pin->seq	= seq;
	pin->flush	= flush_fn;

	if (list_empty(&pin_list->unflushed[type]) &&
	    j->reclaim_flush_wait.list.first)
		__closure_wake_up(&j->reclaim_flush_wait);

	list_add(&pin->list, &pin_list->unflushed[type]);
}

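/*
 * Pin @dst at the same sequence number that @src is pinned at, dropping
 * whatever sequence @dst was previously pinned at:
 */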
void bch2_journal_pin_copy(struct journal *j,
			   struct journal_entry_pin *dst,
			   struct journal_entry_pin *src,
			   journal_pin_flush_fn flush_fn)
{
	spin_lock(&j->lock);

	u64 seq = READ_ONCE(src->seq);

	if (seq < journal_last_seq(j)) {
		/*
		 * bch2_journal_pin_copy() raced with bch2_journal_pin_drop() on
		 * the src pin - with the pin dropped, the entry to pin might no
		 * longer exist, but that means there's no longer anything to
		 * copy and we can bail out here:
		 */
		spin_unlock(&j->lock);
		return;
	}

	bool reclaim = __journal_pin_drop(j, dst);

	bch2_journal_pin_set_locked(j, seq, dst, flush_fn, journal_pin_type(dst, flush_fn));

	if (reclaim)
		bch2_journal_reclaim_fast(j);

	/*
	 * If the journal is currently full, we might want to call flush_fn
	 * immediately:
	 */
	if (seq == journal_last_seq(j))
		journal_wake(j);
	spin_unlock(&j->lock);
}

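/*
 * (Re)pin @pin at @seq; @seq must not be older than the current last_seq:
 */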
void bch2_journal_pin_set(struct journal *j, u64 seq,
			  struct journal_entry_pin *pin,
			  journal_pin_flush_fn flush_fn)
{
	spin_lock(&j->lock);

	BUG_ON(seq < journal_last_seq(j));

	bool reclaim = __journal_pin_drop(j, pin);

	bch2_journal_pin_set_locked(j, seq, pin, flush_fn, journal_pin_type(pin, flush_fn));

	if (reclaim)
		bch2_journal_reclaim_fast(j);
	/*
	 * If the journal is currently full, we might want to call flush_fn
	 * immediately:
	 */
	if (seq == journal_last_seq(j))
		journal_wake(j);

	spin_unlock(&j->lock);
}

/**
 * bch2_journal_pin_flush: ensure journal pin callback is no longer running
 * @j: journal object
 * @pin: pin to flush
 */
void bch2_journal_pin_flush(struct journal *j, struct journal_entry_pin *pin)
{
	BUG_ON(journal_pin_active(pin));

	wait_event(j->pin_flush_wait, j->flush_in_progress != pin);
}

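/*
 * Typical pin lifecycle, as a sketch (my_pin and my_flush_fn are illustrative
 * names, not symbols defined in this file):
 *
 *	bch2_journal_pin_set(j, seq, &my_pin, my_flush_fn);
 *	// ...work that must persist before journal entry seq is reclaimed...
 *	bch2_journal_pin_drop(j, &my_pin);
 *	bch2_journal_pin_flush(j, &my_pin);	// wait out a racing flush_fn
 */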
/*
 * Journal reclaim: flush references to open journal entries to reclaim space
 * in the journal
 *
 * May be done by the journal code in the background as needed to free up space
 * for more journal entries, or as part of doing a clean shutdown, or to migrate
 * data off of a specific device:
 */

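/*
 * Find the next pin to flush and return its sequence number in @seq:
 * @allowed_below_seq and @allowed_above_seq are bitmasks of journal_pin_type,
 * selecting which pin classes may be flushed below and above @seq_to_flush
 * respectively:
 */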
static struct journal_entry_pin *
journal_get_next_pin(struct journal *j,
		     u64 seq_to_flush,
		     unsigned allowed_below_seq,
		     unsigned allowed_above_seq,
		     u64 *seq)
{
	struct journal_entry_pin_list *pin_list;
	struct journal_entry_pin *ret = NULL;

	fifo_for_each_entry_ptr(pin_list, &j->pin, *seq) {
		if (*seq > seq_to_flush && !allowed_above_seq)
			break;

		for (unsigned i = 0; i < JOURNAL_PIN_TYPE_NR; i++)
			if (((BIT(i) & allowed_below_seq) && *seq <= seq_to_flush) ||
			    (BIT(i) & allowed_above_seq)) {
				ret = list_first_entry_or_null(&pin_list->unflushed[i],
							       struct journal_entry_pin, list);
				if (ret)
					return ret;
			}
	}

	return NULL;
}

/* Returns the number of pins flushed (nonzero if we did work): */
static size_t journal_flush_pins(struct journal *j,
				 u64 seq_to_flush,
				 unsigned allowed_below_seq,
				 unsigned allowed_above_seq,
				 unsigned min_any,
				 unsigned min_key_cache)
{
	struct journal_entry_pin *pin;
	size_t nr_flushed = 0;
	journal_pin_flush_fn flush_fn;
	u64 seq;
	int err;

	lockdep_assert_held(&j->reclaim_lock);

	while (1) {
		unsigned allowed_above = allowed_above_seq;
		unsigned allowed_below = allowed_below_seq;

		if (min_any) {
			allowed_above |= ~0;
			allowed_below |= ~0;
		}

		if (min_key_cache) {
			allowed_above |= BIT(JOURNAL_PIN_TYPE_key_cache);
			allowed_below |= BIT(JOURNAL_PIN_TYPE_key_cache);
		}

		cond_resched();

		j->last_flushed = jiffies;

		spin_lock(&j->lock);
		pin = journal_get_next_pin(j, seq_to_flush,
					   allowed_below,
					   allowed_above, &seq);
		if (pin) {
			BUG_ON(j->flush_in_progress);
			j->flush_in_progress = pin;
			j->flush_in_progress_dropped = false;
			flush_fn = pin->flush;
		}
		spin_unlock(&j->lock);

		if (!pin)
			break;

		if (min_key_cache && pin->flush == bch2_btree_key_cache_journal_flush)
			min_key_cache--;

		if (min_any)
			min_any--;

		err = flush_fn(j, pin, seq);

		spin_lock(&j->lock);
		/* Pin might have been dropped or rearmed: */
		if (likely(!err && !j->flush_in_progress_dropped))
			list_move(&pin->list, &journal_seq_pin(j, seq)->flushed[journal_pin_type(pin, flush_fn)]);
		j->flush_in_progress = NULL;
		j->flush_in_progress_dropped = false;
		spin_unlock(&j->lock);

		wake_up(&j->pin_flush_wait);

		if (err)
			break;

		nr_flushed++;
	}

	return nr_flushed;
}

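/*
 * Pick a target sequence number to flush up to, such that afterwards the
 * journal is at most half full, in buckets and in pin fifo entries:
 */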
static u64 journal_seq_to_flush(struct journal *j)
{
	struct bch_fs *c = container_of(j, struct bch_fs, journal);
	u64 seq_to_flush = 0;

	spin_lock(&j->lock);

	for_each_rw_member(c, ca) {
		struct journal_device *ja = &ca->journal;
		unsigned nr_buckets, bucket_to_flush;

		if (!ja->nr)
			continue;

		/* Try to keep the journal at most half full: */
		nr_buckets = ja->nr / 2;

		nr_buckets = min(nr_buckets, ja->nr);

		bucket_to_flush = (ja->cur_idx + nr_buckets) % ja->nr;
		seq_to_flush = max(seq_to_flush,
				   ja->bucket_seq[bucket_to_flush]);
	}

	/* Also flush if the pin fifo is more than half full */
	seq_to_flush = max_t(s64, seq_to_flush,
			     (s64) journal_cur_seq(j) -
			     (j->pin.size >> 1));
	spin_unlock(&j->lock);

	return seq_to_flush;
}

/**
 * __bch2_journal_reclaim - free up journal buckets
 * @j:		journal object
 * @direct:	direct or background reclaim?
 * @kicked:	requested to run since we last ran?
 *
 * Background journal reclaim writes out btree nodes. It should be run
 * early enough so that we never completely run out of journal buckets.
 *
 * High watermarks for triggering background reclaim:
 * - FIFO has fewer than 512 entries left
 * - fewer than 25% journal buckets free
 *
 * Background reclaim runs until low watermarks are reached:
 * - FIFO has more than 1024 entries left
 * - more than 50% journal buckets free
 *
 * As long as a reclaim can complete in the time it takes to fill up
 * 512 journal entries or 25% of all journal buckets, then
 * journal_next_bucket() should not stall.
 */
static int __bch2_journal_reclaim(struct journal *j, bool direct, bool kicked)
{
	struct bch_fs *c = container_of(j, struct bch_fs, journal);
	struct btree_cache *bc = &c->btree_cache;
	bool kthread = (current->flags & PF_KTHREAD) != 0;
	u64 seq_to_flush;
	size_t min_nr, min_key_cache, nr_flushed;
	unsigned flags;
	int ret = 0;

	/*
	 * We can't invoke memory reclaim while holding the reclaim_lock -
	 * journal reclaim is required to make progress for memory reclaim
	 * (cleaning the caches), so we can't get stuck in memory reclaim while
	 * we're holding the reclaim lock:
	 */
	lockdep_assert_held(&j->reclaim_lock);
	flags = memalloc_noreclaim_save();

	do {
		if (kthread && kthread_should_stop())
			break;

		ret = bch2_journal_error(j);
		if (ret)
			break;

		bch2_journal_do_discards(j);

		seq_to_flush = journal_seq_to_flush(j);
		min_nr = 0;

		/*
		 * If it's been longer than the journal_reclaim_delay option
		 * since we last flushed, make sure to flush at least one
		 * journal pin:
		 */
		if (time_after(jiffies, j->last_flushed +
			       msecs_to_jiffies(c->opts.journal_reclaim_delay)))
			min_nr = 1;

		if (j->watermark != BCH_WATERMARK_stripe)
			min_nr = 1;

		size_t btree_cache_live = bc->live[0].nr + bc->live[1].nr;
		if (atomic_long_read(&bc->nr_dirty) * 2 > btree_cache_live)
			min_nr = 1;

		min_key_cache = min(bch2_nr_btree_keys_need_flush(c), (size_t) 128);

		trace_and_count(c, journal_reclaim_start, c,
				direct, kicked,
				min_nr, min_key_cache,
				atomic_long_read(&bc->nr_dirty), btree_cache_live,
				atomic_long_read(&c->btree_key_cache.nr_dirty),
				atomic_long_read(&c->btree_key_cache.nr_keys));

		nr_flushed = journal_flush_pins(j, seq_to_flush,
						~0, 0,
						min_nr, min_key_cache);

		if (direct)
			j->nr_direct_reclaim += nr_flushed;
		else
			j->nr_background_reclaim += nr_flushed;
		trace_and_count(c, journal_reclaim_finish, c, nr_flushed);

		if (nr_flushed)
			wake_up(&j->reclaim_wait);
	} while ((min_nr || min_key_cache) && nr_flushed && !direct);

	memalloc_noreclaim_restore(flags);

	return ret;
}

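/* Synchronous, direct reclaim entry point: */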
int bch2_journal_reclaim(struct journal *j)
{
	return __bch2_journal_reclaim(j, true, true);
}

static int bch2_journal_reclaim_thread(void *arg)
{
	struct journal *j = arg;
	struct bch_fs *c = container_of(j, struct bch_fs, journal);
	unsigned long delay, now;
	bool journal_empty;
	int ret = 0;

	set_freezable();

	j->last_flushed = jiffies;

	while (!ret && !kthread_should_stop()) {
		bool kicked = j->reclaim_kicked;

		j->reclaim_kicked = false;

		mutex_lock(&j->reclaim_lock);
		ret = __bch2_journal_reclaim(j, false, kicked);
		mutex_unlock(&j->reclaim_lock);

		now = jiffies;
		delay = msecs_to_jiffies(c->opts.journal_reclaim_delay);
		j->next_reclaim = j->last_flushed + delay;

		if (!time_in_range(j->next_reclaim, now, now + delay))
			j->next_reclaim = now + delay;

		while (1) {
			set_current_state(TASK_INTERRUPTIBLE|TASK_FREEZABLE);
			if (kthread_should_stop())
				break;
			if (j->reclaim_kicked)
				break;

			spin_lock(&j->lock);
			journal_empty = fifo_empty(&j->pin);
			spin_unlock(&j->lock);

			long timeout = j->next_reclaim - jiffies;

			if (journal_empty)
				schedule();
			else if (timeout > 0)
				schedule_timeout(timeout);
			else
				break;
		}
		__set_current_state(TASK_RUNNING);
	}

	return 0;
}

void bch2_journal_reclaim_stop(struct journal *j)
{
	struct task_struct *p = j->reclaim_thread;

	j->reclaim_thread = NULL;

	if (p) {
		kthread_stop(p);
		put_task_struct(p);
	}
}

int bch2_journal_reclaim_start(struct journal *j)
{
	struct bch_fs *c = container_of(j, struct bch_fs, journal);
	struct task_struct *p;
	int ret;

	if (j->reclaim_thread)
		return 0;

	p = kthread_create(bch2_journal_reclaim_thread, j,
			   "bch-reclaim/%s", c->name);
	ret = PTR_ERR_OR_ZERO(p);
	bch_err_msg(c, ret, "creating journal reclaim thread");
	if (ret)
		return ret;

	get_task_struct(p);
	j->reclaim_thread = p;
	wake_up_process(p);
	return 0;
}

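/*
 * Returns true if any pins of the given types remain - unflushed, or flushed
 * but not yet reclaimed - at sequence numbers up to @seq_to_flush:
 */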
static bool journal_pins_still_flushing(struct journal *j, u64 seq_to_flush,
					unsigned types)
{
	struct journal_entry_pin_list *pin_list;
	u64 seq;

	spin_lock(&j->lock);
	fifo_for_each_entry_ptr(pin_list, &j->pin, seq) {
		if (seq > seq_to_flush)
			break;

		for (unsigned i = 0; i < JOURNAL_PIN_TYPE_NR; i++)
			if ((BIT(i) & types) &&
			    (!list_empty(&pin_list->unflushed[i]) ||
			     !list_empty(&pin_list->flushed[i]))) {
				spin_unlock(&j->lock);
				return true;
			}
	}
	spin_unlock(&j->lock);

	return false;
}

static bool journal_flush_pins_or_still_flushing(struct journal *j, u64 seq_to_flush,
						 unsigned types)
{
	return journal_flush_pins(j, seq_to_flush, types, 0, 0, 0) ||
		journal_pins_still_flushing(j, seq_to_flush, types);
}

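/*
 * Wait condition for bch2_journal_flush_pins(): flushes pins one type class
 * at a time and returns nonzero once everything up to @seq_to_flush has been
 * flushed, or on journal error:
 */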
static int journal_flush_done(struct journal *j, u64 seq_to_flush,
			      bool *did_work)
{
	int ret = 0;

	ret = bch2_journal_error(j);
	if (ret)
		return ret;

	mutex_lock(&j->reclaim_lock);

	for (int type = JOURNAL_PIN_TYPE_NR - 1;
	     type >= 0;
	     --type)
		if (journal_flush_pins_or_still_flushing(j, seq_to_flush, BIT(type))) {
			*did_work = true;
			goto unlock;
		}

	if (seq_to_flush > journal_cur_seq(j))
		bch2_journal_entry_close(j);

	spin_lock(&j->lock);
	/*
	 * If journal replay hasn't completed, the unreplayed journal entries
	 * hold refs on their corresponding sequence numbers
	 */
	ret = !test_bit(JOURNAL_replay_done, &j->flags) ||
		journal_last_seq(j) > seq_to_flush ||
		!fifo_used(&j->pin);

	spin_unlock(&j->lock);
unlock:
	mutex_unlock(&j->reclaim_lock);

	return ret;
}

bool bch2_journal_flush_pins(struct journal *j, u64 seq_to_flush)
{
	/* time_stats this */
	bool did_work = false;

	if (!test_bit(JOURNAL_running, &j->flags))
		return false;

	closure_wait_event(&j->reclaim_flush_wait,
			   journal_flush_done(j, seq_to_flush, &did_work));

	return did_work;
}

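/*
 * Flush all journal pins referencing device @dev_idx - or, if @dev_idx is
 * negative, all entries with fewer than metadata_replicas copies - then
 * update the journal replicas entries accordingly:
 */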
int bch2_journal_flush_device_pins(struct journal *j, int dev_idx)
{
	struct bch_fs *c = container_of(j, struct bch_fs, journal);
	struct journal_entry_pin_list *p;
	u64 iter, seq = 0;
	int ret = 0;

	spin_lock(&j->lock);
	fifo_for_each_entry_ptr(p, &j->pin, iter)
		if (dev_idx >= 0
		    ? bch2_dev_list_has_dev(p->devs, dev_idx)
		    : p->devs.nr < c->opts.metadata_replicas)
			seq = iter;
	spin_unlock(&j->lock);

	bch2_journal_flush_pins(j, seq);

	ret = bch2_journal_error(j);
	if (ret)
		return ret;

	mutex_lock(&c->replicas_gc_lock);
	bch2_replicas_gc_start(c, 1 << BCH_DATA_journal);

	/*
	 * Now that we've populated replicas_gc, write to the journal to mark
	 * active journal devices. This handles the case where the journal might
	 * be empty. Otherwise we could clear all journal replicas and
	 * temporarily put the fs into an unrecoverable state. Journal recovery
	 * expects to find devices marked for journal data on unclean mount.
	 */
	ret = bch2_journal_meta(&c->journal);
	if (ret)
		goto err;

	seq = 0;
	spin_lock(&j->lock);
	while (!ret) {
		struct bch_replicas_padded replicas;

		seq = max(seq, journal_last_seq(j));
		if (seq >= j->pin.back)
			break;
		bch2_devlist_to_replicas(&replicas.e, BCH_DATA_journal,
					 journal_seq_pin(j, seq)->devs);
		seq++;

		if (replicas.e.nr_devs) {
			spin_unlock(&j->lock);
			ret = bch2_mark_replicas(c, &replicas.e);
			spin_lock(&j->lock);
		}
	}
	spin_unlock(&j->lock);
err:
	ret = bch2_replicas_gc_end(c, ret);
	mutex_unlock(&c->replicas_gc_lock);

	return ret;
}

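/*
 * Print the pins held on journal entry *@seq for debugfs; returns true when
 * there are no more entries to print:
 */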
bool bch2_journal_seq_pins_to_text(struct printbuf *out, struct journal *j, u64 *seq)
{
	struct journal_entry_pin_list *pin_list;
	struct journal_entry_pin *pin;

	spin_lock(&j->lock);
	if (!test_bit(JOURNAL_running, &j->flags)) {
		spin_unlock(&j->lock);
		return true;
	}

	*seq = max(*seq, j->pin.front);

	if (*seq >= j->pin.back) {
		spin_unlock(&j->lock);
		return true;
	}

	out->atomic++;

	pin_list = journal_seq_pin(j, *seq);

	prt_printf(out, "%llu: count %u\n", *seq, atomic_read(&pin_list->count));
	printbuf_indent_add(out, 2);

	prt_printf(out, "unflushed:\n");
	for (unsigned i = 0; i < ARRAY_SIZE(pin_list->unflushed); i++)
		list_for_each_entry(pin, &pin_list->unflushed[i], list)
			prt_printf(out, "\t%px %ps\n", pin, pin->flush);

	prt_printf(out, "flushed:\n");
	for (unsigned i = 0; i < ARRAY_SIZE(pin_list->flushed); i++)
		list_for_each_entry(pin, &pin_list->flushed[i], list)
			prt_printf(out, "\t%px %ps\n", pin, pin->flush);

	printbuf_indent_sub(out, 2);

	--out->atomic;
	spin_unlock(&j->lock);

	return false;
}

void bch2_journal_pins_to_text(struct printbuf *out, struct journal *j)
{
	u64 seq = 0;

	while (!bch2_journal_seq_pins_to_text(out, j, &seq))
		seq++;
}