// SPDX-License-Identifier: GPL-2.0
#include "bcachefs.h"
#include "alloc_background.h"
#include "alloc_foreground.h"
#include "backpointers.h"
#include "btree_cache.h"
#include "btree_io.h"
#include "btree_key_cache.h"
#include "btree_update.h"
#include "btree_update_interior.h"
#include "btree_gc.h"
#include "btree_write_buffer.h"
#include "buckets.h"
#include "buckets_waiting_for_journal.h"
#include "clock.h"
#include "debug.h"
#include "ec.h"
#include "error.h"
#include "lru.h"
#include "recovery.h"
#include "trace.h"
#include "varint.h"

#include <linux/kthread.h>
#include <linux/math64.h>
#include <linux/random.h>
#include <linux/rculist.h>
#include <linux/rcupdate.h>
#include <linux/sched/task.h>
#include <linux/sort.h>

static void bch2_discard_one_bucket_fast(struct bch_fs *c, struct bpos bucket);

/* Persistent alloc info: */

static const unsigned BCH_ALLOC_V1_FIELD_BYTES[] = {
#define x(name, bits) [BCH_ALLOC_FIELD_V1_##name] = bits / 8,
	BCH_ALLOC_FIELDS_V1()
#undef x
};
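
/*
 * Illustrative expansion (not compiled; the field names here are
 * assumptions -- the authoritative list is the BCH_ALLOC_FIELDS_V1()
 * x-macro): with fields declared roughly as
 *
 *	x(read_time,	16)
 *	x(write_time,	16)
 *	...
 *
 * the table above becomes
 *
 *	[BCH_ALLOC_FIELD_V1_read_time]	= 2,
 *	[BCH_ALLOC_FIELD_V1_write_time]	= 2,
 *	...
 *
 * i.e. the on-disk width in bytes of each optional v1 field.
 */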

struct bkey_alloc_unpacked {
	u64		journal_seq;
	u8		gen;
	u8		oldest_gen;
	u8		data_type;
	bool		need_discard:1;
	bool		need_inc_gen:1;
#define x(_name, _bits)	u##_bits _name;
	BCH_ALLOC_FIELDS_V2()
#undef x
};

static inline u64 alloc_field_v1_get(const struct bch_alloc *a,
				     const void **p, unsigned field)
{
	unsigned bytes = BCH_ALLOC_V1_FIELD_BYTES[field];
	u64 v;

	if (!(a->fields & (1 << field)))
		return 0;

	switch (bytes) {
	case 1:
		v = *((const u8 *) *p);
		break;
	case 2:
		v = le16_to_cpup(*p);
		break;
	case 4:
		v = le32_to_cpup(*p);
		break;
	case 8:
		v = le64_to_cpup(*p);
		break;
	default:
		BUG();
	}

	*p += bytes;
	return v;
}

static void bch2_alloc_unpack_v1(struct bkey_alloc_unpacked *out,
				 struct bkey_s_c k)
{
	const struct bch_alloc *in = bkey_s_c_to_alloc(k).v;
	const void *d = in->data;
	unsigned idx = 0;

	out->gen = in->gen;

#define x(_name, _bits) out->_name = alloc_field_v1_get(in, &d, idx++);
	BCH_ALLOC_FIELDS_V1()
#undef x
}
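
/*
 * Note that v1 values are a presence bitmap (a->fields) followed by a
 * packed run of little-endian values, so the decode above must consume
 * fields in exactly the order BCH_ALLOC_FIELDS_V1() declares them: each
 * field's offset is implicit in how far *p has already advanced.
 */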

static int bch2_alloc_unpack_v2(struct bkey_alloc_unpacked *out,
				struct bkey_s_c k)
{
	struct bkey_s_c_alloc_v2 a = bkey_s_c_to_alloc_v2(k);
	const u8 *in = a.v->data;
	const u8 *end = bkey_val_end(a);
	unsigned fieldnr = 0;
	int ret;
	u64 v;

	out->gen = a.v->gen;
	out->oldest_gen = a.v->oldest_gen;
	out->data_type = a.v->data_type;

#define x(_name, _bits)							\
	if (fieldnr < a.v->nr_fields) {					\
		ret = bch2_varint_decode_fast(in, end, &v);		\
		if (ret < 0)						\
			return ret;					\
		in += ret;						\
	} else {							\
		v = 0;							\
	}								\
	out->_name = v;							\
	if (v != out->_name)						\
		return -1;						\
	fieldnr++;

	BCH_ALLOC_FIELDS_V2()
#undef x
	return 0;
}
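
/*
 * The "if (v != out->_name) return -1" check above catches truncation,
 * since out->_name may be narrower than u64. For example, if _bits is 8
 * and the varint decodes to 300, out->_name holds 300 & 0xff == 44, the
 * comparison fails, and the key is rejected rather than silently
 * unpacked with a wrong value.
 */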

static int bch2_alloc_unpack_v3(struct bkey_alloc_unpacked *out,
				struct bkey_s_c k)
{
	struct bkey_s_c_alloc_v3 a = bkey_s_c_to_alloc_v3(k);
	const u8 *in = a.v->data;
	const u8 *end = bkey_val_end(a);
	unsigned fieldnr = 0;
	int ret;
	u64 v;

	out->gen = a.v->gen;
	out->oldest_gen = a.v->oldest_gen;
	out->data_type = a.v->data_type;
	out->need_discard = BCH_ALLOC_V3_NEED_DISCARD(a.v);
	out->need_inc_gen = BCH_ALLOC_V3_NEED_INC_GEN(a.v);
	out->journal_seq = le64_to_cpu(a.v->journal_seq);

#define x(_name, _bits)							\
	if (fieldnr < a.v->nr_fields) {					\
		ret = bch2_varint_decode_fast(in, end, &v);		\
		if (ret < 0)						\
			return ret;					\
		in += ret;						\
	} else {							\
		v = 0;							\
	}								\
	out->_name = v;							\
	if (v != out->_name)						\
		return -1;						\
	fieldnr++;

	BCH_ALLOC_FIELDS_V2()
#undef x
	return 0;
}

static struct bkey_alloc_unpacked bch2_alloc_unpack(struct bkey_s_c k)
{
	struct bkey_alloc_unpacked ret = { .gen = 0 };

	switch (k.k->type) {
	case KEY_TYPE_alloc:
		bch2_alloc_unpack_v1(&ret, k);
		break;
	case KEY_TYPE_alloc_v2:
		bch2_alloc_unpack_v2(&ret, k);
		break;
	case KEY_TYPE_alloc_v3:
		bch2_alloc_unpack_v3(&ret, k);
		break;
	}

	return ret;
}

static unsigned bch_alloc_v1_val_u64s(const struct bch_alloc *a)
{
	unsigned i, bytes = offsetof(struct bch_alloc, data);

	for (i = 0; i < ARRAY_SIZE(BCH_ALLOC_V1_FIELD_BYTES); i++)
		if (a->fields & (1 << i))
			bytes += BCH_ALLOC_V1_FIELD_BYTES[i];

	return DIV_ROUND_UP(bytes, sizeof(u64));
}

int bch2_alloc_v1_invalid(struct bch_fs *c, struct bkey_s_c k,
			  enum bch_validate_flags flags,
			  struct printbuf *err)
{
	struct bkey_s_c_alloc a = bkey_s_c_to_alloc(k);
	int ret = 0;

	/* allow for unknown fields */
	bkey_fsck_err_on(bkey_val_u64s(a.k) < bch_alloc_v1_val_u64s(a.v), c, err,
			 alloc_v1_val_size_bad,
			 "incorrect value size (%zu < %u)",
			 bkey_val_u64s(a.k), bch_alloc_v1_val_u64s(a.v));
fsck_err:
	return ret;
}

int bch2_alloc_v2_invalid(struct bch_fs *c, struct bkey_s_c k,
			  enum bch_validate_flags flags,
			  struct printbuf *err)
{
	struct bkey_alloc_unpacked u;
	int ret = 0;

	bkey_fsck_err_on(bch2_alloc_unpack_v2(&u, k), c, err,
			 alloc_v2_unpack_error,
			 "unpack error");
fsck_err:
	return ret;
}

int bch2_alloc_v3_invalid(struct bch_fs *c, struct bkey_s_c k,
			  enum bch_validate_flags flags,
			  struct printbuf *err)
{
	struct bkey_alloc_unpacked u;
	int ret = 0;

	bkey_fsck_err_on(bch2_alloc_unpack_v3(&u, k), c, err,
			 alloc_v2_unpack_error,
			 "unpack error");
fsck_err:
	return ret;
}

int bch2_alloc_v4_invalid(struct bch_fs *c, struct bkey_s_c k,
			  enum bch_validate_flags flags, struct printbuf *err)
{
	struct bkey_s_c_alloc_v4 a = bkey_s_c_to_alloc_v4(k);
	int ret = 0;

	bkey_fsck_err_on(alloc_v4_u64s_noerror(a.v) > bkey_val_u64s(k.k), c, err,
			 alloc_v4_val_size_bad,
			 "bad val size (%u > %zu)",
			 alloc_v4_u64s_noerror(a.v), bkey_val_u64s(k.k));

	bkey_fsck_err_on(!BCH_ALLOC_V4_BACKPOINTERS_START(a.v) &&
			 BCH_ALLOC_V4_NR_BACKPOINTERS(a.v), c, err,
			 alloc_v4_backpointers_start_bad,
			 "invalid backpointers_start");

	bkey_fsck_err_on(alloc_data_type(*a.v, a.v->data_type) != a.v->data_type, c, err,
			 alloc_key_data_type_bad,
			 "invalid data type (got %u should be %u)",
			 a.v->data_type, alloc_data_type(*a.v, a.v->data_type));

	switch (a.v->data_type) {
	case BCH_DATA_free:
	case BCH_DATA_need_gc_gens:
	case BCH_DATA_need_discard:
		bkey_fsck_err_on(bch2_bucket_sectors_total(*a.v) || a.v->stripe,
				 c, err, alloc_key_empty_but_have_data,
				 "empty data type free but have data");
		break;
	case BCH_DATA_sb:
	case BCH_DATA_journal:
	case BCH_DATA_btree:
	case BCH_DATA_user:
	case BCH_DATA_parity:
		bkey_fsck_err_on(!bch2_bucket_sectors_dirty(*a.v),
				 c, err, alloc_key_dirty_sectors_0,
				 "data_type %s but dirty_sectors==0",
				 bch2_data_type_str(a.v->data_type));
		break;
	case BCH_DATA_cached:
		bkey_fsck_err_on(!a.v->cached_sectors ||
				 bch2_bucket_sectors_dirty(*a.v) ||
				 a.v->stripe,
				 c, err, alloc_key_cached_inconsistency,
				 "data type inconsistency");

		bkey_fsck_err_on(!a.v->io_time[READ] &&
				 c->curr_recovery_pass > BCH_RECOVERY_PASS_check_alloc_to_lru_refs,
				 c, err, alloc_key_cached_but_read_time_zero,
				 "cached bucket with read_time == 0");
		break;
	case BCH_DATA_stripe:
		break;
	}
fsck_err:
	return ret;
}

void bch2_alloc_v4_swab(struct bkey_s k)
{
	struct bch_alloc_v4 *a = bkey_s_to_alloc_v4(k).v;
	struct bch_backpointer *bp, *bps;

	a->journal_seq = swab64(a->journal_seq);
	a->flags = swab32(a->flags);
	a->dirty_sectors = swab32(a->dirty_sectors);
	a->cached_sectors = swab32(a->cached_sectors);
	a->io_time[0] = swab64(a->io_time[0]);
	a->io_time[1] = swab64(a->io_time[1]);
	a->stripe = swab32(a->stripe);
	a->nr_external_backpointers = swab32(a->nr_external_backpointers);
	a->fragmentation_lru = swab64(a->fragmentation_lru);

	bps = alloc_v4_backpointers(a);
	for (bp = bps; bp < bps + BCH_ALLOC_V4_NR_BACKPOINTERS(a); bp++) {
		bp->bucket_offset = swab40(bp->bucket_offset);
		bp->bucket_len = swab32(bp->bucket_len);
		bch2_bpos_swab(&bp->pos);
	}
}

void bch2_alloc_to_text(struct printbuf *out, struct bch_fs *c, struct bkey_s_c k)
{
	struct bch_alloc_v4 _a;
	const struct bch_alloc_v4 *a = bch2_alloc_to_v4(k, &_a);

	prt_newline(out);
	printbuf_indent_add(out, 2);

	prt_printf(out, "gen %u oldest_gen %u data_type ", a->gen, a->oldest_gen);
	bch2_prt_data_type(out, a->data_type);
	prt_newline(out);
	prt_printf(out, "journal_seq %llu\n", a->journal_seq);
	prt_printf(out, "need_discard %llu\n", BCH_ALLOC_V4_NEED_DISCARD(a));
	prt_printf(out, "need_inc_gen %llu\n", BCH_ALLOC_V4_NEED_INC_GEN(a));
	prt_printf(out, "dirty_sectors %u\n", a->dirty_sectors);
	prt_printf(out, "cached_sectors %u\n", a->cached_sectors);
	prt_printf(out, "stripe %u\n", a->stripe);
	prt_printf(out, "stripe_redundancy %u\n", a->stripe_redundancy);
	prt_printf(out, "io_time[READ] %llu\n", a->io_time[READ]);
	prt_printf(out, "io_time[WRITE] %llu\n", a->io_time[WRITE]);
	prt_printf(out, "fragmentation %llu\n", a->fragmentation_lru);
	prt_printf(out, "bp_start %llu\n", BCH_ALLOC_V4_BACKPOINTERS_START(a));
	printbuf_indent_sub(out, 2);
}

void __bch2_alloc_to_v4(struct bkey_s_c k, struct bch_alloc_v4 *out)
{
	if (k.k->type == KEY_TYPE_alloc_v4) {
		void *src, *dst;

		*out = *bkey_s_c_to_alloc_v4(k).v;

		src = alloc_v4_backpointers(out);
		SET_BCH_ALLOC_V4_BACKPOINTERS_START(out, BCH_ALLOC_V4_U64s);
		dst = alloc_v4_backpointers(out);

		if (src < dst)
			memset(src, 0, dst - src);

		SET_BCH_ALLOC_V4_NR_BACKPOINTERS(out, 0);
	} else {
		struct bkey_alloc_unpacked u = bch2_alloc_unpack(k);

		*out = (struct bch_alloc_v4) {
			.journal_seq		= u.journal_seq,
			.flags			= u.need_discard,
			.gen			= u.gen,
			.oldest_gen		= u.oldest_gen,
			.data_type		= u.data_type,
			.stripe_redundancy	= u.stripe_redundancy,
			.dirty_sectors		= u.dirty_sectors,
			.cached_sectors		= u.cached_sectors,
			.io_time[READ]		= u.read_time,
			.io_time[WRITE]		= u.write_time,
			.stripe			= u.stripe,
		};

		SET_BCH_ALLOC_V4_BACKPOINTERS_START(out, BCH_ALLOC_V4_U64s);
	}
}
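
/*
 * A sketch of the src/dst dance above: keys written by older versions may
 * have a smaller fixed section than today's BCH_ALLOC_V4_U64s, so moving
 * BACKPOINTERS_START forward widens the fixed section, and the memset
 * zero-initializes the bytes the backpointers area used to occupy:
 *
 *	|-- old fixed fields --|????????|-- backpointers --|
 *	                       ^src     ^dst
 *
 * memset(src, 0, dst - src) clears the "?" gap.
 */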

static noinline struct bkey_i_alloc_v4 *
__bch2_alloc_to_v4_mut(struct btree_trans *trans, struct bkey_s_c k)
{
	struct bkey_i_alloc_v4 *ret;

	ret = bch2_trans_kmalloc(trans, max(bkey_bytes(k.k), sizeof(struct bkey_i_alloc_v4)));
	if (IS_ERR(ret))
		return ret;

	if (k.k->type == KEY_TYPE_alloc_v4) {
		void *src, *dst;

		bkey_reassemble(&ret->k_i, k);

		src = alloc_v4_backpointers(&ret->v);
		SET_BCH_ALLOC_V4_BACKPOINTERS_START(&ret->v, BCH_ALLOC_V4_U64s);
		dst = alloc_v4_backpointers(&ret->v);

		if (src < dst)
			memset(src, 0, dst - src);

		SET_BCH_ALLOC_V4_NR_BACKPOINTERS(&ret->v, 0);
		set_alloc_v4_u64s(ret);
	} else {
		bkey_alloc_v4_init(&ret->k_i);
		ret->k.p = k.k->p;
		bch2_alloc_to_v4(k, &ret->v);
	}
	return ret;
}

static inline struct bkey_i_alloc_v4 *bch2_alloc_to_v4_mut_inlined(struct btree_trans *trans, struct bkey_s_c k)
{
	struct bkey_s_c_alloc_v4 a;

	if (likely(k.k->type == KEY_TYPE_alloc_v4) &&
	    ((a = bkey_s_c_to_alloc_v4(k), true) &&
	     BCH_ALLOC_V4_NR_BACKPOINTERS(a.v) == 0))
		return bch2_bkey_make_mut_noupdate_typed(trans, k, alloc_v4);

	return __bch2_alloc_to_v4_mut(trans, k);
}

struct bkey_i_alloc_v4 *bch2_alloc_to_v4_mut(struct btree_trans *trans, struct bkey_s_c k)
{
	return bch2_alloc_to_v4_mut_inlined(trans, k);
}

struct bkey_i_alloc_v4 *
bch2_trans_start_alloc_update_noupdate(struct btree_trans *trans, struct btree_iter *iter,
				       struct bpos pos)
{
	struct bkey_s_c k = bch2_bkey_get_iter(trans, iter, BTREE_ID_alloc, pos,
					       BTREE_ITER_with_updates|
					       BTREE_ITER_cached|
					       BTREE_ITER_intent);
	int ret = bkey_err(k);
	if (unlikely(ret))
		return ERR_PTR(ret);

	struct bkey_i_alloc_v4 *a = bch2_alloc_to_v4_mut_inlined(trans, k);
	ret = PTR_ERR_OR_ZERO(a);
	if (unlikely(ret))
		goto err;
	return a;
err:
	bch2_trans_iter_exit(trans, iter);
	return ERR_PTR(ret);
}

__flatten
struct bkey_i_alloc_v4 *bch2_trans_start_alloc_update(struct btree_trans *trans, struct bpos pos)
{
	struct btree_iter iter;
	struct bkey_i_alloc_v4 *a = bch2_trans_start_alloc_update_noupdate(trans, &iter, pos);
	int ret = PTR_ERR_OR_ZERO(a);
	if (ret)
		return ERR_PTR(ret);

	ret = bch2_trans_update(trans, &iter, &a->k_i, 0);
	bch2_trans_iter_exit(trans, &iter);
	return unlikely(ret) ? ERR_PTR(ret) : a;
}

static struct bpos alloc_gens_pos(struct bpos pos, unsigned *offset)
{
	*offset = pos.offset & KEY_TYPE_BUCKET_GENS_MASK;

	pos.offset >>= KEY_TYPE_BUCKET_GENS_BITS;
	return pos;
}

static struct bpos bucket_gens_pos_to_alloc(struct bpos pos, unsigned offset)
{
	pos.offset <<= KEY_TYPE_BUCKET_GENS_BITS;
	pos.offset += offset;
	return pos;
}
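
/*
 * A worked example of the packing above, assuming KEY_TYPE_BUCKET_GENS_BITS
 * is 8 (one bucket_gens key then covers 256 buckets, and
 * KEY_TYPE_BUCKET_GENS_MASK is 255):
 *
 *	unsigned offset;
 *	struct bpos g = alloc_gens_pos(POS(2, 1000), &offset);
 *	// g == POS(2, 3), offset == 232: 1000 == 3 * 256 + 232
 *	// bucket_gens_pos_to_alloc(g, offset) == POS(2, 1000) again
 */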

static unsigned alloc_gen(struct bkey_s_c k, unsigned offset)
{
	return k.k->type == KEY_TYPE_bucket_gens
		? bkey_s_c_to_bucket_gens(k).v->gens[offset]
		: 0;
}

int bch2_bucket_gens_invalid(struct bch_fs *c, struct bkey_s_c k,
			     enum bch_validate_flags flags,
			     struct printbuf *err)
{
	int ret = 0;

	bkey_fsck_err_on(bkey_val_bytes(k.k) != sizeof(struct bch_bucket_gens), c, err,
			 bucket_gens_val_size_bad,
			 "bad val size (%zu != %zu)",
			 bkey_val_bytes(k.k), sizeof(struct bch_bucket_gens));
fsck_err:
	return ret;
}

void bch2_bucket_gens_to_text(struct printbuf *out, struct bch_fs *c, struct bkey_s_c k)
{
	struct bkey_s_c_bucket_gens g = bkey_s_c_to_bucket_gens(k);
	unsigned i;

	for (i = 0; i < ARRAY_SIZE(g.v->gens); i++) {
		if (i)
			prt_char(out, ' ');
		prt_printf(out, "%u", g.v->gens[i]);
	}
}

int bch2_bucket_gens_init(struct bch_fs *c)
{
	struct btree_trans *trans = bch2_trans_get(c);
	struct bkey_i_bucket_gens g;
	bool have_bucket_gens_key = false;
	int ret;

	ret = for_each_btree_key(trans, iter, BTREE_ID_alloc, POS_MIN,
				 BTREE_ITER_prefetch, k, ({
		/*
		 * Not a fsck error because this is checked/repaired by
		 * bch2_check_alloc_key() which runs later:
		 */
		if (!bch2_dev_bucket_exists(c, k.k->p))
			continue;

		struct bch_alloc_v4 a;
		u8 gen = bch2_alloc_to_v4(k, &a)->gen;
		unsigned offset;
		struct bpos pos = alloc_gens_pos(iter.pos, &offset);
		int ret2 = 0;

		if (have_bucket_gens_key && bkey_cmp(iter.pos, pos)) {
			ret2 = bch2_btree_insert_trans(trans, BTREE_ID_bucket_gens, &g.k_i, 0) ?:
				bch2_trans_commit(trans, NULL, NULL, BCH_TRANS_COMMIT_no_enospc);
			if (ret2)
				goto iter_err;
			have_bucket_gens_key = false;
		}

		if (!have_bucket_gens_key) {
			bkey_bucket_gens_init(&g.k_i);
			g.k.p = pos;
			have_bucket_gens_key = true;
		}

		g.v.gens[offset] = gen;
iter_err:
		ret2;
	}));

	if (have_bucket_gens_key && !ret)
		ret = commit_do(trans, NULL, NULL,
				BCH_TRANS_COMMIT_no_enospc,
			bch2_btree_insert_trans(trans, BTREE_ID_bucket_gens, &g.k_i, 0));

	bch2_trans_put(trans);

	bch_err_fn(c, ret);
	return ret;
}

int bch2_alloc_read(struct bch_fs *c)
{
	struct btree_trans *trans = bch2_trans_get(c);
	struct bch_dev *ca = NULL;
	int ret;

	down_read(&c->gc_lock);

	if (c->sb.version_upgrade_complete >= bcachefs_metadata_version_bucket_gens) {
		ret = for_each_btree_key(trans, iter, BTREE_ID_bucket_gens, POS_MIN,
					 BTREE_ITER_prefetch, k, ({
			u64 start = bucket_gens_pos_to_alloc(k.k->p, 0).offset;
			u64 end = bucket_gens_pos_to_alloc(bpos_nosnap_successor(k.k->p), 0).offset;

			if (k.k->type != KEY_TYPE_bucket_gens)
				continue;

			ca = bch2_dev_iterate(c, ca, k.k->p.inode);
			/*
			 * Not a fsck error because this is checked/repaired by
			 * bch2_check_alloc_key() which runs later:
			 */
			if (!ca) {
				bch2_btree_iter_set_pos(&iter, POS(k.k->p.inode + 1, 0));
				continue;
			}

			const struct bch_bucket_gens *g = bkey_s_c_to_bucket_gens(k).v;

			for (u64 b = max_t(u64, ca->mi.first_bucket, start);
			     b < min_t(u64, ca->mi.nbuckets, end);
			     b++)
				*bucket_gen(ca, b) = g->gens[b & KEY_TYPE_BUCKET_GENS_MASK];
			0;
		}));
	} else {
		ret = for_each_btree_key(trans, iter, BTREE_ID_alloc, POS_MIN,
					 BTREE_ITER_prefetch, k, ({
			ca = bch2_dev_iterate(c, ca, k.k->p.inode);
			/*
			 * Not a fsck error because this is checked/repaired by
			 * bch2_check_alloc_key() which runs later:
			 */
			if (!ca) {
				bch2_btree_iter_set_pos(&iter, POS(k.k->p.inode + 1, 0));
				continue;
			}

			struct bch_alloc_v4 a;
			*bucket_gen(ca, k.k->p.offset) = bch2_alloc_to_v4(k, &a)->gen;
			0;
		}));
	}

	bch2_dev_put(ca);
	bch2_trans_put(trans);
	up_read(&c->gc_lock);

	bch_err_fn(c, ret);
	return ret;
}

/* Free space/discard btree: */

static int bch2_bucket_do_index(struct btree_trans *trans,
				struct bch_dev *ca,
				struct bkey_s_c alloc_k,
				const struct bch_alloc_v4 *a,
				bool set)
{
	struct bch_fs *c = trans->c;
	struct btree_iter iter;
	struct bkey_s_c old;
	struct bkey_i *k;
	enum btree_id btree;
	enum bch_bkey_type old_type = !set ? KEY_TYPE_set : KEY_TYPE_deleted;
	enum bch_bkey_type new_type =  set ? KEY_TYPE_set : KEY_TYPE_deleted;
	struct printbuf buf = PRINTBUF;
	int ret;

	if (a->data_type != BCH_DATA_free &&
	    a->data_type != BCH_DATA_need_discard)
		return 0;

	k = bch2_trans_kmalloc_nomemzero(trans, sizeof(*k));
	if (IS_ERR(k))
		return PTR_ERR(k);

	bkey_init(&k->k);
	k->k.type = new_type;

	switch (a->data_type) {
	case BCH_DATA_free:
		btree = BTREE_ID_freespace;
		k->k.p = alloc_freespace_pos(alloc_k.k->p, *a);
		bch2_key_resize(&k->k, 1);
		break;
	case BCH_DATA_need_discard:
		btree = BTREE_ID_need_discard;
		k->k.p = alloc_k.k->p;
		break;
	default:
		return 0;
	}

	old = bch2_bkey_get_iter(trans, &iter, btree,
				 bkey_start_pos(&k->k),
				 BTREE_ITER_intent);
	ret = bkey_err(old);
	if (ret)
		return ret;

	if (ca->mi.freespace_initialized &&
	    c->curr_recovery_pass > BCH_RECOVERY_PASS_check_alloc_info &&
	    bch2_trans_inconsistent_on(old.k->type != old_type, trans,
			"incorrect key when %s %s:%llu:%llu:0 (got %s should be %s)\n"
			"  for %s",
			set ? "setting" : "clearing",
			bch2_btree_id_str(btree),
			iter.pos.inode,
			iter.pos.offset,
			bch2_bkey_types[old.k->type],
			bch2_bkey_types[old_type],
			(bch2_bkey_val_to_text(&buf, c, alloc_k), buf.buf))) {
		ret = -EIO;
		goto err;
	}

	ret = bch2_trans_update(trans, &iter, k, 0);
err:
	bch2_trans_iter_exit(trans, &iter);
	printbuf_exit(&buf);
	return ret;
}
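
/*
 * An illustrative sketch of the freespace position used above, assuming
 * alloc_freespace_pos()/alloc_freespace_genbits() pack generation bits
 * into the top bits of the offset (which is how
 * bch2_check_discard_freespace_key() below decodes it):
 *
 *	pos = POS(dev, (genbits << 56) | bucket)
 *
 * A bucket thus lands at a different freespace position when its gen
 * changes, which is why bch2_trigger_alloc() reindexes when
 * alloc_freespace_genbits() differs between old and new.
 */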

static noinline int bch2_bucket_gen_update(struct btree_trans *trans,
					   struct bpos bucket, u8 gen)
{
	struct btree_iter iter;
	unsigned offset;
	struct bpos pos = alloc_gens_pos(bucket, &offset);
	struct bkey_i_bucket_gens *g;
	struct bkey_s_c k;
	int ret;

	g = bch2_trans_kmalloc(trans, sizeof(*g));
	ret = PTR_ERR_OR_ZERO(g);
	if (ret)
		return ret;

	k = bch2_bkey_get_iter(trans, &iter, BTREE_ID_bucket_gens, pos,
			       BTREE_ITER_intent|
			       BTREE_ITER_with_updates);
	ret = bkey_err(k);
	if (ret)
		return ret;

	if (k.k->type != KEY_TYPE_bucket_gens) {
		bkey_bucket_gens_init(&g->k_i);
		g->k.p = iter.pos;
	} else {
		bkey_reassemble(&g->k_i, k);
	}

	g->v.gens[offset] = gen;

	ret = bch2_trans_update(trans, &iter, &g->k_i, 0);
	bch2_trans_iter_exit(trans, &iter);
	return ret;
}

int bch2_trigger_alloc(struct btree_trans *trans,
		       enum btree_id btree, unsigned level,
		       struct bkey_s_c old, struct bkey_s new,
		       enum btree_iter_update_trigger_flags flags)
{
	struct bch_fs *c = trans->c;
	int ret = 0;

	struct bch_dev *ca = bch2_dev_bucket_tryget(c, new.k->p);
	if (!ca)
		return -EIO;

	struct bch_alloc_v4 old_a_convert;
	const struct bch_alloc_v4 *old_a = bch2_alloc_to_v4(old, &old_a_convert);

	if (flags & BTREE_TRIGGER_transactional) {
		struct bch_alloc_v4 *new_a = bkey_s_to_alloc_v4(new).v;

		alloc_data_type_set(new_a, new_a->data_type);

		if (bch2_bucket_sectors_total(*new_a) > bch2_bucket_sectors_total(*old_a)) {
			new_a->io_time[READ] = max_t(u64, 1, atomic64_read(&c->io_clock[READ].now));
			new_a->io_time[WRITE] = max_t(u64, 1, atomic64_read(&c->io_clock[WRITE].now));
			SET_BCH_ALLOC_V4_NEED_INC_GEN(new_a, true);
			SET_BCH_ALLOC_V4_NEED_DISCARD(new_a, true);
		}

		if (data_type_is_empty(new_a->data_type) &&
		    BCH_ALLOC_V4_NEED_INC_GEN(new_a) &&
		    !bch2_bucket_is_open_safe(c, new.k->p.inode, new.k->p.offset)) {
			new_a->gen++;
			SET_BCH_ALLOC_V4_NEED_INC_GEN(new_a, false);
		}

		if (old_a->data_type != new_a->data_type ||
		    (new_a->data_type == BCH_DATA_free &&
		     alloc_freespace_genbits(*old_a) != alloc_freespace_genbits(*new_a))) {
			ret = bch2_bucket_do_index(trans, ca, old, old_a, false) ?:
				bch2_bucket_do_index(trans, ca, new.s_c, new_a, true);
			if (ret)
				goto err;
		}

		if (new_a->data_type == BCH_DATA_cached &&
		    !new_a->io_time[READ])
			new_a->io_time[READ] = max_t(u64, 1, atomic64_read(&c->io_clock[READ].now));

		u64 old_lru = alloc_lru_idx_read(*old_a);
		u64 new_lru = alloc_lru_idx_read(*new_a);
		if (old_lru != new_lru) {
			ret = bch2_lru_change(trans, new.k->p.inode,
					      bucket_to_u64(new.k->p),
					      old_lru, new_lru);
			if (ret)
				goto err;
		}

		new_a->fragmentation_lru = alloc_lru_idx_fragmentation(*new_a, ca);
		if (old_a->fragmentation_lru != new_a->fragmentation_lru) {
			ret = bch2_lru_change(trans,
					      BCH_LRU_FRAGMENTATION_START,
					      bucket_to_u64(new.k->p),
					      old_a->fragmentation_lru, new_a->fragmentation_lru);
			if (ret)
				goto err;
		}

		if (old_a->gen != new_a->gen) {
			ret = bch2_bucket_gen_update(trans, new.k->p, new_a->gen);
			if (ret)
				goto err;
		}

		/*
		 * need to know if we're getting called from the invalidate
		 * path or not:
		 */

		if ((flags & BTREE_TRIGGER_bucket_invalidate) &&
		    old_a->cached_sectors) {
			ret = bch2_update_cached_sectors_list(trans, new.k->p.inode,
							      -((s64) old_a->cached_sectors));
			if (ret)
				goto err;
		}
	}

	if ((flags & BTREE_TRIGGER_atomic) && (flags & BTREE_TRIGGER_insert)) {
		struct bch_alloc_v4 *new_a = bkey_s_to_alloc_v4(new).v;
		u64 journal_seq = trans->journal_res.seq;
		u64 bucket_journal_seq = new_a->journal_seq;

		if ((flags & BTREE_TRIGGER_insert) &&
		    data_type_is_empty(old_a->data_type) !=
		    data_type_is_empty(new_a->data_type) &&
		    new.k->type == KEY_TYPE_alloc_v4) {
			struct bch_alloc_v4 *v = bkey_s_to_alloc_v4(new).v;

			/*
			 * If the btree updates referring to a bucket weren't
			 * flushed before the bucket became empty again, then
			 * we don't have to wait on a journal flush before we
			 * can reuse the bucket:
			 */
			v->journal_seq = bucket_journal_seq =
				data_type_is_empty(new_a->data_type) &&
				(journal_seq == v->journal_seq ||
				 bch2_journal_noflush_seq(&c->journal, v->journal_seq))
				? 0 : journal_seq;
		}

		if (!data_type_is_empty(old_a->data_type) &&
		    data_type_is_empty(new_a->data_type) &&
		    bucket_journal_seq) {
			ret = bch2_set_bucket_needs_journal_commit(&c->buckets_waiting_for_journal,
					c->journal.flushed_seq_ondisk,
					new.k->p.inode, new.k->p.offset,
					bucket_journal_seq);
			if (ret) {
				bch2_fs_fatal_error(c,
					"setting bucket_needs_journal_commit: %s", bch2_err_str(ret));
				goto err;
			}
		}

		percpu_down_read(&c->mark_lock);
		if (new_a->gen != old_a->gen)
			*bucket_gen(ca, new.k->p.offset) = new_a->gen;

		bch2_dev_usage_update(c, ca, old_a, new_a, journal_seq, false);
		percpu_up_read(&c->mark_lock);

#define eval_state(_a, expr)	({ const struct bch_alloc_v4 *a = _a; expr; })
#define statechange(expr)	!eval_state(old_a, expr) && eval_state(new_a, expr)
#define bucket_flushed(a)	(!a->journal_seq || a->journal_seq <= c->journal.flushed_seq_ondisk)

		if (statechange(a->data_type == BCH_DATA_free) &&
		    bucket_flushed(new_a))
			closure_wake_up(&c->freelist_wait);

		if (statechange(a->data_type == BCH_DATA_need_discard) &&
		    !bch2_bucket_is_open(c, new.k->p.inode, new.k->p.offset) &&
		    bucket_flushed(new_a))
			bch2_discard_one_bucket_fast(c, new.k->p);

		if (statechange(a->data_type == BCH_DATA_cached) &&
		    !bch2_bucket_is_open(c, new.k->p.inode, new.k->p.offset) &&
		    should_invalidate_buckets(ca, bch2_dev_usage_read(ca)))
			bch2_do_invalidates(c);

		if (statechange(a->data_type == BCH_DATA_need_gc_gens))
			bch2_gc_gens_async(c);
	}

	if ((flags & BTREE_TRIGGER_gc) &&
	    (flags & BTREE_TRIGGER_bucket_invalidate)) {
		struct bch_alloc_v4 new_a_convert;
		const struct bch_alloc_v4 *new_a = bch2_alloc_to_v4(new.s_c, &new_a_convert);

		percpu_down_read(&c->mark_lock);
		struct bucket *g = gc_bucket(ca, new.k->p.offset);

		bucket_lock(g);

		g->gen_valid = 1;
		g->gen = new_a->gen;
		g->data_type = new_a->data_type;
		g->stripe = new_a->stripe;
		g->stripe_redundancy = new_a->stripe_redundancy;
		g->dirty_sectors = new_a->dirty_sectors;
		g->cached_sectors = new_a->cached_sectors;

		bucket_unlock(g);
		percpu_up_read(&c->mark_lock);
	}
err:
	bch2_dev_put(ca);
	return ret;
}
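
/*
 * Summary of the three trigger phases above:
 *
 *  - BTREE_TRIGGER_transactional: runs inside the transaction and queues
 *    the dependent updates (freespace/need_discard indexes, LRU entries,
 *    bucket_gens) so they commit atomically with the alloc key;
 *  - BTREE_TRIGGER_atomic: runs at commit time, updates in-memory state
 *    (bucket gens, device usage) and kicks off background work (discard,
 *    invalidate, gc_gens);
 *  - BTREE_TRIGGER_gc: mirrors the new key into the GC bucket array.
 */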

/*
 * This synthesizes deleted extents for holes, similar to BTREE_ITER_slots for
 * extents style btrees, but works on non-extents btrees:
 */
static struct bkey_s_c bch2_get_key_or_hole(struct btree_iter *iter, struct bpos end, struct bkey *hole)
{
	struct bkey_s_c k = bch2_btree_iter_peek_slot(iter);

	if (bkey_err(k))
		return k;

	if (k.k->type) {
		return k;
	} else {
		struct btree_iter iter2;
		struct bpos next;

		bch2_trans_copy_iter(&iter2, iter);

		struct btree_path *path = btree_iter_path(iter->trans, iter);
		if (!bpos_eq(path->l[0].b->key.k.p, SPOS_MAX))
			end = bkey_min(end, bpos_nosnap_successor(path->l[0].b->key.k.p));

		end = bkey_min(end, POS(iter->pos.inode, iter->pos.offset + U32_MAX - 1));

		/*
		 * btree node min/max is a closed interval, upto takes a half
		 * open interval:
		 */
		k = bch2_btree_iter_peek_upto(&iter2, end);
		next = iter2.pos;
		bch2_trans_iter_exit(iter->trans, &iter2);

		BUG_ON(next.offset >= iter->pos.offset + U32_MAX);

		if (bkey_err(k))
			return k;

		bkey_init(hole);
		hole->p = iter->pos;

		bch2_key_resize(hole, next.offset - iter->pos.offset);
		return (struct bkey_s_c) { hole, NULL };
	}
}
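
/*
 * For example (hypothetical numbers): with the iterator at a hole at
 * offset 11 and the next real key at offset 20, this returns a
 * synthesized deleted key of size 9 ending at offset 20, i.e. covering
 * buckets [11, 20), without advancing the main iterator.
 */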

static bool next_bucket(struct bch_fs *c, struct bch_dev **ca, struct bpos *bucket)
{
	if (*ca) {
		if (bucket->offset < (*ca)->mi.first_bucket)
			bucket->offset = (*ca)->mi.first_bucket;

		if (bucket->offset < (*ca)->mi.nbuckets)
			return true;

		bch2_dev_put(*ca);
		*ca = NULL;
		bucket->inode++;
		bucket->offset = 0;
	}

	rcu_read_lock();
	*ca = __bch2_next_dev_idx(c, bucket->inode, NULL);
	if (*ca) {
		*bucket = POS((*ca)->dev_idx, (*ca)->mi.first_bucket);
		bch2_dev_get(*ca);
	}
	rcu_read_unlock();

	return *ca != NULL;
}

static struct bkey_s_c bch2_get_key_or_real_bucket_hole(struct btree_iter *iter,
							struct bch_dev **ca, struct bkey *hole)
{
	struct bch_fs *c = iter->trans->c;
	struct bkey_s_c k;
again:
	k = bch2_get_key_or_hole(iter, POS_MAX, hole);
	if (bkey_err(k))
		return k;

	*ca = bch2_dev_iterate_noerror(c, *ca, k.k->p.inode);

	if (!k.k->type) {
		struct bpos hole_start = bkey_start_pos(k.k);

		if (!*ca || !bucket_valid(*ca, hole_start.offset)) {
			if (!next_bucket(c, ca, &hole_start))
				return bkey_s_c_null;

			bch2_btree_iter_set_pos(iter, hole_start);
			goto again;
		}

		if (k.k->p.offset > (*ca)->mi.nbuckets)
			bch2_key_resize(hole, (*ca)->mi.nbuckets - hole_start.offset);
	}

	return k;
}

static noinline_for_stack
int bch2_check_alloc_key(struct btree_trans *trans,
			 struct bkey_s_c alloc_k,
			 struct btree_iter *alloc_iter,
			 struct btree_iter *discard_iter,
			 struct btree_iter *freespace_iter,
			 struct btree_iter *bucket_gens_iter)
{
	struct bch_fs *c = trans->c;
	struct bch_alloc_v4 a_convert;
	const struct bch_alloc_v4 *a;
	unsigned discard_key_type, freespace_key_type;
	unsigned gens_offset;
	struct bkey_s_c k;
	struct printbuf buf = PRINTBUF;
	int ret = 0;

	struct bch_dev *ca = bch2_dev_bucket_tryget_noerror(c, alloc_k.k->p);
	if (fsck_err_on(!ca,
			c, alloc_key_to_missing_dev_bucket,
			"alloc key for invalid device:bucket %llu:%llu",
			alloc_k.k->p.inode, alloc_k.k->p.offset))
		ret = bch2_btree_delete_at(trans, alloc_iter, 0);
	if (!ca)
		return ret;

	if (!ca->mi.freespace_initialized)
		goto out;

	a = bch2_alloc_to_v4(alloc_k, &a_convert);

	discard_key_type = a->data_type == BCH_DATA_need_discard ? KEY_TYPE_set : 0;
	bch2_btree_iter_set_pos(discard_iter, alloc_k.k->p);
	k = bch2_btree_iter_peek_slot(discard_iter);
	ret = bkey_err(k);
	if (ret)
		goto err;

	if (fsck_err_on(k.k->type != discard_key_type,
			c, need_discard_key_wrong,
			"incorrect key in need_discard btree (got %s should be %s)\n"
			"  %s",
			bch2_bkey_types[k.k->type],
			bch2_bkey_types[discard_key_type],
			(bch2_bkey_val_to_text(&buf, c, alloc_k), buf.buf))) {
		struct bkey_i *update =
			bch2_trans_kmalloc(trans, sizeof(*update));

		ret = PTR_ERR_OR_ZERO(update);
		if (ret)
			goto err;

		bkey_init(&update->k);
		update->k.type = discard_key_type;
		update->k.p = discard_iter->pos;

		ret = bch2_trans_update(trans, discard_iter, update, 0);
		if (ret)
			goto err;
	}

	freespace_key_type = a->data_type == BCH_DATA_free ? KEY_TYPE_set : 0;
	bch2_btree_iter_set_pos(freespace_iter, alloc_freespace_pos(alloc_k.k->p, *a));
	k = bch2_btree_iter_peek_slot(freespace_iter);
	ret = bkey_err(k);
	if (ret)
		goto err;

	if (fsck_err_on(k.k->type != freespace_key_type,
			c, freespace_key_wrong,
			"incorrect key in freespace btree (got %s should be %s)\n"
			"  %s",
			bch2_bkey_types[k.k->type],
			bch2_bkey_types[freespace_key_type],
			(printbuf_reset(&buf),
			 bch2_bkey_val_to_text(&buf, c, alloc_k), buf.buf))) {
		struct bkey_i *update =
			bch2_trans_kmalloc(trans, sizeof(*update));

		ret = PTR_ERR_OR_ZERO(update);
		if (ret)
			goto err;

		bkey_init(&update->k);
		update->k.type = freespace_key_type;
		update->k.p = freespace_iter->pos;
		bch2_key_resize(&update->k, 1);

		ret = bch2_trans_update(trans, freespace_iter, update, 0);
		if (ret)
			goto err;
	}

	bch2_btree_iter_set_pos(bucket_gens_iter, alloc_gens_pos(alloc_k.k->p, &gens_offset));
	k = bch2_btree_iter_peek_slot(bucket_gens_iter);
	ret = bkey_err(k);
	if (ret)
		goto err;

	if (fsck_err_on(a->gen != alloc_gen(k, gens_offset),
			c, bucket_gens_key_wrong,
			"incorrect gen in bucket_gens btree (got %u should be %u)\n"
			"  %s",
			alloc_gen(k, gens_offset), a->gen,
			(printbuf_reset(&buf),
			 bch2_bkey_val_to_text(&buf, c, alloc_k), buf.buf))) {
		struct bkey_i_bucket_gens *g =
			bch2_trans_kmalloc(trans, sizeof(*g));

		ret = PTR_ERR_OR_ZERO(g);
		if (ret)
			goto err;

		if (k.k->type == KEY_TYPE_bucket_gens) {
			bkey_reassemble(&g->k_i, k);
		} else {
			bkey_bucket_gens_init(&g->k_i);
			g->k.p = alloc_gens_pos(alloc_k.k->p, &gens_offset);
		}

		g->v.gens[gens_offset] = a->gen;

		ret = bch2_trans_update(trans, bucket_gens_iter, &g->k_i, 0);
		if (ret)
			goto err;
	}
out:
err:
fsck_err:
	bch2_dev_put(ca);
	printbuf_exit(&buf);
	return ret;
}

static noinline_for_stack
int bch2_check_alloc_hole_freespace(struct btree_trans *trans,
				    struct bch_dev *ca,
				    struct bpos start,
				    struct bpos *end,
				    struct btree_iter *freespace_iter)
{
	struct bch_fs *c = trans->c;
	struct bkey_s_c k;
	struct printbuf buf = PRINTBUF;
	int ret;

	if (!ca->mi.freespace_initialized)
		return 0;

	bch2_btree_iter_set_pos(freespace_iter, start);

	k = bch2_btree_iter_peek_slot(freespace_iter);
	ret = bkey_err(k);
	if (ret)
		goto err;

	*end = bkey_min(k.k->p, *end);

	if (fsck_err_on(k.k->type != KEY_TYPE_set,
			c, freespace_hole_missing,
			"hole in alloc btree missing in freespace btree\n"
			"  device %llu buckets %llu-%llu",
			freespace_iter->pos.inode,
			freespace_iter->pos.offset,
			end->offset)) {
		struct bkey_i *update =
			bch2_trans_kmalloc(trans, sizeof(*update));

		ret = PTR_ERR_OR_ZERO(update);
		if (ret)
			goto err;

		bkey_init(&update->k);
		update->k.type = KEY_TYPE_set;
		update->k.p = freespace_iter->pos;
		bch2_key_resize(&update->k,
				min_t(u64, U32_MAX, end->offset -
				      freespace_iter->pos.offset));

		ret = bch2_trans_update(trans, freespace_iter, update, 0);
		if (ret)
			goto err;
	}
err:
fsck_err:
	printbuf_exit(&buf);
	return ret;
}

static noinline_for_stack
int bch2_check_alloc_hole_bucket_gens(struct btree_trans *trans,
				      struct bpos start,
				      struct bpos *end,
				      struct btree_iter *bucket_gens_iter)
{
	struct bch_fs *c = trans->c;
	struct bkey_s_c k;
	struct printbuf buf = PRINTBUF;
	unsigned i, gens_offset, gens_end_offset;
	int ret;

	bch2_btree_iter_set_pos(bucket_gens_iter, alloc_gens_pos(start, &gens_offset));

	k = bch2_btree_iter_peek_slot(bucket_gens_iter);
	ret = bkey_err(k);
	if (ret)
		goto err;

	if (bkey_cmp(alloc_gens_pos(start, &gens_offset),
		     alloc_gens_pos(*end, &gens_end_offset)))
		gens_end_offset = KEY_TYPE_BUCKET_GENS_NR;

	if (k.k->type == KEY_TYPE_bucket_gens) {
		struct bkey_i_bucket_gens g;
		bool need_update = false;

		bkey_reassemble(&g.k_i, k);

		for (i = gens_offset; i < gens_end_offset; i++) {
			if (fsck_err_on(g.v.gens[i], c,
					bucket_gens_hole_wrong,
					"hole in alloc btree at %llu:%llu with nonzero gen in bucket_gens btree (%u)",
					bucket_gens_pos_to_alloc(k.k->p, i).inode,
					bucket_gens_pos_to_alloc(k.k->p, i).offset,
					g.v.gens[i])) {
				g.v.gens[i] = 0;
				need_update = true;
			}
		}

		if (need_update) {
			struct bkey_i *u = bch2_trans_kmalloc(trans, sizeof(g));

			ret = PTR_ERR_OR_ZERO(u);
			if (ret)
				goto err;

			memcpy(u, &g, sizeof(g));

			ret = bch2_trans_update(trans, bucket_gens_iter, u, 0);
			if (ret)
				goto err;
		}
	}

	*end = bkey_min(*end, bucket_gens_pos_to_alloc(bpos_nosnap_successor(k.k->p), 0));
err:
fsck_err:
	printbuf_exit(&buf);
	return ret;
}

static noinline_for_stack int bch2_check_discard_freespace_key(struct btree_trans *trans,
							       struct btree_iter *iter)
{
	struct bch_fs *c = trans->c;
	struct btree_iter alloc_iter;
	struct bkey_s_c alloc_k;
	struct bch_alloc_v4 a_convert;
	const struct bch_alloc_v4 *a;
	u64 genbits;
	struct bpos pos;
	enum bch_data_type state = iter->btree_id == BTREE_ID_need_discard
		? BCH_DATA_need_discard
		: BCH_DATA_free;
	struct printbuf buf = PRINTBUF;
	int ret;

	pos = iter->pos;
	pos.offset &= ~(~0ULL << 56);
	genbits = iter->pos.offset & (~0ULL << 56);

	alloc_k = bch2_bkey_get_iter(trans, &alloc_iter, BTREE_ID_alloc, pos, 0);
	ret = bkey_err(alloc_k);
	if (ret)
		return ret;

	if (fsck_err_on(!bch2_dev_bucket_exists(c, pos), c,
			need_discard_freespace_key_to_invalid_dev_bucket,
			"entry in %s btree for nonexistent dev:bucket %llu:%llu",
			bch2_btree_id_str(iter->btree_id), pos.inode, pos.offset))
		goto delete;

	a = bch2_alloc_to_v4(alloc_k, &a_convert);

	if (fsck_err_on(a->data_type != state ||
			(state == BCH_DATA_free &&
			 genbits != alloc_freespace_genbits(*a)), c,
			need_discard_freespace_key_bad,
			"%s\n  incorrectly set at %s:%llu:%llu:0 (free %u, genbits %llu should be %llu)",
			(bch2_bkey_val_to_text(&buf, c, alloc_k), buf.buf),
			bch2_btree_id_str(iter->btree_id),
			iter->pos.inode,
			iter->pos.offset,
			a->data_type == state,
			genbits >> 56, alloc_freespace_genbits(*a) >> 56))
		goto delete;
out:
fsck_err:
	bch2_set_btree_iter_dontneed(&alloc_iter);
	bch2_trans_iter_exit(trans, &alloc_iter);
	printbuf_exit(&buf);
	return ret;
delete:
	ret = bch2_btree_delete_extent_at(trans, iter,
			iter->btree_id == BTREE_ID_freespace ? 1 : 0, 0) ?:
		bch2_trans_commit(trans, NULL, NULL,
			BCH_TRANS_COMMIT_no_enospc);
	goto out;
}
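
/*
 * The pos decode above, illustrated: need_discard/freespace offsets keep
 * the bucket in the low 56 bits and the generation bits up top, so e.g.
 *
 *	iter->pos.offset == 0xab00000000001234
 *	pos.offset       == 0x0000000000001234	(bucket 0x1234)
 *	genbits          == 0xab00000000000000	(printed >> 56 as 0xab)
 */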

/*
 * We've already checked that generation numbers in the bucket_gens btree are
 * valid for buckets that exist; this just checks for keys for nonexistent
 * buckets.
 */
static noinline_for_stack
int bch2_check_bucket_gens_key(struct btree_trans *trans,
			       struct btree_iter *iter,
			       struct bkey_s_c k)
{
	struct bch_fs *c = trans->c;
	struct bkey_i_bucket_gens g;
	u64 start = bucket_gens_pos_to_alloc(k.k->p, 0).offset;
	u64 end = bucket_gens_pos_to_alloc(bpos_nosnap_successor(k.k->p), 0).offset;
	u64 b;
	bool need_update = false;
	struct printbuf buf = PRINTBUF;
	int ret = 0;

	BUG_ON(k.k->type != KEY_TYPE_bucket_gens);
	bkey_reassemble(&g.k_i, k);

	struct bch_dev *ca = bch2_dev_tryget_noerror(c, k.k->p.inode);
	if (!ca) {
		if (fsck_err(c, bucket_gens_to_invalid_dev,
			     "bucket_gens key for invalid device:\n  %s",
			     (bch2_bkey_val_to_text(&buf, c, k), buf.buf)))
			ret = bch2_btree_delete_at(trans, iter, 0);
		goto out;
	}

	if (fsck_err_on(end <= ca->mi.first_bucket ||
			start >= ca->mi.nbuckets, c,
			bucket_gens_to_invalid_buckets,
			"bucket_gens key for invalid buckets:\n  %s",
			(bch2_bkey_val_to_text(&buf, c, k), buf.buf))) {
		ret = bch2_btree_delete_at(trans, iter, 0);
		goto out;
	}

	for (b = start; b < ca->mi.first_bucket; b++)
		if (fsck_err_on(g.v.gens[b & KEY_TYPE_BUCKET_GENS_MASK], c,
				bucket_gens_nonzero_for_invalid_buckets,
				"bucket_gens key has nonzero gen for invalid bucket")) {
			g.v.gens[b & KEY_TYPE_BUCKET_GENS_MASK] = 0;
			need_update = true;
		}

	for (b = ca->mi.nbuckets; b < end; b++)
		if (fsck_err_on(g.v.gens[b & KEY_TYPE_BUCKET_GENS_MASK], c,
				bucket_gens_nonzero_for_invalid_buckets,
				"bucket_gens key has nonzero gen for invalid bucket")) {
			g.v.gens[b & KEY_TYPE_BUCKET_GENS_MASK] = 0;
			need_update = true;
		}

	if (need_update) {
		struct bkey_i *u = bch2_trans_kmalloc(trans, sizeof(g));

		ret = PTR_ERR_OR_ZERO(u);
		if (ret)
			goto out;

		memcpy(u, &g, sizeof(g));
		ret = bch2_trans_update(trans, iter, u, 0);
	}
out:
fsck_err:
	bch2_dev_put(ca);
	printbuf_exit(&buf);
	return ret;
}

int bch2_check_alloc_info(struct bch_fs *c)
{
	struct btree_trans *trans = bch2_trans_get(c);
	struct btree_iter iter, discard_iter, freespace_iter, bucket_gens_iter;
	struct bch_dev *ca = NULL;
	struct bkey hole;
	struct bkey_s_c k;
	int ret = 0;

	bch2_trans_iter_init(trans, &iter, BTREE_ID_alloc, POS_MIN,
			     BTREE_ITER_prefetch);
	bch2_trans_iter_init(trans, &discard_iter, BTREE_ID_need_discard, POS_MIN,
			     BTREE_ITER_prefetch);
	bch2_trans_iter_init(trans, &freespace_iter, BTREE_ID_freespace, POS_MIN,
			     BTREE_ITER_prefetch);
	bch2_trans_iter_init(trans, &bucket_gens_iter, BTREE_ID_bucket_gens, POS_MIN,
			     BTREE_ITER_prefetch);

	while (1) {
		struct bpos next;

		bch2_trans_begin(trans);

		k = bch2_get_key_or_real_bucket_hole(&iter, &ca, &hole);
		ret = bkey_err(k);
		if (ret)
			goto bkey_err;

		if (!k.k)
			break;

		if (k.k->type) {
			next = bpos_nosnap_successor(k.k->p);

			ret = bch2_check_alloc_key(trans,
						   k, &iter,
						   &discard_iter,
						   &freespace_iter,
						   &bucket_gens_iter);
			if (ret)
				goto bkey_err;
		} else {
			next = k.k->p;

			ret = bch2_check_alloc_hole_freespace(trans, ca,
							      bkey_start_pos(k.k),
							      &next,
							      &freespace_iter) ?:
				bch2_check_alloc_hole_bucket_gens(trans,
								  bkey_start_pos(k.k),
								  &next,
								  &bucket_gens_iter);
			if (ret)
				goto bkey_err;
		}

		ret = bch2_trans_commit(trans, NULL, NULL,
					BCH_TRANS_COMMIT_no_enospc);
		if (ret)
			goto bkey_err;

		bch2_btree_iter_set_pos(&iter, next);
bkey_err:
		if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
			continue;
		if (ret)
			break;
	}
	bch2_trans_iter_exit(trans, &bucket_gens_iter);
	bch2_trans_iter_exit(trans, &freespace_iter);
	bch2_trans_iter_exit(trans, &discard_iter);
	bch2_trans_iter_exit(trans, &iter);
	bch2_dev_put(ca);
	ca = NULL;

	if (ret < 0)
		goto err;

	ret = for_each_btree_key(trans, iter,
				 BTREE_ID_need_discard, POS_MIN,
				 BTREE_ITER_prefetch, k,
		bch2_check_discard_freespace_key(trans, &iter));
	if (ret)
		goto err;

	bch2_trans_iter_init(trans, &iter, BTREE_ID_freespace, POS_MIN,
			     BTREE_ITER_prefetch);
	while (1) {
		bch2_trans_begin(trans);
		k = bch2_btree_iter_peek(&iter);
		if (!k.k)
			break;

		ret = bkey_err(k) ?:
			bch2_check_discard_freespace_key(trans, &iter);
		if (bch2_err_matches(ret, BCH_ERR_transaction_restart)) {
			ret = 0;
			continue;
		}
		if (ret) {
			struct printbuf buf = PRINTBUF;
			bch2_bkey_val_to_text(&buf, c, k);

			bch_err(c, "while checking %s", buf.buf);
			printbuf_exit(&buf);
			break;
		}

		bch2_btree_iter_set_pos(&iter, bpos_nosnap_successor(iter.pos));
	}
	bch2_trans_iter_exit(trans, &iter);
	if (ret)
		goto err;

	ret = for_each_btree_key_commit(trans, iter,
					BTREE_ID_bucket_gens, POS_MIN,
					BTREE_ITER_prefetch, k,
					NULL, NULL, BCH_TRANS_COMMIT_no_enospc,
		bch2_check_bucket_gens_key(trans, &iter, k));
err:
	bch2_trans_put(trans);
	bch_err_fn(c, ret);
	return ret;
}

static int bch2_check_alloc_to_lru_ref(struct btree_trans *trans,
				       struct btree_iter *alloc_iter)
{
	struct bch_fs *c = trans->c;
	struct btree_iter lru_iter;
	struct bch_alloc_v4 a_convert;
	const struct bch_alloc_v4 *a;
	struct bkey_s_c alloc_k, lru_k;
	struct printbuf buf = PRINTBUF;
	int ret;

	alloc_k = bch2_btree_iter_peek(alloc_iter);
	if (!alloc_k.k)
		return 0;

	ret = bkey_err(alloc_k);
	if (ret)
		return ret;

	a = bch2_alloc_to_v4(alloc_k, &a_convert);

	if (a->data_type != BCH_DATA_cached)
		return 0;

	if (fsck_err_on(!a->io_time[READ], c,
			alloc_key_cached_but_read_time_zero,
			"cached bucket with read_time 0\n"
			"  %s",
			(printbuf_reset(&buf),
			 bch2_bkey_val_to_text(&buf, c, alloc_k), buf.buf))) {
		struct bkey_i_alloc_v4 *a_mut =
			bch2_alloc_to_v4_mut(trans, alloc_k);
		ret = PTR_ERR_OR_ZERO(a_mut);
		if (ret)
			goto err;

		a_mut->v.io_time[READ] = atomic64_read(&c->io_clock[READ].now);
		ret = bch2_trans_update(trans, alloc_iter,
					&a_mut->k_i, BTREE_TRIGGER_norun);
		if (ret)
			goto err;

		a = &a_mut->v;
	}

	lru_k = bch2_bkey_get_iter(trans, &lru_iter, BTREE_ID_lru,
				   lru_pos(alloc_k.k->p.inode,
					   bucket_to_u64(alloc_k.k->p),
					   a->io_time[READ]), 0);
	ret = bkey_err(lru_k);
	if (ret)
		return ret;

	if (fsck_err_on(lru_k.k->type != KEY_TYPE_set, c,
			alloc_key_to_missing_lru_entry,
			"missing lru entry\n"
			"  %s",
			(printbuf_reset(&buf),
			 bch2_bkey_val_to_text(&buf, c, alloc_k), buf.buf))) {
		ret = bch2_lru_set(trans,
				   alloc_k.k->p.inode,
				   bucket_to_u64(alloc_k.k->p),
				   a->io_time[READ]);
		if (ret)
			goto err;
	}
err:
fsck_err:
	bch2_trans_iter_exit(trans, &lru_iter);
	printbuf_exit(&buf);
	return ret;
}

int bch2_check_alloc_to_lru_refs(struct bch_fs *c)
{
	int ret = bch2_trans_run(c,
		for_each_btree_key_commit(trans, iter, BTREE_ID_alloc,
				POS_MIN, BTREE_ITER_prefetch, k,
				NULL, NULL, BCH_TRANS_COMMIT_no_enospc,
			bch2_check_alloc_to_lru_ref(trans, &iter)));
	bch_err_fn(c, ret);
	return ret;
}

static int discard_in_flight_add(struct bch_fs *c, struct bpos bucket)
{
	int ret;

	mutex_lock(&c->discard_buckets_in_flight_lock);
	darray_for_each(c->discard_buckets_in_flight, i)
		if (bkey_eq(*i, bucket)) {
			ret = -EEXIST;
			goto out;
		}

	ret = darray_push(&c->discard_buckets_in_flight, bucket);
out:
	mutex_unlock(&c->discard_buckets_in_flight_lock);
	return ret;
}

static void discard_in_flight_remove(struct bch_fs *c, struct bpos bucket)
{
	mutex_lock(&c->discard_buckets_in_flight_lock);
	darray_for_each(c->discard_buckets_in_flight, i)
		if (bkey_eq(*i, bucket)) {
			darray_remove_item(&c->discard_buckets_in_flight, i);
			goto found;
		}
	BUG();
found:
	mutex_unlock(&c->discard_buckets_in_flight_lock);
}
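
/*
 * The in-flight darray above holds at most the buckets currently being
 * discarded, so the linear scans under the mutex are presumably cheap;
 * discard_in_flight_add() returning -EEXIST doubles as the dedup check
 * that keeps one bucket from being discarded twice concurrently.
 */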

struct discard_buckets_state {
	u64		seen;
	u64		open;
	u64		need_journal_commit;
	u64		discarded;
	struct bch_dev	*ca;
	u64		need_journal_commit_this_dev;
};

static void discard_buckets_next_dev(struct bch_fs *c, struct discard_buckets_state *s, struct bch_dev *ca)
{
	if (s->ca == ca)
		return;

	if (s->ca && s->need_journal_commit_this_dev >
	    bch2_dev_usage_read(s->ca).d[BCH_DATA_free].buckets)
		bch2_journal_flush_async(&c->journal, NULL);

	if (s->ca)
		percpu_ref_put(&s->ca->io_ref);
	s->ca = ca;
	s->need_journal_commit_this_dev = 0;
}

static int bch2_discard_one_bucket(struct btree_trans *trans,
				   struct btree_iter *need_discard_iter,
				   struct bpos *discard_pos_done,
				   struct discard_buckets_state *s)
{
	struct bch_fs *c = trans->c;
	struct bpos pos = need_discard_iter->pos;
	struct btree_iter iter = { NULL };
	struct bkey_s_c k;
	struct bkey_i_alloc_v4 *a;
	struct printbuf buf = PRINTBUF;
	bool discard_locked = false;
	int ret = 0;

	struct bch_dev *ca = s->ca && s->ca->dev_idx == pos.inode
		? s->ca
		: bch2_dev_get_ioref(c, pos.inode, WRITE);
	if (!ca) {
		bch2_btree_iter_set_pos(need_discard_iter, POS(pos.inode + 1, 0));
		return 0;
	}

	discard_buckets_next_dev(c, s, ca);

	if (bch2_bucket_is_open_safe(c, pos.inode, pos.offset)) {
		s->open++;
		goto out;
	}

	if (bch2_bucket_needs_journal_commit(&c->buckets_waiting_for_journal,
			c->journal.flushed_seq_ondisk,
			pos.inode, pos.offset)) {
		s->need_journal_commit++;
		s->need_journal_commit_this_dev++;
		goto out;
	}

	k = bch2_bkey_get_iter(trans, &iter, BTREE_ID_alloc,
			       need_discard_iter->pos,
			       BTREE_ITER_cached);
	ret = bkey_err(k);
	if (ret)
		goto out;

	a = bch2_alloc_to_v4_mut(trans, k);
	ret = PTR_ERR_OR_ZERO(a);
	if (ret)
		goto out;

	if (bch2_bucket_sectors_total(a->v)) {
		if (bch2_trans_inconsistent_on(c->curr_recovery_pass > BCH_RECOVERY_PASS_check_alloc_info,
					       trans, "attempting to discard bucket with dirty data\n%s",
					       (bch2_bkey_val_to_text(&buf, c, k), buf.buf)))
			ret = -EIO;
		goto out;
	}

	if (a->v.data_type != BCH_DATA_need_discard) {
		if (data_type_is_empty(a->v.data_type) &&
		    BCH_ALLOC_V4_NEED_INC_GEN(&a->v)) {
			a->v.gen++;
			SET_BCH_ALLOC_V4_NEED_INC_GEN(&a->v, false);
			goto write;
		}

		if (bch2_trans_inconsistent_on(c->curr_recovery_pass > BCH_RECOVERY_PASS_check_alloc_info,
					       trans, "bucket incorrectly set in need_discard btree\n"
					       "%s",
					       (bch2_bkey_val_to_text(&buf, c, k), buf.buf)))
			ret = -EIO;
		goto out;
	}

	if (a->v.journal_seq > c->journal.flushed_seq_ondisk) {
		if (bch2_trans_inconsistent_on(c->curr_recovery_pass > BCH_RECOVERY_PASS_check_alloc_info,
					       trans, "clearing need_discard but journal_seq %llu > flushed_seq %llu\n%s",
					       a->v.journal_seq,
					       c->journal.flushed_seq_ondisk,
					       (bch2_bkey_val_to_text(&buf, c, k), buf.buf)))
			ret = -EIO;
		goto out;
	}

	if (discard_in_flight_add(c, SPOS(iter.pos.inode, iter.pos.offset, true)))
		goto out;

	discard_locked = true;

	if (!bkey_eq(*discard_pos_done, iter.pos) &&
	    ca->mi.discard && !c->opts.nochanges) {
		/*
		 * This works without any other locks because this is the only
		 * thread that removes items from the need_discard tree
		 */
		bch2_trans_unlock_long(trans);
		blkdev_issue_discard(ca->disk_sb.bdev,
				     k.k->p.offset * ca->mi.bucket_size,
				     ca->mi.bucket_size,
				     GFP_KERNEL);
		*discard_pos_done = iter.pos;

		ret = bch2_trans_relock_notrace(trans);
		if (ret)
			goto out;
	}

	SET_BCH_ALLOC_V4_NEED_DISCARD(&a->v, false);
	alloc_data_type_set(&a->v, a->v.data_type);
write:
	ret = bch2_trans_update(trans, &iter, &a->k_i, 0) ?:
		bch2_trans_commit(trans, NULL, NULL,
				  BCH_WATERMARK_btree|
				  BCH_TRANS_COMMIT_no_enospc);
	if (ret)
		goto out;

	count_event(c, bucket_discard);
	s->discarded++;
out:
	if (discard_locked)
		discard_in_flight_remove(c, iter.pos);
	s->seen++;
	bch2_trans_iter_exit(trans, &iter);
	printbuf_exit(&buf);
	return ret;
}

static void bch2_do_discards_work(struct work_struct *work)
{
	struct bch_fs *c = container_of(work, struct bch_fs, discard_work);
	struct discard_buckets_state s = {};
	struct bpos discard_pos_done = POS_MAX;
	int ret;

	/*
	 * We're doing the commit in bch2_discard_one_bucket instead of using
	 * for_each_btree_key_commit() so that we can increment counters after
	 * successful commit:
	 */
	ret = bch2_trans_run(c,
		for_each_btree_key(trans, iter,
				   BTREE_ID_need_discard, POS_MIN, 0, k,
			bch2_discard_one_bucket(trans, &iter, &discard_pos_done, &s)));

	discard_buckets_next_dev(c, &s, NULL);

	trace_discard_buckets(c, s.seen, s.open, s.need_journal_commit, s.discarded,
			      bch2_err_str(ret));

	bch2_write_ref_put(c, BCH_WRITE_REF_discard);
}

void bch2_do_discards(struct bch_fs *c)
{
	if (bch2_write_ref_tryget(c, BCH_WRITE_REF_discard) &&
	    !queue_work(c->write_ref_wq, &c->discard_work))
		bch2_write_ref_put(c, BCH_WRITE_REF_discard);
}
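
/*
 * The pattern above (repeated by bch2_do_invalidates() below) -- take a
 * write ref, queue the work item, drop the ref if it was already queued --
 * makes the write ref track "work pending or running", presumably so that
 * shutdown can wait for in-flight background work to drain.
 */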

static int bch2_clear_bucket_needs_discard(struct btree_trans *trans, struct bpos bucket)
{
	struct btree_iter iter;
	bch2_trans_iter_init(trans, &iter, BTREE_ID_alloc, bucket, BTREE_ITER_intent);
	struct bkey_s_c k = bch2_btree_iter_peek_slot(&iter);
	int ret = bkey_err(k);
	if (ret)
		goto err;

	struct bkey_i_alloc_v4 *a = bch2_alloc_to_v4_mut(trans, k);
	ret = PTR_ERR_OR_ZERO(a);
	if (ret)
		goto err;

	BUG_ON(a->v.dirty_sectors);
	SET_BCH_ALLOC_V4_NEED_DISCARD(&a->v, false);
	alloc_data_type_set(&a->v, a->v.data_type);

	ret = bch2_trans_update(trans, &iter, &a->k_i, 0);
err:
	bch2_trans_iter_exit(trans, &iter);
	return ret;
}

static void bch2_do_discards_fast_work(struct work_struct *work)
{
	struct bch_fs *c = container_of(work, struct bch_fs, discard_fast_work);

	while (1) {
		bool got_bucket = false;
		struct bpos bucket;
		struct bch_dev *ca;

		mutex_lock(&c->discard_buckets_in_flight_lock);
		darray_for_each(c->discard_buckets_in_flight, i) {
			if (i->snapshot)
				continue;

			ca = bch2_dev_get_ioref(c, i->inode, WRITE);
			if (!ca) {
				darray_remove_item(&c->discard_buckets_in_flight, i);
				continue;
			}

			got_bucket = true;
			bucket = *i;
			i->snapshot = true;
			break;
		}
		mutex_unlock(&c->discard_buckets_in_flight_lock);

		if (!got_bucket)
			break;

		if (ca->mi.discard && !c->opts.nochanges)
			blkdev_issue_discard(ca->disk_sb.bdev,
					     bucket.offset * ca->mi.bucket_size,
					     ca->mi.bucket_size,
					     GFP_KERNEL);

		int ret = bch2_trans_do(c, NULL, NULL,
					BCH_WATERMARK_btree|
					BCH_TRANS_COMMIT_no_enospc,
			bch2_clear_bucket_needs_discard(trans, bucket));
		bch_err_fn(c, ret);

		percpu_ref_put(&ca->io_ref);
		discard_in_flight_remove(c, bucket);

		if (ret)
			break;
	}

	bch2_write_ref_put(c, BCH_WRITE_REF_discard_fast);
}
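
/*
 * Bucket positions never use bpos.snapshot, so the loop above repurposes
 * it as an "already being discarded" flag on in-flight entries: an entry
 * is claimed by setting it and skipped while it is set (the slow path in
 * bch2_discard_one_bucket() adds its entry with the flag already set, via
 * SPOS(..., true), so this worker leaves it alone).
 */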
1900
1901static void bch2_discard_one_bucket_fast(struct bch_fs *c, struct bpos bucket)
1902{
1903 rcu_read_lock();
1904 struct bch_dev *ca = bch2_dev_rcu(c, bucket.inode);
1905 bool dead = !ca || percpu_ref_is_dying(&ca->io_ref);
1906 rcu_read_unlock();
1907
1908 if (!dead &&
1909 !discard_in_flight_add(c, bucket) &&
1910 bch2_write_ref_tryget(c, BCH_WRITE_REF_discard_fast) &&
1911 !queue_work(c->write_ref_wq, &c->discard_fast_work))
1912 bch2_write_ref_put(c, BCH_WRITE_REF_discard_fast);
1913}
1914
static int invalidate_one_bucket(struct btree_trans *trans,
				 struct btree_iter *lru_iter,
				 struct bkey_s_c lru_k,
				 s64 *nr_to_invalidate)
{
	struct bch_fs *c = trans->c;
	struct bkey_i_alloc_v4 *a = NULL;
	struct printbuf buf = PRINTBUF;
	struct bpos bucket = u64_to_bucket(lru_k.k->p.offset);
	unsigned cached_sectors;
	int ret = 0;

	if (*nr_to_invalidate <= 0)
		return 1;

	if (!bch2_dev_bucket_exists(c, bucket)) {
		prt_str(&buf, "lru entry points to invalid bucket");
		goto err;
	}

	if (bch2_bucket_is_open_safe(c, bucket.inode, bucket.offset))
		return 0;

	a = bch2_trans_start_alloc_update(trans, bucket);
	ret = PTR_ERR_OR_ZERO(a);
	if (ret)
		goto out;

	/* We expect harmless races here due to the btree write buffer: */
	if (lru_pos_time(lru_iter->pos) != alloc_lru_idx_read(a->v))
		goto out;

	BUG_ON(a->v.data_type != BCH_DATA_cached);
	BUG_ON(a->v.dirty_sectors);

	if (!a->v.cached_sectors)
		bch_err(c, "invalidating empty bucket, confused");

	cached_sectors = a->v.cached_sectors;

	SET_BCH_ALLOC_V4_NEED_INC_GEN(&a->v, false);
	a->v.gen++;
	a->v.data_type = 0;
	a->v.dirty_sectors = 0;
	a->v.cached_sectors = 0;
	a->v.io_time[READ] = atomic64_read(&c->io_clock[READ].now);
	a->v.io_time[WRITE] = atomic64_read(&c->io_clock[WRITE].now);

	ret = bch2_trans_commit(trans, NULL, NULL,
				BCH_WATERMARK_btree|
				BCH_TRANS_COMMIT_no_enospc);
	if (ret)
		goto out;

	trace_and_count(c, bucket_invalidate, c, bucket.inode, bucket.offset, cached_sectors);
	--*nr_to_invalidate;
out:
	printbuf_exit(&buf);
	return ret;
err:
	prt_str(&buf, "\n lru key: ");
	bch2_bkey_val_to_text(&buf, c, lru_k);

	prt_str(&buf, "\n lru entry: ");
	bch2_lru_pos_to_text(&buf, lru_iter->pos);

	prt_str(&buf, "\n alloc key: ");
	if (!a)
		bch2_bpos_to_text(&buf, bucket);
	else
		bch2_bkey_val_to_text(&buf, c, bkey_i_to_s_c(&a->k_i));

	bch_err(c, "%s", buf.buf);
	if (c->curr_recovery_pass > BCH_RECOVERY_PASS_check_lrus) {
		bch2_inconsistent_error(c);
		ret = -EINVAL;
	}

	goto out;
}

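/*
 * Worker for invalidating cached-data buckets: flush the btree write buffer
 * so the LRU btree is current, then walk each member device's LRU from the
 * oldest entry forward, invalidating up to the number of buckets that
 * should_invalidate_buckets() asks for.
 */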
static void bch2_do_invalidates_work(struct work_struct *work)
{
	struct bch_fs *c = container_of(work, struct bch_fs, invalidate_work);
	struct btree_trans *trans = bch2_trans_get(c);
	int ret = 0;

	ret = bch2_btree_write_buffer_tryflush(trans);
	if (ret)
		goto err;

	for_each_member_device(c, ca) {
		s64 nr_to_invalidate =
			should_invalidate_buckets(ca, bch2_dev_usage_read(ca));

		ret = for_each_btree_key_upto(trans, iter, BTREE_ID_lru,
				lru_pos(ca->dev_idx, 0, 0),
				lru_pos(ca->dev_idx, U64_MAX, LRU_TIME_MAX),
				BTREE_ITER_intent, k,
			invalidate_one_bucket(trans, &iter, k, &nr_to_invalidate));

		if (ret < 0) {
			bch2_dev_put(ca);
			break;
		}
	}
err:
	bch2_trans_put(trans);
	bch2_write_ref_put(c, BCH_WRITE_REF_invalidate);
}

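/*
 * Kick off the invalidate worker, unless it's already queued; the write ref
 * keeps the filesystem writeable until the worker finishes and drops it.
 */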
void bch2_do_invalidates(struct bch_fs *c)
{
	if (bch2_write_ref_tryget(c, BCH_WRITE_REF_invalidate) &&
	    !queue_work(c->write_ref_wq, &c->invalidate_work))
		bch2_write_ref_put(c, BCH_WRITE_REF_invalidate);
}

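/*
 * Initialize the freespace btree for buckets [bucket_start, bucket_end) on
 * @ca: live alloc keys are indexed one at a time via bch2_bucket_do_index(),
 * while ranges with no alloc keys get a single KEY_TYPE_set extent in the
 * freespace btree. On success, the member is flagged as
 * freespace_initialized in the superblock.
 */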
int bch2_dev_freespace_init(struct bch_fs *c, struct bch_dev *ca,
			    u64 bucket_start, u64 bucket_end)
{
	struct btree_trans *trans = bch2_trans_get(c);
	struct btree_iter iter;
	struct bkey_s_c k;
	struct bkey hole;
	struct bpos end = POS(ca->dev_idx, bucket_end);
	struct bch_member *m;
	unsigned long last_updated = jiffies;
	int ret;

	BUG_ON(bucket_start > bucket_end);
	BUG_ON(bucket_end > ca->mi.nbuckets);

	bch2_trans_iter_init(trans, &iter, BTREE_ID_alloc,
			     POS(ca->dev_idx, max_t(u64, ca->mi.first_bucket, bucket_start)),
			     BTREE_ITER_prefetch);
	/*
	 * Scan the alloc btree for every bucket on @ca, and add buckets to the
	 * freespace/need_discard/need_gc_gens btrees as needed:
	 */
	while (1) {
		/* Use time_after() so the progress check survives jiffies wraparound: */
		if (time_after(jiffies, last_updated + HZ * 10)) {
			bch_info(ca, "%s: currently at %llu/%llu",
				 __func__, iter.pos.offset, ca->mi.nbuckets);
			last_updated = jiffies;
		}

		bch2_trans_begin(trans);

		if (bkey_ge(iter.pos, end)) {
			ret = 0;
			break;
		}

		k = bch2_get_key_or_hole(&iter, end, &hole);
		ret = bkey_err(k);
		if (ret)
			goto bkey_err;

		if (k.k->type) {
			/*
			 * We process live keys in the alloc btree one at a
			 * time:
			 */
			struct bch_alloc_v4 a_convert;
			const struct bch_alloc_v4 *a = bch2_alloc_to_v4(k, &a_convert);

			ret = bch2_bucket_do_index(trans, ca, k, a, true) ?:
			      bch2_trans_commit(trans, NULL, NULL,
						BCH_TRANS_COMMIT_no_enospc);
			if (ret)
				goto bkey_err;

			bch2_btree_iter_advance(&iter);
		} else {
			/*
			 * A hole: no alloc keys for this range, so insert a
			 * single extent into the freespace btree covering all
			 * of it at once:
			 */
			struct bkey_i *freespace;

			freespace = bch2_trans_kmalloc(trans, sizeof(*freespace));
			ret = PTR_ERR_OR_ZERO(freespace);
			if (ret)
				goto bkey_err;

			bkey_init(&freespace->k);
			freespace->k.type = KEY_TYPE_set;
			freespace->k.p = k.k->p;
			freespace->k.size = k.k->size;

			ret = bch2_btree_insert_trans(trans, BTREE_ID_freespace, freespace, 0) ?:
			      bch2_trans_commit(trans, NULL, NULL,
						BCH_TRANS_COMMIT_no_enospc);
			if (ret)
				goto bkey_err;

			bch2_btree_iter_set_pos(&iter, k.k->p);
		}
bkey_err:
		if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
			continue;
		if (ret)
			break;
	}

	bch2_trans_iter_exit(trans, &iter);
	bch2_trans_put(trans);

	if (ret < 0) {
		bch_err_msg(ca, ret, "initializing free space");
		return ret;
	}

	mutex_lock(&c->sb_lock);
	m = bch2_members_v2_get_mut(c->disk_sb.sb, ca->dev_idx);
	SET_BCH_MEMBER_FREESPACE_INITIALIZED(m, true);
	mutex_unlock(&c->sb_lock);

	return 0;
}

int bch2_fs_freespace_init(struct bch_fs *c)
{
	int ret = 0;
	bool doing_init = false;

	/*
	 * We can crash during the device add path, so we need to check this on
	 * every mount:
	 */

	for_each_member_device(c, ca) {
		if (ca->mi.freespace_initialized)
			continue;

		if (!doing_init) {
			bch_info(c, "initializing freespace");
			doing_init = true;
		}

		ret = bch2_dev_freespace_init(c, ca, 0, ca->mi.nbuckets);
		if (ret) {
			bch2_dev_put(ca);
			bch_err_fn(c, ret);
			return ret;
		}
	}

	if (doing_init) {
		mutex_lock(&c->sb_lock);
		bch2_write_super(c);
		mutex_unlock(&c->sb_lock);
		bch_verbose(c, "done initializing freespace");
	}

	return 0;
}

/* Bucket IO clocks: */

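/*
 * Reset a bucket's IO time for @rw to the current IO clock; read times feed
 * the cached-data LRU used by invalidation, so this is what keeps a recently
 * read bucket from being invalidated. Runs as its own transaction commit.
 */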
int bch2_bucket_io_time_reset(struct btree_trans *trans, unsigned dev,
			      size_t bucket_nr, int rw)
{
	struct bch_fs *c = trans->c;
	struct btree_iter iter;
	struct bkey_i_alloc_v4 *a;
	u64 now;
	int ret = 0;

	/* If we couldn't relock, restart the transaction from scratch: */
	if (bch2_trans_relock(trans))
		bch2_trans_begin(trans);

	a = bch2_trans_start_alloc_update_noupdate(trans, &iter, POS(dev, bucket_nr));
	ret = PTR_ERR_OR_ZERO(a);
	if (ret)
		return ret;

	now = atomic64_read(&c->io_clock[rw].now);
	if (a->v.io_time[rw] == now)
		goto out;

	a->v.io_time[rw] = now;

	ret = bch2_trans_update(trans, &iter, &a->k_i, 0) ?:
	      bch2_trans_commit(trans, NULL, NULL, 0);
out:
	bch2_trans_iter_exit(trans, &iter);
	return ret;
}

/* Startup/shutdown (ro/rw): */

void bch2_recalc_capacity(struct bch_fs *c)
{
	u64 capacity = 0, reserved_sectors = 0, gc_reserve;
	unsigned bucket_size_max = 0;
	unsigned long ra_pages = 0;

	lockdep_assert_held(&c->state_lock);

	for_each_online_member(c, ca) {
		struct backing_dev_info *bdi = ca->disk_sb.bdev->bd_disk->bdi;

		ra_pages += bdi->ra_pages;
	}

	bch2_set_ra_pages(c, ra_pages);

	for_each_rw_member(c, ca) {
		u64 dev_reserve = 0;

		/*
		 * We need to reserve buckets (from the number
		 * of currently available buckets) against
		 * foreground writes so that mainly copygc can
		 * make forward progress.
		 *
		 * We need enough to refill the various reserves
		 * from scratch - copygc will use its entire
		 * reserve all at once, then run again when its
		 * reserve is refilled (from the formerly
		 * available buckets).
		 *
		 * This reserve is only used when considering whether
		 * allocations for foreground writes must wait -
		 * it does not factor into -ENOSPC calculations.
		 */
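		/*
		 * A rough worked example, with hypothetical numbers: a device
		 * with 1M buckets and an 8-bucket btree reserve gets
		 * 2 * 8 + (1M >> 6) + 3 = 16403 reserved buckets, ~1.6% of
		 * the device (doubled again when added to reserved_sectors
		 * below):
		 */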

		dev_reserve += ca->nr_btree_reserve * 2;
		dev_reserve += ca->mi.nbuckets >> 6; /* copygc reserve */

		dev_reserve += 1;	/* btree write point */
		dev_reserve += 1;	/* copygc write point */
		dev_reserve += 1;	/* rebalance write point */

		dev_reserve *= ca->mi.bucket_size;

		capacity += bucket_to_sector(ca, ca->mi.nbuckets -
					     ca->mi.first_bucket);

		reserved_sectors += dev_reserve * 2;

		bucket_size_max = max_t(unsigned, bucket_size_max,
					ca->mi.bucket_size);
	}

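	/*
	 * The copygc reserve is either an explicit size (converted from bytes
	 * to 512-byte sectors here) or a percentage of total capacity; the
	 * larger of it and the summed per-device reserves wins:
	 */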
	gc_reserve = c->opts.gc_reserve_bytes
		? c->opts.gc_reserve_bytes >> 9
		: div64_u64(capacity * c->opts.gc_reserve_percent, 100);

	reserved_sectors = max(gc_reserve, reserved_sectors);

	reserved_sectors = min(reserved_sectors, capacity);

	c->capacity = capacity - reserved_sectors;

	c->bucket_size_max = bucket_size_max;

	/* Wake up in case someone was waiting for buckets */
	closure_wake_up(&c->freelist_wait);
}

u64 bch2_min_rw_member_capacity(struct bch_fs *c)
{
	u64 ret = U64_MAX;

	for_each_rw_member(c, ca)
		ret = min(ret, ca->mi.nbuckets * ca->mi.bucket_size);
	return ret;
}

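/*
 * Check whether any open bucket - i.e. an in-flight write - still points at
 * @ca; used when taking a device read-only to wait for writes to drain.
 */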
static bool bch2_dev_has_open_write_point(struct bch_fs *c, struct bch_dev *ca)
{
	struct open_bucket *ob;
	bool ret = false;

	for (ob = c->open_buckets;
	     ob < c->open_buckets + ARRAY_SIZE(c->open_buckets);
	     ob++) {
		spin_lock(&ob->lock);
		if (ob->valid && !ob->on_partial_list &&
		    ob->dev == ca->dev_idx)
			ret = true;
		spin_unlock(&ob->lock);
	}

	return ret;
}

/* device goes ro: */
void bch2_dev_allocator_remove(struct bch_fs *c, struct bch_dev *ca)
{
	unsigned i;

	/* First, remove device from allocation groups: */

	for (i = 0; i < ARRAY_SIZE(c->rw_devs); i++)
		clear_bit(ca->dev_idx, c->rw_devs[i].d);

	/*
	 * Capacity is calculated based on devices in allocation groups:
	 */
	bch2_recalc_capacity(c);

	bch2_open_buckets_stop(c, ca, false);

	/*
	 * Wake up threads that were blocked on allocation, so they can notice
	 * the device is no longer available for allocation and the capacity
	 * has changed:
	 */
	closure_wake_up(&c->freelist_wait);

	/*
	 * journal_res_get() can block waiting for free space in the journal -
	 * it needs to notice there may not be devices to allocate from anymore:
	 */
	wake_up(&c->journal.wait);

	/* Now wait for any in-flight writes: */

	closure_wait_event(&c->open_buckets_wait,
			   !bch2_dev_has_open_write_point(c, ca));
}

/* device goes rw: */
void bch2_dev_allocator_add(struct bch_fs *c, struct bch_dev *ca)
{
	unsigned i;

	for (i = 0; i < ARRAY_SIZE(c->rw_devs); i++)
		if (ca->mi.data_allowed & (1 << i))
			set_bit(ca->dev_idx, c->rw_devs[i].d);
}

void bch2_fs_allocator_background_exit(struct bch_fs *c)
{
	darray_exit(&c->discard_buckets_in_flight);
}

void bch2_fs_allocator_background_init(struct bch_fs *c)
{
	spin_lock_init(&c->freelist_lock);
	mutex_init(&c->discard_buckets_in_flight_lock);
	INIT_WORK(&c->discard_work, bch2_do_discards_work);
	INIT_WORK(&c->discard_fast_work, bch2_do_discards_fast_work);
	INIT_WORK(&c->invalidate_work, bch2_do_invalidates_work);
}