// SPDX-License-Identifier: GPL-2.0
#include "bcachefs.h"
#include "alloc_background.h"
#include "alloc_foreground.h"
#include "backpointers.h"
#include "bkey_buf.h"
#include "btree_cache.h"
#include "btree_io.h"
#include "btree_key_cache.h"
#include "btree_update.h"
#include "btree_update_interior.h"
#include "btree_gc.h"
#include "btree_write_buffer.h"
#include "buckets.h"
#include "buckets_waiting_for_journal.h"
#include "clock.h"
#include "debug.h"
#include "disk_accounting.h"
#include "ec.h"
#include "error.h"
#include "lru.h"
#include "recovery.h"
#include "trace.h"
#include "varint.h"

#include <linux/kthread.h>
#include <linux/math64.h>
#include <linux/random.h>
#include <linux/rculist.h>
#include <linux/rcupdate.h>
#include <linux/sched/task.h>
#include <linux/sort.h>
#include <linux/jiffies.h>

static void bch2_discard_one_bucket_fast(struct bch_dev *, u64);

/* Persistent alloc info: */

static const unsigned BCH_ALLOC_V1_FIELD_BYTES[] = {
#define x(name, bits) [BCH_ALLOC_FIELD_V1_##name] = bits / 8,
	BCH_ALLOC_FIELDS_V1()
#undef x
};

struct bkey_alloc_unpacked {
	u64		journal_seq;
	u8		gen;
	u8		oldest_gen;
	u8		data_type;
	bool		need_discard:1;
	bool		need_inc_gen:1;
#define x(_name, _bits)	u##_bits _name;
	BCH_ALLOC_FIELDS_V2()
#undef x
};

static inline u64 alloc_field_v1_get(const struct bch_alloc *a,
				     const void **p, unsigned field)
{
	unsigned bytes = BCH_ALLOC_V1_FIELD_BYTES[field];
	u64 v;

	if (!(a->fields & (1 << field)))
		return 0;

	switch (bytes) {
	case 1:
		v = *((const u8 *) *p);
		break;
	case 2:
		v = le16_to_cpup(*p);
		break;
	case 4:
		v = le32_to_cpup(*p);
		break;
	case 8:
		v = le64_to_cpup(*p);
		break;
	default:
		BUG();
	}

	*p += bytes;
	return v;
}
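
/*
 * Editor's note, illustrative example (not in the original source): v1 alloc
 * keys store a bitmap of present fields followed by a packed little-endian
 * payload; BCH_ALLOC_V1_FIELD_BYTES gives each field's width. Hypothetically,
 * if only a 4-byte read_time field were present:
 *
 *	a->fields = 1 << BCH_ALLOC_FIELD_V1_read_time;
 *	payload   = { 0x10, 0x20, 0x30, 0x40 };		// le32
 *	read_time = le32_to_cpup(payload);		// 0x40302010
 *
 * Absent fields occupy no bytes and decode as 0, which is why
 * alloc_field_v1_get() only advances *p when a field's bit is set.
 */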

static void bch2_alloc_unpack_v1(struct bkey_alloc_unpacked *out,
				 struct bkey_s_c k)
{
	const struct bch_alloc *in = bkey_s_c_to_alloc(k).v;
	const void *d = in->data;
	unsigned idx = 0;

	out->gen = in->gen;

#define x(_name, _bits) out->_name = alloc_field_v1_get(in, &d, idx++);
	BCH_ALLOC_FIELDS_V1()
#undef x
}

static int bch2_alloc_unpack_v2(struct bkey_alloc_unpacked *out,
				struct bkey_s_c k)
{
	struct bkey_s_c_alloc_v2 a = bkey_s_c_to_alloc_v2(k);
	const u8 *in = a.v->data;
	const u8 *end = bkey_val_end(a);
	unsigned fieldnr = 0;
	int ret;
	u64 v;

	out->gen = a.v->gen;
	out->oldest_gen = a.v->oldest_gen;
	out->data_type = a.v->data_type;

#define x(_name, _bits)						\
	if (fieldnr < a.v->nr_fields) {				\
		ret = bch2_varint_decode_fast(in, end, &v);	\
		if (ret < 0)					\
			return ret;				\
		in += ret;					\
	} else {						\
		v = 0;						\
	}							\
	out->_name = v;						\
	if (v != out->_name)					\
		return -1;					\
	fieldnr++;

	BCH_ALLOC_FIELDS_V2()
#undef x
	return 0;
}
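
/*
 * Editor's note, illustrative example (not in the original source): v2 (and
 * v3) fields are varint-encoded, and fields past nr_fields default to 0 so
 * older keys keep working as new fields are appended. The "v != out->_name"
 * check rejects values that overflow the destination member; e.g. for a
 * hypothetical u8 field:
 *
 *	u64 v = 0x1ff;		// decoded varint
 *	u8 field = v;		// truncates to 0xff
 *	if (v != field)		// 0x1ff != 0xff
 *		return -1;	// unpack error
 */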

static int bch2_alloc_unpack_v3(struct bkey_alloc_unpacked *out,
				struct bkey_s_c k)
{
	struct bkey_s_c_alloc_v3 a = bkey_s_c_to_alloc_v3(k);
	const u8 *in = a.v->data;
	const u8 *end = bkey_val_end(a);
	unsigned fieldnr = 0;
	int ret;
	u64 v;

	out->gen = a.v->gen;
	out->oldest_gen = a.v->oldest_gen;
	out->data_type = a.v->data_type;
	out->need_discard = BCH_ALLOC_V3_NEED_DISCARD(a.v);
	out->need_inc_gen = BCH_ALLOC_V3_NEED_INC_GEN(a.v);
	out->journal_seq = le64_to_cpu(a.v->journal_seq);

#define x(_name, _bits)						\
	if (fieldnr < a.v->nr_fields) {				\
		ret = bch2_varint_decode_fast(in, end, &v);	\
		if (ret < 0)					\
			return ret;				\
		in += ret;					\
	} else {						\
		v = 0;						\
	}							\
	out->_name = v;						\
	if (v != out->_name)					\
		return -1;					\
	fieldnr++;

	BCH_ALLOC_FIELDS_V2()
#undef x
	return 0;
}

static struct bkey_alloc_unpacked bch2_alloc_unpack(struct bkey_s_c k)
{
	struct bkey_alloc_unpacked ret = { .gen = 0 };

	switch (k.k->type) {
	case KEY_TYPE_alloc:
		bch2_alloc_unpack_v1(&ret, k);
		break;
	case KEY_TYPE_alloc_v2:
		bch2_alloc_unpack_v2(&ret, k);
		break;
	case KEY_TYPE_alloc_v3:
		bch2_alloc_unpack_v3(&ret, k);
		break;
	}

	return ret;
}

static unsigned bch_alloc_v1_val_u64s(const struct bch_alloc *a)
{
	unsigned i, bytes = offsetof(struct bch_alloc, data);

	for (i = 0; i < ARRAY_SIZE(BCH_ALLOC_V1_FIELD_BYTES); i++)
		if (a->fields & (1 << i))
			bytes += BCH_ALLOC_V1_FIELD_BYTES[i];

	return DIV_ROUND_UP(bytes, sizeof(u64));
}

int bch2_alloc_v1_validate(struct bch_fs *c, struct bkey_s_c k,
			   enum bch_validate_flags flags)
{
	struct bkey_s_c_alloc a = bkey_s_c_to_alloc(k);
	int ret = 0;

	/* allow for unknown fields */
	bkey_fsck_err_on(bkey_val_u64s(a.k) < bch_alloc_v1_val_u64s(a.v),
			 c, alloc_v1_val_size_bad,
			 "incorrect value size (%zu < %u)",
			 bkey_val_u64s(a.k), bch_alloc_v1_val_u64s(a.v));
fsck_err:
	return ret;
}

int bch2_alloc_v2_validate(struct bch_fs *c, struct bkey_s_c k,
			   enum bch_validate_flags flags)
{
	struct bkey_alloc_unpacked u;
	int ret = 0;

	bkey_fsck_err_on(bch2_alloc_unpack_v2(&u, k),
			 c, alloc_v2_unpack_error,
			 "unpack error");
fsck_err:
	return ret;
}

int bch2_alloc_v3_validate(struct bch_fs *c, struct bkey_s_c k,
			   enum bch_validate_flags flags)
{
	struct bkey_alloc_unpacked u;
	int ret = 0;

	bkey_fsck_err_on(bch2_alloc_unpack_v3(&u, k),
			 c, alloc_v2_unpack_error,
			 "unpack error");
fsck_err:
	return ret;
}

int bch2_alloc_v4_validate(struct bch_fs *c, struct bkey_s_c k,
			   enum bch_validate_flags flags)
{
	struct bch_alloc_v4 a;
	int ret = 0;

	bkey_val_copy(&a, bkey_s_c_to_alloc_v4(k));

	bkey_fsck_err_on(alloc_v4_u64s_noerror(&a) > bkey_val_u64s(k.k),
			 c, alloc_v4_val_size_bad,
			 "bad val size (%u > %zu)",
			 alloc_v4_u64s_noerror(&a), bkey_val_u64s(k.k));

	bkey_fsck_err_on(!BCH_ALLOC_V4_BACKPOINTERS_START(&a) &&
			 BCH_ALLOC_V4_NR_BACKPOINTERS(&a),
			 c, alloc_v4_backpointers_start_bad,
			 "invalid backpointers_start");

	bkey_fsck_err_on(alloc_data_type(a, a.data_type) != a.data_type,
			 c, alloc_key_data_type_bad,
			 "invalid data type (got %u should be %u)",
			 a.data_type, alloc_data_type(a, a.data_type));

	for (unsigned i = 0; i < 2; i++)
		bkey_fsck_err_on(a.io_time[i] > LRU_TIME_MAX,
				 c, alloc_key_io_time_bad,
				 "invalid io_time[%s]: %llu, max %llu",
				 i == READ ? "read" : "write",
				 a.io_time[i], LRU_TIME_MAX);

	unsigned stripe_sectors = BCH_ALLOC_V4_BACKPOINTERS_START(&a) * sizeof(u64) >
		offsetof(struct bch_alloc_v4, stripe_sectors)
		? a.stripe_sectors
		: 0;

	switch (a.data_type) {
	case BCH_DATA_free:
	case BCH_DATA_need_gc_gens:
	case BCH_DATA_need_discard:
		bkey_fsck_err_on(stripe_sectors ||
				 a.dirty_sectors ||
				 a.cached_sectors ||
				 a.stripe,
				 c, alloc_key_empty_but_have_data,
				 "empty data type free but have data %u.%u.%u %u",
				 stripe_sectors,
				 a.dirty_sectors,
				 a.cached_sectors,
				 a.stripe);
		break;
	case BCH_DATA_sb:
	case BCH_DATA_journal:
	case BCH_DATA_btree:
	case BCH_DATA_user:
	case BCH_DATA_parity:
		bkey_fsck_err_on(!a.dirty_sectors &&
				 !stripe_sectors,
				 c, alloc_key_dirty_sectors_0,
				 "data_type %s but dirty_sectors==0",
				 bch2_data_type_str(a.data_type));
		break;
	case BCH_DATA_cached:
		bkey_fsck_err_on(!a.cached_sectors ||
				 a.dirty_sectors ||
				 stripe_sectors ||
				 a.stripe,
				 c, alloc_key_cached_inconsistency,
				 "data type inconsistency");

		bkey_fsck_err_on(!a.io_time[READ] &&
				 c->curr_recovery_pass > BCH_RECOVERY_PASS_check_alloc_to_lru_refs,
				 c, alloc_key_cached_but_read_time_zero,
				 "cached bucket with read_time == 0");
		break;
	case BCH_DATA_stripe:
		break;
	}
fsck_err:
	return ret;
}

void bch2_alloc_v4_swab(struct bkey_s k)
{
	struct bch_alloc_v4 *a = bkey_s_to_alloc_v4(k).v;
	struct bch_backpointer *bp, *bps;

	a->journal_seq		= swab64(a->journal_seq);
	a->flags		= swab32(a->flags);
	a->dirty_sectors	= swab32(a->dirty_sectors);
	a->cached_sectors	= swab32(a->cached_sectors);
	a->io_time[0]		= swab64(a->io_time[0]);
	a->io_time[1]		= swab64(a->io_time[1]);
	a->stripe		= swab32(a->stripe);
	a->nr_external_backpointers = swab32(a->nr_external_backpointers);
	a->stripe_sectors	= swab32(a->stripe_sectors);

	bps = alloc_v4_backpointers(a);
	for (bp = bps; bp < bps + BCH_ALLOC_V4_NR_BACKPOINTERS(a); bp++) {
		bp->bucket_offset	= swab40(bp->bucket_offset);
		bp->bucket_len		= swab32(bp->bucket_len);
		bch2_bpos_swab(&bp->pos);
	}
}

void bch2_alloc_to_text(struct printbuf *out, struct bch_fs *c, struct bkey_s_c k)
{
	struct bch_alloc_v4 _a;
	const struct bch_alloc_v4 *a = bch2_alloc_to_v4(k, &_a);
	struct bch_dev *ca = c ? bch2_dev_bucket_tryget_noerror(c, k.k->p) : NULL;

	prt_newline(out);
	printbuf_indent_add(out, 2);

	prt_printf(out, "gen %u oldest_gen %u data_type ", a->gen, a->oldest_gen);
	bch2_prt_data_type(out, a->data_type);
	prt_newline(out);
	prt_printf(out, "journal_seq %llu\n", a->journal_seq);
	prt_printf(out, "need_discard %llu\n", BCH_ALLOC_V4_NEED_DISCARD(a));
	prt_printf(out, "need_inc_gen %llu\n", BCH_ALLOC_V4_NEED_INC_GEN(a));
	prt_printf(out, "dirty_sectors %u\n", a->dirty_sectors);
	prt_printf(out, "stripe_sectors %u\n", a->stripe_sectors);
	prt_printf(out, "cached_sectors %u\n", a->cached_sectors);
	prt_printf(out, "stripe %u\n", a->stripe);
	prt_printf(out, "stripe_redundancy %u\n", a->stripe_redundancy);
	prt_printf(out, "io_time[READ] %llu\n", a->io_time[READ]);
	prt_printf(out, "io_time[WRITE] %llu\n", a->io_time[WRITE]);

	if (ca)
		prt_printf(out, "fragmentation %llu\n", alloc_lru_idx_fragmentation(*a, ca));
	prt_printf(out, "bp_start %llu\n", BCH_ALLOC_V4_BACKPOINTERS_START(a));
	printbuf_indent_sub(out, 2);

	bch2_dev_put(ca);
}

void __bch2_alloc_to_v4(struct bkey_s_c k, struct bch_alloc_v4 *out)
{
	if (k.k->type == KEY_TYPE_alloc_v4) {
		void *src, *dst;

		*out = *bkey_s_c_to_alloc_v4(k).v;

		src = alloc_v4_backpointers(out);
		SET_BCH_ALLOC_V4_BACKPOINTERS_START(out, BCH_ALLOC_V4_U64s);
		dst = alloc_v4_backpointers(out);

		if (src < dst)
			memset(src, 0, dst - src);

		SET_BCH_ALLOC_V4_NR_BACKPOINTERS(out, 0);
	} else {
		struct bkey_alloc_unpacked u = bch2_alloc_unpack(k);

		*out = (struct bch_alloc_v4) {
			.journal_seq		= u.journal_seq,
			.flags			= u.need_discard,
			.gen			= u.gen,
			.oldest_gen		= u.oldest_gen,
			.data_type		= u.data_type,
			.stripe_redundancy	= u.stripe_redundancy,
			.dirty_sectors		= u.dirty_sectors,
			.cached_sectors		= u.cached_sectors,
			.io_time[READ]		= u.read_time,
			.io_time[WRITE]		= u.write_time,
			.stripe			= u.stripe,
		};

		SET_BCH_ALLOC_V4_BACKPOINTERS_START(out, BCH_ALLOC_V4_U64s);
	}
}
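
/*
 * Editor's note, illustrative example (not in the original source): v4 keys
 * written by an older version may have BCH_ALLOC_V4_BACKPOINTERS_START
 * smaller than the current BCH_ALLOC_V4_U64s. Conversion bumps the start up
 * to the current struct size and zeroes the gap, so fields added since then
 * read as 0 instead of stale backpointer bytes. With hypothetical sizes:
 *
 *	old start = 12 u64s, BCH_ALLOC_V4_U64s = 16
 *	src = (u64 *) out + 12, dst = (u64 *) out + 16
 *	memset(src, 0, 4 * sizeof(u64));
 */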

static noinline struct bkey_i_alloc_v4 *
__bch2_alloc_to_v4_mut(struct btree_trans *trans, struct bkey_s_c k)
{
	struct bkey_i_alloc_v4 *ret;

	ret = bch2_trans_kmalloc(trans, max(bkey_bytes(k.k), sizeof(struct bkey_i_alloc_v4)));
	if (IS_ERR(ret))
		return ret;

	if (k.k->type == KEY_TYPE_alloc_v4) {
		void *src, *dst;

		bkey_reassemble(&ret->k_i, k);

		src = alloc_v4_backpointers(&ret->v);
		SET_BCH_ALLOC_V4_BACKPOINTERS_START(&ret->v, BCH_ALLOC_V4_U64s);
		dst = alloc_v4_backpointers(&ret->v);

		if (src < dst)
			memset(src, 0, dst - src);

		SET_BCH_ALLOC_V4_NR_BACKPOINTERS(&ret->v, 0);
		set_alloc_v4_u64s(ret);
	} else {
		bkey_alloc_v4_init(&ret->k_i);
		ret->k.p = k.k->p;
		bch2_alloc_to_v4(k, &ret->v);
	}
	return ret;
}

static inline struct bkey_i_alloc_v4 *bch2_alloc_to_v4_mut_inlined(struct btree_trans *trans, struct bkey_s_c k)
{
	struct bkey_s_c_alloc_v4 a;

	if (likely(k.k->type == KEY_TYPE_alloc_v4) &&
	    ((a = bkey_s_c_to_alloc_v4(k), true) &&
	     BCH_ALLOC_V4_NR_BACKPOINTERS(a.v) == 0))
		return bch2_bkey_make_mut_noupdate_typed(trans, k, alloc_v4);

	return __bch2_alloc_to_v4_mut(trans, k);
}

struct bkey_i_alloc_v4 *bch2_alloc_to_v4_mut(struct btree_trans *trans, struct bkey_s_c k)
{
	return bch2_alloc_to_v4_mut_inlined(trans, k);
}

struct bkey_i_alloc_v4 *
bch2_trans_start_alloc_update_noupdate(struct btree_trans *trans, struct btree_iter *iter,
				       struct bpos pos)
{
	struct bkey_s_c k = bch2_bkey_get_iter(trans, iter, BTREE_ID_alloc, pos,
					       BTREE_ITER_with_updates|
					       BTREE_ITER_cached|
					       BTREE_ITER_intent);
	int ret = bkey_err(k);
	if (unlikely(ret))
		return ERR_PTR(ret);

	struct bkey_i_alloc_v4 *a = bch2_alloc_to_v4_mut_inlined(trans, k);
	ret = PTR_ERR_OR_ZERO(a);
	if (unlikely(ret))
		goto err;
	return a;
err:
	bch2_trans_iter_exit(trans, iter);
	return ERR_PTR(ret);
}

__flatten
struct bkey_i_alloc_v4 *bch2_trans_start_alloc_update(struct btree_trans *trans, struct bpos pos,
						      enum btree_iter_update_trigger_flags flags)
{
	struct btree_iter iter;
	struct bkey_i_alloc_v4 *a = bch2_trans_start_alloc_update_noupdate(trans, &iter, pos);
	int ret = PTR_ERR_OR_ZERO(a);
	if (ret)
		return ERR_PTR(ret);

	ret = bch2_trans_update(trans, &iter, &a->k_i, flags);
	bch2_trans_iter_exit(trans, &iter);
	return unlikely(ret) ? ERR_PTR(ret) : a;
}

static struct bpos alloc_gens_pos(struct bpos pos, unsigned *offset)
{
	*offset = pos.offset & KEY_TYPE_BUCKET_GENS_MASK;

	pos.offset >>= KEY_TYPE_BUCKET_GENS_BITS;
	return pos;
}

static struct bpos bucket_gens_pos_to_alloc(struct bpos pos, unsigned offset)
{
	pos.offset <<= KEY_TYPE_BUCKET_GENS_BITS;
	pos.offset += offset;
	return pos;
}
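
/*
 * Editor's note, illustrative example (not in the original source): a single
 * bucket_gens key covers KEY_TYPE_BUCKET_GENS_NR consecutive buckets, so the
 * two helpers above just split/join a bucket number into a key offset plus an
 * index into the gens array. E.g. if KEY_TYPE_BUCKET_GENS_BITS were 8
 * (256 gens per key):
 *
 *	alloc pos 3:1000  ->  bucket_gens pos 3:3 (1000 >> 8), offset 232
 *	bucket_gens_pos_to_alloc(POS(3, 3), 232)  ->  3:1000 again
 */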

static unsigned alloc_gen(struct bkey_s_c k, unsigned offset)
{
	return k.k->type == KEY_TYPE_bucket_gens
		? bkey_s_c_to_bucket_gens(k).v->gens[offset]
		: 0;
}

int bch2_bucket_gens_validate(struct bch_fs *c, struct bkey_s_c k,
			      enum bch_validate_flags flags)
{
	int ret = 0;

	bkey_fsck_err_on(bkey_val_bytes(k.k) != sizeof(struct bch_bucket_gens),
			 c, bucket_gens_val_size_bad,
			 "bad val size (%zu != %zu)",
			 bkey_val_bytes(k.k), sizeof(struct bch_bucket_gens));
fsck_err:
	return ret;
}

void bch2_bucket_gens_to_text(struct printbuf *out, struct bch_fs *c, struct bkey_s_c k)
{
	struct bkey_s_c_bucket_gens g = bkey_s_c_to_bucket_gens(k);
	unsigned i;

	for (i = 0; i < ARRAY_SIZE(g.v->gens); i++) {
		if (i)
			prt_char(out, ' ');
		prt_printf(out, "%u", g.v->gens[i]);
	}
}

int bch2_bucket_gens_init(struct bch_fs *c)
{
	struct btree_trans *trans = bch2_trans_get(c);
	struct bkey_i_bucket_gens g;
	bool have_bucket_gens_key = false;
	int ret;

	ret = for_each_btree_key(trans, iter, BTREE_ID_alloc, POS_MIN,
				 BTREE_ITER_prefetch, k, ({
		/*
		 * Not a fsck error because this is checked/repaired by
		 * bch2_check_alloc_key() which runs later:
		 */
		if (!bch2_dev_bucket_exists(c, k.k->p))
			continue;

		struct bch_alloc_v4 a;
		u8 gen = bch2_alloc_to_v4(k, &a)->gen;
		unsigned offset;
		struct bpos pos = alloc_gens_pos(iter.pos, &offset);
		int ret2 = 0;

		if (have_bucket_gens_key && !bkey_eq(g.k.p, pos)) {
			ret2 =  bch2_btree_insert_trans(trans, BTREE_ID_bucket_gens, &g.k_i, 0) ?:
				bch2_trans_commit(trans, NULL, NULL, BCH_TRANS_COMMIT_no_enospc);
			if (ret2)
				goto iter_err;
			have_bucket_gens_key = false;
		}

		if (!have_bucket_gens_key) {
			bkey_bucket_gens_init(&g.k_i);
			g.k.p = pos;
			have_bucket_gens_key = true;
		}

		g.v.gens[offset] = gen;
iter_err:
		ret2;
	}));

	if (have_bucket_gens_key && !ret)
		ret = commit_do(trans, NULL, NULL,
				BCH_TRANS_COMMIT_no_enospc,
			bch2_btree_insert_trans(trans, BTREE_ID_bucket_gens, &g.k_i, 0));

	bch2_trans_put(trans);

	bch_err_fn(c, ret);
	return ret;
}

int bch2_alloc_read(struct bch_fs *c)
{
	struct btree_trans *trans = bch2_trans_get(c);
	struct bch_dev *ca = NULL;
	int ret;

	if (c->sb.version_upgrade_complete >= bcachefs_metadata_version_bucket_gens) {
		ret = for_each_btree_key(trans, iter, BTREE_ID_bucket_gens, POS_MIN,
					 BTREE_ITER_prefetch, k, ({
			u64 start = bucket_gens_pos_to_alloc(k.k->p, 0).offset;
			u64 end = bucket_gens_pos_to_alloc(bpos_nosnap_successor(k.k->p), 0).offset;

			if (k.k->type != KEY_TYPE_bucket_gens)
				continue;

			ca = bch2_dev_iterate(c, ca, k.k->p.inode);
			/*
			 * Not a fsck error because this is checked/repaired by
			 * bch2_check_alloc_key() which runs later:
			 */
			if (!ca) {
				bch2_btree_iter_set_pos(&iter, POS(k.k->p.inode + 1, 0));
				continue;
			}

			const struct bch_bucket_gens *g = bkey_s_c_to_bucket_gens(k).v;

			for (u64 b = max_t(u64, ca->mi.first_bucket, start);
			     b < min_t(u64, ca->mi.nbuckets, end);
			     b++)
				*bucket_gen(ca, b) = g->gens[b & KEY_TYPE_BUCKET_GENS_MASK];
			0;
		}));
	} else {
		ret = for_each_btree_key(trans, iter, BTREE_ID_alloc, POS_MIN,
					 BTREE_ITER_prefetch, k, ({
			ca = bch2_dev_iterate(c, ca, k.k->p.inode);
			/*
			 * Not a fsck error because this is checked/repaired by
			 * bch2_check_alloc_key() which runs later:
			 */
			if (!ca) {
				bch2_btree_iter_set_pos(&iter, POS(k.k->p.inode + 1, 0));
				continue;
			}

			struct bch_alloc_v4 a;
			*bucket_gen(ca, k.k->p.offset) = bch2_alloc_to_v4(k, &a)->gen;
			0;
		}));
	}

	bch2_dev_put(ca);
	bch2_trans_put(trans);

	bch_err_fn(c, ret);
	return ret;
}

/* Free space/discard btree: */

static int bch2_bucket_do_index(struct btree_trans *trans,
				struct bch_dev *ca,
				struct bkey_s_c alloc_k,
				const struct bch_alloc_v4 *a,
				bool set)
{
	struct bch_fs *c = trans->c;
	struct btree_iter iter;
	struct bkey_s_c old;
	struct bkey_i *k;
	enum btree_id btree;
	enum bch_bkey_type old_type = !set ? KEY_TYPE_set : KEY_TYPE_deleted;
	enum bch_bkey_type new_type =  set ? KEY_TYPE_set : KEY_TYPE_deleted;
	struct printbuf buf = PRINTBUF;
	int ret;

	if (a->data_type != BCH_DATA_free &&
	    a->data_type != BCH_DATA_need_discard)
		return 0;

	k = bch2_trans_kmalloc_nomemzero(trans, sizeof(*k));
	if (IS_ERR(k))
		return PTR_ERR(k);

	bkey_init(&k->k);
	k->k.type = new_type;

	switch (a->data_type) {
	case BCH_DATA_free:
		btree = BTREE_ID_freespace;
		k->k.p = alloc_freespace_pos(alloc_k.k->p, *a);
		bch2_key_resize(&k->k, 1);
		break;
	case BCH_DATA_need_discard:
		btree = BTREE_ID_need_discard;
		k->k.p = alloc_k.k->p;
		break;
	default:
		return 0;
	}

	old = bch2_bkey_get_iter(trans, &iter, btree,
				 bkey_start_pos(&k->k),
				 BTREE_ITER_intent);
	ret = bkey_err(old);
	if (ret)
		return ret;

	if (ca->mi.freespace_initialized &&
	    c->curr_recovery_pass > BCH_RECOVERY_PASS_check_alloc_info &&
	    bch2_trans_inconsistent_on(old.k->type != old_type, trans,
			"incorrect key when %s %s:%llu:%llu:0 (got %s should be %s)\n"
			" for %s",
			set ? "setting" : "clearing",
			bch2_btree_id_str(btree),
			iter.pos.inode,
			iter.pos.offset,
			bch2_bkey_types[old.k->type],
			bch2_bkey_types[old_type],
			(bch2_bkey_val_to_text(&buf, c, alloc_k), buf.buf))) {
		ret = -EIO;
		goto err;
	}

	ret = bch2_trans_update(trans, &iter, k, 0);
err:
	bch2_trans_iter_exit(trans, &iter);
	printbuf_exit(&buf);
	return ret;
}

static noinline int bch2_bucket_gen_update(struct btree_trans *trans,
					   struct bpos bucket, u8 gen)
{
	struct btree_iter iter;
	unsigned offset;
	struct bpos pos = alloc_gens_pos(bucket, &offset);
	struct bkey_i_bucket_gens *g;
	struct bkey_s_c k;
	int ret;

	g = bch2_trans_kmalloc(trans, sizeof(*g));
	ret = PTR_ERR_OR_ZERO(g);
	if (ret)
		return ret;

	k = bch2_bkey_get_iter(trans, &iter, BTREE_ID_bucket_gens, pos,
			       BTREE_ITER_intent|
			       BTREE_ITER_with_updates);
	ret = bkey_err(k);
	if (ret)
		return ret;

	if (k.k->type != KEY_TYPE_bucket_gens) {
		bkey_bucket_gens_init(&g->k_i);
		g->k.p = iter.pos;
	} else {
		bkey_reassemble(&g->k_i, k);
	}

	g->v.gens[offset] = gen;

	ret = bch2_trans_update(trans, &iter, &g->k_i, 0);
	bch2_trans_iter_exit(trans, &iter);
	return ret;
}

static inline int bch2_dev_data_type_accounting_mod(struct btree_trans *trans, struct bch_dev *ca,
						    enum bch_data_type data_type,
						    s64 delta_buckets,
						    s64 delta_sectors,
						    s64 delta_fragmented, unsigned flags)
{
	struct disk_accounting_pos acc = {
		.type = BCH_DISK_ACCOUNTING_dev_data_type,
		.dev_data_type.dev = ca->dev_idx,
		.dev_data_type.data_type = data_type,
	};
	s64 d[3] = { delta_buckets, delta_sectors, delta_fragmented };

	return bch2_disk_accounting_mod(trans, &acc, d, 3, flags & BTREE_TRIGGER_gc);
}
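
/*
 * Editor's note, illustrative example (not in the original source): per-device
 * accounting for a data type is a triple of deltas applied transactionally:
 * d[0] = buckets, d[1] = sectors, d[2] = fragmented sectors. For instance,
 * when bch2_alloc_key_to_dev_counters() below sees a bucket go from free to
 * user it emits two mods:
 *
 *	{ +1, +new_sectors, +new_fragmented }	against BCH_DATA_user
 *	{ -1, 0, 0 }				against BCH_DATA_free
 *
 * so the totals for both data types stay consistent in one transaction.
 */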

int bch2_alloc_key_to_dev_counters(struct btree_trans *trans, struct bch_dev *ca,
				   const struct bch_alloc_v4 *old,
				   const struct bch_alloc_v4 *new,
				   unsigned flags)
{
	s64 old_sectors = bch2_bucket_sectors(*old);
	s64 new_sectors = bch2_bucket_sectors(*new);
	if (old->data_type != new->data_type) {
		int ret = bch2_dev_data_type_accounting_mod(trans, ca, new->data_type,
				 1, new_sectors, bch2_bucket_sectors_fragmented(ca, *new), flags) ?:
			  bch2_dev_data_type_accounting_mod(trans, ca, old->data_type,
				-1, -old_sectors, -bch2_bucket_sectors_fragmented(ca, *old), flags);
		if (ret)
			return ret;
	} else if (old_sectors != new_sectors) {
		int ret = bch2_dev_data_type_accounting_mod(trans, ca, new->data_type,
				0,
				new_sectors - old_sectors,
				bch2_bucket_sectors_fragmented(ca, *new) -
				bch2_bucket_sectors_fragmented(ca, *old), flags);
		if (ret)
			return ret;
	}

	s64 old_unstriped = bch2_bucket_sectors_unstriped(*old);
	s64 new_unstriped = bch2_bucket_sectors_unstriped(*new);
	if (old_unstriped != new_unstriped) {
		int ret = bch2_dev_data_type_accounting_mod(trans, ca, BCH_DATA_unstriped,
				!!new_unstriped - !!old_unstriped,
				new_unstriped - old_unstriped,
				0,
				flags);
		if (ret)
			return ret;
	}

	return 0;
}

int bch2_trigger_alloc(struct btree_trans *trans,
		       enum btree_id btree, unsigned level,
		       struct bkey_s_c old, struct bkey_s new,
		       enum btree_iter_update_trigger_flags flags)
{
	struct bch_fs *c = trans->c;
	struct printbuf buf = PRINTBUF;
	int ret = 0;

	struct bch_dev *ca = bch2_dev_bucket_tryget(c, new.k->p);
	if (!ca)
		return -EIO;

	struct bch_alloc_v4 old_a_convert;
	const struct bch_alloc_v4 *old_a = bch2_alloc_to_v4(old, &old_a_convert);

	struct bch_alloc_v4 *new_a;
	if (likely(new.k->type == KEY_TYPE_alloc_v4)) {
		new_a = bkey_s_to_alloc_v4(new).v;
	} else {
		BUG_ON(!(flags & (BTREE_TRIGGER_gc|BTREE_TRIGGER_check_repair)));

		struct bkey_i_alloc_v4 *new_ka = bch2_alloc_to_v4_mut_inlined(trans, new.s_c);
		ret = PTR_ERR_OR_ZERO(new_ka);
		if (unlikely(ret))
			goto err;
		new_a = &new_ka->v;
	}

	if (flags & BTREE_TRIGGER_transactional) {
		alloc_data_type_set(new_a, new_a->data_type);

		if (bch2_bucket_sectors_total(*new_a) > bch2_bucket_sectors_total(*old_a)) {
			new_a->io_time[READ] = bch2_current_io_time(c, READ);
			new_a->io_time[WRITE] = bch2_current_io_time(c, WRITE);
			SET_BCH_ALLOC_V4_NEED_INC_GEN(new_a, true);
			SET_BCH_ALLOC_V4_NEED_DISCARD(new_a, true);
		}

		if (data_type_is_empty(new_a->data_type) &&
		    BCH_ALLOC_V4_NEED_INC_GEN(new_a) &&
		    !bch2_bucket_is_open_safe(c, new.k->p.inode, new.k->p.offset)) {
			new_a->gen++;
			SET_BCH_ALLOC_V4_NEED_INC_GEN(new_a, false);
			alloc_data_type_set(new_a, new_a->data_type);
		}

		if (old_a->data_type != new_a->data_type ||
		    (new_a->data_type == BCH_DATA_free &&
		     alloc_freespace_genbits(*old_a) != alloc_freespace_genbits(*new_a))) {
			ret =   bch2_bucket_do_index(trans, ca, old, old_a, false) ?:
				bch2_bucket_do_index(trans, ca, new.s_c, new_a, true);
			if (ret)
				goto err;
		}

		if (new_a->data_type == BCH_DATA_cached &&
		    !new_a->io_time[READ])
			new_a->io_time[READ] = bch2_current_io_time(c, READ);

		u64 old_lru = alloc_lru_idx_read(*old_a);
		u64 new_lru = alloc_lru_idx_read(*new_a);
		if (old_lru != new_lru) {
			ret = bch2_lru_change(trans, new.k->p.inode,
					      bucket_to_u64(new.k->p),
					      old_lru, new_lru);
			if (ret)
				goto err;
		}

		old_lru = alloc_lru_idx_fragmentation(*old_a, ca);
		new_lru = alloc_lru_idx_fragmentation(*new_a, ca);
		if (old_lru != new_lru) {
			ret = bch2_lru_change(trans,
					      BCH_LRU_FRAGMENTATION_START,
					      bucket_to_u64(new.k->p),
					      old_lru, new_lru);
			if (ret)
				goto err;
		}

		if (old_a->gen != new_a->gen) {
			ret = bch2_bucket_gen_update(trans, new.k->p, new_a->gen);
			if (ret)
				goto err;
		}

		if ((flags & BTREE_TRIGGER_bucket_invalidate) &&
		    old_a->cached_sectors) {
			ret = bch2_mod_dev_cached_sectors(trans, ca->dev_idx,
							  -((s64) old_a->cached_sectors),
							  flags & BTREE_TRIGGER_gc);
			if (ret)
				goto err;
		}

		ret = bch2_alloc_key_to_dev_counters(trans, ca, old_a, new_a, flags);
		if (ret)
			goto err;
	}

	if ((flags & BTREE_TRIGGER_atomic) && (flags & BTREE_TRIGGER_insert)) {
		u64 journal_seq = trans->journal_res.seq;
		u64 bucket_journal_seq = new_a->journal_seq;

		if ((flags & BTREE_TRIGGER_insert) &&
		    data_type_is_empty(old_a->data_type) !=
		    data_type_is_empty(new_a->data_type) &&
		    new.k->type == KEY_TYPE_alloc_v4) {
			struct bch_alloc_v4 *v = bkey_s_to_alloc_v4(new).v;

			/*
			 * If the btree updates referring to a bucket weren't flushed
			 * before the bucket became empty again, then we don't have
			 * to wait on a journal flush before we can reuse the bucket:
			 */
			v->journal_seq = bucket_journal_seq =
				data_type_is_empty(new_a->data_type) &&
				(journal_seq == v->journal_seq ||
				 bch2_journal_noflush_seq(&c->journal, v->journal_seq))
				? 0 : journal_seq;
		}

		if (!data_type_is_empty(old_a->data_type) &&
		    data_type_is_empty(new_a->data_type) &&
		    bucket_journal_seq) {
			ret = bch2_set_bucket_needs_journal_commit(&c->buckets_waiting_for_journal,
					c->journal.flushed_seq_ondisk,
					new.k->p.inode, new.k->p.offset,
					bucket_journal_seq);
			if (bch2_fs_fatal_err_on(ret, c,
					"setting bucket_needs_journal_commit: %s", bch2_err_str(ret)))
				goto err;
		}

		if (new_a->gen != old_a->gen) {
			rcu_read_lock();
			u8 *gen = bucket_gen(ca, new.k->p.offset);
			if (unlikely(!gen)) {
				rcu_read_unlock();
				goto invalid_bucket;
			}
			*gen = new_a->gen;
			rcu_read_unlock();
		}

#define eval_state(_a, expr)	({ const struct bch_alloc_v4 *a = _a; expr; })
#define statechange(expr)	!eval_state(old_a, expr) && eval_state(new_a, expr)
#define bucket_flushed(a)	(!a->journal_seq || a->journal_seq <= c->journal.flushed_seq_ondisk)

		if (statechange(a->data_type == BCH_DATA_free) &&
		    bucket_flushed(new_a))
			closure_wake_up(&c->freelist_wait);

		if (statechange(a->data_type == BCH_DATA_need_discard) &&
		    !bch2_bucket_is_open_safe(c, new.k->p.inode, new.k->p.offset) &&
		    bucket_flushed(new_a))
			bch2_discard_one_bucket_fast(ca, new.k->p.offset);

		if (statechange(a->data_type == BCH_DATA_cached) &&
		    !bch2_bucket_is_open(c, new.k->p.inode, new.k->p.offset) &&
		    should_invalidate_buckets(ca, bch2_dev_usage_read(ca)))
			bch2_dev_do_invalidates(ca);

		if (statechange(a->data_type == BCH_DATA_need_gc_gens))
			bch2_gc_gens_async(c);
	}

	if ((flags & BTREE_TRIGGER_gc) && (flags & BTREE_TRIGGER_insert)) {
		rcu_read_lock();
		struct bucket *g = gc_bucket(ca, new.k->p.offset);
		if (unlikely(!g)) {
			rcu_read_unlock();
			goto invalid_bucket;
		}
		g->gen_valid = 1;
		g->gen = new_a->gen;
		rcu_read_unlock();
	}
err:
	printbuf_exit(&buf);
	bch2_dev_put(ca);
	return ret;
invalid_bucket:
	bch2_fs_inconsistent(c, "reference to invalid bucket\n %s",
			     (bch2_bkey_val_to_text(&buf, c, new.s_c), buf.buf));
	ret = -EIO;
	goto err;
}

/*
 * This synthesizes deleted extents for holes, similar to BTREE_ITER_slots for
 * extents-style btrees, but works on non-extents btrees:
 */
static struct bkey_s_c bch2_get_key_or_hole(struct btree_iter *iter, struct bpos end, struct bkey *hole)
{
	struct bkey_s_c k = bch2_btree_iter_peek_slot(iter);

	if (bkey_err(k))
		return k;

	if (k.k->type) {
		return k;
	} else {
		struct btree_iter iter2;
		struct bpos next;

		bch2_trans_copy_iter(&iter2, iter);

		struct btree_path *path = btree_iter_path(iter->trans, iter);
		if (!bpos_eq(path->l[0].b->key.k.p, SPOS_MAX))
			end = bkey_min(end, bpos_nosnap_successor(path->l[0].b->key.k.p));

		end = bkey_min(end, POS(iter->pos.inode, iter->pos.offset + U32_MAX - 1));

		/*
		 * A btree node's min/max is a closed interval, while
		 * peek_upto() takes a half-open interval:
		 */
		k = bch2_btree_iter_peek_upto(&iter2, end);
		next = iter2.pos;
		bch2_trans_iter_exit(iter->trans, &iter2);

		BUG_ON(next.offset >= iter->pos.offset + U32_MAX);

		if (bkey_err(k))
			return k;

		bkey_init(hole);
		hole->p = iter->pos;

		bch2_key_resize(hole, next.offset - iter->pos.offset);
		return (struct bkey_s_c) { hole, NULL };
	}
}
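
/*
 * Editor's note, illustrative example (not in the original source): if the
 * alloc btree has keys at bucket 10 and bucket 20 and nothing in between,
 * positioning the iterator at 11 yields one synthesized KEY_TYPE_deleted key
 * covering buckets 11 through 19 rather than nine separate empty slots, so
 * callers like bch2_check_alloc_info() can handle the whole gap in one step.
 */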

static bool next_bucket(struct bch_fs *c, struct bch_dev **ca, struct bpos *bucket)
{
	if (*ca) {
		if (bucket->offset < (*ca)->mi.first_bucket)
			bucket->offset = (*ca)->mi.first_bucket;

		if (bucket->offset < (*ca)->mi.nbuckets)
			return true;

		bch2_dev_put(*ca);
		*ca = NULL;
		bucket->inode++;
		bucket->offset = 0;
	}

	rcu_read_lock();
	*ca = __bch2_next_dev_idx(c, bucket->inode, NULL);
	if (*ca) {
		*bucket = POS((*ca)->dev_idx, (*ca)->mi.first_bucket);
		bch2_dev_get(*ca);
	}
	rcu_read_unlock();

	return *ca != NULL;
}

static struct bkey_s_c bch2_get_key_or_real_bucket_hole(struct btree_iter *iter,
							struct bch_dev **ca, struct bkey *hole)
{
	struct bch_fs *c = iter->trans->c;
	struct bkey_s_c k;
again:
	k = bch2_get_key_or_hole(iter, POS_MAX, hole);
	if (bkey_err(k))
		return k;

	*ca = bch2_dev_iterate_noerror(c, *ca, k.k->p.inode);

	if (!k.k->type) {
		struct bpos hole_start = bkey_start_pos(k.k);

		if (!*ca || !bucket_valid(*ca, hole_start.offset)) {
			if (!next_bucket(c, ca, &hole_start))
				return bkey_s_c_null;

			bch2_btree_iter_set_pos(iter, hole_start);
			goto again;
		}

		if (k.k->p.offset > (*ca)->mi.nbuckets)
			bch2_key_resize(hole, (*ca)->mi.nbuckets - hole_start.offset);
	}

	return k;
}

static noinline_for_stack
int bch2_check_alloc_key(struct btree_trans *trans,
			 struct bkey_s_c alloc_k,
			 struct btree_iter *alloc_iter,
			 struct btree_iter *discard_iter,
			 struct btree_iter *freespace_iter,
			 struct btree_iter *bucket_gens_iter)
{
	struct bch_fs *c = trans->c;
	struct bch_alloc_v4 a_convert;
	const struct bch_alloc_v4 *a;
	unsigned discard_key_type, freespace_key_type;
	unsigned gens_offset;
	struct bkey_s_c k;
	struct printbuf buf = PRINTBUF;
	int ret = 0;

	struct bch_dev *ca = bch2_dev_bucket_tryget_noerror(c, alloc_k.k->p);
	if (fsck_err_on(!ca,
			trans, alloc_key_to_missing_dev_bucket,
			"alloc key for invalid device:bucket %llu:%llu",
			alloc_k.k->p.inode, alloc_k.k->p.offset))
		ret = bch2_btree_delete_at(trans, alloc_iter, 0);
	if (!ca)
		return ret;

	if (!ca->mi.freespace_initialized)
		goto out;

	a = bch2_alloc_to_v4(alloc_k, &a_convert);

	discard_key_type = a->data_type == BCH_DATA_need_discard ? KEY_TYPE_set : 0;
	bch2_btree_iter_set_pos(discard_iter, alloc_k.k->p);
	k = bch2_btree_iter_peek_slot(discard_iter);
	ret = bkey_err(k);
	if (ret)
		goto err;

	if (fsck_err_on(k.k->type != discard_key_type,
			trans, need_discard_key_wrong,
			"incorrect key in need_discard btree (got %s should be %s)\n"
			" %s",
			bch2_bkey_types[k.k->type],
			bch2_bkey_types[discard_key_type],
			(bch2_bkey_val_to_text(&buf, c, alloc_k), buf.buf))) {
		struct bkey_i *update =
			bch2_trans_kmalloc(trans, sizeof(*update));

		ret = PTR_ERR_OR_ZERO(update);
		if (ret)
			goto err;

		bkey_init(&update->k);
		update->k.type = discard_key_type;
		update->k.p = discard_iter->pos;

		ret = bch2_trans_update(trans, discard_iter, update, 0);
		if (ret)
			goto err;
	}

	freespace_key_type = a->data_type == BCH_DATA_free ? KEY_TYPE_set : 0;
	bch2_btree_iter_set_pos(freespace_iter, alloc_freespace_pos(alloc_k.k->p, *a));
	k = bch2_btree_iter_peek_slot(freespace_iter);
	ret = bkey_err(k);
	if (ret)
		goto err;

	if (fsck_err_on(k.k->type != freespace_key_type,
			trans, freespace_key_wrong,
			"incorrect key in freespace btree (got %s should be %s)\n"
			" %s",
			bch2_bkey_types[k.k->type],
			bch2_bkey_types[freespace_key_type],
			(printbuf_reset(&buf),
			 bch2_bkey_val_to_text(&buf, c, alloc_k), buf.buf))) {
		struct bkey_i *update =
			bch2_trans_kmalloc(trans, sizeof(*update));

		ret = PTR_ERR_OR_ZERO(update);
		if (ret)
			goto err;

		bkey_init(&update->k);
		update->k.type = freespace_key_type;
		update->k.p = freespace_iter->pos;
		bch2_key_resize(&update->k, 1);

		ret = bch2_trans_update(trans, freespace_iter, update, 0);
		if (ret)
			goto err;
	}

	bch2_btree_iter_set_pos(bucket_gens_iter, alloc_gens_pos(alloc_k.k->p, &gens_offset));
	k = bch2_btree_iter_peek_slot(bucket_gens_iter);
	ret = bkey_err(k);
	if (ret)
		goto err;

	if (fsck_err_on(a->gen != alloc_gen(k, gens_offset),
			trans, bucket_gens_key_wrong,
			"incorrect gen in bucket_gens btree (got %u should be %u)\n"
			" %s",
			alloc_gen(k, gens_offset), a->gen,
			(printbuf_reset(&buf),
			 bch2_bkey_val_to_text(&buf, c, alloc_k), buf.buf))) {
		struct bkey_i_bucket_gens *g =
			bch2_trans_kmalloc(trans, sizeof(*g));

		ret = PTR_ERR_OR_ZERO(g);
		if (ret)
			goto err;

		if (k.k->type == KEY_TYPE_bucket_gens) {
			bkey_reassemble(&g->k_i, k);
		} else {
			bkey_bucket_gens_init(&g->k_i);
			g->k.p = alloc_gens_pos(alloc_k.k->p, &gens_offset);
		}

		g->v.gens[gens_offset] = a->gen;

		ret = bch2_trans_update(trans, bucket_gens_iter, &g->k_i, 0);
		if (ret)
			goto err;
	}
out:
err:
fsck_err:
	bch2_dev_put(ca);
	printbuf_exit(&buf);
	return ret;
}

static noinline_for_stack
int bch2_check_alloc_hole_freespace(struct btree_trans *trans,
				    struct bch_dev *ca,
				    struct bpos start,
				    struct bpos *end,
				    struct btree_iter *freespace_iter)
{
	struct bkey_s_c k;
	struct printbuf buf = PRINTBUF;
	int ret;

	if (!ca->mi.freespace_initialized)
		return 0;

	bch2_btree_iter_set_pos(freespace_iter, start);

	k = bch2_btree_iter_peek_slot(freespace_iter);
	ret = bkey_err(k);
	if (ret)
		goto err;

	*end = bkey_min(k.k->p, *end);

	if (fsck_err_on(k.k->type != KEY_TYPE_set,
			trans, freespace_hole_missing,
			"hole in alloc btree missing in freespace btree\n"
			" device %llu buckets %llu-%llu",
			freespace_iter->pos.inode,
			freespace_iter->pos.offset,
			end->offset)) {
		struct bkey_i *update =
			bch2_trans_kmalloc(trans, sizeof(*update));

		ret = PTR_ERR_OR_ZERO(update);
		if (ret)
			goto err;

		bkey_init(&update->k);
		update->k.type = KEY_TYPE_set;
		update->k.p = freespace_iter->pos;
		bch2_key_resize(&update->k,
				min_t(u64, U32_MAX, end->offset -
				      freespace_iter->pos.offset));

		ret = bch2_trans_update(trans, freespace_iter, update, 0);
		if (ret)
			goto err;
	}
err:
fsck_err:
	printbuf_exit(&buf);
	return ret;
}

static noinline_for_stack
int bch2_check_alloc_hole_bucket_gens(struct btree_trans *trans,
				      struct bpos start,
				      struct bpos *end,
				      struct btree_iter *bucket_gens_iter)
{
	struct bkey_s_c k;
	struct printbuf buf = PRINTBUF;
	unsigned i, gens_offset, gens_end_offset;
	int ret;

	bch2_btree_iter_set_pos(bucket_gens_iter, alloc_gens_pos(start, &gens_offset));

	k = bch2_btree_iter_peek_slot(bucket_gens_iter);
	ret = bkey_err(k);
	if (ret)
		goto err;

	if (bkey_cmp(alloc_gens_pos(start, &gens_offset),
		     alloc_gens_pos(*end, &gens_end_offset)))
		gens_end_offset = KEY_TYPE_BUCKET_GENS_NR;

	if (k.k->type == KEY_TYPE_bucket_gens) {
		struct bkey_i_bucket_gens g;
		bool need_update = false;

		bkey_reassemble(&g.k_i, k);

		for (i = gens_offset; i < gens_end_offset; i++) {
			if (fsck_err_on(g.v.gens[i], trans,
					bucket_gens_hole_wrong,
					"hole in alloc btree at %llu:%llu with nonzero gen in bucket_gens btree (%u)",
					bucket_gens_pos_to_alloc(k.k->p, i).inode,
					bucket_gens_pos_to_alloc(k.k->p, i).offset,
					g.v.gens[i])) {
				g.v.gens[i] = 0;
				need_update = true;
			}
		}

		if (need_update) {
			struct bkey_i *u = bch2_trans_kmalloc(trans, sizeof(g));

			ret = PTR_ERR_OR_ZERO(u);
			if (ret)
				goto err;

			memcpy(u, &g, sizeof(g));

			ret = bch2_trans_update(trans, bucket_gens_iter, u, 0);
			if (ret)
				goto err;
		}
	}

	*end = bkey_min(*end, bucket_gens_pos_to_alloc(bpos_nosnap_successor(k.k->p), 0));
err:
fsck_err:
	printbuf_exit(&buf);
	return ret;
}

static noinline_for_stack int bch2_check_discard_freespace_key(struct btree_trans *trans,
							       struct btree_iter *iter)
{
	struct bch_fs *c = trans->c;
	struct btree_iter alloc_iter;
	struct bkey_s_c alloc_k;
	struct bch_alloc_v4 a_convert;
	const struct bch_alloc_v4 *a;
	u64 genbits;
	struct bpos pos;
	enum bch_data_type state = iter->btree_id == BTREE_ID_need_discard
		? BCH_DATA_need_discard
		: BCH_DATA_free;
	struct printbuf buf = PRINTBUF;
	int ret;

	pos = iter->pos;
	pos.offset &= ~(~0ULL << 56);
	genbits = iter->pos.offset & (~0ULL << 56);

	alloc_k = bch2_bkey_get_iter(trans, &alloc_iter, BTREE_ID_alloc, pos, 0);
	ret = bkey_err(alloc_k);
	if (ret)
		return ret;

	if (fsck_err_on(!bch2_dev_bucket_exists(c, pos),
			trans, need_discard_freespace_key_to_invalid_dev_bucket,
			"entry in %s btree for nonexistent dev:bucket %llu:%llu",
			bch2_btree_id_str(iter->btree_id), pos.inode, pos.offset))
		goto delete;

	a = bch2_alloc_to_v4(alloc_k, &a_convert);

	if (fsck_err_on(a->data_type != state ||
			(state == BCH_DATA_free &&
			 genbits != alloc_freespace_genbits(*a)),
			trans, need_discard_freespace_key_bad,
			"%s\n incorrectly set at %s:%llu:%llu:0 (free %u, genbits %llu should be %llu)",
			(bch2_bkey_val_to_text(&buf, c, alloc_k), buf.buf),
			bch2_btree_id_str(iter->btree_id),
			iter->pos.inode,
			iter->pos.offset,
			a->data_type == state,
			genbits >> 56, alloc_freespace_genbits(*a) >> 56))
		goto delete;
out:
fsck_err:
	bch2_set_btree_iter_dontneed(&alloc_iter);
	bch2_trans_iter_exit(trans, &alloc_iter);
	printbuf_exit(&buf);
	return ret;
delete:
	ret =   bch2_btree_delete_extent_at(trans, iter,
			iter->btree_id == BTREE_ID_freespace ? 1 : 0, 0) ?:
		bch2_trans_commit(trans, NULL, NULL,
			BCH_TRANS_COMMIT_no_enospc);
	goto out;
}
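
/*
 * Editor's note, illustrative example (not in the original source): freespace
 * btree keys encode the bucket in the low 56 bits of the key offset and the
 * generation bits in the high 8, which is what the masking above separates:
 *
 *	pos.offset = iter->pos.offset & ~(~0ULL << 56);	// bucket number
 *	genbits    = iter->pos.offset &  (~0ULL << 56);	// gen bits
 *
 * So bucket 1000 with genbits 3 lives at offset (3ULL << 56) | 1000, and a
 * mismatch against alloc_freespace_genbits(*a) means the bucket was reused
 * since the freespace key was written.
 */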

/*
 * We've already checked that generation numbers in the bucket_gens btree are
 * valid for buckets that exist; this just checks for keys for nonexistent
 * buckets.
 */
static noinline_for_stack
int bch2_check_bucket_gens_key(struct btree_trans *trans,
			       struct btree_iter *iter,
			       struct bkey_s_c k)
{
	struct bch_fs *c = trans->c;
	struct bkey_i_bucket_gens g;
	u64 start = bucket_gens_pos_to_alloc(k.k->p, 0).offset;
	u64 end = bucket_gens_pos_to_alloc(bpos_nosnap_successor(k.k->p), 0).offset;
	u64 b;
	bool need_update = false;
	struct printbuf buf = PRINTBUF;
	int ret = 0;

	BUG_ON(k.k->type != KEY_TYPE_bucket_gens);
	bkey_reassemble(&g.k_i, k);

	struct bch_dev *ca = bch2_dev_tryget_noerror(c, k.k->p.inode);
	if (!ca) {
		if (fsck_err(trans, bucket_gens_to_invalid_dev,
			     "bucket_gens key for invalid device:\n %s",
			     (bch2_bkey_val_to_text(&buf, c, k), buf.buf)))
			ret = bch2_btree_delete_at(trans, iter, 0);
		goto out;
	}

	if (fsck_err_on(end <= ca->mi.first_bucket ||
			start >= ca->mi.nbuckets,
			trans, bucket_gens_to_invalid_buckets,
			"bucket_gens key for invalid buckets:\n %s",
			(bch2_bkey_val_to_text(&buf, c, k), buf.buf))) {
		ret = bch2_btree_delete_at(trans, iter, 0);
		goto out;
	}

	for (b = start; b < ca->mi.first_bucket; b++)
		if (fsck_err_on(g.v.gens[b & KEY_TYPE_BUCKET_GENS_MASK],
				trans, bucket_gens_nonzero_for_invalid_buckets,
				"bucket_gens key has nonzero gen for invalid bucket")) {
			g.v.gens[b & KEY_TYPE_BUCKET_GENS_MASK] = 0;
			need_update = true;
		}

	for (b = ca->mi.nbuckets; b < end; b++)
		if (fsck_err_on(g.v.gens[b & KEY_TYPE_BUCKET_GENS_MASK],
				trans, bucket_gens_nonzero_for_invalid_buckets,
				"bucket_gens key has nonzero gen for invalid bucket")) {
			g.v.gens[b & KEY_TYPE_BUCKET_GENS_MASK] = 0;
			need_update = true;
		}

	if (need_update) {
		struct bkey_i *u = bch2_trans_kmalloc(trans, sizeof(g));

		ret = PTR_ERR_OR_ZERO(u);
		if (ret)
			goto out;

		memcpy(u, &g, sizeof(g));
		ret = bch2_trans_update(trans, iter, u, 0);
	}
out:
fsck_err:
	bch2_dev_put(ca);
	printbuf_exit(&buf);
	return ret;
}

int bch2_check_alloc_info(struct bch_fs *c)
{
	struct btree_trans *trans = bch2_trans_get(c);
	struct btree_iter iter, discard_iter, freespace_iter, bucket_gens_iter;
	struct bch_dev *ca = NULL;
	struct bkey hole;
	struct bkey_s_c k;
	int ret = 0;

	bch2_trans_iter_init(trans, &iter, BTREE_ID_alloc, POS_MIN,
			     BTREE_ITER_prefetch);
	bch2_trans_iter_init(trans, &discard_iter, BTREE_ID_need_discard, POS_MIN,
			     BTREE_ITER_prefetch);
	bch2_trans_iter_init(trans, &freespace_iter, BTREE_ID_freespace, POS_MIN,
			     BTREE_ITER_prefetch);
	bch2_trans_iter_init(trans, &bucket_gens_iter, BTREE_ID_bucket_gens, POS_MIN,
			     BTREE_ITER_prefetch);

	while (1) {
		struct bpos next;

		bch2_trans_begin(trans);

		k = bch2_get_key_or_real_bucket_hole(&iter, &ca, &hole);
		ret = bkey_err(k);
		if (ret)
			goto bkey_err;

		if (!k.k)
			break;

		if (k.k->type) {
			next = bpos_nosnap_successor(k.k->p);

			ret = bch2_check_alloc_key(trans,
						   k, &iter,
						   &discard_iter,
						   &freespace_iter,
						   &bucket_gens_iter);
			if (ret)
				goto bkey_err;
		} else {
			next = k.k->p;

			ret = bch2_check_alloc_hole_freespace(trans, ca,
						bkey_start_pos(k.k),
						&next,
						&freespace_iter) ?:
			      bch2_check_alloc_hole_bucket_gens(trans,
						bkey_start_pos(k.k),
						&next,
						&bucket_gens_iter);
			if (ret)
				goto bkey_err;
		}

		ret = bch2_trans_commit(trans, NULL, NULL,
					BCH_TRANS_COMMIT_no_enospc);
		if (ret)
			goto bkey_err;

		bch2_btree_iter_set_pos(&iter, next);
bkey_err:
		if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
			continue;
		if (ret)
			break;
	}
	bch2_trans_iter_exit(trans, &bucket_gens_iter);
	bch2_trans_iter_exit(trans, &freespace_iter);
	bch2_trans_iter_exit(trans, &discard_iter);
	bch2_trans_iter_exit(trans, &iter);
	bch2_dev_put(ca);
	ca = NULL;

	if (ret < 0)
		goto err;

	ret = for_each_btree_key(trans, iter,
				 BTREE_ID_need_discard, POS_MIN,
				 BTREE_ITER_prefetch, k,
		bch2_check_discard_freespace_key(trans, &iter));
	if (ret)
		goto err;

	bch2_trans_iter_init(trans, &iter, BTREE_ID_freespace, POS_MIN,
			     BTREE_ITER_prefetch);
	while (1) {
		bch2_trans_begin(trans);
		k = bch2_btree_iter_peek(&iter);
		if (!k.k)
			break;

		ret = bkey_err(k) ?:
			bch2_check_discard_freespace_key(trans, &iter);
		if (bch2_err_matches(ret, BCH_ERR_transaction_restart)) {
			ret = 0;
			continue;
		}
		if (ret) {
			struct printbuf buf = PRINTBUF;
			bch2_bkey_val_to_text(&buf, c, k);

			bch_err(c, "while checking %s", buf.buf);
			printbuf_exit(&buf);
			break;
		}

		bch2_btree_iter_set_pos(&iter, bpos_nosnap_successor(iter.pos));
	}
	bch2_trans_iter_exit(trans, &iter);
	if (ret)
		goto err;

	ret = for_each_btree_key_commit(trans, iter,
			BTREE_ID_bucket_gens, POS_MIN,
			BTREE_ITER_prefetch, k,
			NULL, NULL, BCH_TRANS_COMMIT_no_enospc,
		bch2_check_bucket_gens_key(trans, &iter, k));
err:
	bch2_trans_put(trans);
	bch_err_fn(c, ret);
	return ret;
}

static int bch2_check_alloc_to_lru_ref(struct btree_trans *trans,
				       struct btree_iter *alloc_iter,
				       struct bkey_buf *last_flushed)
{
	struct bch_fs *c = trans->c;
	struct bch_alloc_v4 a_convert;
	const struct bch_alloc_v4 *a;
	struct bkey_s_c alloc_k;
	struct printbuf buf = PRINTBUF;
	int ret;

	alloc_k = bch2_btree_iter_peek(alloc_iter);
	if (!alloc_k.k)
		return 0;

	ret = bkey_err(alloc_k);
	if (ret)
		return ret;

	struct bch_dev *ca = bch2_dev_tryget_noerror(c, alloc_k.k->p.inode);
	if (!ca)
		return 0;

	a = bch2_alloc_to_v4(alloc_k, &a_convert);

	u64 lru_idx = alloc_lru_idx_fragmentation(*a, ca);
	if (lru_idx) {
		ret = bch2_lru_check_set(trans, BCH_LRU_FRAGMENTATION_START,
					 lru_idx, alloc_k, last_flushed);
		if (ret)
			goto err;
	}

	if (a->data_type != BCH_DATA_cached)
		goto err;

	if (fsck_err_on(!a->io_time[READ],
			trans, alloc_key_cached_but_read_time_zero,
			"cached bucket with read_time 0\n"
			" %s",
			(printbuf_reset(&buf),
			 bch2_bkey_val_to_text(&buf, c, alloc_k), buf.buf))) {
		struct bkey_i_alloc_v4 *a_mut =
			bch2_alloc_to_v4_mut(trans, alloc_k);
		ret = PTR_ERR_OR_ZERO(a_mut);
		if (ret)
			goto err;

		a_mut->v.io_time[READ] = bch2_current_io_time(c, READ);
		ret = bch2_trans_update(trans, alloc_iter,
					&a_mut->k_i, BTREE_TRIGGER_norun);
		if (ret)
			goto err;

		a = &a_mut->v;
	}

	ret = bch2_lru_check_set(trans, alloc_k.k->p.inode, a->io_time[READ],
				 alloc_k, last_flushed);
	if (ret)
		goto err;
err:
fsck_err:
	bch2_dev_put(ca);
	printbuf_exit(&buf);
	return ret;
}

int bch2_check_alloc_to_lru_refs(struct bch_fs *c)
{
	struct bkey_buf last_flushed;

	bch2_bkey_buf_init(&last_flushed);
	bkey_init(&last_flushed.k->k);

	int ret = bch2_trans_run(c,
		for_each_btree_key_commit(trans, iter, BTREE_ID_alloc,
				POS_MIN, BTREE_ITER_prefetch, k,
				NULL, NULL, BCH_TRANS_COMMIT_no_enospc,
			bch2_check_alloc_to_lru_ref(trans, &iter, &last_flushed)));

	bch2_bkey_buf_exit(&last_flushed, c);
	bch_err_fn(c, ret);
	return ret;
}

static int discard_in_flight_add(struct bch_dev *ca, u64 bucket, bool in_progress)
{
	int ret;

	mutex_lock(&ca->discard_buckets_in_flight_lock);
	darray_for_each(ca->discard_buckets_in_flight, i)
		if (i->bucket == bucket) {
			ret = -BCH_ERR_EEXIST_discard_in_flight_add;
			goto out;
		}

	ret = darray_push(&ca->discard_buckets_in_flight, ((struct discard_in_flight) {
			.in_progress	= in_progress,
			.bucket		= bucket,
	}));
out:
	mutex_unlock(&ca->discard_buckets_in_flight_lock);
	return ret;
}

static void discard_in_flight_remove(struct bch_dev *ca, u64 bucket)
{
	mutex_lock(&ca->discard_buckets_in_flight_lock);
	darray_for_each(ca->discard_buckets_in_flight, i)
		if (i->bucket == bucket) {
			BUG_ON(!i->in_progress);
			darray_remove_item(&ca->discard_buckets_in_flight, i);
			goto found;
		}
	BUG();
found:
	mutex_unlock(&ca->discard_buckets_in_flight_lock);
}

struct discard_buckets_state {
	u64		seen;
	u64		open;
	u64		need_journal_commit;
	u64		discarded;
	u64		need_journal_commit_this_dev;
};

static int bch2_discard_one_bucket(struct btree_trans *trans,
				   struct bch_dev *ca,
				   struct btree_iter *need_discard_iter,
				   struct bpos *discard_pos_done,
				   struct discard_buckets_state *s)
{
	struct bch_fs *c = trans->c;
	struct bpos pos = need_discard_iter->pos;
	struct btree_iter iter = { NULL };
	struct bkey_s_c k;
	struct bkey_i_alloc_v4 *a;
	struct printbuf buf = PRINTBUF;
	bool discard_locked = false;
	int ret = 0;

	if (bch2_bucket_is_open_safe(c, pos.inode, pos.offset)) {
		s->open++;
		goto out;
	}

	if (bch2_bucket_needs_journal_commit(&c->buckets_waiting_for_journal,
			c->journal.flushed_seq_ondisk,
			pos.inode, pos.offset)) {
		s->need_journal_commit++;
		s->need_journal_commit_this_dev++;
		goto out;
	}

	k = bch2_bkey_get_iter(trans, &iter, BTREE_ID_alloc,
			       need_discard_iter->pos,
			       BTREE_ITER_cached);
	ret = bkey_err(k);
	if (ret)
		goto out;

	a = bch2_alloc_to_v4_mut(trans, k);
	ret = PTR_ERR_OR_ZERO(a);
	if (ret)
		goto out;

	if (bch2_bucket_sectors_total(a->v)) {
		if (bch2_trans_inconsistent_on(c->curr_recovery_pass > BCH_RECOVERY_PASS_check_alloc_info,
					       trans, "attempting to discard bucket with dirty data\n%s",
					       (bch2_bkey_val_to_text(&buf, c, k), buf.buf)))
			ret = -EIO;
		goto out;
	}

	if (a->v.data_type != BCH_DATA_need_discard) {
		if (data_type_is_empty(a->v.data_type) &&
		    BCH_ALLOC_V4_NEED_INC_GEN(&a->v)) {
			a->v.gen++;
			SET_BCH_ALLOC_V4_NEED_INC_GEN(&a->v, false);
			goto write;
		}

		if (bch2_trans_inconsistent_on(c->curr_recovery_pass > BCH_RECOVERY_PASS_check_alloc_info,
					       trans, "bucket incorrectly set in need_discard btree\n"
					       "%s",
					       (bch2_bkey_val_to_text(&buf, c, k), buf.buf)))
			ret = -EIO;
		goto out;
	}

	if (a->v.journal_seq > c->journal.flushed_seq_ondisk) {
		if (bch2_trans_inconsistent_on(c->curr_recovery_pass > BCH_RECOVERY_PASS_check_alloc_info,
					       trans, "clearing need_discard but journal_seq %llu > flushed_seq %llu\n%s",
					       a->v.journal_seq,
					       c->journal.flushed_seq_ondisk,
					       (bch2_bkey_val_to_text(&buf, c, k), buf.buf)))
			ret = -EIO;
		goto out;
	}

	if (discard_in_flight_add(ca, iter.pos.offset, true))
		goto out;

	discard_locked = true;

	if (!bkey_eq(*discard_pos_done, iter.pos) &&
	    ca->mi.discard && !c->opts.nochanges) {
		/*
		 * This works without any other locks because this is the only
		 * thread that removes items from the need_discard tree
		 */
		bch2_trans_unlock_long(trans);
		blkdev_issue_discard(ca->disk_sb.bdev,
				     k.k->p.offset * ca->mi.bucket_size,
				     ca->mi.bucket_size,
				     GFP_KERNEL);
		*discard_pos_done = iter.pos;

		ret = bch2_trans_relock_notrace(trans);
		if (ret)
			goto out;
	}

	SET_BCH_ALLOC_V4_NEED_DISCARD(&a->v, false);
write:
	alloc_data_type_set(&a->v, a->v.data_type);

	ret =   bch2_trans_update(trans, &iter, &a->k_i, 0) ?:
		bch2_trans_commit(trans, NULL, NULL,
				  BCH_WATERMARK_btree|
				  BCH_TRANS_COMMIT_no_enospc);
	if (ret)
		goto out;

	count_event(c, bucket_discard);
	s->discarded++;
out:
	if (discard_locked)
		discard_in_flight_remove(ca, iter.pos.offset);
	s->seen++;
	bch2_trans_iter_exit(trans, &iter);
	printbuf_exit(&buf);
	return ret;
}

static void bch2_do_discards_work(struct work_struct *work)
{
	struct bch_dev *ca = container_of(work, struct bch_dev, discard_work);
	struct bch_fs *c = ca->fs;
	struct discard_buckets_state s = {};
	struct bpos discard_pos_done = POS_MAX;
	int ret;

	/*
	 * We're doing the commit in bch2_discard_one_bucket instead of using
	 * for_each_btree_key_commit() so that we can increment counters after
	 * successful commit:
	 */
	ret = bch2_trans_run(c,
		for_each_btree_key_upto(trans, iter,
					BTREE_ID_need_discard,
					POS(ca->dev_idx, 0),
					POS(ca->dev_idx, U64_MAX), 0, k,
			bch2_discard_one_bucket(trans, ca, &iter, &discard_pos_done, &s)));

	trace_discard_buckets(c, s.seen, s.open, s.need_journal_commit, s.discarded,
			      bch2_err_str(ret));

	percpu_ref_put(&ca->io_ref);
	bch2_write_ref_put(c, BCH_WRITE_REF_discard);
}

void bch2_dev_do_discards(struct bch_dev *ca)
{
	struct bch_fs *c = ca->fs;

	if (!bch2_write_ref_tryget(c, BCH_WRITE_REF_discard))
		return;

	if (!bch2_dev_get_ioref(c, ca->dev_idx, WRITE))
		goto put_write_ref;

	if (queue_work(c->write_ref_wq, &ca->discard_work))
		return;

	percpu_ref_put(&ca->io_ref);
put_write_ref:
	bch2_write_ref_put(c, BCH_WRITE_REF_discard);
}

void bch2_do_discards(struct bch_fs *c)
{
	for_each_member_device(c, ca)
		bch2_dev_do_discards(ca);
}

1916static int bch2_clear_bucket_needs_discard(struct btree_trans *trans, struct bpos bucket)
1917{
1918 struct btree_iter iter;
1919 bch2_trans_iter_init(trans, &iter, BTREE_ID_alloc, bucket, BTREE_ITER_intent);
1920 struct bkey_s_c k = bch2_btree_iter_peek_slot(&iter);
1921 int ret = bkey_err(k);
1922 if (ret)
1923 goto err;
1924
1925 struct bkey_i_alloc_v4 *a = bch2_alloc_to_v4_mut(trans, k);
1926 ret = PTR_ERR_OR_ZERO(a);
1927 if (ret)
1928 goto err;
1929
1930 BUG_ON(a->v.dirty_sectors);
1931 SET_BCH_ALLOC_V4_NEED_DISCARD(&a->v, false);
1932 alloc_data_type_set(&a->v, a->v.data_type);
1933
1934 ret = bch2_trans_update(trans, &iter, &a->k_i, 0);
1935err:
1936 bch2_trans_iter_exit(trans, &iter);
1937 return ret;
1938}
1939
1940static void bch2_do_discards_fast_work(struct work_struct *work)
1941{
1942 struct bch_dev *ca = container_of(work, struct bch_dev, discard_fast_work);
1943 struct bch_fs *c = ca->fs;
1944
1945 while (1) {
1946 bool got_bucket = false;
1947 u64 bucket;
1948
1949 mutex_lock(&ca->discard_buckets_in_flight_lock);
1950 darray_for_each(ca->discard_buckets_in_flight, i) {
1951 if (i->in_progress)
1952 continue;
1953
1954 got_bucket = true;
1955 bucket = i->bucket;
1956 i->in_progress = true;
1957 break;
1958 }
1959 mutex_unlock(&ca->discard_buckets_in_flight_lock);
1960
1961 if (!got_bucket)
1962 break;
1963
1964 if (ca->mi.discard && !c->opts.nochanges)
1965 blkdev_issue_discard(ca->disk_sb.bdev,
1966 bucket_to_sector(ca, bucket),
1967 ca->mi.bucket_size,
1968 GFP_KERNEL);
1969
1970 int ret = bch2_trans_do(c, NULL, NULL,
1971 BCH_WATERMARK_btree|
1972 BCH_TRANS_COMMIT_no_enospc,
1973 bch2_clear_bucket_needs_discard(trans, POS(ca->dev_idx, bucket)));
1974 bch_err_fn(c, ret);
1975
1976 discard_in_flight_remove(ca, bucket);
1977
1978 if (ret)
1979 break;
1980 }
1981
1982 percpu_ref_put(&ca->io_ref);
1983 bch2_write_ref_put(c, BCH_WRITE_REF_discard_fast);
1984}

static void bch2_discard_one_bucket_fast(struct bch_dev *ca, u64 bucket)
{
	struct bch_fs *c = ca->fs;

	/* Bail if this bucket is already queued for discard: */
	if (discard_in_flight_add(ca, bucket, false))
		return;

	if (!bch2_write_ref_tryget(c, BCH_WRITE_REF_discard_fast))
		return;

	if (!bch2_dev_get_ioref(c, ca->dev_idx, WRITE))
		goto put_ref;

	if (queue_work(c->write_ref_wq, &ca->discard_fast_work))
		return;

	percpu_ref_put(&ca->io_ref);
put_ref:
	bch2_write_ref_put(c, BCH_WRITE_REF_discard_fast);
}

static int invalidate_one_bucket(struct btree_trans *trans,
				 struct btree_iter *lru_iter,
				 struct bkey_s_c lru_k,
				 s64 *nr_to_invalidate)
{
	struct bch_fs *c = trans->c;
	struct bkey_i_alloc_v4 *a = NULL;
	struct printbuf buf = PRINTBUF;
	struct bpos bucket = u64_to_bucket(lru_k.k->p.offset);
	unsigned cached_sectors;
	int ret = 0;

	/* Budget exhausted: a nonzero return ends the LRU scan in our caller */
	if (*nr_to_invalidate <= 0)
		return 1;

	if (!bch2_dev_bucket_exists(c, bucket)) {
		prt_str(&buf, "lru entry points to invalid bucket");
		goto err;
	}

	if (bch2_bucket_is_open_safe(c, bucket.inode, bucket.offset))
		return 0;

	a = bch2_trans_start_alloc_update(trans, bucket, BTREE_TRIGGER_bucket_invalidate);
	ret = PTR_ERR_OR_ZERO(a);
	if (ret)
		goto out;

	/* We expect harmless races here due to the btree write buffer: */
	if (lru_pos_time(lru_iter->pos) != alloc_lru_idx_read(a->v))
		goto out;

	BUG_ON(a->v.data_type != BCH_DATA_cached);
	BUG_ON(a->v.dirty_sectors);

	if (!a->v.cached_sectors)
		bch_err(c, "invalidating empty bucket, confused");

	cached_sectors = a->v.cached_sectors;

	/*
	 * Invalidate: bump the generation (which invalidates any stale extent
	 * pointers into this bucket), drop the cached data, and reset the io
	 * clocks:
	 */
	SET_BCH_ALLOC_V4_NEED_INC_GEN(&a->v, false);
	a->v.gen++;
	a->v.data_type = 0;
	a->v.dirty_sectors = 0;
	a->v.stripe_sectors = 0;
	a->v.cached_sectors = 0;
	a->v.io_time[READ] = bch2_current_io_time(c, READ);
	a->v.io_time[WRITE] = bch2_current_io_time(c, WRITE);

	ret = bch2_trans_commit(trans, NULL, NULL,
				BCH_WATERMARK_btree|
				BCH_TRANS_COMMIT_no_enospc);
	if (ret)
		goto out;

	trace_and_count(c, bucket_invalidate, c, bucket.inode, bucket.offset, cached_sectors);
	--*nr_to_invalidate;
out:
	printbuf_exit(&buf);
	return ret;
err:
	prt_str(&buf, "\n lru key: ");
	bch2_bkey_val_to_text(&buf, c, lru_k);

	prt_str(&buf, "\n lru entry: ");
	bch2_lru_pos_to_text(&buf, lru_iter->pos);

	prt_str(&buf, "\n alloc key: ");
	if (!a)
		bch2_bpos_to_text(&buf, bucket);
	else
		bch2_bkey_val_to_text(&buf, c, bkey_i_to_s_c(&a->k_i));

	bch_err(c, "%s", buf.buf);
	if (c->curr_recovery_pass > BCH_RECOVERY_PASS_check_lrus) {
		bch2_inconsistent_error(c);
		ret = -EINVAL;
	}

	goto out;
}
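
/*
 * Worked example (illustrative values): invalidating a cached bucket whose
 * alloc key has
 *
 *	gen = 3, data_type = BCH_DATA_cached, cached_sectors = 128
 *
 * leaves it with
 *
 *	gen = 4, data_type = 0, cached_sectors = 0
 *
 * and both io_time clocks set to the current time; bumping the generation is
 * what invalidates any extent pointers still referring to the bucket's old
 * contents.
 */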

static struct bkey_s_c next_lru_key(struct btree_trans *trans, struct btree_iter *iter,
				    struct bch_dev *ca, bool *wrapped)
{
	struct bkey_s_c k;
again:
	k = bch2_btree_iter_peek_upto(iter, lru_pos(ca->dev_idx, U64_MAX, LRU_TIME_MAX));
	if (!k.k && !*wrapped) {
		/* Hit the end of this device's LRU: wrap around to the start, once */
		bch2_btree_iter_set_pos(iter, lru_pos(ca->dev_idx, 0, 0));
		*wrapped = true;
		goto again;
	}

	return k;
}
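
/*
 * lru_pos() and friends live in lru.h; the usage in this file implies the key
 * layout, sketched here with the exact bit widths assumed:
 *
 *	pos.inode  packs the LRU id (here the device index) in its high bits
 *		   and the LRU time in its low bits - lru_pos_time() extracts
 *		   the latter;
 *	pos.offset is the dev:bucket encoding that u64_to_bucket() converts
 *		   back into an alloc-btree bpos.
 *
 * So lru_pos(ca->dev_idx, U64_MAX, LRU_TIME_MAX) above is just the largest
 * possible key in this device's LRU, i.e. the end of the scan range.
 */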

static void bch2_do_invalidates_work(struct work_struct *work)
{
	struct bch_dev *ca = container_of(work, struct bch_dev, invalidate_work);
	struct bch_fs *c = ca->fs;
	struct btree_trans *trans = bch2_trans_get(c);
	int ret = 0;

	ret = bch2_btree_write_buffer_tryflush(trans);
	if (ret)
		goto err;

	s64 nr_to_invalidate =
		should_invalidate_buckets(ca, bch2_dev_usage_read(ca));
	struct btree_iter iter;
	bool wrapped = false;

	bch2_trans_iter_init(trans, &iter, BTREE_ID_lru,
			     lru_pos(ca->dev_idx, 0,
				     ((bch2_current_io_time(c, READ) + U32_MAX) &
				      LRU_TIME_MAX)), 0);

	while (true) {
		bch2_trans_begin(trans);

		struct bkey_s_c k = next_lru_key(trans, &iter, ca, &wrapped);
		ret = bkey_err(k);
		if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
			continue;
		if (ret)
			break;
		if (!k.k)
			break;

		ret = invalidate_one_bucket(trans, &iter, k, &nr_to_invalidate);
		if (ret)
			break;

		bch2_btree_iter_advance(&iter);
	}
	bch2_trans_iter_exit(trans, &iter);
err:
	bch2_trans_put(trans);
	percpu_ref_put(&ca->io_ref);
	bch2_write_ref_put(c, BCH_WRITE_REF_invalidate);
}

void bch2_dev_do_invalidates(struct bch_dev *ca)
{
	struct bch_fs *c = ca->fs;

	if (!bch2_write_ref_tryget(c, BCH_WRITE_REF_invalidate))
		return;

	if (!bch2_dev_get_ioref(c, ca->dev_idx, WRITE))
		goto put_ref;

	if (queue_work(c->write_ref_wq, &ca->invalidate_work))
		return;

	percpu_ref_put(&ca->io_ref);
put_ref:
	bch2_write_ref_put(c, BCH_WRITE_REF_invalidate);
}

void bch2_do_invalidates(struct bch_fs *c)
{
	for_each_member_device(c, ca)
		bch2_dev_do_invalidates(ca);
}

int bch2_dev_freespace_init(struct bch_fs *c, struct bch_dev *ca,
			    u64 bucket_start, u64 bucket_end)
{
	struct btree_trans *trans = bch2_trans_get(c);
	struct btree_iter iter;
	struct bkey_s_c k;
	struct bkey hole;
	struct bpos end = POS(ca->dev_idx, bucket_end);
	struct bch_member *m;
	unsigned long last_updated = jiffies;
	int ret;

	BUG_ON(bucket_start > bucket_end);
	BUG_ON(bucket_end > ca->mi.nbuckets);

	bch2_trans_iter_init(trans, &iter, BTREE_ID_alloc,
		POS(ca->dev_idx, max_t(u64, ca->mi.first_bucket, bucket_start)),
		BTREE_ITER_prefetch);
	/*
	 * Scan the alloc btree for every bucket on @ca, and add buckets to the
	 * freespace/need_discard/need_gc_gens btrees as needed:
	 */
	while (1) {
		if (time_after(jiffies, last_updated + HZ * 10)) {
			bch_info(ca, "%s: currently at %llu/%llu",
				 __func__, iter.pos.offset, ca->mi.nbuckets);
			last_updated = jiffies;
		}

		bch2_trans_begin(trans);

		if (bkey_ge(iter.pos, end)) {
			ret = 0;
			break;
		}

		k = bch2_get_key_or_hole(&iter, end, &hole);
		ret = bkey_err(k);
		if (ret)
			goto bkey_err;

		if (k.k->type) {
			/*
			 * We process live keys in the alloc btree one at a
			 * time:
			 */
			struct bch_alloc_v4 a_convert;
			const struct bch_alloc_v4 *a = bch2_alloc_to_v4(k, &a_convert);

			ret = bch2_bucket_do_index(trans, ca, k, a, true) ?:
				bch2_trans_commit(trans, NULL, NULL,
						  BCH_TRANS_COMMIT_no_enospc);
			if (ret)
				goto bkey_err;

			bch2_btree_iter_advance(&iter);
		} else {
			/*
			 * A hole: there are no alloc keys for this range, so
			 * these buckets are all free - cover the whole hole
			 * with a single extent-style freespace key:
			 */
			struct bkey_i *freespace;

			freespace = bch2_trans_kmalloc(trans, sizeof(*freespace));
			ret = PTR_ERR_OR_ZERO(freespace);
			if (ret)
				goto bkey_err;

			bkey_init(&freespace->k);
			freespace->k.type = KEY_TYPE_set;
			freespace->k.p = k.k->p;
			freespace->k.size = k.k->size;

			ret = bch2_btree_insert_trans(trans, BTREE_ID_freespace, freespace, 0) ?:
				bch2_trans_commit(trans, NULL, NULL,
						  BCH_TRANS_COMMIT_no_enospc);
			if (ret)
				goto bkey_err;

			bch2_btree_iter_set_pos(&iter, k.k->p);
		}
bkey_err:
		if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
			continue;
		if (ret)
			break;
	}

	bch2_trans_iter_exit(trans, &iter);
	bch2_trans_put(trans);

	if (ret < 0) {
		bch_err_msg(ca, ret, "initializing free space");
		return ret;
	}

	mutex_lock(&c->sb_lock);
	m = bch2_members_v2_get_mut(c->disk_sb.sb, ca->dev_idx);
	SET_BCH_MEMBER_FREESPACE_INITIALIZED(m, true);
	mutex_unlock(&c->sb_lock);

	return 0;
}
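
/*
 * Worked example for the hole case above (illustrative numbers): if
 * bch2_get_key_or_hole() returns a hole covering buckets [100,228) on device
 * 3, the key written to the freespace btree is extent-style:
 *
 *	freespace->k.type = KEY_TYPE_set;
 *	freespace->k.p    = POS(3, 228);	// bkeys store their end position
 *	freespace->k.size = 128;		// ...so this covers [100,228)
 *
 * i.e. one key marks the whole run of free buckets, rather than one key per
 * bucket.
 */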

int bch2_fs_freespace_init(struct bch_fs *c)
{
	int ret = 0;
	bool doing_init = false;

	/*
	 * We can crash during the device add path, so we need to check this on
	 * every mount:
	 */

	for_each_member_device(c, ca) {
		if (ca->mi.freespace_initialized)
			continue;

		if (!doing_init) {
			bch_info(c, "initializing freespace");
			doing_init = true;
		}

		ret = bch2_dev_freespace_init(c, ca, 0, ca->mi.nbuckets);
		if (ret) {
			bch2_dev_put(ca);
			bch_err_fn(c, ret);
			return ret;
		}
	}

	if (doing_init) {
		mutex_lock(&c->sb_lock);
		bch2_write_super(c);
		mutex_unlock(&c->sb_lock);
		bch_verbose(c, "done initializing freespace");
	}

	return 0;
}

/* device removal */

int bch2_dev_remove_alloc(struct bch_fs *c, struct bch_dev *ca)
{
	struct bpos start = POS(ca->dev_idx, 0);
	struct bpos end = POS(ca->dev_idx, U64_MAX);
	int ret;

	/*
	 * We clear the LRU and need_discard btrees first so that we don't race
	 * with bch2_do_invalidates() and bch2_do_discards()
	 */
	ret = bch2_dev_remove_stripes(c, ca->dev_idx) ?:
		bch2_btree_delete_range(c, BTREE_ID_lru, start, end,
					BTREE_TRIGGER_norun, NULL) ?:
		bch2_btree_delete_range(c, BTREE_ID_need_discard, start, end,
					BTREE_TRIGGER_norun, NULL) ?:
		bch2_btree_delete_range(c, BTREE_ID_freespace, start, end,
					BTREE_TRIGGER_norun, NULL) ?:
		bch2_btree_delete_range(c, BTREE_ID_backpointers, start, end,
					BTREE_TRIGGER_norun, NULL) ?:
		bch2_btree_delete_range(c, BTREE_ID_bucket_gens, start, end,
					BTREE_TRIGGER_norun, NULL) ?:
		bch2_btree_delete_range(c, BTREE_ID_alloc, start, end,
					BTREE_TRIGGER_norun, NULL) ?:
		bch2_dev_usage_remove(c, ca->dev_idx);
	bch_err_msg(ca, ret, "removing dev alloc info");
	return ret;
}

/* Bucket IO clocks: */

int bch2_bucket_io_time_reset(struct btree_trans *trans, unsigned dev,
			      size_t bucket_nr, int rw)
{
	struct bch_fs *c = trans->c;
	struct btree_iter iter;
	struct bkey_i_alloc_v4 *a;
	u64 now;
	int ret = 0;

	/* If we can't relock the transaction, restart it from scratch: */
	if (bch2_trans_relock(trans))
		bch2_trans_begin(trans);

	a = bch2_trans_start_alloc_update_noupdate(trans, &iter, POS(dev, bucket_nr));
	ret = PTR_ERR_OR_ZERO(a);
	if (ret)
		return ret;

	now = bch2_current_io_time(c, rw);
	if (a->v.io_time[rw] == now)
		goto out;

	a->v.io_time[rw] = now;

	ret = bch2_trans_update(trans, &iter, &a->k_i, 0) ?:
		bch2_trans_commit(trans, NULL, NULL, 0);
out:
	bch2_trans_iter_exit(trans, &iter);
	return ret;
}

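/*
 * Example (hypothetical call site, not from this file): a read path bumping a
 * bucket's READ clock so the LRU sees it as recently used, given an existing
 * btree_trans, the owning struct bch_dev *ca, and an extent pointer ptr:
 *
 *	ret = bch2_bucket_io_time_reset(trans, ptr->dev,
 *					PTR_BUCKET_NR(ca, ptr), READ);
 *
 * The helper commits internally and is a no-op when the clock already
 * matches, so callers mostly just need to handle transaction restarts.
 */
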
/* Startup/shutdown (ro/rw): */

void bch2_recalc_capacity(struct bch_fs *c)
{
	u64 capacity = 0, reserved_sectors = 0, gc_reserve;
	unsigned bucket_size_max = 0;
	unsigned long ra_pages = 0;

	lockdep_assert_held(&c->state_lock);

	for_each_online_member(c, ca) {
		struct backing_dev_info *bdi = ca->disk_sb.bdev->bd_disk->bdi;

		ra_pages += bdi->ra_pages;
	}

	bch2_set_ra_pages(c, ra_pages);

	for_each_rw_member(c, ca) {
		u64 dev_reserve = 0;

		/*
		 * We need to reserve buckets (from the number
		 * of currently available buckets) against
		 * foreground writes so that mainly copygc can
		 * make forward progress.
		 *
		 * We need enough to refill the various reserves
		 * from scratch - copygc will use its entire
		 * reserve all at once, then run again once its
		 * reserve is refilled (from the formerly
		 * available buckets).
		 *
		 * This reserve is just used when considering if
		 * allocations for foreground writes must wait -
		 * not -ENOSPC calculations.
		 */

		dev_reserve += ca->nr_btree_reserve * 2;
		dev_reserve += ca->mi.nbuckets >> 6; /* copygc reserve */

		dev_reserve += 1;	/* btree write point */
		dev_reserve += 1;	/* copygc write point */
		dev_reserve += 1;	/* rebalance write point */

		dev_reserve *= ca->mi.bucket_size;

		capacity += bucket_to_sector(ca, ca->mi.nbuckets -
					     ca->mi.first_bucket);

		reserved_sectors += dev_reserve * 2;

		bucket_size_max = max_t(unsigned, bucket_size_max,
					ca->mi.bucket_size);
	}

	gc_reserve = c->opts.gc_reserve_bytes
		? c->opts.gc_reserve_bytes >> 9
		: div64_u64(capacity * c->opts.gc_reserve_percent, 100);

	reserved_sectors = max(gc_reserve, reserved_sectors);

	reserved_sectors = min(reserved_sectors, capacity);

	c->reserved = reserved_sectors;
	c->capacity = capacity - reserved_sectors;

	c->bucket_size_max = bucket_size_max;

	/* Wake up in case someone was waiting for buckets: */
	closure_wake_up(&c->freelist_wait);
}
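
/*
 * Worked example (illustrative numbers): a single rw device with
 * nbuckets = 1 << 20, first_bucket = 16, bucket_size = 1024 sectors and
 * nr_btree_reserve = 512 gives
 *
 *	dev_reserve	 = 512 * 2 + (1 << 20 >> 6) + 3 = 17411 buckets
 *			 = 17411 * 1024 sectors
 *	capacity	 = (1048576 - 16) * 1024 sectors
 *	reserved_sectors = 17411 * 1024 * 2, roughly 3.3% of capacity
 *
 * before the max() against gc_reserve (a gc_reserve_percent slice of
 * capacity), which may well be the larger of the two and win.
 */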

u64 bch2_min_rw_member_capacity(struct bch_fs *c)
{
	u64 ret = U64_MAX;

	for_each_rw_member(c, ca)
		ret = min(ret, ca->mi.nbuckets * ca->mi.bucket_size);
	return ret;
}

static bool bch2_dev_has_open_write_point(struct bch_fs *c, struct bch_dev *ca)
{
	struct open_bucket *ob;
	bool ret = false;

	for (ob = c->open_buckets;
	     ob < c->open_buckets + ARRAY_SIZE(c->open_buckets);
	     ob++) {
		spin_lock(&ob->lock);
		if (ob->valid && !ob->on_partial_list &&
		    ob->dev == ca->dev_idx)
			ret = true;
		spin_unlock(&ob->lock);
	}

	return ret;
}

/* device goes ro: */
void bch2_dev_allocator_remove(struct bch_fs *c, struct bch_dev *ca)
{
	lockdep_assert_held(&c->state_lock);

	/* First, remove device from allocation groups: */

	for (unsigned i = 0; i < ARRAY_SIZE(c->rw_devs); i++)
		clear_bit(ca->dev_idx, c->rw_devs[i].d);

	c->rw_devs_change_count++;

	/*
	 * Capacity is calculated based on devices in allocation groups:
	 */
	bch2_recalc_capacity(c);

	bch2_open_buckets_stop(c, ca, false);

	/*
	 * Wake up threads that were blocked on allocation, so they can notice
	 * the device is no longer available for allocation and the capacity
	 * has changed:
	 */
	closure_wake_up(&c->freelist_wait);

	/*
	 * journal_res_get() can block waiting for free space in the journal -
	 * it needs to notice there may not be devices to allocate from anymore:
	 */
	wake_up(&c->journal.wait);

	/* Now wait for any in flight writes: */

	closure_wait_event(&c->open_buckets_wait,
			   !bch2_dev_has_open_write_point(c, ca));
}

/* device goes rw: */
void bch2_dev_allocator_add(struct bch_fs *c, struct bch_dev *ca)
{
	lockdep_assert_held(&c->state_lock);

	for (unsigned i = 0; i < ARRAY_SIZE(c->rw_devs); i++)
		if (ca->mi.data_allowed & (1 << i))
			set_bit(ca->dev_idx, c->rw_devs[i].d);

	c->rw_devs_change_count++;
}

void bch2_dev_allocator_background_exit(struct bch_dev *ca)
{
	darray_exit(&ca->discard_buckets_in_flight);
}

void bch2_dev_allocator_background_init(struct bch_dev *ca)
{
	mutex_init(&ca->discard_buckets_in_flight_lock);
	INIT_WORK(&ca->discard_work, bch2_do_discards_work);
	INIT_WORK(&ca->discard_fast_work, bch2_do_discards_fast_work);
	INIT_WORK(&ca->invalidate_work, bch2_do_invalidates_work);
}

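/*
 * Example (hypothetical ordering, not a call site in this file): a device
 * coming up read-write would be wired together with the helpers above roughly
 * like this, so that the work items exist before anything can queue them:
 *
 *	bch2_dev_allocator_background_init(ca);	// lock + work items
 *	bch2_dev_allocator_add(c, ca);		// join allocation groups
 *	bch2_recalc_capacity(c);		// note: needs c->state_lock held
 *	bch2_dev_do_discards(ca);		// kick any pending discards
 */
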
void bch2_fs_allocator_background_init(struct bch_fs *c)
{
	spin_lock_init(&c->freelist_lock);
}