// SPDX-License-Identifier: GPL-2.0
/*
 * Moving/copying garbage collector
 *
 * Copyright 2012 Google, Inc.
 */
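
/*
 * Overview: the copygc thread walks the fragmentation LRU (BTREE_ID_lru,
 * starting at BCH_LRU_FRAGMENTATION_START) looking for mostly-empty buckets,
 * rewrites their live data with bch2_evacuate_bucket(), and then throttles
 * itself on the write IO clock until enough new data has been written to make
 * another pass worthwhile (see bch2_copygc_wait_amount() below).
 */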

#include "bcachefs.h"
#include "alloc_background.h"
#include "alloc_foreground.h"
#include "btree_iter.h"
#include "btree_update.h"
#include "btree_write_buffer.h"
#include "buckets.h"
#include "clock.h"
#include "errcode.h"
#include "error.h"
#include "lru.h"
#include "move.h"
#include "movinggc.h"
#include "trace.h"

#include <linux/freezer.h>
#include <linux/kthread.h>
#include <linux/math64.h>
#include <linux/sched/task.h>
#include <linux/wait.h>

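/*
 * Buckets currently being evacuated are tracked in a singly linked FIFO
 * (@first/@last), with an rhashtable keyed by struct move_bucket_key on the
 * side so bch2_copygc_get_buckets() can cheaply skip buckets that are
 * already in flight.
 */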
struct buckets_in_flight {
	struct rhashtable		table;
	struct move_bucket_in_flight	*first;
	struct move_bucket_in_flight	*last;
	size_t				nr;
	size_t				sectors;
};

static const struct rhashtable_params bch_move_bucket_params = {
	.head_offset		= offsetof(struct move_bucket_in_flight, hash),
	.key_offset		= offsetof(struct move_bucket_in_flight, bucket.k),
	.key_len		= sizeof(struct move_bucket_key),
	.automatic_shrinking	= true,
};

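/*
 * Start tracking @b: insert it into the hash table (returning -EEXIST if it's
 * already there) and append it to the tail of the FIFO.
 */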
static struct move_bucket_in_flight *
move_bucket_in_flight_add(struct buckets_in_flight *list, struct move_bucket b)
{
	struct move_bucket_in_flight *new = kzalloc(sizeof(*new), GFP_KERNEL);
	int ret;

	if (!new)
		return ERR_PTR(-ENOMEM);

	new->bucket = b;

	ret = rhashtable_lookup_insert_fast(&list->table, &new->hash,
					    bch_move_bucket_params);
	if (ret) {
		kfree(new);
		return ERR_PTR(ret);
	}

	if (!list->first)
		list->first = new;
	else
		list->last->next = new;

	list->last = new;
	list->nr++;
	list->sectors += b.sectors;
	return new;
}

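/*
 * Returns 1 if @b is worth evacuating: the bucket isn't currently open for
 * writes and its fragmentation LRU index is nonzero and not newer than @time.
 * Also fills in the bucket's current gen and dirty sector count. Returns 0
 * otherwise, or a negative error code on lookup error.
 */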
static int bch2_bucket_is_movable(struct btree_trans *trans,
				  struct move_bucket *b, u64 time)
{
	struct bch_fs *c = trans->c;
	struct btree_iter iter;
	struct bkey_s_c k;
	struct bch_alloc_v4 _a;
	const struct bch_alloc_v4 *a;
	int ret;

	if (bch2_bucket_is_open(trans->c,
				b->k.bucket.inode,
				b->k.bucket.offset))
		return 0;

	k = bch2_bkey_get_iter(trans, &iter, BTREE_ID_alloc,
			       b->k.bucket, BTREE_ITER_cached);
	ret = bkey_err(k);
	if (ret)
		return ret;

	struct bch_dev *ca = bch2_dev_tryget(c, k.k->p.inode);
	if (!ca)
		goto out;

	a = bch2_alloc_to_v4(k, &_a);
	b->k.gen = a->gen;
	b->sectors = bch2_bucket_sectors_dirty(*a);
	u64 lru_idx = alloc_lru_idx_fragmentation(*a, ca);

	ret = lru_idx && lru_idx <= time;

	bch2_dev_put(ca);
out:
	bch2_trans_iter_exit(trans, &iter);
	return ret;
}

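/*
 * Reap completed buckets from the head of the FIFO: drop each entry whose
 * evacuation IO has finished (@count == 0). If @flush, wait for in-flight IO
 * to complete instead of stopping at the first busy entry.
 */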
static void move_buckets_wait(struct moving_context *ctxt,
			      struct buckets_in_flight *list,
			      bool flush)
{
	struct move_bucket_in_flight *i;
	int ret;

	while ((i = list->first)) {
		if (flush)
			move_ctxt_wait_event(ctxt, !atomic_read(&i->count));

		if (atomic_read(&i->count))
			break;

		list->first = i->next;
		if (!list->first)
			list->last = NULL;

		list->nr--;
		list->sectors -= i->bucket.sectors;

		ret = rhashtable_remove_fast(&list->table, &i->hash,
					     bch_move_bucket_params);
		BUG_ON(ret);
		kfree(i);
	}

	bch2_trans_unlock_long(ctxt->trans);
}

static bool bucket_in_flight(struct buckets_in_flight *list,
			     struct move_bucket_key k)
{
	return rhashtable_lookup_fast(&list->table, &k, bch_move_bucket_params);
}

typedef DARRAY(struct move_bucket) move_buckets;

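/*
 * Walk the fragmentation LRU and collect up to max(16, nr_in_flight / 4)
 * movable buckets that aren't already being evacuated. The btree write buffer
 * is flushed first so that recently queued LRU updates are visible to the
 * scan.
 */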
static int bch2_copygc_get_buckets(struct moving_context *ctxt,
				   struct buckets_in_flight *buckets_in_flight,
				   move_buckets *buckets)
{
	struct btree_trans *trans = ctxt->trans;
	struct bch_fs *c = trans->c;
	size_t nr_to_get = max_t(size_t, 16U, buckets_in_flight->nr / 4);
	size_t saw = 0, in_flight = 0, not_movable = 0, sectors = 0;
	int ret;

	move_buckets_wait(ctxt, buckets_in_flight, false);

	ret = bch2_btree_write_buffer_tryflush(trans);
	if (bch2_err_matches(ret, EROFS))
		return ret;

	if (bch2_fs_fatal_err_on(ret, c, "%s: from bch2_btree_write_buffer_tryflush()", bch2_err_str(ret)))
		return ret;

	bch2_trans_begin(trans);

	ret = for_each_btree_key_max(trans, iter, BTREE_ID_lru,
				     lru_pos(BCH_LRU_FRAGMENTATION_START, 0, 0),
				     lru_pos(BCH_LRU_FRAGMENTATION_START, U64_MAX, LRU_TIME_MAX),
				     0, k, ({
		struct move_bucket b = { .k.bucket = u64_to_bucket(k.k->p.offset) };
		int ret2 = 0;

		saw++;

		ret2 = bch2_bucket_is_movable(trans, &b, lru_pos_time(k.k->p));
		if (ret2 < 0)
			goto err;

		if (!ret2)
			not_movable++;
		else if (bucket_in_flight(buckets_in_flight, b.k))
			in_flight++;
		else {
			ret2 = darray_push(buckets, b);
			if (ret2)
				goto err;
			sectors += b.sectors;
		}

		ret2 = buckets->nr >= nr_to_get;
err:
		ret2;
	}));

	pr_debug("have: %zu (%zu) saw %zu in flight %zu not movable %zu got %zu (%zu)/%zu buckets ret %i",
		 buckets_in_flight->nr, buckets_in_flight->sectors,
		 saw, in_flight, not_movable, buckets->nr, sectors, nr_to_get, ret);

	return ret < 0 ? ret : 0;
}

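/*
 * One copygc pass: pick a batch of fragmented buckets, mark them in flight,
 * and kick off evacuation of each one. Rewrites are issued at
 * BCH_WATERMARK_copygc, so they may allocate from space ordinary writes
 * can't touch and copygc can keep making forward progress.
 */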
noinline
static int bch2_copygc(struct moving_context *ctxt,
		       struct buckets_in_flight *buckets_in_flight,
		       bool *did_work)
{
	struct btree_trans *trans = ctxt->trans;
	struct bch_fs *c = trans->c;
	struct data_update_opts data_opts = {
		.btree_insert_flags = BCH_WATERMARK_copygc,
	};
	move_buckets buckets = { 0 };
	struct move_bucket_in_flight *f;
	u64 sectors_seen	= atomic64_read(&ctxt->stats->sectors_seen);
	u64 sectors_moved	= atomic64_read(&ctxt->stats->sectors_moved);
	int ret = 0;

	ret = bch2_copygc_get_buckets(ctxt, buckets_in_flight, &buckets);
	if (ret)
		goto err;

	darray_for_each(buckets, i) {
		if (kthread_should_stop() || freezing(current))
			break;

		f = move_bucket_in_flight_add(buckets_in_flight, *i);
		ret = PTR_ERR_OR_ZERO(f);
		if (ret == -EEXIST) { /* rare race: copygc_get_buckets returned same bucket more than once */
			ret = 0;
			continue;
		}
		if (ret == -ENOMEM) { /* flush IO, continue later */
			ret = 0;
			break;
		}

		ret = bch2_evacuate_bucket(ctxt, f, f->bucket.k.bucket,
					   f->bucket.k.gen, data_opts);
		if (ret)
			goto err;

		*did_work = true;
	}
err:

	/* no entries in LRU btree found, or got to end: */
	if (bch2_err_matches(ret, ENOENT))
		ret = 0;

	if (ret < 0 && !bch2_err_matches(ret, EROFS))
		bch_err_msg(c, ret, "from bch2_move_data()");

	sectors_seen	= atomic64_read(&ctxt->stats->sectors_seen) - sectors_seen;
	sectors_moved	= atomic64_read(&ctxt->stats->sectors_moved) - sectors_moved;
	trace_and_count(c, copygc, c, buckets.nr, sectors_seen, sectors_moved);

	darray_exit(&buckets);
	return ret;
}

/*
 * Copygc runs when the amount of fragmented data is above some arbitrary
 * threshold:
 *
 * The threshold at the limit - when the device is full - is the amount of
 * space we reserved in bch2_recalc_capacity; we can't have more than that
 * amount of disk space stranded due to fragmentation and still store
 * everything we have promised to store.
 *
 * But we don't want to be running copygc unnecessarily when the device still
 * has plenty of free space - rather, we want copygc to smoothly run every so
 * often and continually reduce the amount of fragmented space as the device
 * fills up. So, we increase the threshold by half the current free space.
 */
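/*
 * Concretely, for each rw device the allowed fragmentation is half the free
 * space remaining at the stripe watermark, and the returned wait is how many
 * more sectors may be written (on the write IO clock) before that allowance
 * is used up. With illustrative numbers: 1000 free buckets of 1024 sectors
 * gives fragmented_allowed = 1000 * 1024 / 2 = 512000 sectors; if movable
 * data types currently account for 200000 fragmented sectors, copygc won't
 * run for another 312000 sectors of writes. The fs-wide wait is the minimum
 * over all rw members.
 */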
unsigned long bch2_copygc_wait_amount(struct bch_fs *c)
{
	s64 wait = S64_MAX, fragmented_allowed, fragmented;

	for_each_rw_member(c, ca) {
		struct bch_dev_usage usage = bch2_dev_usage_read(ca);

		fragmented_allowed = ((__dev_buckets_available(ca, usage, BCH_WATERMARK_stripe) *
				       ca->mi.bucket_size) >> 1);
		fragmented = 0;

		for (unsigned i = 0; i < BCH_DATA_NR; i++)
			if (data_type_movable(i))
				fragmented += usage.d[i].fragmented;

		wait = min(wait, max(0LL, fragmented_allowed - fragmented));
	}

	return wait;
}

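/* Print copygc wait state, in human-readable units, for debugging. */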
void bch2_copygc_wait_to_text(struct printbuf *out, struct bch_fs *c)
{
	printbuf_tabstop_push(out, 32);
	prt_printf(out, "running:\t%u\n", c->copygc_running);
	prt_printf(out, "copygc_wait:\t%llu\n", c->copygc_wait);
	prt_printf(out, "copygc_wait_at:\t%llu\n", c->copygc_wait_at);

	prt_printf(out, "Currently waiting for:\t");
	prt_human_readable_u64(out, max(0LL, c->copygc_wait -
					atomic64_read(&c->io_clock[WRITE].now)) << 9);
	prt_newline(out);

	prt_printf(out, "Currently waiting since:\t");
	prt_human_readable_u64(out, max(0LL,
					atomic64_read(&c->io_clock[WRITE].now) -
					c->copygc_wait_at) << 9);
	prt_newline(out);

	prt_printf(out, "Currently calculated wait:\t");
	prt_human_readable_u64(out, bch2_copygc_wait_amount(c));
	prt_newline(out);
}

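/*
 * Main copygc loop: sleep while copygc is disabled or while there isn't
 * enough fragmented space to bother, then run bch2_copygc() passes until
 * told to stop. Throttling sleeps are on the write IO clock, i.e. they
 * complete after a given number of sectors have been written, not after
 * wall-clock time.
 */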
static int bch2_copygc_thread(void *arg)
{
	struct bch_fs *c = arg;
	struct moving_context ctxt;
	struct bch_move_stats move_stats;
	struct io_clock *clock = &c->io_clock[WRITE];
	struct buckets_in_flight *buckets;
	u64 last, wait;
	int ret = 0;

	buckets = kzalloc(sizeof(struct buckets_in_flight), GFP_KERNEL);
	if (!buckets)
		return -ENOMEM;
	ret = rhashtable_init(&buckets->table, &bch_move_bucket_params);
	bch_err_msg(c, ret, "allocating copygc buckets in flight");
	if (ret) {
		kfree(buckets);
		return ret;
	}

	set_freezable();

	bch2_move_stats_init(&move_stats, "copygc");
	bch2_moving_ctxt_init(&ctxt, c, NULL, &move_stats,
			      writepoint_ptr(&c->copygc_write_point),
			      false);

	while (!ret && !kthread_should_stop()) {
		bool did_work = false;

		bch2_trans_unlock_long(ctxt.trans);
		cond_resched();

		if (!c->opts.copygc_enabled) {
			move_buckets_wait(&ctxt, buckets, true);
			kthread_wait_freezable(c->opts.copygc_enabled ||
					       kthread_should_stop());
		}

		if (unlikely(freezing(current))) {
			move_buckets_wait(&ctxt, buckets, true);
			__refrigerator(false);
			continue;
		}

		last = atomic64_read(&clock->now);
		wait = bch2_copygc_wait_amount(c);

		if (wait > clock->max_slop) {
			/*
			 * Not enough fragmented space yet: sleep on the write
			 * IO clock until roughly @wait more sectors have been
			 * written:
			 */
			c->copygc_wait_at = last;
			c->copygc_wait = last + wait;
			move_buckets_wait(&ctxt, buckets, true);
			trace_and_count(c, copygc_wait, c, wait, last + wait);
			bch2_kthread_io_clock_wait(clock, last + wait,
						   MAX_SCHEDULE_TIMEOUT);
			continue;
		}

		c->copygc_wait = 0;

		c->copygc_running = true;
		ret = bch2_copygc(&ctxt, buckets, &did_work);
		c->copygc_running = false;

		wake_up(&c->copygc_running_wq);

		if (!wait && !did_work) {
			/*
			 * Nothing to move and nothing to wait for: back off
			 * for 1/64th of the smallest member device's capacity
			 * worth of writes before rescanning the LRU:
			 */
			u64 min_member_capacity = bch2_min_rw_member_capacity(c);

			if (min_member_capacity == U64_MAX)
				min_member_capacity = 128 * 2048;

			move_buckets_wait(&ctxt, buckets, true);
			bch2_kthread_io_clock_wait(clock, last + (min_member_capacity >> 6),
						   MAX_SCHEDULE_TIMEOUT);
		}
	}

	move_buckets_wait(&ctxt, buckets, true);

	rhashtable_destroy(&buckets->table);
	kfree(buckets);
	bch2_moving_ctxt_exit(&ctxt);
	bch2_move_stats_exit(&move_stats, c);

	return 0;
}

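/* Shut down the copygc thread: kthread_stop() waits for it to exit. */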
void bch2_copygc_stop(struct bch_fs *c)
{
	if (c->copygc_thread) {
		kthread_stop(c->copygc_thread);
		put_task_struct(c->copygc_thread);
	}
	c->copygc_thread = NULL;
}

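/*
 * Create and wake the copygc thread if it isn't already running; a no-op
 * when mounted with nochanges. We hold a ref on the task so it can't go away
 * between kthread_stop() and put_task_struct() in bch2_copygc_stop().
 */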
int bch2_copygc_start(struct bch_fs *c)
{
	struct task_struct *t;
	int ret;

	if (c->copygc_thread)
		return 0;

	if (c->opts.nochanges)
		return 0;

	if (bch2_fs_init_fault("copygc_start"))
		return -ENOMEM;

	t = kthread_create(bch2_copygc_thread, c, "bch-copygc/%s", c->name);
	ret = PTR_ERR_OR_ZERO(t);
	bch_err_msg(c, ret, "creating copygc thread");
	if (ret)
		return ret;

	get_task_struct(t);

	c->copygc_thread = t;
	wake_up_process(c->copygc_thread);

	return 0;
}

void bch2_fs_copygc_init(struct bch_fs *c)
{
	init_waitqueue_head(&c->copygc_running_wq);
	c->copygc_running = false;
}