// SPDX-License-Identifier: GPL-2.0

#include "bcachefs.h"
#include "alloc_foreground.h"
#include "bkey_buf.h"
#include "btree_update.h"
#include "buckets.h"
#include "data_update.h"
#include "ec.h"
#include "error.h"
#include "extents.h"
#include "io_write.h"
#include "keylist.h"
#include "move.h"
#include "nocow_locking.h"
#include "rebalance.h"
#include "snapshot.h"
#include "subvolume.h"
#include "trace.h"

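/*
 * Emit the move_extent_finish tracepoint with the key formatted as text; the
 * printbuf is only allocated when the tracepoint is actually enabled.
 */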
static void trace_move_extent_finish2(struct bch_fs *c, struct bkey_s_c k)
{
	if (trace_move_extent_finish_enabled()) {
		struct printbuf buf = PRINTBUF;

		bch2_bkey_val_to_text(&buf, c, k);
		trace_move_extent_finish(c, buf.buf);
		printbuf_exit(&buf);
	}
}

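/*
 * Emit the move_extent_fail tracepoint: format the rewrite_ptrs bitmask, the
 * rewrites actually found in @insert, and the old/new/wrote/insert keys, so
 * that a failed index update can be diagnosed from the trace log alone.
 */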
static void trace_move_extent_fail2(struct data_update *m,
				    struct bkey_s_c new,
				    struct bkey_s_c wrote,
				    struct bkey_i *insert,
				    const char *msg)
{
	struct bch_fs *c = m->op.c;
	struct bkey_s_c old = bkey_i_to_s_c(m->k.k);
	const union bch_extent_entry *entry;
	struct bch_extent_ptr *ptr;
	struct extent_ptr_decoded p;
	struct printbuf buf = PRINTBUF;
	unsigned i, rewrites_found = 0;

	if (!trace_move_extent_fail_enabled())
		return;

	prt_str(&buf, msg);

	if (insert) {
		i = 0;
		bkey_for_each_ptr_decode(old.k, bch2_bkey_ptrs_c(old), p, entry) {
			if (((1U << i) & m->data_opts.rewrite_ptrs) &&
			    (ptr = bch2_extent_has_ptr(old, p, bkey_i_to_s(insert))) &&
			    !ptr->cached)
				rewrites_found |= 1U << i;
			i++;
		}
	}

	prt_printf(&buf, "\nrewrite ptrs: %u%u%u%u",
		   (m->data_opts.rewrite_ptrs & (1 << 0)) != 0,
		   (m->data_opts.rewrite_ptrs & (1 << 1)) != 0,
		   (m->data_opts.rewrite_ptrs & (1 << 2)) != 0,
		   (m->data_opts.rewrite_ptrs & (1 << 3)) != 0);

	prt_printf(&buf, "\nrewrites found: %u%u%u%u",
		   (rewrites_found & (1 << 0)) != 0,
		   (rewrites_found & (1 << 1)) != 0,
		   (rewrites_found & (1 << 2)) != 0,
		   (rewrites_found & (1 << 3)) != 0);

	prt_str(&buf, "\nold: ");
	bch2_bkey_val_to_text(&buf, c, old);

	prt_str(&buf, "\nnew: ");
	bch2_bkey_val_to_text(&buf, c, new);

	prt_str(&buf, "\nwrote: ");
	bch2_bkey_val_to_text(&buf, c, wrote);

	if (insert) {
		prt_str(&buf, "\ninsert: ");
		bch2_bkey_val_to_text(&buf, c, bkey_i_to_s_c(insert));
	}

	trace_move_extent_fail(c, buf.buf);
	printbuf_exit(&buf);
}

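/*
 * Commit the index update for a completed data move: for each key we wrote
 * (op->insert_keys), re-check the extent currently in the btree against the
 * version we read from (@m->k). If they still match, mark the rewritten
 * pointers cached, drop conflicting and excess replicas, append the newly
 * written pointers and commit; if we raced with another update, count the
 * key as raced and advance past it.
 */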
static int __bch2_data_update_index_update(struct btree_trans *trans,
					   struct bch_write_op *op)
{
	struct bch_fs *c = op->c;
	struct btree_iter iter;
	struct data_update *m =
		container_of(op, struct data_update, op);
	struct keylist *keys = &op->insert_keys;
	struct bkey_buf _new, _insert;
	int ret = 0;

	bch2_bkey_buf_init(&_new);
	bch2_bkey_buf_init(&_insert);
	bch2_bkey_buf_realloc(&_insert, c, U8_MAX);

	bch2_trans_iter_init(trans, &iter, m->btree_id,
			     bkey_start_pos(&bch2_keylist_front(keys)->k),
			     BTREE_ITER_slots|BTREE_ITER_intent);

	while (1) {
		struct bkey_s_c k;
		struct bkey_s_c old = bkey_i_to_s_c(m->k.k);
		struct bkey_i *insert = NULL;
		struct bkey_i_extent *new;
		const union bch_extent_entry *entry_c;
		union bch_extent_entry *entry;
		struct extent_ptr_decoded p;
		struct bch_extent_ptr *ptr;
		const struct bch_extent_ptr *ptr_c;
		struct bpos next_pos;
		bool should_check_enospc;
		s64 i_sectors_delta = 0, disk_sectors_delta = 0;
		unsigned rewrites_found = 0, durability, i;

		bch2_trans_begin(trans);

		k = bch2_btree_iter_peek_slot(&iter);
		ret = bkey_err(k);
		if (ret)
			goto err;

		new = bkey_i_to_extent(bch2_keylist_front(keys));

		if (!bch2_extents_match(k, old)) {
			trace_move_extent_fail2(m, k, bkey_i_to_s_c(&new->k_i),
						NULL, "no match:");
			goto nowork;
		}

		bkey_reassemble(_insert.k, k);
		insert = _insert.k;

		bch2_bkey_buf_copy(&_new, c, bch2_keylist_front(keys));
		new = bkey_i_to_extent(_new.k);
		bch2_cut_front(iter.pos, &new->k_i);

		bch2_cut_front(iter.pos, insert);
		bch2_cut_back(new->k.p, insert);
		bch2_cut_back(insert->k.p, &new->k_i);
		/*
		 * @old: extent that we read from
		 * @insert: key that we're going to update, initialized from
		 * extent currently in btree - same as @old unless we raced with
		 * other updates
		 * @new: extent with new pointers that we'll be adding to @insert
		 *
		 * First, drop rewrite_ptrs from @new:
		 */
		i = 0;
		bkey_for_each_ptr_decode(old.k, bch2_bkey_ptrs_c(old), p, entry_c) {
			if (((1U << i) & m->data_opts.rewrite_ptrs) &&
			    (ptr = bch2_extent_has_ptr(old, p, bkey_i_to_s(insert))) &&
			    !ptr->cached) {
				bch2_extent_ptr_set_cached(bkey_i_to_s(insert), ptr);
				rewrites_found |= 1U << i;
			}
			i++;
		}

		if (m->data_opts.rewrite_ptrs &&
		    !rewrites_found &&
		    bch2_bkey_durability(c, k) >= m->op.opts.data_replicas) {
			trace_move_extent_fail2(m, k, bkey_i_to_s_c(&new->k_i), insert, "no rewrites found:");
			goto nowork;
		}

		/*
		 * A replica that we just wrote might conflict with a replica
		 * that we want to keep, due to racing with another move:
		 */
restart_drop_conflicting_replicas:
		extent_for_each_ptr(extent_i_to_s(new), ptr)
			if ((ptr_c = bch2_bkey_has_device_c(bkey_i_to_s_c(insert), ptr->dev)) &&
			    !ptr_c->cached) {
				bch2_bkey_drop_ptr_noerror(bkey_i_to_s(&new->k_i), ptr);
				goto restart_drop_conflicting_replicas;
			}

		if (!bkey_val_u64s(&new->k)) {
			trace_move_extent_fail2(m, k, bkey_i_to_s_c(&new->k_i), insert, "new replicas conflicted:");
			goto nowork;
		}

		/* Now, drop pointers that conflict with what we just wrote: */
		extent_for_each_ptr_decode(extent_i_to_s(new), p, entry)
			if ((ptr = bch2_bkey_has_device(bkey_i_to_s(insert), p.ptr.dev)))
				bch2_bkey_drop_ptr_noerror(bkey_i_to_s(insert), ptr);

		durability = bch2_bkey_durability(c, bkey_i_to_s_c(insert)) +
			bch2_bkey_durability(c, bkey_i_to_s_c(&new->k_i));

		/* Now, drop excess replicas: */
		rcu_read_lock();
restart_drop_extra_replicas:
		bkey_for_each_ptr_decode(old.k, bch2_bkey_ptrs(bkey_i_to_s(insert)), p, entry) {
			unsigned ptr_durability = bch2_extent_ptr_durability(c, &p);

			if (!p.ptr.cached &&
			    durability - ptr_durability >= m->op.opts.data_replicas) {
				durability -= ptr_durability;

				bch2_extent_ptr_set_cached(bkey_i_to_s(insert), &entry->ptr);
				goto restart_drop_extra_replicas;
			}
		}
		rcu_read_unlock();

		/* Finally, add the pointers we just wrote: */
		extent_for_each_ptr_decode(extent_i_to_s(new), p, entry)
			bch2_extent_ptr_decoded_append(insert, &p);

		bch2_bkey_narrow_crcs(insert, (struct bch_extent_crc_unpacked) { 0 });
		bch2_extent_normalize(c, bkey_i_to_s(insert));

		ret = bch2_sum_sector_overwrites(trans, &iter, insert,
						 &should_check_enospc,
						 &i_sectors_delta,
						 &disk_sectors_delta);
		if (ret)
			goto err;

		if (disk_sectors_delta > (s64) op->res.sectors) {
			ret = bch2_disk_reservation_add(c, &op->res,
						disk_sectors_delta - op->res.sectors,
						!should_check_enospc
						? BCH_DISK_RESERVATION_NOFAIL : 0);
			if (ret)
				goto out;
		}

		next_pos = insert->k.p;

		/*
		 * Check for nonce offset inconsistency:
		 * This is debug code - we've been seeing this bug rarely, and
		 * it's been hard to reproduce, so this should give us some more
		 * information when it does occur:
		 */
		struct printbuf err = PRINTBUF;
		int invalid = bch2_bkey_invalid(c, bkey_i_to_s_c(insert), __btree_node_type(0, m->btree_id), 0, &err);
		printbuf_exit(&err);

		if (invalid) {
			struct printbuf buf = PRINTBUF;

			prt_str(&buf, "about to insert invalid key in data update path");
			prt_str(&buf, "\nold: ");
			bch2_bkey_val_to_text(&buf, c, old);
			prt_str(&buf, "\nk: ");
			bch2_bkey_val_to_text(&buf, c, k);
			prt_str(&buf, "\nnew: ");
			bch2_bkey_val_to_text(&buf, c, bkey_i_to_s_c(insert));

			bch2_print_string_as_lines(KERN_ERR, buf.buf);
			printbuf_exit(&buf);

			bch2_fatal_error(c);
			goto out;
		}

		if (trace_data_update_enabled()) {
			struct printbuf buf = PRINTBUF;

			prt_str(&buf, "\nold: ");
			bch2_bkey_val_to_text(&buf, c, old);
			prt_str(&buf, "\nk: ");
			bch2_bkey_val_to_text(&buf, c, k);
			prt_str(&buf, "\nnew: ");
			bch2_bkey_val_to_text(&buf, c, bkey_i_to_s_c(insert));

			trace_data_update(c, buf.buf);
			printbuf_exit(&buf);
		}

		ret = bch2_insert_snapshot_whiteouts(trans, m->btree_id,
						k.k->p, bkey_start_pos(&insert->k)) ?:
			bch2_insert_snapshot_whiteouts(trans, m->btree_id,
						k.k->p, insert->k.p) ?:
			bch2_bkey_set_needs_rebalance(c, insert, &op->opts) ?:
			bch2_trans_update(trans, &iter, insert,
				BTREE_UPDATE_internal_snapshot_node) ?:
			bch2_trans_commit(trans, &op->res,
				NULL,
				BCH_TRANS_COMMIT_no_check_rw|
				BCH_TRANS_COMMIT_no_enospc|
				m->data_opts.btree_insert_flags);
		if (!ret) {
			bch2_btree_iter_set_pos(&iter, next_pos);

			this_cpu_add(c->counters[BCH_COUNTER_move_extent_finish], new->k.size);
			trace_move_extent_finish2(c, bkey_i_to_s_c(&new->k_i));
		}
err:
		if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
			ret = 0;
		if (ret)
			break;
next:
		while (bkey_ge(iter.pos, bch2_keylist_front(keys)->k.p)) {
			bch2_keylist_pop_front(keys);
			if (bch2_keylist_empty(keys))
				goto out;
		}
		continue;
nowork:
		if (m->stats) {
			BUG_ON(k.k->p.offset <= iter.pos.offset);
			atomic64_inc(&m->stats->keys_raced);
			atomic64_add(k.k->p.offset - iter.pos.offset,
				     &m->stats->sectors_raced);
		}

		count_event(c, move_extent_fail);

		bch2_btree_iter_advance(&iter);
		goto next;
	}
out:
	bch2_trans_iter_exit(trans, &iter);
	bch2_bkey_buf_exit(&_insert, c);
	bch2_bkey_buf_exit(&_new, c);
	BUG_ON(bch2_err_matches(ret, BCH_ERR_transaction_restart));
	return ret;
}

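/* Transaction wrapper around __bch2_data_update_index_update(): */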
int bch2_data_update_index_update(struct bch_write_op *op)
{
	return bch2_trans_run(op->c, __bch2_data_update_index_update(trans, op));
}

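/*
 * Called when the read half of a data update has completed: hand the crc
 * state from the read over to the write op and submit the write. The write
 * bio must already own the pages of the read bio.
 */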
void bch2_data_update_read_done(struct data_update *m,
				struct bch_extent_crc_unpacked crc)
{
	/* write bio must own pages: */
	BUG_ON(!m->op.wbio.bio.bi_vcnt);

	m->op.crc = crc;
	m->op.wbio.bio.bi_iter.bi_size = crc.compressed_size << 9;

	closure_call(&m->op.cl, bch2_write, NULL, NULL);
}

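/*
 * Tear down a data update: release the nocow bucket locks and device refs
 * taken in bch2_data_update_init(), then free the key buffer, disk
 * reservation and bio pages.
 */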
void bch2_data_update_exit(struct data_update *update)
{
	struct bch_fs *c = update->op.c;
	struct bkey_ptrs_c ptrs =
		bch2_bkey_ptrs_c(bkey_i_to_s_c(update->k.k));

	bkey_for_each_ptr(ptrs, ptr) {
		struct bch_dev *ca = bch2_dev_have_ref(c, ptr->dev);
		if (c->opts.nocow_enabled)
			bch2_bucket_nocow_unlock(&c->nocow_locks,
						 PTR_BUCKET_POS(ca, ptr), 0);
		bch2_dev_put(ca);
	}

	bch2_bkey_buf_exit(&update->k, c);
	bch2_disk_reservation_put(c, &update->op.res);
	bch2_bio_free_pages_pool(c, &update->op.wbio.bio);
}

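/*
 * Unwritten extents carry no data, so there is nothing to read or write:
 * instead of going through the normal IO path, allocate space, mark the new
 * pointers unwritten, and update the index directly, looping until the whole
 * extent has been reallocated or we race with another update.
 */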
static void bch2_update_unwritten_extent(struct btree_trans *trans,
					 struct data_update *update)
{
	struct bch_fs *c = update->op.c;
	struct bio *bio = &update->op.wbio.bio;
	struct bkey_i_extent *e;
	struct write_point *wp;
	struct closure cl;
	struct btree_iter iter;
	struct bkey_s_c k;
	int ret;

	closure_init_stack(&cl);
	bch2_keylist_init(&update->op.insert_keys, update->op.inline_keys);

	while (bio_sectors(bio)) {
		unsigned sectors = bio_sectors(bio);

		bch2_trans_begin(trans);

		bch2_trans_iter_init(trans, &iter, update->btree_id, update->op.pos,
				     BTREE_ITER_slots);
		ret = lockrestart_do(trans, ({
			k = bch2_btree_iter_peek_slot(&iter);
			bkey_err(k);
		}));
		bch2_trans_iter_exit(trans, &iter);

		if (ret || !bch2_extents_match(k, bkey_i_to_s_c(update->k.k)))
			break;

		e = bkey_extent_init(update->op.insert_keys.top);
		e->k.p = update->op.pos;

		ret = bch2_alloc_sectors_start_trans(trans,
				update->op.target,
				false,
				update->op.write_point,
				&update->op.devs_have,
				update->op.nr_replicas,
				update->op.nr_replicas,
				update->op.watermark,
				0, &cl, &wp);
		if (bch2_err_matches(ret, BCH_ERR_operation_blocked)) {
			bch2_trans_unlock(trans);
			closure_sync(&cl);
			continue;
		}

		bch_err_fn_ratelimited(c, ret);

		if (ret)
			return;

		sectors = min(sectors, wp->sectors_free);

		bch2_key_resize(&e->k, sectors);

		bch2_open_bucket_get(c, wp, &update->op.open_buckets);
		bch2_alloc_sectors_append_ptrs(c, wp, &e->k_i, sectors, false);
		bch2_alloc_sectors_done(c, wp);

		bio_advance(bio, sectors << 9);
		update->op.pos.offset += sectors;

		extent_for_each_ptr(extent_i_to_s(e), ptr)
			ptr->unwritten = true;
		bch2_keylist_push(&update->op.insert_keys);

		ret = __bch2_data_update_index_update(trans, &update->op);

		bch2_open_buckets_put(c, &update->op.open_buckets);

		if (ret)
			break;
	}

	if (closure_nr_remaining(&cl) != 1) {
		bch2_trans_unlock(trans);
		closure_sync(&cl);
	}
}

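/*
 * Drop the pointers indicated by data_opts.kill_ptrs from @k and commit the
 * result, letting bch2_extent_normalize() deal with an extent left with no
 * pointers.
 */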
int bch2_extent_drop_ptrs(struct btree_trans *trans,
			  struct btree_iter *iter,
			  struct bkey_s_c k,
			  struct data_update_opts data_opts)
{
	struct bch_fs *c = trans->c;
	struct bkey_i *n;
	int ret;

	n = bch2_bkey_make_mut_noupdate(trans, k);
	ret = PTR_ERR_OR_ZERO(n);
	if (ret)
		return ret;

	while (data_opts.kill_ptrs) {
		unsigned i = 0, drop = __fls(data_opts.kill_ptrs);

		bch2_bkey_drop_ptrs(bkey_i_to_s(n), ptr, i++ == drop);
		data_opts.kill_ptrs ^= 1U << drop;
	}

	/*
	 * If the new extent no longer has any pointers, bch2_extent_normalize()
	 * will do the appropriate thing with it (turning it into a
	 * KEY_TYPE_error key, or just a discard if it was a cached extent)
	 */
	bch2_extent_normalize(c, bkey_i_to_s(n));

	/*
	 * Since we're not inserting through an extent iterator
	 * (BTREE_ITER_all_snapshots iterators aren't extent iterators),
	 * we aren't using the extent overwrite path to delete, we're
	 * just using the normal key deletion path:
	 */
	if (bkey_deleted(&n->k) && !(iter->flags & BTREE_ITER_is_extents))
		n->k.size = 0;

	return bch2_trans_relock(trans) ?:
		bch2_trans_update(trans, iter, n, BTREE_UPDATE_internal_snapshot_node) ?:
		bch2_trans_commit(trans, NULL, NULL, BCH_TRANS_COMMIT_no_enospc);
}

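/*
 * Prepare a data update: snapshot the key being moved, initialize the write
 * op from @io_opts and @data_opts, take device refs and nocow bucket locks
 * for each pointer, and work out how many new replicas (and how big a disk
 * reservation) are needed from the durability of the pointers being kept
 * versus rewritten.
 *
 * Returns -BCH_ERR_data_update_done if there turns out to be no work to do.
 */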
int bch2_data_update_init(struct btree_trans *trans,
			  struct btree_iter *iter,
			  struct moving_context *ctxt,
			  struct data_update *m,
			  struct write_point_specifier wp,
			  struct bch_io_opts io_opts,
			  struct data_update_opts data_opts,
			  enum btree_id btree_id,
			  struct bkey_s_c k)
{
	struct bch_fs *c = trans->c;
	struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
	const union bch_extent_entry *entry;
	struct extent_ptr_decoded p;
	unsigned i, reserve_sectors = k.k->size * data_opts.extra_replicas;
	unsigned ptrs_locked = 0;
	int ret = 0;

	/*
	 * If the fs is corrupt, we may have a key for a snapshot node that
	 * doesn't exist, and we have to check for this because we go rw
	 * before repairing the snapshots table - just skip it, we can move it
	 * later.
	 */
	if (unlikely(k.k->p.snapshot && !bch2_snapshot_equiv(c, k.k->p.snapshot)))
		return -BCH_ERR_data_update_done;

	bch2_bkey_buf_init(&m->k);
	bch2_bkey_buf_reassemble(&m->k, c, k);
	m->btree_id = btree_id;
	m->data_opts = data_opts;
	m->ctxt = ctxt;
	m->stats = ctxt ? ctxt->stats : NULL;

	bch2_write_op_init(&m->op, c, io_opts);
	m->op.pos = bkey_start_pos(k.k);
	m->op.version = k.k->version;
	m->op.target = data_opts.target;
	m->op.write_point = wp;
	m->op.nr_replicas = 0;
	m->op.flags |= BCH_WRITE_PAGES_STABLE|
		BCH_WRITE_PAGES_OWNED|
		BCH_WRITE_DATA_ENCODED|
		BCH_WRITE_MOVE|
		m->data_opts.write_flags;
	m->op.compression_opt = background_compression(io_opts);
	m->op.watermark = m->data_opts.btree_insert_flags & BCH_WATERMARK_MASK;

	bkey_for_each_ptr(ptrs, ptr) {
		if (!bch2_dev_tryget(c, ptr->dev)) {
			bkey_for_each_ptr(ptrs, ptr2) {
				if (ptr2 == ptr)
					break;
				bch2_dev_put(bch2_dev_have_ref(c, ptr2->dev));
			}
			return -BCH_ERR_data_update_done;
		}
	}

	unsigned durability_have = 0, durability_removing = 0;

	i = 0;
	bkey_for_each_ptr_decode(k.k, ptrs, p, entry) {
		struct bch_dev *ca = bch2_dev_have_ref(c, p.ptr.dev);
		struct bpos bucket = PTR_BUCKET_POS(ca, &p.ptr);
		bool locked;

		rcu_read_lock();
		if (((1U << i) & m->data_opts.rewrite_ptrs)) {
			BUG_ON(p.ptr.cached);

			if (crc_is_compressed(p.crc))
				reserve_sectors += k.k->size;

			m->op.nr_replicas += bch2_extent_ptr_desired_durability(c, &p);
			durability_removing += bch2_extent_ptr_desired_durability(c, &p);
		} else if (!p.ptr.cached &&
			   !((1U << i) & m->data_opts.kill_ptrs)) {
			bch2_dev_list_add_dev(&m->op.devs_have, p.ptr.dev);
			durability_have += bch2_extent_ptr_durability(c, &p);
		}
		rcu_read_unlock();

		/*
		 * op->csum_type is normally initialized from the fs/file's
		 * current options - but if an extent is encrypted, we require
		 * that it stays encrypted:
		 */
		if (bch2_csum_type_is_encryption(p.crc.csum_type)) {
			m->op.nonce = p.crc.nonce + p.crc.offset;
			m->op.csum_type = p.crc.csum_type;
		}

		if (p.crc.compression_type == BCH_COMPRESSION_TYPE_incompressible)
			m->op.incompressible = true;

		if (c->opts.nocow_enabled) {
			if (ctxt) {
				move_ctxt_wait_event(ctxt,
						(locked = bch2_bucket_nocow_trylock(&c->nocow_locks,
									  bucket, 0)) ||
						list_empty(&ctxt->ios));

				if (!locked)
					bch2_bucket_nocow_lock(&c->nocow_locks, bucket, 0);
			} else {
				if (!bch2_bucket_nocow_trylock(&c->nocow_locks, bucket, 0)) {
					ret = -BCH_ERR_nocow_lock_blocked;
					goto err;
				}
			}
			ptrs_locked |= (1U << i);
		}

		i++;
	}

	unsigned durability_required = max(0, (int) (io_opts.data_replicas - durability_have));

	/*
	 * If current extent durability is less than io_opts.data_replicas,
	 * we're not trying to rereplicate the extent up to data_replicas here -
	 * unless extra_replicas was specified
	 *
	 * Increasing replication is an explicit operation triggered by
	 * rereplicate, currently, so that users don't get an unexpected -ENOSPC
	 */
	if (!(m->data_opts.write_flags & BCH_WRITE_CACHED) &&
	    !durability_required) {
		m->data_opts.kill_ptrs |= m->data_opts.rewrite_ptrs;
		m->data_opts.rewrite_ptrs = 0;
		/* if iter == NULL, it's just a promote */
		if (iter)
			ret = bch2_extent_drop_ptrs(trans, iter, k, m->data_opts);
		goto done;
	}

	m->op.nr_replicas = min(durability_removing, durability_required) +
		m->data_opts.extra_replicas;

	/*
	 * If device(s) were set to durability=0 after data was written to them
	 * we can end up with a durability=0 extent, and the normal algorithm
	 * that tries not to increase durability doesn't work:
	 */
	if (!(durability_have + durability_removing))
		m->op.nr_replicas = max((unsigned) m->op.nr_replicas, 1);

	m->op.nr_replicas_required = m->op.nr_replicas;

	if (reserve_sectors) {
		ret = bch2_disk_reservation_add(c, &m->op.res, reserve_sectors,
				m->data_opts.extra_replicas
				? 0
				: BCH_DISK_RESERVATION_NOFAIL);
		if (ret)
			goto err;
	}

	if (bkey_extent_is_unwritten(k)) {
		bch2_update_unwritten_extent(trans, m);
		goto done;
	}

	return 0;
err:
	i = 0;
	bkey_for_each_ptr_decode(k.k, ptrs, p, entry) {
		struct bch_dev *ca = bch2_dev_have_ref(c, p.ptr.dev);
		struct bpos bucket = PTR_BUCKET_POS(ca, &p.ptr);
		if ((1U << i) & ptrs_locked)
			bch2_bucket_nocow_unlock(&c->nocow_locks, bucket, 0);
		bch2_dev_put(ca);
		i++;
	}

	bch2_bkey_buf_exit(&m->k, c);
	bch2_bio_free_pages_pool(c, &m->op.wbio.bio);
	return ret;
done:
	bch2_data_update_exit(m);
	return ret ?: -BCH_ERR_data_update_done;
}

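/*
 * Cached pointers aren't rewritten, only dropped: convert any rewrite_ptrs
 * bits that refer to cached pointers into kill_ptrs bits.
 */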
void bch2_data_update_opts_normalize(struct bkey_s_c k, struct data_update_opts *opts)
{
	struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
	unsigned i = 0;

	bkey_for_each_ptr(ptrs, ptr) {
		if ((opts->rewrite_ptrs & (1U << i)) && ptr->cached) {
			opts->kill_ptrs |= 1U << i;
			opts->rewrite_ptrs ^= 1U << i;
		}

		i++;
	}
}
695}