// SPDX-License-Identifier: GPL-2.0

#include "bcachefs.h"
#include "alloc_foreground.h"
#include "bkey_buf.h"
#include "btree_update.h"
#include "buckets.h"
#include "data_update.h"
#include "ec.h"
#include "error.h"
#include "extents.h"
#include "io_write.h"
#include "keylist.h"
#include "move.h"
#include "nocow_locking.h"
#include "rebalance.h"
#include "subvolume.h"
#include "trace.h"

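/*
 * Tracepoint helper: render the key that was just moved to text and emit the
 * move_extent_finish event. The printbuf is only allocated when the
 * tracepoint is actually enabled.
 */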
static void trace_move_extent_finish2(struct bch_fs *c, struct bkey_s_c k)
{
	if (trace_move_extent_finish_enabled()) {
		struct printbuf buf = PRINTBUF;

		bch2_bkey_val_to_text(&buf, c, k);
		trace_move_extent_finish(c, buf.buf);
		printbuf_exit(&buf);
	}
}

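/*
 * Tracepoint helper for the failure path: dump the old, new and written keys,
 * plus which pointers were marked for rewrite and which of those were
 * actually found in the key being updated, so races can be debugged from the
 * trace log alone.
 */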
static void trace_move_extent_fail2(struct data_update *m,
				    struct bkey_s_c new,
				    struct bkey_s_c wrote,
				    struct bkey_i *insert,
				    const char *msg)
{
	struct bch_fs *c = m->op.c;
	struct bkey_s_c old = bkey_i_to_s_c(m->k.k);
	const union bch_extent_entry *entry;
	struct bch_extent_ptr *ptr;
	struct extent_ptr_decoded p;
	struct printbuf buf = PRINTBUF;
	unsigned i, rewrites_found = 0;

	if (!trace_move_extent_fail_enabled())
		return;

	prt_str(&buf, msg);

	if (insert) {
		i = 0;
		bkey_for_each_ptr_decode(old.k, bch2_bkey_ptrs_c(old), p, entry) {
			if (((1U << i) & m->data_opts.rewrite_ptrs) &&
			    (ptr = bch2_extent_has_ptr(old, p, bkey_i_to_s(insert))) &&
			    !ptr->cached)
				rewrites_found |= 1U << i;
			i++;
		}
	}

	prt_printf(&buf, "\nrewrite ptrs: %u%u%u%u",
		   (m->data_opts.rewrite_ptrs & (1 << 0)) != 0,
		   (m->data_opts.rewrite_ptrs & (1 << 1)) != 0,
		   (m->data_opts.rewrite_ptrs & (1 << 2)) != 0,
		   (m->data_opts.rewrite_ptrs & (1 << 3)) != 0);

	prt_printf(&buf, "\nrewrites found: %u%u%u%u",
		   (rewrites_found & (1 << 0)) != 0,
		   (rewrites_found & (1 << 1)) != 0,
		   (rewrites_found & (1 << 2)) != 0,
		   (rewrites_found & (1 << 3)) != 0);

	prt_str(&buf, "\nold: ");
	bch2_bkey_val_to_text(&buf, c, old);

	prt_str(&buf, "\nnew: ");
	bch2_bkey_val_to_text(&buf, c, new);

	prt_str(&buf, "\nwrote: ");
	bch2_bkey_val_to_text(&buf, c, wrote);

	if (insert) {
		prt_str(&buf, "\ninsert: ");
		bch2_bkey_val_to_text(&buf, c, bkey_i_to_s_c(insert));
	}

	trace_move_extent_fail(c, buf.buf);
	printbuf_exit(&buf);
}

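/*
 * Commit the result of a data update: for each key in the write op's keylist,
 * re-fetch the extent we moved, check that it still matches what we read (we
 * may have raced with other updates), drop the pointers that were rewritten,
 * resolve conflicts with replicas written in the meantime, and insert the
 * updated extent back into the btree.
 */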
static int __bch2_data_update_index_update(struct btree_trans *trans,
					   struct bch_write_op *op)
{
	struct bch_fs *c = op->c;
	struct btree_iter iter;
	struct data_update *m =
		container_of(op, struct data_update, op);
	struct keylist *keys = &op->insert_keys;
	struct bkey_buf _new, _insert;
	int ret = 0;

	bch2_bkey_buf_init(&_new);
	bch2_bkey_buf_init(&_insert);
	bch2_bkey_buf_realloc(&_insert, c, U8_MAX);

	bch2_trans_iter_init(trans, &iter, m->btree_id,
			     bkey_start_pos(&bch2_keylist_front(keys)->k),
			     BTREE_ITER_SLOTS|BTREE_ITER_INTENT);

	while (1) {
		struct bkey_s_c k;
		struct bkey_s_c old = bkey_i_to_s_c(m->k.k);
		struct bkey_i *insert = NULL;
		struct bkey_i_extent *new;
		const union bch_extent_entry *entry_c;
		union bch_extent_entry *entry;
		struct extent_ptr_decoded p;
		struct bch_extent_ptr *ptr;
		const struct bch_extent_ptr *ptr_c;
		struct bpos next_pos;
		bool should_check_enospc;
		s64 i_sectors_delta = 0, disk_sectors_delta = 0;
		unsigned rewrites_found = 0, durability, i;

		bch2_trans_begin(trans);

		k = bch2_btree_iter_peek_slot(&iter);
		ret = bkey_err(k);
		if (ret)
			goto err;

		new = bkey_i_to_extent(bch2_keylist_front(keys));

		if (!bch2_extents_match(k, old)) {
			trace_move_extent_fail2(m, k, bkey_i_to_s_c(&new->k_i),
						NULL, "no match:");
			goto nowork;
		}

		bkey_reassemble(_insert.k, k);
		insert = _insert.k;

		bch2_bkey_buf_copy(&_new, c, bch2_keylist_front(keys));
		new = bkey_i_to_extent(_new.k);
		bch2_cut_front(iter.pos, &new->k_i);

		bch2_cut_front(iter.pos, insert);
		bch2_cut_back(new->k.p, insert);
		bch2_cut_back(insert->k.p, &new->k_i);

		/*
		 * @old: extent that we read from
		 * @insert: key that we're going to update, initialized from
		 * extent currently in btree - same as @old unless we raced with
		 * other updates
		 * @new: extent with new pointers that we'll be adding to @insert
		 *
		 * First, drop rewrite_ptrs from @new:
		 */
		i = 0;
		bkey_for_each_ptr_decode(old.k, bch2_bkey_ptrs_c(old), p, entry_c) {
			if (((1U << i) & m->data_opts.rewrite_ptrs) &&
			    (ptr = bch2_extent_has_ptr(old, p, bkey_i_to_s(insert))) &&
			    !ptr->cached) {
				bch2_extent_ptr_set_cached(bkey_i_to_s(insert), ptr);
				rewrites_found |= 1U << i;
			}
			i++;
		}

		if (m->data_opts.rewrite_ptrs &&
		    !rewrites_found &&
		    bch2_bkey_durability(c, k) >= m->op.opts.data_replicas) {
			trace_move_extent_fail2(m, k, bkey_i_to_s_c(&new->k_i), insert, "no rewrites found:");
			goto nowork;
		}

		/*
		 * A replica that we just wrote might conflict with a replica
		 * that we want to keep, due to racing with another move:
		 */
restart_drop_conflicting_replicas:
		extent_for_each_ptr(extent_i_to_s(new), ptr)
			if ((ptr_c = bch2_bkey_has_device_c(bkey_i_to_s_c(insert), ptr->dev)) &&
			    !ptr_c->cached) {
				bch2_bkey_drop_ptr_noerror(bkey_i_to_s(&new->k_i), ptr);
				goto restart_drop_conflicting_replicas;
			}

		if (!bkey_val_u64s(&new->k)) {
			trace_move_extent_fail2(m, k, bkey_i_to_s_c(&new->k_i), insert, "new replicas conflicted:");
			goto nowork;
		}

		/* Now, drop pointers that conflict with what we just wrote: */
		extent_for_each_ptr_decode(extent_i_to_s(new), p, entry)
			if ((ptr = bch2_bkey_has_device(bkey_i_to_s(insert), p.ptr.dev)))
				bch2_bkey_drop_ptr_noerror(bkey_i_to_s(insert), ptr);

		durability = bch2_bkey_durability(c, bkey_i_to_s_c(insert)) +
			bch2_bkey_durability(c, bkey_i_to_s_c(&new->k_i));

		/* Now, drop excess replicas: */
restart_drop_extra_replicas:
		bkey_for_each_ptr_decode(old.k, bch2_bkey_ptrs(bkey_i_to_s(insert)), p, entry) {
			unsigned ptr_durability = bch2_extent_ptr_durability(c, &p);

			if (!p.ptr.cached &&
			    durability - ptr_durability >= m->op.opts.data_replicas) {
				durability -= ptr_durability;

				bch2_extent_ptr_set_cached(bkey_i_to_s(insert), &entry->ptr);
				goto restart_drop_extra_replicas;
			}
		}

		/* Finally, add the pointers we just wrote: */
		extent_for_each_ptr_decode(extent_i_to_s(new), p, entry)
			bch2_extent_ptr_decoded_append(insert, &p);

		bch2_bkey_narrow_crcs(insert, (struct bch_extent_crc_unpacked) { 0 });
		bch2_extent_normalize(c, bkey_i_to_s(insert));

		ret = bch2_sum_sector_overwrites(trans, &iter, insert,
						 &should_check_enospc,
						 &i_sectors_delta,
						 &disk_sectors_delta);
		if (ret)
			goto err;

		if (disk_sectors_delta > (s64) op->res.sectors) {
			ret = bch2_disk_reservation_add(c, &op->res,
						disk_sectors_delta - op->res.sectors,
						!should_check_enospc
						? BCH_DISK_RESERVATION_NOFAIL : 0);
			if (ret)
				goto out;
		}

		next_pos = insert->k.p;

		/*
		 * Check for nonce offset inconsistency:
		 * This is debug code - we've been seeing this bug rarely, and
		 * it's been hard to reproduce, so this should give us some more
		 * information when it does occur:
		 */
		struct printbuf err = PRINTBUF;
		int invalid = bch2_bkey_invalid(c, bkey_i_to_s_c(insert), __btree_node_type(0, m->btree_id), 0, &err);
		printbuf_exit(&err);

		if (invalid) {
			struct printbuf buf = PRINTBUF;

			prt_str(&buf, "about to insert invalid key in data update path");
			prt_str(&buf, "\nold: ");
			bch2_bkey_val_to_text(&buf, c, old);
			prt_str(&buf, "\nk: ");
			bch2_bkey_val_to_text(&buf, c, k);
			prt_str(&buf, "\nnew: ");
			bch2_bkey_val_to_text(&buf, c, bkey_i_to_s_c(insert));

			bch2_print_string_as_lines(KERN_ERR, buf.buf);
			printbuf_exit(&buf);

			bch2_fatal_error(c);
			goto out;
		}

		ret =   bch2_insert_snapshot_whiteouts(trans, m->btree_id,
						k.k->p, bkey_start_pos(&insert->k)) ?:
			bch2_insert_snapshot_whiteouts(trans, m->btree_id,
						k.k->p, insert->k.p) ?:
			bch2_bkey_set_needs_rebalance(c, insert,
						      op->opts.background_target,
						      op->opts.background_compression) ?:
			bch2_trans_update(trans, &iter, insert,
					  BTREE_UPDATE_INTERNAL_SNAPSHOT_NODE) ?:
			bch2_trans_commit(trans, &op->res,
					  NULL,
					  BTREE_INSERT_NOCHECK_RW|
					  BTREE_INSERT_NOFAIL|
					  m->data_opts.btree_insert_flags);
		if (!ret) {
			bch2_btree_iter_set_pos(&iter, next_pos);

			this_cpu_add(c->counters[BCH_COUNTER_move_extent_finish], new->k.size);
			trace_move_extent_finish2(c, bkey_i_to_s_c(&new->k_i));
		}
err:
		if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
			ret = 0;
		if (ret)
			break;
next:
		while (bkey_ge(iter.pos, bch2_keylist_front(keys)->k.p)) {
			bch2_keylist_pop_front(keys);
			if (bch2_keylist_empty(keys))
				goto out;
		}
		continue;
nowork:
		if (m->stats) {
			BUG_ON(k.k->p.offset <= iter.pos.offset);
			atomic64_inc(&m->stats->keys_raced);
			atomic64_add(k.k->p.offset - iter.pos.offset,
				     &m->stats->sectors_raced);
		}

		this_cpu_inc(c->counters[BCH_COUNTER_move_extent_fail]);

		bch2_btree_iter_advance(&iter);
		goto next;
	}
out:
	bch2_trans_iter_exit(trans, &iter);
	bch2_bkey_buf_exit(&_insert, c);
	bch2_bkey_buf_exit(&_new, c);
	BUG_ON(bch2_err_matches(ret, BCH_ERR_transaction_restart));
	return ret;
}

int bch2_data_update_index_update(struct bch_write_op *op)
{
	return bch2_trans_run(op->c, __bch2_data_update_index_update(trans, op));
}

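/*
 * Called when the read half of a data update completes: hand the checksum
 * state from the read over to the write op and kick off the write.
 */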
void bch2_data_update_read_done(struct data_update *m,
				struct bch_extent_crc_unpacked crc)
{
	/* write bio must own pages: */
	BUG_ON(!m->op.wbio.bio.bi_vcnt);

	m->op.crc = crc;
	m->op.wbio.bio.bi_iter.bi_size = crc.compressed_size << 9;

	closure_call(&m->op.cl, bch2_write, NULL, NULL);
}

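/*
 * Tear down a data update: drop the nocow locks and device refs taken in
 * bch2_data_update_init(), then release the key buffer, disk reservation and
 * bio pages.
 */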
void bch2_data_update_exit(struct data_update *update)
{
	struct bch_fs *c = update->op.c;
	struct bkey_ptrs_c ptrs =
		bch2_bkey_ptrs_c(bkey_i_to_s_c(update->k.k));
	const struct bch_extent_ptr *ptr;

	bkey_for_each_ptr(ptrs, ptr) {
		if (c->opts.nocow_enabled)
			bch2_bucket_nocow_unlock(&c->nocow_locks,
						 PTR_BUCKET_POS(c, ptr), 0);
		percpu_ref_put(&bch_dev_bkey_exists(c, ptr->dev)->ref);
	}

	bch2_bkey_buf_exit(&update->k, c);
	bch2_disk_reservation_put(c, &update->op.res);
	bch2_bio_free_pages_pool(c, &update->op.wbio.bio);
}

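/*
 * "Move" an unwritten extent: there's no data to copy, so instead of a
 * read+write we allocate new space chunk by chunk, mark the new pointers
 * unwritten, and do the index update for each chunk directly.
 */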
static void bch2_update_unwritten_extent(struct btree_trans *trans,
					 struct data_update *update)
{
	struct bch_fs *c = update->op.c;
	struct bio *bio = &update->op.wbio.bio;
	struct bkey_i_extent *e;
	struct write_point *wp;
	struct bch_extent_ptr *ptr;
	struct closure cl;
	struct btree_iter iter;
	struct bkey_s_c k;
	int ret;

	closure_init_stack(&cl);
	bch2_keylist_init(&update->op.insert_keys, update->op.inline_keys);

	while (bio_sectors(bio)) {
		unsigned sectors = bio_sectors(bio);

		bch2_trans_iter_init(trans, &iter, update->btree_id, update->op.pos,
				     BTREE_ITER_SLOTS);
		ret = lockrestart_do(trans, ({
			k = bch2_btree_iter_peek_slot(&iter);
			bkey_err(k);
		}));
		bch2_trans_iter_exit(trans, &iter);

		if (ret || !bch2_extents_match(k, bkey_i_to_s_c(update->k.k)))
			break;

		e = bkey_extent_init(update->op.insert_keys.top);
		e->k.p = update->op.pos;

		ret = bch2_alloc_sectors_start_trans(trans,
				update->op.target,
				false,
				update->op.write_point,
				&update->op.devs_have,
				update->op.nr_replicas,
				update->op.nr_replicas,
				update->op.watermark,
				0, &cl, &wp);
		if (bch2_err_matches(ret, BCH_ERR_operation_blocked)) {
			bch2_trans_unlock(trans);
			closure_sync(&cl);
			continue;
		}

		if (ret)
			return;

		sectors = min(sectors, wp->sectors_free);

		bch2_key_resize(&e->k, sectors);

		bch2_open_bucket_get(c, wp, &update->op.open_buckets);
		bch2_alloc_sectors_append_ptrs(c, wp, &e->k_i, sectors, false);
		bch2_alloc_sectors_done(c, wp);

		bio_advance(bio, sectors << 9);
		update->op.pos.offset += sectors;

		extent_for_each_ptr(extent_i_to_s(e), ptr)
			ptr->unwritten = true;
		bch2_keylist_push(&update->op.insert_keys);

		ret = __bch2_data_update_index_update(trans, &update->op);

		bch2_open_buckets_put(c, &update->op.open_buckets);

		if (ret)
			break;
	}

	if (closure_nr_remaining(&cl) != 1) {
		bch2_trans_unlock(trans);
		closure_sync(&cl);
	}
}

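/*
 * Drop the pointers selected by data_opts.kill_ptrs from @k and write the
 * result back; if that leaves the extent with no pointers,
 * bch2_extent_normalize() turns it into a KEY_TYPE_error key or a whiteout,
 * per the comment below.
 */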
int bch2_extent_drop_ptrs(struct btree_trans *trans,
			  struct btree_iter *iter,
			  struct bkey_s_c k,
			  struct data_update_opts data_opts)
{
	struct bch_fs *c = trans->c;
	struct bkey_i *n;
	int ret;

	n = bch2_bkey_make_mut_noupdate(trans, k);
	ret = PTR_ERR_OR_ZERO(n);
	if (ret)
		return ret;

	while (data_opts.kill_ptrs) {
		unsigned i = 0, drop = __fls(data_opts.kill_ptrs);
		struct bch_extent_ptr *ptr;

		bch2_bkey_drop_ptrs(bkey_i_to_s(n), ptr, i++ == drop);
		data_opts.kill_ptrs ^= 1U << drop;
	}

	/*
	 * If the new extent no longer has any pointers, bch2_extent_normalize()
	 * will do the appropriate thing with it (turning it into a
	 * KEY_TYPE_error key, or just a discard if it was a cached extent)
	 */
	bch2_extent_normalize(c, bkey_i_to_s(n));

	/*
	 * Since we're not inserting through an extent iterator
	 * (BTREE_ITER_ALL_SNAPSHOTS iterators aren't extent iterators),
	 * we aren't using the extent overwrite path to delete, we're
	 * just using the normal key deletion path:
	 */
	if (bkey_deleted(&n->k) && !(iter->flags & BTREE_ITER_IS_EXTENTS))
		n->k.size = 0;

	return bch2_trans_relock(trans) ?:
		bch2_trans_update(trans, iter, n, BTREE_UPDATE_INTERNAL_SNAPSHOT_NODE) ?:
		bch2_trans_commit(trans, NULL, NULL, BTREE_INSERT_NOFAIL);
}

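/*
 * Set up a data update: take device refs and nocow locks on the buckets the
 * extent lives in, initialize the write op from @io_opts/@data_opts, account
 * the durability we're removing vs. keeping to size the write, and reserve
 * space for any extra replicas. Unwritten extents take a separate path with
 * no read or write.
 */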
int bch2_data_update_init(struct btree_trans *trans,
			  struct btree_iter *iter,
			  struct moving_context *ctxt,
			  struct data_update *m,
			  struct write_point_specifier wp,
			  struct bch_io_opts io_opts,
			  struct data_update_opts data_opts,
			  enum btree_id btree_id,
			  struct bkey_s_c k)
{
	struct bch_fs *c = trans->c;
	struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
	const union bch_extent_entry *entry;
	struct extent_ptr_decoded p;
	const struct bch_extent_ptr *ptr;
	unsigned i, reserve_sectors = k.k->size * data_opts.extra_replicas;
	unsigned ptrs_locked = 0;
	int ret = 0;

	bch2_bkey_buf_init(&m->k);
	bch2_bkey_buf_reassemble(&m->k, c, k);
	m->btree_id	= btree_id;
	m->data_opts	= data_opts;
	m->ctxt		= ctxt;
	m->stats	= ctxt ? ctxt->stats : NULL;

	bch2_write_op_init(&m->op, c, io_opts);
	m->op.pos	= bkey_start_pos(k.k);
	m->op.version	= k.k->version;
	m->op.target	= data_opts.target;
	m->op.write_point = wp;
	m->op.nr_replicas = 0;
	m->op.flags	|= BCH_WRITE_PAGES_STABLE|
		BCH_WRITE_PAGES_OWNED|
		BCH_WRITE_DATA_ENCODED|
		BCH_WRITE_MOVE|
		m->data_opts.write_flags;
	m->op.compression_opt	= io_opts.background_compression ?: io_opts.compression;
	m->op.watermark		= m->data_opts.btree_insert_flags & BCH_WATERMARK_MASK;

	bkey_for_each_ptr(ptrs, ptr)
		percpu_ref_get(&bch_dev_bkey_exists(c, ptr->dev)->ref);

	unsigned durability_have = 0, durability_removing = 0;

	i = 0;
	bkey_for_each_ptr_decode(k.k, ptrs, p, entry) {
		bool locked;

		if (((1U << i) & m->data_opts.rewrite_ptrs)) {
			BUG_ON(p.ptr.cached);

			if (crc_is_compressed(p.crc))
				reserve_sectors += k.k->size;

			m->op.nr_replicas += bch2_extent_ptr_desired_durability(c, &p);
			durability_removing += bch2_extent_ptr_desired_durability(c, &p);
		} else if (!p.ptr.cached &&
			   !((1U << i) & m->data_opts.kill_ptrs)) {
			bch2_dev_list_add_dev(&m->op.devs_have, p.ptr.dev);
			durability_have += bch2_extent_ptr_durability(c, &p);
		}

		/*
		 * op->csum_type is normally initialized from the fs/file's
		 * current options - but if an extent is encrypted, we require
		 * that it stays encrypted:
		 */
		if (bch2_csum_type_is_encryption(p.crc.csum_type)) {
			m->op.nonce	= p.crc.nonce + p.crc.offset;
			m->op.csum_type = p.crc.csum_type;
		}

		if (p.crc.compression_type == BCH_COMPRESSION_TYPE_incompressible)
			m->op.incompressible = true;

		if (c->opts.nocow_enabled) {
			if (ctxt) {
				move_ctxt_wait_event(ctxt,
						(locked = bch2_bucket_nocow_trylock(&c->nocow_locks,
									  PTR_BUCKET_POS(c, &p.ptr), 0)) ||
						(!atomic_read(&ctxt->read_sectors) &&
						 !atomic_read(&ctxt->write_sectors)));

				if (!locked)
					bch2_bucket_nocow_lock(&c->nocow_locks,
							       PTR_BUCKET_POS(c, &p.ptr), 0);
			} else {
				if (!bch2_bucket_nocow_trylock(&c->nocow_locks,
							       PTR_BUCKET_POS(c, &p.ptr), 0)) {
					ret = -BCH_ERR_nocow_lock_blocked;
					goto err;
				}
			}
			ptrs_locked |= (1U << i);
		}

		i++;
	}

	/*
	 * If current extent durability is less than io_opts.data_replicas,
	 * we're not trying to rereplicate the extent up to data_replicas here -
	 * unless extra_replicas was specified
	 *
	 * Increasing replication is an explicit operation triggered by
	 * rereplicate, currently, so that users don't get an unexpected -ENOSPC
	 */
	if (!(m->data_opts.write_flags & BCH_WRITE_CACHED) &&
	    durability_have >= io_opts.data_replicas) {
		m->data_opts.kill_ptrs |= m->data_opts.rewrite_ptrs;
		m->data_opts.rewrite_ptrs = 0;
		/* if iter == NULL, it's just a promote */
		if (iter)
			ret = bch2_extent_drop_ptrs(trans, iter, k, m->data_opts);
		goto done;
	}

	m->op.nr_replicas = min(durability_removing, io_opts.data_replicas - durability_have) +
		m->data_opts.extra_replicas;
	m->op.nr_replicas_required = m->op.nr_replicas;

	BUG_ON(!m->op.nr_replicas);

	if (reserve_sectors) {
		ret = bch2_disk_reservation_add(c, &m->op.res, reserve_sectors,
				m->data_opts.extra_replicas
				? 0
				: BCH_DISK_RESERVATION_NOFAIL);
		if (ret)
			goto err;
	}

	if (bkey_extent_is_unwritten(k)) {
		bch2_update_unwritten_extent(trans, m);
		goto done;
	}

	return 0;
err:
	i = 0;
	bkey_for_each_ptr_decode(k.k, ptrs, p, entry) {
		if ((1U << i) & ptrs_locked)
			bch2_bucket_nocow_unlock(&c->nocow_locks,
						 PTR_BUCKET_POS(c, &p.ptr), 0);
		percpu_ref_put(&bch_dev_bkey_exists(c, p.ptr.dev)->ref);
		i++;
	}

	bch2_bkey_buf_exit(&m->k, c);
	bch2_bio_free_pages_pool(c, &m->op.wbio.bio);
	return ret;
done:
	bch2_data_update_exit(m);
	return ret ?: -BCH_ERR_data_update_done;
}

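/*
 * A cached pointer is evicted rather than moved, so a request to rewrite one
 * is normalized into a request to drop it:
 */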
void bch2_data_update_opts_normalize(struct bkey_s_c k, struct data_update_opts *opts)
{
	struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
	const struct bch_extent_ptr *ptr;
	unsigned i = 0;

	bkey_for_each_ptr(ptrs, ptr) {
		if ((opts->rewrite_ptrs & (1U << i)) && ptr->cached) {
			opts->kill_ptrs	|= 1U << i;
			opts->rewrite_ptrs ^= 1U << i;
		}

		i++;
	}
}
653}