// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2007 Oracle. All rights reserved.
 * Copyright (C) 2022 Christoph Hellwig.
 */

#include <linux/bio.h>
#include "bio.h"
#include "ctree.h"
#include "volumes.h"
#include "raid56.h"
#include "async-thread.h"
#include "dev-replace.h"
#include "zoned.h"
#include "file-item.h"
#include "raid-stripe-tree.h"

static struct bio_set btrfs_bioset;
static struct bio_set btrfs_clone_bioset;
static struct bio_set btrfs_repair_bioset;
static mempool_t btrfs_failed_bio_pool;

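/*
 * Per-bio read-repair state: tracks the failed read bio and how many repair
 * reads are still in flight across the available mirrors.
 */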
struct btrfs_failed_bio {
	struct btrfs_bio *bbio;
	int num_copies;
	atomic_t repair_count;
};

/* Is this a data path I/O that needs storage layer checksum and repair? */
static inline bool is_data_bbio(struct btrfs_bio *bbio)
{
	return bbio->inode && is_data_inode(bbio->inode);
}

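/* Does this bbio hold a reference on an ordered extent? Only data writes do. */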
static bool bbio_has_ordered_extent(struct btrfs_bio *bbio)
{
	return is_data_bbio(bbio) && btrfs_op(&bbio->bio) == BTRFS_MAP_WRITE;
}

/*
 * Initialize a btrfs_bio structure. This skips the embedded bio itself as it
 * is already initialized by the block layer.
 */
void btrfs_bio_init(struct btrfs_bio *bbio, struct btrfs_fs_info *fs_info,
		    btrfs_bio_end_io_t end_io, void *private)
{
	memset(bbio, 0, offsetof(struct btrfs_bio, bio));
	bbio->fs_info = fs_info;
	bbio->end_io = end_io;
	bbio->private = private;
	atomic_set(&bbio->pending_ios, 1);
}

/*
 * Allocate a btrfs_bio structure. The btrfs_bio is the main I/O container for
 * btrfs, and is used for all I/O submitted through btrfs_submit_bbio().
 *
 * Just like the underlying bio_alloc_bioset it will not fail as it is backed by
 * a mempool.
 */
struct btrfs_bio *btrfs_bio_alloc(unsigned int nr_vecs, blk_opf_t opf,
				  struct btrfs_fs_info *fs_info,
				  btrfs_bio_end_io_t end_io, void *private)
{
	struct btrfs_bio *bbio;
	struct bio *bio;

	bio = bio_alloc_bioset(NULL, nr_vecs, opf, GFP_NOFS, &btrfs_bioset);
	bbio = btrfs_bio(bio);
	btrfs_bio_init(bbio, fs_info, end_io, private);
	return bbio;
}

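/*
 * Split off the first @map_length bytes of @orig_bbio into a new bbio from the
 * clone bioset. The split shares the ordered extent for data writes and keeps
 * the original bbio alive through its pending_ios count.
 */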
static struct btrfs_bio *btrfs_split_bio(struct btrfs_fs_info *fs_info,
					 struct btrfs_bio *orig_bbio,
					 u64 map_length)
{
	struct btrfs_bio *bbio;
	struct bio *bio;

	bio = bio_split(&orig_bbio->bio, map_length >> SECTOR_SHIFT, GFP_NOFS,
			&btrfs_clone_bioset);
	bbio = btrfs_bio(bio);
	btrfs_bio_init(bbio, fs_info, NULL, orig_bbio);
	bbio->inode = orig_bbio->inode;
	bbio->file_offset = orig_bbio->file_offset;
	orig_bbio->file_offset += map_length;
	if (bbio_has_ordered_extent(bbio)) {
		refcount_inc(&orig_bbio->ordered->refs);
		bbio->ordered = orig_bbio->ordered;
	}
	atomic_inc(&orig_bbio->pending_ios);
	return bbio;
}

/* Free a bio that was never submitted to the underlying device. */
static void btrfs_cleanup_bio(struct btrfs_bio *bbio)
{
	if (bbio_has_ordered_extent(bbio))
		btrfs_put_ordered_extent(bbio->ordered);
	bio_put(&bbio->bio);
}

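/*
 * Call the end_io handler, dropping the ordered extent reference held by data
 * write bbios only after the handler has run.
 */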
static void __btrfs_bio_end_io(struct btrfs_bio *bbio)
{
	if (bbio_has_ordered_extent(bbio)) {
		struct btrfs_ordered_extent *ordered = bbio->ordered;

		bbio->end_io(bbio);
		btrfs_put_ordered_extent(ordered);
	} else {
		bbio->end_io(bbio);
	}
}

static void btrfs_orig_write_end_io(struct bio *bio);

static void btrfs_bbio_propagate_error(struct btrfs_bio *bbio,
				       struct btrfs_bio *orig_bbio)
{
	/*
	 * For writes we tolerate nr_mirrors - 1 write failures, so we can't
	 * just blindly propagate a write failure here. Instead increment the
	 * error count in the original I/O context so that it is guaranteed to
	 * be larger than the error tolerance.
	 */
	if (bbio->bio.bi_end_io == &btrfs_orig_write_end_io) {
		struct btrfs_io_stripe *orig_stripe = orig_bbio->bio.bi_private;
		struct btrfs_io_context *orig_bioc = orig_stripe->bioc;

		atomic_add(orig_bioc->max_errors, &orig_bioc->error);
	} else {
		orig_bbio->bio.bi_status = bbio->bio.bi_status;
	}
}

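/*
 * Complete a bbio. For split bbios this propagates any error to the original
 * bbio and frees the split; the end_io handler only runs once all pending
 * I/Os of the original bbio have finished.
 */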
void btrfs_bio_end_io(struct btrfs_bio *bbio, blk_status_t status)
{
	bbio->bio.bi_status = status;
	if (bbio->bio.bi_pool == &btrfs_clone_bioset) {
		struct btrfs_bio *orig_bbio = bbio->private;

		if (bbio->bio.bi_status)
			btrfs_bbio_propagate_error(bbio, orig_bbio);
		btrfs_cleanup_bio(bbio);
		bbio = orig_bbio;
	}

	if (atomic_dec_and_test(&bbio->pending_ios))
		__btrfs_bio_end_io(bbio);
}

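/* Cycle to the next mirror, wrapping around to the first copy. */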
static int next_repair_mirror(struct btrfs_failed_bio *fbio, int cur_mirror)
{
	if (cur_mirror == fbio->num_copies)
		return cur_mirror + 1 - fbio->num_copies;
	return cur_mirror + 1;
}

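/* Cycle to the previous mirror, wrapping around to the last copy. */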
static int prev_repair_mirror(struct btrfs_failed_bio *fbio, int cur_mirror)
{
	if (cur_mirror == 1)
		return fbio->num_copies;
	return cur_mirror - 1;
}

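/*
 * Drop one repair reference; once the last repair for this failed bio has
 * finished, complete the original bio and free the repair state.
 */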
static void btrfs_repair_done(struct btrfs_failed_bio *fbio)
{
	if (atomic_dec_and_test(&fbio->repair_count)) {
		btrfs_bio_end_io(fbio->bbio, fbio->bbio->bio.bi_status);
		mempool_free(fbio, &btrfs_failed_bio_pool);
	}
}

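/*
 * End I/O handler for a repair read. If this mirror is also bad, resubmit to
 * the next one; once a good copy has been read, write it back to all mirrors
 * tried before it to restore the redundancy.
 */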
static void btrfs_end_repair_bio(struct btrfs_bio *repair_bbio,
				 struct btrfs_device *dev)
{
	struct btrfs_failed_bio *fbio = repair_bbio->private;
	struct btrfs_inode *inode = repair_bbio->inode;
	struct btrfs_fs_info *fs_info = inode->root->fs_info;
	struct bio_vec *bv = bio_first_bvec_all(&repair_bbio->bio);
	int mirror = repair_bbio->mirror_num;

	/*
	 * We can only trigger this for data bios, which don't support larger
	 * folios yet.
	 */
	ASSERT(folio_order(page_folio(bv->bv_page)) == 0);

	if (repair_bbio->bio.bi_status ||
	    !btrfs_data_csum_ok(repair_bbio, dev, 0, bv)) {
		bio_reset(&repair_bbio->bio, NULL, REQ_OP_READ);
		repair_bbio->bio.bi_iter = repair_bbio->saved_iter;

		mirror = next_repair_mirror(fbio, mirror);
		if (mirror == fbio->bbio->mirror_num) {
			btrfs_debug(fs_info, "no mirror left");
			fbio->bbio->bio.bi_status = BLK_STS_IOERR;
			goto done;
		}

		btrfs_submit_bbio(repair_bbio, mirror);
		return;
	}

	do {
		mirror = prev_repair_mirror(fbio, mirror);
		btrfs_repair_io_failure(fs_info, btrfs_ino(inode),
				repair_bbio->file_offset, fs_info->sectorsize,
				repair_bbio->saved_iter.bi_sector << SECTOR_SHIFT,
				page_folio(bv->bv_page), bv->bv_offset, mirror);
	} while (mirror != fbio->bbio->mirror_num);

done:
	btrfs_repair_done(fbio);
	bio_put(&repair_bbio->bio);
}

/*
 * Try to kick off a repair read to the next available mirror for a bad sector.
 *
 * This primarily tries to recover good data to serve the actual read request,
 * but when a read from another mirror succeeds it also writes the good data
 * back to the bad mirror(s) to restore the redundancy.
 */
static struct btrfs_failed_bio *repair_one_sector(struct btrfs_bio *failed_bbio,
						  u32 bio_offset,
						  struct bio_vec *bv,
						  struct btrfs_failed_bio *fbio)
{
	struct btrfs_inode *inode = failed_bbio->inode;
	struct btrfs_fs_info *fs_info = inode->root->fs_info;
	const u32 sectorsize = fs_info->sectorsize;
	const u64 logical = (failed_bbio->saved_iter.bi_sector << SECTOR_SHIFT);
	struct btrfs_bio *repair_bbio;
	struct bio *repair_bio;
	int num_copies;
	int mirror;

	btrfs_debug(fs_info, "repair read error: read error at %llu",
		    failed_bbio->file_offset + bio_offset);

	num_copies = btrfs_num_copies(fs_info, logical, sectorsize);
	if (num_copies == 1) {
		btrfs_debug(fs_info, "no copy to repair from");
		failed_bbio->bio.bi_status = BLK_STS_IOERR;
		return fbio;
	}

	if (!fbio) {
		fbio = mempool_alloc(&btrfs_failed_bio_pool, GFP_NOFS);
		fbio->bbio = failed_bbio;
		fbio->num_copies = num_copies;
		atomic_set(&fbio->repair_count, 1);
	}

	atomic_inc(&fbio->repair_count);

	repair_bio = bio_alloc_bioset(NULL, 1, REQ_OP_READ, GFP_NOFS,
				      &btrfs_repair_bioset);
	repair_bio->bi_iter.bi_sector = failed_bbio->saved_iter.bi_sector;
	__bio_add_page(repair_bio, bv->bv_page, bv->bv_len, bv->bv_offset);

	repair_bbio = btrfs_bio(repair_bio);
	btrfs_bio_init(repair_bbio, fs_info, NULL, fbio);
	repair_bbio->inode = failed_bbio->inode;
	repair_bbio->file_offset = failed_bbio->file_offset + bio_offset;

	mirror = next_repair_mirror(fbio, failed_bbio->mirror_num);
	btrfs_debug(fs_info, "submitting repair read to mirror %d", mirror);
	btrfs_submit_bbio(repair_bbio, mirror);
	return fbio;
}

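/*
 * Verify the checksum of every sector in a completed data read and kick off
 * repair reads for any sector that failed, either with an I/O error or a
 * checksum mismatch.
 */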
static void btrfs_check_read_bio(struct btrfs_bio *bbio, struct btrfs_device *dev)
{
	struct btrfs_inode *inode = bbio->inode;
	struct btrfs_fs_info *fs_info = inode->root->fs_info;
	u32 sectorsize = fs_info->sectorsize;
	struct bvec_iter *iter = &bbio->saved_iter;
	blk_status_t status = bbio->bio.bi_status;
	struct btrfs_failed_bio *fbio = NULL;
	u32 offset = 0;

	/* Read-repair requires the inode field to be set by the submitter. */
	ASSERT(inode);

	/*
	 * Hand off repair bios to the repair code as there is no upper level
	 * submitter for them.
	 */
	if (bbio->bio.bi_pool == &btrfs_repair_bioset) {
		btrfs_end_repair_bio(bbio, dev);
		return;
	}

	/* Clear the I/O error. A failed repair will reset it. */
	bbio->bio.bi_status = BLK_STS_OK;

	while (iter->bi_size) {
		struct bio_vec bv = bio_iter_iovec(&bbio->bio, *iter);

		bv.bv_len = min(bv.bv_len, sectorsize);
		if (status || !btrfs_data_csum_ok(bbio, dev, offset, &bv))
			fbio = repair_one_sector(bbio, offset, &bv, fbio);

		bio_advance_iter_single(&bbio->bio, iter, sectorsize);
		offset += sectorsize;
	}

	if (bbio->csum != bbio->csum_inline)
		kfree(bbio->csum);

	if (fbio)
		btrfs_repair_done(fbio);
	else
		btrfs_bio_end_io(bbio, bbio->bio.bi_status);
}

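/* Record an I/O failure in the per-device error statistics. */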
static void btrfs_log_dev_io_error(struct bio *bio, struct btrfs_device *dev)
{
	if (!dev || !dev->bdev)
		return;
	if (bio->bi_status != BLK_STS_IOERR && bio->bi_status != BLK_STS_TARGET)
		return;

	if (btrfs_op(bio) == BTRFS_MAP_WRITE)
		btrfs_dev_stat_inc_and_print(dev, BTRFS_DEV_STAT_WRITE_ERRS);
	else if (!(bio->bi_opf & REQ_RAHEAD))
		btrfs_dev_stat_inc_and_print(dev, BTRFS_DEV_STAT_READ_ERRS);
	if (bio->bi_opf & REQ_PREFLUSH)
		btrfs_dev_stat_inc_and_print(dev, BTRFS_DEV_STAT_FLUSH_ERRS);
}

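/* Pick the end I/O workqueue: metadata and data completions run separately. */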
static struct workqueue_struct *btrfs_end_io_wq(struct btrfs_fs_info *fs_info,
						struct bio *bio)
{
	if (bio->bi_opf & REQ_META)
		return fs_info->endio_meta_workers;
	return fs_info->endio_workers;
}

static void btrfs_end_bio_work(struct work_struct *work)
{
	struct btrfs_bio *bbio = container_of(work, struct btrfs_bio, end_io_work);

	/* Metadata reads are checked and repaired by the submitter. */
	if (is_data_bbio(bbio))
		btrfs_check_read_bio(bbio, bbio->bio.bi_private);
	else
		btrfs_bio_end_io(bbio, bbio->bio.bi_status);
}

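/*
 * End I/O handler for the single-device fast path. Read completions are
 * punted to a workqueue for checksum verification and repair.
 */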
static void btrfs_simple_end_io(struct bio *bio)
{
	struct btrfs_bio *bbio = btrfs_bio(bio);
	struct btrfs_device *dev = bio->bi_private;
	struct btrfs_fs_info *fs_info = bbio->fs_info;

	btrfs_bio_counter_dec(fs_info);

	if (bio->bi_status)
		btrfs_log_dev_io_error(bio, dev);

	if (bio_op(bio) == REQ_OP_READ) {
		INIT_WORK(&bbio->end_io_work, btrfs_end_bio_work);
		queue_work(btrfs_end_io_wq(fs_info, bio), &bbio->end_io_work);
	} else {
		if (bio_op(bio) == REQ_OP_ZONE_APPEND && !bio->bi_status)
			btrfs_record_physical_zoned(bbio);
		btrfs_bio_end_io(bbio, bbio->bio.bi_status);
	}
}

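/* End I/O handler for reads and writes that went through the RAID56 code. */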
static void btrfs_raid56_end_io(struct bio *bio)
{
	struct btrfs_io_context *bioc = bio->bi_private;
	struct btrfs_bio *bbio = btrfs_bio(bio);

	btrfs_bio_counter_dec(bioc->fs_info);
	bbio->mirror_num = bioc->mirror_num;
	if (bio_op(bio) == REQ_OP_READ && is_data_bbio(bbio))
		btrfs_check_read_bio(bbio, NULL);
	else
		btrfs_bio_end_io(bbio, bbio->bio.bi_status);

	btrfs_put_bioc(bioc);
}

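/*
 * End I/O handler for the original bio of a mirrored write. The write only
 * fails if more stripes failed than the RAID profile can tolerate.
 */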
static void btrfs_orig_write_end_io(struct bio *bio)
{
	struct btrfs_io_stripe *stripe = bio->bi_private;
	struct btrfs_io_context *bioc = stripe->bioc;
	struct btrfs_bio *bbio = btrfs_bio(bio);

	btrfs_bio_counter_dec(bioc->fs_info);

	if (bio->bi_status) {
		atomic_inc(&bioc->error);
		btrfs_log_dev_io_error(bio, stripe->dev);
	}

	/*
	 * Only send an error to the higher layers if it is beyond the tolerance
	 * threshold.
	 */
	if (atomic_read(&bioc->error) > bioc->max_errors)
		bio->bi_status = BLK_STS_IOERR;
	else
		bio->bi_status = BLK_STS_OK;

	if (bio_op(bio) == REQ_OP_ZONE_APPEND && !bio->bi_status)
		stripe->physical = bio->bi_iter.bi_sector << SECTOR_SHIFT;

	btrfs_bio_end_io(bbio, bbio->bio.bi_status);
	btrfs_put_bioc(bioc);
}

static void btrfs_clone_write_end_io(struct bio *bio)
{
	struct btrfs_io_stripe *stripe = bio->bi_private;

	if (bio->bi_status) {
		atomic_inc(&stripe->bioc->error);
		btrfs_log_dev_io_error(bio, stripe->dev);
	} else if (bio_op(bio) == REQ_OP_ZONE_APPEND) {
		stripe->physical = bio->bi_iter.bi_sector << SECTOR_SHIFT;
	}

	/* Pass on control to the original bio this one was cloned from */
	bio_endio(stripe->bioc->orig_bio);
	bio_put(bio);
}

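/*
 * Submit a bio to a single device, failing it immediately if the device is
 * missing or, for a write, not writeable.
 */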
static void btrfs_submit_dev_bio(struct btrfs_device *dev, struct bio *bio)
{
	if (!dev || !dev->bdev ||
	    test_bit(BTRFS_DEV_STATE_MISSING, &dev->dev_state) ||
	    (btrfs_op(bio) == BTRFS_MAP_WRITE &&
	     !test_bit(BTRFS_DEV_STATE_WRITEABLE, &dev->dev_state))) {
		bio_io_error(bio);
		return;
	}

	bio_set_dev(bio, dev->bdev);

	/*
	 * For zone append writing, bi_sector must point to the beginning of
	 * the zone.
	 */
	if (bio_op(bio) == REQ_OP_ZONE_APPEND) {
		u64 physical = bio->bi_iter.bi_sector << SECTOR_SHIFT;
		u64 zone_start = round_down(physical, dev->fs_info->zone_size);

		ASSERT(btrfs_dev_is_sequential(dev, physical));
		bio->bi_iter.bi_sector = zone_start >> SECTOR_SHIFT;
	}
	btrfs_debug_in_rcu(dev->fs_info,
	"%s: rw %d 0x%x, sector=%llu, dev=%lu (%s id %llu), size=%u",
		__func__, bio_op(bio), bio->bi_opf, bio->bi_iter.bi_sector,
		(unsigned long)dev->bdev->bd_dev, btrfs_dev_name(dev),
		dev->devid, bio->bi_iter.bi_size);

	if (bio->bi_opf & REQ_BTRFS_CGROUP_PUNT)
		blkcg_punt_bio_submit(bio);
	else
		submit_bio(bio);
}

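/*
 * Submit one stripe of a mirrored write. The bio embedded in the btrfs_bio is
 * reused for the last mirror; all other mirrors get a clone.
 */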
static void btrfs_submit_mirrored_bio(struct btrfs_io_context *bioc, int dev_nr)
{
	struct bio *orig_bio = bioc->orig_bio, *bio;

	ASSERT(bio_op(orig_bio) != REQ_OP_READ);

	/* Reuse the bio embedded into the btrfs_bio for the last mirror */
	if (dev_nr == bioc->num_stripes - 1) {
		bio = orig_bio;
		bio->bi_end_io = btrfs_orig_write_end_io;
	} else {
		bio = bio_alloc_clone(NULL, orig_bio, GFP_NOFS, &fs_bio_set);
		bio_inc_remaining(orig_bio);
		bio->bi_end_io = btrfs_clone_write_end_io;
	}

	bio->bi_private = &bioc->stripes[dev_nr];
	bio->bi_iter.bi_sector = bioc->stripes[dev_nr].physical >> SECTOR_SHIFT;
	bioc->stripes[dev_nr].bioc = bioc;
	bioc->size = bio->bi_iter.bi_size;
	btrfs_submit_dev_bio(bioc->stripes[dev_nr].dev, bio);
}

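/*
 * Dispatch an already mapped bio: directly to a single device, to the RAID56
 * code for parity profiles, or once per mirror for plain mirrored writes.
 */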
static void btrfs_submit_bio(struct bio *bio, struct btrfs_io_context *bioc,
			     struct btrfs_io_stripe *smap, int mirror_num)
{
	if (!bioc) {
		/* Single mirror read/write fast path. */
		btrfs_bio(bio)->mirror_num = mirror_num;
		bio->bi_iter.bi_sector = smap->physical >> SECTOR_SHIFT;
		if (bio_op(bio) != REQ_OP_READ)
			btrfs_bio(bio)->orig_physical = smap->physical;
		bio->bi_private = smap->dev;
		bio->bi_end_io = btrfs_simple_end_io;
		btrfs_submit_dev_bio(smap->dev, bio);
	} else if (bioc->map_type & BTRFS_BLOCK_GROUP_RAID56_MASK) {
		/* Parity RAID write or read recovery. */
		bio->bi_private = bioc;
		bio->bi_end_io = btrfs_raid56_end_io;
		if (bio_op(bio) == REQ_OP_READ)
			raid56_parity_recover(bio, bioc, mirror_num);
		else
			raid56_parity_write(bio, bioc);
	} else {
		/* Write to multiple mirrors. */
		int total_devs = bioc->num_stripes;

		bioc->orig_bio = bio;
		for (int dev_nr = 0; dev_nr < total_devs; dev_nr++)
			btrfs_submit_mirrored_bio(bioc, dev_nr);
	}
}

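/* Compute checksums for a write using the metadata or data csum helper. */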
static blk_status_t btrfs_bio_csum(struct btrfs_bio *bbio)
{
	if (bbio->bio.bi_opf & REQ_META)
		return btree_csum_one_bio(bbio);
	return btrfs_csum_one_bio(bbio);
}

/*
 * Async submit bios are used to offload expensive checksumming onto the worker
 * threads.
 */
struct async_submit_bio {
	struct btrfs_bio *bbio;
	struct btrfs_io_context *bioc;
	struct btrfs_io_stripe smap;
	int mirror_num;
	struct btrfs_work work;
};

/*
 * In order to insert checksums into the metadata in large chunks, we wait
 * until bio submission time. All the pages in the bio are checksummed and
 * sums are attached onto the ordered extent record.
 *
 * At IO completion time the csums attached on the ordered extent record are
 * inserted into the btree.
 */
static void run_one_async_start(struct btrfs_work *work)
{
	struct async_submit_bio *async =
		container_of(work, struct async_submit_bio, work);
	blk_status_t ret;

	ret = btrfs_bio_csum(async->bbio);
	if (ret)
		async->bbio->bio.bi_status = ret;
}

/*
 * In order to insert checksums into the metadata in large chunks, we wait
 * until bio submission time. All the pages in the bio are checksummed and
 * sums are attached onto the ordered extent record.
 *
 * At IO completion time the csums attached on the ordered extent record are
 * inserted into the tree.
 *
 * If called with @do_free == true, then it will free the work struct.
 */
static void run_one_async_done(struct btrfs_work *work, bool do_free)
{
	struct async_submit_bio *async =
		container_of(work, struct async_submit_bio, work);
	struct bio *bio = &async->bbio->bio;

	if (do_free) {
		kfree(container_of(work, struct async_submit_bio, work));
		return;
	}

	/* If an error occurred we just want to clean up the bio and move on. */
	if (bio->bi_status) {
		btrfs_bio_end_io(async->bbio, async->bbio->bio.bi_status);
		return;
	}

	/*
	 * All of the bios that pass through here are from async helpers.
	 * Use REQ_BTRFS_CGROUP_PUNT to issue them from the owning cgroup's
	 * context. This changes nothing when cgroups aren't in use.
	 */
	bio->bi_opf |= REQ_BTRFS_CGROUP_PUNT;
	btrfs_submit_bio(bio, async->bioc, &async->smap, async->mirror_num);
}

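/* Should the checksumming for this write be offloaded to a workqueue? */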
static bool should_async_write(struct btrfs_bio *bbio)
{
	bool auto_csum_mode = true;

#ifdef CONFIG_BTRFS_DEBUG
	struct btrfs_fs_devices *fs_devices = bbio->fs_info->fs_devices;
	enum btrfs_offload_csum_mode csum_mode = READ_ONCE(fs_devices->offload_csum_mode);

	if (csum_mode == BTRFS_OFFLOAD_CSUM_FORCE_OFF)
		return false;

	auto_csum_mode = (csum_mode == BTRFS_OFFLOAD_CSUM_AUTO);
#endif

	/* Submit synchronously if the checksum implementation is fast. */
	if (auto_csum_mode && test_bit(BTRFS_FS_CSUM_IMPL_FAST, &bbio->fs_info->flags))
		return false;

	/*
	 * Try to defer the submission to a workqueue to parallelize the
	 * checksum calculation unless the I/O is issued synchronously.
	 */
	if (op_is_sync(bbio->bio.bi_opf))
		return false;

	/* Zoned devices require I/O to be submitted in order. */
	if ((bbio->bio.bi_opf & REQ_META) && btrfs_is_zoned(bbio->fs_info))
		return false;

	return true;
}

/*
 * Submit bio to an async queue.
 *
 * Return true if the work has been successfully submitted, else false.
 */
static bool btrfs_wq_submit_bio(struct btrfs_bio *bbio,
				struct btrfs_io_context *bioc,
				struct btrfs_io_stripe *smap, int mirror_num)
{
	struct btrfs_fs_info *fs_info = bbio->fs_info;
	struct async_submit_bio *async;

	async = kmalloc(sizeof(*async), GFP_NOFS);
	if (!async)
		return false;

	async->bbio = bbio;
	async->bioc = bioc;
	async->smap = *smap;
	async->mirror_num = mirror_num;

	btrfs_init_work(&async->work, run_one_async_start, run_one_async_done);
	btrfs_queue_work(fs_info->workers, &async->work);
	return true;
}

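/*
 * Limit a zone append write to the maximum zone append size and to a length
 * that the block layer would not have to split further.
 */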
static u64 btrfs_append_map_length(struct btrfs_bio *bbio, u64 map_length)
{
	unsigned int nr_segs;
	int sector_offset;

	map_length = min(map_length, bbio->fs_info->max_zone_append_size);
	sector_offset = bio_split_rw_at(&bbio->bio, &bbio->fs_info->limits,
					&nr_segs, map_length);
	if (sector_offset)
		return sector_offset << SECTOR_SHIFT;
	return map_length;
}

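/*
 * Map and submit the part of @bbio that fits into a single chunk, splitting
 * off the remainder first if needed. Returns true if the whole bio has been
 * consumed, false if the caller must submit another chunk.
 */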
static bool btrfs_submit_chunk(struct btrfs_bio *bbio, int mirror_num)
{
	struct btrfs_inode *inode = bbio->inode;
	struct btrfs_fs_info *fs_info = bbio->fs_info;
	struct bio *bio = &bbio->bio;
	u64 logical = bio->bi_iter.bi_sector << SECTOR_SHIFT;
	u64 length = bio->bi_iter.bi_size;
	u64 map_length = length;
	bool use_append = btrfs_use_zone_append(bbio);
	struct btrfs_io_context *bioc = NULL;
	struct btrfs_io_stripe smap;
	blk_status_t ret;
	int error;

	if (!bbio->inode || btrfs_is_data_reloc_root(inode->root))
		smap.rst_search_commit_root = true;
	else
		smap.rst_search_commit_root = false;

	btrfs_bio_counter_inc_blocked(fs_info);
	error = btrfs_map_block(fs_info, btrfs_op(bio), logical, &map_length,
				&bioc, &smap, &mirror_num);
	if (error) {
		ret = errno_to_blk_status(error);
		goto fail;
	}

	map_length = min(map_length, length);
	if (use_append)
		map_length = btrfs_append_map_length(bbio, map_length);

	if (map_length < length) {
		bbio = btrfs_split_bio(fs_info, bbio, map_length);
		bio = &bbio->bio;
	}

	/*
	 * Save the iter for the end_io handler and preload the checksums for
	 * data reads.
	 */
	if (bio_op(bio) == REQ_OP_READ && is_data_bbio(bbio)) {
		bbio->saved_iter = bio->bi_iter;
		ret = btrfs_lookup_bio_sums(bbio);
		if (ret)
			goto fail;
	}

	if (btrfs_op(bio) == BTRFS_MAP_WRITE) {
		if (use_append) {
			bio->bi_opf &= ~REQ_OP_WRITE;
			bio->bi_opf |= REQ_OP_ZONE_APPEND;
		}

		if (is_data_bbio(bbio) && bioc &&
		    btrfs_need_stripe_tree_update(bioc->fs_info, bioc->map_type)) {
			/*
			 * No locking for the list update, as we only add to
			 * the list in the I/O submission path, and list
			 * iteration only happens in the completion path, which
			 * can't happen until after the last submission.
			 */
			btrfs_get_bioc(bioc);
			list_add_tail(&bioc->rst_ordered_entry, &bbio->ordered->bioc_list);
		}

		/*
		 * Csum items for reloc roots have already been cloned at this
		 * point, so they are handled as part of the no-checksum case.
		 */
		if (inode && !(inode->flags & BTRFS_INODE_NODATASUM) &&
		    !test_bit(BTRFS_FS_STATE_NO_DATA_CSUMS, &fs_info->fs_state) &&
		    !btrfs_is_data_reloc_root(inode->root)) {
			if (should_async_write(bbio) &&
			    btrfs_wq_submit_bio(bbio, bioc, &smap, mirror_num))
				goto done;

			ret = btrfs_bio_csum(bbio);
			if (ret)
				goto fail;
		} else if (use_append ||
			   (btrfs_is_zoned(fs_info) && inode &&
			    inode->flags & BTRFS_INODE_NODATASUM)) {
			ret = btrfs_alloc_dummy_sum(bbio);
			if (ret)
				goto fail;
		}
	}

	btrfs_submit_bio(bio, bioc, &smap, mirror_num);
done:
	return map_length == length;

fail:
	btrfs_bio_counter_dec(fs_info);
	/*
	 * We have split the original bbio, now we have to end both the current
	 * @bbio and remaining one, as the remaining one will never be submitted.
	 */
	if (map_length < length) {
		struct btrfs_bio *remaining = bbio->private;

		ASSERT(bbio->bio.bi_pool == &btrfs_clone_bioset);
		ASSERT(remaining);

		btrfs_bio_end_io(remaining, ret);
	}
	btrfs_bio_end_io(bbio, ret);
	/* Do not submit another chunk */
	return true;
}

void btrfs_submit_bbio(struct btrfs_bio *bbio, int mirror_num)
{
	/* If bbio->inode is not populated, its file_offset must be 0. */
	ASSERT(bbio->inode || bbio->file_offset == 0);

	while (!btrfs_submit_chunk(bbio, mirror_num))
		;
}

/*
 * Submit a repair write.
 *
 * This bypasses btrfs_submit_bbio() deliberately, as that writes all copies in a
 * RAID setup. Here we only want to write the one bad copy, so we do the
 * mapping ourselves and submit the bio directly.
 *
 * The I/O is issued synchronously to block the repair read completion from
 * freeing the bio.
 */
int btrfs_repair_io_failure(struct btrfs_fs_info *fs_info, u64 ino, u64 start,
			    u64 length, u64 logical, struct folio *folio,
			    unsigned int folio_offset, int mirror_num)
{
	struct btrfs_io_stripe smap = { 0 };
	struct bio_vec bvec;
	struct bio bio;
	int ret = 0;

	ASSERT(!(fs_info->sb->s_flags & SB_RDONLY));
	BUG_ON(!mirror_num);

	if (btrfs_repair_one_zone(fs_info, logical))
		return 0;

	/*
	 * Avoid races with device replace and make sure our bioc has devices
	 * associated to its stripes that don't go away while we are doing the
	 * read repair operation.
	 */
	btrfs_bio_counter_inc_blocked(fs_info);
	ret = btrfs_map_repair_block(fs_info, &smap, logical, length, mirror_num);
	if (ret < 0)
		goto out_counter_dec;

	if (!smap.dev->bdev ||
	    !test_bit(BTRFS_DEV_STATE_WRITEABLE, &smap.dev->dev_state)) {
		ret = -EIO;
		goto out_counter_dec;
	}

	bio_init(&bio, smap.dev->bdev, &bvec, 1, REQ_OP_WRITE | REQ_SYNC);
	bio.bi_iter.bi_sector = smap.physical >> SECTOR_SHIFT;
	ret = bio_add_folio(&bio, folio, length, folio_offset);
	ASSERT(ret);
	ret = submit_bio_wait(&bio);
	if (ret) {
		/* try to remap that extent elsewhere? */
		btrfs_dev_stat_inc_and_print(smap.dev, BTRFS_DEV_STAT_WRITE_ERRS);
		goto out_bio_uninit;
	}

	btrfs_info_rl_in_rcu(fs_info,
		"read error corrected: ino %llu off %llu (dev %s sector %llu)",
			     ino, start, btrfs_dev_name(smap.dev),
			     smap.physical >> SECTOR_SHIFT);
	ret = 0;

out_bio_uninit:
	bio_uninit(&bio);
out_counter_dec:
	btrfs_bio_counter_dec(fs_info);
	return ret;
}

/*
 * Submit a btrfs_bio based repair write.
 *
 * If @dev_replace is true, the write will be submitted to the dev-replace
 * target device.
 */
void btrfs_submit_repair_write(struct btrfs_bio *bbio, int mirror_num, bool dev_replace)
{
	struct btrfs_fs_info *fs_info = bbio->fs_info;
	u64 logical = bbio->bio.bi_iter.bi_sector << SECTOR_SHIFT;
	u64 length = bbio->bio.bi_iter.bi_size;
	struct btrfs_io_stripe smap = { 0 };
	int ret;

	ASSERT(fs_info);
	ASSERT(mirror_num > 0);
	ASSERT(btrfs_op(&bbio->bio) == BTRFS_MAP_WRITE);
	ASSERT(!bbio->inode);

	btrfs_bio_counter_inc_blocked(fs_info);
	ret = btrfs_map_repair_block(fs_info, &smap, logical, length, mirror_num);
	if (ret < 0)
		goto fail;

	if (dev_replace) {
		ASSERT(smap.dev == fs_info->dev_replace.srcdev);
		smap.dev = fs_info->dev_replace.tgtdev;
	}
	btrfs_submit_bio(&bbio->bio, NULL, &smap, mirror_num);
	return;

fail:
	btrfs_bio_counter_dec(fs_info);
	btrfs_bio_end_io(bbio, errno_to_blk_status(ret));
}

int __init btrfs_bioset_init(void)
{
	if (bioset_init(&btrfs_bioset, BIO_POOL_SIZE,
			offsetof(struct btrfs_bio, bio),
			BIOSET_NEED_BVECS))
		return -ENOMEM;
	if (bioset_init(&btrfs_clone_bioset, BIO_POOL_SIZE,
			offsetof(struct btrfs_bio, bio), 0))
		goto out_free_bioset;
	if (bioset_init(&btrfs_repair_bioset, BIO_POOL_SIZE,
			offsetof(struct btrfs_bio, bio),
			BIOSET_NEED_BVECS))
		goto out_free_clone_bioset;
	if (mempool_init_kmalloc_pool(&btrfs_failed_bio_pool, BIO_POOL_SIZE,
				      sizeof(struct btrfs_failed_bio)))
		goto out_free_repair_bioset;
	return 0;

out_free_repair_bioset:
	bioset_exit(&btrfs_repair_bioset);
out_free_clone_bioset:
	bioset_exit(&btrfs_clone_bioset);
out_free_bioset:
	bioset_exit(&btrfs_bioset);
	return -ENOMEM;
}

void __cold btrfs_bioset_exit(void)
{
	mempool_exit(&btrfs_failed_bio_pool);
	bioset_exit(&btrfs_repair_bioset);
	bioset_exit(&btrfs_clone_bioset);
	bioset_exit(&btrfs_bioset);
}