Linux kernel mirror (for testing)
git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
1// SPDX-License-Identifier: GPL-2.0
2
3#include <linux/bitops.h>
4#include <linux/slab.h>
5#include <linux/blkdev.h>
6#include <linux/sched/mm.h>
7#include <linux/atomic.h>
8#include <linux/vmalloc.h>
9#include "ctree.h"
10#include "volumes.h"
11#include "zoned.h"
12#include "rcu-string.h"
13#include "disk-io.h"
14#include "block-group.h"
15#include "transaction.h"
16#include "dev-replace.h"
17#include "space-info.h"
18#include "super.h"
19#include "fs.h"
20#include "accessors.h"
21#include "bio.h"
22
23/* Maximum number of zones to report per blkdev_report_zones() call */
24#define BTRFS_REPORT_NR_ZONES 4096
25/* Invalid allocation pointer value for missing devices */
26#define WP_MISSING_DEV ((u64)-1)
27/* Pseudo write pointer value for conventional zone */
28#define WP_CONVENTIONAL ((u64)-2)
29
30/*
31 * Location of the first zone of superblock logging zone pairs.
32 *
33 * - primary superblock: 0B (zone 0)
34 * - first copy: 512G (zone starting at that offset)
35 * - second copy: 4T (zone starting at that offset)
36 */
37#define BTRFS_SB_LOG_PRIMARY_OFFSET (0ULL)
38#define BTRFS_SB_LOG_FIRST_OFFSET (512ULL * SZ_1G)
39#define BTRFS_SB_LOG_SECOND_OFFSET (4096ULL * SZ_1G)
40
41#define BTRFS_SB_LOG_FIRST_SHIFT const_ilog2(BTRFS_SB_LOG_FIRST_OFFSET)
42#define BTRFS_SB_LOG_SECOND_SHIFT const_ilog2(BTRFS_SB_LOG_SECOND_OFFSET)
43
44/* Number of superblock log zones */
45#define BTRFS_NR_SB_LOG_ZONES 2
46
47/*
48 * Minimum number of active zones we need:
49 *
50 * - BTRFS_SUPER_MIRROR_MAX zones for superblock mirrors
51 * - 3 zones to ensure at least one zone per SYSTEM, META and DATA block group
52 * - 1 zone for tree-log dedicated block group
53 * - 1 zone for relocation
54 */
55#define BTRFS_MIN_ACTIVE_ZONES (BTRFS_SUPER_MIRROR_MAX + 5)
56
57/*
58 * Minimum / maximum supported zone size. Currently, SMR disks have a zone
59 * size of 256MiB, and we are expecting ZNS drives to be in the 1-4GiB range.
60 * We do not expect the zone size to become larger than 8GiB or smaller than
61 * 4MiB in the near future.
62 */
63#define BTRFS_MAX_ZONE_SIZE SZ_8G
64#define BTRFS_MIN_ZONE_SIZE SZ_4M
65
66#define SUPER_INFO_SECTORS ((u64)BTRFS_SUPER_INFO_SIZE >> SECTOR_SHIFT)
67
68static void wait_eb_writebacks(struct btrfs_block_group *block_group);
69static int do_zone_finish(struct btrfs_block_group *block_group, bool fully_written);
70
71static inline bool sb_zone_is_full(const struct blk_zone *zone)
72{
73 return (zone->cond == BLK_ZONE_COND_FULL) ||
74 (zone->wp + SUPER_INFO_SECTORS > zone->start + zone->capacity);
75}
76
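/* Callback for blkdev_report_zones(): copy each reported zone into the caller's array. */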
77static int copy_zone_info_cb(struct blk_zone *zone, unsigned int idx, void *data)
78{
79 struct blk_zone *zones = data;
80
81 memcpy(&zones[idx], zone, sizeof(*zone));
82
83 return 0;
84}
85
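/*
 * Return in @wp_ret the byte offset of the superblock log's write pointer,
 * i.e. where the next superblock copy goes within the zone pair. Returns
 * -ENOENT if both zones are empty (no superblock written yet) and -EUCLEAN
 * on an inconsistent zone state.
 */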
86static int sb_write_pointer(struct block_device *bdev, struct blk_zone *zones,
87 u64 *wp_ret)
88{
89 bool empty[BTRFS_NR_SB_LOG_ZONES];
90 bool full[BTRFS_NR_SB_LOG_ZONES];
91 sector_t sector;
92 int i;
93
94 for (i = 0; i < BTRFS_NR_SB_LOG_ZONES; i++) {
95 ASSERT(zones[i].type != BLK_ZONE_TYPE_CONVENTIONAL);
96 empty[i] = (zones[i].cond == BLK_ZONE_COND_EMPTY);
97 full[i] = sb_zone_is_full(&zones[i]);
98 }
99
100 /*
101 * Possible states of log buffer zones
102 *
103 * Empty[0] In use[0] Full[0]
104 * Empty[1] * 0 1
105 * In use[1] x x 1
106 * Full[1] 0 0 C
107 *
108 * Log position:
109 * *: Special case, no superblock is written
110 * 0: Use write pointer of zones[0]
111 * 1: Use write pointer of zones[1]
112 * C: Compare super blocks from zones[0] and zones[1], use the latest
113 * one determined by generation
114 * x: Invalid state
115 */
116
117 if (empty[0] && empty[1]) {
118 /* Special case to distinguish no superblock to read */
119 *wp_ret = zones[0].start << SECTOR_SHIFT;
120 return -ENOENT;
121 } else if (full[0] && full[1]) {
122 /* Compare two super blocks */
123 struct address_space *mapping = bdev->bd_inode->i_mapping;
124 struct page *page[BTRFS_NR_SB_LOG_ZONES];
125 struct btrfs_super_block *super[BTRFS_NR_SB_LOG_ZONES];
126 int i;
127
128 for (i = 0; i < BTRFS_NR_SB_LOG_ZONES; i++) {
129 u64 zone_end = (zones[i].start + zones[i].capacity) << SECTOR_SHIFT;
130 u64 bytenr = ALIGN_DOWN(zone_end, BTRFS_SUPER_INFO_SIZE) -
131 BTRFS_SUPER_INFO_SIZE;
132
133 page[i] = read_cache_page_gfp(mapping,
134 bytenr >> PAGE_SHIFT, GFP_NOFS);
135 if (IS_ERR(page[i])) {
136 if (i == 1)
137 btrfs_release_disk_super(super[0]);
138 return PTR_ERR(page[i]);
139 }
140 super[i] = page_address(page[i]);
141 }
142
143 if (btrfs_super_generation(super[0]) >
144 btrfs_super_generation(super[1]))
145 sector = zones[1].start;
146 else
147 sector = zones[0].start;
148
149 for (i = 0; i < BTRFS_NR_SB_LOG_ZONES; i++)
150 btrfs_release_disk_super(super[i]);
151 } else if (!full[0] && (empty[1] || full[1])) {
152 sector = zones[0].wp;
153 } else if (full[0]) {
154 sector = zones[1].wp;
155 } else {
156 return -EUCLEAN;
157 }
158 *wp_ret = sector << SECTOR_SHIFT;
159 return 0;
160}
161
162/*
163 * Get the first zone number of the superblock mirror
164 */
165static inline u32 sb_zone_number(int shift, int mirror)
166{
167 u64 zone = U64_MAX;
168
169 ASSERT(mirror < BTRFS_SUPER_MIRROR_MAX);
170 switch (mirror) {
171 case 0: zone = 0; break;
172 case 1: zone = 1ULL << (BTRFS_SB_LOG_FIRST_SHIFT - shift); break;
173 case 2: zone = 1ULL << (BTRFS_SB_LOG_SECOND_SHIFT - shift); break;
174 }
175
176 ASSERT(zone <= U32_MAX);
177
178 return (u32)zone;
179}
180
181static inline sector_t zone_start_sector(u32 zone_number,
182 struct block_device *bdev)
183{
184 return (sector_t)zone_number << ilog2(bdev_zone_sectors(bdev));
185}
186
187static inline u64 zone_start_physical(u32 zone_number,
188 struct btrfs_zoned_device_info *zone_info)
189{
190 return (u64)zone_number << zone_info->zone_size_shift;
191}
192
193/*
194 * Emulate blkdev_report_zones() for a non-zoned device. It slices the block
195 * device into fixed-size chunks and fakes a conventional zone on each of
196 * them.
197 */
198static int emulate_report_zones(struct btrfs_device *device, u64 pos,
199 struct blk_zone *zones, unsigned int nr_zones)
200{
201 const sector_t zone_sectors = device->fs_info->zone_size >> SECTOR_SHIFT;
202 sector_t bdev_size = bdev_nr_sectors(device->bdev);
203 unsigned int i;
204
205 pos >>= SECTOR_SHIFT;
206 for (i = 0; i < nr_zones; i++) {
207 zones[i].start = i * zone_sectors + pos;
208 zones[i].len = zone_sectors;
209 zones[i].capacity = zone_sectors;
210 zones[i].wp = zones[i].start + zone_sectors;
211 zones[i].type = BLK_ZONE_TYPE_CONVENTIONAL;
212 zones[i].cond = BLK_ZONE_COND_NOT_WP;
213
214 if (zones[i].wp >= bdev_size) {
215 i++;
216 break;
217 }
218 }
219
220 return i;
221}
222
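/*
 * Report up to *nr_zones zones starting at byte offset @pos on @device into
 * @zones, using the per-device zone cache when it is fully populated for the
 * range and emulating conventional zones on non-zoned devices. On return,
 * *nr_zones holds the number of zones actually filled in.
 */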
223static int btrfs_get_dev_zones(struct btrfs_device *device, u64 pos,
224 struct blk_zone *zones, unsigned int *nr_zones)
225{
226 struct btrfs_zoned_device_info *zinfo = device->zone_info;
227 int ret;
228
229 if (!*nr_zones)
230 return 0;
231
232 if (!bdev_is_zoned(device->bdev)) {
233 ret = emulate_report_zones(device, pos, zones, *nr_zones);
234 *nr_zones = ret;
235 return 0;
236 }
237
238 /* Check cache */
239 if (zinfo->zone_cache) {
240 unsigned int i;
241 u32 zno;
242
243 ASSERT(IS_ALIGNED(pos, zinfo->zone_size));
244 zno = pos >> zinfo->zone_size_shift;
245 /*
246 * We cannot report zones beyond the zone end, so it is OK to
247 * cap *nr_zones to the number of zones remaining until the end.
248 */
249 *nr_zones = min_t(u32, *nr_zones, zinfo->nr_zones - zno);
250
251 for (i = 0; i < *nr_zones; i++) {
252 struct blk_zone *zone_info;
253
254 zone_info = &zinfo->zone_cache[zno + i];
255 if (!zone_info->len)
256 break;
257 }
258
259 if (i == *nr_zones) {
260 /* Cache hit on all the zones */
261 memcpy(zones, zinfo->zone_cache + zno,
262 sizeof(*zinfo->zone_cache) * *nr_zones);
263 return 0;
264 }
265 }
266
267 ret = blkdev_report_zones(device->bdev, pos >> SECTOR_SHIFT, *nr_zones,
268 copy_zone_info_cb, zones);
269 if (ret < 0) {
270 btrfs_err_in_rcu(device->fs_info,
271 "zoned: failed to read zone %llu on %s (devid %llu)",
272 pos, rcu_str_deref(device->name),
273 device->devid);
274 return ret;
275 }
276 *nr_zones = ret;
277 if (!ret)
278 return -EIO;
279
280 /* Populate cache */
281 if (zinfo->zone_cache) {
282 u32 zno = pos >> zinfo->zone_size_shift;
283
284 memcpy(zinfo->zone_cache + zno, zones,
285 sizeof(*zinfo->zone_cache) * *nr_zones);
286 }
287
288 return 0;
289}
290
291/* The emulated zone size is determined from the size of the first device extent. */
292static int calculate_emulated_zone_size(struct btrfs_fs_info *fs_info)
293{
294 struct btrfs_path *path;
295 struct btrfs_root *root = fs_info->dev_root;
296 struct btrfs_key key;
297 struct extent_buffer *leaf;
298 struct btrfs_dev_extent *dext;
299 int ret = 0;
300
301 key.objectid = 1;
302 key.type = BTRFS_DEV_EXTENT_KEY;
303 key.offset = 0;
304
305 path = btrfs_alloc_path();
306 if (!path)
307 return -ENOMEM;
308
309 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
310 if (ret < 0)
311 goto out;
312
313 if (path->slots[0] >= btrfs_header_nritems(path->nodes[0])) {
314 ret = btrfs_next_leaf(root, path);
315 if (ret < 0)
316 goto out;
317 /* No dev extents at all? Not good */
318 if (ret > 0) {
319 ret = -EUCLEAN;
320 goto out;
321 }
322 }
323
324 leaf = path->nodes[0];
325 dext = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_dev_extent);
326 fs_info->zone_size = btrfs_dev_extent_length(leaf, dext);
327 ret = 0;
328
329out:
330 btrfs_free_path(path);
331
332 return ret;
333}
334
335int btrfs_get_dev_zone_info_all_devices(struct btrfs_fs_info *fs_info)
336{
337 struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
338 struct btrfs_device *device;
339 int ret = 0;
340
341 /* fs_info->zone_size might not be set yet. Use the incompat flag here. */
342 if (!btrfs_fs_incompat(fs_info, ZONED))
343 return 0;
344
345 mutex_lock(&fs_devices->device_list_mutex);
346 list_for_each_entry(device, &fs_devices->devices, dev_list) {
347 /* We can skip reading zone info for missing devices */
348 if (!device->bdev)
349 continue;
350
351 ret = btrfs_get_dev_zone_info(device, true);
352 if (ret)
353 break;
354 }
355 mutex_unlock(&fs_devices->device_list_mutex);
356
357 return ret;
358}
359
360int btrfs_get_dev_zone_info(struct btrfs_device *device, bool populate_cache)
361{
362 struct btrfs_fs_info *fs_info = device->fs_info;
363 struct btrfs_zoned_device_info *zone_info = NULL;
364 struct block_device *bdev = device->bdev;
365 unsigned int max_active_zones;
366 unsigned int nactive;
367 sector_t nr_sectors;
368 sector_t sector = 0;
369 struct blk_zone *zones = NULL;
370 unsigned int i, nreported = 0, nr_zones;
371 sector_t zone_sectors;
372 char *model, *emulated;
373 int ret;
374
375 /*
376 * Cannot use btrfs_is_zoned here, since fs_info::zone_size might not
377 * yet be set.
378 */
379 if (!btrfs_fs_incompat(fs_info, ZONED))
380 return 0;
381
382 if (device->zone_info)
383 return 0;
384
385 zone_info = kzalloc(sizeof(*zone_info), GFP_KERNEL);
386 if (!zone_info)
387 return -ENOMEM;
388
389 device->zone_info = zone_info;
390
391 if (!bdev_is_zoned(bdev)) {
392 if (!fs_info->zone_size) {
393 ret = calculate_emulated_zone_size(fs_info);
394 if (ret)
395 goto out;
396 }
397
398 ASSERT(fs_info->zone_size);
399 zone_sectors = fs_info->zone_size >> SECTOR_SHIFT;
400 } else {
401 zone_sectors = bdev_zone_sectors(bdev);
402 }
403
404 ASSERT(is_power_of_two_u64(zone_sectors));
405 zone_info->zone_size = zone_sectors << SECTOR_SHIFT;
406
407 /* We reject devices with a zone size larger than 8GiB */
408 if (zone_info->zone_size > BTRFS_MAX_ZONE_SIZE) {
409 btrfs_err_in_rcu(fs_info,
410 "zoned: %s: zone size %llu larger than supported maximum %llu",
411 rcu_str_deref(device->name),
412 zone_info->zone_size, BTRFS_MAX_ZONE_SIZE);
413 ret = -EINVAL;
414 goto out;
415 } else if (zone_info->zone_size < BTRFS_MIN_ZONE_SIZE) {
416 btrfs_err_in_rcu(fs_info,
417 "zoned: %s: zone size %llu smaller than supported minimum %u",
418 rcu_str_deref(device->name),
419 zone_info->zone_size, BTRFS_MIN_ZONE_SIZE);
420 ret = -EINVAL;
421 goto out;
422 }
423
424 nr_sectors = bdev_nr_sectors(bdev);
425 zone_info->zone_size_shift = ilog2(zone_info->zone_size);
426 zone_info->nr_zones = nr_sectors >> ilog2(zone_sectors);
427 if (!IS_ALIGNED(nr_sectors, zone_sectors))
428 zone_info->nr_zones++;
429
430 max_active_zones = bdev_max_active_zones(bdev);
431 if (max_active_zones && max_active_zones < BTRFS_MIN_ACTIVE_ZONES) {
432 btrfs_err_in_rcu(fs_info,
433"zoned: %s: max active zones %u is too small, need at least %u active zones",
434 rcu_str_deref(device->name), max_active_zones,
435 BTRFS_MIN_ACTIVE_ZONES);
436 ret = -EINVAL;
437 goto out;
438 }
439 zone_info->max_active_zones = max_active_zones;
440
441 zone_info->seq_zones = bitmap_zalloc(zone_info->nr_zones, GFP_KERNEL);
442 if (!zone_info->seq_zones) {
443 ret = -ENOMEM;
444 goto out;
445 }
446
447 zone_info->empty_zones = bitmap_zalloc(zone_info->nr_zones, GFP_KERNEL);
448 if (!zone_info->empty_zones) {
449 ret = -ENOMEM;
450 goto out;
451 }
452
453 zone_info->active_zones = bitmap_zalloc(zone_info->nr_zones, GFP_KERNEL);
454 if (!zone_info->active_zones) {
455 ret = -ENOMEM;
456 goto out;
457 }
458
459 zones = kvcalloc(BTRFS_REPORT_NR_ZONES, sizeof(struct blk_zone), GFP_KERNEL);
460 if (!zones) {
461 ret = -ENOMEM;
462 goto out;
463 }
464
465 /*
466 * Enable zone cache only for a zoned device. On a non-zoned device, we
467 * fill the zone info with emulated CONVENTIONAL zones, so no need to
468 * use the cache.
469 */
470 if (populate_cache && bdev_is_zoned(device->bdev)) {
471 zone_info->zone_cache = vcalloc(zone_info->nr_zones,
472 sizeof(struct blk_zone));
473 if (!zone_info->zone_cache) {
474 btrfs_err_in_rcu(device->fs_info,
475 "zoned: failed to allocate zone cache for %s",
476 rcu_str_deref(device->name));
477 ret = -ENOMEM;
478 goto out;
479 }
480 }
481
482 /* Get zone types */
483 nactive = 0;
484 while (sector < nr_sectors) {
485 nr_zones = BTRFS_REPORT_NR_ZONES;
486 ret = btrfs_get_dev_zones(device, sector << SECTOR_SHIFT, zones,
487 &nr_zones);
488 if (ret)
489 goto out;
490
491 for (i = 0; i < nr_zones; i++) {
492 if (zones[i].type == BLK_ZONE_TYPE_SEQWRITE_REQ)
493 __set_bit(nreported, zone_info->seq_zones);
494 switch (zones[i].cond) {
495 case BLK_ZONE_COND_EMPTY:
496 __set_bit(nreported, zone_info->empty_zones);
497 break;
498 case BLK_ZONE_COND_IMP_OPEN:
499 case BLK_ZONE_COND_EXP_OPEN:
500 case BLK_ZONE_COND_CLOSED:
501 __set_bit(nreported, zone_info->active_zones);
502 nactive++;
503 break;
504 }
505 nreported++;
506 }
507 sector = zones[nr_zones - 1].start + zones[nr_zones - 1].len;
508 }
509
510 if (nreported != zone_info->nr_zones) {
511 btrfs_err_in_rcu(device->fs_info,
512 "inconsistent number of zones on %s (%u/%u)",
513 rcu_str_deref(device->name), nreported,
514 zone_info->nr_zones);
515 ret = -EIO;
516 goto out;
517 }
518
519 if (max_active_zones) {
520 if (nactive > max_active_zones) {
521 btrfs_err_in_rcu(device->fs_info,
522 "zoned: %u active zones on %s exceeds max_active_zones %u",
523 nactive, rcu_str_deref(device->name),
524 max_active_zones);
525 ret = -EIO;
526 goto out;
527 }
528 atomic_set(&zone_info->active_zones_left,
529 max_active_zones - nactive);
530 set_bit(BTRFS_FS_ACTIVE_ZONE_TRACKING, &fs_info->flags);
531 }
532
533 /* Validate superblock log */
534 nr_zones = BTRFS_NR_SB_LOG_ZONES;
535 for (i = 0; i < BTRFS_SUPER_MIRROR_MAX; i++) {
536 u32 sb_zone;
537 u64 sb_wp;
538 int sb_pos = BTRFS_NR_SB_LOG_ZONES * i;
539
540 sb_zone = sb_zone_number(zone_info->zone_size_shift, i);
541 if (sb_zone + 1 >= zone_info->nr_zones)
542 continue;
543
544 ret = btrfs_get_dev_zones(device,
545 zone_start_physical(sb_zone, zone_info),
546 &zone_info->sb_zones[sb_pos],
547 &nr_zones);
548 if (ret)
549 goto out;
550
551 if (nr_zones != BTRFS_NR_SB_LOG_ZONES) {
552 btrfs_err_in_rcu(device->fs_info,
553 "zoned: failed to read super block log zone info at devid %llu zone %u",
554 device->devid, sb_zone);
555 ret = -EUCLEAN;
556 goto out;
557 }
558
559 /*
560 * If zones[0] is conventional, always use the beginning of the
561 * zone to record superblock. No need to validate in that case.
562 */
563 if (zone_info->sb_zones[BTRFS_NR_SB_LOG_ZONES * i].type ==
564 BLK_ZONE_TYPE_CONVENTIONAL)
565 continue;
566
567 ret = sb_write_pointer(device->bdev,
568 &zone_info->sb_zones[sb_pos], &sb_wp);
569 if (ret != -ENOENT && ret) {
570 btrfs_err_in_rcu(device->fs_info,
571 "zoned: super block log zone corrupted devid %llu zone %u",
572 device->devid, sb_zone);
573 ret = -EUCLEAN;
574 goto out;
575 }
576 }
577
578
579 kvfree(zones);
580
581 if (bdev_is_zoned(bdev)) {
582 model = "host-managed zoned";
583 emulated = "";
584 } else {
585 model = "regular";
586 emulated = "emulated ";
587 }
588
589 btrfs_info_in_rcu(fs_info,
590 "%s block device %s, %u %szones of %llu bytes",
591 model, rcu_str_deref(device->name), zone_info->nr_zones,
592 emulated, zone_info->zone_size);
593
594 return 0;
595
596out:
597 kvfree(zones);
598 btrfs_destroy_dev_zone_info(device);
599 return ret;
600}
601
602void btrfs_destroy_dev_zone_info(struct btrfs_device *device)
603{
604 struct btrfs_zoned_device_info *zone_info = device->zone_info;
605
606 if (!zone_info)
607 return;
608
609 bitmap_free(zone_info->active_zones);
610 bitmap_free(zone_info->seq_zones);
611 bitmap_free(zone_info->empty_zones);
612 vfree(zone_info->zone_cache);
613 kfree(zone_info);
614 device->zone_info = NULL;
615}
616
617struct btrfs_zoned_device_info *btrfs_clone_dev_zone_info(struct btrfs_device *orig_dev)
618{
619 struct btrfs_zoned_device_info *zone_info;
620
621 zone_info = kmemdup(orig_dev->zone_info, sizeof(*zone_info), GFP_KERNEL);
622 if (!zone_info)
623 return NULL;
624
625 zone_info->seq_zones = bitmap_zalloc(zone_info->nr_zones, GFP_KERNEL);
626 if (!zone_info->seq_zones)
627 goto out;
628
629 bitmap_copy(zone_info->seq_zones, orig_dev->zone_info->seq_zones,
630 zone_info->nr_zones);
631
632 zone_info->empty_zones = bitmap_zalloc(zone_info->nr_zones, GFP_KERNEL);
633 if (!zone_info->empty_zones)
634 goto out;
635
636 bitmap_copy(zone_info->empty_zones, orig_dev->zone_info->empty_zones,
637 zone_info->nr_zones);
638
639 zone_info->active_zones = bitmap_zalloc(zone_info->nr_zones, GFP_KERNEL);
640 if (!zone_info->active_zones)
641 goto out;
642
643 bitmap_copy(zone_info->active_zones, orig_dev->zone_info->active_zones,
644 zone_info->nr_zones);
645 zone_info->zone_cache = NULL;
646
647 return zone_info;
648
649out:
650 bitmap_free(zone_info->seq_zones);
651 bitmap_free(zone_info->empty_zones);
652 bitmap_free(zone_info->active_zones);
653 kfree(zone_info);
654 return NULL;
655}
656
657int btrfs_get_dev_zone(struct btrfs_device *device, u64 pos,
658 struct blk_zone *zone)
659{
660 unsigned int nr_zones = 1;
661 int ret;
662
663 ret = btrfs_get_dev_zones(device, pos, zone, &nr_zones);
664 if (ret != 0 || !nr_zones)
665 return ret ? ret : -EIO;
666
667 return 0;
668}
669
670static int btrfs_check_for_zoned_device(struct btrfs_fs_info *fs_info)
671{
672 struct btrfs_device *device;
673
674 list_for_each_entry(device, &fs_info->fs_devices->devices, dev_list) {
675 if (device->bdev && bdev_is_zoned(device->bdev)) {
676 btrfs_err(fs_info,
677 "zoned: mode not enabled but zoned device found: %pg",
678 device->bdev);
679 return -EINVAL;
680 }
681 }
682
683 return 0;
684}
685
686int btrfs_check_zoned_mode(struct btrfs_fs_info *fs_info)
687{
688 struct queue_limits *lim = &fs_info->limits;
689 struct btrfs_device *device;
690 u64 zone_size = 0;
691 int ret;
692
693 /*
694 * Host-managed devices can't be used without the ZONED flag. With the
695 * ZONED flag, all devices can be used, using zone emulation if required.
696 */
697 if (!btrfs_fs_incompat(fs_info, ZONED))
698 return btrfs_check_for_zoned_device(fs_info);
699
700 blk_set_stacking_limits(lim);
701
702 list_for_each_entry(device, &fs_info->fs_devices->devices, dev_list) {
703 struct btrfs_zoned_device_info *zone_info = device->zone_info;
704
705 if (!device->bdev)
706 continue;
707
708 if (!zone_size) {
709 zone_size = zone_info->zone_size;
710 } else if (zone_info->zone_size != zone_size) {
711 btrfs_err(fs_info,
712 "zoned: unequal block device zone sizes: have %llu found %llu",
713 zone_info->zone_size, zone_size);
714 return -EINVAL;
715 }
716
717 /*
718 * With zone emulation, we can have a non-zoned device in zoned
719 * mode. In this case, we don't have a valid max zone append
720 * size.
721 */
722 if (bdev_is_zoned(device->bdev)) {
723 blk_stack_limits(lim,
724 &bdev_get_queue(device->bdev)->limits,
725 0);
726 }
727 }
728
729 /*
730 * stripe_size is always aligned to BTRFS_STRIPE_LEN in
731 * btrfs_create_chunk(). Since we want stripe_len == zone_size,
732 * check the alignment here.
733 */
734 if (!IS_ALIGNED(zone_size, BTRFS_STRIPE_LEN)) {
735 btrfs_err(fs_info,
736 "zoned: zone size %llu not aligned to stripe %u",
737 zone_size, BTRFS_STRIPE_LEN);
738 return -EINVAL;
739 }
740
741 if (btrfs_fs_incompat(fs_info, MIXED_GROUPS)) {
742 btrfs_err(fs_info, "zoned: mixed block groups not supported");
743 return -EINVAL;
744 }
745
746 fs_info->zone_size = zone_size;
747 /*
748 * Also limit max_zone_append_size by max_segments * PAGE_SIZE.
749 * Technically, we can have multiple pages per segment. But, since
750 * we add the pages one by one to a bio, and cannot increase the
751 * metadata reservation even if it increases the number of extents, it
752 * is safe to stick with the limit.
753 */
754 fs_info->max_zone_append_size = ALIGN_DOWN(
755 min3((u64)lim->max_zone_append_sectors << SECTOR_SHIFT,
756 (u64)lim->max_sectors << SECTOR_SHIFT,
757 (u64)lim->max_segments << PAGE_SHIFT),
758 fs_info->sectorsize);
759 fs_info->fs_devices->chunk_alloc_policy = BTRFS_CHUNK_ALLOC_ZONED;
760 if (fs_info->max_zone_append_size < fs_info->max_extent_size)
761 fs_info->max_extent_size = fs_info->max_zone_append_size;
762
763 /*
764 * Check mount options here, because we might change fs_info->zoned
765 * from fs_info->zone_size.
766 */
767 ret = btrfs_check_mountopts_zoned(fs_info, &fs_info->mount_opt);
768 if (ret)
769 return ret;
770
771 btrfs_info(fs_info, "zoned mode enabled with zone size %llu", zone_size);
772 return 0;
773}
774
775int btrfs_check_mountopts_zoned(struct btrfs_fs_info *info, unsigned long *mount_opt)
776{
777 if (!btrfs_is_zoned(info))
778 return 0;
779
780 /*
781 * Space cache writing is not COWed. Disable that to avoid write errors
782 * in sequential zones.
783 */
784 if (btrfs_raw_test_opt(*mount_opt, SPACE_CACHE)) {
785 btrfs_err(info, "zoned: space cache v1 is not supported");
786 return -EINVAL;
787 }
788
789 if (btrfs_raw_test_opt(*mount_opt, NODATACOW)) {
790 btrfs_err(info, "zoned: NODATACOW not supported");
791 return -EINVAL;
792 }
793
794 if (btrfs_raw_test_opt(*mount_opt, DISCARD_ASYNC)) {
795 btrfs_info(info,
796 "zoned: async discard ignored and disabled for zoned mode");
797 btrfs_clear_opt(*mount_opt, DISCARD_ASYNC);
798 }
799
800 return 0;
801}
802
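/*
 * Compute the superblock location within a superblock log zone pair. For
 * READ this is the most recently written copy; for WRITE it is the position
 * for the next copy, resetting a full zone first when necessary.
 */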
803static int sb_log_location(struct block_device *bdev, struct blk_zone *zones,
804 int rw, u64 *bytenr_ret)
805{
806 u64 wp;
807 int ret;
808
809 if (zones[0].type == BLK_ZONE_TYPE_CONVENTIONAL) {
810 *bytenr_ret = zones[0].start << SECTOR_SHIFT;
811 return 0;
812 }
813
814 ret = sb_write_pointer(bdev, zones, &wp);
815 if (ret != -ENOENT && ret < 0)
816 return ret;
817
818 if (rw == WRITE) {
819 struct blk_zone *reset = NULL;
820
821 if (wp == zones[0].start << SECTOR_SHIFT)
822 reset = &zones[0];
823 else if (wp == zones[1].start << SECTOR_SHIFT)
824 reset = &zones[1];
825
826 if (reset && reset->cond != BLK_ZONE_COND_EMPTY) {
827 ASSERT(sb_zone_is_full(reset));
828
829 ret = blkdev_zone_mgmt(bdev, REQ_OP_ZONE_RESET,
830 reset->start, reset->len,
831 GFP_NOFS);
832 if (ret)
833 return ret;
834
835 reset->cond = BLK_ZONE_COND_EMPTY;
836 reset->wp = reset->start;
837 }
838 } else if (ret != -ENOENT) {
839 /*
840 * For READ, we want the previous one. Move write pointer to
841 * the end of a zone, if it is at the head of a zone.
842 */
843 u64 zone_end = 0;
844
845 if (wp == zones[0].start << SECTOR_SHIFT)
846 zone_end = zones[1].start + zones[1].capacity;
847 else if (wp == zones[1].start << SECTOR_SHIFT)
848 zone_end = zones[0].start + zones[0].capacity;
849 if (zone_end)
850 wp = ALIGN_DOWN(zone_end << SECTOR_SHIFT,
851 BTRFS_SUPER_INFO_SIZE);
852
853 wp -= BTRFS_SUPER_INFO_SIZE;
854 }
855
856 *bytenr_ret = wp;
857 return 0;
858
859}
860
861int btrfs_sb_log_location_bdev(struct block_device *bdev, int mirror, int rw,
862 u64 *bytenr_ret)
863{
864 struct blk_zone zones[BTRFS_NR_SB_LOG_ZONES];
865 sector_t zone_sectors;
866 u32 sb_zone;
867 int ret;
868 u8 zone_sectors_shift;
869 sector_t nr_sectors;
870 u32 nr_zones;
871
872 if (!bdev_is_zoned(bdev)) {
873 *bytenr_ret = btrfs_sb_offset(mirror);
874 return 0;
875 }
876
877 ASSERT(rw == READ || rw == WRITE);
878
879 zone_sectors = bdev_zone_sectors(bdev);
880 if (!is_power_of_2(zone_sectors))
881 return -EINVAL;
882 zone_sectors_shift = ilog2(zone_sectors);
883 nr_sectors = bdev_nr_sectors(bdev);
884 nr_zones = nr_sectors >> zone_sectors_shift;
885
886 sb_zone = sb_zone_number(zone_sectors_shift + SECTOR_SHIFT, mirror);
887 if (sb_zone + 1 >= nr_zones)
888 return -ENOENT;
889
890 ret = blkdev_report_zones(bdev, zone_start_sector(sb_zone, bdev),
891 BTRFS_NR_SB_LOG_ZONES, copy_zone_info_cb,
892 zones);
893 if (ret < 0)
894 return ret;
895 if (ret != BTRFS_NR_SB_LOG_ZONES)
896 return -EIO;
897
898 return sb_log_location(bdev, zones, rw, bytenr_ret);
899}
900
901int btrfs_sb_log_location(struct btrfs_device *device, int mirror, int rw,
902 u64 *bytenr_ret)
903{
904 struct btrfs_zoned_device_info *zinfo = device->zone_info;
905 u32 zone_num;
906
907 /*
908 * For a zoned filesystem on a non-zoned block device, use the same
909 * super block locations as a regular filesystem. Doing so, the super
910 * block can always be retrieved and the zoned flag of the volume
911 * detected from the super block information.
912 */
913 if (!bdev_is_zoned(device->bdev)) {
914 *bytenr_ret = btrfs_sb_offset(mirror);
915 return 0;
916 }
917
918 zone_num = sb_zone_number(zinfo->zone_size_shift, mirror);
919 if (zone_num + 1 >= zinfo->nr_zones)
920 return -ENOENT;
921
922 return sb_log_location(device->bdev,
923 &zinfo->sb_zones[BTRFS_NR_SB_LOG_ZONES * mirror],
924 rw, bytenr_ret);
925}
926
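/* Check whether superblock mirror @mirror on this device lives in a sequential (log) zone. */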
927static inline bool is_sb_log_zone(struct btrfs_zoned_device_info *zinfo,
928 int mirror)
929{
930 u32 zone_num;
931
932 if (!zinfo)
933 return false;
934
935 zone_num = sb_zone_number(zinfo->zone_size_shift, mirror);
936 if (zone_num + 1 >= zinfo->nr_zones)
937 return false;
938
939 if (!test_bit(zone_num, zinfo->seq_zones))
940 return false;
941
942 return true;
943}
944
945int btrfs_advance_sb_log(struct btrfs_device *device, int mirror)
946{
947 struct btrfs_zoned_device_info *zinfo = device->zone_info;
948 struct blk_zone *zone;
949 int i;
950
951 if (!is_sb_log_zone(zinfo, mirror))
952 return 0;
953
954 zone = &zinfo->sb_zones[BTRFS_NR_SB_LOG_ZONES * mirror];
955 for (i = 0; i < BTRFS_NR_SB_LOG_ZONES; i++) {
956 /* Advance to the next zone */
957 if (zone->cond == BLK_ZONE_COND_FULL) {
958 zone++;
959 continue;
960 }
961
962 if (zone->cond == BLK_ZONE_COND_EMPTY)
963 zone->cond = BLK_ZONE_COND_IMP_OPEN;
964
965 zone->wp += SUPER_INFO_SECTORS;
966
967 if (sb_zone_is_full(zone)) {
968 /*
969 * No room left to write new superblock. Since
970 * superblock is written with REQ_SYNC, it is safe to
971 * finish the zone now.
972 *
973 * If the write pointer is exactly at the capacity,
974 * explicit ZONE_FINISH is not necessary.
975 */
976 if (zone->wp != zone->start + zone->capacity) {
977 int ret;
978
979 ret = blkdev_zone_mgmt(device->bdev,
980 REQ_OP_ZONE_FINISH, zone->start,
981 zone->len, GFP_NOFS);
982 if (ret)
983 return ret;
984 }
985
986 zone->wp = zone->start + zone->len;
987 zone->cond = BLK_ZONE_COND_FULL;
988 }
989 return 0;
990 }
991
992 /* All the zones are FULL. Should not reach here. */
993 ASSERT(0);
994 return -EIO;
995}
996
997int btrfs_reset_sb_log_zones(struct block_device *bdev, int mirror)
998{
999 sector_t zone_sectors;
1000 sector_t nr_sectors;
1001 u8 zone_sectors_shift;
1002 u32 sb_zone;
1003 u32 nr_zones;
1004
1005 zone_sectors = bdev_zone_sectors(bdev);
1006 zone_sectors_shift = ilog2(zone_sectors);
1007 nr_sectors = bdev_nr_sectors(bdev);
1008 nr_zones = nr_sectors >> zone_sectors_shift;
1009
1010 sb_zone = sb_zone_number(zone_sectors_shift + SECTOR_SHIFT, mirror);
1011 if (sb_zone + 1 >= nr_zones)
1012 return -ENOENT;
1013
1014 return blkdev_zone_mgmt(bdev, REQ_OP_ZONE_RESET,
1015 zone_start_sector(sb_zone, bdev),
1016 zone_sectors * BTRFS_NR_SB_LOG_ZONES, GFP_NOFS);
1017}
1018
1019/*
1020 * Find allocatable zones within a given region.
1021 *
1022 * @device: the device to allocate a region on
1023 * @hole_start: the position of the hole to allocate the region from
1024 * @hole_end: the end of the hole
1025 * @num_bytes: size of the wanted region
1026 * @return: position of an allocatable region
1027 *
1028 * Allocatable region should not contain any superblock locations.
1029 */
1030u64 btrfs_find_allocatable_zones(struct btrfs_device *device, u64 hole_start,
1031 u64 hole_end, u64 num_bytes)
1032{
1033 struct btrfs_zoned_device_info *zinfo = device->zone_info;
1034 const u8 shift = zinfo->zone_size_shift;
1035 u64 nzones = num_bytes >> shift;
1036 u64 pos = hole_start;
1037 u64 begin, end;
1038 bool have_sb;
1039 int i;
1040
1041 ASSERT(IS_ALIGNED(hole_start, zinfo->zone_size));
1042 ASSERT(IS_ALIGNED(num_bytes, zinfo->zone_size));
1043
1044 while (pos < hole_end) {
1045 begin = pos >> shift;
1046 end = begin + nzones;
1047
1048 if (end > zinfo->nr_zones)
1049 return hole_end;
1050
1051 /* Check if zones in the region are all empty */
1052 if (btrfs_dev_is_sequential(device, pos) &&
1053 !bitmap_test_range_all_set(zinfo->empty_zones, begin, nzones)) {
1054 pos += zinfo->zone_size;
1055 continue;
1056 }
1057
1058 have_sb = false;
1059 for (i = 0; i < BTRFS_SUPER_MIRROR_MAX; i++) {
1060 u32 sb_zone;
1061 u64 sb_pos;
1062
1063 sb_zone = sb_zone_number(shift, i);
1064 if (!(end <= sb_zone ||
1065 sb_zone + BTRFS_NR_SB_LOG_ZONES <= begin)) {
1066 have_sb = true;
1067 pos = zone_start_physical(
1068 sb_zone + BTRFS_NR_SB_LOG_ZONES, zinfo);
1069 break;
1070 }
1071
1072 /* We also need to exclude regular superblock positions */
1073 sb_pos = btrfs_sb_offset(i);
1074 if (!(pos + num_bytes <= sb_pos ||
1075 sb_pos + BTRFS_SUPER_INFO_SIZE <= pos)) {
1076 have_sb = true;
1077 pos = ALIGN(sb_pos + BTRFS_SUPER_INFO_SIZE,
1078 zinfo->zone_size);
1079 break;
1080 }
1081 }
1082 if (!have_sb)
1083 break;
1084 }
1085
1086 return pos;
1087}
1088
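/*
 * Mark the zone containing @pos as active, consuming one slot of the
 * device's active zone budget. Returns false if no active zone is left.
 */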
1089static bool btrfs_dev_set_active_zone(struct btrfs_device *device, u64 pos)
1090{
1091 struct btrfs_zoned_device_info *zone_info = device->zone_info;
1092 unsigned int zno = (pos >> zone_info->zone_size_shift);
1093
1094 /* We can use any number of zones */
1095 if (zone_info->max_active_zones == 0)
1096 return true;
1097
1098 if (!test_bit(zno, zone_info->active_zones)) {
1099 /* Active zone left? */
1100 if (atomic_dec_if_positive(&zone_info->active_zones_left) < 0)
1101 return false;
1102 if (test_and_set_bit(zno, zone_info->active_zones)) {
1103 /* Someone already set the bit */
1104 atomic_inc(&zone_info->active_zones_left);
1105 }
1106 }
1107
1108 return true;
1109}
1110
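/* Mark the zone containing @pos as inactive and return its slot to the active zone budget. */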
1111static void btrfs_dev_clear_active_zone(struct btrfs_device *device, u64 pos)
1112{
1113 struct btrfs_zoned_device_info *zone_info = device->zone_info;
1114 unsigned int zno = (pos >> zone_info->zone_size_shift);
1115
1116 /* We can use any number of zones */
1117 if (zone_info->max_active_zones == 0)
1118 return;
1119
1120 if (test_and_clear_bit(zno, zone_info->active_zones))
1121 atomic_inc(&zone_info->active_zones_left);
1122}
1123
1124int btrfs_reset_device_zone(struct btrfs_device *device, u64 physical,
1125 u64 length, u64 *bytes)
1126{
1127 int ret;
1128
1129 *bytes = 0;
1130 ret = blkdev_zone_mgmt(device->bdev, REQ_OP_ZONE_RESET,
1131 physical >> SECTOR_SHIFT, length >> SECTOR_SHIFT,
1132 GFP_NOFS);
1133 if (ret)
1134 return ret;
1135
1136 *bytes = length;
1137 while (length) {
1138 btrfs_dev_set_zone_empty(device, physical);
1139 btrfs_dev_clear_active_zone(device, physical);
1140 physical += device->zone_info->zone_size;
1141 length -= device->zone_info->zone_size;
1142 }
1143
1144 return 0;
1145}
1146
1147int btrfs_ensure_empty_zones(struct btrfs_device *device, u64 start, u64 size)
1148{
1149 struct btrfs_zoned_device_info *zinfo = device->zone_info;
1150 const u8 shift = zinfo->zone_size_shift;
1151 unsigned long begin = start >> shift;
1152 unsigned long nbits = size >> shift;
1153 u64 pos;
1154 int ret;
1155
1156 ASSERT(IS_ALIGNED(start, zinfo->zone_size));
1157 ASSERT(IS_ALIGNED(size, zinfo->zone_size));
1158
1159 if (begin + nbits > zinfo->nr_zones)
1160 return -ERANGE;
1161
1162 /* All the zones are conventional */
1163 if (bitmap_test_range_all_zero(zinfo->seq_zones, begin, nbits))
1164 return 0;
1165
1166 /* All the zones are sequential and empty */
1167 if (bitmap_test_range_all_set(zinfo->seq_zones, begin, nbits) &&
1168 bitmap_test_range_all_set(zinfo->empty_zones, begin, nbits))
1169 return 0;
1170
1171 for (pos = start; pos < start + size; pos += zinfo->zone_size) {
1172 u64 reset_bytes;
1173
1174 if (!btrfs_dev_is_sequential(device, pos) ||
1175 btrfs_dev_is_empty_zone(device, pos))
1176 continue;
1177
1178 /* Free regions should be empty */
1179 btrfs_warn_in_rcu(
1180 device->fs_info,
1181 "zoned: resetting device %s (devid %llu) zone %llu for allocation",
1182 rcu_str_deref(device->name), device->devid, pos >> shift);
1183 WARN_ON_ONCE(1);
1184
1185 ret = btrfs_reset_device_zone(device, pos, zinfo->zone_size,
1186 &reset_bytes);
1187 if (ret)
1188 return ret;
1189 }
1190
1191 return 0;
1192}
1193
1194/*
1195 * Calculate an allocation pointer from the extent allocation information
1196 * for a block group consisting of conventional zones. It points to the
1197 * end of the highest-addressed extent in the block group as the allocation
1198 * offset.
1199 */
1200static int calculate_alloc_pointer(struct btrfs_block_group *cache,
1201 u64 *offset_ret, bool new)
1202{
1203 struct btrfs_fs_info *fs_info = cache->fs_info;
1204 struct btrfs_root *root;
1205 struct btrfs_path *path;
1206 struct btrfs_key key;
1207 struct btrfs_key found_key;
1208 int ret;
1209 u64 length;
1210
1211 /*
1212 * Avoid tree lookups for a new block group, there's no use for it.
1213 * It must always be 0.
1214 *
1215 * Also, we have a lock chain of extent buffer lock -> chunk mutex.
1216 * For a new block group, this function is called from
1217 * btrfs_make_block_group() which is already taking the chunk mutex.
1218 * Thus, we cannot call calculate_alloc_pointer() which takes extent
1219 * buffer locks to avoid deadlock.
1220 */
1221 if (new) {
1222 *offset_ret = 0;
1223 return 0;
1224 }
1225
1226 path = btrfs_alloc_path();
1227 if (!path)
1228 return -ENOMEM;
1229
1230 key.objectid = cache->start + cache->length;
1231 key.type = 0;
1232 key.offset = 0;
1233
1234 root = btrfs_extent_root(fs_info, key.objectid);
1235 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
1236 /* We should not find the exact match */
1237 if (!ret)
1238 ret = -EUCLEAN;
1239 if (ret < 0)
1240 goto out;
1241
1242 ret = btrfs_previous_extent_item(root, path, cache->start);
1243 if (ret) {
1244 if (ret == 1) {
1245 ret = 0;
1246 *offset_ret = 0;
1247 }
1248 goto out;
1249 }
1250
1251 btrfs_item_key_to_cpu(path->nodes[0], &found_key, path->slots[0]);
1252
1253 if (found_key.type == BTRFS_EXTENT_ITEM_KEY)
1254 length = found_key.offset;
1255 else
1256 length = fs_info->nodesize;
1257
1258 if (!(found_key.objectid >= cache->start &&
1259 found_key.objectid + length <= cache->start + cache->length)) {
1260 ret = -EUCLEAN;
1261 goto out;
1262 }
1263 *offset_ret = found_key.objectid + length - cache->start;
1264 ret = 0;
1265
1266out:
1267 btrfs_free_path(path);
1268 return ret;
1269}
1270
1271struct zone_info {
1272 u64 physical;
1273 u64 capacity;
1274 u64 alloc_offset;
1275};
1276
1277static int btrfs_load_zone_info(struct btrfs_fs_info *fs_info, int zone_idx,
1278 struct zone_info *info, unsigned long *active,
1279 struct btrfs_chunk_map *map)
1280{
1281 struct btrfs_dev_replace *dev_replace = &fs_info->dev_replace;
1282 struct btrfs_device *device = map->stripes[zone_idx].dev;
1283 int dev_replace_is_ongoing = 0;
1284 unsigned int nofs_flag;
1285 struct blk_zone zone;
1286 int ret;
1287
1288 info->physical = map->stripes[zone_idx].physical;
1289
1290 if (!device->bdev) {
1291 info->alloc_offset = WP_MISSING_DEV;
1292 return 0;
1293 }
1294
1295 /* Consider a zone as active if we can allow any number of active zones. */
1296 if (!device->zone_info->max_active_zones)
1297 __set_bit(zone_idx, active);
1298
1299 if (!btrfs_dev_is_sequential(device, info->physical)) {
1300 info->alloc_offset = WP_CONVENTIONAL;
1301 return 0;
1302 }
1303
1304 /* This zone will be used for allocation, so mark this zone non-empty. */
1305 btrfs_dev_clear_zone_empty(device, info->physical);
1306
1307 down_read(&dev_replace->rwsem);
1308 dev_replace_is_ongoing = btrfs_dev_replace_is_ongoing(dev_replace);
1309 if (dev_replace_is_ongoing && dev_replace->tgtdev != NULL)
1310 btrfs_dev_clear_zone_empty(dev_replace->tgtdev, info->physical);
1311 up_read(&dev_replace->rwsem);
1312
1313 /*
1314 * The group is mapped to a sequential zone. Get the zone write pointer
1315 * to determine the allocation offset within the zone.
1316 */
1317 WARN_ON(!IS_ALIGNED(info->physical, fs_info->zone_size));
1318 nofs_flag = memalloc_nofs_save();
1319 ret = btrfs_get_dev_zone(device, info->physical, &zone);
1320 memalloc_nofs_restore(nofs_flag);
1321 if (ret) {
1322 if (ret != -EIO && ret != -EOPNOTSUPP)
1323 return ret;
1324 info->alloc_offset = WP_MISSING_DEV;
1325 return 0;
1326 }
1327
1328 if (zone.type == BLK_ZONE_TYPE_CONVENTIONAL) {
1329 btrfs_err_in_rcu(fs_info,
1330 "zoned: unexpected conventional zone %llu on device %s (devid %llu)",
1331 zone.start << SECTOR_SHIFT, rcu_str_deref(device->name),
1332 device->devid);
1333 return -EIO;
1334 }
1335
1336 info->capacity = (zone.capacity << SECTOR_SHIFT);
1337
1338 switch (zone.cond) {
1339 case BLK_ZONE_COND_OFFLINE:
1340 case BLK_ZONE_COND_READONLY:
1341 btrfs_err(fs_info,
1342 "zoned: offline/readonly zone %llu on device %s (devid %llu)",
1343 (info->physical >> device->zone_info->zone_size_shift),
1344 rcu_str_deref(device->name), device->devid);
1345 info->alloc_offset = WP_MISSING_DEV;
1346 break;
1347 case BLK_ZONE_COND_EMPTY:
1348 info->alloc_offset = 0;
1349 break;
1350 case BLK_ZONE_COND_FULL:
1351 info->alloc_offset = info->capacity;
1352 break;
1353 default:
1354 /* Partially used zone. */
1355 info->alloc_offset = ((zone.wp - zone.start) << SECTOR_SHIFT);
1356 __set_bit(zone_idx, active);
1357 break;
1358 }
1359
1360 return 0;
1361}
1362
1363static int btrfs_load_block_group_single(struct btrfs_block_group *bg,
1364 struct zone_info *info,
1365 unsigned long *active)
1366{
1367 if (info->alloc_offset == WP_MISSING_DEV) {
1368 btrfs_err(bg->fs_info,
1369 "zoned: cannot recover write pointer for zone %llu",
1370 info->physical);
1371 return -EIO;
1372 }
1373
1374 bg->alloc_offset = info->alloc_offset;
1375 bg->zone_capacity = info->capacity;
1376 if (test_bit(0, active))
1377 set_bit(BLOCK_GROUP_FLAG_ZONE_IS_ACTIVE, &bg->runtime_flags);
1378 return 0;
1379}
1380
1381static int btrfs_load_block_group_dup(struct btrfs_block_group *bg,
1382 struct btrfs_chunk_map *map,
1383 struct zone_info *zone_info,
1384 unsigned long *active)
1385{
1386 struct btrfs_fs_info *fs_info = bg->fs_info;
1387
1388 if ((map->type & BTRFS_BLOCK_GROUP_DATA) && !fs_info->stripe_root) {
1389 btrfs_err(fs_info, "zoned: data DUP profile needs raid-stripe-tree");
1390 return -EINVAL;
1391 }
1392
1393 if (zone_info[0].alloc_offset == WP_MISSING_DEV) {
1394 btrfs_err(bg->fs_info,
1395 "zoned: cannot recover write pointer for zone %llu",
1396 zone_info[0].physical);
1397 return -EIO;
1398 }
1399 if (zone_info[1].alloc_offset == WP_MISSING_DEV) {
1400 btrfs_err(bg->fs_info,
1401 "zoned: cannot recover write pointer for zone %llu",
1402 zone_info[1].physical);
1403 return -EIO;
1404 }
1405 if (zone_info[0].alloc_offset != zone_info[1].alloc_offset) {
1406 btrfs_err(bg->fs_info,
1407 "zoned: write pointer offset mismatch of zones in DUP profile");
1408 return -EIO;
1409 }
1410
1411 if (test_bit(0, active) != test_bit(1, active)) {
1412 if (!btrfs_zone_activate(bg))
1413 return -EIO;
1414 } else if (test_bit(0, active)) {
1415 set_bit(BLOCK_GROUP_FLAG_ZONE_IS_ACTIVE, &bg->runtime_flags);
1416 }
1417
1418 bg->alloc_offset = zone_info[0].alloc_offset;
1419 bg->zone_capacity = min(zone_info[0].capacity, zone_info[1].capacity);
1420 return 0;
1421}
1422
1423static int btrfs_load_block_group_raid1(struct btrfs_block_group *bg,
1424 struct btrfs_chunk_map *map,
1425 struct zone_info *zone_info,
1426 unsigned long *active)
1427{
1428 struct btrfs_fs_info *fs_info = bg->fs_info;
1429 int i;
1430
1431 if ((map->type & BTRFS_BLOCK_GROUP_DATA) && !fs_info->stripe_root) {
1432 btrfs_err(fs_info, "zoned: data %s needs raid-stripe-tree",
1433 btrfs_bg_type_to_raid_name(map->type));
1434 return -EINVAL;
1435 }
1436
1437 for (i = 0; i < map->num_stripes; i++) {
1438 if (zone_info[i].alloc_offset == WP_MISSING_DEV ||
1439 zone_info[i].alloc_offset == WP_CONVENTIONAL)
1440 continue;
1441
1442 if ((zone_info[0].alloc_offset != zone_info[i].alloc_offset) &&
1443 !btrfs_test_opt(fs_info, DEGRADED)) {
1444 btrfs_err(fs_info,
1445 "zoned: write pointer offset mismatch of zones in %s profile",
1446 btrfs_bg_type_to_raid_name(map->type));
1447 return -EIO;
1448 }
1449 if (test_bit(0, active) != test_bit(i, active)) {
1450 if (!btrfs_test_opt(fs_info, DEGRADED) &&
1451 !btrfs_zone_activate(bg)) {
1452 return -EIO;
1453 }
1454 } else {
1455 if (test_bit(0, active))
1456 set_bit(BLOCK_GROUP_FLAG_ZONE_IS_ACTIVE, &bg->runtime_flags);
1457 }
1458 /* In case a device is missing we have a cap of 0, so don't use it. */
1459 bg->zone_capacity = min_not_zero(zone_info[0].capacity,
1460 zone_info[1].capacity);
1461 }
1462
1463 if (zone_info[0].alloc_offset != WP_MISSING_DEV)
1464 bg->alloc_offset = zone_info[0].alloc_offset;
1465 else
1466 bg->alloc_offset = zone_info[i - 1].alloc_offset;
1467
1468 return 0;
1469}
1470
1471static int btrfs_load_block_group_raid0(struct btrfs_block_group *bg,
1472 struct btrfs_chunk_map *map,
1473 struct zone_info *zone_info,
1474 unsigned long *active)
1475{
1476 struct btrfs_fs_info *fs_info = bg->fs_info;
1477
1478 if ((map->type & BTRFS_BLOCK_GROUP_DATA) && !fs_info->stripe_root) {
1479 btrfs_err(fs_info, "zoned: data %s needs raid-stripe-tree",
1480 btrfs_bg_type_to_raid_name(map->type));
1481 return -EINVAL;
1482 }
1483
1484 for (int i = 0; i < map->num_stripes; i++) {
1485 if (zone_info[i].alloc_offset == WP_MISSING_DEV ||
1486 zone_info[i].alloc_offset == WP_CONVENTIONAL)
1487 continue;
1488
1489 if (test_bit(0, active) != test_bit(i, active)) {
1490 if (!btrfs_zone_activate(bg))
1491 return -EIO;
1492 } else {
1493 if (test_bit(0, active))
1494 set_bit(BLOCK_GROUP_FLAG_ZONE_IS_ACTIVE, &bg->runtime_flags);
1495 }
1496 bg->zone_capacity += zone_info[i].capacity;
1497 bg->alloc_offset += zone_info[i].alloc_offset;
1498 }
1499
1500 return 0;
1501}
1502
1503static int btrfs_load_block_group_raid10(struct btrfs_block_group *bg,
1504 struct btrfs_chunk_map *map,
1505 struct zone_info *zone_info,
1506 unsigned long *active)
1507{
1508 struct btrfs_fs_info *fs_info = bg->fs_info;
1509
1510 if ((map->type & BTRFS_BLOCK_GROUP_DATA) && !fs_info->stripe_root) {
1511 btrfs_err(fs_info, "zoned: data %s needs raid-stripe-tree",
1512 btrfs_bg_type_to_raid_name(map->type));
1513 return -EINVAL;
1514 }
1515
1516 for (int i = 0; i < map->num_stripes; i++) {
1517 if (zone_info[i].alloc_offset == WP_MISSING_DEV ||
1518 zone_info[i].alloc_offset == WP_CONVENTIONAL)
1519 continue;
1520
1521 if (test_bit(0, active) != test_bit(i, active)) {
1522 if (!btrfs_zone_activate(bg))
1523 return -EIO;
1524 } else {
1525 if (test_bit(0, active))
1526 set_bit(BLOCK_GROUP_FLAG_ZONE_IS_ACTIVE, &bg->runtime_flags);
1527 }
1528
1529 if ((i % map->sub_stripes) == 0) {
1530 bg->zone_capacity += zone_info[i].capacity;
1531 bg->alloc_offset += zone_info[i].alloc_offset;
1532 }
1533 }
1534
1535 return 0;
1536}
1537
1538int btrfs_load_block_group_zone_info(struct btrfs_block_group *cache, bool new)
1539{
1540 struct btrfs_fs_info *fs_info = cache->fs_info;
1541 struct btrfs_chunk_map *map;
1542 u64 logical = cache->start;
1543 u64 length = cache->length;
1544 struct zone_info *zone_info = NULL;
1545 int ret;
1546 int i;
1547 unsigned long *active = NULL;
1548 u64 last_alloc = 0;
1549 u32 num_sequential = 0, num_conventional = 0;
1550
1551 if (!btrfs_is_zoned(fs_info))
1552 return 0;
1553
1554 /* Sanity check */
1555 if (!IS_ALIGNED(length, fs_info->zone_size)) {
1556 btrfs_err(fs_info,
1557 "zoned: block group %llu len %llu unaligned to zone size %llu",
1558 logical, length, fs_info->zone_size);
1559 return -EIO;
1560 }
1561
1562 map = btrfs_find_chunk_map(fs_info, logical, length);
1563 if (!map)
1564 return -EINVAL;
1565
1566 cache->physical_map = btrfs_clone_chunk_map(map, GFP_NOFS);
1567 if (!cache->physical_map) {
1568 ret = -ENOMEM;
1569 goto out;
1570 }
1571
1572 zone_info = kcalloc(map->num_stripes, sizeof(*zone_info), GFP_NOFS);
1573 if (!zone_info) {
1574 ret = -ENOMEM;
1575 goto out;
1576 }
1577
1578 active = bitmap_zalloc(map->num_stripes, GFP_NOFS);
1579 if (!active) {
1580 ret = -ENOMEM;
1581 goto out;
1582 }
1583
1584 for (i = 0; i < map->num_stripes; i++) {
1585 ret = btrfs_load_zone_info(fs_info, i, &zone_info[i], active, map);
1586 if (ret)
1587 goto out;
1588
1589 if (zone_info[i].alloc_offset == WP_CONVENTIONAL)
1590 num_conventional++;
1591 else
1592 num_sequential++;
1593 }
1594
1595 if (num_sequential > 0)
1596 set_bit(BLOCK_GROUP_FLAG_SEQUENTIAL_ZONE, &cache->runtime_flags);
1597
1598 if (num_conventional > 0) {
1599 /* Zone capacity is always zone size in emulation */
1600 cache->zone_capacity = cache->length;
1601 ret = calculate_alloc_pointer(cache, &last_alloc, new);
1602 if (ret) {
1603 btrfs_err(fs_info,
1604 "zoned: failed to determine allocation offset of bg %llu",
1605 cache->start);
1606 goto out;
1607 } else if (map->num_stripes == num_conventional) {
1608 cache->alloc_offset = last_alloc;
1609 set_bit(BLOCK_GROUP_FLAG_ZONE_IS_ACTIVE, &cache->runtime_flags);
1610 goto out;
1611 }
1612 }
1613
1614 switch (map->type & BTRFS_BLOCK_GROUP_PROFILE_MASK) {
1615 case 0: /* single */
1616 ret = btrfs_load_block_group_single(cache, &zone_info[0], active);
1617 break;
1618 case BTRFS_BLOCK_GROUP_DUP:
1619 ret = btrfs_load_block_group_dup(cache, map, zone_info, active);
1620 break;
1621 case BTRFS_BLOCK_GROUP_RAID1:
1622 case BTRFS_BLOCK_GROUP_RAID1C3:
1623 case BTRFS_BLOCK_GROUP_RAID1C4:
1624 ret = btrfs_load_block_group_raid1(cache, map, zone_info, active);
1625 break;
1626 case BTRFS_BLOCK_GROUP_RAID0:
1627 ret = btrfs_load_block_group_raid0(cache, map, zone_info, active);
1628 break;
1629 case BTRFS_BLOCK_GROUP_RAID10:
1630 ret = btrfs_load_block_group_raid10(cache, map, zone_info, active);
1631 break;
1632 case BTRFS_BLOCK_GROUP_RAID5:
1633 case BTRFS_BLOCK_GROUP_RAID6:
1634 default:
1635 btrfs_err(fs_info, "zoned: profile %s not yet supported",
1636 btrfs_bg_type_to_raid_name(map->type));
1637 ret = -EINVAL;
1638 goto out;
1639 }
1640
1641out:
1642 if (cache->alloc_offset > cache->zone_capacity) {
1643 btrfs_err(fs_info,
1644"zoned: invalid write pointer %llu (larger than zone capacity %llu) in block group %llu",
1645 cache->alloc_offset, cache->zone_capacity,
1646 cache->start);
1647 ret = -EIO;
1648 }
1649
1650 /* An extent is allocated after the write pointer */
1651 if (!ret && num_conventional && last_alloc > cache->alloc_offset) {
1652 btrfs_err(fs_info,
1653 "zoned: got wrong write pointer in BG %llu: %llu > %llu",
1654 logical, last_alloc, cache->alloc_offset);
1655 ret = -EIO;
1656 }
1657
1658 if (!ret) {
1659 cache->meta_write_pointer = cache->alloc_offset + cache->start;
1660 if (test_bit(BLOCK_GROUP_FLAG_ZONE_IS_ACTIVE, &cache->runtime_flags)) {
1661 btrfs_get_block_group(cache);
1662 spin_lock(&fs_info->zone_active_bgs_lock);
1663 list_add_tail(&cache->active_bg_list,
1664 &fs_info->zone_active_bgs);
1665 spin_unlock(&fs_info->zone_active_bgs_lock);
1666 }
1667 } else {
1668 btrfs_free_chunk_map(cache->physical_map);
1669 cache->physical_map = NULL;
1670 }
1671 bitmap_free(active);
1672 kfree(zone_info);
1673
1674 return ret;
1675}
1676
1677void btrfs_calc_zone_unusable(struct btrfs_block_group *cache)
1678{
1679 u64 unusable, free;
1680
1681 if (!btrfs_is_zoned(cache->fs_info))
1682 return;
1683
1684 WARN_ON(cache->bytes_super != 0);
1685 unusable = (cache->alloc_offset - cache->used) +
1686 (cache->length - cache->zone_capacity);
1687 free = cache->zone_capacity - cache->alloc_offset;
1688
1689 /* We only need ->free_space in ALLOC_SEQ block groups */
1690 cache->cached = BTRFS_CACHE_FINISHED;
1691 cache->free_space_ctl->free_space = free;
1692 cache->zone_unusable = unusable;
1693}
1694
1695bool btrfs_use_zone_append(struct btrfs_bio *bbio)
1696{
1697 u64 start = (bbio->bio.bi_iter.bi_sector << SECTOR_SHIFT);
1698 struct btrfs_inode *inode = bbio->inode;
1699 struct btrfs_fs_info *fs_info = bbio->fs_info;
1700 struct btrfs_block_group *cache;
1701 bool ret = false;
1702
1703 if (!btrfs_is_zoned(fs_info))
1704 return false;
1705
1706 if (!inode || !is_data_inode(&inode->vfs_inode))
1707 return false;
1708
1709 if (btrfs_op(&bbio->bio) != BTRFS_MAP_WRITE)
1710 return false;
1711
1712 /*
1713 * Using REQ_OP_ZONE_APPEND for relocation can break assumptions on the
1714 * extent layout the relocation code has.
1715 * Furthermore, we have set aside our own block group from which only the
1716 * relocation "process" can allocate and make sure only one process at a
1717 * time can add pages to an extent that gets relocated, so it's safe to
1718 * use regular REQ_OP_WRITE for this special case.
1719 */
1720 if (btrfs_is_data_reloc_root(inode->root))
1721 return false;
1722
1723 cache = btrfs_lookup_block_group(fs_info, start);
1724 ASSERT(cache);
1725 if (!cache)
1726 return false;
1727
1728 ret = !!test_bit(BLOCK_GROUP_FLAG_SEQUENTIAL_ZONE, &cache->runtime_flags);
1729 btrfs_put_block_group(cache);
1730
1731 return ret;
1732}
1733
1734void btrfs_record_physical_zoned(struct btrfs_bio *bbio)
1735{
1736 const u64 physical = bbio->bio.bi_iter.bi_sector << SECTOR_SHIFT;
1737 struct btrfs_ordered_sum *sum = bbio->sums;
1738
1739 if (physical < bbio->orig_physical)
1740 sum->logical -= bbio->orig_physical - physical;
1741 else
1742 sum->logical += physical - bbio->orig_physical;
1743}
1744
1745static void btrfs_rewrite_logical_zoned(struct btrfs_ordered_extent *ordered,
1746 u64 logical)
1747{
1748 struct extent_map_tree *em_tree = &BTRFS_I(ordered->inode)->extent_tree;
1749 struct extent_map *em;
1750
1751 ordered->disk_bytenr = logical;
1752
1753 write_lock(&em_tree->lock);
1754 em = search_extent_mapping(em_tree, ordered->file_offset,
1755 ordered->num_bytes);
1756 em->block_start = logical;
1757 free_extent_map(em);
1758 write_unlock(&em_tree->lock);
1759}
1760
1761static bool btrfs_zoned_split_ordered(struct btrfs_ordered_extent *ordered,
1762 u64 logical, u64 len)
1763{
1764 struct btrfs_ordered_extent *new;
1765
1766 if (!test_bit(BTRFS_ORDERED_NOCOW, &ordered->flags) &&
1767 split_extent_map(BTRFS_I(ordered->inode), ordered->file_offset,
1768 ordered->num_bytes, len, logical))
1769 return false;
1770
1771 new = btrfs_split_ordered_extent(ordered, len);
1772 if (IS_ERR(new))
1773 return false;
1774 new->disk_bytenr = logical;
1775 btrfs_finish_one_ordered(new);
1776 return true;
1777}
1778
1779void btrfs_finish_ordered_zoned(struct btrfs_ordered_extent *ordered)
1780{
1781 struct btrfs_inode *inode = BTRFS_I(ordered->inode);
1782 struct btrfs_fs_info *fs_info = inode->root->fs_info;
1783 struct btrfs_ordered_sum *sum;
1784 u64 logical, len;
1785
1786 /*
1787 * A write to a pre-allocated region is for data relocation, and so
1788 * it should use the WRITE operation. No split/rewrite is necessary.
1789 */
1790 if (test_bit(BTRFS_ORDERED_PREALLOC, &ordered->flags))
1791 return;
1792
1793 ASSERT(!list_empty(&ordered->list));
1794 /* The list could only be empty in the pre-alloc case, which returned above. */
1795 sum = list_first_entry(&ordered->list, struct btrfs_ordered_sum, list);
1796 logical = sum->logical;
1797 len = sum->len;
1798
1799 while (len < ordered->disk_num_bytes) {
1800 sum = list_next_entry(sum, list);
1801 if (sum->logical == logical + len) {
1802 len += sum->len;
1803 continue;
1804 }
1805 if (!btrfs_zoned_split_ordered(ordered, logical, len)) {
1806 set_bit(BTRFS_ORDERED_IOERR, &ordered->flags);
1807 btrfs_err(fs_info, "failed to split ordered extent");
1808 goto out;
1809 }
1810 logical = sum->logical;
1811 len = sum->len;
1812 }
1813
1814 if (ordered->disk_bytenr != logical)
1815 btrfs_rewrite_logical_zoned(ordered, logical);
1816
1817out:
1818 /*
1819 * If we end up here for nodatasum I/O, the btrfs_ordered_sum structures
1820 * were allocated by btrfs_alloc_dummy_sum only to record the logical
1821 * addresses and don't contain actual checksums. We thus must free them
1822 * here so that we don't attempt to log the csums later.
1823 */
1824 if ((inode->flags & BTRFS_INODE_NODATASUM) ||
1825 test_bit(BTRFS_FS_STATE_NO_CSUMS, &fs_info->fs_state)) {
1826 while ((sum = list_first_entry_or_null(&ordered->list,
1827 typeof(*sum), list))) {
1828 list_del(&sum->list);
1829 kfree(sum);
1830 }
1831 }
1832}
1833
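/*
 * Ensure @ctx->zoned_bg is activated for metadata/system writes, finishing
 * and replacing the currently tracked active block group (*active_bg) when
 * needed. Returns false if the block group cannot be activated.
 */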
1834static bool check_bg_is_active(struct btrfs_eb_write_context *ctx,
1835 struct btrfs_block_group **active_bg)
1836{
1837 const struct writeback_control *wbc = ctx->wbc;
1838 struct btrfs_block_group *block_group = ctx->zoned_bg;
1839 struct btrfs_fs_info *fs_info = block_group->fs_info;
1840
1841 if (test_bit(BLOCK_GROUP_FLAG_ZONE_IS_ACTIVE, &block_group->runtime_flags))
1842 return true;
1843
1844 if (fs_info->treelog_bg == block_group->start) {
1845 if (!btrfs_zone_activate(block_group)) {
1846 int ret_fin = btrfs_zone_finish_one_bg(fs_info);
1847
1848 if (ret_fin != 1 || !btrfs_zone_activate(block_group))
1849 return false;
1850 }
1851 } else if (*active_bg != block_group) {
1852 struct btrfs_block_group *tgt = *active_bg;
1853
1854 /* zoned_meta_io_lock protects fs_info->active_{meta,system}_bg. */
1855 lockdep_assert_held(&fs_info->zoned_meta_io_lock);
1856
1857 if (tgt) {
1858 /*
1859 * If there are unsent IOs left in the allocated area, we
1860 * cannot wait for them as it may cause a deadlock.
1861 */
1862 if (tgt->meta_write_pointer < tgt->start + tgt->alloc_offset) {
1863 if (wbc->sync_mode == WB_SYNC_NONE ||
1864 (wbc->sync_mode == WB_SYNC_ALL && !wbc->for_sync))
1865 return false;
1866 }
1867
1868 /* Pivot active metadata/system block group. */
1869 btrfs_zoned_meta_io_unlock(fs_info);
1870 wait_eb_writebacks(tgt);
1871 do_zone_finish(tgt, true);
1872 btrfs_zoned_meta_io_lock(fs_info);
1873 if (*active_bg == tgt) {
1874 btrfs_put_block_group(tgt);
1875 *active_bg = NULL;
1876 }
1877 }
1878 if (!btrfs_zone_activate(block_group))
1879 return false;
1880 if (*active_bg != block_group) {
1881 ASSERT(*active_bg == NULL);
1882 *active_bg = block_group;
1883 btrfs_get_block_group(block_group);
1884 }
1885 }
1886
1887 return true;
1888}
1889
1890/*
1891 * Check if @ctx->eb is aligned to the write pointer.
1892 *
1893 * Return:
1894 * 0: @ctx->eb is at the write pointer. You can write it.
1895 * -EAGAIN: There is a hole. The caller should handle the case.
1896 * -EBUSY: There is a hole, but the caller can just bail out.
1897 */
1898int btrfs_check_meta_write_pointer(struct btrfs_fs_info *fs_info,
1899 struct btrfs_eb_write_context *ctx)
1900{
1901 const struct writeback_control *wbc = ctx->wbc;
1902 const struct extent_buffer *eb = ctx->eb;
1903 struct btrfs_block_group *block_group = ctx->zoned_bg;
1904
1905 if (!btrfs_is_zoned(fs_info))
1906 return 0;
1907
1908 if (block_group) {
1909 if (block_group->start > eb->start ||
1910 block_group->start + block_group->length <= eb->start) {
1911 btrfs_put_block_group(block_group);
1912 block_group = NULL;
1913 ctx->zoned_bg = NULL;
1914 }
1915 }
1916
1917 if (!block_group) {
1918 block_group = btrfs_lookup_block_group(fs_info, eb->start);
1919 if (!block_group)
1920 return 0;
1921 ctx->zoned_bg = block_group;
1922 }
1923
1924 if (block_group->meta_write_pointer == eb->start) {
1925 struct btrfs_block_group **tgt;
1926
1927 if (!test_bit(BTRFS_FS_ACTIVE_ZONE_TRACKING, &fs_info->flags))
1928 return 0;
1929
1930 if (block_group->flags & BTRFS_BLOCK_GROUP_SYSTEM)
1931 tgt = &fs_info->active_system_bg;
1932 else
1933 tgt = &fs_info->active_meta_bg;
1934 if (check_bg_is_active(ctx, tgt))
1935 return 0;
1936 }
1937
1938 /*
1939	 * Since we may release fs_info->zoned_meta_io_lock, someone else may have
1940	 * already started writing this eb. In that case, we can just bail out.
1941 */
1942 if (block_group->meta_write_pointer > eb->start)
1943 return -EBUSY;
1944
1945	/* If for_sync, this hole will be filled by a transaction commit. */
1946 if (wbc->sync_mode == WB_SYNC_ALL && !wbc->for_sync)
1947 return -EAGAIN;
1948 return -EBUSY;
1949}
1950
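/*
 * Zero out @length bytes at @physical on @device by issuing a zeroout
 * request. Returns -EOPNOTSUPP if @physical is not on a sequential zone.
 */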
1951int btrfs_zoned_issue_zeroout(struct btrfs_device *device, u64 physical, u64 length)
1952{
1953 if (!btrfs_dev_is_sequential(device, physical))
1954 return -EOPNOTSUPP;
1955
1956 return blkdev_issue_zeroout(device->bdev, physical >> SECTOR_SHIFT,
1957 length >> SECTOR_SHIFT, GFP_NOFS, 0);
1958}
1959
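/*
 * Read zone information for the zone containing @logical. Try each mirror in
 * turn, skipping missing or failing devices, and report the first zone that
 * can be read into @zone. RAID56 profiles are not supported.
 */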
1960static int read_zone_info(struct btrfs_fs_info *fs_info, u64 logical,
1961 struct blk_zone *zone)
1962{
1963 struct btrfs_io_context *bioc = NULL;
1964 u64 mapped_length = PAGE_SIZE;
1965 unsigned int nofs_flag;
1966 int nmirrors;
1967 int i, ret;
1968
1969 ret = btrfs_map_block(fs_info, BTRFS_MAP_GET_READ_MIRRORS, logical,
1970 &mapped_length, &bioc, NULL, NULL);
1971 if (ret || !bioc || mapped_length < PAGE_SIZE) {
1972 ret = -EIO;
1973 goto out_put_bioc;
1974 }
1975
1976 if (bioc->map_type & BTRFS_BLOCK_GROUP_RAID56_MASK) {
1977 ret = -EINVAL;
1978 goto out_put_bioc;
1979 }
1980
1981 nofs_flag = memalloc_nofs_save();
1982 nmirrors = (int)bioc->num_stripes;
1983 for (i = 0; i < nmirrors; i++) {
1984 u64 physical = bioc->stripes[i].physical;
1985 struct btrfs_device *dev = bioc->stripes[i].dev;
1986
1987 /* Missing device */
1988 if (!dev->bdev)
1989 continue;
1990
1991 ret = btrfs_get_dev_zone(dev, physical, zone);
1992 /* Failing device */
1993 if (ret == -EIO || ret == -EOPNOTSUPP)
1994 continue;
1995 break;
1996 }
1997 memalloc_nofs_restore(nofs_flag);
1998out_put_bioc:
1999 btrfs_put_bioc(bioc);
2000 return ret;
2001}
2002
2003/*
2004 * Synchronize the write pointer in the zone at @physical_start on @tgt_dev by
2005 * writing zeros from @physical_pos up to the write pointer of the dev-replace
2006 * source device.
2007 */
2008int btrfs_sync_zone_write_pointer(struct btrfs_device *tgt_dev, u64 logical,
2009 u64 physical_start, u64 physical_pos)
2010{
2011 struct btrfs_fs_info *fs_info = tgt_dev->fs_info;
2012 struct blk_zone zone;
2013 u64 length;
2014 u64 wp;
2015 int ret;
2016
2017 if (!btrfs_dev_is_sequential(tgt_dev, physical_pos))
2018 return 0;
2019
2020 ret = read_zone_info(fs_info, logical, &zone);
2021 if (ret)
2022 return ret;
2023
2024 wp = physical_start + ((zone.wp - zone.start) << SECTOR_SHIFT);
2025
2026 if (physical_pos == wp)
2027 return 0;
2028
2029 if (physical_pos > wp)
2030 return -EUCLEAN;
2031
2032 length = wp - physical_pos;
2033 return btrfs_zoned_issue_zeroout(tgt_dev, physical_pos, length);
2034}
2035
2036/*
2037 * Activate block group and underlying device zones
2038 *
2039 * @block_group: the block group to activate
2040 *
2041 * Return: true on success, false otherwise
2042 */
2043bool btrfs_zone_activate(struct btrfs_block_group *block_group)
2044{
2045 struct btrfs_fs_info *fs_info = block_group->fs_info;
2046 struct btrfs_chunk_map *map;
2047 struct btrfs_device *device;
2048 u64 physical;
2049 const bool is_data = (block_group->flags & BTRFS_BLOCK_GROUP_DATA);
2050 bool ret;
2051 int i;
2052
2053 if (!btrfs_is_zoned(block_group->fs_info))
2054 return true;
2055
2056 map = block_group->physical_map;
2057
2058 spin_lock(&fs_info->zone_active_bgs_lock);
2059 spin_lock(&block_group->lock);
2060 if (test_bit(BLOCK_GROUP_FLAG_ZONE_IS_ACTIVE, &block_group->runtime_flags)) {
2061 ret = true;
2062 goto out_unlock;
2063 }
2064
2065 /* No space left */
2066 if (btrfs_zoned_bg_is_full(block_group)) {
2067 ret = false;
2068 goto out_unlock;
2069 }
2070
2071 for (i = 0; i < map->num_stripes; i++) {
2072 struct btrfs_zoned_device_info *zinfo;
2073 int reserved = 0;
2074
2075 device = map->stripes[i].dev;
2076 physical = map->stripes[i].physical;
2077 zinfo = device->zone_info;
2078
2079 if (zinfo->max_active_zones == 0)
2080 continue;
2081
2082 if (is_data)
2083 reserved = zinfo->reserved_active_zones;
2084 /*
2085 * For the data block group, leave active zones for one
2086 * metadata block group and one system block group.
2087 */
2088 if (atomic_read(&zinfo->active_zones_left) <= reserved) {
2089 ret = false;
2090 goto out_unlock;
2091 }
2092
2093 if (!btrfs_dev_set_active_zone(device, physical)) {
2094 /* Cannot activate the zone */
2095 ret = false;
2096 goto out_unlock;
2097 }
2098 if (!is_data)
2099 zinfo->reserved_active_zones--;
2100 }
2101
2102 /* Successfully activated all the zones */
2103 set_bit(BLOCK_GROUP_FLAG_ZONE_IS_ACTIVE, &block_group->runtime_flags);
2104 spin_unlock(&block_group->lock);
2105
2106 /* For the active block group list */
2107 btrfs_get_block_group(block_group);
2108 list_add_tail(&block_group->active_bg_list, &fs_info->zone_active_bgs);
2109 spin_unlock(&fs_info->zone_active_bgs_lock);
2110
2111 return true;
2112
2113out_unlock:
2114 spin_unlock(&block_group->lock);
2115 spin_unlock(&fs_info->zone_active_bgs_lock);
2116 return ret;
2117}
2118
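/*
 * Wait for writeback of all extent buffers that live inside @block_group,
 * walking the buffer radix tree and dropping the RCU lock around each wait.
 */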
2119static void wait_eb_writebacks(struct btrfs_block_group *block_group)
2120{
2121 struct btrfs_fs_info *fs_info = block_group->fs_info;
2122 const u64 end = block_group->start + block_group->length;
2123 struct radix_tree_iter iter;
2124 struct extent_buffer *eb;
2125 void __rcu **slot;
2126
2127 rcu_read_lock();
2128 radix_tree_for_each_slot(slot, &fs_info->buffer_radix, &iter,
2129 block_group->start >> fs_info->sectorsize_bits) {
2130 eb = radix_tree_deref_slot(slot);
2131 if (!eb)
2132 continue;
2133 if (radix_tree_deref_retry(eb)) {
2134 slot = radix_tree_iter_retry(&iter);
2135 continue;
2136 }
2137
2138 if (eb->start < block_group->start)
2139 continue;
2140 if (eb->start >= end)
2141 break;
2142
2143 slot = radix_tree_iter_resume(slot, &iter);
2144 rcu_read_unlock();
2145 wait_on_extent_buffer_writeback(eb);
2146 rcu_read_lock();
2147 }
2148 rcu_read_unlock();
2149}
2150
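/*
 * Deactivate @block_group: when @fully_written is false, first mark it
 * read-only and wait for outstanding writes (reservations, ordered extents
 * and, for metadata, extent buffer writeback). Then issue REQ_OP_ZONE_FINISH
 * to every underlying device zone and drop the block group from the active
 * list.
 *
 * Return: 0 on success (or if already inactive), -EAGAIN if unwritten
 * allocated space, reserved space, or an ongoing data relocation prevents
 * finishing, or a negative errno from the zone management command.
 */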
2151static int do_zone_finish(struct btrfs_block_group *block_group, bool fully_written)
2152{
2153 struct btrfs_fs_info *fs_info = block_group->fs_info;
2154 struct btrfs_chunk_map *map;
2155 const bool is_metadata = (block_group->flags &
2156 (BTRFS_BLOCK_GROUP_METADATA | BTRFS_BLOCK_GROUP_SYSTEM));
2157 int ret = 0;
2158 int i;
2159
2160 spin_lock(&block_group->lock);
2161 if (!test_bit(BLOCK_GROUP_FLAG_ZONE_IS_ACTIVE, &block_group->runtime_flags)) {
2162 spin_unlock(&block_group->lock);
2163 return 0;
2164 }
2165
2166 /* Check if we have unwritten allocated space */
2167 if (is_metadata &&
2168 block_group->start + block_group->alloc_offset > block_group->meta_write_pointer) {
2169 spin_unlock(&block_group->lock);
2170 return -EAGAIN;
2171 }
2172
2173 /*
2174 * If we are sure that the block group is full (= no more room left for
2175 * new allocation) and the IO for the last usable block is completed, we
2176 * don't need to wait for the other IOs. This holds because we ensure
2177 * the sequential IO submissions using the ZONE_APPEND command for data
2178 * and block_group->meta_write_pointer for metadata.
2179 */
2180 if (!fully_written) {
2181 if (test_bit(BLOCK_GROUP_FLAG_ZONED_DATA_RELOC, &block_group->runtime_flags)) {
2182 spin_unlock(&block_group->lock);
2183 return -EAGAIN;
2184 }
2185 spin_unlock(&block_group->lock);
2186
2187 ret = btrfs_inc_block_group_ro(block_group, false);
2188 if (ret)
2189 return ret;
2190
2191 /* Ensure all writes in this block group finish */
2192 btrfs_wait_block_group_reservations(block_group);
2193 /* No need to wait for NOCOW writers. Zoned mode does not allow that */
2194 btrfs_wait_ordered_roots(fs_info, U64_MAX, block_group->start,
2195 block_group->length);
2196 /* Wait for extent buffers to be written. */
2197 if (is_metadata)
2198 wait_eb_writebacks(block_group);
2199
2200 spin_lock(&block_group->lock);
2201
2202 /*
2203 * Bail out if someone already deactivated the block group, or
2204 * allocated space is left in the block group.
2205 */
2206 if (!test_bit(BLOCK_GROUP_FLAG_ZONE_IS_ACTIVE,
2207 &block_group->runtime_flags)) {
2208 spin_unlock(&block_group->lock);
2209 btrfs_dec_block_group_ro(block_group);
2210 return 0;
2211 }
2212
2213 if (block_group->reserved ||
2214 test_bit(BLOCK_GROUP_FLAG_ZONED_DATA_RELOC,
2215 &block_group->runtime_flags)) {
2216 spin_unlock(&block_group->lock);
2217 btrfs_dec_block_group_ro(block_group);
2218 return -EAGAIN;
2219 }
2220 }
2221
2222 clear_bit(BLOCK_GROUP_FLAG_ZONE_IS_ACTIVE, &block_group->runtime_flags);
2223 block_group->alloc_offset = block_group->zone_capacity;
2224 if (block_group->flags & (BTRFS_BLOCK_GROUP_METADATA | BTRFS_BLOCK_GROUP_SYSTEM))
2225 block_group->meta_write_pointer = block_group->start +
2226 block_group->zone_capacity;
2227 block_group->free_space_ctl->free_space = 0;
2228 btrfs_clear_treelog_bg(block_group);
2229 btrfs_clear_data_reloc_bg(block_group);
2230 spin_unlock(&block_group->lock);
2231
2232 map = block_group->physical_map;
2233 for (i = 0; i < map->num_stripes; i++) {
2234 struct btrfs_device *device = map->stripes[i].dev;
2235 const u64 physical = map->stripes[i].physical;
2236 struct btrfs_zoned_device_info *zinfo = device->zone_info;
2237
2238 if (zinfo->max_active_zones == 0)
2239 continue;
2240
2241 ret = blkdev_zone_mgmt(device->bdev, REQ_OP_ZONE_FINISH,
2242 physical >> SECTOR_SHIFT,
2243 zinfo->zone_size >> SECTOR_SHIFT,
2244 GFP_NOFS);
2245
2246 if (ret)
2247 return ret;
2248
2249 if (!(block_group->flags & BTRFS_BLOCK_GROUP_DATA))
2250 zinfo->reserved_active_zones++;
2251 btrfs_dev_clear_active_zone(device, physical);
2252 }
2253
2254 if (!fully_written)
2255 btrfs_dec_block_group_ro(block_group);
2256
2257 spin_lock(&fs_info->zone_active_bgs_lock);
2258 ASSERT(!list_empty(&block_group->active_bg_list));
2259 list_del_init(&block_group->active_bg_list);
2260 spin_unlock(&fs_info->zone_active_bgs_lock);
2261
2262 /* For active_bg_list */
2263 btrfs_put_block_group(block_group);
2264
2265 clear_and_wake_up_bit(BTRFS_FS_NEED_ZONE_FINISH, &fs_info->flags);
2266
2267 return 0;
2268}
2269
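/*
 * Finish @block_group even if it is not fully written yet. No-op on
 * non-zoned filesystems.
 */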
2270int btrfs_zone_finish(struct btrfs_block_group *block_group)
2271{
2272 if (!btrfs_is_zoned(block_group->fs_info))
2273 return 0;
2274
2275 return do_zone_finish(block_group, false);
2276}
2277
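/*
 * Check if any device can accommodate activating one more zone for a block
 * group with the given @flags profile, honoring the active zone reservation
 * for data block groups. If not, set BTRFS_FS_NEED_ZONE_FINISH so writers
 * finish a zone before retrying.
 */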
2278bool btrfs_can_activate_zone(struct btrfs_fs_devices *fs_devices, u64 flags)
2279{
2280 struct btrfs_fs_info *fs_info = fs_devices->fs_info;
2281 struct btrfs_device *device;
2282 bool ret = false;
2283
2284 if (!btrfs_is_zoned(fs_info))
2285 return true;
2286
2287 /* Check if there is a device with active zones left */
2288 mutex_lock(&fs_info->chunk_mutex);
2289 spin_lock(&fs_info->zone_active_bgs_lock);
2290 list_for_each_entry(device, &fs_devices->alloc_list, dev_alloc_list) {
2291 struct btrfs_zoned_device_info *zinfo = device->zone_info;
2292 int reserved = 0;
2293
2294 if (!device->bdev)
2295 continue;
2296
2297 if (!zinfo->max_active_zones) {
2298 ret = true;
2299 break;
2300 }
2301
2302 if (flags & BTRFS_BLOCK_GROUP_DATA)
2303 reserved = zinfo->reserved_active_zones;
2304
2305 switch (flags & BTRFS_BLOCK_GROUP_PROFILE_MASK) {
2306 case 0: /* single */
2307 ret = (atomic_read(&zinfo->active_zones_left) >= (1 + reserved));
2308 break;
2309 case BTRFS_BLOCK_GROUP_DUP:
2310 ret = (atomic_read(&zinfo->active_zones_left) >= (2 + reserved));
2311 break;
2312 }
2313 if (ret)
2314 break;
2315 }
2316 spin_unlock(&fs_info->zone_active_bgs_lock);
2317 mutex_unlock(&fs_info->chunk_mutex);
2318
2319 if (!ret)
2320 set_bit(BTRFS_FS_NEED_ZONE_FINISH, &fs_info->flags);
2321
2322 return ret;
2323}
2324
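/*
 * Called on write completion of the range [@logical, @logical + @length). If
 * the remaining zone capacity is too small for another minimal allocation
 * (sectorsize for data, nodesize for metadata), finish the block group.
 */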
2325void btrfs_zone_finish_endio(struct btrfs_fs_info *fs_info, u64 logical, u64 length)
2326{
2327 struct btrfs_block_group *block_group;
2328 u64 min_alloc_bytes;
2329
2330 if (!btrfs_is_zoned(fs_info))
2331 return;
2332
2333 block_group = btrfs_lookup_block_group(fs_info, logical);
2334 ASSERT(block_group);
2335
2336 /* No MIXED_BG on zoned btrfs. */
2337 if (block_group->flags & BTRFS_BLOCK_GROUP_DATA)
2338 min_alloc_bytes = fs_info->sectorsize;
2339 else
2340 min_alloc_bytes = fs_info->nodesize;
2341
2342 /* Bail out if we can allocate more data from this block group. */
2343 if (logical + length + min_alloc_bytes <=
2344 block_group->start + block_group->zone_capacity)
2345 goto out;
2346
2347 do_zone_finish(block_group, true);
2348
2349out:
2350 btrfs_put_block_group(block_group);
2351}
2352
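/*
 * Work item: wait for writeback of the block group's last extent buffer, then
 * finish the zone via btrfs_zone_finish_endio().
 */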
2353static void btrfs_zone_finish_endio_workfn(struct work_struct *work)
2354{
2355 struct btrfs_block_group *bg =
2356 container_of(work, struct btrfs_block_group, zone_finish_work);
2357
2358 wait_on_extent_buffer_writeback(bg->last_eb);
2359 free_extent_buffer(bg->last_eb);
2360 btrfs_zone_finish_endio(bg->fs_info, bg->start, bg->length);
2361 btrfs_put_block_group(bg);
2362}
2363
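/*
 * Schedule zone finishing of @bg once @eb, which sits close to the end of the
 * zone capacity, has been written back. Only applies to sequential zones, and
 * warns if finishing is scheduled twice for the same block group.
 */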
2364void btrfs_schedule_zone_finish_bg(struct btrfs_block_group *bg,
2365 struct extent_buffer *eb)
2366{
2367 if (!test_bit(BLOCK_GROUP_FLAG_SEQUENTIAL_ZONE, &bg->runtime_flags) ||
2368 eb->start + eb->len * 2 <= bg->start + bg->zone_capacity)
2369 return;
2370
2371 if (WARN_ON(bg->zone_finish_work.func == btrfs_zone_finish_endio_workfn)) {
2372 btrfs_err(bg->fs_info, "double scheduling of bg %llu zone finishing",
2373 bg->start);
2374 return;
2375 }
2376
2377 /* For the work */
2378 btrfs_get_block_group(bg);
2379 atomic_inc(&eb->refs);
2380 bg->last_eb = eb;
2381 INIT_WORK(&bg->zone_finish_work, btrfs_zone_finish_endio_workfn);
2382 queue_work(system_unbound_wq, &bg->zone_finish_work);
2383}
2384
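/* Clear fs_info->data_reloc_bg if it currently points at @bg. */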
2385void btrfs_clear_data_reloc_bg(struct btrfs_block_group *bg)
2386{
2387 struct btrfs_fs_info *fs_info = bg->fs_info;
2388
2389 spin_lock(&fs_info->relocation_bg_lock);
2390 if (fs_info->data_reloc_bg == bg->start)
2391 fs_info->data_reloc_bg = 0;
2392 spin_unlock(&fs_info->relocation_bg_lock);
2393}
2394
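/* Free the per-device zone report caches once they are no longer needed. */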
2395void btrfs_free_zone_cache(struct btrfs_fs_info *fs_info)
2396{
2397 struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
2398 struct btrfs_device *device;
2399
2400 if (!btrfs_is_zoned(fs_info))
2401 return;
2402
2403 mutex_lock(&fs_devices->device_list_mutex);
2404 list_for_each_entry(device, &fs_devices->devices, dev_list) {
2405 if (device->zone_info) {
2406 vfree(device->zone_info->zone_cache);
2407 device->zone_info->zone_cache = NULL;
2408 }
2409 }
2410 mutex_unlock(&fs_devices->device_list_mutex);
2411}
2412
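/*
 * Decide whether block group reclaim should run: returns true when the used
 * portion across all devices reaches fs_info->bg_reclaim_threshold percent.
 */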
2413bool btrfs_zoned_should_reclaim(struct btrfs_fs_info *fs_info)
2414{
2415 struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
2416 struct btrfs_device *device;
2417 u64 used = 0;
2418 u64 total = 0;
2419 u64 factor;
2420
2421 ASSERT(btrfs_is_zoned(fs_info));
2422
2423 if (fs_info->bg_reclaim_threshold == 0)
2424 return false;
2425
2426 mutex_lock(&fs_devices->device_list_mutex);
2427 list_for_each_entry(device, &fs_devices->devices, dev_list) {
2428 if (!device->bdev)
2429 continue;
2430
2431 total += device->disk_total_bytes;
2432 used += device->bytes_used;
2433 }
2434 mutex_unlock(&fs_devices->device_list_mutex);
2435
2436 factor = div64_u64(used * 100, total);
2437 return factor >= fs_info->bg_reclaim_threshold;
2438}
2439
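/*
 * Release the data relocation block group once the last relocation extent,
 * ending at @logical + @length, has been written, allowing further
 * allocations and zone finishing on it.
 */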
2440void btrfs_zoned_release_data_reloc_bg(struct btrfs_fs_info *fs_info, u64 logical,
2441 u64 length)
2442{
2443 struct btrfs_block_group *block_group;
2444
2445 if (!btrfs_is_zoned(fs_info))
2446 return;
2447
2448 block_group = btrfs_lookup_block_group(fs_info, logical);
2449	/* Should be called on a block group previously used for data relocation. */
2450 ASSERT(block_group && (block_group->flags & BTRFS_BLOCK_GROUP_DATA));
2451
2452 spin_lock(&block_group->lock);
2453 if (!test_bit(BLOCK_GROUP_FLAG_ZONED_DATA_RELOC, &block_group->runtime_flags))
2454 goto out;
2455
2456 /* All relocation extents are written. */
2457 if (block_group->start + block_group->alloc_offset == logical + length) {
2458 /*
2459 * Now, release this block group for further allocations and
2460 * zone finish.
2461 */
2462 clear_bit(BLOCK_GROUP_FLAG_ZONED_DATA_RELOC,
2463 &block_group->runtime_flags);
2464 }
2465
2466out:
2467 spin_unlock(&block_group->lock);
2468 btrfs_put_block_group(block_group);
2469}
2470
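/*
 * Pick the active block group with the least unallocated zone capacity
 * (skipping empty, reserved, SYSTEM, and data relocation block groups) and
 * finish it. Returns 1 if a block group was finished, 0 if none was eligible,
 * or a negative errno.
 */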
2471int btrfs_zone_finish_one_bg(struct btrfs_fs_info *fs_info)
2472{
2473 struct btrfs_block_group *block_group;
2474 struct btrfs_block_group *min_bg = NULL;
2475 u64 min_avail = U64_MAX;
2476 int ret;
2477
2478 spin_lock(&fs_info->zone_active_bgs_lock);
2479 list_for_each_entry(block_group, &fs_info->zone_active_bgs,
2480 active_bg_list) {
2481 u64 avail;
2482
2483 spin_lock(&block_group->lock);
2484 if (block_group->reserved || block_group->alloc_offset == 0 ||
2485 (block_group->flags & BTRFS_BLOCK_GROUP_SYSTEM) ||
2486 test_bit(BLOCK_GROUP_FLAG_ZONED_DATA_RELOC, &block_group->runtime_flags)) {
2487 spin_unlock(&block_group->lock);
2488 continue;
2489 }
2490
2491 avail = block_group->zone_capacity - block_group->alloc_offset;
2492 if (min_avail > avail) {
2493 if (min_bg)
2494 btrfs_put_block_group(min_bg);
2495 min_bg = block_group;
2496 min_avail = avail;
2497 btrfs_get_block_group(min_bg);
2498 }
2499 spin_unlock(&block_group->lock);
2500 }
2501 spin_unlock(&fs_info->zone_active_bgs_lock);
2502
2503 if (!min_bg)
2504 return 0;
2505
2506 ret = btrfs_zone_finish(min_bg);
2507 btrfs_put_block_group(min_bg);
2508
2509 return ret < 0 ? ret : 1;
2510}
2511
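/*
 * Try to activate one existing metadata or system block group in @space_info.
 * If none can be activated and @do_finish is true, finish an active block
 * group to release an active zone and retry. Returns 1 when a block group was
 * activated, 0 otherwise, or a negative errno.
 */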
2512int btrfs_zoned_activate_one_bg(struct btrfs_fs_info *fs_info,
2513 struct btrfs_space_info *space_info,
2514 bool do_finish)
2515{
2516 struct btrfs_block_group *bg;
2517 int index;
2518
2519 if (!btrfs_is_zoned(fs_info) || (space_info->flags & BTRFS_BLOCK_GROUP_DATA))
2520 return 0;
2521
2522 for (;;) {
2523 int ret;
2524 bool need_finish = false;
2525
2526 down_read(&space_info->groups_sem);
2527 for (index = 0; index < BTRFS_NR_RAID_TYPES; index++) {
2528 list_for_each_entry(bg, &space_info->block_groups[index],
2529 list) {
2530 if (!spin_trylock(&bg->lock))
2531 continue;
2532 if (btrfs_zoned_bg_is_full(bg) ||
2533 test_bit(BLOCK_GROUP_FLAG_ZONE_IS_ACTIVE,
2534 &bg->runtime_flags)) {
2535 spin_unlock(&bg->lock);
2536 continue;
2537 }
2538 spin_unlock(&bg->lock);
2539
2540 if (btrfs_zone_activate(bg)) {
2541 up_read(&space_info->groups_sem);
2542 return 1;
2543 }
2544
2545 need_finish = true;
2546 }
2547 }
2548 up_read(&space_info->groups_sem);
2549
2550 if (!do_finish || !need_finish)
2551 break;
2552
2553 ret = btrfs_zone_finish_one_bg(fs_info);
2554 if (ret == 0)
2555 break;
2556 if (ret < 0)
2557 return ret;
2558 }
2559
2560 return 0;
2561}
2562
2563/*
2564 * Reserve zones for one metadata block group, one tree-log block group, and one
2565 * system block group.
2566 */
2567void btrfs_check_active_zone_reservation(struct btrfs_fs_info *fs_info)
2568{
2569 struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
2570 struct btrfs_block_group *block_group;
2571 struct btrfs_device *device;
2572 /* Reserve zones for normal SINGLE metadata and tree-log block group. */
2573 unsigned int metadata_reserve = 2;
2574 /* Reserve a zone for SINGLE system block group. */
2575 unsigned int system_reserve = 1;
2576
2577 if (!test_bit(BTRFS_FS_ACTIVE_ZONE_TRACKING, &fs_info->flags))
2578 return;
2579
2580 /*
2581	 * This function is called from the mount context, so no parallel process
2582	 * is touching these bits; there is no need for read_seqretry().
2583 */
2584 if (fs_info->avail_metadata_alloc_bits & BTRFS_BLOCK_GROUP_DUP)
2585 metadata_reserve = 4;
2586 if (fs_info->avail_system_alloc_bits & BTRFS_BLOCK_GROUP_DUP)
2587 system_reserve = 2;
2588
2589 /* Apply the reservation on all the devices. */
2590 mutex_lock(&fs_devices->device_list_mutex);
2591 list_for_each_entry(device, &fs_devices->devices, dev_list) {
2592 if (!device->bdev)
2593 continue;
2594
2595 device->zone_info->reserved_active_zones =
2596 metadata_reserve + system_reserve;
2597 }
2598 mutex_unlock(&fs_devices->device_list_mutex);
2599
2600 /* Release reservation for currently active block groups. */
2601 spin_lock(&fs_info->zone_active_bgs_lock);
2602 list_for_each_entry(block_group, &fs_info->zone_active_bgs, active_bg_list) {
2603 struct btrfs_chunk_map *map = block_group->physical_map;
2604
2605 if (!(block_group->flags &
2606 (BTRFS_BLOCK_GROUP_METADATA | BTRFS_BLOCK_GROUP_SYSTEM)))
2607 continue;
2608
2609 for (int i = 0; i < map->num_stripes; i++)
2610 map->stripes[i].dev->zone_info->reserved_active_zones--;
2611 }
2612 spin_unlock(&fs_info->zone_active_bgs_lock);
2613}