Linux kernel mirror (for testing)
git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
// SPDX-License-Identifier: GPL-2.0

#include <linux/err.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include "ctree.h"
#include "volumes.h"
#include "extent_map.h"
#include "compression.h"


static struct kmem_cache *extent_map_cache;

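/*
 * Slab cache backing all extent_map allocations; created once at module init
 * time and torn down again in extent_map_exit().
 */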
int __init extent_map_init(void)
{
	extent_map_cache = kmem_cache_create("btrfs_extent_map",
			sizeof(struct extent_map), 0,
			SLAB_MEM_SPREAD, NULL);
	if (!extent_map_cache)
		return -ENOMEM;
	return 0;
}

void __cold extent_map_exit(void)
{
	kmem_cache_destroy(extent_map_cache);
}

/**
 * extent_map_tree_init - initialize extent map tree
 * @tree: tree to initialize
 *
 * Initialize the extent tree @tree. Should be called for each new inode
 * or other user of the extent_map interface.
 */
void extent_map_tree_init(struct extent_map_tree *tree)
{
	tree->map = RB_ROOT_CACHED;
	INIT_LIST_HEAD(&tree->modified_extents);
	rwlock_init(&tree->lock);
}

/**
 * alloc_extent_map - allocate new extent map structure
 *
 * Allocate a new extent_map structure. The new structure is
 * returned with a reference count of one and needs to be
 * freed using free_extent_map()
 */
struct extent_map *alloc_extent_map(void)
{
	struct extent_map *em;
	em = kmem_cache_zalloc(extent_map_cache, GFP_NOFS);
	if (!em)
		return NULL;
	RB_CLEAR_NODE(&em->rb_node);
	em->flags = 0;
	em->compress_type = BTRFS_COMPRESS_NONE;
	em->generation = 0;
	refcount_set(&em->refs, 1);
	INIT_LIST_HEAD(&em->list);
	return em;
}

/**
 * free_extent_map - drop reference count of an extent_map
 * @em: extent map being released
 *
 * Drops the reference count on @em by one and frees the structure
 * if the reference count hits zero.
 */
void free_extent_map(struct extent_map *em)
{
	if (!em)
		return;
	WARN_ON(refcount_read(&em->refs) == 0);
	if (refcount_dec_and_test(&em->refs)) {
		WARN_ON(extent_map_in_tree(em));
		WARN_ON(!list_empty(&em->list));
		if (test_bit(EXTENT_FLAG_FS_MAPPING, &em->flags))
			kfree(em->map_lookup);
		kmem_cache_free(extent_map_cache, em);
	}
}

/* simple helper to do math around the end of an extent, handling wrap */
static u64 range_end(u64 start, u64 len)
{
	if (start + len < start)
		return (u64)-1;
	return start + len;
}

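/*
 * Insert @em into the rb-tree, keyed by em->start. The neighbouring nodes are
 * checked so that the new extent does not overlap any existing mapping; if it
 * does, -EEXIST is returned and the tree is left untouched.
 */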
static int tree_insert(struct rb_root_cached *root, struct extent_map *em)
{
	struct rb_node **p = &root->rb_root.rb_node;
	struct rb_node *parent = NULL;
	struct extent_map *entry = NULL;
	struct rb_node *orig_parent = NULL;
	u64 end = range_end(em->start, em->len);
	bool leftmost = true;

	while (*p) {
		parent = *p;
		entry = rb_entry(parent, struct extent_map, rb_node);

		if (em->start < entry->start) {
			p = &(*p)->rb_left;
		} else if (em->start >= extent_map_end(entry)) {
			p = &(*p)->rb_right;
			leftmost = false;
		} else {
			return -EEXIST;
		}
	}

	orig_parent = parent;
	while (parent && em->start >= extent_map_end(entry)) {
		parent = rb_next(parent);
		entry = rb_entry(parent, struct extent_map, rb_node);
	}
	if (parent)
		if (end > entry->start && em->start < extent_map_end(entry))
			return -EEXIST;

	parent = orig_parent;
	entry = rb_entry(parent, struct extent_map, rb_node);
	while (parent && em->start < entry->start) {
		parent = rb_prev(parent);
		entry = rb_entry(parent, struct extent_map, rb_node);
	}
	if (parent)
		if (end > entry->start && em->start < extent_map_end(entry))
			return -EEXIST;

	rb_link_node(&em->rb_node, orig_parent, p);
	rb_insert_color_cached(&em->rb_node, root, leftmost);
	return 0;
}

/*
 * search through the tree for an extent_map with a given offset. If
 * it can't be found, try to find some neighboring extents
 */
static struct rb_node *__tree_search(struct rb_root *root, u64 offset,
				     struct rb_node **prev_ret,
				     struct rb_node **next_ret)
{
	struct rb_node *n = root->rb_node;
	struct rb_node *prev = NULL;
	struct rb_node *orig_prev = NULL;
	struct extent_map *entry;
	struct extent_map *prev_entry = NULL;

	while (n) {
		entry = rb_entry(n, struct extent_map, rb_node);
		prev = n;
		prev_entry = entry;

		if (offset < entry->start)
			n = n->rb_left;
		else if (offset >= extent_map_end(entry))
			n = n->rb_right;
		else
			return n;
	}

	if (prev_ret) {
		orig_prev = prev;
		while (prev && offset >= extent_map_end(prev_entry)) {
			prev = rb_next(prev);
			prev_entry = rb_entry(prev, struct extent_map, rb_node);
		}
		*prev_ret = prev;
		prev = orig_prev;
	}

	if (next_ret) {
		prev_entry = rb_entry(prev, struct extent_map, rb_node);
		while (prev && offset < prev_entry->start) {
			prev = rb_prev(prev);
			prev_entry = rb_entry(prev, struct extent_map, rb_node);
		}
		*next_ret = prev;
	}
	return NULL;
}

/* check to see if two extent_map structs are adjacent and safe to merge */
static int mergable_maps(struct extent_map *prev, struct extent_map *next)
{
	if (test_bit(EXTENT_FLAG_PINNED, &prev->flags))
		return 0;

	/*
	 * don't merge compressed extents, we need to know their
	 * actual size
	 */
	if (test_bit(EXTENT_FLAG_COMPRESSED, &prev->flags))
		return 0;

	if (test_bit(EXTENT_FLAG_LOGGING, &prev->flags) ||
	    test_bit(EXTENT_FLAG_LOGGING, &next->flags))
		return 0;

	/*
	 * We don't want to merge stuff that hasn't been written to the log yet
	 * since it may not reflect exactly what is on disk, and that would be
	 * bad.
	 */
	if (!list_empty(&prev->list) || !list_empty(&next->list))
		return 0;

	ASSERT(next->block_start != EXTENT_MAP_DELALLOC &&
	       prev->block_start != EXTENT_MAP_DELALLOC);

	if (prev->map_lookup || next->map_lookup)
		ASSERT(test_bit(EXTENT_FLAG_FS_MAPPING, &prev->flags) &&
		       test_bit(EXTENT_FLAG_FS_MAPPING, &next->flags));

	if (extent_map_end(prev) == next->start &&
	    prev->flags == next->flags &&
	    prev->map_lookup == next->map_lookup &&
	    ((next->block_start == EXTENT_MAP_HOLE &&
	      prev->block_start == EXTENT_MAP_HOLE) ||
	     (next->block_start == EXTENT_MAP_INLINE &&
	      prev->block_start == EXTENT_MAP_INLINE) ||
	     (next->block_start < EXTENT_MAP_LAST_BYTE - 1 &&
	      next->block_start == extent_map_block_end(prev)))) {
		return 1;
	}
	return 0;
}

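/*
 * Attempt to merge @em with the extent maps immediately before and after it
 * in @tree. On a successful merge the neighbour is removed from the tree, its
 * range is folded into @em and its reference is dropped.
 */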
static void try_merge_map(struct extent_map_tree *tree, struct extent_map *em)
{
	struct extent_map *merge = NULL;
	struct rb_node *rb;

	/*
	 * We can't modify an extent map that is in the tree and that is being
	 * used by another task, as it can cause that other task to see it in
	 * inconsistent state during the merging. We always have 1 reference for
	 * the tree and 1 for this task (which is unpinning the extent map or
	 * clearing the logging flag), so anything > 2 means it's being used by
	 * other tasks too.
	 */
	if (refcount_read(&em->refs) > 2)
		return;

	if (em->start != 0) {
		rb = rb_prev(&em->rb_node);
		if (rb)
			merge = rb_entry(rb, struct extent_map, rb_node);
		if (rb && mergable_maps(merge, em)) {
			em->start = merge->start;
			em->orig_start = merge->orig_start;
			em->len += merge->len;
			em->block_len += merge->block_len;
			em->block_start = merge->block_start;
			em->mod_len = (em->mod_len + em->mod_start) - merge->mod_start;
			em->mod_start = merge->mod_start;
			em->generation = max(em->generation, merge->generation);
			set_bit(EXTENT_FLAG_MERGED, &em->flags);

			rb_erase_cached(&merge->rb_node, &tree->map);
			RB_CLEAR_NODE(&merge->rb_node);
			free_extent_map(merge);
		}
	}

	rb = rb_next(&em->rb_node);
	if (rb)
		merge = rb_entry(rb, struct extent_map, rb_node);
	if (rb && mergable_maps(em, merge)) {
		em->len += merge->len;
		em->block_len += merge->block_len;
		rb_erase_cached(&merge->rb_node, &tree->map);
		RB_CLEAR_NODE(&merge->rb_node);
		em->mod_len = (merge->mod_start + merge->mod_len) - em->mod_start;
		em->generation = max(em->generation, merge->generation);
		set_bit(EXTENT_FLAG_MERGED, &em->flags);
		free_extent_map(merge);
	}
}

/**
 * unpin_extent_cache - unpin an extent from the cache
 * @tree: tree to unpin the extent in
 * @start: logical offset in the file
 * @len: length of the extent
 * @gen: generation that this extent has been modified in
 *
 * Called after an extent has been written to disk properly. Set the generation
 * to the generation that actually added the file item to the inode so we know
 * we need to sync this extent when we call fsync().
 */
int unpin_extent_cache(struct extent_map_tree *tree, u64 start, u64 len,
		       u64 gen)
{
	int ret = 0;
	struct extent_map *em;
	bool prealloc = false;

	write_lock(&tree->lock);
	em = lookup_extent_mapping(tree, start, len);

	WARN_ON(!em || em->start != start);

	if (!em)
		goto out;

	em->generation = gen;
	clear_bit(EXTENT_FLAG_PINNED, &em->flags);
	em->mod_start = em->start;
	em->mod_len = em->len;

	if (test_bit(EXTENT_FLAG_FILLING, &em->flags)) {
		prealloc = true;
		clear_bit(EXTENT_FLAG_FILLING, &em->flags);
	}

	try_merge_map(tree, em);

	if (prealloc) {
		em->mod_start = em->start;
		em->mod_len = em->len;
	}

	free_extent_map(em);
out:
	write_unlock(&tree->lock);
	return ret;
}

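/*
 * Clear the logging flag on @em and, if the extent map is still in the tree,
 * try to merge it with its neighbours now that logging no longer needs it.
 */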
void clear_em_logging(struct extent_map_tree *tree, struct extent_map *em)
{
	clear_bit(EXTENT_FLAG_LOGGING, &em->flags);
	if (extent_map_in_tree(em))
		try_merge_map(tree, em);
}

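/*
 * Take an extra reference for the tree, reset the modified range to cover the
 * whole extent and either queue @em on the tree's modified list (so fsync can
 * log it) or try to merge it with its neighbours.
 */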
static inline void setup_extent_mapping(struct extent_map_tree *tree,
					struct extent_map *em,
					int modified)
{
	refcount_inc(&em->refs);
	em->mod_start = em->start;
	em->mod_len = em->len;

	if (modified)
		list_move(&em->list, &tree->modified_extents);
	else
		try_merge_map(tree, em);
}

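/*
 * For a chunk mapping (EXTENT_FLAG_FS_MAPPING), mark the physical range of
 * every stripe with @bits in the owning device's alloc_state tree.
 */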
static void extent_map_device_set_bits(struct extent_map *em, unsigned bits)
{
	struct map_lookup *map = em->map_lookup;
	u64 stripe_size = em->orig_block_len;
	int i;

	for (i = 0; i < map->num_stripes; i++) {
		struct btrfs_io_stripe *stripe = &map->stripes[i];
		struct btrfs_device *device = stripe->dev;

		set_extent_bits_nowait(&device->alloc_state, stripe->physical,
				       stripe->physical + stripe_size - 1, bits);
	}
}

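/* Counterpart of extent_map_device_set_bits(): clear @bits on every stripe. */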
static void extent_map_device_clear_bits(struct extent_map *em, unsigned bits)
{
	struct map_lookup *map = em->map_lookup;
	u64 stripe_size = em->orig_block_len;
	int i;

	for (i = 0; i < map->num_stripes; i++) {
		struct btrfs_io_stripe *stripe = &map->stripes[i];
		struct btrfs_device *device = stripe->dev;

		__clear_extent_bit(&device->alloc_state, stripe->physical,
				   stripe->physical + stripe_size - 1, bits,
				   0, 0, NULL, GFP_NOWAIT, NULL);
	}
}

/**
 * Add new extent map to the extent tree
 *
 * @tree: tree to insert new map in
 * @em: map to insert
 * @modified: indicate whether the given @em should be added to the
 *	modified list, which indicates the extent needs to be logged
 *
 * Insert @em into @tree or perform a simple forward/backward merge with
 * existing mappings. The extent_map struct passed in will be inserted
 * into the tree directly, with an additional reference taken, or a
 * reference dropped if the merge attempt was successful.
 */
int add_extent_mapping(struct extent_map_tree *tree,
		       struct extent_map *em, int modified)
{
	int ret = 0;

	lockdep_assert_held_write(&tree->lock);

	ret = tree_insert(&tree->map, em);
	if (ret)
		goto out;

	setup_extent_mapping(tree, em, modified);
	if (test_bit(EXTENT_FLAG_FS_MAPPING, &em->flags)) {
		extent_map_device_set_bits(em, CHUNK_ALLOCATED);
		extent_map_device_clear_bits(em, CHUNK_TRIMMED);
	}
out:
	return ret;
}

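/*
 * Common lookup helper. With @strict set, only return an extent map whose
 * range actually intersects [start, start + len); otherwise fall back to the
 * nearest neighbouring extent found by __tree_search(). A reference is taken
 * on the returned extent map.
 */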
static struct extent_map *
__lookup_extent_mapping(struct extent_map_tree *tree,
			u64 start, u64 len, int strict)
{
	struct extent_map *em;
	struct rb_node *rb_node;
	struct rb_node *prev = NULL;
	struct rb_node *next = NULL;
	u64 end = range_end(start, len);

	rb_node = __tree_search(&tree->map.rb_root, start, &prev, &next);
	if (!rb_node) {
		if (prev)
			rb_node = prev;
		else if (next)
			rb_node = next;
		else
			return NULL;
	}

	em = rb_entry(rb_node, struct extent_map, rb_node);

	if (strict && !(end > em->start && start < extent_map_end(em)))
		return NULL;

	refcount_inc(&em->refs);
	return em;
}

/**
 * lookup_extent_mapping - lookup extent_map
 * @tree: tree to lookup in
 * @start: byte offset to start the search
 * @len: length of the lookup range
 *
 * Find and return the first extent_map struct in @tree that intersects the
 * [start, start + len) range. There may be additional objects in the tree that
 * intersect, so check the object returned carefully to make sure that no
 * additional lookups are needed.
 */
struct extent_map *lookup_extent_mapping(struct extent_map_tree *tree,
					 u64 start, u64 len)
{
	return __lookup_extent_mapping(tree, start, len, 1);
}

/**
 * search_extent_mapping - find a nearby extent map
 * @tree: tree to lookup in
 * @start: byte offset to start the search
 * @len: length of the lookup range
 *
 * Find and return the first extent_map struct in @tree that intersects the
 * [start, start + len) range.
 *
 * If one can't be found, any nearby extent may be returned
 */
struct extent_map *search_extent_mapping(struct extent_map_tree *tree,
					 u64 start, u64 len)
{
	return __lookup_extent_mapping(tree, start, len, 0);
}

/**
 * remove_extent_mapping - removes an extent_map from the extent tree
 * @tree: extent tree to remove from
 * @em: extent map being removed
 *
 * Removes @em from @tree. No reference counts are dropped, and no checks
 * are done to see if the range is in use
 */
void remove_extent_mapping(struct extent_map_tree *tree, struct extent_map *em)
{
	lockdep_assert_held_write(&tree->lock);

	WARN_ON(test_bit(EXTENT_FLAG_PINNED, &em->flags));
	rb_erase_cached(&em->rb_node, &tree->map);
	if (!test_bit(EXTENT_FLAG_LOGGING, &em->flags))
		list_del_init(&em->list);
	if (test_bit(EXTENT_FLAG_FS_MAPPING, &em->flags))
		extent_map_device_clear_bits(em, CHUNK_ALLOCATED);
	RB_CLEAR_NODE(&em->rb_node);
}

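/*
 * Replace @cur with @new in @tree without changing the tree topology. @cur is
 * dropped from the modified list (unless it is being logged) and @new is set
 * up with an extra reference, the same way add_extent_mapping() would do it.
 */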
void replace_extent_mapping(struct extent_map_tree *tree,
			    struct extent_map *cur,
			    struct extent_map *new,
			    int modified)
{
	lockdep_assert_held_write(&tree->lock);

	WARN_ON(test_bit(EXTENT_FLAG_PINNED, &cur->flags));
	ASSERT(extent_map_in_tree(cur));
	if (!test_bit(EXTENT_FLAG_LOGGING, &cur->flags))
		list_del_init(&cur->list);
	rb_replace_node_cached(&cur->rb_node, &new->rb_node, &tree->map);
	RB_CLEAR_NODE(&cur->rb_node);

	setup_extent_mapping(tree, new, modified);
}

static struct extent_map *next_extent_map(struct extent_map *em)
{
	struct rb_node *next;

	next = rb_next(&em->rb_node);
	if (!next)
		return NULL;
	return container_of(next, struct extent_map, rb_node);
}

static struct extent_map *prev_extent_map(struct extent_map *em)
{
	struct rb_node *prev;

	prev = rb_prev(&em->rb_node);
	if (!prev)
		return NULL;
	return container_of(prev, struct extent_map, rb_node);
}

/*
 * Helper for btrfs_get_extent. Given an existing extent in the tree (the one
 * nearest to map_start) and a new extent that we want to insert, trim the new
 * extent so that it no longer overlaps its neighbours and insert the
 * best-fitting remainder into the tree.
 */
static noinline int merge_extent_mapping(struct extent_map_tree *em_tree,
					 struct extent_map *existing,
					 struct extent_map *em,
					 u64 map_start)
{
	struct extent_map *prev;
	struct extent_map *next;
	u64 start;
	u64 end;
	u64 start_diff;

	BUG_ON(map_start < em->start || map_start >= extent_map_end(em));

	if (existing->start > map_start) {
		next = existing;
		prev = prev_extent_map(next);
	} else {
		prev = existing;
		next = next_extent_map(prev);
	}

	start = prev ? extent_map_end(prev) : em->start;
	start = max_t(u64, start, em->start);
	end = next ? next->start : extent_map_end(em);
	end = min_t(u64, end, extent_map_end(em));
	start_diff = start - em->start;
	em->start = start;
	em->len = end - start;
	if (em->block_start < EXTENT_MAP_LAST_BYTE &&
	    !test_bit(EXTENT_FLAG_COMPRESSED, &em->flags)) {
		em->block_start += start_diff;
		em->block_len = em->len;
	}
	return add_extent_mapping(em_tree, em, 0);
}

/**
 * Add extent mapping into em_tree
 *
 * @fs_info: the filesystem
 * @em_tree: extent tree into which we want to insert the extent mapping
 * @em_in: extent we are inserting
 * @start: start of the logical range btrfs_get_extent() is requesting
 * @len: length of the logical range btrfs_get_extent() is requesting
 *
 * Note that @em_in's range may be different from [start, start+len),
 * but they must overlap.
 *
 * Insert @em_in into @em_tree. In case there is an overlapping range, handle
 * the -EEXIST by either:
 * a) Returning the existing extent in @em_in if @start is within the
 *    existing em.
 * b) Merging the existing extent with @em_in passed in.
 *
 * Return 0 on success, otherwise -EEXIST.
 */
int btrfs_add_extent_mapping(struct btrfs_fs_info *fs_info,
			     struct extent_map_tree *em_tree,
			     struct extent_map **em_in, u64 start, u64 len)
{
	int ret;
	struct extent_map *em = *em_in;

	ret = add_extent_mapping(em_tree, em, 0);
	/*
	 * It is possible that someone inserted the extent into the tree
	 * while we had the lock dropped. It is also possible that
	 * an overlapping map exists in the tree.
	 */
	if (ret == -EEXIST) {
		struct extent_map *existing;

		ret = 0;

		existing = search_extent_mapping(em_tree, start, len);

		trace_btrfs_handle_em_exist(fs_info, existing, em, start, len);

		/*
		 * existing will always be non-NULL, since there must be
		 * an extent causing the -EEXIST.
		 */
		if (start >= existing->start &&
		    start < extent_map_end(existing)) {
			free_extent_map(em);
			*em_in = existing;
			ret = 0;
		} else {
			u64 orig_start = em->start;
			u64 orig_len = em->len;

			/*
			 * The existing extent map is the one nearest to
			 * the [start, start + len) range which overlaps
			 */
			ret = merge_extent_mapping(em_tree, existing,
						   em, start);
			if (ret) {
				free_extent_map(em);
				*em_in = NULL;
				WARN_ONCE(ret,
"unexpected error %d: merge existing(start %llu len %llu) with em(start %llu len %llu)\n",
					  ret, existing->start, existing->len,
					  orig_start, orig_len);
			}
			free_extent_map(existing);
		}
	}

	ASSERT(ret == 0 || ret == -EEXIST);
	return ret;
}