// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2007 Oracle. All rights reserved.
 */

#include <linux/slab.h>
#include <linux/blkdev.h>
#include <linux/writeback.h>
#include <linux/sched/mm.h>
#include "misc.h"
#include "ctree.h"
#include "transaction.h"
#include "btrfs_inode.h"
#include "extent_io.h"
#include "disk-io.h"
#include "compression.h"
#include "delalloc-space.h"
#include "qgroup.h"
#include "subpage.h"

static struct kmem_cache *btrfs_ordered_extent_cache;

static u64 entry_end(struct btrfs_ordered_extent *entry)
{
        if (entry->file_offset + entry->num_bytes < entry->file_offset)
                return (u64)-1;
        return entry->file_offset + entry->num_bytes;
}
/*
 * Returns NULL if the insertion worked, or the node already in the tree that
 * overlaps the given offset.
 */
static struct rb_node *tree_insert(struct rb_root *root, u64 file_offset,
                                   struct rb_node *node)
{
        struct rb_node **p = &root->rb_node;
        struct rb_node *parent = NULL;
        struct btrfs_ordered_extent *entry;

        while (*p) {
                parent = *p;
                entry = rb_entry(parent, struct btrfs_ordered_extent, rb_node);

                if (file_offset < entry->file_offset)
                        p = &(*p)->rb_left;
                else if (file_offset >= entry_end(entry))
                        p = &(*p)->rb_right;
                else
                        return parent;
        }

        rb_link_node(node, parent, p);
        rb_insert_color(node, root);
        return NULL;
}

/*
 * look for a given offset in the tree, and if it can't be found return the
 * first lesser offset
 */
static struct rb_node *__tree_search(struct rb_root *root, u64 file_offset,
                                     struct rb_node **prev_ret)
{
        struct rb_node *n = root->rb_node;
        struct rb_node *prev = NULL;
        struct rb_node *test;
        struct btrfs_ordered_extent *entry;
        struct btrfs_ordered_extent *prev_entry = NULL;

        while (n) {
                entry = rb_entry(n, struct btrfs_ordered_extent, rb_node);
                prev = n;
                prev_entry = entry;

                if (file_offset < entry->file_offset)
                        n = n->rb_left;
                else if (file_offset >= entry_end(entry))
                        n = n->rb_right;
                else
                        return n;
        }
        if (!prev_ret)
                return NULL;

        while (prev && file_offset >= entry_end(prev_entry)) {
                test = rb_next(prev);
                if (!test)
                        break;
                prev_entry = rb_entry(test, struct btrfs_ordered_extent,
                                      rb_node);
                if (file_offset < entry_end(prev_entry))
                        break;

                prev = test;
        }
        if (prev)
                prev_entry = rb_entry(prev, struct btrfs_ordered_extent,
                                      rb_node);
        while (prev && file_offset < entry_end(prev_entry)) {
                test = rb_prev(prev);
                if (!test)
                        break;
                prev_entry = rb_entry(test, struct btrfs_ordered_extent,
                                      rb_node);
                prev = test;
        }
        *prev_ret = prev;
        return NULL;
}

static int range_overlaps(struct btrfs_ordered_extent *entry, u64 file_offset,
                          u64 len)
{
        if (file_offset + len <= entry->file_offset ||
            entry->file_offset + entry->num_bytes <= file_offset)
                return 0;
        return 1;
}
/*
 * Find the first ordered struct that covers this offset, otherwise the
 * first one less than this offset.
 */
static inline struct rb_node *tree_search(struct btrfs_ordered_inode_tree *tree,
                                          u64 file_offset)
{
        struct rb_root *root = &tree->tree;
        struct rb_node *prev = NULL;
        struct rb_node *ret;
        struct btrfs_ordered_extent *entry;

        if (tree->last) {
                entry = rb_entry(tree->last, struct btrfs_ordered_extent,
                                 rb_node);
                if (in_range(file_offset, entry->file_offset, entry->num_bytes))
                        return tree->last;
        }
        ret = __tree_search(root, file_offset, &prev);
        if (!ret)
                ret = prev;
        if (ret)
                tree->last = ret;
        return ret;
}

/**
 * btrfs_add_ordered_extent - Add an ordered extent to the per-inode tree.
 *
 * @inode: Inode that this extent is for.
 * @file_offset: Logical offset in file where the extent starts.
 * @num_bytes: Logical length of extent in file.
 * @ram_bytes: Full length of unencoded data.
 * @disk_bytenr: Offset of extent on disk.
 * @disk_num_bytes: Size of extent on disk.
 * @offset: Offset into unencoded data where file data starts.
 * @flags: Flags specifying type of extent (1 << BTRFS_ORDERED_*).
 * @compress_type: Compression algorithm used for data.
 *
 * Most of these parameters correspond to &struct btrfs_file_extent_item. The
 * tree is given a single reference on the ordered extent that was inserted.
 *
 * Return: 0 or -ENOMEM.
 */
int btrfs_add_ordered_extent(struct btrfs_inode *inode, u64 file_offset,
                             u64 num_bytes, u64 ram_bytes, u64 disk_bytenr,
                             u64 disk_num_bytes, u64 offset, unsigned flags,
                             int compress_type)
{
        struct btrfs_root *root = inode->root;
        struct btrfs_fs_info *fs_info = root->fs_info;
        struct btrfs_ordered_inode_tree *tree = &inode->ordered_tree;
        struct rb_node *node;
        struct btrfs_ordered_extent *entry;
        int ret;

        if (flags &
            ((1 << BTRFS_ORDERED_NOCOW) | (1 << BTRFS_ORDERED_PREALLOC))) {
                /* For nocow write, we can release the qgroup rsv right now */
                ret = btrfs_qgroup_free_data(inode, NULL, file_offset, num_bytes);
                if (ret < 0)
                        return ret;
                ret = 0;
        } else {
                /*
                 * The ordered extent has reserved qgroup space, release now
                 * and pass the reserved number for qgroup_record to free.
                 */
                ret = btrfs_qgroup_release_data(inode, file_offset, num_bytes);
                if (ret < 0)
                        return ret;
        }
        entry = kmem_cache_zalloc(btrfs_ordered_extent_cache, GFP_NOFS);
        if (!entry)
                return -ENOMEM;

        entry->file_offset = file_offset;
        entry->num_bytes = num_bytes;
        entry->ram_bytes = ram_bytes;
        entry->disk_bytenr = disk_bytenr;
        entry->disk_num_bytes = disk_num_bytes;
        entry->offset = offset;
        entry->bytes_left = num_bytes;
        entry->inode = igrab(&inode->vfs_inode);
        entry->compress_type = compress_type;
        entry->truncated_len = (u64)-1;
        entry->qgroup_rsv = ret;
        entry->physical = (u64)-1;

        ASSERT((flags & ~BTRFS_ORDERED_TYPE_FLAGS) == 0);
        entry->flags = flags;

        percpu_counter_add_batch(&fs_info->ordered_bytes, num_bytes,
                                 fs_info->delalloc_batch);

        /* one ref for the tree */
        refcount_set(&entry->refs, 1);
        init_waitqueue_head(&entry->wait);
        INIT_LIST_HEAD(&entry->list);
        INIT_LIST_HEAD(&entry->log_list);
        INIT_LIST_HEAD(&entry->root_extent_list);
        INIT_LIST_HEAD(&entry->work_list);
        init_completion(&entry->completion);

        trace_btrfs_ordered_extent_add(inode, entry);

        spin_lock_irq(&tree->lock);
        node = tree_insert(&tree->tree, file_offset,
                           &entry->rb_node);
        if (node)
                btrfs_panic(fs_info, -EEXIST,
                            "inconsistency in ordered tree at offset %llu",
                            file_offset);
        spin_unlock_irq(&tree->lock);

        spin_lock(&root->ordered_extent_lock);
        list_add_tail(&entry->root_extent_list,
                      &root->ordered_extents);
        root->nr_ordered_extents++;
        if (root->nr_ordered_extents == 1) {
                spin_lock(&fs_info->ordered_root_lock);
                BUG_ON(!list_empty(&root->ordered_root));
                list_add_tail(&root->ordered_root, &fs_info->ordered_roots);
                spin_unlock(&fs_info->ordered_root_lock);
        }
        spin_unlock(&root->ordered_extent_lock);

        /*
         * We don't need the count_max_extents here, we can assume that all of
         * that work has been done at higher layers, so this is truly the
         * smallest the extent is going to get.
         */
        spin_lock(&inode->lock);
        btrfs_mod_outstanding_extents(inode, 1);
        spin_unlock(&inode->lock);

        return 0;
}
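
/*
 * A minimal caller sketch (not part of the original file): how a write path
 * might register an ordered extent for a freshly allocated, uncompressed
 * regular extent. The helper name and the specific offset/length values are
 * assumptions for illustration; real callers derive them from the extent
 * they just allocated.
 */
static inline int example_add_regular_ordered(struct btrfs_inode *inode,
                                              u64 start, u64 len, u64 bytenr)
{
        /* Uncompressed and unencoded: ram/disk sizes equal the logical length. */
        return btrfs_add_ordered_extent(inode, start, len, len, bytenr, len,
                                        0, 1 << BTRFS_ORDERED_REGULAR,
                                        BTRFS_COMPRESS_NONE);
}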

/*
 * Add a struct btrfs_ordered_sum into the list of checksums to be inserted
 * when an ordered extent is finished. If the list covers more than one
 * ordered extent, it is split across multiple ordered extents.
 */
void btrfs_add_ordered_sum(struct btrfs_ordered_extent *entry,
                           struct btrfs_ordered_sum *sum)
{
        struct btrfs_ordered_inode_tree *tree;

        tree = &BTRFS_I(entry->inode)->ordered_tree;
        spin_lock_irq(&tree->lock);
        list_add_tail(&sum->list, &entry->list);
        spin_unlock_irq(&tree->lock);
}

/*
 * Mark all ordered extents IO inside the specified range finished.
 *
 * @page:        The involved page for the operation.
 *               For uncompressed buffered IO, the page status also needs to be
 *               updated to indicate whether the pending ordered IO is finished.
 *               Can be NULL for direct IO and compressed write.
 *               For these cases, callers are ensured they won't execute the
 *               endio function twice.
 * @finish_func: The function to be executed when all the IO of an ordered
 *               extent is finished.
 *
 * This function is called for endio, thus the range must have ordered
 * extent(s) covering it.
 */
void btrfs_mark_ordered_io_finished(struct btrfs_inode *inode,
                                    struct page *page, u64 file_offset,
                                    u64 num_bytes, btrfs_func_t finish_func,
                                    bool uptodate)
{
        struct btrfs_ordered_inode_tree *tree = &inode->ordered_tree;
        struct btrfs_fs_info *fs_info = inode->root->fs_info;
        struct btrfs_workqueue *wq;
        struct rb_node *node;
        struct btrfs_ordered_extent *entry = NULL;
        unsigned long flags;
        u64 cur = file_offset;

        if (btrfs_is_free_space_inode(inode))
                wq = fs_info->endio_freespace_worker;
        else
                wq = fs_info->endio_write_workers;

        if (page)
                ASSERT(page->mapping && page_offset(page) <= file_offset &&
                       file_offset + num_bytes <= page_offset(page) + PAGE_SIZE);

        spin_lock_irqsave(&tree->lock, flags);
        while (cur < file_offset + num_bytes) {
                u64 entry_end;
                u64 end;
                u32 len;

                node = tree_search(tree, cur);
                /* No ordered extents at all */
                if (!node)
                        break;

                entry = rb_entry(node, struct btrfs_ordered_extent, rb_node);
                entry_end = entry->file_offset + entry->num_bytes;
                /*
                 * |<-- OE --->|  |
                 *              cur
                 * Go to next OE.
                 */
                if (cur >= entry_end) {
                        node = rb_next(node);
                        /* No more ordered extents, exit */
                        if (!node)
                                break;
                        entry = rb_entry(node, struct btrfs_ordered_extent,
                                         rb_node);

                        /* Go to next ordered extent and continue */
                        cur = entry->file_offset;
                        continue;
                }
                /*
                 * |    |<--- OE --->|
                 * cur
                 * Go to the start of OE.
                 */
                if (cur < entry->file_offset) {
                        cur = entry->file_offset;
                        continue;
                }

                /*
                 * Now we are definitely inside one ordered extent.
                 *
                 * |<--- OE --->|
                 *      |
                 *      cur
                 */
                end = min(entry->file_offset + entry->num_bytes,
                          file_offset + num_bytes) - 1;
                ASSERT(end + 1 - cur < U32_MAX);
                len = end + 1 - cur;

                if (page) {
                        /*
                         * Ordered (Private2) bit indicates whether we still
                         * have pending io unfinished for the ordered extent.
                         *
                         * If there's no such bit, we need to skip to next range.
                         */
                        if (!btrfs_page_test_ordered(fs_info, page, cur, len)) {
                                cur += len;
                                continue;
                        }
                        btrfs_page_clear_ordered(fs_info, page, cur, len);
                }

                /* Now we're fine to update the accounting */
                if (unlikely(len > entry->bytes_left)) {
                        WARN_ON(1);
                        btrfs_crit(fs_info,
"bad ordered extent accounting, root=%llu ino=%llu OE offset=%llu OE len=%llu to_dec=%u left=%llu",
                                   inode->root->root_key.objectid,
                                   btrfs_ino(inode),
                                   entry->file_offset,
                                   entry->num_bytes,
                                   len, entry->bytes_left);
                        entry->bytes_left = 0;
                } else {
                        entry->bytes_left -= len;
                }

                if (!uptodate)
                        set_bit(BTRFS_ORDERED_IOERR, &entry->flags);

                /*
                 * All the IO of the ordered extent is finished, we need to queue
                 * the finish_func to be executed.
                 */
                if (entry->bytes_left == 0) {
                        set_bit(BTRFS_ORDERED_IO_DONE, &entry->flags);
                        cond_wake_up(&entry->wait);
                        refcount_inc(&entry->refs);
                        spin_unlock_irqrestore(&tree->lock, flags);
                        btrfs_init_work(&entry->work, finish_func, NULL, NULL);
                        btrfs_queue_work(wq, &entry->work);
                        spin_lock_irqsave(&tree->lock, flags);
                }
                cur += len;
        }
        spin_unlock_irqrestore(&tree->lock, flags);
}
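
/*
 * Illustrative caller sketch (not part of the original file): an endio path
 * reporting a just-completed range. The finish callback must be a btrfs_work
 * handler; example_finish_fn is a hypothetical stand-in. A real handler
 * (such as the finish-ordered work function in inode.c) completes the
 * ordered extent before dropping the reference taken by the queueing code
 * above.
 */
static void example_finish_fn(struct btrfs_work *work)
{
        struct btrfs_ordered_extent *entry =
                container_of(work, struct btrfs_ordered_extent, work);

        /* ... a real handler completes the ordered extent here ... */
        btrfs_put_ordered_extent(entry);
}

static inline void example_report_io_done(struct btrfs_inode *inode,
                                          struct page *page, u64 start,
                                          u64 len, bool uptodate)
{
        btrfs_mark_ordered_io_finished(inode, page, start, len,
                                       example_finish_fn, uptodate);
}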

/*
 * Finish IO for one ordered extent across a given range. The range can only
 * contain one ordered extent.
 *
 * @cached:      The cached ordered extent. If not NULL, we can skip the tree
 *               search and use the ordered extent directly.
 *               Will also be used to store the finished ordered extent.
 * @file_offset: File offset for the finished IO
 * @io_size:     Length of the finished IO range
 *
 * Return true if the ordered extent is finished in the range, and update
 * @cached.
 * Return false otherwise.
 *
 * NOTE: The range can NOT cross multiple ordered extents.
 * Thus the caller should ensure the range doesn't cross ordered extents.
 */
bool btrfs_dec_test_ordered_pending(struct btrfs_inode *inode,
                                    struct btrfs_ordered_extent **cached,
                                    u64 file_offset, u64 io_size)
{
        struct btrfs_ordered_inode_tree *tree = &inode->ordered_tree;
        struct rb_node *node;
        struct btrfs_ordered_extent *entry = NULL;
        unsigned long flags;
        bool finished = false;

        spin_lock_irqsave(&tree->lock, flags);
        if (cached && *cached) {
                entry = *cached;
                goto have_entry;
        }

        node = tree_search(tree, file_offset);
        if (!node)
                goto out;

        entry = rb_entry(node, struct btrfs_ordered_extent, rb_node);
have_entry:
        if (!in_range(file_offset, entry->file_offset, entry->num_bytes))
                goto out;

        if (io_size > entry->bytes_left)
                btrfs_crit(inode->root->fs_info,
                           "bad ordered accounting left %llu size %llu",
                           entry->bytes_left, io_size);

        entry->bytes_left -= io_size;

        if (entry->bytes_left == 0) {
                /*
                 * Ensure only one caller can set the flag and finished_ret
                 * accordingly
                 */
                finished = !test_and_set_bit(BTRFS_ORDERED_IO_DONE, &entry->flags);
                /* test_and_set_bit implies a barrier */
                cond_wake_up_nomb(&entry->wait);
        }
out:
        if (finished && cached && entry) {
                *cached = entry;
                refcount_inc(&entry->refs);
        }
        spin_unlock_irqrestore(&tree->lock, flags);
        return finished;
}
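
/*
 * Illustrative caller sketch (not part of the original file): decrement one
 * ordered extent by the size of a completed IO and release it if that
 * completed it. The helper name is hypothetical.
 */
static inline void example_dec_and_finish(struct btrfs_inode *inode,
                                          u64 start, u64 len)
{
        struct btrfs_ordered_extent *ordered = NULL;

        if (btrfs_dec_test_ordered_pending(inode, &ordered, start, len)) {
                /* @ordered now holds a reference on the finished extent. */
                btrfs_put_ordered_extent(ordered);
        }
}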

/*
 * used to drop a reference on an ordered extent. This will free
 * the extent if the last reference is dropped
 */
void btrfs_put_ordered_extent(struct btrfs_ordered_extent *entry)
{
        struct list_head *cur;
        struct btrfs_ordered_sum *sum;

        trace_btrfs_ordered_extent_put(BTRFS_I(entry->inode), entry);

        if (refcount_dec_and_test(&entry->refs)) {
                ASSERT(list_empty(&entry->root_extent_list));
                ASSERT(list_empty(&entry->log_list));
                ASSERT(RB_EMPTY_NODE(&entry->rb_node));
                if (entry->inode)
                        btrfs_add_delayed_iput(entry->inode);
                while (!list_empty(&entry->list)) {
                        cur = entry->list.next;
                        sum = list_entry(cur, struct btrfs_ordered_sum, list);
                        list_del(&sum->list);
                        kvfree(sum);
                }
                kmem_cache_free(btrfs_ordered_extent_cache, entry);
        }
}

/*
 * remove an ordered extent from the tree. No references are dropped
 * and waiters are woken up.
 */
void btrfs_remove_ordered_extent(struct btrfs_inode *btrfs_inode,
                                 struct btrfs_ordered_extent *entry)
{
        struct btrfs_ordered_inode_tree *tree;
        struct btrfs_root *root = btrfs_inode->root;
        struct btrfs_fs_info *fs_info = root->fs_info;
        struct rb_node *node;
        bool pending;

        /* This is paired with btrfs_add_ordered_extent. */
        spin_lock(&btrfs_inode->lock);
        btrfs_mod_outstanding_extents(btrfs_inode, -1);
        spin_unlock(&btrfs_inode->lock);
        if (root != fs_info->tree_root) {
                u64 release;

                if (test_bit(BTRFS_ORDERED_ENCODED, &entry->flags))
                        release = entry->disk_num_bytes;
                else
                        release = entry->num_bytes;
                btrfs_delalloc_release_metadata(btrfs_inode, release, false);
        }

        percpu_counter_add_batch(&fs_info->ordered_bytes, -entry->num_bytes,
                                 fs_info->delalloc_batch);

        tree = &btrfs_inode->ordered_tree;
        spin_lock_irq(&tree->lock);
        node = &entry->rb_node;
        rb_erase(node, &tree->tree);
        RB_CLEAR_NODE(node);
        if (tree->last == node)
                tree->last = NULL;
        set_bit(BTRFS_ORDERED_COMPLETE, &entry->flags);
        pending = test_and_clear_bit(BTRFS_ORDERED_PENDING, &entry->flags);
        spin_unlock_irq(&tree->lock);

        /*
         * The current running transaction is waiting on us, we need to let it
         * know that we're complete and wake it up.
         */
        if (pending) {
                struct btrfs_transaction *trans;

                /*
                 * The checks for trans are just a formality, it should be set,
                 * but if it isn't we don't want to deref/assert under the spin
                 * lock, so be nice and check if trans is set, but ASSERT() so
                 * if it isn't set a developer will notice.
                 */
                spin_lock(&fs_info->trans_lock);
                trans = fs_info->running_transaction;
                if (trans)
                        refcount_inc(&trans->use_count);
                spin_unlock(&fs_info->trans_lock);

                ASSERT(trans);
                if (trans) {
                        if (atomic_dec_and_test(&trans->pending_ordered))
                                wake_up(&trans->pending_wait);
                        btrfs_put_transaction(trans);
                }
        }

        spin_lock(&root->ordered_extent_lock);
        list_del_init(&entry->root_extent_list);
        root->nr_ordered_extents--;

        trace_btrfs_ordered_extent_remove(btrfs_inode, entry);

        if (!root->nr_ordered_extents) {
                spin_lock(&fs_info->ordered_root_lock);
                BUG_ON(list_empty(&root->ordered_root));
                list_del_init(&root->ordered_root);
                spin_unlock(&fs_info->ordered_root_lock);
        }
        spin_unlock(&root->ordered_extent_lock);
        wake_up(&entry->wait);
}

static void btrfs_run_ordered_extent_work(struct btrfs_work *work)
{
        struct btrfs_ordered_extent *ordered;

        ordered = container_of(work, struct btrfs_ordered_extent, flush_work);
        btrfs_start_ordered_extent(ordered, 1);
        complete(&ordered->completion);
}

/*
 * wait for all the ordered extents in a root. This is done when balancing
 * space between drives.
 */
u64 btrfs_wait_ordered_extents(struct btrfs_root *root, u64 nr,
                               const u64 range_start, const u64 range_len)
{
        struct btrfs_fs_info *fs_info = root->fs_info;
        LIST_HEAD(splice);
        LIST_HEAD(skipped);
        LIST_HEAD(works);
        struct btrfs_ordered_extent *ordered, *next;
        u64 count = 0;
        const u64 range_end = range_start + range_len;

        mutex_lock(&root->ordered_extent_mutex);
        spin_lock(&root->ordered_extent_lock);
        list_splice_init(&root->ordered_extents, &splice);
        while (!list_empty(&splice) && nr) {
                ordered = list_first_entry(&splice, struct btrfs_ordered_extent,
                                           root_extent_list);

                if (range_end <= ordered->disk_bytenr ||
                    ordered->disk_bytenr + ordered->disk_num_bytes <= range_start) {
                        list_move_tail(&ordered->root_extent_list, &skipped);
                        cond_resched_lock(&root->ordered_extent_lock);
                        continue;
                }

                list_move_tail(&ordered->root_extent_list,
                               &root->ordered_extents);
                refcount_inc(&ordered->refs);
                spin_unlock(&root->ordered_extent_lock);

                btrfs_init_work(&ordered->flush_work,
                                btrfs_run_ordered_extent_work, NULL, NULL);
                list_add_tail(&ordered->work_list, &works);
                btrfs_queue_work(fs_info->flush_workers, &ordered->flush_work);

                cond_resched();
                spin_lock(&root->ordered_extent_lock);
                if (nr != U64_MAX)
                        nr--;
                count++;
        }
        list_splice_tail(&skipped, &root->ordered_extents);
        list_splice_tail(&splice, &root->ordered_extents);
        spin_unlock(&root->ordered_extent_lock);

        list_for_each_entry_safe(ordered, next, &works, work_list) {
                list_del_init(&ordered->work_list);
                wait_for_completion(&ordered->completion);
                btrfs_put_ordered_extent(ordered);
                cond_resched();
        }
        mutex_unlock(&root->ordered_extent_mutex);

        return count;
}

void btrfs_wait_ordered_roots(struct btrfs_fs_info *fs_info, u64 nr,
                              const u64 range_start, const u64 range_len)
{
        struct btrfs_root *root;
        struct list_head splice;
        u64 done;

        INIT_LIST_HEAD(&splice);

        mutex_lock(&fs_info->ordered_operations_mutex);
        spin_lock(&fs_info->ordered_root_lock);
        list_splice_init(&fs_info->ordered_roots, &splice);
        while (!list_empty(&splice) && nr) {
                root = list_first_entry(&splice, struct btrfs_root,
                                        ordered_root);
                root = btrfs_grab_root(root);
                BUG_ON(!root);
                list_move_tail(&root->ordered_root,
                               &fs_info->ordered_roots);
                spin_unlock(&fs_info->ordered_root_lock);

                done = btrfs_wait_ordered_extents(root, nr,
                                                  range_start, range_len);
                btrfs_put_root(root);

                spin_lock(&fs_info->ordered_root_lock);
                if (nr != U64_MAX)
                        nr -= done;
        }
        list_splice_tail(&splice, &fs_info->ordered_roots);
        spin_unlock(&fs_info->ordered_root_lock);
        mutex_unlock(&fs_info->ordered_operations_mutex);
}

/*
 * Used to start IO or wait for a given ordered extent to finish.
 *
 * If wait is one, this effectively waits on page writeback for all the pages
 * in the extent, and it waits on the io completion code to insert
 * metadata into the btree corresponding to the extent
 */
void btrfs_start_ordered_extent(struct btrfs_ordered_extent *entry, int wait)
{
        u64 start = entry->file_offset;
        u64 end = start + entry->num_bytes - 1;
        struct btrfs_inode *inode = BTRFS_I(entry->inode);

        trace_btrfs_ordered_extent_start(inode, entry);

        /*
         * pages in the range can be dirty, clean or writeback. We
         * start IO on any dirty ones so the wait doesn't stall waiting
         * for the flusher thread to find them
         */
        if (!test_bit(BTRFS_ORDERED_DIRECT, &entry->flags))
                filemap_fdatawrite_range(inode->vfs_inode.i_mapping, start, end);
        if (wait) {
                wait_event(entry->wait, test_bit(BTRFS_ORDERED_COMPLETE,
                                                 &entry->flags));
        }
}
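
/*
 * Illustrative caller sketch (not part of the original file): flush and wait
 * for the ordered extent covering one file offset, using the usual
 * lookup/start/put reference pattern. The helper name is hypothetical.
 */
static inline void example_flush_one_ordered(struct btrfs_inode *inode,
                                             u64 file_offset)
{
        struct btrfs_ordered_extent *ordered;

        ordered = btrfs_lookup_ordered_extent(inode, file_offset);
        if (!ordered)
                return;
        btrfs_start_ordered_extent(ordered, 1);        /* start IO and wait */
        btrfs_put_ordered_extent(ordered);
}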

/*
 * Used to wait on ordered extents across a large range of bytes.
 */
int btrfs_wait_ordered_range(struct inode *inode, u64 start, u64 len)
{
        int ret = 0;
        int ret_wb = 0;
        u64 end;
        u64 orig_end;
        struct btrfs_ordered_extent *ordered;

        if (start + len < start) {
                orig_end = INT_LIMIT(loff_t);
        } else {
                orig_end = start + len - 1;
                if (orig_end > INT_LIMIT(loff_t))
                        orig_end = INT_LIMIT(loff_t);
        }

        /*
         * Start IO across the range first to instantiate any delalloc
         * extents.
         */
        ret = btrfs_fdatawrite_range(inode, start, orig_end);
        if (ret)
                return ret;

        /*
         * If we have a writeback error don't return immediately. Wait first
         * for any ordered extents that haven't completed yet. This is to make
         * sure no one can dirty the same page ranges and call writepages()
         * before the ordered extents complete - to avoid failures (-EEXIST)
         * when adding the new ordered extents to the ordered tree.
         */
        ret_wb = filemap_fdatawait_range(inode->i_mapping, start, orig_end);

        end = orig_end;
        while (1) {
                ordered = btrfs_lookup_first_ordered_extent(BTRFS_I(inode), end);
                if (!ordered)
                        break;
                if (ordered->file_offset > orig_end) {
                        btrfs_put_ordered_extent(ordered);
                        break;
                }
                if (ordered->file_offset + ordered->num_bytes <= start) {
                        btrfs_put_ordered_extent(ordered);
                        break;
                }
                btrfs_start_ordered_extent(ordered, 1);
                end = ordered->file_offset;
                /*
                 * If the ordered extent had an error save the error but don't
                 * exit without waiting first for all other ordered extents in
                 * the range to complete.
                 */
                if (test_bit(BTRFS_ORDERED_IOERR, &ordered->flags))
                        ret = -EIO;
                btrfs_put_ordered_extent(ordered);
                if (end == 0 || end == start)
                        break;
                end--;
        }
        return ret_wb ? ret_wb : ret;
}
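
/*
 * Illustrative caller sketch (not part of the original file): an fsync-like
 * path waiting out a whole file. Passing a length of (u64)-1 exercises the
 * overflow clamp at the top of btrfs_wait_ordered_range(). The helper name
 * is hypothetical.
 */
static inline int example_wait_whole_file(struct inode *inode)
{
        return btrfs_wait_ordered_range(inode, 0, (u64)-1);
}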

/*
 * Find an ordered extent corresponding to file_offset. Return NULL if
 * nothing is found, otherwise take a reference on the extent and return it.
 */
struct btrfs_ordered_extent *btrfs_lookup_ordered_extent(struct btrfs_inode *inode,
                                                         u64 file_offset)
{
        struct btrfs_ordered_inode_tree *tree;
        struct rb_node *node;
        struct btrfs_ordered_extent *entry = NULL;
        unsigned long flags;

        tree = &inode->ordered_tree;
        spin_lock_irqsave(&tree->lock, flags);
        node = tree_search(tree, file_offset);
        if (!node)
                goto out;

        entry = rb_entry(node, struct btrfs_ordered_extent, rb_node);
        if (!in_range(file_offset, entry->file_offset, entry->num_bytes))
                entry = NULL;
        if (entry)
                refcount_inc(&entry->refs);
out:
        spin_unlock_irqrestore(&tree->lock, flags);
        return entry;
}

/*
 * Since the DIO code tries to lock a wide area we need to look for any ordered
 * extents that exist in the range, rather than just the start of the range.
 */
struct btrfs_ordered_extent *btrfs_lookup_ordered_range(
                struct btrfs_inode *inode, u64 file_offset, u64 len)
{
        struct btrfs_ordered_inode_tree *tree;
        struct rb_node *node;
        struct btrfs_ordered_extent *entry = NULL;

        tree = &inode->ordered_tree;
        spin_lock_irq(&tree->lock);
        node = tree_search(tree, file_offset);
        if (!node) {
                node = tree_search(tree, file_offset + len);
                if (!node)
                        goto out;
        }

        while (1) {
                entry = rb_entry(node, struct btrfs_ordered_extent, rb_node);
                if (range_overlaps(entry, file_offset, len))
                        break;

                if (entry->file_offset >= file_offset + len) {
                        entry = NULL;
                        break;
                }
                entry = NULL;
                node = rb_next(node);
                if (!node)
                        break;
        }
out:
        if (entry)
                refcount_inc(&entry->refs);
        spin_unlock_irq(&tree->lock);
        return entry;
}
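
/*
 * Illustrative caller sketch (not part of the original file): drain every
 * ordered extent overlapping [start, start + len) using the range lookup
 * above. The helper name is hypothetical.
 */
static inline void example_drain_range(struct btrfs_inode *inode, u64 start,
                                       u64 len)
{
        struct btrfs_ordered_extent *ordered;

        while ((ordered = btrfs_lookup_ordered_range(inode, start, len))) {
                btrfs_start_ordered_extent(ordered, 1);
                btrfs_put_ordered_extent(ordered);
        }
}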

/*
 * Adds all ordered extents to the given list. The list ends up sorted by the
 * file_offset of the ordered extents.
 */
void btrfs_get_ordered_extents_for_logging(struct btrfs_inode *inode,
                                           struct list_head *list)
{
        struct btrfs_ordered_inode_tree *tree = &inode->ordered_tree;
        struct rb_node *n;

        ASSERT(inode_is_locked(&inode->vfs_inode));

        spin_lock_irq(&tree->lock);
        for (n = rb_first(&tree->tree); n; n = rb_next(n)) {
                struct btrfs_ordered_extent *ordered;

                ordered = rb_entry(n, struct btrfs_ordered_extent, rb_node);

                if (test_bit(BTRFS_ORDERED_LOGGED, &ordered->flags))
                        continue;

                ASSERT(list_empty(&ordered->log_list));
                list_add_tail(&ordered->log_list, list);
                refcount_inc(&ordered->refs);
        }
        spin_unlock_irq(&tree->lock);
}

/*
 * lookup and return any extent before 'file_offset'. NULL is returned
 * if none is found
 */
struct btrfs_ordered_extent *
btrfs_lookup_first_ordered_extent(struct btrfs_inode *inode, u64 file_offset)
{
        struct btrfs_ordered_inode_tree *tree;
        struct rb_node *node;
        struct btrfs_ordered_extent *entry = NULL;

        tree = &inode->ordered_tree;
        spin_lock_irq(&tree->lock);
        node = tree_search(tree, file_offset);
        if (!node)
                goto out;

        entry = rb_entry(node, struct btrfs_ordered_extent, rb_node);
        refcount_inc(&entry->refs);
out:
        spin_unlock_irq(&tree->lock);
        return entry;
}

/*
 * Lookup the first ordered extent that overlaps the range
 * [@file_offset, @file_offset + @len).
 *
 * The difference between this and btrfs_lookup_first_ordered_extent() is
 * that this one won't return any ordered extent that does not overlap the range.
 * The difference against btrfs_lookup_ordered_extent() is that this function
 * ensures the first ordered extent in the range gets returned.
 */
struct btrfs_ordered_extent *btrfs_lookup_first_ordered_range(
                        struct btrfs_inode *inode, u64 file_offset, u64 len)
{
        struct btrfs_ordered_inode_tree *tree = &inode->ordered_tree;
        struct rb_node *node;
        struct rb_node *cur;
        struct rb_node *prev;
        struct rb_node *next;
        struct btrfs_ordered_extent *entry = NULL;

        spin_lock_irq(&tree->lock);
        node = tree->tree.rb_node;
        /*
         * Here we don't want to use tree_search() which will use tree->last
         * and screw up the search order.
         * And __tree_search() can't return the adjacent ordered extents
         * either, thus here we do our own search.
         */
        while (node) {
                entry = rb_entry(node, struct btrfs_ordered_extent, rb_node);

                if (file_offset < entry->file_offset) {
                        node = node->rb_left;
                } else if (file_offset >= entry_end(entry)) {
                        node = node->rb_right;
                } else {
                        /*
                         * Direct hit, got an ordered extent that starts at
                         * @file_offset
                         */
                        goto out;
                }
        }
        if (!entry) {
                /* Empty tree */
                goto out;
        }

        cur = &entry->rb_node;
        /* We got an entry around @file_offset, check adjacent entries */
        if (entry->file_offset < file_offset) {
                prev = cur;
                next = rb_next(cur);
        } else {
                prev = rb_prev(cur);
                next = cur;
        }
        if (prev) {
                entry = rb_entry(prev, struct btrfs_ordered_extent, rb_node);
                if (range_overlaps(entry, file_offset, len))
                        goto out;
        }
        if (next) {
                entry = rb_entry(next, struct btrfs_ordered_extent, rb_node);
                if (range_overlaps(entry, file_offset, len))
                        goto out;
        }
        /* No ordered extent in the range */
        entry = NULL;
out:
        if (entry)
                refcount_inc(&entry->refs);
        spin_unlock_irq(&tree->lock);
        return entry;
}

/*
 * btrfs_lock_and_flush_ordered_range - Lock the passed range and ensure all
 * pending ordered extents in it are run to completion.
 *
 * @inode:        Inode whose ordered tree is to be searched
 * @start:        Beginning of range to flush
 * @end:          Last byte of range to lock
 * @cached_state: If passed, will return the extent state responsible for the
 *                locked range. It's the caller's responsibility to free the
 *                cached state.
 *
 * This function always returns with the given range locked, ensuring that
 * after it's called no ordered extent can be pending.
 */
void btrfs_lock_and_flush_ordered_range(struct btrfs_inode *inode, u64 start,
                                        u64 end,
                                        struct extent_state **cached_state)
{
        struct btrfs_ordered_extent *ordered;
        struct extent_state *cache = NULL;
        struct extent_state **cachedp = &cache;

        if (cached_state)
                cachedp = cached_state;

        while (1) {
                lock_extent_bits(&inode->io_tree, start, end, cachedp);
                ordered = btrfs_lookup_ordered_range(inode, start,
                                                     end - start + 1);
                if (!ordered) {
                        /*
                         * If no external cached_state has been passed then
                         * decrement the extra ref taken for cachedp since we
                         * aren't exposing it outside of this function
                         */
                        if (!cached_state)
                                refcount_dec(&cache->refs);
                        break;
                }
                unlock_extent_cached(&inode->io_tree, start, end, cachedp);
                btrfs_start_ordered_extent(ordered, 1);
                btrfs_put_ordered_extent(ordered);
        }
}
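
/*
 * Illustrative caller sketch (not part of the original file): take the
 * io_tree lock over one page with no ordered extent left pending inside it,
 * then unlock. The page-sized range and the helper name are assumptions.
 */
static inline void example_lock_flushed_page(struct btrfs_inode *inode,
                                             u64 start)
{
        struct extent_state *cached = NULL;
        const u64 end = start + PAGE_SIZE - 1;

        btrfs_lock_and_flush_ordered_range(inode, start, end, &cached);
        /* ... operate on the locked, ordered-extent-free range ... */
        unlock_extent_cached(&inode->io_tree, start, end, &cached);
}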

static int clone_ordered_extent(struct btrfs_ordered_extent *ordered, u64 pos,
                                u64 len)
{
        struct inode *inode = ordered->inode;
        struct btrfs_fs_info *fs_info = BTRFS_I(inode)->root->fs_info;
        u64 file_offset = ordered->file_offset + pos;
        u64 disk_bytenr = ordered->disk_bytenr + pos;
        unsigned long flags = ordered->flags & BTRFS_ORDERED_TYPE_FLAGS;

        /*
         * The splitting extent is already counted and will be added again in
         * btrfs_add_ordered_extent_*(). Subtract len to avoid double counting.
         */
        percpu_counter_add_batch(&fs_info->ordered_bytes, -len,
                                 fs_info->delalloc_batch);
        WARN_ON_ONCE(flags & (1 << BTRFS_ORDERED_COMPRESSED));
        return btrfs_add_ordered_extent(BTRFS_I(inode), file_offset, len, len,
                                        disk_bytenr, len, 0, flags,
                                        ordered->compress_type);
}

int btrfs_split_ordered_extent(struct btrfs_ordered_extent *ordered, u64 pre,
                               u64 post)
{
        struct inode *inode = ordered->inode;
        struct btrfs_ordered_inode_tree *tree = &BTRFS_I(inode)->ordered_tree;
        struct rb_node *node;
        struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
        int ret = 0;

        spin_lock_irq(&tree->lock);
        /* Remove from tree once */
        node = &ordered->rb_node;
        rb_erase(node, &tree->tree);
        RB_CLEAR_NODE(node);
        if (tree->last == node)
                tree->last = NULL;

        ordered->file_offset += pre;
        ordered->disk_bytenr += pre;
        ordered->num_bytes -= (pre + post);
        ordered->disk_num_bytes -= (pre + post);
        ordered->bytes_left -= (pre + post);

        /* Re-insert the node */
        node = tree_insert(&tree->tree, ordered->file_offset, &ordered->rb_node);
        if (node)
                btrfs_panic(fs_info, -EEXIST,
                            "zoned: inconsistency in ordered tree at offset %llu",
                            ordered->file_offset);

        spin_unlock_irq(&tree->lock);

        if (pre)
                ret = clone_ordered_extent(ordered, 0, pre);
        if (ret == 0 && post)
                ret = clone_ordered_extent(ordered, pre + ordered->disk_num_bytes,
                                           post);

        return ret;
}
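
/*
 * Illustrative caller sketch (not part of the original file): split an
 * ordered extent so that only the contiguously written middle part remains,
 * as the zoned code does. written_start/written_len are assumed inputs and
 * the helper name is hypothetical.
 */
static inline int example_split_around_written(struct btrfs_ordered_extent *ordered,
                                               u64 written_start, u64 written_len)
{
        const u64 pre = written_start - ordered->file_offset;
        const u64 post = ordered->num_bytes - pre - written_len;

        return btrfs_split_ordered_extent(ordered, pre, post);
}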

int __init ordered_data_init(void)
{
        btrfs_ordered_extent_cache = kmem_cache_create("btrfs_ordered_extent",
                                        sizeof(struct btrfs_ordered_extent), 0,
                                        SLAB_MEM_SPREAD,
                                        NULL);
        if (!btrfs_ordered_extent_cache)
                return -ENOMEM;

        return 0;
}

void __cold ordered_data_exit(void)
{
        kmem_cache_destroy(btrfs_ordered_extent_cache);
}