Linux kernel mirror (for testing)
git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2012 Alexander Block. All rights reserved.
 */

#include <linux/bsearch.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/sort.h>
#include <linux/mount.h>
#include <linux/xattr.h>
#include <linux/posix_acl_xattr.h>
#include <linux/radix-tree.h>
#include <linux/vmalloc.h>
#include <linux/string.h>
#include <linux/compat.h>
#include <linux/crc32c.h>

#include "send.h"
#include "backref.h"
#include "locking.h"
#include "disk-io.h"
#include "btrfs_inode.h"
#include "transaction.h"
#include "compression.h"
#include "xattr.h"
#include "print-tree.h"

/*
 * Maximum number of references an extent can have in order for us to attempt
 * to issue clone operations instead of write operations. This currently exists
 * to avoid hitting limitations of the backreference walking code (taking a lot
 * of time and using too much memory for extents with a large number of
 * references).
 */
#define SEND_MAX_EXTENT_REFS	64

/*
 * A fs_path is a helper to dynamically build path names with unknown size.
 * It reallocates the internal buffer on demand.
 * It allows fast adding of path elements on the right side (normal path) and
 * fast adding to the left side (reversed path). A reversed path can also be
 * unreversed if needed.
 */
struct fs_path {
	union {
		struct {
			char *start;
			char *end;

			char *buf;
			unsigned short buf_len:15;
			unsigned short reversed:1;
			char inline_buf[];
		};
		/*
		 * Average path length does not exceed 200 bytes, we'll have
		 * better packing in the slab and higher chance to satisfy
		 * an allocation later during send.
		 */
		char pad[256];
	};
};
#define FS_PATH_INLINE_SIZE \
	(sizeof(struct fs_path) - offsetof(struct fs_path, inline_buf))
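
/*
 * Example (illustrative only): a reversed fs_path is built by prepending,
 * which matches how paths are assembled while walking parent references
 * from an inode up to the subvolume root:
 *
 *	p = fs_path_alloc_reversed();
 *	fs_path_add(p, "c", 1);		// p->start == "c"
 *	fs_path_add(p, "b", 1);		// p->start == "b/c"
 *	fs_path_add(p, "a", 1);		// p->start == "a/b/c"
 *	fs_path_unreverse(p);		// string now at the buffer front
 *	fs_path_free(p);
 */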

/* reused for each extent */
struct clone_root {
	struct btrfs_root *root;
	u64 ino;
	u64 offset;

	u64 found_refs;
};

#define SEND_CTX_MAX_NAME_CACHE_SIZE 128
#define SEND_CTX_NAME_CACHE_CLEAN_SIZE (SEND_CTX_MAX_NAME_CACHE_SIZE * 2)
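
/*
 * Note on the two limits above: the name cache is trimmed lazily. Nothing
 * is evicted until the cache reaches SEND_CTX_NAME_CACHE_CLEAN_SIZE (256)
 * entries; name_cache_clean_unused() then shrinks it back down to
 * SEND_CTX_MAX_NAME_CACHE_SIZE (128), dropping the least recently used
 * entries from the front of name_cache_list.
 */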

struct send_ctx {
	struct file *send_filp;
	loff_t send_off;
	char *send_buf;
	u32 send_size;
	u32 send_max_size;
	u64 total_send_size;
	u64 cmd_send_size[BTRFS_SEND_C_MAX + 1];
	u64 flags;	/* 'flags' member of btrfs_ioctl_send_args is u64 */
	/* Protocol version compatibility requested */
	u32 proto;

	struct btrfs_root *send_root;
	struct btrfs_root *parent_root;
	struct clone_root *clone_roots;
	int clone_roots_cnt;

	/* current state of the compare_tree call */
	struct btrfs_path *left_path;
	struct btrfs_path *right_path;
	struct btrfs_key *cmp_key;

	/*
	 * Keep track of the generation of the last transaction that was used
	 * for relocating a block group. This is periodically checked in order
	 * to detect if a relocation happened since the last check, so that we
	 * don't operate on stale extent buffers for nodes (level >= 1) or on
	 * stale disk_bytenr values of file extent items.
	 */
	u64 last_reloc_trans;

	/*
	 * Information about the currently processed inode. In case of deleted
	 * inodes, these are the values from the deleted inode.
	 */
	u64 cur_ino;
	u64 cur_inode_gen;
	int cur_inode_new;
	int cur_inode_new_gen;
	int cur_inode_deleted;
	u64 cur_inode_size;
	u64 cur_inode_mode;
	u64 cur_inode_rdev;
	u64 cur_inode_last_extent;
	u64 cur_inode_next_write_offset;
	bool ignore_cur_inode;

	u64 send_progress;

	struct list_head new_refs;
	struct list_head deleted_refs;

	struct radix_tree_root name_cache;
	struct list_head name_cache_list;
	int name_cache_size;

	struct file_ra_state ra;

	/*
	 * We process inodes in increasing order, so if before an incremental
	 * send we reversed the parent/child relationship of directories such
	 * that a directory with a lower inode number was the parent of a
	 * directory with a higher inode number, and the one becoming the new
	 * parent got renamed too, we can't rename/move the directory with the
	 * lower inode number when we finish processing it - we must process
	 * the directory with the higher inode number first, then rename/move
	 * it and then rename/move the directory with the lower inode number.
	 * Example follows.
	 *
	 * Tree state when the first send was performed:
	 *
	 * .
	 * |-- a                   (ino 257)
	 *     |-- b               (ino 258)
	 *         |
	 *         |
	 *         |-- c           (ino 259)
	 *         |   |-- d       (ino 260)
	 *         |
	 *         |-- c2          (ino 261)
	 *
	 * Tree state when the second (incremental) send is performed:
	 *
	 * .
	 * |-- a                   (ino 257)
	 *     |-- b               (ino 258)
	 *         |-- c2          (ino 261)
	 *             |-- d2      (ino 260)
	 *                 |-- cc  (ino 259)
	 *
	 * The sequence of steps that led to the second state was:
	 *
	 * mv /a/b/c/d /a/b/c2/d2
	 * mv /a/b/c /a/b/c2/d2/cc
	 *
	 * "c" has the lower inode number, but we can't move it (2nd mv
	 * operation) before we move "d", which has the higher inode number.
	 *
	 * So we just memorize which move/rename operations must be performed
	 * later when their respective parent is processed and moved/renamed.
	 */

	/* Indexed by parent directory inode number. */
	struct rb_root pending_dir_moves;

	/*
	 * Reverse index, indexed by the inode number of a directory that
	 * is waiting for the move/rename of its immediate parent before its
	 * own move/rename can be performed.
	 */
	struct rb_root waiting_dir_moves;

	/*
	 * A directory that is going to be rm'ed might have a child directory
	 * which is in the pending directory moves index above. In this case,
	 * the directory can only be removed after the move/rename of its child
	 * is performed. Example:
	 *
	 * Parent snapshot:
	 *
	 * .                    (ino 256)
	 * |-- a/               (ino 257)
	 *     |-- b/           (ino 258)
	 *         |-- c/       (ino 259)
	 *         |   |-- x/   (ino 260)
	 *         |
	 *         |-- y/       (ino 261)
	 *
	 * Send snapshot:
	 *
	 * .                    (ino 256)
	 * |-- a/               (ino 257)
	 *     |-- b/           (ino 258)
	 *         |-- YY/      (ino 261)
	 *              |-- x/  (ino 260)
	 *
	 * Sequence of steps that led to the send snapshot:
	 * rm -f /a/b/c/foo.txt
	 * mv /a/b/y /a/b/YY
	 * mv /a/b/c/x /a/b/YY
	 * rmdir /a/b/c
	 *
	 * When the child is processed, its move/rename is delayed until its
	 * parent is processed (as explained above), but all other operations
	 * like update utimes, chown, chgrp, etc, are performed and the paths
	 * that it uses for those operations must use the orphanized name of
	 * its parent (the directory we're going to rm later), so we need to
	 * memorize that name.
	 *
	 * Indexed by the inode number of the directory to be deleted.
	 */
	struct rb_root orphan_dirs;
};

struct pending_dir_move {
	struct rb_node node;
	struct list_head list;
	u64 parent_ino;
	u64 ino;
	u64 gen;
	struct list_head update_refs;
};

struct waiting_dir_move {
	struct rb_node node;
	u64 ino;
	/*
	 * There might be some directory that could not be removed because it
	 * was waiting for this directory inode to be moved first. Therefore
	 * after this directory is moved, we can try to rmdir the ino
	 * rmdir_ino.
	 */
	u64 rmdir_ino;
	u64 rmdir_gen;
	bool orphanized;
};

struct orphan_dir_info {
	struct rb_node node;
	u64 ino;
	u64 gen;
	u64 last_dir_index_offset;
};

struct name_cache_entry {
	struct list_head list;
	/*
	 * The radix tree has only 32bit entries but we need to handle 64bit
	 * inums. We use the lower 32bit of the 64bit inum to store it in the
	 * tree. If more than one inum would fall into the same entry, we use
	 * radix_list to store the additional entries. radix_list is also used
	 * to store entries where two entries have the same inum but different
	 * generations.
	 */
	struct list_head radix_list;
	u64 ino;
	u64 gen;
	u64 parent_ino;
	u64 parent_gen;
	int ret;
	int need_later_update;
	int name_len;
	char name[];
};

#define ADVANCE			1
#define ADVANCE_ONLY_NEXT	-1

enum btrfs_compare_tree_result {
	BTRFS_COMPARE_TREE_NEW,
	BTRFS_COMPARE_TREE_DELETED,
	BTRFS_COMPARE_TREE_CHANGED,
	BTRFS_COMPARE_TREE_SAME,
};

__cold
static void inconsistent_snapshot_error(struct send_ctx *sctx,
					enum btrfs_compare_tree_result result,
					const char *what)
{
	const char *result_string;

	switch (result) {
	case BTRFS_COMPARE_TREE_NEW:
		result_string = "new";
		break;
	case BTRFS_COMPARE_TREE_DELETED:
		result_string = "deleted";
		break;
	case BTRFS_COMPARE_TREE_CHANGED:
		result_string = "updated";
		break;
	case BTRFS_COMPARE_TREE_SAME:
		ASSERT(0);
		result_string = "unchanged";
		break;
	default:
		ASSERT(0);
		result_string = "unexpected";
	}

	btrfs_err(sctx->send_root->fs_info,
		  "Send: inconsistent snapshot, found %s %s for inode %llu without updated inode item, send root is %llu, parent root is %llu",
		  result_string, what, sctx->cmp_key->objectid,
		  sctx->send_root->root_key.objectid,
		  (sctx->parent_root ?
		   sctx->parent_root->root_key.objectid : 0));
}

__maybe_unused
static bool proto_cmd_ok(const struct send_ctx *sctx, int cmd)
{
	switch (sctx->proto) {
	case 1:	 return cmd < __BTRFS_SEND_C_MAX_V1;
	case 2:	 return cmd < __BTRFS_SEND_C_MAX_V2;
	default: return false;
	}
}

static int is_waiting_for_move(struct send_ctx *sctx, u64 ino);

static struct waiting_dir_move *
get_waiting_dir_move(struct send_ctx *sctx, u64 ino);

static int is_waiting_for_rm(struct send_ctx *sctx, u64 dir_ino, u64 gen);

static int need_send_hole(struct send_ctx *sctx)
{
	return (sctx->parent_root && !sctx->cur_inode_new &&
		!sctx->cur_inode_new_gen && !sctx->cur_inode_deleted &&
		S_ISREG(sctx->cur_inode_mode));
}

static void fs_path_reset(struct fs_path *p)
{
	if (p->reversed) {
		p->start = p->buf + p->buf_len - 1;
		p->end = p->start;
		*p->start = 0;
	} else {
		p->start = p->buf;
		p->end = p->start;
		*p->start = 0;
	}
}

static struct fs_path *fs_path_alloc(void)
{
	struct fs_path *p;

	p = kmalloc(sizeof(*p), GFP_KERNEL);
	if (!p)
		return NULL;
	p->reversed = 0;
	p->buf = p->inline_buf;
	p->buf_len = FS_PATH_INLINE_SIZE;
	fs_path_reset(p);
	return p;
}

static struct fs_path *fs_path_alloc_reversed(void)
{
	struct fs_path *p;

	p = fs_path_alloc();
	if (!p)
		return NULL;
	p->reversed = 1;
	fs_path_reset(p);
	return p;
}

static void fs_path_free(struct fs_path *p)
{
	if (!p)
		return;
	if (p->buf != p->inline_buf)
		kfree(p->buf);
	kfree(p);
}

static int fs_path_len(struct fs_path *p)
{
	return p->end - p->start;
}

static int fs_path_ensure_buf(struct fs_path *p, int len)
{
	char *tmp_buf;
	int path_len;
	int old_buf_len;

	len++;

	if (p->buf_len >= len)
		return 0;

	if (len > PATH_MAX) {
		WARN_ON(1);
		return -ENOMEM;
	}

	path_len = p->end - p->start;
	old_buf_len = p->buf_len;

	/*
	 * First time the inline_buf does not suffice
	 */
	if (p->buf == p->inline_buf) {
		tmp_buf = kmalloc(len, GFP_KERNEL);
		if (tmp_buf)
			memcpy(tmp_buf, p->buf, old_buf_len);
	} else {
		tmp_buf = krealloc(p->buf, len, GFP_KERNEL);
	}
	if (!tmp_buf)
		return -ENOMEM;
	p->buf = tmp_buf;
	/*
	 * The real size of the buffer is bigger; this lets the fast path
	 * trigger most of the time.
	 */
	p->buf_len = ksize(p->buf);

	if (p->reversed) {
		tmp_buf = p->buf + old_buf_len - path_len - 1;
		p->end = p->buf + p->buf_len - 1;
		p->start = p->end - path_len;
		memmove(p->start, tmp_buf, path_len + 1);
	} else {
		p->start = p->buf;
		p->end = p->start + path_len;
	}
	return 0;
}

static int fs_path_prepare_for_add(struct fs_path *p, int name_len,
				   char **prepared)
{
	int ret;
	int new_len;

	new_len = p->end - p->start + name_len;
	if (p->start != p->end)
		new_len++;
	ret = fs_path_ensure_buf(p, new_len);
	if (ret < 0)
		goto out;

	if (p->reversed) {
		if (p->start != p->end)
			*--p->start = '/';
		p->start -= name_len;
		*prepared = p->start;
	} else {
		if (p->start != p->end)
			*p->end++ = '/';
		*prepared = p->end;
		p->end += name_len;
		*p->end = 0;
	}

out:
	return ret;
}

static int fs_path_add(struct fs_path *p, const char *name, int name_len)
{
	int ret;
	char *prepared;

	ret = fs_path_prepare_for_add(p, name_len, &prepared);
	if (ret < 0)
		goto out;
	memcpy(prepared, name, name_len);

out:
	return ret;
}

static int fs_path_add_path(struct fs_path *p, struct fs_path *p2)
{
	int ret;
	char *prepared;

	ret = fs_path_prepare_for_add(p, p2->end - p2->start, &prepared);
	if (ret < 0)
		goto out;
	memcpy(prepared, p2->start, p2->end - p2->start);

out:
	return ret;
}

static int fs_path_add_from_extent_buffer(struct fs_path *p,
					  struct extent_buffer *eb,
					  unsigned long off, int len)
{
	int ret;
	char *prepared;

	ret = fs_path_prepare_for_add(p, len, &prepared);
	if (ret < 0)
		goto out;

	read_extent_buffer(eb, prepared, off, len);

out:
	return ret;
}

static int fs_path_copy(struct fs_path *p, struct fs_path *from)
{
	p->reversed = from->reversed;
	fs_path_reset(p);

	return fs_path_add_path(p, from);
}

static void fs_path_unreverse(struct fs_path *p)
{
	char *tmp;
	int len;

	if (!p->reversed)
		return;

	tmp = p->start;
	len = p->end - p->start;
	p->start = p->buf;
	p->end = p->start + len;
	memmove(p->start, tmp, len + 1);
	p->reversed = 0;
}
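
/*
 * Buffer layout of a reversed path before fs_path_unreverse() (sketch):
 *
 *	buf:  [ ....unused.... | a / b / c \0 ]
 *	                        ^start       ^end == buf + buf_len - 1
 *
 * fs_path_unreverse() memmove()s the string, including its NUL terminator,
 * to the front of the buffer and clears the reversed flag.
 */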

static struct btrfs_path *alloc_path_for_send(void)
{
	struct btrfs_path *path;

	path = btrfs_alloc_path();
	if (!path)
		return NULL;
	path->search_commit_root = 1;
	path->skip_locking = 1;
	path->need_commit_sem = 1;
	return path;
}

static int write_buf(struct file *filp, const void *buf, u32 len, loff_t *off)
{
	int ret;
	u32 pos = 0;

	while (pos < len) {
		ret = kernel_write(filp, buf + pos, len - pos, off);
		/* TODO handle that correctly */
		/*if (ret == -ERESTARTSYS) {
			continue;
		}*/
		if (ret < 0)
			return ret;
		if (ret == 0)
			return -EIO;
		pos += ret;
	}

	return 0;
}

static int tlv_put(struct send_ctx *sctx, u16 attr, const void *data, int len)
{
	struct btrfs_tlv_header *hdr;
	int total_len = sizeof(*hdr) + len;
	int left = sctx->send_max_size - sctx->send_size;

	if (unlikely(left < total_len))
		return -EOVERFLOW;

	hdr = (struct btrfs_tlv_header *) (sctx->send_buf + sctx->send_size);
	put_unaligned_le16(attr, &hdr->tlv_type);
	put_unaligned_le16(len, &hdr->tlv_len);
	memcpy(hdr + 1, data, len);
	sctx->send_size += total_len;

	return 0;
}
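
/*
 * Wire layout of a single attribute as emitted by tlv_put() above:
 *
 *	+----------+---------+-----------------+
 *	| tlv_type | tlv_len | data            |
 *	| __le16   | __le16  | tlv_len bytes   |
 *	+----------+---------+-----------------+
 *
 * Both header fields are little endian and the payload is copied verbatim
 * right behind the header into sctx->send_buf.
 */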

#define TLV_PUT_DEFINE_INT(bits) \
	static int tlv_put_u##bits(struct send_ctx *sctx,		\
				   u##bits attr, u##bits value)		\
	{								\
		__le##bits __tmp = cpu_to_le##bits(value);		\
		return tlv_put(sctx, attr, &__tmp, sizeof(__tmp));	\
	}

TLV_PUT_DEFINE_INT(64)

static int tlv_put_string(struct send_ctx *sctx, u16 attr,
			  const char *str, int len)
{
	if (len == -1)
		len = strlen(str);
	return tlv_put(sctx, attr, str, len);
}

static int tlv_put_uuid(struct send_ctx *sctx, u16 attr,
			const u8 *uuid)
{
	return tlv_put(sctx, attr, uuid, BTRFS_UUID_SIZE);
}

static int tlv_put_btrfs_timespec(struct send_ctx *sctx, u16 attr,
				  struct extent_buffer *eb,
				  struct btrfs_timespec *ts)
{
	struct btrfs_timespec bts;

	read_extent_buffer(eb, &bts, (unsigned long)ts, sizeof(bts));
	return tlv_put(sctx, attr, &bts, sizeof(bts));
}

#define TLV_PUT(sctx, attrtype, data, attrlen) \
	do { \
		ret = tlv_put(sctx, attrtype, data, attrlen); \
		if (ret < 0) \
			goto tlv_put_failure; \
	} while (0)

#define TLV_PUT_INT(sctx, attrtype, bits, value) \
	do { \
		ret = tlv_put_u##bits(sctx, attrtype, value); \
		if (ret < 0) \
			goto tlv_put_failure; \
	} while (0)

#define TLV_PUT_U8(sctx, attrtype, data) TLV_PUT_INT(sctx, attrtype, 8, data)
#define TLV_PUT_U16(sctx, attrtype, data) TLV_PUT_INT(sctx, attrtype, 16, data)
#define TLV_PUT_U32(sctx, attrtype, data) TLV_PUT_INT(sctx, attrtype, 32, data)
#define TLV_PUT_U64(sctx, attrtype, data) TLV_PUT_INT(sctx, attrtype, 64, data)
#define TLV_PUT_STRING(sctx, attrtype, str, len) \
	do { \
		ret = tlv_put_string(sctx, attrtype, str, len); \
		if (ret < 0) \
			goto tlv_put_failure; \
	} while (0)
#define TLV_PUT_PATH(sctx, attrtype, p) \
	do { \
		ret = tlv_put_string(sctx, attrtype, p->start, \
				     p->end - p->start); \
		if (ret < 0) \
			goto tlv_put_failure; \
	} while (0)
#define TLV_PUT_UUID(sctx, attrtype, uuid) \
	do { \
		ret = tlv_put_uuid(sctx, attrtype, uuid); \
		if (ret < 0) \
			goto tlv_put_failure; \
	} while (0)
#define TLV_PUT_BTRFS_TIMESPEC(sctx, attrtype, eb, ts) \
	do { \
		ret = tlv_put_btrfs_timespec(sctx, attrtype, eb, ts); \
		if (ret < 0) \
			goto tlv_put_failure; \
	} while (0)

static int send_header(struct send_ctx *sctx)
{
	struct btrfs_stream_header hdr;

	strcpy(hdr.magic, BTRFS_SEND_STREAM_MAGIC);
	hdr.version = cpu_to_le32(BTRFS_SEND_STREAM_VERSION);

	return write_buf(sctx->send_filp, &hdr, sizeof(hdr),
			 &sctx->send_off);
}

/*
 * For each command/item we want to send to userspace, we call this function.
 */
static int begin_cmd(struct send_ctx *sctx, int cmd)
{
	struct btrfs_cmd_header *hdr;

	if (WARN_ON(!sctx->send_buf))
		return -EINVAL;

	BUG_ON(sctx->send_size);

	sctx->send_size += sizeof(*hdr);
	hdr = (struct btrfs_cmd_header *)sctx->send_buf;
	put_unaligned_le16(cmd, &hdr->cmd);

	return 0;
}

static int send_cmd(struct send_ctx *sctx)
{
	int ret;
	struct btrfs_cmd_header *hdr;
	u32 crc;

	hdr = (struct btrfs_cmd_header *)sctx->send_buf;
	put_unaligned_le32(sctx->send_size - sizeof(*hdr), &hdr->len);
	put_unaligned_le32(0, &hdr->crc);

	crc = btrfs_crc32c(0, (unsigned char *)sctx->send_buf, sctx->send_size);
	put_unaligned_le32(crc, &hdr->crc);

	ret = write_buf(sctx->send_filp, sctx->send_buf, sctx->send_size,
			&sctx->send_off);

	sctx->total_send_size += sctx->send_size;
	sctx->cmd_send_size[get_unaligned_le16(&hdr->cmd)] += sctx->send_size;
	sctx->send_size = 0;

	return ret;
}
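
/*
 * Every command in the stream is framed by begin_cmd()/send_cmd() as a
 * btrfs_cmd_header (len, cmd, crc - all little endian) followed by `len`
 * bytes of TLV encoded attributes. The crc32c checksum covers the whole
 * command including the header and is computed with the crc field itself
 * set to zero, as done in send_cmd() above.
 */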

/*
 * Sends a move instruction to user space
 */
static int send_rename(struct send_ctx *sctx,
		       struct fs_path *from, struct fs_path *to)
{
	struct btrfs_fs_info *fs_info = sctx->send_root->fs_info;
	int ret;

	btrfs_debug(fs_info, "send_rename %s -> %s", from->start, to->start);

	ret = begin_cmd(sctx, BTRFS_SEND_C_RENAME);
	if (ret < 0)
		goto out;

	TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, from);
	TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH_TO, to);

	ret = send_cmd(sctx);

tlv_put_failure:
out:
	return ret;
}

/*
 * Sends a link instruction to user space
 */
static int send_link(struct send_ctx *sctx,
		     struct fs_path *path, struct fs_path *lnk)
{
	struct btrfs_fs_info *fs_info = sctx->send_root->fs_info;
	int ret;

	btrfs_debug(fs_info, "send_link %s -> %s", path->start, lnk->start);

	ret = begin_cmd(sctx, BTRFS_SEND_C_LINK);
	if (ret < 0)
		goto out;

	TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, path);
	TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH_LINK, lnk);

	ret = send_cmd(sctx);

tlv_put_failure:
out:
	return ret;
}

/*
 * Sends an unlink instruction to user space
 */
static int send_unlink(struct send_ctx *sctx, struct fs_path *path)
{
	struct btrfs_fs_info *fs_info = sctx->send_root->fs_info;
	int ret;

	btrfs_debug(fs_info, "send_unlink %s", path->start);

	ret = begin_cmd(sctx, BTRFS_SEND_C_UNLINK);
	if (ret < 0)
		goto out;

	TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, path);

	ret = send_cmd(sctx);

tlv_put_failure:
out:
	return ret;
}

/*
 * Sends a rmdir instruction to user space
 */
static int send_rmdir(struct send_ctx *sctx, struct fs_path *path)
{
	struct btrfs_fs_info *fs_info = sctx->send_root->fs_info;
	int ret;

	btrfs_debug(fs_info, "send_rmdir %s", path->start);

	ret = begin_cmd(sctx, BTRFS_SEND_C_RMDIR);
	if (ret < 0)
		goto out;

	TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, path);

	ret = send_cmd(sctx);

tlv_put_failure:
out:
	return ret;
}

/*
 * Helper function to retrieve some fields from an inode item.
 */
static int __get_inode_info(struct btrfs_root *root, struct btrfs_path *path,
			    u64 ino, u64 *size, u64 *gen, u64 *mode, u64 *uid,
			    u64 *gid, u64 *rdev)
{
	int ret;
	struct btrfs_inode_item *ii;
	struct btrfs_key key;

	key.objectid = ino;
	key.type = BTRFS_INODE_ITEM_KEY;
	key.offset = 0;
	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	if (ret) {
		if (ret > 0)
			ret = -ENOENT;
		return ret;
	}

	ii = btrfs_item_ptr(path->nodes[0], path->slots[0],
			    struct btrfs_inode_item);
	if (size)
		*size = btrfs_inode_size(path->nodes[0], ii);
	if (gen)
		*gen = btrfs_inode_generation(path->nodes[0], ii);
	if (mode)
		*mode = btrfs_inode_mode(path->nodes[0], ii);
	if (uid)
		*uid = btrfs_inode_uid(path->nodes[0], ii);
	if (gid)
		*gid = btrfs_inode_gid(path->nodes[0], ii);
	if (rdev)
		*rdev = btrfs_inode_rdev(path->nodes[0], ii);

	return ret;
}

static int get_inode_info(struct btrfs_root *root,
			  u64 ino, u64 *size, u64 *gen,
			  u64 *mode, u64 *uid, u64 *gid,
			  u64 *rdev)
{
	struct btrfs_path *path;
	int ret;

	path = alloc_path_for_send();
	if (!path)
		return -ENOMEM;
	ret = __get_inode_info(root, path, ino, size, gen, mode, uid, gid,
			       rdev);
	btrfs_free_path(path);
	return ret;
}

typedef int (*iterate_inode_ref_t)(int num, u64 dir, int index,
				   struct fs_path *p,
				   void *ctx);

/*
 * Helper function to iterate the entries in ONE btrfs_inode_ref or
 * btrfs_inode_extref.
 * The iterate callback may return a non-zero value to stop iteration. This
 * can be a negative value for error codes or 1 to simply stop it.
 *
 * path must point to the INODE_REF or INODE_EXTREF when called.
 */
static int iterate_inode_ref(struct btrfs_root *root, struct btrfs_path *path,
			     struct btrfs_key *found_key, int resolve,
			     iterate_inode_ref_t iterate, void *ctx)
{
	struct extent_buffer *eb = path->nodes[0];
	struct btrfs_inode_ref *iref;
	struct btrfs_inode_extref *extref;
	struct btrfs_path *tmp_path;
	struct fs_path *p;
	u32 cur = 0;
	u32 total;
	int slot = path->slots[0];
	u32 name_len;
	char *start;
	int ret = 0;
	int num = 0;
	int index;
	u64 dir;
	unsigned long name_off;
	unsigned long elem_size;
	unsigned long ptr;

	p = fs_path_alloc_reversed();
	if (!p)
		return -ENOMEM;

	tmp_path = alloc_path_for_send();
	if (!tmp_path) {
		fs_path_free(p);
		return -ENOMEM;
	}

	if (found_key->type == BTRFS_INODE_REF_KEY) {
		ptr = (unsigned long)btrfs_item_ptr(eb, slot,
						    struct btrfs_inode_ref);
		total = btrfs_item_size(eb, slot);
		elem_size = sizeof(*iref);
	} else {
		ptr = btrfs_item_ptr_offset(eb, slot);
		total = btrfs_item_size(eb, slot);
		elem_size = sizeof(*extref);
	}

	while (cur < total) {
		fs_path_reset(p);

		if (found_key->type == BTRFS_INODE_REF_KEY) {
			iref = (struct btrfs_inode_ref *)(ptr + cur);
			name_len = btrfs_inode_ref_name_len(eb, iref);
			name_off = (unsigned long)(iref + 1);
			index = btrfs_inode_ref_index(eb, iref);
			dir = found_key->offset;
		} else {
			extref = (struct btrfs_inode_extref *)(ptr + cur);
			name_len = btrfs_inode_extref_name_len(eb, extref);
			name_off = (unsigned long)&extref->name;
			index = btrfs_inode_extref_index(eb, extref);
			dir = btrfs_inode_extref_parent(eb, extref);
		}

		if (resolve) {
			start = btrfs_ref_to_path(root, tmp_path, name_len,
						  name_off, eb, dir,
						  p->buf, p->buf_len);
			if (IS_ERR(start)) {
				ret = PTR_ERR(start);
				goto out;
			}
			if (start < p->buf) {
				/* overflow, try again with a larger buffer */
				ret = fs_path_ensure_buf(p,
						p->buf_len + p->buf - start);
				if (ret < 0)
					goto out;
				start = btrfs_ref_to_path(root, tmp_path,
							  name_len, name_off,
							  eb, dir,
							  p->buf, p->buf_len);
				if (IS_ERR(start)) {
					ret = PTR_ERR(start);
					goto out;
				}
				BUG_ON(start < p->buf);
			}
			p->start = start;
		} else {
			ret = fs_path_add_from_extent_buffer(p, eb, name_off,
							     name_len);
			if (ret < 0)
				goto out;
		}

		cur += elem_size + name_len;
		ret = iterate(num, dir, index, p, ctx);
		if (ret)
			goto out;
		num++;
	}

out:
	btrfs_free_path(tmp_path);
	fs_path_free(p);
	return ret;
}
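
/*
 * Item layouts walked by iterate_inode_ref() above; several refs can be
 * packed back to back in a single item, hence the `cur < total` loop:
 *
 *	INODE_REF item (key.offset is the parent dir inode):
 *	  [ btrfs_inode_ref | name ][ btrfs_inode_ref | name ] ...
 *
 *	INODE_EXTREF item (each element carries its own parent dir):
 *	  [ btrfs_inode_extref | name ][ btrfs_inode_extref | name ] ...
 */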

typedef int (*iterate_dir_item_t)(int num, struct btrfs_key *di_key,
				  const char *name, int name_len,
				  const char *data, int data_len,
				  void *ctx);

/*
 * Helper function to iterate the entries in ONE btrfs_dir_item.
 * The iterate callback may return a non-zero value to stop iteration. This
 * can be a negative value for error codes or 1 to simply stop it.
 *
 * path must point to the dir item when called.
 */
static int iterate_dir_item(struct btrfs_root *root, struct btrfs_path *path,
			    iterate_dir_item_t iterate, void *ctx)
{
	int ret = 0;
	struct extent_buffer *eb;
	struct btrfs_dir_item *di;
	struct btrfs_key di_key;
	char *buf = NULL;
	int buf_len;
	u32 name_len;
	u32 data_len;
	u32 cur;
	u32 len;
	u32 total;
	int slot;
	int num;

	/*
	 * Start with a small buffer (1 page). If later we end up needing more
	 * space, which can happen for xattrs on a fs with a leaf size greater
	 * than the page size, attempt to increase the buffer. Typically xattr
	 * values are small.
	 */
	buf_len = PATH_MAX;
	buf = kmalloc(buf_len, GFP_KERNEL);
	if (!buf) {
		ret = -ENOMEM;
		goto out;
	}

	eb = path->nodes[0];
	slot = path->slots[0];
	di = btrfs_item_ptr(eb, slot, struct btrfs_dir_item);
	cur = 0;
	len = 0;
	total = btrfs_item_size(eb, slot);

	num = 0;
	while (cur < total) {
		name_len = btrfs_dir_name_len(eb, di);
		data_len = btrfs_dir_data_len(eb, di);
		btrfs_dir_item_key_to_cpu(eb, di, &di_key);

		if (btrfs_dir_type(eb, di) == BTRFS_FT_XATTR) {
			if (name_len > XATTR_NAME_MAX) {
				ret = -ENAMETOOLONG;
				goto out;
			}
			if (name_len + data_len >
					BTRFS_MAX_XATTR_SIZE(root->fs_info)) {
				ret = -E2BIG;
				goto out;
			}
		} else {
			/*
			 * Path too long
			 */
			if (name_len + data_len > PATH_MAX) {
				ret = -ENAMETOOLONG;
				goto out;
			}
		}

		if (name_len + data_len > buf_len) {
			buf_len = name_len + data_len;
			if (is_vmalloc_addr(buf)) {
				vfree(buf);
				buf = NULL;
			} else {
				char *tmp = krealloc(buf, buf_len,
						GFP_KERNEL | __GFP_NOWARN);

				if (!tmp)
					kfree(buf);
				buf = tmp;
			}
			if (!buf) {
				buf = kvmalloc(buf_len, GFP_KERNEL);
				if (!buf) {
					ret = -ENOMEM;
					goto out;
				}
			}
		}

		read_extent_buffer(eb, buf, (unsigned long)(di + 1),
				   name_len + data_len);

		len = sizeof(*di) + name_len + data_len;
		di = (struct btrfs_dir_item *)((char *)di + len);
		cur += len;

		ret = iterate(num, &di_key, buf, name_len, buf + name_len,
			      data_len, ctx);
		if (ret < 0)
			goto out;
		if (ret) {
			ret = 0;
			goto out;
		}

		num++;
	}

out:
	kvfree(buf);
	return ret;
}
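
/*
 * Layout walked by iterate_dir_item() above; a single item can again hold
 * several entries back to back:
 *
 *	[ btrfs_dir_item | name (name_len bytes) | data (data_len bytes) ]
 *
 * data_len is only non-zero for xattr items, where the value follows the
 * name.
 */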

static int __copy_first_ref(int num, u64 dir, int index,
			    struct fs_path *p, void *ctx)
{
	int ret;
	struct fs_path *pt = ctx;

	ret = fs_path_copy(pt, p);
	if (ret < 0)
		return ret;

	/* we want the first only */
	return 1;
}

/*
 * Retrieve the first path of an inode. If an inode has more than one
 * ref/hardlink, this is ignored.
 */
static int get_inode_path(struct btrfs_root *root,
			  u64 ino, struct fs_path *path)
{
	int ret;
	struct btrfs_key key, found_key;
	struct btrfs_path *p;

	p = alloc_path_for_send();
	if (!p)
		return -ENOMEM;

	fs_path_reset(path);

	key.objectid = ino;
	key.type = BTRFS_INODE_REF_KEY;
	key.offset = 0;

	ret = btrfs_search_slot_for_read(root, &key, p, 1, 0);
	if (ret < 0)
		goto out;
	if (ret) {
		ret = 1;
		goto out;
	}
	btrfs_item_key_to_cpu(p->nodes[0], &found_key, p->slots[0]);
	if (found_key.objectid != ino ||
	    (found_key.type != BTRFS_INODE_REF_KEY &&
	     found_key.type != BTRFS_INODE_EXTREF_KEY)) {
		ret = -ENOENT;
		goto out;
	}

	ret = iterate_inode_ref(root, p, &found_key, 1,
				__copy_first_ref, path);
	if (ret < 0)
		goto out;
	ret = 0;

out:
	btrfs_free_path(p);
	return ret;
}

struct backref_ctx {
	struct send_ctx *sctx;

	/* number of total found references */
	u64 found;

	/*
	 * used for clones found in send_root. Clones found behind
	 * cur_objectid and cur_offset are not considered as allowed clones.
	 */
	u64 cur_objectid;
	u64 cur_offset;

	/* may be truncated in case it's the last extent in a file */
	u64 extent_len;

	/* Just to check for bugs in backref resolving */
	int found_itself;
};

static int __clone_root_cmp_bsearch(const void *key, const void *elt)
{
	u64 root = (u64)(uintptr_t)key;
	const struct clone_root *cr = elt;

	if (root < cr->root->root_key.objectid)
		return -1;
	if (root > cr->root->root_key.objectid)
		return 1;
	return 0;
}

static int __clone_root_cmp_sort(const void *e1, const void *e2)
{
	const struct clone_root *cr1 = e1;
	const struct clone_root *cr2 = e2;

	if (cr1->root->root_key.objectid < cr2->root->root_key.objectid)
		return -1;
	if (cr1->root->root_key.objectid > cr2->root->root_key.objectid)
		return 1;
	return 0;
}

/*
 * Called for every backref that is found for the current extent.
 * Results are collected in sctx->clone_roots->ino/offset/found_refs
 */
static int __iterate_backrefs(u64 ino, u64 offset, u64 root, void *ctx_)
{
	struct backref_ctx *bctx = ctx_;
	struct clone_root *found;

	/* First check if the root is in the list of accepted clone sources */
	found = bsearch((void *)(uintptr_t)root, bctx->sctx->clone_roots,
			bctx->sctx->clone_roots_cnt,
			sizeof(struct clone_root),
			__clone_root_cmp_bsearch);
	if (!found)
		return 0;

	if (found->root == bctx->sctx->send_root &&
	    ino == bctx->cur_objectid &&
	    offset == bctx->cur_offset) {
		bctx->found_itself = 1;
	}

	/*
	 * Make sure we don't consider clones from send_root that are
	 * behind the current inode/offset.
	 */
	if (found->root == bctx->sctx->send_root) {
		/*
		 * If the source inode was not yet processed we can't issue a
		 * clone operation, as the source extent does not exist yet at
		 * the destination of the stream.
		 */
		if (ino > bctx->cur_objectid)
			return 0;
		/*
		 * We clone from the inode currently being sent as long as the
		 * source extent is already processed, otherwise we could try
		 * to clone from an extent that does not exist yet at the
		 * destination of the stream.
		 */
		if (ino == bctx->cur_objectid &&
		    offset + bctx->extent_len >
		    bctx->sctx->cur_inode_next_write_offset)
			return 0;
	}

	bctx->found++;
	found->found_refs++;
	if (ino < found->ino) {
		found->ino = ino;
		found->offset = offset;
	} else if (found->ino == ino) {
		/*
		 * Same extent found more than once in the same file.
		 */
		if (found->offset > offset + bctx->extent_len)
			found->offset = offset;
	}

	return 0;
}

/*
 * Given an inode, offset and extent item, it finds a good clone for a clone
 * instruction. Returns -ENOENT when none could be found. The function makes
 * sure that the returned clone is usable at the point where sending is at the
 * moment. This means that no clones are accepted which lie behind the current
 * inode+offset.
 *
 * path must point to the extent item when called.
 */
static int find_extent_clone(struct send_ctx *sctx,
			     struct btrfs_path *path,
			     u64 ino, u64 data_offset,
			     u64 ino_size,
			     struct clone_root **found)
{
	struct btrfs_fs_info *fs_info = sctx->send_root->fs_info;
	int ret;
	int extent_type;
	u64 logical;
	u64 disk_byte;
	u64 num_bytes;
	u64 extent_item_pos;
	u64 flags = 0;
	struct btrfs_file_extent_item *fi;
	struct extent_buffer *eb = path->nodes[0];
	struct backref_ctx backref_ctx = {0};
	struct clone_root *cur_clone_root;
	struct btrfs_key found_key;
	struct btrfs_path *tmp_path;
	struct btrfs_extent_item *ei;
	int compressed;
	u32 i;

	tmp_path = alloc_path_for_send();
	if (!tmp_path)
		return -ENOMEM;

	/* We only use this path under the commit sem */
	tmp_path->need_commit_sem = 0;

	if (data_offset >= ino_size) {
		/*
		 * There may be extents that lie behind the file's size.
		 * I at least had this in combination with snapshotting while
		 * writing large files.
		 */
		ret = 0;
		goto out;
	}

	fi = btrfs_item_ptr(eb, path->slots[0],
			    struct btrfs_file_extent_item);
	extent_type = btrfs_file_extent_type(eb, fi);
	if (extent_type == BTRFS_FILE_EXTENT_INLINE) {
		ret = -ENOENT;
		goto out;
	}
	compressed = btrfs_file_extent_compression(eb, fi);

	num_bytes = btrfs_file_extent_num_bytes(eb, fi);
	disk_byte = btrfs_file_extent_disk_bytenr(eb, fi);
	if (disk_byte == 0) {
		ret = -ENOENT;
		goto out;
	}
	logical = disk_byte + btrfs_file_extent_offset(eb, fi);

	down_read(&fs_info->commit_root_sem);
	ret = extent_from_logical(fs_info, disk_byte, tmp_path,
				  &found_key, &flags);
	up_read(&fs_info->commit_root_sem);

	if (ret < 0)
		goto out;
	if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) {
		ret = -EIO;
		goto out;
	}

	ei = btrfs_item_ptr(tmp_path->nodes[0], tmp_path->slots[0],
			    struct btrfs_extent_item);
	/*
	 * Backreference walking (iterate_extent_inodes() below) is currently
	 * too expensive when an extent has a large number of references, both
	 * in time spent and used memory. So for now just fall back to write
	 * operations instead of clone operations when an extent has more than
	 * a certain amount of references.
	 */
	if (btrfs_extent_refs(tmp_path->nodes[0], ei) > SEND_MAX_EXTENT_REFS) {
		ret = -ENOENT;
		goto out;
	}
	btrfs_release_path(tmp_path);

	/*
	 * Setup the clone roots.
	 */
	for (i = 0; i < sctx->clone_roots_cnt; i++) {
		cur_clone_root = sctx->clone_roots + i;
		cur_clone_root->ino = (u64)-1;
		cur_clone_root->offset = 0;
		cur_clone_root->found_refs = 0;
	}

	backref_ctx.sctx = sctx;
	backref_ctx.found = 0;
	backref_ctx.cur_objectid = ino;
	backref_ctx.cur_offset = data_offset;
	backref_ctx.found_itself = 0;
	backref_ctx.extent_len = num_bytes;

	/*
	 * The last extent of a file may be too large due to page alignment.
	 * We need to adjust extent_len in this case so that the checks in
	 * __iterate_backrefs work.
	 */
	if (data_offset + num_bytes >= ino_size)
		backref_ctx.extent_len = ino_size - data_offset;

	/*
	 * Now collect all backrefs.
	 */
	if (compressed == BTRFS_COMPRESS_NONE)
		extent_item_pos = logical - found_key.objectid;
	else
		extent_item_pos = 0;
	ret = iterate_extent_inodes(fs_info, found_key.objectid,
				    extent_item_pos, 1, __iterate_backrefs,
				    &backref_ctx, false);

	if (ret < 0)
		goto out;

	down_read(&fs_info->commit_root_sem);
	if (fs_info->last_reloc_trans > sctx->last_reloc_trans) {
		/*
		 * A transaction commit for a transaction in which block group
		 * relocation was done just happened.
		 * The disk_bytenr of the file extent item we processed is
		 * possibly stale, referring to the extent's location before
		 * relocation. So act as if we haven't found any clone sources
		 * and fall back to write commands, which will read the correct
		 * data from the new extent location. Otherwise we will fail
		 * below because we haven't found our own back reference or we
		 * could be getting incorrect sources in case the old extent
		 * was already reallocated after the relocation.
		 */
		up_read(&fs_info->commit_root_sem);
		ret = -ENOENT;
		goto out;
	}
	up_read(&fs_info->commit_root_sem);

	if (!backref_ctx.found_itself) {
		/* found a bug in backref code? */
		ret = -EIO;
		btrfs_err(fs_info,
			  "did not find backref in send_root. inode=%llu, offset=%llu, disk_byte=%llu found extent=%llu",
			  ino, data_offset, disk_byte, found_key.objectid);
		goto out;
	}

	btrfs_debug(fs_info,
		    "find_extent_clone: data_offset=%llu, ino=%llu, num_bytes=%llu, logical=%llu",
		    data_offset, ino, num_bytes, logical);

	if (!backref_ctx.found)
		btrfs_debug(fs_info, "no clones found");

	cur_clone_root = NULL;
	for (i = 0; i < sctx->clone_roots_cnt; i++) {
		if (sctx->clone_roots[i].found_refs) {
			if (!cur_clone_root)
				cur_clone_root = sctx->clone_roots + i;
			else if (sctx->clone_roots[i].root == sctx->send_root)
				/* prefer clones from send_root over others */
				cur_clone_root = sctx->clone_roots + i;
		}
	}

	if (cur_clone_root) {
		*found = cur_clone_root;
		ret = 0;
	} else {
		ret = -ENOENT;
	}

out:
	btrfs_free_path(tmp_path);
	return ret;
}

static int read_symlink(struct btrfs_root *root,
			u64 ino,
			struct fs_path *dest)
{
	int ret;
	struct btrfs_path *path;
	struct btrfs_key key;
	struct btrfs_file_extent_item *ei;
	u8 type;
	u8 compression;
	unsigned long off;
	int len;

	path = alloc_path_for_send();
	if (!path)
		return -ENOMEM;

	key.objectid = ino;
	key.type = BTRFS_EXTENT_DATA_KEY;
	key.offset = 0;
	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	if (ret < 0)
		goto out;
	if (ret) {
		/*
		 * An empty symlink inode. Can happen in rare error paths when
		 * creating a symlink (transaction committed before the inode
		 * eviction handler removed the symlink inode items and a crash
		 * happened in between or the subvol was snapshotted in
		 * between). Print an informative message to dmesg/syslog so
		 * that the user can delete the symlink.
		 */
		btrfs_err(root->fs_info,
			  "Found empty symlink inode %llu at root %llu",
			  ino, root->root_key.objectid);
		ret = -EIO;
		goto out;
	}

	ei = btrfs_item_ptr(path->nodes[0], path->slots[0],
			    struct btrfs_file_extent_item);
	type = btrfs_file_extent_type(path->nodes[0], ei);
	compression = btrfs_file_extent_compression(path->nodes[0], ei);
	BUG_ON(type != BTRFS_FILE_EXTENT_INLINE);
	BUG_ON(compression);

	off = btrfs_file_extent_inline_start(ei);
	len = btrfs_file_extent_ram_bytes(path->nodes[0], ei);

	ret = fs_path_add_from_extent_buffer(dest, path->nodes[0], off, len);

out:
	btrfs_free_path(path);
	return ret;
}

/*
 * Helper function to generate a file name that is unique in the root of
 * send_root and parent_root. This is used to generate names for orphan inodes.
 */
static int gen_unique_name(struct send_ctx *sctx,
			   u64 ino, u64 gen,
			   struct fs_path *dest)
{
	int ret = 0;
	struct btrfs_path *path;
	struct btrfs_dir_item *di;
	char tmp[64];
	int len;
	u64 idx = 0;

	path = alloc_path_for_send();
	if (!path)
		return -ENOMEM;

	while (1) {
		len = snprintf(tmp, sizeof(tmp), "o%llu-%llu-%llu",
			       ino, gen, idx);
		ASSERT(len < sizeof(tmp));

		di = btrfs_lookup_dir_item(NULL, sctx->send_root,
					   path, BTRFS_FIRST_FREE_OBJECTID,
					   tmp, strlen(tmp), 0);
		btrfs_release_path(path);
		if (IS_ERR(di)) {
			ret = PTR_ERR(di);
			goto out;
		}
		if (di) {
			/* not unique, try again */
			idx++;
			continue;
		}

		if (!sctx->parent_root) {
			/* unique */
			ret = 0;
			break;
		}

		di = btrfs_lookup_dir_item(NULL, sctx->parent_root,
					   path, BTRFS_FIRST_FREE_OBJECTID,
					   tmp, strlen(tmp), 0);
		btrfs_release_path(path);
		if (IS_ERR(di)) {
			ret = PTR_ERR(di);
			goto out;
		}
		if (di) {
			/* not unique, try again */
			idx++;
			continue;
		}
		/* unique */
		break;
	}

	ret = fs_path_add(dest, tmp, strlen(tmp));

out:
	btrfs_free_path(path);
	return ret;
}
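
/*
 * Orphan names generated above have the form "o<ino>-<gen>-<idx>", e.g.
 * "o261-7-0" for inode 261 with generation 7. The index is only bumped
 * when the name already exists in the root of the send or parent snapshot.
 */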

enum inode_state {
	inode_state_no_change,
	inode_state_will_create,
	inode_state_did_create,
	inode_state_will_delete,
	inode_state_did_delete,
};

static int get_cur_inode_state(struct send_ctx *sctx, u64 ino, u64 gen)
{
	int ret;
	int left_ret;
	int right_ret;
	u64 left_gen;
	u64 right_gen;

	ret = get_inode_info(sctx->send_root, ino, NULL, &left_gen, NULL, NULL,
			     NULL, NULL);
	if (ret < 0 && ret != -ENOENT)
		goto out;
	left_ret = ret;

	if (!sctx->parent_root) {
		right_ret = -ENOENT;
	} else {
		ret = get_inode_info(sctx->parent_root, ino, NULL, &right_gen,
				     NULL, NULL, NULL, NULL);
		if (ret < 0 && ret != -ENOENT)
			goto out;
		right_ret = ret;
	}

	if (!left_ret && !right_ret) {
		if (left_gen == gen && right_gen == gen) {
			ret = inode_state_no_change;
		} else if (left_gen == gen) {
			if (ino < sctx->send_progress)
				ret = inode_state_did_create;
			else
				ret = inode_state_will_create;
		} else if (right_gen == gen) {
			if (ino < sctx->send_progress)
				ret = inode_state_did_delete;
			else
				ret = inode_state_will_delete;
		} else {
			ret = -ENOENT;
		}
	} else if (!left_ret) {
		if (left_gen == gen) {
			if (ino < sctx->send_progress)
				ret = inode_state_did_create;
			else
				ret = inode_state_will_create;
		} else {
			ret = -ENOENT;
		}
	} else if (!right_ret) {
		if (right_gen == gen) {
			if (ino < sctx->send_progress)
				ret = inode_state_did_delete;
			else
				ret = inode_state_will_delete;
		} else {
			ret = -ENOENT;
		}
	} else {
		ret = -ENOENT;
	}

out:
	return ret;
}
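
/*
 * Summary of the decision above, where "L"/"R" means the inode exists in
 * the send root/parent root with a generation matching `gen`:
 *
 *	L && R  -> inode_state_no_change
 *	L only  -> inode_state_did_create if ino < send_progress,
 *	           else inode_state_will_create
 *	R only  -> inode_state_did_delete if ino < send_progress,
 *	           else inode_state_will_delete
 *	neither -> -ENOENT
 */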

static int is_inode_existent(struct send_ctx *sctx, u64 ino, u64 gen)
{
	int ret;

	if (ino == BTRFS_FIRST_FREE_OBJECTID)
		return 1;

	ret = get_cur_inode_state(sctx, ino, gen);
	if (ret < 0)
		goto out;

	if (ret == inode_state_no_change ||
	    ret == inode_state_did_create ||
	    ret == inode_state_will_delete)
		ret = 1;
	else
		ret = 0;

out:
	return ret;
}

/*
 * Helper function to lookup a dir item in a dir.
 */
static int lookup_dir_item_inode(struct btrfs_root *root,
				 u64 dir, const char *name, int name_len,
				 u64 *found_inode)
{
	int ret = 0;
	struct btrfs_dir_item *di;
	struct btrfs_key key;
	struct btrfs_path *path;

	path = alloc_path_for_send();
	if (!path)
		return -ENOMEM;

	di = btrfs_lookup_dir_item(NULL, root, path,
				   dir, name, name_len, 0);
	if (IS_ERR_OR_NULL(di)) {
		ret = di ? PTR_ERR(di) : -ENOENT;
		goto out;
	}
	btrfs_dir_item_key_to_cpu(path->nodes[0], di, &key);
	if (key.type == BTRFS_ROOT_ITEM_KEY) {
		ret = -ENOENT;
		goto out;
	}
	*found_inode = key.objectid;

out:
	btrfs_free_path(path);
	return ret;
}

/*
 * Looks up the first btrfs_inode_ref of a given ino. It returns the parent
 * dir, the generation of the parent dir and the name of the dir entry.
 */
static int get_first_ref(struct btrfs_root *root, u64 ino,
			 u64 *dir, u64 *dir_gen, struct fs_path *name)
{
	int ret;
	struct btrfs_key key;
	struct btrfs_key found_key;
	struct btrfs_path *path;
	int len;
	u64 parent_dir;

	path = alloc_path_for_send();
	if (!path)
		return -ENOMEM;

	key.objectid = ino;
	key.type = BTRFS_INODE_REF_KEY;
	key.offset = 0;

	ret = btrfs_search_slot_for_read(root, &key, path, 1, 0);
	if (ret < 0)
		goto out;
	if (!ret)
		btrfs_item_key_to_cpu(path->nodes[0], &found_key,
				      path->slots[0]);
	if (ret || found_key.objectid != ino ||
	    (found_key.type != BTRFS_INODE_REF_KEY &&
	     found_key.type != BTRFS_INODE_EXTREF_KEY)) {
		ret = -ENOENT;
		goto out;
	}

	if (found_key.type == BTRFS_INODE_REF_KEY) {
		struct btrfs_inode_ref *iref;

		iref = btrfs_item_ptr(path->nodes[0], path->slots[0],
				      struct btrfs_inode_ref);
		len = btrfs_inode_ref_name_len(path->nodes[0], iref);
		ret = fs_path_add_from_extent_buffer(name, path->nodes[0],
						     (unsigned long)(iref + 1),
						     len);
		parent_dir = found_key.offset;
	} else {
		struct btrfs_inode_extref *extref;

		extref = btrfs_item_ptr(path->nodes[0], path->slots[0],
					struct btrfs_inode_extref);
		len = btrfs_inode_extref_name_len(path->nodes[0], extref);
		ret = fs_path_add_from_extent_buffer(name, path->nodes[0],
					(unsigned long)&extref->name, len);
		parent_dir = btrfs_inode_extref_parent(path->nodes[0], extref);
	}
	if (ret < 0)
		goto out;
	btrfs_release_path(path);

	if (dir_gen) {
		ret = get_inode_info(root, parent_dir, NULL, dir_gen, NULL,
				     NULL, NULL, NULL);
		if (ret < 0)
			goto out;
	}

	*dir = parent_dir;

out:
	btrfs_free_path(path);
	return ret;
}

static int is_first_ref(struct btrfs_root *root,
			u64 ino, u64 dir,
			const char *name, int name_len)
{
	int ret;
	struct fs_path *tmp_name;
	u64 tmp_dir;

	tmp_name = fs_path_alloc();
	if (!tmp_name)
		return -ENOMEM;

	ret = get_first_ref(root, ino, &tmp_dir, NULL, tmp_name);
	if (ret < 0)
		goto out;

	if (dir != tmp_dir || name_len != fs_path_len(tmp_name)) {
		ret = 0;
		goto out;
	}

	ret = !memcmp(tmp_name->start, name, name_len);

out:
	fs_path_free(tmp_name);
	return ret;
}

/*
 * Used by process_recorded_refs to determine if a new ref would overwrite an
 * already existing ref. In case it detects an overwrite, it returns the
 * inode/gen in who_ino/who_gen.
 * When an overwrite is detected, process_recorded_refs does proper orphanizing
 * to make sure later references to the overwritten inode are possible.
 * Orphanizing is however only required for the first ref of an inode.
 * process_recorded_refs does an additional is_first_ref check to see if
 * orphanizing is really required.
 */
static int will_overwrite_ref(struct send_ctx *sctx, u64 dir, u64 dir_gen,
			      const char *name, int name_len,
			      u64 *who_ino, u64 *who_gen, u64 *who_mode)
{
	int ret = 0;
	u64 gen;
	u64 other_inode = 0;

	if (!sctx->parent_root)
		goto out;

	ret = is_inode_existent(sctx, dir, dir_gen);
	if (ret <= 0)
		goto out;

	/*
	 * If we have a parent root we need to verify that the parent dir was
	 * not deleted and then re-created, if it was then we have no overwrite
	 * and we can just unlink this entry.
	 */
	if (sctx->parent_root && dir != BTRFS_FIRST_FREE_OBJECTID) {
		ret = get_inode_info(sctx->parent_root, dir, NULL, &gen, NULL,
				     NULL, NULL, NULL);
		if (ret < 0 && ret != -ENOENT)
			goto out;
		if (ret) {
			ret = 0;
			goto out;
		}
		if (gen != dir_gen)
			goto out;
	}

	ret = lookup_dir_item_inode(sctx->parent_root, dir, name, name_len,
				    &other_inode);
	if (ret < 0 && ret != -ENOENT)
		goto out;
	if (ret) {
		ret = 0;
		goto out;
	}

	/*
	 * Check if the overwritten ref was already processed. If yes, the ref
	 * was already unlinked/moved, so we can safely assume that we will not
	 * overwrite anything at this point in time.
	 */
	if (other_inode > sctx->send_progress ||
	    is_waiting_for_move(sctx, other_inode)) {
		ret = get_inode_info(sctx->parent_root, other_inode, NULL,
				     who_gen, who_mode, NULL, NULL, NULL);
		if (ret < 0)
			goto out;

		ret = 1;
		*who_ino = other_inode;
	} else {
		ret = 0;
	}

out:
	return ret;
}

/*
 * Checks if the ref was overwritten by an already processed inode. This is
 * used by __get_cur_name_and_parent to find out if the ref was orphanized and
 * thus the orphan name needs to be used.
 * process_recorded_refs also uses it to avoid unlinking of refs that were
 * overwritten.
 */
static int did_overwrite_ref(struct send_ctx *sctx,
			     u64 dir, u64 dir_gen,
			     u64 ino, u64 ino_gen,
			     const char *name, int name_len)
{
	int ret = 0;
	u64 gen;
	u64 ow_inode;

	if (!sctx->parent_root)
		goto out;

	ret = is_inode_existent(sctx, dir, dir_gen);
	if (ret <= 0)
		goto out;

	if (dir != BTRFS_FIRST_FREE_OBJECTID) {
		ret = get_inode_info(sctx->send_root, dir, NULL, &gen, NULL,
				     NULL, NULL, NULL);
		if (ret < 0 && ret != -ENOENT)
			goto out;
		if (ret) {
			ret = 0;
			goto out;
		}
		if (gen != dir_gen)
			goto out;
	}

	/* check if the ref was overwritten by another ref */
	ret = lookup_dir_item_inode(sctx->send_root, dir, name, name_len,
				    &ow_inode);
	if (ret < 0 && ret != -ENOENT)
		goto out;
	if (ret) {
		/* was never and will never be overwritten */
		ret = 0;
		goto out;
	}

	ret = get_inode_info(sctx->send_root, ow_inode, NULL, &gen, NULL, NULL,
			     NULL, NULL);
	if (ret < 0)
		goto out;

	if (ow_inode == ino && gen == ino_gen) {
		ret = 0;
		goto out;
	}

	/*
	 * We know that it is or will be overwritten. Check this now.
	 * The current inode being processed might have been the one that
	 * caused inode 'ino' to be orphanized, therefore check if ow_inode
	 * matches the current inode being processed.
	 */
	if ((ow_inode < sctx->send_progress) ||
	    (ino != sctx->cur_ino && ow_inode == sctx->cur_ino &&
	     gen == sctx->cur_inode_gen))
		ret = 1;
	else
		ret = 0;

out:
	return ret;
}

/*
 * Same as did_overwrite_ref, but also checks if it is the first ref of an
 * inode that got overwritten. This is used by process_recorded_refs to
 * determine if it has to use the path as returned by get_cur_path or the
 * orphan name.
 */
static int did_overwrite_first_ref(struct send_ctx *sctx, u64 ino, u64 gen)
{
	int ret = 0;
	struct fs_path *name = NULL;
	u64 dir;
	u64 dir_gen;

	if (!sctx->parent_root)
		goto out;

	name = fs_path_alloc();
	if (!name)
		return -ENOMEM;

	ret = get_first_ref(sctx->parent_root, ino, &dir, &dir_gen, name);
	if (ret < 0)
		goto out;

	ret = did_overwrite_ref(sctx, dir, dir_gen, ino, gen,
				name->start, fs_path_len(name));

out:
	fs_path_free(name);
	return ret;
}

/*
 * Insert a name cache entry. On 32bit kernels the radix tree index is 32bit,
 * so we need to do some special handling in case we have clashes. This
 * function takes care of this with the help of name_cache_entry::radix_list.
 * In case of error, nce is kfreed.
 */
static int name_cache_insert(struct send_ctx *sctx,
			     struct name_cache_entry *nce)
{
	int ret = 0;
	struct list_head *nce_head;

	nce_head = radix_tree_lookup(&sctx->name_cache,
				     (unsigned long)nce->ino);
	if (!nce_head) {
		nce_head = kmalloc(sizeof(*nce_head), GFP_KERNEL);
		if (!nce_head) {
			kfree(nce);
			return -ENOMEM;
		}
		INIT_LIST_HEAD(nce_head);

		ret = radix_tree_insert(&sctx->name_cache, nce->ino, nce_head);
		if (ret < 0) {
			kfree(nce_head);
			kfree(nce);
			return ret;
		}
	}
	list_add_tail(&nce->radix_list, nce_head);
	list_add_tail(&nce->list, &sctx->name_cache_list);
	sctx->name_cache_size++;

	return ret;
}
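
/*
 * Example of the clash handling above: on a 32bit kernel the radix tree
 * index is an unsigned long, so inodes 1 and (1ULL << 32) + 1 both map to
 * slot 1. Both entries then live on the same nce_head list and
 * name_cache_search() disambiguates by comparing the full ino and gen.
 */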
2056
2057static void name_cache_delete(struct send_ctx *sctx,
2058 struct name_cache_entry *nce)
2059{
2060 struct list_head *nce_head;
2061
2062 nce_head = radix_tree_lookup(&sctx->name_cache,
2063 (unsigned long)nce->ino);
2064 if (!nce_head) {
2065 btrfs_err(sctx->send_root->fs_info,
2066 "name_cache_delete lookup failed ino %llu cache size %d, leaking memory",
2067 nce->ino, sctx->name_cache_size);
2068 }
2069
2070 list_del(&nce->radix_list);
2071 list_del(&nce->list);
2072 sctx->name_cache_size--;
2073
2074 /*
2075 * We may not get to the final release of nce_head if the lookup fails
2076 */
2077 if (nce_head && list_empty(nce_head)) {
2078 radix_tree_delete(&sctx->name_cache, (unsigned long)nce->ino);
2079 kfree(nce_head);
2080 }
2081}
2082
2083static struct name_cache_entry *name_cache_search(struct send_ctx *sctx,
2084 u64 ino, u64 gen)
2085{
2086 struct list_head *nce_head;
2087 struct name_cache_entry *cur;
2088
2089 nce_head = radix_tree_lookup(&sctx->name_cache, (unsigned long)ino);
2090 if (!nce_head)
2091 return NULL;
2092
2093 list_for_each_entry(cur, nce_head, radix_list) {
2094 if (cur->ino == ino && cur->gen == gen)
2095 return cur;
2096 }
2097 return NULL;
2098}
2099
2100/*
2101 * Remove some entries from the beginning of name_cache_list.
2102 */
2103static void name_cache_clean_unused(struct send_ctx *sctx)
2104{
2105 struct name_cache_entry *nce;
2106
2107 if (sctx->name_cache_size < SEND_CTX_NAME_CACHE_CLEAN_SIZE)
2108 return;
2109
2110 while (sctx->name_cache_size > SEND_CTX_MAX_NAME_CACHE_SIZE) {
2111 nce = list_entry(sctx->name_cache_list.next,
2112 struct name_cache_entry, list);
2113 name_cache_delete(sctx, nce);
2114 kfree(nce);
2115 }
2116}
2117
2118static void name_cache_free(struct send_ctx *sctx)
2119{
2120 struct name_cache_entry *nce;
2121
2122 while (!list_empty(&sctx->name_cache_list)) {
2123 nce = list_entry(sctx->name_cache_list.next,
2124 struct name_cache_entry, list);
2125 name_cache_delete(sctx, nce);
2126 kfree(nce);
2127 }
2128}
2129
2130/*
2131 * Used by get_cur_path for each ref up to the root.
2132 * Returns 0 if it succeeded.
2133 * Returns 1 if the inode is not existent or got overwritten. In that case, the
2134 * name is an orphan name. This instructs get_cur_path to stop iterating. If 1
2135 * is returned, parent_ino/parent_gen are not guaranteed to be valid.
2136 * Returns <0 in case of error.
2137 */
2138static int __get_cur_name_and_parent(struct send_ctx *sctx,
2139 u64 ino, u64 gen,
2140 u64 *parent_ino,
2141 u64 *parent_gen,
2142 struct fs_path *dest)
2143{
2144 int ret;
2145 int nce_ret;
2146 struct name_cache_entry *nce = NULL;
2147
2148 /*
2149 * First check if we already did a call to this function with the same
2150	 * ino/gen. If yes, check if the cache entry is still up-to-date. If it is,
2151 * return the cached result.
2152 */
2153 nce = name_cache_search(sctx, ino, gen);
2154 if (nce) {
2155 if (ino < sctx->send_progress && nce->need_later_update) {
2156 name_cache_delete(sctx, nce);
2157 kfree(nce);
2158 nce = NULL;
2159 } else {
2160 /*
2161 * Removes the entry from the list and adds it back to
2162 * the end. This marks the entry as recently used so
2163 * that name_cache_clean_unused does not remove it.
2164 */
2165 list_move_tail(&nce->list, &sctx->name_cache_list);
2166
2167 *parent_ino = nce->parent_ino;
2168 *parent_gen = nce->parent_gen;
2169 ret = fs_path_add(dest, nce->name, nce->name_len);
2170 if (ret < 0)
2171 goto out;
2172 ret = nce->ret;
2173 goto out;
2174 }
2175 }
2176
2177 /*
2178	 * If the inode does not exist yet, add the orphan name and return 1.
2179	 * This should only happen for the parent dir that we determine in
2180	 * __record_new_ref.
2181 */
2182 ret = is_inode_existent(sctx, ino, gen);
2183 if (ret < 0)
2184 goto out;
2185
2186 if (!ret) {
2187 ret = gen_unique_name(sctx, ino, gen, dest);
2188 if (ret < 0)
2189 goto out;
2190 ret = 1;
2191 goto out_cache;
2192 }
2193
2194 /*
2195 * Depending on whether the inode was already processed or not, use
2196 * send_root or parent_root for ref lookup.
2197 */
2198 if (ino < sctx->send_progress)
2199 ret = get_first_ref(sctx->send_root, ino,
2200 parent_ino, parent_gen, dest);
2201 else
2202 ret = get_first_ref(sctx->parent_root, ino,
2203 parent_ino, parent_gen, dest);
2204 if (ret < 0)
2205 goto out;
2206
2207 /*
2208 * Check if the ref was overwritten by an inode's ref that was processed
2209 * earlier. If yes, treat as orphan and return 1.
2210 */
2211 ret = did_overwrite_ref(sctx, *parent_ino, *parent_gen, ino, gen,
2212 dest->start, dest->end - dest->start);
2213 if (ret < 0)
2214 goto out;
2215 if (ret) {
2216 fs_path_reset(dest);
2217 ret = gen_unique_name(sctx, ino, gen, dest);
2218 if (ret < 0)
2219 goto out;
2220 ret = 1;
2221 }
2222
2223out_cache:
2224 /*
2225 * Store the result of the lookup in the name cache.
2226 */
2227 nce = kmalloc(sizeof(*nce) + fs_path_len(dest) + 1, GFP_KERNEL);
2228 if (!nce) {
2229 ret = -ENOMEM;
2230 goto out;
2231 }
2232
2233 nce->ino = ino;
2234 nce->gen = gen;
2235 nce->parent_ino = *parent_ino;
2236 nce->parent_gen = *parent_gen;
2237 nce->name_len = fs_path_len(dest);
2238 nce->ret = ret;
2239 strcpy(nce->name, dest->start);
2240
2241 if (ino < sctx->send_progress)
2242 nce->need_later_update = 0;
2243 else
2244 nce->need_later_update = 1;
2245
2246 nce_ret = name_cache_insert(sctx, nce);
2247 if (nce_ret < 0)
2248 ret = nce_ret;
2249 name_cache_clean_unused(sctx);
2250
2251out:
2252 return ret;
2253}
2254
2255/*
2256 * Magic happens here. This function returns the first ref to an inode as it
2257 * would look while receiving the stream at this point in time.
2258 * We walk the path up to the root. For every inode in between, we check if it
2259 * was already processed/sent. If yes, we continue with the parent as found
2260 * in send_root. If not, we continue with the parent as found in parent_root.
2261 * If we encounter an inode that was deleted at this point in time, we use the
2262 * inodes "orphan" name instead of the real name and stop. Same with new inodes
2263 * that were not created yet and overwritten inodes/refs.
2264 *
2265 * When do we have orphan inodes:
2266 * 1. When an inode is freshly created and thus no valid refs are available yet
2267 * 2. When a directory lost all its refs (deleted) but still has dir items
2268 * inside which were not processed yet (pending for move/delete). If anyone
2269 * tried to get the path to the dir items, it would get a path inside that
2270 * orphan directory.
2271 * 3. When an inode is moved around or gets new links, it may overwrite the ref
2272 * of an unprocessed inode. If in that case the first ref would be
2273 * overwritten, the overwritten inode gets "orphanized". Later when we
2274 * process this overwritten inode, it is restored at a new place by moving
2275 * the orphan inode.
2276 *
2277 * sctx->send_progress tells this function at which point in time the
2278 * receiving side would be.
2279 */
2280static int get_cur_path(struct send_ctx *sctx, u64 ino, u64 gen,
2281 struct fs_path *dest)
2282{
2283 int ret = 0;
2284 struct fs_path *name = NULL;
2285 u64 parent_inode = 0;
2286 u64 parent_gen = 0;
2287 int stop = 0;
2288
2289 name = fs_path_alloc();
2290 if (!name) {
2291 ret = -ENOMEM;
2292 goto out;
2293 }
2294
2295 dest->reversed = 1;
2296 fs_path_reset(dest);
2297
2298 while (!stop && ino != BTRFS_FIRST_FREE_OBJECTID) {
2299 struct waiting_dir_move *wdm;
2300
2301 fs_path_reset(name);
2302
2303 if (is_waiting_for_rm(sctx, ino, gen)) {
2304 ret = gen_unique_name(sctx, ino, gen, name);
2305 if (ret < 0)
2306 goto out;
2307 ret = fs_path_add_path(dest, name);
2308 break;
2309 }
2310
2311 wdm = get_waiting_dir_move(sctx, ino);
2312 if (wdm && wdm->orphanized) {
2313 ret = gen_unique_name(sctx, ino, gen, name);
2314 stop = 1;
2315 } else if (wdm) {
2316 ret = get_first_ref(sctx->parent_root, ino,
2317 &parent_inode, &parent_gen, name);
2318 } else {
2319 ret = __get_cur_name_and_parent(sctx, ino, gen,
2320 &parent_inode,
2321 &parent_gen, name);
2322 if (ret)
2323 stop = 1;
2324 }
2325
2326 if (ret < 0)
2327 goto out;
2328
2329 ret = fs_path_add_path(dest, name);
2330 if (ret < 0)
2331 goto out;
2332
2333 ino = parent_inode;
2334 gen = parent_gen;
2335 }
2336
2337out:
2338 fs_path_free(name);
2339 if (!ret)
2340 fs_path_unreverse(dest);
2341 return ret;
2342}
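
/*
 * Illustrative sketch, not part of the original file: the reversed-path
 * idea get_cur_path() relies on. Components are discovered child-to-root,
 * collected in reversed order and unreversed once at the end, instead of
 * shifting the whole buffer on every iteration. The hypothetical helper
 * below shows an equivalent trick by filling a buffer from its end; it
 * assumes buf is large enough for all components.
 */
static char *example_build_path(const char * const comps[], int n,
				char *buf, size_t len)
{
	char *p = buf + len - 1;
	int i;

	*p = '\0';
	/* comps[] is ordered child-to-root, as the walk discovers them. */
	for (i = 0; i < n; i++) {
		size_t l = strlen(comps[i]);

		p -= l;
		memcpy(p, comps[i], l);
		*--p = '/';
	}
	return p;	/* e.g. { "file", "dir" } yields "/dir/file" */
}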
2343
2344/*
2345 * Sends a BTRFS_SEND_C_SUBVOL command/item to userspace
2346 */
2347static int send_subvol_begin(struct send_ctx *sctx)
2348{
2349 int ret;
2350 struct btrfs_root *send_root = sctx->send_root;
2351 struct btrfs_root *parent_root = sctx->parent_root;
2352 struct btrfs_path *path;
2353 struct btrfs_key key;
2354 struct btrfs_root_ref *ref;
2355 struct extent_buffer *leaf;
2356 char *name = NULL;
2357 int namelen;
2358
2359 path = btrfs_alloc_path();
2360 if (!path)
2361 return -ENOMEM;
2362
2363 name = kmalloc(BTRFS_PATH_NAME_MAX, GFP_KERNEL);
2364 if (!name) {
2365 btrfs_free_path(path);
2366 return -ENOMEM;
2367 }
2368
2369 key.objectid = send_root->root_key.objectid;
2370 key.type = BTRFS_ROOT_BACKREF_KEY;
2371 key.offset = 0;
2372
2373 ret = btrfs_search_slot_for_read(send_root->fs_info->tree_root,
2374 &key, path, 1, 0);
2375 if (ret < 0)
2376 goto out;
2377 if (ret) {
2378 ret = -ENOENT;
2379 goto out;
2380 }
2381
2382 leaf = path->nodes[0];
2383 btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
2384 if (key.type != BTRFS_ROOT_BACKREF_KEY ||
2385 key.objectid != send_root->root_key.objectid) {
2386 ret = -ENOENT;
2387 goto out;
2388 }
2389 ref = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_root_ref);
2390 namelen = btrfs_root_ref_name_len(leaf, ref);
2391 read_extent_buffer(leaf, name, (unsigned long)(ref + 1), namelen);
2392 btrfs_release_path(path);
2393
2394 if (parent_root) {
2395 ret = begin_cmd(sctx, BTRFS_SEND_C_SNAPSHOT);
2396 if (ret < 0)
2397 goto out;
2398 } else {
2399 ret = begin_cmd(sctx, BTRFS_SEND_C_SUBVOL);
2400 if (ret < 0)
2401 goto out;
2402 }
2403
2404 TLV_PUT_STRING(sctx, BTRFS_SEND_A_PATH, name, namelen);
2405
2406 if (!btrfs_is_empty_uuid(sctx->send_root->root_item.received_uuid))
2407 TLV_PUT_UUID(sctx, BTRFS_SEND_A_UUID,
2408 sctx->send_root->root_item.received_uuid);
2409 else
2410 TLV_PUT_UUID(sctx, BTRFS_SEND_A_UUID,
2411 sctx->send_root->root_item.uuid);
2412
2413 TLV_PUT_U64(sctx, BTRFS_SEND_A_CTRANSID,
2414 btrfs_root_ctransid(&sctx->send_root->root_item));
2415 if (parent_root) {
2416 if (!btrfs_is_empty_uuid(parent_root->root_item.received_uuid))
2417 TLV_PUT_UUID(sctx, BTRFS_SEND_A_CLONE_UUID,
2418 parent_root->root_item.received_uuid);
2419 else
2420 TLV_PUT_UUID(sctx, BTRFS_SEND_A_CLONE_UUID,
2421 parent_root->root_item.uuid);
2422 TLV_PUT_U64(sctx, BTRFS_SEND_A_CLONE_CTRANSID,
2423 btrfs_root_ctransid(&sctx->parent_root->root_item));
2424 }
2425
2426 ret = send_cmd(sctx);
2427
2428tlv_put_failure:
2429out:
2430 btrfs_free_path(path);
2431 kfree(name);
2432 return ret;
2433}
2434
2435static int send_truncate(struct send_ctx *sctx, u64 ino, u64 gen, u64 size)
2436{
2437 struct btrfs_fs_info *fs_info = sctx->send_root->fs_info;
2438 int ret = 0;
2439 struct fs_path *p;
2440
2441 btrfs_debug(fs_info, "send_truncate %llu size=%llu", ino, size);
2442
2443 p = fs_path_alloc();
2444 if (!p)
2445 return -ENOMEM;
2446
2447 ret = begin_cmd(sctx, BTRFS_SEND_C_TRUNCATE);
2448 if (ret < 0)
2449 goto out;
2450
2451 ret = get_cur_path(sctx, ino, gen, p);
2452 if (ret < 0)
2453 goto out;
2454 TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, p);
2455 TLV_PUT_U64(sctx, BTRFS_SEND_A_SIZE, size);
2456
2457 ret = send_cmd(sctx);
2458
2459tlv_put_failure:
2460out:
2461 fs_path_free(p);
2462 return ret;
2463}
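
/*
 * send_truncate() above and the helpers that follow all share the same
 * three-step stream-command pattern: begin_cmd() starts a command,
 * TLV_PUT_*() append typed attributes (jumping to tlv_put_failure on
 * error), and send_cmd() finalizes and writes it. A minimal sketch of
 * that skeleton for a hypothetical command; BTRFS_SEND_C_EXAMPLE and
 * BTRFS_SEND_A_EXAMPLE do not exist, everything else is the existing API:
 */
static int example_send_cmd(struct send_ctx *sctx, u64 ino, u64 gen, u64 val)
{
	struct fs_path *p;
	int ret;

	p = fs_path_alloc();
	if (!p)
		return -ENOMEM;

	ret = begin_cmd(sctx, BTRFS_SEND_C_EXAMPLE);	/* hypothetical */
	if (ret < 0)
		goto out;
	ret = get_cur_path(sctx, ino, gen, p);
	if (ret < 0)
		goto out;
	TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, p);
	TLV_PUT_U64(sctx, BTRFS_SEND_A_EXAMPLE, val);	/* hypothetical */

	ret = send_cmd(sctx);

tlv_put_failure:
out:
	fs_path_free(p);
	return ret;
}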
2464
2465static int send_chmod(struct send_ctx *sctx, u64 ino, u64 gen, u64 mode)
2466{
2467 struct btrfs_fs_info *fs_info = sctx->send_root->fs_info;
2468 int ret = 0;
2469 struct fs_path *p;
2470
2471 btrfs_debug(fs_info, "send_chmod %llu mode=%llu", ino, mode);
2472
2473 p = fs_path_alloc();
2474 if (!p)
2475 return -ENOMEM;
2476
2477 ret = begin_cmd(sctx, BTRFS_SEND_C_CHMOD);
2478 if (ret < 0)
2479 goto out;
2480
2481 ret = get_cur_path(sctx, ino, gen, p);
2482 if (ret < 0)
2483 goto out;
2484 TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, p);
2485 TLV_PUT_U64(sctx, BTRFS_SEND_A_MODE, mode & 07777);
2486
2487 ret = send_cmd(sctx);
2488
2489tlv_put_failure:
2490out:
2491 fs_path_free(p);
2492 return ret;
2493}
2494
2495static int send_chown(struct send_ctx *sctx, u64 ino, u64 gen, u64 uid, u64 gid)
2496{
2497 struct btrfs_fs_info *fs_info = sctx->send_root->fs_info;
2498 int ret = 0;
2499 struct fs_path *p;
2500
2501 btrfs_debug(fs_info, "send_chown %llu uid=%llu, gid=%llu",
2502 ino, uid, gid);
2503
2504 p = fs_path_alloc();
2505 if (!p)
2506 return -ENOMEM;
2507
2508 ret = begin_cmd(sctx, BTRFS_SEND_C_CHOWN);
2509 if (ret < 0)
2510 goto out;
2511
2512 ret = get_cur_path(sctx, ino, gen, p);
2513 if (ret < 0)
2514 goto out;
2515 TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, p);
2516 TLV_PUT_U64(sctx, BTRFS_SEND_A_UID, uid);
2517 TLV_PUT_U64(sctx, BTRFS_SEND_A_GID, gid);
2518
2519 ret = send_cmd(sctx);
2520
2521tlv_put_failure:
2522out:
2523 fs_path_free(p);
2524 return ret;
2525}
2526
2527static int send_utimes(struct send_ctx *sctx, u64 ino, u64 gen)
2528{
2529 struct btrfs_fs_info *fs_info = sctx->send_root->fs_info;
2530 int ret = 0;
2531 struct fs_path *p = NULL;
2532 struct btrfs_inode_item *ii;
2533 struct btrfs_path *path = NULL;
2534 struct extent_buffer *eb;
2535 struct btrfs_key key;
2536 int slot;
2537
2538 btrfs_debug(fs_info, "send_utimes %llu", ino);
2539
2540 p = fs_path_alloc();
2541 if (!p)
2542 return -ENOMEM;
2543
2544 path = alloc_path_for_send();
2545 if (!path) {
2546 ret = -ENOMEM;
2547 goto out;
2548 }
2549
2550 key.objectid = ino;
2551 key.type = BTRFS_INODE_ITEM_KEY;
2552 key.offset = 0;
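	/*
	 * btrfs_search_slot() returns > 0 when the exact key was not found;
	 * for an inode item that means the inode is gone, so map it to
	 * -ENOENT.
	 */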
2553 ret = btrfs_search_slot(NULL, sctx->send_root, &key, path, 0, 0);
2554 if (ret > 0)
2555 ret = -ENOENT;
2556 if (ret < 0)
2557 goto out;
2558
2559 eb = path->nodes[0];
2560 slot = path->slots[0];
2561 ii = btrfs_item_ptr(eb, slot, struct btrfs_inode_item);
2562
2563 ret = begin_cmd(sctx, BTRFS_SEND_C_UTIMES);
2564 if (ret < 0)
2565 goto out;
2566
2567 ret = get_cur_path(sctx, ino, gen, p);
2568 if (ret < 0)
2569 goto out;
2570 TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, p);
2571 TLV_PUT_BTRFS_TIMESPEC(sctx, BTRFS_SEND_A_ATIME, eb, &ii->atime);
2572 TLV_PUT_BTRFS_TIMESPEC(sctx, BTRFS_SEND_A_MTIME, eb, &ii->mtime);
2573 TLV_PUT_BTRFS_TIMESPEC(sctx, BTRFS_SEND_A_CTIME, eb, &ii->ctime);
2574 /* TODO Add otime support when the otime patches get into upstream */
2575
2576 ret = send_cmd(sctx);
2577
2578tlv_put_failure:
2579out:
2580 fs_path_free(p);
2581 btrfs_free_path(path);
2582 return ret;
2583}
2584
2585/*
2586 * Sends a BTRFS_SEND_C_MKXXX or SYMLINK command to user space. We don't have
2587 * a valid path yet because we did not process the refs yet. So the inode is
2588 * first created under its orphan name (generated by gen_unique_name()).
2589 */
2590static int send_create_inode(struct send_ctx *sctx, u64 ino)
2591{
2592 struct btrfs_fs_info *fs_info = sctx->send_root->fs_info;
2593 int ret = 0;
2594 struct fs_path *p;
2595 int cmd;
2596 u64 gen;
2597 u64 mode;
2598 u64 rdev;
2599
2600 btrfs_debug(fs_info, "send_create_inode %llu", ino);
2601
2602 p = fs_path_alloc();
2603 if (!p)
2604 return -ENOMEM;
2605
2606 if (ino != sctx->cur_ino) {
2607 ret = get_inode_info(sctx->send_root, ino, NULL, &gen, &mode,
2608 NULL, NULL, &rdev);
2609 if (ret < 0)
2610 goto out;
2611 } else {
2612 gen = sctx->cur_inode_gen;
2613 mode = sctx->cur_inode_mode;
2614 rdev = sctx->cur_inode_rdev;
2615 }
2616
2617 if (S_ISREG(mode)) {
2618 cmd = BTRFS_SEND_C_MKFILE;
2619 } else if (S_ISDIR(mode)) {
2620 cmd = BTRFS_SEND_C_MKDIR;
2621 } else if (S_ISLNK(mode)) {
2622 cmd = BTRFS_SEND_C_SYMLINK;
2623 } else if (S_ISCHR(mode) || S_ISBLK(mode)) {
2624 cmd = BTRFS_SEND_C_MKNOD;
2625 } else if (S_ISFIFO(mode)) {
2626 cmd = BTRFS_SEND_C_MKFIFO;
2627 } else if (S_ISSOCK(mode)) {
2628 cmd = BTRFS_SEND_C_MKSOCK;
2629 } else {
2630 btrfs_warn(sctx->send_root->fs_info, "unexpected inode type %o",
2631 (int)(mode & S_IFMT));
2632 ret = -EOPNOTSUPP;
2633 goto out;
2634 }
2635
2636 ret = begin_cmd(sctx, cmd);
2637 if (ret < 0)
2638 goto out;
2639
2640 ret = gen_unique_name(sctx, ino, gen, p);
2641 if (ret < 0)
2642 goto out;
2643
2644 TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, p);
2645 TLV_PUT_U64(sctx, BTRFS_SEND_A_INO, ino);
2646
2647 if (S_ISLNK(mode)) {
2648 fs_path_reset(p);
2649 ret = read_symlink(sctx->send_root, ino, p);
2650 if (ret < 0)
2651 goto out;
2652 TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH_LINK, p);
2653 } else if (S_ISCHR(mode) || S_ISBLK(mode) ||
2654 S_ISFIFO(mode) || S_ISSOCK(mode)) {
2655 TLV_PUT_U64(sctx, BTRFS_SEND_A_RDEV, new_encode_dev(rdev));
2656 TLV_PUT_U64(sctx, BTRFS_SEND_A_MODE, mode);
2657 }
2658
2659 ret = send_cmd(sctx);
2660 if (ret < 0)
2661 goto out;
2662
2663
2664tlv_put_failure:
2665out:
2666 fs_path_free(p);
2667 return ret;
2668}
2669
2670/*
2671 * We need some special handling for inodes that get processed before their
2672 * parent directory is created. See process_recorded_refs for details.
2673 * This function checks whether we already created the dir out of order.
2674 */
2675static int did_create_dir(struct send_ctx *sctx, u64 dir)
2676{
2677 int ret = 0;
2678 struct btrfs_path *path = NULL;
2679 struct btrfs_key key;
2680 struct btrfs_key found_key;
2681 struct btrfs_key di_key;
2682 struct extent_buffer *eb;
2683 struct btrfs_dir_item *di;
2684 int slot;
2685
2686 path = alloc_path_for_send();
2687 if (!path) {
2688 ret = -ENOMEM;
2689 goto out;
2690 }
2691
2692 key.objectid = dir;
2693 key.type = BTRFS_DIR_INDEX_KEY;
2694 key.offset = 0;
2695 ret = btrfs_search_slot(NULL, sctx->send_root, &key, path, 0, 0);
2696 if (ret < 0)
2697 goto out;
2698
2699 while (1) {
2700 eb = path->nodes[0];
2701 slot = path->slots[0];
2702 if (slot >= btrfs_header_nritems(eb)) {
2703 ret = btrfs_next_leaf(sctx->send_root, path);
2704 if (ret < 0) {
2705 goto out;
2706 } else if (ret > 0) {
2707 ret = 0;
2708 break;
2709 }
2710 continue;
2711 }
2712
2713 btrfs_item_key_to_cpu(eb, &found_key, slot);
2714 if (found_key.objectid != key.objectid ||
2715 found_key.type != key.type) {
2716 ret = 0;
2717 goto out;
2718 }
2719
2720 di = btrfs_item_ptr(eb, slot, struct btrfs_dir_item);
2721 btrfs_dir_item_key_to_cpu(eb, di, &di_key);
2722
2723 if (di_key.type != BTRFS_ROOT_ITEM_KEY &&
2724 di_key.objectid < sctx->send_progress) {
2725 ret = 1;
2726 goto out;
2727 }
2728
2729 path->slots[0]++;
2730 }
2731
2732out:
2733 btrfs_free_path(path);
2734 return ret;
2735}
2736
2737/*
2738 * Only creates the inode if it is:
2739 * 1. Not a directory
2740 * 2. A directory which was not already created due to out-of-order
2741 *    directories. See did_create_dir and process_recorded_refs for details.
2742 */
2743static int send_create_inode_if_needed(struct send_ctx *sctx)
2744{
2745 int ret;
2746
2747 if (S_ISDIR(sctx->cur_inode_mode)) {
2748 ret = did_create_dir(sctx, sctx->cur_ino);
2749 if (ret < 0)
2750 return ret;
2751 else if (ret > 0)
2752 return 0;
2753 }
2754
2755 return send_create_inode(sctx, sctx->cur_ino);
2756}
2757
2758struct recorded_ref {
2759 struct list_head list;
2760 char *name;
2761 struct fs_path *full_path;
2762 u64 dir;
2763 u64 dir_gen;
2764 int name_len;
2765};
2766
2767static void set_ref_path(struct recorded_ref *ref, struct fs_path *path)
2768{
2769 ref->full_path = path;
2770 ref->name = (char *)kbasename(ref->full_path->start);
2771 ref->name_len = ref->full_path->end - ref->name;
2772}
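
/*
 * Note: after set_ref_path(), ref->name is not a separate allocation, it
 * points into ref->full_path->start (via kbasename()). update_ref_path()
 * and refresh_ref_path() further below both have to work around this
 * aliasing before replacing or rebuilding full_path.
 */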
2773
2774/*
2775 * We need to process new refs before deleted refs, but compare_tree gives us
2776 * everything mixed. So we first record all refs and later process them.
2777 * This function is a helper to record one ref.
2778 */
2779static int __record_ref(struct list_head *head, u64 dir,
2780 u64 dir_gen, struct fs_path *path)
2781{
2782 struct recorded_ref *ref;
2783
2784 ref = kmalloc(sizeof(*ref), GFP_KERNEL);
2785 if (!ref)
2786 return -ENOMEM;
2787
2788 ref->dir = dir;
2789 ref->dir_gen = dir_gen;
2790 set_ref_path(ref, path);
2791 list_add_tail(&ref->list, head);
2792 return 0;
2793}
2794
2795static int dup_ref(struct recorded_ref *ref, struct list_head *list)
2796{
2797 struct recorded_ref *new;
2798
2799 new = kmalloc(sizeof(*ref), GFP_KERNEL);
2800 if (!new)
2801 return -ENOMEM;
2802
2803 new->dir = ref->dir;
2804 new->dir_gen = ref->dir_gen;
2805 new->full_path = NULL;
2806 INIT_LIST_HEAD(&new->list);
2807 list_add_tail(&new->list, list);
2808 return 0;
2809}
2810
2811static void __free_recorded_refs(struct list_head *head)
2812{
2813 struct recorded_ref *cur;
2814
2815 while (!list_empty(head)) {
2816 cur = list_entry(head->next, struct recorded_ref, list);
2817 fs_path_free(cur->full_path);
2818 list_del(&cur->list);
2819 kfree(cur);
2820 }
2821}
2822
2823static void free_recorded_refs(struct send_ctx *sctx)
2824{
2825 __free_recorded_refs(&sctx->new_refs);
2826 __free_recorded_refs(&sctx->deleted_refs);
2827}
2828
2829/*
2830 * Renames/moves a file/dir to its orphan name. Used when the first
2831 * ref of an unprocessed inode gets overwritten and for all non-empty
2832 * directories.
2833 */
2834static int orphanize_inode(struct send_ctx *sctx, u64 ino, u64 gen,
2835 struct fs_path *path)
2836{
2837 int ret;
2838 struct fs_path *orphan;
2839
2840 orphan = fs_path_alloc();
2841 if (!orphan)
2842 return -ENOMEM;
2843
2844 ret = gen_unique_name(sctx, ino, gen, orphan);
2845 if (ret < 0)
2846 goto out;
2847
2848 ret = send_rename(sctx, path, orphan);
2849
2850out:
2851 fs_path_free(orphan);
2852 return ret;
2853}
2854
2855static struct orphan_dir_info *add_orphan_dir_info(struct send_ctx *sctx,
2856 u64 dir_ino, u64 dir_gen)
2857{
2858 struct rb_node **p = &sctx->orphan_dirs.rb_node;
2859 struct rb_node *parent = NULL;
2860 struct orphan_dir_info *entry, *odi;
2861
2862 while (*p) {
2863 parent = *p;
2864 entry = rb_entry(parent, struct orphan_dir_info, node);
2865 if (dir_ino < entry->ino)
2866 p = &(*p)->rb_left;
2867 else if (dir_ino > entry->ino)
2868 p = &(*p)->rb_right;
2869 else if (dir_gen < entry->gen)
2870 p = &(*p)->rb_left;
2871 else if (dir_gen > entry->gen)
2872 p = &(*p)->rb_right;
2873 else
2874 return entry;
2875 }
2876
2877 odi = kmalloc(sizeof(*odi), GFP_KERNEL);
2878 if (!odi)
2879 return ERR_PTR(-ENOMEM);
2880 odi->ino = dir_ino;
2881 odi->gen = dir_gen;
2882 odi->last_dir_index_offset = 0;
2883
2884 rb_link_node(&odi->node, parent, p);
2885 rb_insert_color(&odi->node, &sctx->orphan_dirs);
2886 return odi;
2887}
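
/*
 * Illustrative sketch, not part of the original file: the tree above is
 * ordered by the composite key (dir_ino, dir_gen), compared field by
 * field. An equivalent comparator, with a hypothetical name:
 */
static int example_odi_cmp(u64 a_ino, u64 a_gen, u64 b_ino, u64 b_gen)
{
	if (a_ino != b_ino)
		return a_ino < b_ino ? -1 : 1;
	if (a_gen != b_gen)
		return a_gen < b_gen ? -1 : 1;
	return 0;	/* same (ino, gen) pair: same orphan dir */
}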
2888
2889static struct orphan_dir_info *get_orphan_dir_info(struct send_ctx *sctx,
2890 u64 dir_ino, u64 gen)
2891{
2892 struct rb_node *n = sctx->orphan_dirs.rb_node;
2893 struct orphan_dir_info *entry;
2894
2895 while (n) {
2896 entry = rb_entry(n, struct orphan_dir_info, node);
2897 if (dir_ino < entry->ino)
2898 n = n->rb_left;
2899 else if (dir_ino > entry->ino)
2900 n = n->rb_right;
2901 else if (gen < entry->gen)
2902 n = n->rb_left;
2903 else if (gen > entry->gen)
2904 n = n->rb_right;
2905 else
2906 return entry;
2907 }
2908 return NULL;
2909}
2910
2911static int is_waiting_for_rm(struct send_ctx *sctx, u64 dir_ino, u64 gen)
2912{
2913 struct orphan_dir_info *odi = get_orphan_dir_info(sctx, dir_ino, gen);
2914
2915 return odi != NULL;
2916}
2917
2918static void free_orphan_dir_info(struct send_ctx *sctx,
2919 struct orphan_dir_info *odi)
2920{
2921 if (!odi)
2922 return;
2923 rb_erase(&odi->node, &sctx->orphan_dirs);
2924 kfree(odi);
2925}
2926
2927/*
2928 * Returns 1 if a directory can be removed at this point in time.
2929 * We check this by iterating all dir items and checking if the inode behind
2930 * the dir item was already processed.
2931 */
2932static int can_rmdir(struct send_ctx *sctx, u64 dir, u64 dir_gen,
2933 u64 send_progress)
2934{
2935 int ret = 0;
2936 struct btrfs_root *root = sctx->parent_root;
2937 struct btrfs_path *path;
2938 struct btrfs_key key;
2939 struct btrfs_key found_key;
2940 struct btrfs_key loc;
2941 struct btrfs_dir_item *di;
2942 struct orphan_dir_info *odi = NULL;
2943
2944 /*
2945 * Don't try to rmdir the top/root subvolume dir.
2946 */
2947 if (dir == BTRFS_FIRST_FREE_OBJECTID)
2948 return 0;
2949
2950 path = alloc_path_for_send();
2951 if (!path)
2952 return -ENOMEM;
2953
2954 key.objectid = dir;
2955 key.type = BTRFS_DIR_INDEX_KEY;
2956 key.offset = 0;
2957
2958 odi = get_orphan_dir_info(sctx, dir, dir_gen);
2959 if (odi)
2960 key.offset = odi->last_dir_index_offset;
2961
2962 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
2963 if (ret < 0)
2964 goto out;
2965
2966 while (1) {
2967 struct waiting_dir_move *dm;
2968
2969 if (path->slots[0] >= btrfs_header_nritems(path->nodes[0])) {
2970 ret = btrfs_next_leaf(root, path);
2971 if (ret < 0)
2972 goto out;
2973 else if (ret > 0)
2974 break;
2975 continue;
2976 }
2977 btrfs_item_key_to_cpu(path->nodes[0], &found_key,
2978 path->slots[0]);
2979 if (found_key.objectid != key.objectid ||
2980 found_key.type != key.type)
2981 break;
2982
2983 di = btrfs_item_ptr(path->nodes[0], path->slots[0],
2984 struct btrfs_dir_item);
2985 btrfs_dir_item_key_to_cpu(path->nodes[0], di, &loc);
2986
2987 dm = get_waiting_dir_move(sctx, loc.objectid);
2988 if (dm) {
2989 odi = add_orphan_dir_info(sctx, dir, dir_gen);
2990 if (IS_ERR(odi)) {
2991 ret = PTR_ERR(odi);
2992 goto out;
2993 }
2994 odi->gen = dir_gen;
2995 odi->last_dir_index_offset = found_key.offset;
2996 dm->rmdir_ino = dir;
2997 dm->rmdir_gen = dir_gen;
2998 ret = 0;
2999 goto out;
3000 }
3001
3002 if (loc.objectid > send_progress) {
3003 odi = add_orphan_dir_info(sctx, dir, dir_gen);
3004 if (IS_ERR(odi)) {
3005 ret = PTR_ERR(odi);
3006 goto out;
3007 }
3008 odi->gen = dir_gen;
3009 odi->last_dir_index_offset = found_key.offset;
3010 ret = 0;
3011 goto out;
3012 }
3013
3014 path->slots[0]++;
3015 }
3016 free_orphan_dir_info(sctx, odi);
3017
3018 ret = 1;
3019
3020out:
3021 btrfs_free_path(path);
3022 return ret;
3023}
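
/*
 * Note on odi->last_dir_index_offset above: when can_rmdir() gives up
 * because some entry is still unprocessed, it records the dir index
 * offset it stopped at, so the next attempt for the same directory
 * resumes the scan there (see the key.offset setup at the top) instead
 * of rescanning all dir index items from offset 0.
 */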
3024
3025static int is_waiting_for_move(struct send_ctx *sctx, u64 ino)
3026{
3027 struct waiting_dir_move *entry = get_waiting_dir_move(sctx, ino);
3028
3029 return entry != NULL;
3030}
3031
3032static int add_waiting_dir_move(struct send_ctx *sctx, u64 ino, bool orphanized)
3033{
3034 struct rb_node **p = &sctx->waiting_dir_moves.rb_node;
3035 struct rb_node *parent = NULL;
3036 struct waiting_dir_move *entry, *dm;
3037
3038 dm = kmalloc(sizeof(*dm), GFP_KERNEL);
3039 if (!dm)
3040 return -ENOMEM;
3041 dm->ino = ino;
3042 dm->rmdir_ino = 0;
3043 dm->rmdir_gen = 0;
3044 dm->orphanized = orphanized;
3045
3046 while (*p) {
3047 parent = *p;
3048 entry = rb_entry(parent, struct waiting_dir_move, node);
3049 if (ino < entry->ino) {
3050 p = &(*p)->rb_left;
3051 } else if (ino > entry->ino) {
3052 p = &(*p)->rb_right;
3053 } else {
3054 kfree(dm);
3055 return -EEXIST;
3056 }
3057 }
3058
3059 rb_link_node(&dm->node, parent, p);
3060 rb_insert_color(&dm->node, &sctx->waiting_dir_moves);
3061 return 0;
3062}
3063
3064static struct waiting_dir_move *
3065get_waiting_dir_move(struct send_ctx *sctx, u64 ino)
3066{
3067 struct rb_node *n = sctx->waiting_dir_moves.rb_node;
3068 struct waiting_dir_move *entry;
3069
3070 while (n) {
3071 entry = rb_entry(n, struct waiting_dir_move, node);
3072 if (ino < entry->ino)
3073 n = n->rb_left;
3074 else if (ino > entry->ino)
3075 n = n->rb_right;
3076 else
3077 return entry;
3078 }
3079 return NULL;
3080}
3081
3082static void free_waiting_dir_move(struct send_ctx *sctx,
3083 struct waiting_dir_move *dm)
3084{
3085 if (!dm)
3086 return;
3087 rb_erase(&dm->node, &sctx->waiting_dir_moves);
3088 kfree(dm);
3089}
3090
3091static int add_pending_dir_move(struct send_ctx *sctx,
3092 u64 ino,
3093 u64 ino_gen,
3094 u64 parent_ino,
3095 struct list_head *new_refs,
3096 struct list_head *deleted_refs,
3097 const bool is_orphan)
3098{
3099 struct rb_node **p = &sctx->pending_dir_moves.rb_node;
3100 struct rb_node *parent = NULL;
3101 struct pending_dir_move *entry = NULL, *pm;
3102 struct recorded_ref *cur;
3103 int exists = 0;
3104 int ret;
3105
3106 pm = kmalloc(sizeof(*pm), GFP_KERNEL);
3107 if (!pm)
3108 return -ENOMEM;
3109 pm->parent_ino = parent_ino;
3110 pm->ino = ino;
3111 pm->gen = ino_gen;
3112 INIT_LIST_HEAD(&pm->list);
3113 INIT_LIST_HEAD(&pm->update_refs);
3114 RB_CLEAR_NODE(&pm->node);
3115
3116 while (*p) {
3117 parent = *p;
3118 entry = rb_entry(parent, struct pending_dir_move, node);
3119 if (parent_ino < entry->parent_ino) {
3120 p = &(*p)->rb_left;
3121 } else if (parent_ino > entry->parent_ino) {
3122 p = &(*p)->rb_right;
3123 } else {
3124 exists = 1;
3125 break;
3126 }
3127 }
3128
3129 list_for_each_entry(cur, deleted_refs, list) {
3130 ret = dup_ref(cur, &pm->update_refs);
3131 if (ret < 0)
3132 goto out;
3133 }
3134 list_for_each_entry(cur, new_refs, list) {
3135 ret = dup_ref(cur, &pm->update_refs);
3136 if (ret < 0)
3137 goto out;
3138 }
3139
3140 ret = add_waiting_dir_move(sctx, pm->ino, is_orphan);
3141 if (ret)
3142 goto out;
3143
3144 if (exists) {
3145 list_add_tail(&pm->list, &entry->list);
3146 } else {
3147 rb_link_node(&pm->node, parent, p);
3148 rb_insert_color(&pm->node, &sctx->pending_dir_moves);
3149 }
3150 ret = 0;
3151out:
3152 if (ret) {
3153 __free_recorded_refs(&pm->update_refs);
3154 kfree(pm);
3155 }
3156 return ret;
3157}
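
/*
 * Note: pending moves are grouped by parent_ino. The first move recorded
 * for a given parent becomes the rb-tree node; later moves with the same
 * parent are chained onto its ->list (the "exists" case above), so a
 * single lookup yields every move blocked on that parent.
 */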
3158
3159static struct pending_dir_move *get_pending_dir_moves(struct send_ctx *sctx,
3160 u64 parent_ino)
3161{
3162 struct rb_node *n = sctx->pending_dir_moves.rb_node;
3163 struct pending_dir_move *entry;
3164
3165 while (n) {
3166 entry = rb_entry(n, struct pending_dir_move, node);
3167 if (parent_ino < entry->parent_ino)
3168 n = n->rb_left;
3169 else if (parent_ino > entry->parent_ino)
3170 n = n->rb_right;
3171 else
3172 return entry;
3173 }
3174 return NULL;
3175}
3176
3177static int path_loop(struct send_ctx *sctx, struct fs_path *name,
3178 u64 ino, u64 gen, u64 *ancestor_ino)
3179{
3180 int ret = 0;
3181 u64 parent_inode = 0;
3182 u64 parent_gen = 0;
3183 u64 start_ino = ino;
3184
3185 *ancestor_ino = 0;
3186 while (ino != BTRFS_FIRST_FREE_OBJECTID) {
3187 fs_path_reset(name);
3188
3189 if (is_waiting_for_rm(sctx, ino, gen))
3190 break;
3191 if (is_waiting_for_move(sctx, ino)) {
3192 if (*ancestor_ino == 0)
3193 *ancestor_ino = ino;
3194 ret = get_first_ref(sctx->parent_root, ino,
3195 &parent_inode, &parent_gen, name);
3196 } else {
3197 ret = __get_cur_name_and_parent(sctx, ino, gen,
3198 &parent_inode,
3199 &parent_gen, name);
3200 if (ret > 0) {
3201 ret = 0;
3202 break;
3203 }
3204 }
3205 if (ret < 0)
3206 break;
3207 if (parent_inode == start_ino) {
3208 ret = 1;
3209 if (*ancestor_ino == 0)
3210 *ancestor_ino = ino;
3211 break;
3212 }
3213 ino = parent_inode;
3214 gen = parent_gen;
3215 }
3216 return ret;
3217}
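
/*
 * Illustrative sketch, not part of the original file: the cycle check
 * path_loop() performs, reduced to its core. parent_of[] is a toy
 * stand-in for the get_first_ref()/__get_cur_name_and_parent() parent
 * lookups and root stands in for BTRFS_FIRST_FREE_OBJECTID. Like
 * path_loop(), it assumes the only possible cycle is the one the
 * pending rename would introduce through the start inode.
 */
static bool example_reaches_start(const u64 *parent_of, u64 start, u64 root)
{
	u64 ino = parent_of[start];

	while (ino != root) {
		if (ino == start)
			return true;	/* moving start would create a loop */
		ino = parent_of[ino];
	}
	return false;
}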
3218
3219static int apply_dir_move(struct send_ctx *sctx, struct pending_dir_move *pm)
3220{
3221 struct fs_path *from_path = NULL;
3222 struct fs_path *to_path = NULL;
3223 struct fs_path *name = NULL;
3224 u64 orig_progress = sctx->send_progress;
3225 struct recorded_ref *cur;
3226 u64 parent_ino, parent_gen;
3227 struct waiting_dir_move *dm = NULL;
3228 u64 rmdir_ino = 0;
3229 u64 rmdir_gen;
3230 u64 ancestor;
3231 bool is_orphan;
3232 int ret;
3233
3234 name = fs_path_alloc();
3235 from_path = fs_path_alloc();
3236 if (!name || !from_path) {
3237 ret = -ENOMEM;
3238 goto out;
3239 }
3240
3241 dm = get_waiting_dir_move(sctx, pm->ino);
3242 ASSERT(dm);
3243 rmdir_ino = dm->rmdir_ino;
3244 rmdir_gen = dm->rmdir_gen;
3245 is_orphan = dm->orphanized;
3246 free_waiting_dir_move(sctx, dm);
3247
3248 if (is_orphan) {
3249 ret = gen_unique_name(sctx, pm->ino,
3250 pm->gen, from_path);
3251 } else {
3252 ret = get_first_ref(sctx->parent_root, pm->ino,
3253 &parent_ino, &parent_gen, name);
3254 if (ret < 0)
3255 goto out;
3256 ret = get_cur_path(sctx, parent_ino, parent_gen,
3257 from_path);
3258 if (ret < 0)
3259 goto out;
3260 ret = fs_path_add_path(from_path, name);
3261 }
3262 if (ret < 0)
3263 goto out;
3264
3265 sctx->send_progress = sctx->cur_ino + 1;
3266 ret = path_loop(sctx, name, pm->ino, pm->gen, &ancestor);
3267 if (ret < 0)
3268 goto out;
3269 if (ret) {
3270 LIST_HEAD(deleted_refs);
3271 ASSERT(ancestor > BTRFS_FIRST_FREE_OBJECTID);
3272 ret = add_pending_dir_move(sctx, pm->ino, pm->gen, ancestor,
3273 &pm->update_refs, &deleted_refs,
3274 is_orphan);
3275 if (ret < 0)
3276 goto out;
3277 if (rmdir_ino) {
3278 dm = get_waiting_dir_move(sctx, pm->ino);
3279 ASSERT(dm);
3280 dm->rmdir_ino = rmdir_ino;
3281 dm->rmdir_gen = rmdir_gen;
3282 }
3283 goto out;
3284 }
3285 fs_path_reset(name);
3286 to_path = name;
3287 name = NULL;
3288 ret = get_cur_path(sctx, pm->ino, pm->gen, to_path);
3289 if (ret < 0)
3290 goto out;
3291
3292 ret = send_rename(sctx, from_path, to_path);
3293 if (ret < 0)
3294 goto out;
3295
3296 if (rmdir_ino) {
3297 struct orphan_dir_info *odi;
3298 u64 gen;
3299
3300 odi = get_orphan_dir_info(sctx, rmdir_ino, rmdir_gen);
3301 if (!odi) {
3302 /* already deleted */
3303 goto finish;
3304 }
3305 gen = odi->gen;
3306
3307 ret = can_rmdir(sctx, rmdir_ino, gen, sctx->cur_ino);
3308 if (ret < 0)
3309 goto out;
3310 if (!ret)
3311 goto finish;
3312
3313 name = fs_path_alloc();
3314 if (!name) {
3315 ret = -ENOMEM;
3316 goto out;
3317 }
3318 ret = get_cur_path(sctx, rmdir_ino, gen, name);
3319 if (ret < 0)
3320 goto out;
3321 ret = send_rmdir(sctx, name);
3322 if (ret < 0)
3323 goto out;
3324 }
3325
3326finish:
3327 ret = send_utimes(sctx, pm->ino, pm->gen);
3328 if (ret < 0)
3329 goto out;
3330
3331 /*
3332	 * After rename/move, we need to update the utimes of both new parent(s)
3333 * and old parent(s).
3334 */
3335 list_for_each_entry(cur, &pm->update_refs, list) {
3336 /*
3337 * The parent inode might have been deleted in the send snapshot
3338 */
3339 ret = get_inode_info(sctx->send_root, cur->dir, NULL,
3340 NULL, NULL, NULL, NULL, NULL);
3341 if (ret == -ENOENT) {
3342 ret = 0;
3343 continue;
3344 }
3345 if (ret < 0)
3346 goto out;
3347
3348 ret = send_utimes(sctx, cur->dir, cur->dir_gen);
3349 if (ret < 0)
3350 goto out;
3351 }
3352
3353out:
3354 fs_path_free(name);
3355 fs_path_free(from_path);
3356 fs_path_free(to_path);
3357 sctx->send_progress = orig_progress;
3358
3359 return ret;
3360}
3361
3362static void free_pending_move(struct send_ctx *sctx, struct pending_dir_move *m)
3363{
3364 if (!list_empty(&m->list))
3365 list_del(&m->list);
3366 if (!RB_EMPTY_NODE(&m->node))
3367 rb_erase(&m->node, &sctx->pending_dir_moves);
3368 __free_recorded_refs(&m->update_refs);
3369 kfree(m);
3370}
3371
3372static void tail_append_pending_moves(struct send_ctx *sctx,
3373 struct pending_dir_move *moves,
3374 struct list_head *stack)
3375{
3376 if (list_empty(&moves->list)) {
3377 list_add_tail(&moves->list, stack);
3378 } else {
3379 LIST_HEAD(list);
3380 list_splice_init(&moves->list, &list);
3381 list_add_tail(&moves->list, stack);
3382 list_splice_tail(&list, stack);
3383 }
3384 if (!RB_EMPTY_NODE(&moves->node)) {
3385 rb_erase(&moves->node, &sctx->pending_dir_moves);
3386 RB_CLEAR_NODE(&moves->node);
3387 }
3388}
3389
3390static int apply_children_dir_moves(struct send_ctx *sctx)
3391{
3392 struct pending_dir_move *pm;
3393 struct list_head stack;
3394 u64 parent_ino = sctx->cur_ino;
3395 int ret = 0;
3396
3397 pm = get_pending_dir_moves(sctx, parent_ino);
3398 if (!pm)
3399 return 0;
3400
3401 INIT_LIST_HEAD(&stack);
3402 tail_append_pending_moves(sctx, pm, &stack);
3403
3404 while (!list_empty(&stack)) {
3405 pm = list_first_entry(&stack, struct pending_dir_move, list);
3406 parent_ino = pm->ino;
3407 ret = apply_dir_move(sctx, pm);
3408 free_pending_move(sctx, pm);
3409 if (ret)
3410 goto out;
3411 pm = get_pending_dir_moves(sctx, parent_ino);
3412 if (pm)
3413 tail_append_pending_moves(sctx, pm, &stack);
3414 }
3415 return 0;
3416
3417out:
3418 while (!list_empty(&stack)) {
3419 pm = list_first_entry(&stack, struct pending_dir_move, list);
3420 free_pending_move(sctx, pm);
3421 }
3422 return ret;
3423}
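
/*
 * Note: the loop above is an iterative traversal of the tree of delayed
 * renames hanging off cur_ino. Applying one move may unblock moves whose
 * parent is the inode just moved, so those are appended to the stack and
 * processed in turn, without recursion.
 */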
3424
3425/*
3426 * We might need to delay a directory rename even when no ancestor directory
3427 * (in the send root) with a higher inode number than ours (sctx->cur_ino) was
3428 * renamed. This happens when we rename a directory to the old name (the name
3429 * in the parent root) of some other unrelated directory that got its rename
3430 * delayed due to some ancestor with a higher inode number that got renamed.
3431 *
3432 * Example:
3433 *
3434 * Parent snapshot:
3435 * . (ino 256)
3436 * |---- a/ (ino 257)
3437 * | |---- file (ino 260)
3438 * |
3439 * |---- b/ (ino 258)
3440 * |---- c/ (ino 259)
3441 *
3442 * Send snapshot:
3443 * . (ino 256)
3444 * |---- a/ (ino 258)
3445 * |---- x/ (ino 259)
3446 * |---- y/ (ino 257)
3447 * |----- file (ino 260)
3448 *
3449 * Here we cannot rename 258 from 'b' to 'a' without the rename of inode 257
3450 * from 'a' to 'x/y' happening first, which in turn depends on the rename of
3451 * inode 259 from 'c' to 'x'. So the order of rename commands the send stream
3452 * must issue is:
3453 *
3454 * 1 - rename 259 from 'c' to 'x'
3455 * 2 - rename 257 from 'a' to 'x/y'
3456 * 3 - rename 258 from 'b' to 'a'
3457 *
3458 * Returns 1 if the rename of sctx->cur_ino needs to be delayed, 0 if it can
3459 * be done right away and < 0 on error.
3460 */
3461static int wait_for_dest_dir_move(struct send_ctx *sctx,
3462 struct recorded_ref *parent_ref,
3463 const bool is_orphan)
3464{
3465 struct btrfs_fs_info *fs_info = sctx->parent_root->fs_info;
3466 struct btrfs_path *path;
3467 struct btrfs_key key;
3468 struct btrfs_key di_key;
3469 struct btrfs_dir_item *di;
3470 u64 left_gen;
3471 u64 right_gen;
3472 int ret = 0;
3473 struct waiting_dir_move *wdm;
3474
3475 if (RB_EMPTY_ROOT(&sctx->waiting_dir_moves))
3476 return 0;
3477
3478 path = alloc_path_for_send();
3479 if (!path)
3480 return -ENOMEM;
3481
3482 key.objectid = parent_ref->dir;
3483 key.type = BTRFS_DIR_ITEM_KEY;
3484 key.offset = btrfs_name_hash(parent_ref->name, parent_ref->name_len);
3485
3486 ret = btrfs_search_slot(NULL, sctx->parent_root, &key, path, 0, 0);
3487 if (ret < 0) {
3488 goto out;
3489 } else if (ret > 0) {
3490 ret = 0;
3491 goto out;
3492 }
3493
3494 di = btrfs_match_dir_item_name(fs_info, path, parent_ref->name,
3495 parent_ref->name_len);
3496 if (!di) {
3497 ret = 0;
3498 goto out;
3499 }
3500 /*
3501 * di_key.objectid has the number of the inode that has a dentry in the
3502 * parent directory with the same name that sctx->cur_ino is being
3503 * renamed to. We need to check if that inode is in the send root as
3504 * well and if it is currently marked as an inode with a pending rename,
3505 * if it is, we need to delay the rename of sctx->cur_ino as well, so
3506 * that it happens after that other inode is renamed.
3507 */
3508 btrfs_dir_item_key_to_cpu(path->nodes[0], di, &di_key);
3509 if (di_key.type != BTRFS_INODE_ITEM_KEY) {
3510 ret = 0;
3511 goto out;
3512 }
3513
3514 ret = get_inode_info(sctx->parent_root, di_key.objectid, NULL,
3515 &left_gen, NULL, NULL, NULL, NULL);
3516 if (ret < 0)
3517 goto out;
3518 ret = get_inode_info(sctx->send_root, di_key.objectid, NULL,
3519 &right_gen, NULL, NULL, NULL, NULL);
3520 if (ret < 0) {
3521 if (ret == -ENOENT)
3522 ret = 0;
3523 goto out;
3524 }
3525
3526 /* Different inode, no need to delay the rename of sctx->cur_ino */
3527 if (right_gen != left_gen) {
3528 ret = 0;
3529 goto out;
3530 }
3531
3532 wdm = get_waiting_dir_move(sctx, di_key.objectid);
3533 if (wdm && !wdm->orphanized) {
3534 ret = add_pending_dir_move(sctx,
3535 sctx->cur_ino,
3536 sctx->cur_inode_gen,
3537 di_key.objectid,
3538 &sctx->new_refs,
3539 &sctx->deleted_refs,
3540 is_orphan);
3541 if (!ret)
3542 ret = 1;
3543 }
3544out:
3545 btrfs_free_path(path);
3546 return ret;
3547}
3548
3549/*
3550 * Check if inode ino2, or any of its ancestors, is inode ino1.
3551 * Return 1 if true, 0 if false and < 0 on error.
3552 */
3553static int check_ino_in_path(struct btrfs_root *root,
3554 const u64 ino1,
3555 const u64 ino1_gen,
3556 const u64 ino2,
3557 const u64 ino2_gen,
3558 struct fs_path *fs_path)
3559{
3560 u64 ino = ino2;
3561
3562 if (ino1 == ino2)
3563 return ino1_gen == ino2_gen;
3564
3565 while (ino > BTRFS_FIRST_FREE_OBJECTID) {
3566 u64 parent;
3567 u64 parent_gen;
3568 int ret;
3569
3570 fs_path_reset(fs_path);
3571 ret = get_first_ref(root, ino, &parent, &parent_gen, fs_path);
3572 if (ret < 0)
3573 return ret;
3574 if (parent == ino1)
3575 return parent_gen == ino1_gen;
3576 ino = parent;
3577 }
3578 return 0;
3579}
3580
3581/*
3582 * Check if inode ino1 is an ancestor of inode ino2 in the given root for any
3583 * possible path (in case ino2 is not a directory and has multiple hard links).
3584 * Return 1 if true, 0 if false and < 0 on error.
3585 */
3586static int is_ancestor(struct btrfs_root *root,
3587 const u64 ino1,
3588 const u64 ino1_gen,
3589 const u64 ino2,
3590 struct fs_path *fs_path)
3591{
3592 bool free_fs_path = false;
3593 int ret = 0;
3594 struct btrfs_path *path = NULL;
3595 struct btrfs_key key;
3596
3597 if (!fs_path) {
3598 fs_path = fs_path_alloc();
3599 if (!fs_path)
3600 return -ENOMEM;
3601 free_fs_path = true;
3602 }
3603
3604 path = alloc_path_for_send();
3605 if (!path) {
3606 ret = -ENOMEM;
3607 goto out;
3608 }
3609
3610 key.objectid = ino2;
3611 key.type = BTRFS_INODE_REF_KEY;
3612 key.offset = 0;
3613
3614 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
3615 if (ret < 0)
3616 goto out;
3617
3618 while (true) {
3619 struct extent_buffer *leaf = path->nodes[0];
3620 int slot = path->slots[0];
3621 u32 cur_offset = 0;
3622 u32 item_size;
3623
3624 if (slot >= btrfs_header_nritems(leaf)) {
3625 ret = btrfs_next_leaf(root, path);
3626 if (ret < 0)
3627 goto out;
3628 if (ret > 0)
3629 break;
3630 continue;
3631 }
3632
3633 btrfs_item_key_to_cpu(leaf, &key, slot);
3634 if (key.objectid != ino2)
3635 break;
3636 if (key.type != BTRFS_INODE_REF_KEY &&
3637 key.type != BTRFS_INODE_EXTREF_KEY)
3638 break;
3639
3640 item_size = btrfs_item_size(leaf, slot);
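		/*
		 * An INODE_EXTREF item packs several variable-size records
		 * back to back, so cur_offset advances record by record. A
		 * plain INODE_REF item stores the parent directory in
		 * key.offset, shared by every name in the item, so a single
		 * iteration consumes the whole item (cur_offset = item_size).
		 */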
3641 while (cur_offset < item_size) {
3642 u64 parent;
3643 u64 parent_gen;
3644
3645 if (key.type == BTRFS_INODE_EXTREF_KEY) {
3646 unsigned long ptr;
3647 struct btrfs_inode_extref *extref;
3648
3649 ptr = btrfs_item_ptr_offset(leaf, slot);
3650 extref = (struct btrfs_inode_extref *)
3651 (ptr + cur_offset);
3652 parent = btrfs_inode_extref_parent(leaf,
3653 extref);
3654 cur_offset += sizeof(*extref);
3655 cur_offset += btrfs_inode_extref_name_len(leaf,
3656 extref);
3657 } else {
3658 parent = key.offset;
3659 cur_offset = item_size;
3660 }
3661
3662 ret = get_inode_info(root, parent, NULL, &parent_gen,
3663 NULL, NULL, NULL, NULL);
3664 if (ret < 0)
3665 goto out;
3666 ret = check_ino_in_path(root, ino1, ino1_gen,
3667 parent, parent_gen, fs_path);
3668 if (ret)
3669 goto out;
3670 }
3671 path->slots[0]++;
3672 }
3673 ret = 0;
3674 out:
3675 btrfs_free_path(path);
3676 if (free_fs_path)
3677 fs_path_free(fs_path);
3678 return ret;
3679}
3680
3681static int wait_for_parent_move(struct send_ctx *sctx,
3682 struct recorded_ref *parent_ref,
3683 const bool is_orphan)
3684{
3685 int ret = 0;
3686 u64 ino = parent_ref->dir;
3687 u64 ino_gen = parent_ref->dir_gen;
3688 u64 parent_ino_before, parent_ino_after;
3689 struct fs_path *path_before = NULL;
3690 struct fs_path *path_after = NULL;
3691 int len1, len2;
3692
3693 path_after = fs_path_alloc();
3694 path_before = fs_path_alloc();
3695 if (!path_after || !path_before) {
3696 ret = -ENOMEM;
3697 goto out;
3698 }
3699
3700 /*
3701 * Our current directory inode may not yet be renamed/moved because some
3702	 * ancestor (immediate or not) has to be renamed/moved first. So find out
3703	 * if such an ancestor exists and make sure our own rename/move happens after
3704 * that ancestor is processed to avoid path build infinite loops (done
3705 * at get_cur_path()).
3706 */
3707 while (ino > BTRFS_FIRST_FREE_OBJECTID) {
3708 u64 parent_ino_after_gen;
3709
3710 if (is_waiting_for_move(sctx, ino)) {
3711 /*
3712 * If the current inode is an ancestor of ino in the
3713 * parent root, we need to delay the rename of the
3714			 * current inode, otherwise don't delay the rename
3715 * because we can end up with a circular dependency
3716 * of renames, resulting in some directories never
3717 * getting the respective rename operations issued in
3718 * the send stream or getting into infinite path build
3719 * loops.
3720 */
3721 ret = is_ancestor(sctx->parent_root,
3722 sctx->cur_ino, sctx->cur_inode_gen,
3723 ino, path_before);
3724 if (ret)
3725 break;
3726 }
3727
3728 fs_path_reset(path_before);
3729 fs_path_reset(path_after);
3730
3731 ret = get_first_ref(sctx->send_root, ino, &parent_ino_after,
3732 &parent_ino_after_gen, path_after);
3733 if (ret < 0)
3734 goto out;
3735 ret = get_first_ref(sctx->parent_root, ino, &parent_ino_before,
3736 NULL, path_before);
3737 if (ret < 0 && ret != -ENOENT) {
3738 goto out;
3739 } else if (ret == -ENOENT) {
3740 ret = 0;
3741 break;
3742 }
3743
3744 len1 = fs_path_len(path_before);
3745 len2 = fs_path_len(path_after);
3746 if (ino > sctx->cur_ino &&
3747 (parent_ino_before != parent_ino_after || len1 != len2 ||
3748 memcmp(path_before->start, path_after->start, len1))) {
3749 u64 parent_ino_gen;
3750
3751 ret = get_inode_info(sctx->parent_root, ino, NULL,
3752 &parent_ino_gen, NULL, NULL, NULL,
3753 NULL);
3754 if (ret < 0)
3755 goto out;
3756 if (ino_gen == parent_ino_gen) {
3757 ret = 1;
3758 break;
3759 }
3760 }
3761 ino = parent_ino_after;
3762 ino_gen = parent_ino_after_gen;
3763 }
3764
3765out:
3766 fs_path_free(path_before);
3767 fs_path_free(path_after);
3768
3769 if (ret == 1) {
3770 ret = add_pending_dir_move(sctx,
3771 sctx->cur_ino,
3772 sctx->cur_inode_gen,
3773 ino,
3774 &sctx->new_refs,
3775 &sctx->deleted_refs,
3776 is_orphan);
3777 if (!ret)
3778 ret = 1;
3779 }
3780
3781 return ret;
3782}
3783
3784static int update_ref_path(struct send_ctx *sctx, struct recorded_ref *ref)
3785{
3786 int ret;
3787 struct fs_path *new_path;
3788
3789 /*
3790 * Our reference's name member points to its full_path member string, so
3791	 * we use a new path here.
3792 */
3793 new_path = fs_path_alloc();
3794 if (!new_path)
3795 return -ENOMEM;
3796
3797 ret = get_cur_path(sctx, ref->dir, ref->dir_gen, new_path);
3798 if (ret < 0) {
3799 fs_path_free(new_path);
3800 return ret;
3801 }
3802 ret = fs_path_add(new_path, ref->name, ref->name_len);
3803 if (ret < 0) {
3804 fs_path_free(new_path);
3805 return ret;
3806 }
3807
3808 fs_path_free(ref->full_path);
3809 set_ref_path(ref, new_path);
3810
3811 return 0;
3812}
3813
3814/*
3815 * When processing the new references for an inode we may orphanize an existing
3816 * directory inode because its old name conflicts with one of the new references
3817 * of the current inode. Later, when processing another new reference of our
3818 * inode, we might need to orphanize another inode, but the path we have in the
3819 * reference reflects the pre-orphanization name of the directory we previously
3820 * orphanized. For example:
3821 *
3822 * parent snapshot looks like:
3823 *
3824 * . (ino 256)
3825 * |----- f1 (ino 257)
3826 * |----- f2 (ino 258)
3827 * |----- d1/ (ino 259)
3828 * |----- d2/ (ino 260)
3829 *
3830 * send snapshot looks like:
3831 *
3832 * . (ino 256)
3833 * |----- d1 (ino 258)
3834 * |----- f2/ (ino 259)
3835 * |----- f2_link/ (ino 260)
3836 * | |----- f1 (ino 257)
3837 * |
3838 * |----- d2 (ino 258)
3839 *
3840 * When processing inode 257 we compute the name for inode 259 as "d1", and we
3841 * cache it in the name cache. Later when we start processing inode 258, when
3842 * collecting all its new references we set a full path of "d1/d2" for its new
3843 * reference with name "d2". When we start processing the new references we
3844 * start by processing the new reference with name "d1", and this results in
3845 * orphanizing inode 259, since its old reference causes a conflict. Then we
3846 * move on the next new reference, with name "d2", and we find out we must
3847 * orphanize inode 260, as its old reference conflicts with ours - but for the
3848 * orphanization we use a source path corresponding to the path we stored in the
3849 * new reference, which is "d1/d2" and not "o259-6-0/d2" - this makes the
3850 * receiver fail since the path component "d1/" no longer exists, it was renamed
3851 * to "o259-6-0/" when processing the previous new reference. So in this case we
3852 * must recompute the path in the new reference and use it for the new
3853 * orphanization operation.
3854 */
3855static int refresh_ref_path(struct send_ctx *sctx, struct recorded_ref *ref)
3856{
3857 char *name;
3858 int ret;
3859
3860 name = kmemdup(ref->name, ref->name_len, GFP_KERNEL);
3861 if (!name)
3862 return -ENOMEM;
3863
3864 fs_path_reset(ref->full_path);
3865 ret = get_cur_path(sctx, ref->dir, ref->dir_gen, ref->full_path);
3866 if (ret < 0)
3867 goto out;
3868
3869 ret = fs_path_add(ref->full_path, name, ref->name_len);
3870 if (ret < 0)
3871 goto out;
3872
3873 /* Update the reference's base name pointer. */
3874 set_ref_path(ref, ref->full_path);
3875out:
3876 kfree(name);
3877 return ret;
3878}
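
/*
 * Note: unlike update_ref_path(), which swaps in a freshly allocated
 * fs_path, refresh_ref_path() rebuilds the path in place. That is why it
 * copies the name first: ref->name points into ref->full_path, and
 * fs_path_reset() would otherwise clobber it.
 */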
3879
3880/*
3881 * This does all the move/link/unlink/rmdir magic. It runs in phases:
 * first orphanize inodes whose old name conflicts with a new ref of the
 * current inode, then apply the new refs (creating parent dirs out of
 * order and delaying moves when needed), then handle the deleted refs,
 * and finally do the deferred rmdir and utimes work collected in
 * check_dirs.
3882 */
3883static int process_recorded_refs(struct send_ctx *sctx, int *pending_move)
3884{
3885 struct btrfs_fs_info *fs_info = sctx->send_root->fs_info;
3886 int ret = 0;
3887 struct recorded_ref *cur;
3888 struct recorded_ref *cur2;
3889 struct list_head check_dirs;
3890 struct fs_path *valid_path = NULL;
3891 u64 ow_inode = 0;
3892 u64 ow_gen;
3893 u64 ow_mode;
3894 int did_overwrite = 0;
3895 int is_orphan = 0;
3896 u64 last_dir_ino_rm = 0;
3897 bool can_rename = true;
3898 bool orphanized_dir = false;
3899 bool orphanized_ancestor = false;
3900
3901 btrfs_debug(fs_info, "process_recorded_refs %llu", sctx->cur_ino);
3902
3903 /*
3904 * This should never happen as the root dir always has the same ref
3905 * which is always '..'
3906 */
3907 BUG_ON(sctx->cur_ino <= BTRFS_FIRST_FREE_OBJECTID);
3908 INIT_LIST_HEAD(&check_dirs);
3909
3910 valid_path = fs_path_alloc();
3911 if (!valid_path) {
3912 ret = -ENOMEM;
3913 goto out;
3914 }
3915
3916 /*
3917 * First, check if the first ref of the current inode was overwritten
3918 * before. If yes, we know that the current inode was already orphanized
3919 * and thus use the orphan name. If not, we can use get_cur_path to
3920	 * get the path of the first ref as it would look while receiving at
3921 * this point in time.
3922 * New inodes are always orphan at the beginning, so force to use the
3923 * orphan name in this case.
3924 * The first ref is stored in valid_path and will be updated if it
3925 * gets moved around.
3926 */
3927 if (!sctx->cur_inode_new) {
3928 ret = did_overwrite_first_ref(sctx, sctx->cur_ino,
3929 sctx->cur_inode_gen);
3930 if (ret < 0)
3931 goto out;
3932 if (ret)
3933 did_overwrite = 1;
3934 }
3935 if (sctx->cur_inode_new || did_overwrite) {
3936 ret = gen_unique_name(sctx, sctx->cur_ino,
3937 sctx->cur_inode_gen, valid_path);
3938 if (ret < 0)
3939 goto out;
3940 is_orphan = 1;
3941 } else {
3942 ret = get_cur_path(sctx, sctx->cur_ino, sctx->cur_inode_gen,
3943 valid_path);
3944 if (ret < 0)
3945 goto out;
3946 }
3947
3948 /*
3949 * Before doing any rename and link operations, do a first pass on the
3950 * new references to orphanize any unprocessed inodes that may have a
3951 * reference that conflicts with one of the new references of the current
3952 * inode. This needs to happen first because a new reference may conflict
3953 * with the old reference of a parent directory, so we must make sure
3954	 * that the path used for link and rename commands doesn't use an
3955 * orphanized name when an ancestor was not yet orphanized.
3956 *
3957 * Example:
3958 *
3959 * Parent snapshot:
3960 *
3961 * . (ino 256)
3962 * |----- testdir/ (ino 259)
3963 * | |----- a (ino 257)
3964 * |
3965 * |----- b (ino 258)
3966 *
3967 * Send snapshot:
3968 *
3969 * . (ino 256)
3970 * |----- testdir_2/ (ino 259)
3971 * | |----- a (ino 260)
3972 * |
3973 * |----- testdir (ino 257)
3974 * |----- b (ino 257)
3975 * |----- b2 (ino 258)
3976 *
3977 * Processing the new reference for inode 257 with name "b" may happen
3978 * before processing the new reference with name "testdir". If so, we
3979 * must make sure that by the time we send a link command to create the
3980 * hard link "b", inode 259 was already orphanized, since the generated
3981 * path in "valid_path" already contains the orphanized name for 259.
3982 * We are processing inode 257, so only later when processing 259 we do
3983 * the rename operation to change its temporary (orphanized) name to
3984 * "testdir_2".
3985 */
3986 list_for_each_entry(cur, &sctx->new_refs, list) {
3987 ret = get_cur_inode_state(sctx, cur->dir, cur->dir_gen);
3988 if (ret < 0)
3989 goto out;
3990 if (ret == inode_state_will_create)
3991 continue;
3992
3993 /*
3994 * Check if this new ref would overwrite the first ref of another
3995 * unprocessed inode. If yes, orphanize the overwritten inode.
3996 * If we find an overwritten ref that is not the first ref,
3997 * simply unlink it.
3998 */
3999 ret = will_overwrite_ref(sctx, cur->dir, cur->dir_gen,
4000 cur->name, cur->name_len,
4001 &ow_inode, &ow_gen, &ow_mode);
4002 if (ret < 0)
4003 goto out;
4004 if (ret) {
4005 ret = is_first_ref(sctx->parent_root,
4006 ow_inode, cur->dir, cur->name,
4007 cur->name_len);
4008 if (ret < 0)
4009 goto out;
4010 if (ret) {
4011 struct name_cache_entry *nce;
4012 struct waiting_dir_move *wdm;
4013
4014 if (orphanized_dir) {
4015 ret = refresh_ref_path(sctx, cur);
4016 if (ret < 0)
4017 goto out;
4018 }
4019
4020 ret = orphanize_inode(sctx, ow_inode, ow_gen,
4021 cur->full_path);
4022 if (ret < 0)
4023 goto out;
4024 if (S_ISDIR(ow_mode))
4025 orphanized_dir = true;
4026
4027 /*
4028 * If ow_inode has its rename operation delayed
4029 * make sure that its orphanized name is used in
4030 * the source path when performing its rename
4031 * operation.
4032 */
4033 if (is_waiting_for_move(sctx, ow_inode)) {
4034 wdm = get_waiting_dir_move(sctx,
4035 ow_inode);
4036 ASSERT(wdm);
4037 wdm->orphanized = true;
4038 }
4039
4040 /*
4041 * Make sure we clear our orphanized inode's
4042 * name from the name cache. This is because the
4043 * inode ow_inode might be an ancestor of some
4044 * other inode that will be orphanized as well
4045 * later and has an inode number greater than
4046 * sctx->send_progress. We need to prevent
4047 * future name lookups from using the old name
4048				 * and return the orphan name instead.
4049 */
4050 nce = name_cache_search(sctx, ow_inode, ow_gen);
4051 if (nce) {
4052 name_cache_delete(sctx, nce);
4053 kfree(nce);
4054 }
4055
4056 /*
4057 * ow_inode might currently be an ancestor of
4058 * cur_ino, therefore compute valid_path (the
4059 * current path of cur_ino) again because it
4060 * might contain the pre-orphanization name of
4061 * ow_inode, which is no longer valid.
4062 */
4063 ret = is_ancestor(sctx->parent_root,
4064 ow_inode, ow_gen,
4065 sctx->cur_ino, NULL);
4066 if (ret > 0) {
4067 orphanized_ancestor = true;
4068 fs_path_reset(valid_path);
4069 ret = get_cur_path(sctx, sctx->cur_ino,
4070 sctx->cur_inode_gen,
4071 valid_path);
4072 }
4073 if (ret < 0)
4074 goto out;
4075 } else {
4076 /*
4077 * If we previously orphanized a directory that
4078 * collided with a new reference that we already
4079 * processed, recompute the current path because
4080 * that directory may be part of the path.
4081 */
4082 if (orphanized_dir) {
4083 ret = refresh_ref_path(sctx, cur);
4084 if (ret < 0)
4085 goto out;
4086 }
4087 ret = send_unlink(sctx, cur->full_path);
4088 if (ret < 0)
4089 goto out;
4090 }
4091 }
4092
4093 }
4094
4095 list_for_each_entry(cur, &sctx->new_refs, list) {
4096 /*
4097 * We may have refs where the parent directory does not exist
4098		 * yet. This happens if the parent directory's inum is higher
4099 * than the current inum. To handle this case, we create the
4100 * parent directory out of order. But we need to check if this
4101 * did already happen before due to other refs in the same dir.
4102 */
4103 ret = get_cur_inode_state(sctx, cur->dir, cur->dir_gen);
4104 if (ret < 0)
4105 goto out;
4106 if (ret == inode_state_will_create) {
4107 ret = 0;
4108 /*
4109 * First check if any of the current inodes refs did
4110 * already create the dir.
4111 */
4112 list_for_each_entry(cur2, &sctx->new_refs, list) {
4113 if (cur == cur2)
4114 break;
4115 if (cur2->dir == cur->dir) {
4116 ret = 1;
4117 break;
4118 }
4119 }
4120
4121 /*
4122 * If that did not happen, check if a previous inode
4123 * did already create the dir.
4124 */
4125 if (!ret)
4126 ret = did_create_dir(sctx, cur->dir);
4127 if (ret < 0)
4128 goto out;
4129 if (!ret) {
4130 ret = send_create_inode(sctx, cur->dir);
4131 if (ret < 0)
4132 goto out;
4133 }
4134 }
4135
4136 if (S_ISDIR(sctx->cur_inode_mode) && sctx->parent_root) {
4137 ret = wait_for_dest_dir_move(sctx, cur, is_orphan);
4138 if (ret < 0)
4139 goto out;
4140 if (ret == 1) {
4141 can_rename = false;
4142 *pending_move = 1;
4143 }
4144 }
4145
4146 if (S_ISDIR(sctx->cur_inode_mode) && sctx->parent_root &&
4147 can_rename) {
4148 ret = wait_for_parent_move(sctx, cur, is_orphan);
4149 if (ret < 0)
4150 goto out;
4151 if (ret == 1) {
4152 can_rename = false;
4153 *pending_move = 1;
4154 }
4155 }
4156
4157 /*
4158 * link/move the ref to the new place. If we have an orphan
4159 * inode, move it and update valid_path. If not, link or move
4160 * it depending on the inode mode.
4161 */
4162 if (is_orphan && can_rename) {
4163 ret = send_rename(sctx, valid_path, cur->full_path);
4164 if (ret < 0)
4165 goto out;
4166 is_orphan = 0;
4167 ret = fs_path_copy(valid_path, cur->full_path);
4168 if (ret < 0)
4169 goto out;
4170 } else if (can_rename) {
4171 if (S_ISDIR(sctx->cur_inode_mode)) {
4172 /*
4173 * Dirs can't be linked, so move it. For moved
4174 * dirs, we always have one new and one deleted
4175 * ref. The deleted ref is ignored later.
4176 */
4177 ret = send_rename(sctx, valid_path,
4178 cur->full_path);
4179 if (!ret)
4180 ret = fs_path_copy(valid_path,
4181 cur->full_path);
4182 if (ret < 0)
4183 goto out;
4184 } else {
4185 /*
4186 * We might have previously orphanized an inode
4187 * which is an ancestor of our current inode,
4188 * so our reference's full path, which was
4189 * computed before any such orphanizations, must
4190 * be updated.
4191 */
4192 if (orphanized_dir) {
4193 ret = update_ref_path(sctx, cur);
4194 if (ret < 0)
4195 goto out;
4196 }
4197 ret = send_link(sctx, cur->full_path,
4198 valid_path);
4199 if (ret < 0)
4200 goto out;
4201 }
4202 }
4203 ret = dup_ref(cur, &check_dirs);
4204 if (ret < 0)
4205 goto out;
4206 }
4207
4208 if (S_ISDIR(sctx->cur_inode_mode) && sctx->cur_inode_deleted) {
4209 /*
4210 * Check if we can already rmdir the directory. If not,
4211 * orphanize it. For every dir item inside that gets deleted
4212 * later, we do this check again and rmdir it then if possible.
4213 * See the use of check_dirs for more details.
4214 */
4215 ret = can_rmdir(sctx, sctx->cur_ino, sctx->cur_inode_gen,
4216 sctx->cur_ino);
4217 if (ret < 0)
4218 goto out;
4219 if (ret) {
4220 ret = send_rmdir(sctx, valid_path);
4221 if (ret < 0)
4222 goto out;
4223 } else if (!is_orphan) {
4224 ret = orphanize_inode(sctx, sctx->cur_ino,
4225 sctx->cur_inode_gen, valid_path);
4226 if (ret < 0)
4227 goto out;
4228 is_orphan = 1;
4229 }
4230
4231 list_for_each_entry(cur, &sctx->deleted_refs, list) {
4232 ret = dup_ref(cur, &check_dirs);
4233 if (ret < 0)
4234 goto out;
4235 }
4236 } else if (S_ISDIR(sctx->cur_inode_mode) &&
4237 !list_empty(&sctx->deleted_refs)) {
4238 /*
4239 		 * We have a moved dir. Add the old parent to check_dirs.
4240 */
4241 cur = list_entry(sctx->deleted_refs.next, struct recorded_ref,
4242 list);
4243 ret = dup_ref(cur, &check_dirs);
4244 if (ret < 0)
4245 goto out;
4246 } else if (!S_ISDIR(sctx->cur_inode_mode)) {
4247 /*
4248 		 * We have a non-dir inode. Go through all deleted refs and
4249 * unlink them if they were not already overwritten by other
4250 * inodes.
4251 */
4252 list_for_each_entry(cur, &sctx->deleted_refs, list) {
4253 ret = did_overwrite_ref(sctx, cur->dir, cur->dir_gen,
4254 sctx->cur_ino, sctx->cur_inode_gen,
4255 cur->name, cur->name_len);
4256 if (ret < 0)
4257 goto out;
4258 if (!ret) {
4259 /*
4260 * If we orphanized any ancestor before, we need
4261 * to recompute the full path for deleted names,
4262 * since any such path was computed before we
4263 * processed any references and orphanized any
4264 * ancestor inode.
4265 */
4266 if (orphanized_ancestor) {
4267 ret = update_ref_path(sctx, cur);
4268 if (ret < 0)
4269 goto out;
4270 }
4271 ret = send_unlink(sctx, cur->full_path);
4272 if (ret < 0)
4273 goto out;
4274 }
4275 ret = dup_ref(cur, &check_dirs);
4276 if (ret < 0)
4277 goto out;
4278 }
4279 /*
4280 * If the inode is still orphan, unlink the orphan. This may
4281 * happen when a previous inode did overwrite the first ref
4282 * of this inode and no new refs were added for the current
4283 * inode. Unlinking does not mean that the inode is deleted in
4284 * all cases. There may still be links to this inode in other
4285 * places.
4286 */
4287 if (is_orphan) {
4288 ret = send_unlink(sctx, valid_path);
4289 if (ret < 0)
4290 goto out;
4291 }
4292 }
4293
4294 /*
4295 	 * We have collected all parent dirs where cur_inode was once located.
4296 	 * We now go through all these dirs and check if they are pending
4297 	 * deletion and if it's finally possible to perform the rmdir now.
4298 	 * We also update the inode stats of the parent dirs here.
4299 */
4300 list_for_each_entry(cur, &check_dirs, list) {
4301 /*
4302 * In case we had refs into dirs that were not processed yet,
4303 * we don't need to do the utime and rmdir logic for these dirs.
4304 * The dir will be processed later.
4305 */
4306 if (cur->dir > sctx->cur_ino)
4307 continue;
4308
4309 ret = get_cur_inode_state(sctx, cur->dir, cur->dir_gen);
4310 if (ret < 0)
4311 goto out;
4312
4313 if (ret == inode_state_did_create ||
4314 ret == inode_state_no_change) {
4315 /* TODO delayed utimes */
4316 ret = send_utimes(sctx, cur->dir, cur->dir_gen);
4317 if (ret < 0)
4318 goto out;
4319 } else if (ret == inode_state_did_delete &&
4320 cur->dir != last_dir_ino_rm) {
4321 ret = can_rmdir(sctx, cur->dir, cur->dir_gen,
4322 sctx->cur_ino);
4323 if (ret < 0)
4324 goto out;
4325 if (ret) {
4326 ret = get_cur_path(sctx, cur->dir,
4327 cur->dir_gen, valid_path);
4328 if (ret < 0)
4329 goto out;
4330 ret = send_rmdir(sctx, valid_path);
4331 if (ret < 0)
4332 goto out;
4333 last_dir_ino_rm = cur->dir;
4334 }
4335 }
4336 }
4337
4338 ret = 0;
4339
4340out:
4341 __free_recorded_refs(&check_dirs);
4342 free_recorded_refs(sctx);
4343 fs_path_free(valid_path);
4344 return ret;
4345}
4346
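/*
 * Resolve the current full path of the parent directory @dir, append @name
 * to it and record the result in the given @refs list. On success the
 * allocated fs_path is owned by the recorded ref; on failure it is freed
 * here.
 */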
4347static int record_ref(struct btrfs_root *root, u64 dir, struct fs_path *name,
4348 void *ctx, struct list_head *refs)
4349{
4350 int ret = 0;
4351 struct send_ctx *sctx = ctx;
4352 struct fs_path *p;
4353 u64 gen;
4354
4355 p = fs_path_alloc();
4356 if (!p)
4357 return -ENOMEM;
4358
4359 ret = get_inode_info(root, dir, NULL, &gen, NULL, NULL,
4360 NULL, NULL);
4361 if (ret < 0)
4362 goto out;
4363
4364 ret = get_cur_path(sctx, dir, gen, p);
4365 if (ret < 0)
4366 goto out;
4367 ret = fs_path_add_path(p, name);
4368 if (ret < 0)
4369 goto out;
4370
4371 ret = __record_ref(refs, dir, gen, p);
4372
4373out:
4374 if (ret)
4375 fs_path_free(p);
4376 return ret;
4377}
4378
4379static int __record_new_ref(int num, u64 dir, int index,
4380 struct fs_path *name,
4381 void *ctx)
4382{
4383 struct send_ctx *sctx = ctx;
4384 return record_ref(sctx->send_root, dir, name, ctx, &sctx->new_refs);
4385}
4386
4387
4388static int __record_deleted_ref(int num, u64 dir, int index,
4389 struct fs_path *name,
4390 void *ctx)
4391{
4392 struct send_ctx *sctx = ctx;
4393 return record_ref(sctx->parent_root, dir, name, ctx,
4394 &sctx->deleted_refs);
4395}
4396
4397static int record_new_ref(struct send_ctx *sctx)
4398{
4399 int ret;
4400
4401 ret = iterate_inode_ref(sctx->send_root, sctx->left_path,
4402 sctx->cmp_key, 0, __record_new_ref, sctx);
4403 if (ret < 0)
4404 goto out;
4405 ret = 0;
4406
4407out:
4408 return ret;
4409}
4410
4411static int record_deleted_ref(struct send_ctx *sctx)
4412{
4413 int ret;
4414
4415 ret = iterate_inode_ref(sctx->parent_root, sctx->right_path,
4416 sctx->cmp_key, 0, __record_deleted_ref, sctx);
4417 if (ret < 0)
4418 goto out;
4419 ret = 0;
4420
4421out:
4422 return ret;
4423}
4424
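/*
 * Context for find_iref(): walk all names in an inode ref item and look for
 * one that matches the given (dir, dir_gen, name) triple. find_iref()
 * returns the index of the matching ref, or -ENOENT if there is none.
 */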
4425struct find_ref_ctx {
4426 u64 dir;
4427 u64 dir_gen;
4428 struct btrfs_root *root;
4429 struct fs_path *name;
4430 int found_idx;
4431};
4432
4433static int __find_iref(int num, u64 dir, int index,
4434 struct fs_path *name,
4435 void *ctx_)
4436{
4437 struct find_ref_ctx *ctx = ctx_;
4438 u64 dir_gen;
4439 int ret;
4440
4441 if (dir == ctx->dir && fs_path_len(name) == fs_path_len(ctx->name) &&
4442 strncmp(name->start, ctx->name->start, fs_path_len(name)) == 0) {
4443 /*
4444 * To avoid doing extra lookups we'll only do this if everything
4445 * else matches.
4446 */
4447 ret = get_inode_info(ctx->root, dir, NULL, &dir_gen, NULL,
4448 NULL, NULL, NULL);
4449 if (ret)
4450 return ret;
4451 if (dir_gen != ctx->dir_gen)
4452 return 0;
4453 ctx->found_idx = num;
4454 return 1;
4455 }
4456 return 0;
4457}
4458
4459static int find_iref(struct btrfs_root *root,
4460 struct btrfs_path *path,
4461 struct btrfs_key *key,
4462 u64 dir, u64 dir_gen, struct fs_path *name)
4463{
4464 int ret;
4465 struct find_ref_ctx ctx;
4466
4467 ctx.dir = dir;
4468 ctx.name = name;
4469 ctx.dir_gen = dir_gen;
4470 ctx.found_idx = -1;
4471 ctx.root = root;
4472
4473 ret = iterate_inode_ref(root, path, key, 0, __find_iref, &ctx);
4474 if (ret < 0)
4475 return ret;
4476
4477 if (ctx.found_idx == -1)
4478 return -ENOENT;
4479
4480 return ctx.found_idx;
4481}
4482
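/*
 * For an inode reported as changed we only record refs that exist in one
 * tree but not in the other: a ref found in both the send and the parent
 * snapshot is unchanged and needs no link/unlink operation.
 */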
4483static int __record_changed_new_ref(int num, u64 dir, int index,
4484 struct fs_path *name,
4485 void *ctx)
4486{
4487 u64 dir_gen;
4488 int ret;
4489 struct send_ctx *sctx = ctx;
4490
4491 ret = get_inode_info(sctx->send_root, dir, NULL, &dir_gen, NULL,
4492 NULL, NULL, NULL);
4493 if (ret)
4494 return ret;
4495
4496 ret = find_iref(sctx->parent_root, sctx->right_path,
4497 sctx->cmp_key, dir, dir_gen, name);
4498 if (ret == -ENOENT)
4499 ret = __record_new_ref(num, dir, index, name, sctx);
4500 else if (ret > 0)
4501 ret = 0;
4502
4503 return ret;
4504}
4505
4506static int __record_changed_deleted_ref(int num, u64 dir, int index,
4507 struct fs_path *name,
4508 void *ctx)
4509{
4510 u64 dir_gen;
4511 int ret;
4512 struct send_ctx *sctx = ctx;
4513
4514 ret = get_inode_info(sctx->parent_root, dir, NULL, &dir_gen, NULL,
4515 NULL, NULL, NULL);
4516 if (ret)
4517 return ret;
4518
4519 ret = find_iref(sctx->send_root, sctx->left_path, sctx->cmp_key,
4520 dir, dir_gen, name);
4521 if (ret == -ENOENT)
4522 ret = __record_deleted_ref(num, dir, index, name, sctx);
4523 else if (ret > 0)
4524 ret = 0;
4525
4526 return ret;
4527}
4528
4529static int record_changed_ref(struct send_ctx *sctx)
4530{
4531 int ret = 0;
4532
4533 ret = iterate_inode_ref(sctx->send_root, sctx->left_path,
4534 sctx->cmp_key, 0, __record_changed_new_ref, sctx);
4535 if (ret < 0)
4536 goto out;
4537 ret = iterate_inode_ref(sctx->parent_root, sctx->right_path,
4538 sctx->cmp_key, 0, __record_changed_deleted_ref, sctx);
4539 if (ret < 0)
4540 goto out;
4541 ret = 0;
4542
4543out:
4544 return ret;
4545}
4546
4547/*
4548  * Record and process all refs at once. Needed when an inode changes its
4549  * generation number, which means that it was deleted and recreated.
4550 */
4551static int process_all_refs(struct send_ctx *sctx,
4552 enum btrfs_compare_tree_result cmd)
4553{
4554 int ret;
4555 struct btrfs_root *root;
4556 struct btrfs_path *path;
4557 struct btrfs_key key;
4558 struct btrfs_key found_key;
4559 struct extent_buffer *eb;
4560 int slot;
4561 iterate_inode_ref_t cb;
4562 int pending_move = 0;
4563
4564 path = alloc_path_for_send();
4565 if (!path)
4566 return -ENOMEM;
4567
4568 if (cmd == BTRFS_COMPARE_TREE_NEW) {
4569 root = sctx->send_root;
4570 cb = __record_new_ref;
4571 } else if (cmd == BTRFS_COMPARE_TREE_DELETED) {
4572 root = sctx->parent_root;
4573 cb = __record_deleted_ref;
4574 } else {
4575 btrfs_err(sctx->send_root->fs_info,
4576 "Wrong command %d in process_all_refs", cmd);
4577 ret = -EINVAL;
4578 goto out;
4579 }
4580
4581 key.objectid = sctx->cmp_key->objectid;
4582 key.type = BTRFS_INODE_REF_KEY;
4583 key.offset = 0;
4584 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
4585 if (ret < 0)
4586 goto out;
4587
4588 while (1) {
4589 eb = path->nodes[0];
4590 slot = path->slots[0];
4591 if (slot >= btrfs_header_nritems(eb)) {
4592 ret = btrfs_next_leaf(root, path);
4593 if (ret < 0)
4594 goto out;
4595 else if (ret > 0)
4596 break;
4597 continue;
4598 }
4599
4600 btrfs_item_key_to_cpu(eb, &found_key, slot);
4601
4602 if (found_key.objectid != key.objectid ||
4603 (found_key.type != BTRFS_INODE_REF_KEY &&
4604 found_key.type != BTRFS_INODE_EXTREF_KEY))
4605 break;
4606
4607 ret = iterate_inode_ref(root, path, &found_key, 0, cb, sctx);
4608 if (ret < 0)
4609 goto out;
4610
4611 path->slots[0]++;
4612 }
4613 btrfs_release_path(path);
4614
4615 /*
4616 * We don't actually care about pending_move as we are simply
4617 	 * re-creating this inode and will be renaming it into place once we
4618 * rename the parent directory.
4619 */
4620 ret = process_recorded_refs(sctx, &pending_move);
4621out:
4622 btrfs_free_path(path);
4623 return ret;
4624}
4625
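/*
 * Emit a SET_XATTR command for the given path into the send stream.
 */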
4626static int send_set_xattr(struct send_ctx *sctx,
4627 struct fs_path *path,
4628 const char *name, int name_len,
4629 const char *data, int data_len)
4630{
4631 int ret = 0;
4632
4633 ret = begin_cmd(sctx, BTRFS_SEND_C_SET_XATTR);
4634 if (ret < 0)
4635 goto out;
4636
4637 TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, path);
4638 TLV_PUT_STRING(sctx, BTRFS_SEND_A_XATTR_NAME, name, name_len);
4639 TLV_PUT(sctx, BTRFS_SEND_A_XATTR_DATA, data, data_len);
4640
4641 ret = send_cmd(sctx);
4642
4643tlv_put_failure:
4644out:
4645 return ret;
4646}
4647
4648static int send_remove_xattr(struct send_ctx *sctx,
4649 struct fs_path *path,
4650 const char *name, int name_len)
4651{
4652 int ret = 0;
4653
4654 ret = begin_cmd(sctx, BTRFS_SEND_C_REMOVE_XATTR);
4655 if (ret < 0)
4656 goto out;
4657
4658 TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, path);
4659 TLV_PUT_STRING(sctx, BTRFS_SEND_A_XATTR_NAME, name, name_len);
4660
4661 ret = send_cmd(sctx);
4662
4663tlv_put_failure:
4664out:
4665 return ret;
4666}
4667
4668static int __process_new_xattr(int num, struct btrfs_key *di_key,
4669 const char *name, int name_len, const char *data,
4670 int data_len, void *ctx)
4671{
4672 int ret;
4673 struct send_ctx *sctx = ctx;
4674 struct fs_path *p;
4675 struct posix_acl_xattr_header dummy_acl;
4676
4677 /* Capabilities are emitted by finish_inode_if_needed */
4678 if (!strncmp(name, XATTR_NAME_CAPS, name_len))
4679 return 0;
4680
4681 p = fs_path_alloc();
4682 if (!p)
4683 return -ENOMEM;
4684
4685 /*
4686 * This hack is needed because empty acls are stored as zero byte
4687 	 * data in xattrs. The problem is that receiving these zero byte
4688 	 * acls will fail later. To fix this, we send a dummy acl list that
4689 * only contains the version number and no entries.
4690 */
4691 if (!strncmp(name, XATTR_NAME_POSIX_ACL_ACCESS, name_len) ||
4692 !strncmp(name, XATTR_NAME_POSIX_ACL_DEFAULT, name_len)) {
4693 if (data_len == 0) {
4694 dummy_acl.a_version =
4695 cpu_to_le32(POSIX_ACL_XATTR_VERSION);
4696 data = (char *)&dummy_acl;
4697 data_len = sizeof(dummy_acl);
4698 }
4699 }
4700
4701 ret = get_cur_path(sctx, sctx->cur_ino, sctx->cur_inode_gen, p);
4702 if (ret < 0)
4703 goto out;
4704
4705 ret = send_set_xattr(sctx, p, name, name_len, data, data_len);
4706
4707out:
4708 fs_path_free(p);
4709 return ret;
4710}
4711
4712static int __process_deleted_xattr(int num, struct btrfs_key *di_key,
4713 const char *name, int name_len,
4714 const char *data, int data_len, void *ctx)
4715{
4716 int ret;
4717 struct send_ctx *sctx = ctx;
4718 struct fs_path *p;
4719
4720 p = fs_path_alloc();
4721 if (!p)
4722 return -ENOMEM;
4723
4724 ret = get_cur_path(sctx, sctx->cur_ino, sctx->cur_inode_gen, p);
4725 if (ret < 0)
4726 goto out;
4727
4728 ret = send_remove_xattr(sctx, p, name, name_len);
4729
4730out:
4731 fs_path_free(p);
4732 return ret;
4733}
4734
4735static int process_new_xattr(struct send_ctx *sctx)
4736{
4737 int ret = 0;
4738
4739 ret = iterate_dir_item(sctx->send_root, sctx->left_path,
4740 __process_new_xattr, sctx);
4741
4742 return ret;
4743}
4744
4745static int process_deleted_xattr(struct send_ctx *sctx)
4746{
4747 return iterate_dir_item(sctx->parent_root, sctx->right_path,
4748 __process_deleted_xattr, sctx);
4749}
4750
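/*
 * Context for find_xattr(): look up an xattr by name in a dir item. On a
 * match, find_xattr() returns the item index and, if the caller passed a
 * non-NULL @data pointer, a copy of the xattr value that the caller must
 * kfree().
 */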
4751struct find_xattr_ctx {
4752 const char *name;
4753 int name_len;
4754 int found_idx;
4755 char *found_data;
4756 int found_data_len;
4757};
4758
4759static int __find_xattr(int num, struct btrfs_key *di_key, const char *name,
4760 int name_len, const char *data, int data_len, void *vctx)
4761{
4762 struct find_xattr_ctx *ctx = vctx;
4763
4764 if (name_len == ctx->name_len &&
4765 strncmp(name, ctx->name, name_len) == 0) {
4766 ctx->found_idx = num;
4767 ctx->found_data_len = data_len;
4768 ctx->found_data = kmemdup(data, data_len, GFP_KERNEL);
4769 if (!ctx->found_data)
4770 return -ENOMEM;
4771 return 1;
4772 }
4773 return 0;
4774}
4775
4776static int find_xattr(struct btrfs_root *root,
4777 struct btrfs_path *path,
4778 struct btrfs_key *key,
4779 const char *name, int name_len,
4780 char **data, int *data_len)
4781{
4782 int ret;
4783 struct find_xattr_ctx ctx;
4784
4785 ctx.name = name;
4786 ctx.name_len = name_len;
4787 ctx.found_idx = -1;
4788 ctx.found_data = NULL;
4789 ctx.found_data_len = 0;
4790
4791 ret = iterate_dir_item(root, path, __find_xattr, &ctx);
4792 if (ret < 0)
4793 return ret;
4794
4795 if (ctx.found_idx == -1)
4796 return -ENOENT;
4797 if (data) {
4798 *data = ctx.found_data;
4799 *data_len = ctx.found_data_len;
4800 } else {
4801 kfree(ctx.found_data);
4802 }
4803 return ctx.found_idx;
4804}
4805
4806
4807static int __process_changed_new_xattr(int num, struct btrfs_key *di_key,
4808 const char *name, int name_len,
4809 const char *data, int data_len,
4810 void *ctx)
4811{
4812 int ret;
4813 struct send_ctx *sctx = ctx;
4814 char *found_data = NULL;
4815 int found_data_len = 0;
4816
4817 ret = find_xattr(sctx->parent_root, sctx->right_path,
4818 sctx->cmp_key, name, name_len, &found_data,
4819 &found_data_len);
4820 if (ret == -ENOENT) {
4821 ret = __process_new_xattr(num, di_key, name, name_len, data,
4822 data_len, ctx);
4823 } else if (ret >= 0) {
4824 if (data_len != found_data_len ||
4825 memcmp(data, found_data, data_len)) {
4826 ret = __process_new_xattr(num, di_key, name, name_len,
4827 data, data_len, ctx);
4828 } else {
4829 ret = 0;
4830 }
4831 }
4832
4833 kfree(found_data);
4834 return ret;
4835}
4836
4837static int __process_changed_deleted_xattr(int num, struct btrfs_key *di_key,
4838 const char *name, int name_len,
4839 const char *data, int data_len,
4840 void *ctx)
4841{
4842 int ret;
4843 struct send_ctx *sctx = ctx;
4844
4845 ret = find_xattr(sctx->send_root, sctx->left_path, sctx->cmp_key,
4846 name, name_len, NULL, NULL);
4847 if (ret == -ENOENT)
4848 ret = __process_deleted_xattr(num, di_key, name, name_len, data,
4849 data_len, ctx);
4850 else if (ret >= 0)
4851 ret = 0;
4852
4853 return ret;
4854}
4855
4856static int process_changed_xattr(struct send_ctx *sctx)
4857{
4858 int ret = 0;
4859
4860 ret = iterate_dir_item(sctx->send_root, sctx->left_path,
4861 __process_changed_new_xattr, sctx);
4862 if (ret < 0)
4863 goto out;
4864 ret = iterate_dir_item(sctx->parent_root, sctx->right_path,
4865 __process_changed_deleted_xattr, sctx);
4866
4867out:
4868 return ret;
4869}
4870
4871static int process_all_new_xattrs(struct send_ctx *sctx)
4872{
4873 int ret;
4874 struct btrfs_root *root;
4875 struct btrfs_path *path;
4876 struct btrfs_key key;
4877 struct btrfs_key found_key;
4878 struct extent_buffer *eb;
4879 int slot;
4880
4881 path = alloc_path_for_send();
4882 if (!path)
4883 return -ENOMEM;
4884
4885 root = sctx->send_root;
4886
4887 key.objectid = sctx->cmp_key->objectid;
4888 key.type = BTRFS_XATTR_ITEM_KEY;
4889 key.offset = 0;
4890 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
4891 if (ret < 0)
4892 goto out;
4893
4894 while (1) {
4895 eb = path->nodes[0];
4896 slot = path->slots[0];
4897 if (slot >= btrfs_header_nritems(eb)) {
4898 ret = btrfs_next_leaf(root, path);
4899 if (ret < 0) {
4900 goto out;
4901 } else if (ret > 0) {
4902 ret = 0;
4903 break;
4904 }
4905 continue;
4906 }
4907
4908 btrfs_item_key_to_cpu(eb, &found_key, slot);
4909 if (found_key.objectid != key.objectid ||
4910 found_key.type != key.type) {
4911 ret = 0;
4912 goto out;
4913 }
4914
4915 ret = iterate_dir_item(root, path, __process_new_xattr, sctx);
4916 if (ret < 0)
4917 goto out;
4918
4919 path->slots[0]++;
4920 }
4921
4922out:
4923 btrfs_free_path(path);
4924 return ret;
4925}
4926
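/*
 * Maximum amount of file data that is put into a single write command. The
 * 16K headroom leaves room in the send buffer for the command header and
 * the other attributes (path, offset, data TLV header) sent along with the
 * data.
 */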
4927static inline u64 max_send_read_size(const struct send_ctx *sctx)
4928{
4929 return sctx->send_max_size - SZ_16K;
4930}
4931
4932static int put_data_header(struct send_ctx *sctx, u32 len)
4933{
4934 struct btrfs_tlv_header *hdr;
4935
4936 if (sctx->send_max_size - sctx->send_size < sizeof(*hdr) + len)
4937 return -EOVERFLOW;
4938 hdr = (struct btrfs_tlv_header *)(sctx->send_buf + sctx->send_size);
4939 put_unaligned_le16(BTRFS_SEND_A_DATA, &hdr->tlv_type);
4940 put_unaligned_le16(len, &hdr->tlv_len);
4941 sctx->send_size += sizeof(*hdr);
4942 return 0;
4943}
4944
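/*
 * Copy @len bytes of the current inode's data, starting at @offset, from
 * the page cache into the send buffer, preceded by a data TLV header.
 * Pages that are not uptodate are read in via btrfs_readpage(), using
 * readahead to speed up sequential reads.
 */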
4945static int put_file_data(struct send_ctx *sctx, u64 offset, u32 len)
4946{
4947 struct btrfs_root *root = sctx->send_root;
4948 struct btrfs_fs_info *fs_info = root->fs_info;
4949 struct inode *inode;
4950 struct page *page;
4951 pgoff_t index = offset >> PAGE_SHIFT;
4952 pgoff_t last_index;
4953 unsigned pg_offset = offset_in_page(offset);
4954 int ret;
4955
4956 ret = put_data_header(sctx, len);
4957 if (ret)
4958 return ret;
4959
4960 inode = btrfs_iget(fs_info->sb, sctx->cur_ino, root);
4961 if (IS_ERR(inode))
4962 return PTR_ERR(inode);
4963
4964 last_index = (offset + len - 1) >> PAGE_SHIFT;
4965
4966 /* initial readahead */
4967 memset(&sctx->ra, 0, sizeof(struct file_ra_state));
4968 file_ra_state_init(&sctx->ra, inode->i_mapping);
4969
4970 while (index <= last_index) {
4971 unsigned cur_len = min_t(unsigned, len,
4972 PAGE_SIZE - pg_offset);
4973
4974 page = find_lock_page(inode->i_mapping, index);
4975 if (!page) {
4976 page_cache_sync_readahead(inode->i_mapping, &sctx->ra,
4977 NULL, index, last_index + 1 - index);
4978
4979 page = find_or_create_page(inode->i_mapping, index,
4980 GFP_KERNEL);
4981 if (!page) {
4982 ret = -ENOMEM;
4983 break;
4984 }
4985 }
4986
4987 if (PageReadahead(page)) {
4988 page_cache_async_readahead(inode->i_mapping, &sctx->ra,
4989 NULL, page, index, last_index + 1 - index);
4990 }
4991
4992 if (!PageUptodate(page)) {
4993 btrfs_readpage(NULL, page);
4994 lock_page(page);
4995 if (!PageUptodate(page)) {
4996 unlock_page(page);
4997 btrfs_err(fs_info,
4998 "send: IO error at offset %llu for inode %llu root %llu",
4999 page_offset(page), sctx->cur_ino,
5000 sctx->send_root->root_key.objectid);
5001 put_page(page);
5002 ret = -EIO;
5003 break;
5004 }
5005 }
5006
5007 memcpy_from_page(sctx->send_buf + sctx->send_size, page,
5008 pg_offset, cur_len);
5009 unlock_page(page);
5010 put_page(page);
5011 index++;
5012 pg_offset = 0;
5013 len -= cur_len;
5014 sctx->send_size += cur_len;
5015 }
5016 iput(inode);
5017 return ret;
5018}
5019
5020/*
5021 * Read some bytes from the current inode/file and send a write command to
5022 * user space.
5023 */
5024static int send_write(struct send_ctx *sctx, u64 offset, u32 len)
5025{
5026 struct btrfs_fs_info *fs_info = sctx->send_root->fs_info;
5027 int ret = 0;
5028 struct fs_path *p;
5029
5030 p = fs_path_alloc();
5031 if (!p)
5032 return -ENOMEM;
5033
5034 btrfs_debug(fs_info, "send_write offset=%llu, len=%d", offset, len);
5035
5036 ret = begin_cmd(sctx, BTRFS_SEND_C_WRITE);
5037 if (ret < 0)
5038 goto out;
5039
5040 ret = get_cur_path(sctx, sctx->cur_ino, sctx->cur_inode_gen, p);
5041 if (ret < 0)
5042 goto out;
5043
5044 TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, p);
5045 TLV_PUT_U64(sctx, BTRFS_SEND_A_FILE_OFFSET, offset);
5046 ret = put_file_data(sctx, offset, len);
5047 if (ret < 0)
5048 goto out;
5049
5050 ret = send_cmd(sctx);
5051
5052tlv_put_failure:
5053out:
5054 fs_path_free(p);
5055 return ret;
5056}
5057
5058/*
5059 * Send a clone command to user space.
5060 */
5061static int send_clone(struct send_ctx *sctx,
5062 u64 offset, u32 len,
5063 struct clone_root *clone_root)
5064{
5065 int ret = 0;
5066 struct fs_path *p;
5067 u64 gen;
5068
5069 btrfs_debug(sctx->send_root->fs_info,
5070 "send_clone offset=%llu, len=%d, clone_root=%llu, clone_inode=%llu, clone_offset=%llu",
5071 offset, len, clone_root->root->root_key.objectid,
5072 clone_root->ino, clone_root->offset);
5073
5074 p = fs_path_alloc();
5075 if (!p)
5076 return -ENOMEM;
5077
5078 ret = begin_cmd(sctx, BTRFS_SEND_C_CLONE);
5079 if (ret < 0)
5080 goto out;
5081
5082 ret = get_cur_path(sctx, sctx->cur_ino, sctx->cur_inode_gen, p);
5083 if (ret < 0)
5084 goto out;
5085
5086 TLV_PUT_U64(sctx, BTRFS_SEND_A_FILE_OFFSET, offset);
5087 TLV_PUT_U64(sctx, BTRFS_SEND_A_CLONE_LEN, len);
5088 TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, p);
5089
5090 if (clone_root->root == sctx->send_root) {
5091 ret = get_inode_info(sctx->send_root, clone_root->ino, NULL,
5092 &gen, NULL, NULL, NULL, NULL);
5093 if (ret < 0)
5094 goto out;
5095 ret = get_cur_path(sctx, clone_root->ino, gen, p);
5096 } else {
5097 ret = get_inode_path(clone_root->root, clone_root->ino, p);
5098 }
5099 if (ret < 0)
5100 goto out;
5101
5102 /*
5103 * If the parent we're using has a received_uuid set then use that as
5104 * our clone source as that is what we will look for when doing a
5105 * receive.
5106 *
5107 * This covers the case that we create a snapshot off of a received
5108 * subvolume and then use that as the parent and try to receive on a
5109 * different host.
5110 */
5111 if (!btrfs_is_empty_uuid(clone_root->root->root_item.received_uuid))
5112 TLV_PUT_UUID(sctx, BTRFS_SEND_A_CLONE_UUID,
5113 clone_root->root->root_item.received_uuid);
5114 else
5115 TLV_PUT_UUID(sctx, BTRFS_SEND_A_CLONE_UUID,
5116 clone_root->root->root_item.uuid);
5117 TLV_PUT_U64(sctx, BTRFS_SEND_A_CLONE_CTRANSID,
5118 btrfs_root_ctransid(&clone_root->root->root_item));
5119 TLV_PUT_PATH(sctx, BTRFS_SEND_A_CLONE_PATH, p);
5120 TLV_PUT_U64(sctx, BTRFS_SEND_A_CLONE_OFFSET,
5121 clone_root->offset);
5122
5123 ret = send_cmd(sctx);
5124
5125tlv_put_failure:
5126out:
5127 fs_path_free(p);
5128 return ret;
5129}
5130
5131/*
5132 * Send an update extent command to user space.
5133 */
5134static int send_update_extent(struct send_ctx *sctx,
5135 u64 offset, u32 len)
5136{
5137 int ret = 0;
5138 struct fs_path *p;
5139
5140 p = fs_path_alloc();
5141 if (!p)
5142 return -ENOMEM;
5143
5144 ret = begin_cmd(sctx, BTRFS_SEND_C_UPDATE_EXTENT);
5145 if (ret < 0)
5146 goto out;
5147
5148 ret = get_cur_path(sctx, sctx->cur_ino, sctx->cur_inode_gen, p);
5149 if (ret < 0)
5150 goto out;
5151
5152 TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, p);
5153 TLV_PUT_U64(sctx, BTRFS_SEND_A_FILE_OFFSET, offset);
5154 TLV_PUT_U64(sctx, BTRFS_SEND_A_SIZE, len);
5155
5156 ret = send_cmd(sctx);
5157
5158tlv_put_failure:
5159out:
5160 fs_path_free(p);
5161 return ret;
5162}
5163
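/*
 * Emulate a hole from cur_inode_last_extent up to @end. The stream format
 * has no hole punching command, so the hole is sent as write commands full
 * of zeroes, chunked to the maximum read size (or as a single
 * UPDATE_EXTENT command when no file data is sent).
 */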
5164static int send_hole(struct send_ctx *sctx, u64 end)
5165{
5166 struct fs_path *p = NULL;
5167 u64 read_size = max_send_read_size(sctx);
5168 u64 offset = sctx->cur_inode_last_extent;
5169 int ret = 0;
5170
5171 /*
5172 * A hole that starts at EOF or beyond it. Since we do not yet support
5173 * fallocate (for extent preallocation and hole punching), sending a
5174 * write of zeroes starting at EOF or beyond would later require issuing
5175 * a truncate operation which would undo the write and achieve nothing.
5176 */
5177 if (offset >= sctx->cur_inode_size)
5178 return 0;
5179
5180 /*
5181 * Don't go beyond the inode's i_size due to prealloc extents that start
5182 * after the i_size.
5183 */
5184 end = min_t(u64, end, sctx->cur_inode_size);
5185
5186 if (sctx->flags & BTRFS_SEND_FLAG_NO_FILE_DATA)
5187 return send_update_extent(sctx, offset, end - offset);
5188
5189 p = fs_path_alloc();
5190 if (!p)
5191 return -ENOMEM;
5192 ret = get_cur_path(sctx, sctx->cur_ino, sctx->cur_inode_gen, p);
5193 if (ret < 0)
5194 goto tlv_put_failure;
5195 while (offset < end) {
5196 u64 len = min(end - offset, read_size);
5197
5198 ret = begin_cmd(sctx, BTRFS_SEND_C_WRITE);
5199 if (ret < 0)
5200 break;
5201 TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, p);
5202 TLV_PUT_U64(sctx, BTRFS_SEND_A_FILE_OFFSET, offset);
5203 ret = put_data_header(sctx, len);
5204 if (ret < 0)
5205 break;
5206 memset(sctx->send_buf + sctx->send_size, 0, len);
5207 sctx->send_size += len;
5208 ret = send_cmd(sctx);
5209 if (ret < 0)
5210 break;
5211 offset += len;
5212 }
5213 sctx->cur_inode_next_write_offset = offset;
5214tlv_put_failure:
5215 fs_path_free(p);
5216 return ret;
5217}
5218
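/*
 * Send the data of the given file range as a series of write commands,
 * each at most max_send_read_size() bytes, or as a single UPDATE_EXTENT
 * command when the stream carries no file data.
 */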
5219static int send_extent_data(struct send_ctx *sctx,
5220 const u64 offset,
5221 const u64 len)
5222{
5223 u64 read_size = max_send_read_size(sctx);
5224 u64 sent = 0;
5225
5226 if (sctx->flags & BTRFS_SEND_FLAG_NO_FILE_DATA)
5227 return send_update_extent(sctx, offset, len);
5228
5229 while (sent < len) {
5230 u64 size = min(len - sent, read_size);
5231 int ret;
5232
5233 ret = send_write(sctx, offset + sent, size);
5234 if (ret < 0)
5235 return ret;
5236 sent += size;
5237 }
5238 return 0;
5239}
5240
5241/*
5242 * Search for a capability xattr related to sctx->cur_ino. If the capability is
5243  * found, call the send_set_xattr function to emit it.
5244 *
5245 * Return 0 if there isn't a capability, or when the capability was emitted
5246 * successfully, or < 0 if an error occurred.
5247 */
5248static int send_capabilities(struct send_ctx *sctx)
5249{
5250 struct fs_path *fspath = NULL;
5251 struct btrfs_path *path;
5252 struct btrfs_dir_item *di;
5253 struct extent_buffer *leaf;
5254 unsigned long data_ptr;
5255 char *buf = NULL;
5256 int buf_len;
5257 int ret = 0;
5258
5259 path = alloc_path_for_send();
5260 if (!path)
5261 return -ENOMEM;
5262
5263 di = btrfs_lookup_xattr(NULL, sctx->send_root, path, sctx->cur_ino,
5264 XATTR_NAME_CAPS, strlen(XATTR_NAME_CAPS), 0);
5265 if (!di) {
5266 /* There is no xattr for this inode */
5267 goto out;
5268 } else if (IS_ERR(di)) {
5269 ret = PTR_ERR(di);
5270 goto out;
5271 }
5272
5273 leaf = path->nodes[0];
5274 buf_len = btrfs_dir_data_len(leaf, di);
5275
5276 fspath = fs_path_alloc();
5277 buf = kmalloc(buf_len, GFP_KERNEL);
5278 if (!fspath || !buf) {
5279 ret = -ENOMEM;
5280 goto out;
5281 }
5282
5283 ret = get_cur_path(sctx, sctx->cur_ino, sctx->cur_inode_gen, fspath);
5284 if (ret < 0)
5285 goto out;
5286
5287 data_ptr = (unsigned long)(di + 1) + btrfs_dir_name_len(leaf, di);
5288 read_extent_buffer(leaf, buf, data_ptr, buf_len);
5289
5290 ret = send_set_xattr(sctx, fspath, XATTR_NAME_CAPS,
5291 strlen(XATTR_NAME_CAPS), buf, buf_len);
5292out:
5293 kfree(buf);
5294 fs_path_free(fspath);
5295 btrfs_free_path(path);
5296 return ret;
5297}
5298
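/*
 * Try to satisfy the given file range with clone operations from
 * @clone_root, walking its extent items starting at clone_root->offset.
 * Ranges that can't be cloned (holes, diverging extents, an unaligned eof
 * block) fall back to regular writes via send_extent_data().
 */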
5299static int clone_range(struct send_ctx *sctx,
5300 struct clone_root *clone_root,
5301 const u64 disk_byte,
5302 u64 data_offset,
5303 u64 offset,
5304 u64 len)
5305{
5306 struct btrfs_path *path;
5307 struct btrfs_key key;
5308 int ret;
5309 u64 clone_src_i_size = 0;
5310
5311 /*
5312 * Prevent cloning from a zero offset with a length matching the sector
5313 * size because in some scenarios this will make the receiver fail.
5314 *
5315 * For example, if in the source filesystem the extent at offset 0
5316 * has a length of sectorsize and it was written using direct IO, then
5317 * it can never be an inline extent (even if compression is enabled).
5318 * Then this extent can be cloned in the original filesystem to a non
5319 * zero file offset, but it may not be possible to clone in the
5320 * destination filesystem because it can be inlined due to compression
5321 * on the destination filesystem (as the receiver's write operations are
5322 * always done using buffered IO). The same happens when the original
5323 * filesystem does not have compression enabled but the destination
5324 * filesystem has.
5325 */
5326 if (clone_root->offset == 0 &&
5327 len == sctx->send_root->fs_info->sectorsize)
5328 return send_extent_data(sctx, offset, len);
5329
5330 path = alloc_path_for_send();
5331 if (!path)
5332 return -ENOMEM;
5333
5334 /*
5335 	 * There are inodes that have extents that lie behind their i_size. Don't
5336 * accept clones from these extents.
5337 */
5338 ret = __get_inode_info(clone_root->root, path, clone_root->ino,
5339 &clone_src_i_size, NULL, NULL, NULL, NULL, NULL);
5340 btrfs_release_path(path);
5341 if (ret < 0)
5342 goto out;
5343
5344 /*
5345 * We can't send a clone operation for the entire range if we find
5346 * extent items in the respective range in the source file that
5347 * refer to different extents or if we find holes.
5348 * So check for that and do a mix of clone and regular write/copy
5349 * operations if needed.
5350 *
5351 * Example:
5352 *
5353 * mkfs.btrfs -f /dev/sda
5354 * mount /dev/sda /mnt
5355 * xfs_io -f -c "pwrite -S 0xaa 0K 100K" /mnt/foo
5356 * cp --reflink=always /mnt/foo /mnt/bar
5357 * xfs_io -c "pwrite -S 0xbb 50K 50K" /mnt/foo
5358 * btrfs subvolume snapshot -r /mnt /mnt/snap
5359 *
5360 * If when we send the snapshot and we are processing file bar (which
5361 * has a higher inode number than foo) we blindly send a clone operation
5362 * for the [0, 100K[ range from foo to bar, the receiver ends up getting
5363 * a file bar that matches the content of file foo - iow, doesn't match
5364 * the content from bar in the original filesystem.
5365 */
5366 key.objectid = clone_root->ino;
5367 key.type = BTRFS_EXTENT_DATA_KEY;
5368 key.offset = clone_root->offset;
5369 ret = btrfs_search_slot(NULL, clone_root->root, &key, path, 0, 0);
5370 if (ret < 0)
5371 goto out;
5372 if (ret > 0 && path->slots[0] > 0) {
5373 btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0] - 1);
5374 if (key.objectid == clone_root->ino &&
5375 key.type == BTRFS_EXTENT_DATA_KEY)
5376 path->slots[0]--;
5377 }
5378
5379 while (true) {
5380 struct extent_buffer *leaf = path->nodes[0];
5381 int slot = path->slots[0];
5382 struct btrfs_file_extent_item *ei;
5383 u8 type;
5384 u64 ext_len;
5385 u64 clone_len;
5386 u64 clone_data_offset;
5387
5388 if (slot >= btrfs_header_nritems(leaf)) {
5389 ret = btrfs_next_leaf(clone_root->root, path);
5390 if (ret < 0)
5391 goto out;
5392 else if (ret > 0)
5393 break;
5394 continue;
5395 }
5396
5397 btrfs_item_key_to_cpu(leaf, &key, slot);
5398
5399 /*
5400 * We might have an implicit trailing hole (NO_HOLES feature
5401 * enabled). We deal with it after leaving this loop.
5402 */
5403 if (key.objectid != clone_root->ino ||
5404 key.type != BTRFS_EXTENT_DATA_KEY)
5405 break;
5406
5407 ei = btrfs_item_ptr(leaf, slot, struct btrfs_file_extent_item);
5408 type = btrfs_file_extent_type(leaf, ei);
5409 if (type == BTRFS_FILE_EXTENT_INLINE) {
5410 ext_len = btrfs_file_extent_ram_bytes(leaf, ei);
5411 ext_len = PAGE_ALIGN(ext_len);
5412 } else {
5413 ext_len = btrfs_file_extent_num_bytes(leaf, ei);
5414 }
5415
5416 if (key.offset + ext_len <= clone_root->offset)
5417 goto next;
5418
5419 if (key.offset > clone_root->offset) {
5420 /* Implicit hole, NO_HOLES feature enabled. */
5421 u64 hole_len = key.offset - clone_root->offset;
5422
5423 if (hole_len > len)
5424 hole_len = len;
5425 ret = send_extent_data(sctx, offset, hole_len);
5426 if (ret < 0)
5427 goto out;
5428
5429 len -= hole_len;
5430 if (len == 0)
5431 break;
5432 offset += hole_len;
5433 clone_root->offset += hole_len;
5434 data_offset += hole_len;
5435 }
5436
5437 if (key.offset >= clone_root->offset + len)
5438 break;
5439
5440 if (key.offset >= clone_src_i_size)
5441 break;
5442
5443 if (key.offset + ext_len > clone_src_i_size)
5444 ext_len = clone_src_i_size - key.offset;
5445
5446 clone_data_offset = btrfs_file_extent_offset(leaf, ei);
5447 if (btrfs_file_extent_disk_bytenr(leaf, ei) == disk_byte) {
5448 clone_root->offset = key.offset;
5449 if (clone_data_offset < data_offset &&
5450 clone_data_offset + ext_len > data_offset) {
5451 u64 extent_offset;
5452
5453 extent_offset = data_offset - clone_data_offset;
5454 ext_len -= extent_offset;
5455 clone_data_offset += extent_offset;
5456 clone_root->offset += extent_offset;
5457 }
5458 }
5459
5460 clone_len = min_t(u64, ext_len, len);
5461
5462 if (btrfs_file_extent_disk_bytenr(leaf, ei) == disk_byte &&
5463 clone_data_offset == data_offset) {
5464 const u64 src_end = clone_root->offset + clone_len;
5465 const u64 sectorsize = SZ_64K;
5466
5467 /*
5468 * We can't clone the last block, when its size is not
5469 * sector size aligned, into the middle of a file. If we
5470 * do so, the receiver will get a failure (-EINVAL) when
5471 * trying to clone or will silently corrupt the data in
5472 * the destination file if it's on a kernel without the
5473 * fix introduced by commit ac765f83f1397646
5474 * ("Btrfs: fix data corruption due to cloning of eof
5475 * block).
5476 *
5477 * So issue a clone of the aligned down range plus a
5478 * regular write for the eof block, if we hit that case.
5479 *
5480 * Also, we use the maximum possible sector size, 64K,
5481 * because we don't know what's the sector size of the
5482 * filesystem that receives the stream, so we have to
5483 * assume the largest possible sector size.
5484 */
5485 if (src_end == clone_src_i_size &&
5486 !IS_ALIGNED(src_end, sectorsize) &&
5487 offset + clone_len < sctx->cur_inode_size) {
5488 u64 slen;
5489
5490 slen = ALIGN_DOWN(src_end - clone_root->offset,
5491 sectorsize);
5492 if (slen > 0) {
5493 ret = send_clone(sctx, offset, slen,
5494 clone_root);
5495 if (ret < 0)
5496 goto out;
5497 }
5498 ret = send_extent_data(sctx, offset + slen,
5499 clone_len - slen);
5500 } else {
5501 ret = send_clone(sctx, offset, clone_len,
5502 clone_root);
5503 }
5504 } else {
5505 ret = send_extent_data(sctx, offset, clone_len);
5506 }
5507
5508 if (ret < 0)
5509 goto out;
5510
5511 len -= clone_len;
5512 if (len == 0)
5513 break;
5514 offset += clone_len;
5515 clone_root->offset += clone_len;
5516
5517 /*
5518 * If we are cloning from the file we are currently processing,
5519 * and using the send root as the clone root, we must stop once
5520 * the current clone offset reaches the current eof of the file
5521 * at the receiver, otherwise we would issue an invalid clone
5522 * operation (source range going beyond eof) and cause the
5523 * receiver to fail. So if we reach the current eof, bail out
5524 * and fallback to a regular write.
5525 */
5526 if (clone_root->root == sctx->send_root &&
5527 clone_root->ino == sctx->cur_ino &&
5528 clone_root->offset >= sctx->cur_inode_next_write_offset)
5529 break;
5530
5531 data_offset += clone_len;
5532next:
5533 path->slots[0]++;
5534 }
5535
5536 if (len > 0)
5537 ret = send_extent_data(sctx, offset, len);
5538 else
5539 ret = 0;
5540out:
5541 btrfs_free_path(path);
5542 return ret;
5543}
5544
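/*
 * For a single file extent item, either clone it from the found clone
 * source or send its data with regular write commands. Cloning is only
 * attempted when a clone source exists and the end of the range is
 * aligned to the block size.
 */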
5545static int send_write_or_clone(struct send_ctx *sctx,
5546 struct btrfs_path *path,
5547 struct btrfs_key *key,
5548 struct clone_root *clone_root)
5549{
5550 int ret = 0;
5551 u64 offset = key->offset;
5552 u64 end;
5553 u64 bs = sctx->send_root->fs_info->sb->s_blocksize;
5554
5555 end = min_t(u64, btrfs_file_extent_end(path), sctx->cur_inode_size);
5556 if (offset >= end)
5557 return 0;
5558
5559 if (clone_root && IS_ALIGNED(end, bs)) {
5560 struct btrfs_file_extent_item *ei;
5561 u64 disk_byte;
5562 u64 data_offset;
5563
5564 ei = btrfs_item_ptr(path->nodes[0], path->slots[0],
5565 struct btrfs_file_extent_item);
5566 disk_byte = btrfs_file_extent_disk_bytenr(path->nodes[0], ei);
5567 data_offset = btrfs_file_extent_offset(path->nodes[0], ei);
5568 ret = clone_range(sctx, clone_root, disk_byte, data_offset,
5569 offset, end - offset);
5570 } else {
5571 ret = send_extent_data(sctx, offset, end - offset);
5572 }
5573 sctx->cur_inode_next_write_offset = end;
5574 return ret;
5575}
5576
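/*
 * Check if the extent at @left_path/@ekey in the send snapshot is fully
 * covered by extents in the parent snapshot that point to the same disk
 * data. Returns 1 if the extent is unchanged (nothing to send), 0 if it
 * changed, or a negative errno on error.
 */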
5577static int is_extent_unchanged(struct send_ctx *sctx,
5578 struct btrfs_path *left_path,
5579 struct btrfs_key *ekey)
5580{
5581 int ret = 0;
5582 struct btrfs_key key;
5583 struct btrfs_path *path = NULL;
5584 struct extent_buffer *eb;
5585 int slot;
5586 struct btrfs_key found_key;
5587 struct btrfs_file_extent_item *ei;
5588 u64 left_disknr;
5589 u64 right_disknr;
5590 u64 left_offset;
5591 u64 right_offset;
5592 u64 left_offset_fixed;
5593 u64 left_len;
5594 u64 right_len;
5595 u64 left_gen;
5596 u64 right_gen;
5597 u8 left_type;
5598 u8 right_type;
5599
5600 path = alloc_path_for_send();
5601 if (!path)
5602 return -ENOMEM;
5603
5604 eb = left_path->nodes[0];
5605 slot = left_path->slots[0];
5606 ei = btrfs_item_ptr(eb, slot, struct btrfs_file_extent_item);
5607 left_type = btrfs_file_extent_type(eb, ei);
5608
5609 if (left_type != BTRFS_FILE_EXTENT_REG) {
5610 ret = 0;
5611 goto out;
5612 }
5613 left_disknr = btrfs_file_extent_disk_bytenr(eb, ei);
5614 left_len = btrfs_file_extent_num_bytes(eb, ei);
5615 left_offset = btrfs_file_extent_offset(eb, ei);
5616 left_gen = btrfs_file_extent_generation(eb, ei);
5617
5618 /*
5619 	 * The following comments will refer to these graphics. L is the left
5620 	 * extent which we are checking at the moment. 1-8 are the right
5621 	 * extents that we iterate over.
5622 *
5623 * |-----L-----|
5624 * |-1-|-2a-|-3-|-4-|-5-|-6-|
5625 *
5626 * |-----L-----|
5627 * |--1--|-2b-|...(same as above)
5628 *
5629 * Alternative situation. Happens on files where extents got split.
5630 * |-----L-----|
5631 * |-----------7-----------|-6-|
5632 *
5633 * Alternative situation. Happens on files which got larger.
5634 * |-----L-----|
5635 * |-8-|
5636 * Nothing follows after 8.
5637 */
5638
5639 key.objectid = ekey->objectid;
5640 key.type = BTRFS_EXTENT_DATA_KEY;
5641 key.offset = ekey->offset;
5642 ret = btrfs_search_slot_for_read(sctx->parent_root, &key, path, 0, 0);
5643 if (ret < 0)
5644 goto out;
5645 if (ret) {
5646 ret = 0;
5647 goto out;
5648 }
5649
5650 /*
5651 * Handle special case where the right side has no extents at all.
5652 */
5653 eb = path->nodes[0];
5654 slot = path->slots[0];
5655 btrfs_item_key_to_cpu(eb, &found_key, slot);
5656 if (found_key.objectid != key.objectid ||
5657 found_key.type != key.type) {
5658 /* If we're a hole then just pretend nothing changed */
5659 ret = (left_disknr) ? 0 : 1;
5660 goto out;
5661 }
5662
5663 /*
5664 * We're now on 2a, 2b or 7.
5665 */
5666 key = found_key;
5667 while (key.offset < ekey->offset + left_len) {
5668 ei = btrfs_item_ptr(eb, slot, struct btrfs_file_extent_item);
5669 right_type = btrfs_file_extent_type(eb, ei);
5670 if (right_type != BTRFS_FILE_EXTENT_REG &&
5671 right_type != BTRFS_FILE_EXTENT_INLINE) {
5672 ret = 0;
5673 goto out;
5674 }
5675
5676 if (right_type == BTRFS_FILE_EXTENT_INLINE) {
5677 right_len = btrfs_file_extent_ram_bytes(eb, ei);
5678 right_len = PAGE_ALIGN(right_len);
5679 } else {
5680 right_len = btrfs_file_extent_num_bytes(eb, ei);
5681 }
5682
5683 /*
5684 * Are we at extent 8? If yes, we know the extent is changed.
5685 * This may only happen on the first iteration.
5686 */
5687 if (found_key.offset + right_len <= ekey->offset) {
5688 /* If we're a hole just pretend nothing changed */
5689 ret = (left_disknr) ? 0 : 1;
5690 goto out;
5691 }
5692
5693 /*
5694 		 * We checked the above condition for inline extents too because
5695 		 * we wanted to see whether an inline extent can be followed by a
5696 		 * regular extent. This should normally not happen, but it's
5697 		 * possible for example when we have an inline compressed extent
5698 		 * representing data with a size matching the page size (currently
5699 		 * the same as the sector size).
5700 */
5701 if (right_type == BTRFS_FILE_EXTENT_INLINE) {
5702 ret = 0;
5703 goto out;
5704 }
5705
5706 right_disknr = btrfs_file_extent_disk_bytenr(eb, ei);
5707 right_offset = btrfs_file_extent_offset(eb, ei);
5708 right_gen = btrfs_file_extent_generation(eb, ei);
5709
5710 left_offset_fixed = left_offset;
5711 if (key.offset < ekey->offset) {
5712 /* Fix the right offset for 2a and 7. */
5713 right_offset += ekey->offset - key.offset;
5714 } else {
5715 /* Fix the left offset for all behind 2a and 2b */
5716 left_offset_fixed += key.offset - ekey->offset;
5717 }
5718
5719 /*
5720 * Check if we have the same extent.
5721 */
5722 if (left_disknr != right_disknr ||
5723 left_offset_fixed != right_offset ||
5724 left_gen != right_gen) {
5725 ret = 0;
5726 goto out;
5727 }
5728
5729 /*
5730 * Go to the next extent.
5731 */
5732 ret = btrfs_next_item(sctx->parent_root, path);
5733 if (ret < 0)
5734 goto out;
5735 if (!ret) {
5736 eb = path->nodes[0];
5737 slot = path->slots[0];
5738 btrfs_item_key_to_cpu(eb, &found_key, slot);
5739 }
5740 if (ret || found_key.objectid != key.objectid ||
5741 found_key.type != key.type) {
5742 key.offset += right_len;
5743 break;
5744 }
5745 if (found_key.offset != key.offset + right_len) {
5746 ret = 0;
5747 goto out;
5748 }
5749 key = found_key;
5750 }
5751
5752 /*
5753 * We're now behind the left extent (treat as unchanged) or at the end
5754 * of the right side (treat as changed).
5755 */
5756 if (key.offset >= ekey->offset + left_len)
5757 ret = 1;
5758 else
5759 ret = 0;
5760
5761
5762out:
5763 btrfs_free_path(path);
5764 return ret;
5765}
5766
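/*
 * Set sctx->cur_inode_last_extent to the end offset of the last extent
 * item of the current inode that starts at or before @offset, or to 0 if
 * there is no such extent.
 */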
5767static int get_last_extent(struct send_ctx *sctx, u64 offset)
5768{
5769 struct btrfs_path *path;
5770 struct btrfs_root *root = sctx->send_root;
5771 struct btrfs_key key;
5772 int ret;
5773
5774 path = alloc_path_for_send();
5775 if (!path)
5776 return -ENOMEM;
5777
5778 sctx->cur_inode_last_extent = 0;
5779
5780 key.objectid = sctx->cur_ino;
5781 key.type = BTRFS_EXTENT_DATA_KEY;
5782 key.offset = offset;
5783 ret = btrfs_search_slot_for_read(root, &key, path, 0, 1);
5784 if (ret < 0)
5785 goto out;
5786 ret = 0;
5787 btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);
5788 if (key.objectid != sctx->cur_ino || key.type != BTRFS_EXTENT_DATA_KEY)
5789 goto out;
5790
5791 sctx->cur_inode_last_extent = btrfs_file_extent_end(path);
5792out:
5793 btrfs_free_path(path);
5794 return ret;
5795}
5796
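/*
 * Check whether the range [start, end) of the current inode is a hole in
 * the parent snapshot, i.e. covered by no extents or only by extents with
 * a zero disk_bytenr. Returns 1 if it is a hole, 0 if not, or a negative
 * errno on error.
 */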
5797static int range_is_hole_in_parent(struct send_ctx *sctx,
5798 const u64 start,
5799 const u64 end)
5800{
5801 struct btrfs_path *path;
5802 struct btrfs_key key;
5803 struct btrfs_root *root = sctx->parent_root;
5804 u64 search_start = start;
5805 int ret;
5806
5807 path = alloc_path_for_send();
5808 if (!path)
5809 return -ENOMEM;
5810
5811 key.objectid = sctx->cur_ino;
5812 key.type = BTRFS_EXTENT_DATA_KEY;
5813 key.offset = search_start;
5814 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
5815 if (ret < 0)
5816 goto out;
5817 if (ret > 0 && path->slots[0] > 0)
5818 path->slots[0]--;
5819
5820 while (search_start < end) {
5821 struct extent_buffer *leaf = path->nodes[0];
5822 int slot = path->slots[0];
5823 struct btrfs_file_extent_item *fi;
5824 u64 extent_end;
5825
5826 if (slot >= btrfs_header_nritems(leaf)) {
5827 ret = btrfs_next_leaf(root, path);
5828 if (ret < 0)
5829 goto out;
5830 else if (ret > 0)
5831 break;
5832 continue;
5833 }
5834
5835 btrfs_item_key_to_cpu(leaf, &key, slot);
5836 if (key.objectid < sctx->cur_ino ||
5837 key.type < BTRFS_EXTENT_DATA_KEY)
5838 goto next;
5839 if (key.objectid > sctx->cur_ino ||
5840 key.type > BTRFS_EXTENT_DATA_KEY ||
5841 key.offset >= end)
5842 break;
5843
5844 fi = btrfs_item_ptr(leaf, slot, struct btrfs_file_extent_item);
5845 extent_end = btrfs_file_extent_end(path);
5846 if (extent_end <= start)
5847 goto next;
5848 if (btrfs_file_extent_disk_bytenr(leaf, fi) == 0) {
5849 search_start = extent_end;
5850 goto next;
5851 }
5852 ret = 0;
5853 goto out;
5854next:
5855 path->slots[0]++;
5856 }
5857 ret = 1;
5858out:
5859 btrfs_free_path(path);
5860 return ret;
5861}
5862
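/*
 * If there is a gap between the end of the last processed extent and the
 * offset of the current one, send that gap as a hole, unless the same
 * range is already a hole in the parent snapshot.
 */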
5863static int maybe_send_hole(struct send_ctx *sctx, struct btrfs_path *path,
5864 struct btrfs_key *key)
5865{
5866 int ret = 0;
5867
5868 if (sctx->cur_ino != key->objectid || !need_send_hole(sctx))
5869 return 0;
5870
5871 if (sctx->cur_inode_last_extent == (u64)-1) {
5872 ret = get_last_extent(sctx, key->offset - 1);
5873 if (ret)
5874 return ret;
5875 }
5876
5877 if (path->slots[0] == 0 &&
5878 sctx->cur_inode_last_extent < key->offset) {
5879 /*
5880 		 * We might have skipped entire leaves that contained only
5881 		 * file extent items for our current inode. These leaves have
5882 * a generation number smaller (older) than the one in the
5883 * current leaf and the leaf our last extent came from, and
5884 * are located between these 2 leafs.
5885 */
5886 ret = get_last_extent(sctx, key->offset - 1);
5887 if (ret)
5888 return ret;
5889 }
5890
5891 if (sctx->cur_inode_last_extent < key->offset) {
5892 ret = range_is_hole_in_parent(sctx,
5893 sctx->cur_inode_last_extent,
5894 key->offset);
5895 if (ret < 0)
5896 return ret;
5897 else if (ret == 0)
5898 ret = send_hole(sctx, key->offset);
5899 else
5900 ret = 0;
5901 }
5902 sctx->cur_inode_last_extent = btrfs_file_extent_end(path);
5903 return ret;
5904}
5905
5906static int process_extent(struct send_ctx *sctx,
5907 struct btrfs_path *path,
5908 struct btrfs_key *key)
5909{
5910 struct clone_root *found_clone = NULL;
5911 int ret = 0;
5912
5913 if (S_ISLNK(sctx->cur_inode_mode))
5914 return 0;
5915
5916 if (sctx->parent_root && !sctx->cur_inode_new) {
5917 ret = is_extent_unchanged(sctx, path, key);
5918 if (ret < 0)
5919 goto out;
5920 if (ret) {
5921 ret = 0;
5922 goto out_hole;
5923 }
5924 } else {
5925 struct btrfs_file_extent_item *ei;
5926 u8 type;
5927
5928 ei = btrfs_item_ptr(path->nodes[0], path->slots[0],
5929 struct btrfs_file_extent_item);
5930 type = btrfs_file_extent_type(path->nodes[0], ei);
5931 if (type == BTRFS_FILE_EXTENT_PREALLOC ||
5932 type == BTRFS_FILE_EXTENT_REG) {
5933 /*
5934 * The send spec does not have a prealloc command yet,
5935 * so just leave a hole for prealloc'ed extents until
5936 * we have enough commands queued up to justify rev'ing
5937 * the send spec.
5938 */
5939 if (type == BTRFS_FILE_EXTENT_PREALLOC) {
5940 ret = 0;
5941 goto out;
5942 }
5943
5944 /* Have a hole, just skip it. */
5945 if (btrfs_file_extent_disk_bytenr(path->nodes[0], ei) == 0) {
5946 ret = 0;
5947 goto out;
5948 }
5949 }
5950 }
5951
5952 ret = find_extent_clone(sctx, path, key->objectid, key->offset,
5953 sctx->cur_inode_size, &found_clone);
5954 if (ret != -ENOENT && ret < 0)
5955 goto out;
5956
5957 ret = send_write_or_clone(sctx, path, key, found_clone);
5958 if (ret)
5959 goto out;
5960out_hole:
5961 ret = maybe_send_hole(sctx, path, key);
5962out:
5963 return ret;
5964}
5965
5966static int process_all_extents(struct send_ctx *sctx)
5967{
5968 int ret;
5969 struct btrfs_root *root;
5970 struct btrfs_path *path;
5971 struct btrfs_key key;
5972 struct btrfs_key found_key;
5973 struct extent_buffer *eb;
5974 int slot;
5975
5976 root = sctx->send_root;
5977 path = alloc_path_for_send();
5978 if (!path)
5979 return -ENOMEM;
5980
5981 key.objectid = sctx->cmp_key->objectid;
5982 key.type = BTRFS_EXTENT_DATA_KEY;
5983 key.offset = 0;
5984 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
5985 if (ret < 0)
5986 goto out;
5987
5988 while (1) {
5989 eb = path->nodes[0];
5990 slot = path->slots[0];
5991
5992 if (slot >= btrfs_header_nritems(eb)) {
5993 ret = btrfs_next_leaf(root, path);
5994 if (ret < 0) {
5995 goto out;
5996 } else if (ret > 0) {
5997 ret = 0;
5998 break;
5999 }
6000 continue;
6001 }
6002
6003 btrfs_item_key_to_cpu(eb, &found_key, slot);
6004
6005 if (found_key.objectid != key.objectid ||
6006 found_key.type != key.type) {
6007 ret = 0;
6008 goto out;
6009 }
6010
6011 ret = process_extent(sctx, path, &found_key);
6012 if (ret < 0)
6013 goto out;
6014
6015 path->slots[0]++;
6016 }
6017
6018out:
6019 btrfs_free_path(path);
6020 return ret;
6021}
6022
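/*
 * Process the recorded new/deleted refs of the current inode once we have
 * moved past its ref items (or reached the end of the inode), setting
 * @refs_processed when that actually happened.
 */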
6023static int process_recorded_refs_if_needed(struct send_ctx *sctx, int at_end,
6024 int *pending_move,
6025 int *refs_processed)
6026{
6027 int ret = 0;
6028
6029 if (sctx->cur_ino == 0)
6030 goto out;
6031 if (!at_end && sctx->cur_ino == sctx->cmp_key->objectid &&
6032 sctx->cmp_key->type <= BTRFS_INODE_EXTREF_KEY)
6033 goto out;
6034 if (list_empty(&sctx->new_refs) && list_empty(&sctx->deleted_refs))
6035 goto out;
6036
6037 ret = process_recorded_refs(sctx, pending_move);
6038 if (ret < 0)
6039 goto out;
6040
6041 *refs_processed = 1;
6042out:
6043 return ret;
6044}
6045
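/*
 * Finish the current inode once all its items were processed: flush any
 * still pending recorded refs, then emit the deferred truncate, chown,
 * chmod, capability and utimes commands as needed.
 */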
6046static int finish_inode_if_needed(struct send_ctx *sctx, int at_end)
6047{
6048 int ret = 0;
6049 u64 left_mode;
6050 u64 left_uid;
6051 u64 left_gid;
6052 u64 right_mode;
6053 u64 right_uid;
6054 u64 right_gid;
6055 int need_chmod = 0;
6056 int need_chown = 0;
6057 int need_truncate = 1;
6058 int pending_move = 0;
6059 int refs_processed = 0;
6060
6061 if (sctx->ignore_cur_inode)
6062 return 0;
6063
6064 ret = process_recorded_refs_if_needed(sctx, at_end, &pending_move,
6065 &refs_processed);
6066 if (ret < 0)
6067 goto out;
6068
6069 /*
6070 * We have processed the refs and thus need to advance send_progress.
6071 * Now, calls to get_cur_xxx will take the updated refs of the current
6072 * inode into account.
6073 *
6074 * On the other hand, if our current inode is a directory and couldn't
6075 * be moved/renamed because its parent was renamed/moved too and it has
6076 * a higher inode number, we can only move/rename our current inode
6077 * after we moved/renamed its parent. Therefore in this case operate on
6078 * the old path (pre move/rename) of our current inode, and the
6079 * move/rename will be performed later.
6080 */
6081 if (refs_processed && !pending_move)
6082 sctx->send_progress = sctx->cur_ino + 1;
6083
6084 if (sctx->cur_ino == 0 || sctx->cur_inode_deleted)
6085 goto out;
6086 if (!at_end && sctx->cmp_key->objectid == sctx->cur_ino)
6087 goto out;
6088
6089 ret = get_inode_info(sctx->send_root, sctx->cur_ino, NULL, NULL,
6090 &left_mode, &left_uid, &left_gid, NULL);
6091 if (ret < 0)
6092 goto out;
6093
6094 if (!sctx->parent_root || sctx->cur_inode_new) {
6095 need_chown = 1;
6096 if (!S_ISLNK(sctx->cur_inode_mode))
6097 need_chmod = 1;
6098 if (sctx->cur_inode_next_write_offset == sctx->cur_inode_size)
6099 need_truncate = 0;
6100 } else {
6101 u64 old_size;
6102
6103 ret = get_inode_info(sctx->parent_root, sctx->cur_ino,
6104 &old_size, NULL, &right_mode, &right_uid,
6105 &right_gid, NULL);
6106 if (ret < 0)
6107 goto out;
6108
6109 if (left_uid != right_uid || left_gid != right_gid)
6110 need_chown = 1;
6111 if (!S_ISLNK(sctx->cur_inode_mode) && left_mode != right_mode)
6112 need_chmod = 1;
6113 if ((old_size == sctx->cur_inode_size) ||
6114 (sctx->cur_inode_size > old_size &&
6115 sctx->cur_inode_next_write_offset == sctx->cur_inode_size))
6116 need_truncate = 0;
6117 }
6118
6119 if (S_ISREG(sctx->cur_inode_mode)) {
6120 if (need_send_hole(sctx)) {
6121 if (sctx->cur_inode_last_extent == (u64)-1 ||
6122 sctx->cur_inode_last_extent <
6123 sctx->cur_inode_size) {
6124 ret = get_last_extent(sctx, (u64)-1);
6125 if (ret)
6126 goto out;
6127 }
6128 if (sctx->cur_inode_last_extent <
6129 sctx->cur_inode_size) {
6130 ret = send_hole(sctx, sctx->cur_inode_size);
6131 if (ret)
6132 goto out;
6133 }
6134 }
6135 if (need_truncate) {
6136 ret = send_truncate(sctx, sctx->cur_ino,
6137 sctx->cur_inode_gen,
6138 sctx->cur_inode_size);
6139 if (ret < 0)
6140 goto out;
6141 }
6142 }
6143
6144 if (need_chown) {
6145 ret = send_chown(sctx, sctx->cur_ino, sctx->cur_inode_gen,
6146 left_uid, left_gid);
6147 if (ret < 0)
6148 goto out;
6149 }
6150 if (need_chmod) {
6151 ret = send_chmod(sctx, sctx->cur_ino, sctx->cur_inode_gen,
6152 left_mode);
6153 if (ret < 0)
6154 goto out;
6155 }
6156
6157 ret = send_capabilities(sctx);
6158 if (ret < 0)
6159 goto out;
6160
6161 /*
6162 * If other directory inodes depended on our current directory
6163 * inode's move/rename, now do their move/rename operations.
6164 */
6165 if (!is_waiting_for_move(sctx, sctx->cur_ino)) {
6166 ret = apply_children_dir_moves(sctx);
6167 if (ret)
6168 goto out;
6169 /*
6170 		 * We need to send the utimes every time, no matter if the inode
6171 		 * actually changed between the two trees, as we have done changes
6172 		 * to the inode before. If our inode is a directory and it's
6173 		 * waiting to be moved/renamed, we will send its utimes when it's
6174 		 * moved/renamed, therefore we don't need to do it here.
6175 */
6176 sctx->send_progress = sctx->cur_ino + 1;
6177 ret = send_utimes(sctx, sctx->cur_ino, sctx->cur_inode_gen);
6178 if (ret < 0)
6179 goto out;
6180 }
6181
6182out:
6183 return ret;
6184}
6185
6186struct parent_paths_ctx {
6187 struct list_head *refs;
6188 struct send_ctx *sctx;
6189};
6190
6191static int record_parent_ref(int num, u64 dir, int index, struct fs_path *name,
6192 void *ctx)
6193{
6194 struct parent_paths_ctx *ppctx = ctx;
6195
6196 return record_ref(ppctx->sctx->parent_root, dir, name, ppctx->sctx,
6197 ppctx->refs);
6198}
6199
6200/*
6201 * Issue unlink operations for all paths of the current inode found in the
6202 * parent snapshot.
6203 */
6204static int btrfs_unlink_all_paths(struct send_ctx *sctx)
6205{
6206 LIST_HEAD(deleted_refs);
6207 struct btrfs_path *path;
6208 struct btrfs_key key;
6209 struct parent_paths_ctx ctx;
6210 int ret;
6211
6212 path = alloc_path_for_send();
6213 if (!path)
6214 return -ENOMEM;
6215
6216 key.objectid = sctx->cur_ino;
6217 key.type = BTRFS_INODE_REF_KEY;
6218 key.offset = 0;
6219 ret = btrfs_search_slot(NULL, sctx->parent_root, &key, path, 0, 0);
6220 if (ret < 0)
6221 goto out;
6222
6223 ctx.refs = &deleted_refs;
6224 ctx.sctx = sctx;
6225
6226 while (true) {
6227 struct extent_buffer *eb = path->nodes[0];
6228 int slot = path->slots[0];
6229
6230 if (slot >= btrfs_header_nritems(eb)) {
6231 ret = btrfs_next_leaf(sctx->parent_root, path);
6232 if (ret < 0)
6233 goto out;
6234 else if (ret > 0)
6235 break;
6236 continue;
6237 }
6238
6239 btrfs_item_key_to_cpu(eb, &key, slot);
6240 if (key.objectid != sctx->cur_ino)
6241 break;
6242 if (key.type != BTRFS_INODE_REF_KEY &&
6243 key.type != BTRFS_INODE_EXTREF_KEY)
6244 break;
6245
6246 ret = iterate_inode_ref(sctx->parent_root, path, &key, 1,
6247 record_parent_ref, &ctx);
6248 if (ret < 0)
6249 goto out;
6250
6251 path->slots[0]++;
6252 }
6253
6254 while (!list_empty(&deleted_refs)) {
6255 struct recorded_ref *ref;
6256
6257 ref = list_first_entry(&deleted_refs, struct recorded_ref, list);
6258 ret = send_unlink(sctx, ref->full_path);
6259 if (ret < 0)
6260 goto out;
6261 fs_path_free(ref->full_path);
6262 list_del(&ref->list);
6263 kfree(ref);
6264 }
6265 ret = 0;
6266out:
6267 btrfs_free_path(path);
6268 if (ret)
6269 __free_recorded_refs(&deleted_refs);
6270 return ret;
6271}
6272
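/*
 * Handle a new, deleted or changed inode item reported by the tree
 * comparison: reset the per inode state in @sctx and, depending on the
 * result, create the inode, mark it as deleted, or treat a changed
 * generation as delete + re-create.
 */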
static int changed_inode(struct send_ctx *sctx,
			 enum btrfs_compare_tree_result result)
{
	int ret = 0;
	struct btrfs_key *key = sctx->cmp_key;
	struct btrfs_inode_item *left_ii = NULL;
	struct btrfs_inode_item *right_ii = NULL;
	u64 left_gen = 0;
	u64 right_gen = 0;

	sctx->cur_ino = key->objectid;
	sctx->cur_inode_new_gen = 0;
	sctx->cur_inode_last_extent = (u64)-1;
	sctx->cur_inode_next_write_offset = 0;
	sctx->ignore_cur_inode = false;

	/*
	 * Set send_progress to current inode. This will tell all get_cur_xxx
	 * functions that the current inode's refs are not updated yet. Later,
	 * when process_recorded_refs is finished, it is set to cur_ino + 1.
	 */
	sctx->send_progress = sctx->cur_ino;

	if (result == BTRFS_COMPARE_TREE_NEW ||
	    result == BTRFS_COMPARE_TREE_CHANGED) {
		left_ii = btrfs_item_ptr(sctx->left_path->nodes[0],
					 sctx->left_path->slots[0],
					 struct btrfs_inode_item);
		left_gen = btrfs_inode_generation(sctx->left_path->nodes[0],
						  left_ii);
	} else {
		right_ii = btrfs_item_ptr(sctx->right_path->nodes[0],
					  sctx->right_path->slots[0],
					  struct btrfs_inode_item);
		right_gen = btrfs_inode_generation(sctx->right_path->nodes[0],
						   right_ii);
	}
	if (result == BTRFS_COMPARE_TREE_CHANGED) {
		right_ii = btrfs_item_ptr(sctx->right_path->nodes[0],
					  sctx->right_path->slots[0],
					  struct btrfs_inode_item);

		right_gen = btrfs_inode_generation(sctx->right_path->nodes[0],
						   right_ii);

		/*
		 * The cur_ino = root dir case is special here. We can't treat
		 * the inode as deleted+reused because it would generate a
		 * stream that tries to delete/mkdir the root dir.
		 */
		if (left_gen != right_gen &&
		    sctx->cur_ino != BTRFS_FIRST_FREE_OBJECTID)
			sctx->cur_inode_new_gen = 1;
	}

	/*
	 * Normally we do not find inodes with a link count of zero (orphans)
	 * because the most common case is to create a snapshot and use it
	 * for a send operation. However other less common use cases involve
	 * using a subvolume and sending it after turning it to RO mode just
	 * after deleting all hard links of a file while holding an open
	 * file descriptor against it, or turning a RO snapshot into RW mode,
	 * keeping an open file descriptor against a file, deleting it and
	 * then turning the snapshot back to RO mode before using it for a
	 * send operation. So if we find such cases, ignore the inode and all
	 * its items completely if it's a new inode, or, if it's a changed
	 * inode, make sure all its previous paths (from the parent snapshot)
	 * are unlinked and all other inode items are ignored.
	 */
	if (result == BTRFS_COMPARE_TREE_NEW ||
	    result == BTRFS_COMPARE_TREE_CHANGED) {
		u32 nlinks;

		nlinks = btrfs_inode_nlink(sctx->left_path->nodes[0], left_ii);
		if (nlinks == 0) {
			sctx->ignore_cur_inode = true;
			if (result == BTRFS_COMPARE_TREE_CHANGED)
				ret = btrfs_unlink_all_paths(sctx);
			goto out;
		}
	}

	if (result == BTRFS_COMPARE_TREE_NEW) {
		sctx->cur_inode_gen = left_gen;
		sctx->cur_inode_new = 1;
		sctx->cur_inode_deleted = 0;
		sctx->cur_inode_size = btrfs_inode_size(
				sctx->left_path->nodes[0], left_ii);
		sctx->cur_inode_mode = btrfs_inode_mode(
				sctx->left_path->nodes[0], left_ii);
		sctx->cur_inode_rdev = btrfs_inode_rdev(
				sctx->left_path->nodes[0], left_ii);
		if (sctx->cur_ino != BTRFS_FIRST_FREE_OBJECTID)
			ret = send_create_inode_if_needed(sctx);
	} else if (result == BTRFS_COMPARE_TREE_DELETED) {
		sctx->cur_inode_gen = right_gen;
		sctx->cur_inode_new = 0;
		sctx->cur_inode_deleted = 1;
		sctx->cur_inode_size = btrfs_inode_size(
				sctx->right_path->nodes[0], right_ii);
		sctx->cur_inode_mode = btrfs_inode_mode(
				sctx->right_path->nodes[0], right_ii);
	} else if (result == BTRFS_COMPARE_TREE_CHANGED) {
		/*
		 * We need to do some special handling in case the inode was
		 * reported as changed with a changed generation number. This
		 * means that the original inode was deleted and new inode
		 * reused the same inum. So we have to treat the old inode as
		 * deleted and the new one as new.
		 */
		if (sctx->cur_inode_new_gen) {
			/*
			 * First, process the inode as if it was deleted.
			 */
			sctx->cur_inode_gen = right_gen;
			sctx->cur_inode_new = 0;
			sctx->cur_inode_deleted = 1;
			sctx->cur_inode_size = btrfs_inode_size(
					sctx->right_path->nodes[0], right_ii);
			sctx->cur_inode_mode = btrfs_inode_mode(
					sctx->right_path->nodes[0], right_ii);
			ret = process_all_refs(sctx,
					BTRFS_COMPARE_TREE_DELETED);
			if (ret < 0)
				goto out;

			/*
			 * Now process the inode as if it was new.
			 */
			sctx->cur_inode_gen = left_gen;
			sctx->cur_inode_new = 1;
			sctx->cur_inode_deleted = 0;
			sctx->cur_inode_size = btrfs_inode_size(
					sctx->left_path->nodes[0], left_ii);
			sctx->cur_inode_mode = btrfs_inode_mode(
					sctx->left_path->nodes[0], left_ii);
			sctx->cur_inode_rdev = btrfs_inode_rdev(
					sctx->left_path->nodes[0], left_ii);
			ret = send_create_inode_if_needed(sctx);
			if (ret < 0)
				goto out;

			ret = process_all_refs(sctx, BTRFS_COMPARE_TREE_NEW);
			if (ret < 0)
				goto out;
			/*
			 * Advance send_progress now as we did not get into
			 * process_recorded_refs_if_needed in the new_gen case.
			 */
			sctx->send_progress = sctx->cur_ino + 1;

			/*
			 * Now process all extents and xattrs of the inode as if
			 * they were all new.
			 */
			ret = process_all_extents(sctx);
			if (ret < 0)
				goto out;
			ret = process_all_new_xattrs(sctx);
			if (ret < 0)
				goto out;
		} else {
			sctx->cur_inode_gen = left_gen;
			sctx->cur_inode_new = 0;
			sctx->cur_inode_new_gen = 0;
			sctx->cur_inode_deleted = 0;
			sctx->cur_inode_size = btrfs_inode_size(
					sctx->left_path->nodes[0], left_ii);
			sctx->cur_inode_mode = btrfs_inode_mode(
					sctx->left_path->nodes[0], left_ii);
		}
	}

out:
	return ret;
}

/*
 * We have to process new refs before deleted refs, but btrfs_compare_trees
 * gives us the new and deleted refs mixed. To fix this, we record the
 * new/deleted refs first and later process them in process_recorded_refs.
 * For the cur_inode_new_gen case, we skip recording completely because
 * changed_inode already initiated processing of refs. The reason for this is
 * that in this case, btrfs_compare_trees actually compares the refs of two
 * different inodes. To fix this, process_all_refs is used in changed_inode to
 * handle all refs of the right tree as deleted and all refs of the left tree
 * as new.
 */
static int changed_ref(struct send_ctx *sctx,
		       enum btrfs_compare_tree_result result)
{
	int ret = 0;

	if (sctx->cur_ino != sctx->cmp_key->objectid) {
		inconsistent_snapshot_error(sctx, result, "reference");
		return -EIO;
	}

	if (!sctx->cur_inode_new_gen &&
	    sctx->cur_ino != BTRFS_FIRST_FREE_OBJECTID) {
		if (result == BTRFS_COMPARE_TREE_NEW)
			ret = record_new_ref(sctx);
		else if (result == BTRFS_COMPARE_TREE_DELETED)
			ret = record_deleted_ref(sctx);
		else if (result == BTRFS_COMPARE_TREE_CHANGED)
			ret = record_changed_ref(sctx);
	}

	return ret;
}

/*
 * Process new/deleted/changed xattrs. We skip processing in the
 * cur_inode_new_gen case because changed_inode already initiated processing
 * of xattrs. The reason is the same as in changed_ref.
 */
static int changed_xattr(struct send_ctx *sctx,
			 enum btrfs_compare_tree_result result)
{
	int ret = 0;

	if (sctx->cur_ino != sctx->cmp_key->objectid) {
		inconsistent_snapshot_error(sctx, result, "xattr");
		return -EIO;
	}

	if (!sctx->cur_inode_new_gen && !sctx->cur_inode_deleted) {
		if (result == BTRFS_COMPARE_TREE_NEW)
			ret = process_new_xattr(sctx);
		else if (result == BTRFS_COMPARE_TREE_DELETED)
			ret = process_deleted_xattr(sctx);
		else if (result == BTRFS_COMPARE_TREE_CHANGED)
			ret = process_changed_xattr(sctx);
	}

	return ret;
}

/*
 * Process new/deleted/changed extents. We skip processing in the
 * cur_inode_new_gen case because changed_inode already initiated processing
 * of extents. The reason is the same as in changed_ref.
 */
static int changed_extent(struct send_ctx *sctx,
			  enum btrfs_compare_tree_result result)
{
	int ret = 0;

	/*
	 * We have found an extent item that changed without the inode item
	 * having changed. This can happen either after relocation (where the
	 * disk_bytenr of an extent item is replaced at
	 * relocation.c:replace_file_extents()) or after deduplication into a
	 * file in both the parent and send snapshots (where an extent item can
	 * get modified or replaced with a new one). Note that deduplication
	 * updates the inode item, but it only changes the iversion (sequence
	 * field in the inode item) of the inode, so if a file is deduplicated
	 * the same number of times in both the parent and send snapshots, its
	 * iversion becomes the same in both snapshots, hence the inode item is
	 * the same on both snapshots.
	 */
	if (sctx->cur_ino != sctx->cmp_key->objectid)
		return 0;

	if (!sctx->cur_inode_new_gen && !sctx->cur_inode_deleted) {
		if (result != BTRFS_COMPARE_TREE_DELETED)
			ret = process_extent(sctx, sctx->left_path,
					     sctx->cmp_key);
	}

	return ret;
}

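/*
 * Check if the generation of directory @dir differs between the send and
 * parent snapshots, which means the directory was deleted and its inode
 * number reused. Returns 1 if it changed, 0 if not, and a negative errno on
 * failure to look up the inode in either root.
 */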
static int dir_changed(struct send_ctx *sctx, u64 dir)
{
	u64 orig_gen, new_gen;
	int ret;

	ret = get_inode_info(sctx->send_root, dir, NULL, &new_gen, NULL, NULL,
			     NULL, NULL);
	if (ret)
		return ret;

	ret = get_inode_info(sctx->parent_root, dir, NULL, &orig_gen, NULL,
			     NULL, NULL, NULL);
	if (ret)
		return ret;

	return (orig_gen != new_gen) ? 1 : 0;
}

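/*
 * For an inode ref item that is identical in both snapshots, check whether
 * any of the directories it points to changed generation. If so, the paths
 * leading to the inode may differ and the ref must still be processed.
 * Returns the result of dir_changed() for the first changed directory, or 0
 * if none changed.
 */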
static int compare_refs(struct send_ctx *sctx, struct btrfs_path *path,
			struct btrfs_key *key)
{
	struct btrfs_inode_extref *extref;
	struct extent_buffer *leaf;
	u64 dirid = 0, last_dirid = 0;
	unsigned long ptr;
	u32 item_size;
	u32 cur_offset = 0;
	int ref_name_len;
	int ret = 0;

	/* Easy case, just check this one dirid */
	if (key->type == BTRFS_INODE_REF_KEY) {
		dirid = key->offset;

		ret = dir_changed(sctx, dirid);
		goto out;
	}

	leaf = path->nodes[0];
	item_size = btrfs_item_size(leaf, path->slots[0]);
	ptr = btrfs_item_ptr_offset(leaf, path->slots[0]);
	while (cur_offset < item_size) {
		extref = (struct btrfs_inode_extref *)(ptr +
						       cur_offset);
		dirid = btrfs_inode_extref_parent(leaf, extref);
		ref_name_len = btrfs_inode_extref_name_len(leaf, extref);
		cur_offset += ref_name_len + sizeof(*extref);
		if (dirid == last_dirid)
			continue;
		ret = dir_changed(sctx, dirid);
		if (ret)
			break;
		last_dirid = dirid;
	}
out:
	return ret;
}

/*
 * Updates compare-related fields in sctx and simply forwards to the actual
 * changed_xxx functions.
 */
static int changed_cb(struct btrfs_path *left_path,
		      struct btrfs_path *right_path,
		      struct btrfs_key *key,
		      enum btrfs_compare_tree_result result,
		      struct send_ctx *sctx)
{
	int ret = 0;

	/*
	 * We can not hold the commit root semaphore here. This is because in
	 * the case of sending and receiving to the same filesystem, using a
	 * pipe, we could end up in a deadlock:
	 *
	 * 1) The task running send blocks on the pipe because it's full;
	 *
	 * 2) The task running receive, which is the only consumer of the pipe,
	 *    is waiting for a transaction commit (for example due to a space
	 *    reservation when doing a write or triggering a transaction commit
	 *    when creating a subvolume);
	 *
	 * 3) The transaction is waiting to write lock the commit root semaphore,
	 *    but can not acquire it since it's being held at 1).
	 *
	 * Down this call chain we write to the pipe through kernel_write().
	 * The same type of problem can also happen when sending to a file that
	 * is stored in the same filesystem - when reserving space for a write
	 * into the file, we can trigger a transaction commit.
	 *
	 * Our caller has supplied us with clones of leaves from the send and
	 * parent roots, so we're safe from a concurrent relocation and further
	 * reallocation of metadata extents while we are here. Below we also
	 * assert that the leaves are clones.
	 */
	lockdep_assert_not_held(&sctx->send_root->fs_info->commit_root_sem);

	/*
	 * We always have a send root, so left_path is never NULL. We will not
	 * have a leaf when we have reached the end of the send root but have
	 * not yet reached the end of the parent root.
	 */
	if (left_path->nodes[0])
		ASSERT(test_bit(EXTENT_BUFFER_UNMAPPED,
				&left_path->nodes[0]->bflags));
	/*
	 * When doing a full send we don't have a parent root, so right_path is
	 * NULL. When doing an incremental send, we may have reached the end of
	 * the parent root already, so we don't have a leaf at right_path.
	 */
	if (right_path && right_path->nodes[0])
		ASSERT(test_bit(EXTENT_BUFFER_UNMAPPED,
				&right_path->nodes[0]->bflags));

	if (result == BTRFS_COMPARE_TREE_SAME) {
		if (key->type == BTRFS_INODE_REF_KEY ||
		    key->type == BTRFS_INODE_EXTREF_KEY) {
			ret = compare_refs(sctx, left_path, key);
			if (!ret)
				return 0;
			if (ret < 0)
				return ret;
		} else if (key->type == BTRFS_EXTENT_DATA_KEY) {
			return maybe_send_hole(sctx, left_path, key);
		} else {
			return 0;
		}
		result = BTRFS_COMPARE_TREE_CHANGED;
		ret = 0;
	}

	sctx->left_path = left_path;
	sctx->right_path = right_path;
	sctx->cmp_key = key;

	ret = finish_inode_if_needed(sctx, 0);
	if (ret < 0)
		goto out;

	/* Ignore non-FS objects */
	if (key->objectid == BTRFS_FREE_INO_OBJECTID ||
	    key->objectid == BTRFS_FREE_SPACE_OBJECTID)
		goto out;

	if (key->type == BTRFS_INODE_ITEM_KEY) {
		ret = changed_inode(sctx, result);
	} else if (!sctx->ignore_cur_inode) {
		if (key->type == BTRFS_INODE_REF_KEY ||
		    key->type == BTRFS_INODE_EXTREF_KEY)
			ret = changed_ref(sctx, result);
		else if (key->type == BTRFS_XATTR_ITEM_KEY)
			ret = changed_xattr(sctx, result);
		else if (key->type == BTRFS_EXTENT_DATA_KEY)
			ret = changed_extent(sctx, result);
	}

out:
	return ret;
}

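/*
 * Search again for @key in @root after a path was released. Since send roots
 * are readonly and deduplication is not allowed to run in parallel, the key
 * is expected to always be found; if it is not, something modified a
 * readonly root and we log an error and return -EUCLEAN.
 */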
static int search_key_again(const struct send_ctx *sctx,
			    struct btrfs_root *root,
			    struct btrfs_path *path,
			    const struct btrfs_key *key)
{
	int ret;

	if (!path->need_commit_sem)
		lockdep_assert_held_read(&root->fs_info->commit_root_sem);

	/*
	 * Roots used for send operations are readonly and no one can add,
	 * update or remove keys from them, so we should be able to find our
	 * key again. The only exception is deduplication, which can operate on
	 * readonly roots and add, update or remove keys to/from them - but at
	 * the moment we don't allow it to run in parallel with send.
	 */
	ret = btrfs_search_slot(NULL, root, key, path, 0, 0);
	ASSERT(ret <= 0);
	if (ret > 0) {
		btrfs_print_tree(path->nodes[path->lowest_level], false);
		btrfs_err(root->fs_info,
"send: key (%llu %u %llu) not found in %s root %llu, lowest_level %d, slot %d",
			  key->objectid, key->type, key->offset,
			  (root == sctx->parent_root ? "parent" : "send"),
			  root->root_key.objectid, path->lowest_level,
			  path->slots[path->lowest_level]);
		return -EUCLEAN;
	}

	return ret;
}

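/*
 * Full send (no parent snapshot): iterate over all items of the send root
 * and report each one to changed_cb() as BTRFS_COMPARE_TREE_NEW, restarting
 * the search whenever a transaction used for a block group relocation was
 * committed in the meanwhile.
 */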
static int full_send_tree(struct send_ctx *sctx)
{
	int ret;
	struct btrfs_root *send_root = sctx->send_root;
	struct btrfs_key key;
	struct btrfs_fs_info *fs_info = send_root->fs_info;
	struct btrfs_path *path;

	path = alloc_path_for_send();
	if (!path)
		return -ENOMEM;
	path->reada = READA_FORWARD_ALWAYS;

	key.objectid = BTRFS_FIRST_FREE_OBJECTID;
	key.type = BTRFS_INODE_ITEM_KEY;
	key.offset = 0;

	down_read(&fs_info->commit_root_sem);
	sctx->last_reloc_trans = fs_info->last_reloc_trans;
	up_read(&fs_info->commit_root_sem);

	ret = btrfs_search_slot_for_read(send_root, &key, path, 1, 0);
	if (ret < 0)
		goto out;
	if (ret)
		goto out_finish;

	while (1) {
		btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);

		ret = changed_cb(path, NULL, &key,
				 BTRFS_COMPARE_TREE_NEW, sctx);
		if (ret < 0)
			goto out;

		down_read(&fs_info->commit_root_sem);
		if (fs_info->last_reloc_trans > sctx->last_reloc_trans) {
			sctx->last_reloc_trans = fs_info->last_reloc_trans;
			up_read(&fs_info->commit_root_sem);
			/*
			 * A transaction used for relocating a block group was
			 * committed or is about to finish its commit. Release
			 * our path (leaf) and restart the search, so that we
			 * avoid operating on any file extent items that are
			 * stale, with a disk_bytenr that reflects a pre
			 * relocation value. This way we avoid, as much as
			 * possible, falling back to regular writes when
			 * checking if we can clone file ranges.
			 */
			btrfs_release_path(path);
			ret = search_key_again(sctx, send_root, path, &key);
			if (ret < 0)
				goto out;
		} else {
			up_read(&fs_info->commit_root_sem);
		}

		ret = btrfs_next_item(send_root, path);
		if (ret < 0)
			goto out;
		if (ret) {
			ret = 0;
			break;
		}
	}

out_finish:
	ret = finish_inode_if_needed(sctx, 1);

out:
	btrfs_free_path(path);
	return ret;
}

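/*
 * Replace the node at the given level of the path with a private clone, so
 * that it can be used after the commit root semaphore is released without
 * racing with relocation reallocating the original extent buffer.
 */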
static int replace_node_with_clone(struct btrfs_path *path, int level)
{
	struct extent_buffer *clone;

	clone = btrfs_clone_extent_buffer(path->nodes[level]);
	if (!clone)
		return -ENOMEM;

	free_extent_buffer(path->nodes[level]);
	path->nodes[level] = clone;

	return 0;
}

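/*
 * Descend one level down the tree from the current slot. When level 0 is
 * reached, the leaf is replaced with a clone (see replace_node_with_clone()).
 */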
static int tree_move_down(struct btrfs_path *path, int *level, u64 reada_min_gen)
{
	struct extent_buffer *eb;
	struct extent_buffer *parent = path->nodes[*level];
	int slot = path->slots[*level];
	const int nritems = btrfs_header_nritems(parent);
	u64 reada_max;
	u64 reada_done = 0;

	lockdep_assert_held_read(&parent->fs_info->commit_root_sem);

	BUG_ON(*level == 0);
	eb = btrfs_read_node_slot(parent, slot);
	if (IS_ERR(eb))
		return PTR_ERR(eb);

	/*
	 * Trigger readahead for the next leaves we will process, so that it is
	 * very likely that when we need them they are already in memory and we
	 * will not block on disk IO. For nodes we only do readahead for one,
	 * since the time window between processing nodes is typically larger.
	 */
	reada_max = (*level == 1 ? SZ_128K : eb->fs_info->nodesize);

	for (slot++; slot < nritems && reada_done < reada_max; slot++) {
		if (btrfs_node_ptr_generation(parent, slot) > reada_min_gen) {
			btrfs_readahead_node_child(parent, slot);
			reada_done += eb->fs_info->nodesize;
		}
	}

	path->nodes[*level - 1] = eb;
	path->slots[*level - 1] = 0;
	(*level)--;

	if (*level == 0)
		return replace_node_with_clone(path, 0);

	return 0;
}

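/*
 * Move to the next slot at the current level, going up as many levels as
 * needed when the end of a node is reached. Returns 1 if it had to move up,
 * 0 if it only moved to the next slot, and -1 when the end of the root node
 * was reached.
 */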
static int tree_move_next_or_upnext(struct btrfs_path *path,
				    int *level, int root_level)
{
	int ret = 0;
	int nritems;
	nritems = btrfs_header_nritems(path->nodes[*level]);

	path->slots[*level]++;

	while (path->slots[*level] >= nritems) {
		if (*level == root_level) {
			path->slots[*level] = nritems - 1;
			return -1;
		}

		/* move upnext */
		path->slots[*level] = 0;
		free_extent_buffer(path->nodes[*level]);
		path->nodes[*level] = NULL;
		(*level)++;
		path->slots[*level]++;

		nritems = btrfs_header_nritems(path->nodes[*level]);
		ret = 1;
	}
	return ret;
}

/*
 * Returns 1 if it had to move up and next, 0 if it moved only next or down,
 * -1 when the end of the tree was reached, or a negative errno on failure to
 * read or clone a tree block.
 */
static int tree_advance(struct btrfs_path *path,
			int *level, int root_level,
			int allow_down,
			struct btrfs_key *key,
			u64 reada_min_gen)
{
	int ret;

	if (*level == 0 || !allow_down) {
		ret = tree_move_next_or_upnext(path, level, root_level);
	} else {
		ret = tree_move_down(path, level, reada_min_gen);
	}

	/*
	 * Even if we have reached the end of a tree, ret is -1, update the key
	 * anyway, so that in case we need to restart due to a block group
	 * relocation, we can assert that the last key of the root node still
	 * exists in the tree.
	 */
	if (*level == 0)
		btrfs_item_key_to_cpu(path->nodes[*level], key,
				      path->slots[*level]);
	else
		btrfs_node_key_to_cpu(path->nodes[*level], key,
				      path->slots[*level]);

	return ret;
}

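/*
 * Deep compare the items at the current slots of both paths. @tmp_buf must be
 * large enough to hold an item of the maximum possible size (one nodesize).
 * Returns 0 if both items have the same size and content, 1 otherwise.
 */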
static int tree_compare_item(struct btrfs_path *left_path,
			     struct btrfs_path *right_path,
			     char *tmp_buf)
{
	int cmp;
	int len1, len2;
	unsigned long off1, off2;

	len1 = btrfs_item_size(left_path->nodes[0], left_path->slots[0]);
	len2 = btrfs_item_size(right_path->nodes[0], right_path->slots[0]);
	if (len1 != len2)
		return 1;

	off1 = btrfs_item_ptr_offset(left_path->nodes[0], left_path->slots[0]);
	off2 = btrfs_item_ptr_offset(right_path->nodes[0],
				     right_path->slots[0]);

	read_extent_buffer(left_path->nodes[0], tmp_buf, off1, len1);

	cmp = memcmp_extent_buffer(right_path->nodes[0], tmp_buf, off2, len1);
	if (cmp)
		return 1;
	return 0;
}

/*
 * A transaction used for relocating a block group was committed or is about to
 * finish its commit. Release our paths and restart the search, so that we are
 * not using stale extent buffers:
 *
 * 1) For levels > 0, we are only holding references of extent buffers, without
 *    any locks on them, which does not prevent them from having been relocated
 *    and reallocated after the last time we released the commit root semaphore.
 *    The exception are the root nodes, for which we always have a clone, see
 *    the comment at btrfs_compare_trees();
 *
 * 2) For leaves, level 0, we are holding copies (clones) of extent buffers, so
 *    we are safe from the concurrent relocation and reallocation. However they
 *    can have file extent items with a pre relocation disk_bytenr value, so we
 *    restart the search from the current commit roots and clone the new leaves
 *    so that we get the post relocation disk_bytenr values. Not doing so could
 *    make us clone the wrong data in case there are new extents using the old
 *    disk_bytenr that happen to be shared.
 */
static int restart_after_relocation(struct btrfs_path *left_path,
				    struct btrfs_path *right_path,
				    const struct btrfs_key *left_key,
				    const struct btrfs_key *right_key,
				    int left_level,
				    int right_level,
				    const struct send_ctx *sctx)
{
	int root_level;
	int ret;

	lockdep_assert_held_read(&sctx->send_root->fs_info->commit_root_sem);

	btrfs_release_path(left_path);
	btrfs_release_path(right_path);

	/*
	 * Since keys can not be added or removed to/from our roots because they
	 * are readonly and we do not allow deduplication to run in parallel
	 * (which can add, remove or change keys), the layout of the trees should
	 * not change.
	 */
	left_path->lowest_level = left_level;
	ret = search_key_again(sctx, sctx->send_root, left_path, left_key);
	if (ret < 0)
		return ret;

	right_path->lowest_level = right_level;
	ret = search_key_again(sctx, sctx->parent_root, right_path, right_key);
	if (ret < 0)
		return ret;

	/*
	 * If the lowest level nodes are leaves, clone them so that they can be
	 * safely used by changed_cb() while not under the protection of the
	 * commit root semaphore, even if relocation and reallocation happens in
	 * parallel.
	 */
	if (left_level == 0) {
		ret = replace_node_with_clone(left_path, 0);
		if (ret < 0)
			return ret;
	}

	if (right_level == 0) {
		ret = replace_node_with_clone(right_path, 0);
		if (ret < 0)
			return ret;
	}

	/*
	 * Now clone the root nodes (unless they happen to be the leaves we have
	 * already cloned). This is to protect against concurrent snapshotting of
	 * the send and parent roots (see the comment at btrfs_compare_trees()).
	 */
	root_level = btrfs_header_level(sctx->send_root->commit_root);
	if (root_level > 0) {
		ret = replace_node_with_clone(left_path, root_level);
		if (ret < 0)
			return ret;
	}

	root_level = btrfs_header_level(sctx->parent_root->commit_root);
	if (root_level > 0) {
		ret = replace_node_with_clone(right_path, root_level);
		if (ret < 0)
			return ret;
	}

	return 0;
}

/*
 * This function compares two trees and calls the provided callback for
 * every changed/new/deleted item it finds.
 * If shared tree blocks are encountered, whole subtrees are skipped, making
 * the compare pretty fast on snapshotted subvolumes.
 *
 * This currently works on commit roots only. As commit roots are read only,
 * we don't do any locking besides holding the commit root semaphore while
 * accessing non-cloned extent buffers.
 *
 * If a transaction used for relocating a block group was committed while
 * comparing, the search is restarted from the current commit roots (see
 * restart_after_relocation()).
 */
static int btrfs_compare_trees(struct btrfs_root *left_root,
			       struct btrfs_root *right_root, struct send_ctx *sctx)
{
	struct btrfs_fs_info *fs_info = left_root->fs_info;
	int ret;
	int cmp;
	struct btrfs_path *left_path = NULL;
	struct btrfs_path *right_path = NULL;
	struct btrfs_key left_key;
	struct btrfs_key right_key;
	char *tmp_buf = NULL;
	int left_root_level;
	int right_root_level;
	int left_level;
	int right_level;
	int left_end_reached = 0;
	int right_end_reached = 0;
	int advance_left = 0;
	int advance_right = 0;
	u64 left_blockptr;
	u64 right_blockptr;
	u64 left_gen;
	u64 right_gen;
	u64 reada_min_gen;

	left_path = btrfs_alloc_path();
	if (!left_path) {
		ret = -ENOMEM;
		goto out;
	}
	right_path = btrfs_alloc_path();
	if (!right_path) {
		ret = -ENOMEM;
		goto out;
	}

	tmp_buf = kvmalloc(fs_info->nodesize, GFP_KERNEL);
	if (!tmp_buf) {
		ret = -ENOMEM;
		goto out;
	}

	left_path->search_commit_root = 1;
	left_path->skip_locking = 1;
	right_path->search_commit_root = 1;
	right_path->skip_locking = 1;

	/*
	 * Strategy: Go to the first items of both trees. Then do
	 *
	 * If both trees are at level 0
	 *   Compare keys of current items
	 *     If left < right treat left item as new, advance left tree
	 *       and repeat
	 *     If left > right treat right item as deleted, advance right tree
	 *       and repeat
	 *     If left == right do deep compare of items, treat as changed if
	 *       needed, advance both trees and repeat
	 * If both trees are at the same level but not at level 0
	 *   Compare keys of current nodes/leaves
	 *     If left < right advance left tree and repeat
	 *     If left > right advance right tree and repeat
	 *     If left == right compare blockptrs of the next nodes/leaves
	 *       If they match advance both trees but stay at the same level
	 *         and repeat
	 *       If they don't match advance both trees while allowing to go
	 *         deeper and repeat
	 * If tree levels are different
	 *   Advance the tree that needs it and repeat
	 *
	 * Advancing a tree means:
	 *   If we are at level 0, try to go to the next slot. If that's not
	 *   possible, go one level up and repeat. Stop when we find a level
	 *   where we could go to the next slot. We may at this point be on a
	 *   node or a leaf.
	 *
	 *   If we are not at level 0 and not on shared tree blocks, go one
	 *   level deeper.
	 *
	 *   If we are not at level 0 and on shared tree blocks, go one slot to
	 *   the right if possible or go up and right.
	 */

	down_read(&fs_info->commit_root_sem);
	left_level = btrfs_header_level(left_root->commit_root);
	left_root_level = left_level;
	/*
	 * We clone the root node of the send and parent roots to prevent races
	 * with snapshot creation of these roots. Snapshot creation COWs the
	 * root node of a tree, so after the transaction is committed the old
	 * extent can be reallocated while this send operation is still ongoing.
	 * So we clone them, under the commit root semaphore, to be race free.
	 */
	left_path->nodes[left_level] =
			btrfs_clone_extent_buffer(left_root->commit_root);
	if (!left_path->nodes[left_level]) {
		ret = -ENOMEM;
		goto out_unlock;
	}

	right_level = btrfs_header_level(right_root->commit_root);
	right_root_level = right_level;
	right_path->nodes[right_level] =
			btrfs_clone_extent_buffer(right_root->commit_root);
	if (!right_path->nodes[right_level]) {
		ret = -ENOMEM;
		goto out_unlock;
	}
	/*
	 * Our right root is the parent root, while the left root is the "send"
	 * root. We know that all new nodes/leaves in the left root must have
	 * a generation greater than the right root's generation, so we trigger
	 * readahead for those nodes and leaves of the left root, as we know we
	 * will need to read them at some point.
	 */
	reada_min_gen = btrfs_header_generation(right_root->commit_root);

	if (left_level == 0)
		btrfs_item_key_to_cpu(left_path->nodes[left_level],
				      &left_key, left_path->slots[left_level]);
	else
		btrfs_node_key_to_cpu(left_path->nodes[left_level],
				      &left_key, left_path->slots[left_level]);
	if (right_level == 0)
		btrfs_item_key_to_cpu(right_path->nodes[right_level],
				      &right_key, right_path->slots[right_level]);
	else
		btrfs_node_key_to_cpu(right_path->nodes[right_level],
				      &right_key, right_path->slots[right_level]);

	sctx->last_reloc_trans = fs_info->last_reloc_trans;

	while (1) {
		if (need_resched() ||
		    rwsem_is_contended(&fs_info->commit_root_sem)) {
			up_read(&fs_info->commit_root_sem);
			cond_resched();
			down_read(&fs_info->commit_root_sem);
		}

		if (fs_info->last_reloc_trans > sctx->last_reloc_trans) {
			ret = restart_after_relocation(left_path, right_path,
						       &left_key, &right_key,
						       left_level, right_level,
						       sctx);
			if (ret < 0)
				goto out_unlock;
			sctx->last_reloc_trans = fs_info->last_reloc_trans;
		}

		if (advance_left && !left_end_reached) {
			ret = tree_advance(left_path, &left_level,
					   left_root_level,
					   advance_left != ADVANCE_ONLY_NEXT,
					   &left_key, reada_min_gen);
			if (ret == -1)
				left_end_reached = ADVANCE;
			else if (ret < 0)
				goto out_unlock;
			advance_left = 0;
		}
		if (advance_right && !right_end_reached) {
			ret = tree_advance(right_path, &right_level,
					   right_root_level,
					   advance_right != ADVANCE_ONLY_NEXT,
					   &right_key, reada_min_gen);
			if (ret == -1)
				right_end_reached = ADVANCE;
			else if (ret < 0)
				goto out_unlock;
			advance_right = 0;
		}

		if (left_end_reached && right_end_reached) {
			ret = 0;
			goto out_unlock;
		} else if (left_end_reached) {
			if (right_level == 0) {
				up_read(&fs_info->commit_root_sem);
				ret = changed_cb(left_path, right_path,
						 &right_key,
						 BTRFS_COMPARE_TREE_DELETED,
						 sctx);
				if (ret < 0)
					goto out;
				down_read(&fs_info->commit_root_sem);
			}
			advance_right = ADVANCE;
			continue;
		} else if (right_end_reached) {
			if (left_level == 0) {
				up_read(&fs_info->commit_root_sem);
				ret = changed_cb(left_path, right_path,
						 &left_key,
						 BTRFS_COMPARE_TREE_NEW,
						 sctx);
				if (ret < 0)
					goto out;
				down_read(&fs_info->commit_root_sem);
			}
			advance_left = ADVANCE;
			continue;
		}

		if (left_level == 0 && right_level == 0) {
			up_read(&fs_info->commit_root_sem);
			cmp = btrfs_comp_cpu_keys(&left_key, &right_key);
			if (cmp < 0) {
				ret = changed_cb(left_path, right_path,
						 &left_key,
						 BTRFS_COMPARE_TREE_NEW,
						 sctx);
				advance_left = ADVANCE;
			} else if (cmp > 0) {
				ret = changed_cb(left_path, right_path,
						 &right_key,
						 BTRFS_COMPARE_TREE_DELETED,
						 sctx);
				advance_right = ADVANCE;
			} else {
				enum btrfs_compare_tree_result result;

				WARN_ON(!extent_buffer_uptodate(left_path->nodes[0]));
				ret = tree_compare_item(left_path, right_path,
							tmp_buf);
				if (ret)
					result = BTRFS_COMPARE_TREE_CHANGED;
				else
					result = BTRFS_COMPARE_TREE_SAME;
				ret = changed_cb(left_path, right_path,
						 &left_key, result, sctx);
				advance_left = ADVANCE;
				advance_right = ADVANCE;
			}

			if (ret < 0)
				goto out;
			down_read(&fs_info->commit_root_sem);
		} else if (left_level == right_level) {
			cmp = btrfs_comp_cpu_keys(&left_key, &right_key);
			if (cmp < 0) {
				advance_left = ADVANCE;
			} else if (cmp > 0) {
				advance_right = ADVANCE;
			} else {
				left_blockptr = btrfs_node_blockptr(
						left_path->nodes[left_level],
						left_path->slots[left_level]);
				right_blockptr = btrfs_node_blockptr(
						right_path->nodes[right_level],
						right_path->slots[right_level]);
				left_gen = btrfs_node_ptr_generation(
						left_path->nodes[left_level],
						left_path->slots[left_level]);
				right_gen = btrfs_node_ptr_generation(
						right_path->nodes[right_level],
						right_path->slots[right_level]);
				if (left_blockptr == right_blockptr &&
				    left_gen == right_gen) {
					/*
					 * As we're on a shared block, don't
					 * allow going deeper.
					 */
					advance_left = ADVANCE_ONLY_NEXT;
					advance_right = ADVANCE_ONLY_NEXT;
				} else {
					advance_left = ADVANCE;
					advance_right = ADVANCE;
				}
			}
		} else if (left_level < right_level) {
			advance_right = ADVANCE;
		} else {
			advance_left = ADVANCE;
		}
	}

out_unlock:
	up_read(&fs_info->commit_root_sem);
out:
	btrfs_free_path(left_path);
	btrfs_free_path(right_path);
	kvfree(tmp_buf);
	return ret;
}

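/*
 * Entry point for generating the stream: emits the stream header and the
 * subvolume begin command, then does either a full send or, when a parent
 * root is given, an incremental send driven by the tree comparison.
 */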
static int send_subvol(struct send_ctx *sctx)
{
	int ret;

	if (!(sctx->flags & BTRFS_SEND_FLAG_OMIT_STREAM_HEADER)) {
		ret = send_header(sctx);
		if (ret < 0)
			goto out;
	}

	ret = send_subvol_begin(sctx);
	if (ret < 0)
		goto out;

	if (sctx->parent_root) {
		ret = btrfs_compare_trees(sctx->send_root, sctx->parent_root, sctx);
		if (ret < 0)
			goto out;
		ret = finish_inode_if_needed(sctx, 1);
		if (ret < 0)
			goto out;
	} else {
		ret = full_send_tree(sctx);
		if (ret < 0)
			goto out;
	}

out:
	free_recorded_refs(sctx);
	return ret;
}

/*
 * If orphan cleanup did remove any orphans from a root, it means the tree
 * was modified and therefore the commit root is not the same as the current
 * root anymore. This is a problem, because send uses the commit root and
 * therefore can see inode items that don't exist in the current root anymore,
 * and for example make calls to btrfs_iget, which will do tree lookups based
 * on the current root and not on the commit root. Those lookups will fail,
 * returning a -ESTALE error, and making send fail with that error. So make
 * sure a send does not see any orphans we have just removed, and that it will
 * see the same inodes regardless of whether a transaction commit happened
 * before it started (meaning that the commit root will be the same as the
 * current root) or not.
 */
static int ensure_commit_roots_uptodate(struct send_ctx *sctx)
{
	int i;
	struct btrfs_trans_handle *trans = NULL;

again:
	if (sctx->parent_root &&
	    sctx->parent_root->node != sctx->parent_root->commit_root)
		goto commit_trans;

	for (i = 0; i < sctx->clone_roots_cnt; i++)
		if (sctx->clone_roots[i].root->node !=
		    sctx->clone_roots[i].root->commit_root)
			goto commit_trans;

	if (trans)
		return btrfs_end_transaction(trans);

	return 0;

commit_trans:
	/* Use any root, all fs roots will get their commit roots updated. */
	if (!trans) {
		trans = btrfs_join_transaction(sctx->send_root);
		if (IS_ERR(trans))
			return PTR_ERR(trans);
		goto again;
	}

	return btrfs_commit_transaction(trans);
}

/*
 * Make sure any existing delalloc is flushed for any root used by a send
 * operation so that we do not miss any data and we do not race with writeback
 * finishing and changing a tree while send is using the tree. This could
 * happen if a subvolume is in RW mode, has delalloc, is turned to RO mode and
 * a send operation then uses the subvolume.
 * After flushing delalloc, ensure_commit_roots_uptodate() must be called.
 */
static int flush_delalloc_roots(struct send_ctx *sctx)
{
	struct btrfs_root *root = sctx->parent_root;
	int ret;
	int i;

	if (root) {
		ret = btrfs_start_delalloc_snapshot(root, false);
		if (ret)
			return ret;
		btrfs_wait_ordered_extents(root, U64_MAX, 0, U64_MAX);
	}

	for (i = 0; i < sctx->clone_roots_cnt; i++) {
		root = sctx->clone_roots[i].root;
		ret = btrfs_start_delalloc_snapshot(root, false);
		if (ret)
			return ret;
		btrfs_wait_ordered_extents(root, U64_MAX, 0, U64_MAX);
	}

	return 0;
}

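/*
 * Drop the send_in_progress reference of a root taken when the send started,
 * warning if the counter ever goes negative.
 */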
static void btrfs_root_dec_send_in_progress(struct btrfs_root* root)
{
	spin_lock(&root->root_item_lock);
	root->send_in_progress--;
	/*
	 * Not much left to do, we don't know why it's unbalanced and
	 * can't blindly reset it to 0.
	 */
	if (root->send_in_progress < 0)
		btrfs_err(root->fs_info,
			  "send_in_progress unbalanced %d root %llu",
			  root->send_in_progress, root->root_key.objectid);
	spin_unlock(&root->root_item_lock);
}

static void dedupe_in_progress_warn(const struct btrfs_root *root)
{
	btrfs_warn_rl(root->fs_info,
"cannot use root %llu for send while deduplications on it are in progress (%d in progress)",
		      root->root_key.objectid, root->dedupe_in_progress);
}

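/*
 * Entry point of the BTRFS_IOC_SEND ioctl. Validates the arguments, builds
 * the send context (send buffer, sorted clone roots, optional parent root),
 * flushes delalloc and makes sure the commit roots are up to date, and then
 * streams the subvolume to the file descriptor given in the arguments,
 * terminating the stream with an END command unless
 * BTRFS_SEND_FLAG_OMIT_END_CMD was requested.
 *
 * A minimal userspace sketch of driving this ioctl (error handling omitted;
 * the snapshot at "/mnt/snap" is a hypothetical path and is assumed to exist
 * and be read-only; normally "btrfs send" or libbtrfsutil is used instead):
 *
 *	#include <fcntl.h>
 *	#include <sys/ioctl.h>
 *	#include <linux/btrfs.h>
 *
 *	int send_snapshot_to_stdout(void)
 *	{
 *		struct btrfs_ioctl_send_args args = {};
 *		int subvol_fd = open("/mnt/snap", O_RDONLY);
 *
 *		args.send_fd = 1;	// write the stream to stdout
 *		// args.parent_root and args.clone_sources stay zero: full send
 *		return ioctl(subvol_fd, BTRFS_IOC_SEND, &args);
 *	}
 */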
long btrfs_ioctl_send(struct inode *inode, struct btrfs_ioctl_send_args *arg)
{
	int ret = 0;
	struct btrfs_root *send_root = BTRFS_I(inode)->root;
	struct btrfs_fs_info *fs_info = send_root->fs_info;
	struct btrfs_root *clone_root;
	struct send_ctx *sctx = NULL;
	u32 i;
	u64 *clone_sources_tmp = NULL;
	int clone_sources_to_rollback = 0;
	size_t alloc_size;
	int sort_clone_roots = 0;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	/*
	 * The subvolume must remain read-only during send, protect against
	 * making it RW. This also protects against deletion.
	 */
	spin_lock(&send_root->root_item_lock);
	if (btrfs_root_readonly(send_root) && send_root->dedupe_in_progress) {
		dedupe_in_progress_warn(send_root);
		spin_unlock(&send_root->root_item_lock);
		return -EAGAIN;
	}
	send_root->send_in_progress++;
	spin_unlock(&send_root->root_item_lock);

	/*
	 * Userspace tools do the checks and warn the user if it's
	 * not RO.
	 */
	if (!btrfs_root_readonly(send_root)) {
		ret = -EPERM;
		goto out;
	}

	/*
	 * Check that we don't overflow at later allocations, we request
	 * clone_sources_count + 1 items, and compare to unsigned long inside
	 * access_ok.
	 */
	if (arg->clone_sources_count >
	    ULONG_MAX / sizeof(struct clone_root) - 1) {
		ret = -EINVAL;
		goto out;
	}

	if (arg->flags & ~BTRFS_SEND_FLAG_MASK) {
		ret = -EINVAL;
		goto out;
	}

	sctx = kzalloc(sizeof(struct send_ctx), GFP_KERNEL);
	if (!sctx) {
		ret = -ENOMEM;
		goto out;
	}

	INIT_LIST_HEAD(&sctx->new_refs);
	INIT_LIST_HEAD(&sctx->deleted_refs);
	INIT_RADIX_TREE(&sctx->name_cache, GFP_KERNEL);
	INIT_LIST_HEAD(&sctx->name_cache_list);

	sctx->flags = arg->flags;

	if (arg->flags & BTRFS_SEND_FLAG_VERSION) {
		if (arg->version > BTRFS_SEND_STREAM_VERSION) {
			ret = -EPROTO;
			goto out;
		}
		/* Zero means "use the highest version" */
		sctx->proto = arg->version ?: BTRFS_SEND_STREAM_VERSION;
	} else {
		sctx->proto = 1;
	}

	sctx->send_filp = fget(arg->send_fd);
	if (!sctx->send_filp) {
		ret = -EBADF;
		goto out;
	}

	sctx->send_root = send_root;
	/*
	 * Unlikely but possible: if the subvolume is marked for deletion but
	 * is slow to remove the directory entry, send can still be started.
	 */
	if (btrfs_root_dead(sctx->send_root)) {
		ret = -EPERM;
		goto out;
	}

	sctx->clone_roots_cnt = arg->clone_sources_count;

	sctx->send_max_size = BTRFS_SEND_BUF_SIZE;
	sctx->send_buf = kvmalloc(sctx->send_max_size, GFP_KERNEL);
	if (!sctx->send_buf) {
		ret = -ENOMEM;
		goto out;
	}

	sctx->pending_dir_moves = RB_ROOT;
	sctx->waiting_dir_moves = RB_ROOT;
	sctx->orphan_dirs = RB_ROOT;

	/* kvcalloc() takes the element count first, then the element size. */
	sctx->clone_roots = kvcalloc(arg->clone_sources_count + 1,
				     sizeof(*sctx->clone_roots),
				     GFP_KERNEL);
	if (!sctx->clone_roots) {
		ret = -ENOMEM;
		goto out;
	}

	alloc_size = array_size(sizeof(*arg->clone_sources),
				arg->clone_sources_count);

	if (arg->clone_sources_count) {
		clone_sources_tmp = kvmalloc(alloc_size, GFP_KERNEL);
		if (!clone_sources_tmp) {
			ret = -ENOMEM;
			goto out;
		}

		ret = copy_from_user(clone_sources_tmp, arg->clone_sources,
				     alloc_size);
		if (ret) {
			ret = -EFAULT;
			goto out;
		}

		for (i = 0; i < arg->clone_sources_count; i++) {
			clone_root = btrfs_get_fs_root(fs_info,
						       clone_sources_tmp[i], true);
			if (IS_ERR(clone_root)) {
				ret = PTR_ERR(clone_root);
				goto out;
			}
			spin_lock(&clone_root->root_item_lock);
			if (!btrfs_root_readonly(clone_root) ||
			    btrfs_root_dead(clone_root)) {
				spin_unlock(&clone_root->root_item_lock);
				btrfs_put_root(clone_root);
				ret = -EPERM;
				goto out;
			}
			if (clone_root->dedupe_in_progress) {
				dedupe_in_progress_warn(clone_root);
				spin_unlock(&clone_root->root_item_lock);
				btrfs_put_root(clone_root);
				ret = -EAGAIN;
				goto out;
			}
			clone_root->send_in_progress++;
			spin_unlock(&clone_root->root_item_lock);

			sctx->clone_roots[i].root = clone_root;
			clone_sources_to_rollback = i + 1;
		}
		kvfree(clone_sources_tmp);
		clone_sources_tmp = NULL;
	}

	if (arg->parent_root) {
		sctx->parent_root = btrfs_get_fs_root(fs_info, arg->parent_root,
						      true);
		if (IS_ERR(sctx->parent_root)) {
			ret = PTR_ERR(sctx->parent_root);
			goto out;
		}

		spin_lock(&sctx->parent_root->root_item_lock);
		sctx->parent_root->send_in_progress++;
		if (!btrfs_root_readonly(sctx->parent_root) ||
		    btrfs_root_dead(sctx->parent_root)) {
			spin_unlock(&sctx->parent_root->root_item_lock);
			ret = -EPERM;
			goto out;
		}
		if (sctx->parent_root->dedupe_in_progress) {
			dedupe_in_progress_warn(sctx->parent_root);
			spin_unlock(&sctx->parent_root->root_item_lock);
			ret = -EAGAIN;
			goto out;
		}
		spin_unlock(&sctx->parent_root->root_item_lock);
	}

	/*
	 * Clones from send_root are allowed, but only if the clone source
	 * is behind the current send position. This is checked while searching
	 * for possible clone sources.
	 */
	sctx->clone_roots[sctx->clone_roots_cnt++].root =
		btrfs_grab_root(sctx->send_root);

	/* We do a bsearch later */
	sort(sctx->clone_roots, sctx->clone_roots_cnt,
	     sizeof(*sctx->clone_roots), __clone_root_cmp_sort,
	     NULL);
	sort_clone_roots = 1;

	ret = flush_delalloc_roots(sctx);
	if (ret)
		goto out;

	ret = ensure_commit_roots_uptodate(sctx);
	if (ret)
		goto out;

	ret = send_subvol(sctx);
	if (ret < 0)
		goto out;

	if (!(sctx->flags & BTRFS_SEND_FLAG_OMIT_END_CMD)) {
		ret = begin_cmd(sctx, BTRFS_SEND_C_END);
		if (ret < 0)
			goto out;
		ret = send_cmd(sctx);
		if (ret < 0)
			goto out;
	}

out:
	WARN_ON(sctx && !ret && !RB_EMPTY_ROOT(&sctx->pending_dir_moves));
	while (sctx && !RB_EMPTY_ROOT(&sctx->pending_dir_moves)) {
		struct rb_node *n;
		struct pending_dir_move *pm;

		n = rb_first(&sctx->pending_dir_moves);
		pm = rb_entry(n, struct pending_dir_move, node);
		while (!list_empty(&pm->list)) {
			struct pending_dir_move *pm2;

			pm2 = list_first_entry(&pm->list,
					       struct pending_dir_move, list);
			free_pending_move(sctx, pm2);
		}
		free_pending_move(sctx, pm);
	}

	WARN_ON(sctx && !ret && !RB_EMPTY_ROOT(&sctx->waiting_dir_moves));
	while (sctx && !RB_EMPTY_ROOT(&sctx->waiting_dir_moves)) {
		struct rb_node *n;
		struct waiting_dir_move *dm;

		n = rb_first(&sctx->waiting_dir_moves);
		dm = rb_entry(n, struct waiting_dir_move, node);
		rb_erase(&dm->node, &sctx->waiting_dir_moves);
		kfree(dm);
	}

	WARN_ON(sctx && !ret && !RB_EMPTY_ROOT(&sctx->orphan_dirs));
	while (sctx && !RB_EMPTY_ROOT(&sctx->orphan_dirs)) {
		struct rb_node *n;
		struct orphan_dir_info *odi;

		n = rb_first(&sctx->orphan_dirs);
		odi = rb_entry(n, struct orphan_dir_info, node);
		free_orphan_dir_info(sctx, odi);
	}

	if (sort_clone_roots) {
		for (i = 0; i < sctx->clone_roots_cnt; i++) {
			btrfs_root_dec_send_in_progress(
					sctx->clone_roots[i].root);
			btrfs_put_root(sctx->clone_roots[i].root);
		}
	} else {
		for (i = 0; sctx && i < clone_sources_to_rollback; i++) {
			btrfs_root_dec_send_in_progress(
					sctx->clone_roots[i].root);
			btrfs_put_root(sctx->clone_roots[i].root);
		}

		btrfs_root_dec_send_in_progress(send_root);
	}
	if (sctx && !IS_ERR_OR_NULL(sctx->parent_root)) {
		btrfs_root_dec_send_in_progress(sctx->parent_root);
		btrfs_put_root(sctx->parent_root);
	}

	kvfree(clone_sources_tmp);

	if (sctx) {
		if (sctx->send_filp)
			fput(sctx->send_filp);

		kvfree(sctx->clone_roots);
		kvfree(sctx->send_buf);

		name_cache_free(sctx);

		kfree(sctx);
	}

	return ret;
}