1 // SPDX-License-Identifier: GPL-2.0
2 /*
3 * Copyright (C) 2012 Alexander Block. All rights reserved.
4 */
5
6 #include <linux/bsearch.h>
7 #include <linux/fs.h>
8 #include <linux/file.h>
9 #include <linux/sort.h>
10 #include <linux/mount.h>
11 #include <linux/xattr.h>
12 #include <linux/posix_acl_xattr.h>
13 #include <linux/radix-tree.h>
14 #include <linux/vmalloc.h>
15 #include <linux/string.h>
16 #include <linux/compat.h>
17 #include <linux/crc32c.h>
18
19 #include "send.h"
20 #include "backref.h"
21 #include "locking.h"
22 #include "disk-io.h"
23 #include "btrfs_inode.h"
24 #include "transaction.h"
25 #include "compression.h"
26 #include "xattr.h"
27
28 /*
29 * Maximum number of references an extent can have in order for us to attempt to
30 * issue clone operations instead of write operations. This currently exists to
31 * avoid hitting limitations of the backreference walking code (taking a lot of
32 * time and using too much memory for extents with a large number of references).
33 */
34 #define SEND_MAX_EXTENT_REFS 64
35
36 /*
37 * A fs_path is a helper to dynamically build path names with unknown size.
38 * It reallocates the internal buffer on demand.
39 * It allows fast adding of path elements on the right side (normal path) and
40 * fast adding to the left side (reversed path). A reversed path can also be
41 * unreversed if needed.
42 */
43 struct fs_path {
44 union {
45 struct {
46 char *start;
47 char *end;
48
49 char *buf;
50 unsigned short buf_len:15;
51 unsigned short reversed:1;
52 char inline_buf[];
53 };
54 /*
55 * Average path length does not exceed 200 bytes, so we'll have
56 * better packing in the slab and a higher chance to satisfy
57 * an allocation later during send.
58 */
59 char pad[256];
60 };
61 };
62 #define FS_PATH_INLINE_SIZE \
63 (sizeof(struct fs_path) - offsetof(struct fs_path, inline_buf))
64
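/*
 * Illustrative usage sketch (not part of the original sources) of the
 * fs_path helpers defined further below. Building a path from right to
 * left with a reversed fs_path avoids moving bytes around while walking
 * up a directory chain, and the result can be unreversed at the end:
 *
 *	struct fs_path *p = fs_path_alloc_reversed();
 *
 *	if (!p)
 *		return -ENOMEM;
 *	fs_path_add(p, "file", 4);	p->start is now "file"
 *	fs_path_add(p, "dir", 3);	p->start is now "dir/file"
 *	fs_path_unreverse(p);		"dir/file" now starts at p->buf
 *	fs_path_free(p);
 */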
65
66 /* reused for each extent */
67 struct clone_root {
68 struct btrfs_root *root;
69 u64 ino;
70 u64 offset;
71
72 u64 found_refs;
73 };
74
75 #define SEND_CTX_MAX_NAME_CACHE_SIZE 128
76 #define SEND_CTX_NAME_CACHE_CLEAN_SIZE (SEND_CTX_MAX_NAME_CACHE_SIZE * 2)
77
78 struct send_ctx {
79 struct file *send_filp;
80 loff_t send_off;
81 char *send_buf;
82 u32 send_size;
83 u32 send_max_size;
84 u64 total_send_size;
85 u64 cmd_send_size[BTRFS_SEND_C_MAX + 1];
86 u64 flags; /* 'flags' member of btrfs_ioctl_send_args is u64 */
87
88 struct btrfs_root *send_root;
89 struct btrfs_root *parent_root;
90 struct clone_root *clone_roots;
91 int clone_roots_cnt;
92
93 /* current state of the compare_tree call */
94 struct btrfs_path *left_path;
95 struct btrfs_path *right_path;
96 struct btrfs_key *cmp_key;
97
98 /*
99 * Info about the currently processed inode. In case of deleted inodes,
100 * these are the values from the deleted inode.
101 */
102 u64 cur_ino;
103 u64 cur_inode_gen;
104 int cur_inode_new;
105 int cur_inode_new_gen;
106 int cur_inode_deleted;
107 u64 cur_inode_size;
108 u64 cur_inode_mode;
109 u64 cur_inode_rdev;
110 u64 cur_inode_last_extent;
111 u64 cur_inode_next_write_offset;
112 bool ignore_cur_inode;
113
114 u64 send_progress;
115
116 struct list_head new_refs;
117 struct list_head deleted_refs;
118
119 struct radix_tree_root name_cache;
120 struct list_head name_cache_list;
121 int name_cache_size;
122
123 struct file_ra_state ra;
124
125 char *read_buf;
126
127 /*
128 * We process inodes in increasing order, so if before an
129 * incremental send we reverse the parent/child relationship of
130 * directories such that a directory with a lower inode number was
131 * the parent of a directory with a higher inode number, and the one
132 * becoming the new parent got renamed too, we can't rename/move the
133 * directory with lower inode number when we finish processing it - we
134 * must process the directory with higher inode number first, then
135 * rename/move it and then rename/move the directory with lower inode
136 * number. Example follows.
137 *
138 * Tree state when the first send was performed:
139 *
140 * .
141 * |-- a (ino 257)
142 * |-- b (ino 258)
143 * |
144 * |
145 * |-- c (ino 259)
146 * | |-- d (ino 260)
147 * |
148 * |-- c2 (ino 261)
149 *
150 * Tree state when the second (incremental) send is performed:
151 *
152 * .
153 * |-- a (ino 257)
154 * |-- b (ino 258)
155 * |-- c2 (ino 261)
156 * |-- d2 (ino 260)
157 * |-- cc (ino 259)
158 *
159 * The sequence of steps that lead to the second state was:
160 *
161 * mv /a/b/c/d /a/b/c2/d2
162 * mv /a/b/c /a/b/c2/d2/cc
163 *
164 * "c" has lower inode number, but we can't move it (2nd mv operation)
165 * before we move "d", which has higher inode number.
166 *
167 * So we just memorize which move/rename operations must be performed
168 * later when their respective parent is processed and moved/renamed.
169 */
170
171 /* Indexed by parent directory inode number. */
172 struct rb_root pending_dir_moves;
173
174 /*
175 * Reverse index, indexed by the inode number of a directory that
176 * is waiting for the move/rename of its immediate parent before its
177 * own move/rename can be performed.
178 */
179 struct rb_root waiting_dir_moves;
180
181 /*
182 * A directory that is going to be rm'ed might have a child directory
183 * which is in the pending directory moves index above. In this case,
184 * the directory can only be removed after the move/rename of its child
185 * is performed. Example:
186 *
187 * Parent snapshot:
188 *
189 * . (ino 256)
190 * |-- a/ (ino 257)
191 * |-- b/ (ino 258)
192 * |-- c/ (ino 259)
193 * | |-- x/ (ino 260)
194 * |
195 * |-- y/ (ino 261)
196 *
197 * Send snapshot:
198 *
199 * . (ino 256)
200 * |-- a/ (ino 257)
201 * |-- b/ (ino 258)
202 * |-- YY/ (ino 261)
203 * |-- x/ (ino 260)
204 *
205 * Sequence of steps that lead to the send snapshot:
206 * rm -f /a/b/c/foo.txt
207 * mv /a/b/y /a/b/YY
208 * mv /a/b/c/x /a/b/YY
209 * rmdir /a/b/c
210 *
211 * When the child is processed, its move/rename is delayed until its
212 * parent is processed (as explained above), but all other operations
213 * like update utimes, chown, chgrp, etc, are performed and the paths
214 * that it uses for those operations must use the orphanized name of
215 * its parent (the directory we're going to rm later), so we need to
216 * memorize that name.
217 *
218 * Indexed by the inode number of the directory to be deleted.
219 */
220 struct rb_root orphan_dirs;
221 };
222
223 struct pending_dir_move {
224 struct rb_node node;
225 struct list_head list;
226 u64 parent_ino;
227 u64 ino;
228 u64 gen;
229 struct list_head update_refs;
230 };
231
232 struct waiting_dir_move {
233 struct rb_node node;
234 u64 ino;
235 /*
236 * There might be some directory that could not be removed because it
237 * was waiting for this directory inode to be moved first. Therefore
238 * after this directory is moved, we can try to rmdir the inode rmdir_ino.
239 */
240 u64 rmdir_ino;
241 u64 rmdir_gen;
242 bool orphanized;
243 };
244
245 struct orphan_dir_info {
246 struct rb_node node;
247 u64 ino;
248 u64 gen;
249 u64 last_dir_index_offset;
250 };
251
252 struct name_cache_entry {
253 struct list_head list;
254 /*
255 * radix_tree has only 32bit entries but we need to handle 64bit inums.
256 * We use the lower 32bit of the 64bit inum to store it in the tree. If
257 * more than one inum would fall into the same entry, we use radix_list
258 * to store the additional entries. radix_list is also used to store
259 * entries where two entries have the same inum but different
260 * generations.
261 */
262 struct list_head radix_list;
263 u64 ino;
264 u64 gen;
265 u64 parent_ino;
266 u64 parent_gen;
267 int ret;
268 int need_later_update;
269 int name_len;
270 char name[];
271 };
272
273 __cold
274 static void inconsistent_snapshot_error(struct send_ctx *sctx,
275 enum btrfs_compare_tree_result result,
276 const char *what)
277 {
278 const char *result_string;
279
280 switch (result) {
281 case BTRFS_COMPARE_TREE_NEW:
282 result_string = "new";
283 break;
284 case BTRFS_COMPARE_TREE_DELETED:
285 result_string = "deleted";
286 break;
287 case BTRFS_COMPARE_TREE_CHANGED:
288 result_string = "updated";
289 break;
290 case BTRFS_COMPARE_TREE_SAME:
291 ASSERT(0);
292 result_string = "unchanged";
293 break;
294 default:
295 ASSERT(0);
296 result_string = "unexpected";
297 }
298
299 btrfs_err(sctx->send_root->fs_info,
300 "Send: inconsistent snapshot, found %s %s for inode %llu without updated inode item, send root is %llu, parent root is %llu",
301 result_string, what, sctx->cmp_key->objectid,
302 sctx->send_root->root_key.objectid,
303 (sctx->parent_root ?
304 sctx->parent_root->root_key.objectid : 0));
305 }
306
307 static int is_waiting_for_move(struct send_ctx *sctx, u64 ino);
308
309 static struct waiting_dir_move *
310 get_waiting_dir_move(struct send_ctx *sctx, u64 ino);
311
312 static int is_waiting_for_rm(struct send_ctx *sctx, u64 dir_ino, u64 gen);
313
314 static int need_send_hole(struct send_ctx *sctx)
315 {
316 return (sctx->parent_root && !sctx->cur_inode_new &&
317 !sctx->cur_inode_new_gen && !sctx->cur_inode_deleted &&
318 S_ISREG(sctx->cur_inode_mode));
319 }
320
321 static void fs_path_reset(struct fs_path *p)
322 {
323 if (p->reversed) {
324 p->start = p->buf + p->buf_len - 1;
325 p->end = p->start;
326 *p->start = 0;
327 } else {
328 p->start = p->buf;
329 p->end = p->start;
330 *p->start = 0;
331 }
332 }
333
334 static struct fs_path *fs_path_alloc(void)
335 {
336 struct fs_path *p;
337
338 p = kmalloc(sizeof(*p), GFP_KERNEL);
339 if (!p)
340 return NULL;
341 p->reversed = 0;
342 p->buf = p->inline_buf;
343 p->buf_len = FS_PATH_INLINE_SIZE;
344 fs_path_reset(p);
345 return p;
346 }
347
348 static struct fs_path *fs_path_alloc_reversed(void)
349 {
350 struct fs_path *p;
351
352 p = fs_path_alloc();
353 if (!p)
354 return NULL;
355 p->reversed = 1;
356 fs_path_reset(p);
357 return p;
358 }
359
360 static void fs_path_free(struct fs_path *p)
361 {
362 if (!p)
363 return;
364 if (p->buf != p->inline_buf)
365 kfree(p->buf);
366 kfree(p);
367 }
368
369 static int fs_path_len(struct fs_path *p)
370 {
371 return p->end - p->start;
372 }
373
374 static int fs_path_ensure_buf(struct fs_path *p, int len)
375 {
376 char *tmp_buf;
377 int path_len;
378 int old_buf_len;
379
380 len++;
381
382 if (p->buf_len >= len)
383 return 0;
384
385 if (len > PATH_MAX) {
386 WARN_ON(1);
387 return -ENOMEM;
388 }
389
390 path_len = p->end - p->start;
391 old_buf_len = p->buf_len;
392
393 /*
394 * First time the inline_buf does not suffice
395 */
396 if (p->buf == p->inline_buf) {
397 tmp_buf = kmalloc(len, GFP_KERNEL);
398 if (tmp_buf)
399 memcpy(tmp_buf, p->buf, old_buf_len);
400 } else {
401 tmp_buf = krealloc(p->buf, len, GFP_KERNEL);
402 }
403 if (!tmp_buf)
404 return -ENOMEM;
405 p->buf = tmp_buf;
406 /*
407 * The real size of the buffer is bigger; this lets the fast path
408 * happen most of the time.
409 */
410 p->buf_len = ksize(p->buf);
411
412 if (p->reversed) {
413 tmp_buf = p->buf + old_buf_len - path_len - 1;
414 p->end = p->buf + p->buf_len - 1;
415 p->start = p->end - path_len;
416 memmove(p->start, tmp_buf, path_len + 1);
417 } else {
418 p->start = p->buf;
419 p->end = p->start + path_len;
420 }
421 return 0;
422 }
423
424 static int fs_path_prepare_for_add(struct fs_path *p, int name_len,
425 char **prepared)
426 {
427 int ret;
428 int new_len;
429
430 new_len = p->end - p->start + name_len;
431 if (p->start != p->end)
432 new_len++;
433 ret = fs_path_ensure_buf(p, new_len);
434 if (ret < 0)
435 goto out;
436
437 if (p->reversed) {
438 if (p->start != p->end)
439 *--p->start = '/';
440 p->start -= name_len;
441 *prepared = p->start;
442 } else {
443 if (p->start != p->end)
444 *p->end++ = '/';
445 *prepared = p->end;
446 p->end += name_len;
447 *p->end = 0;
448 }
449
450 out:
451 return ret;
452 }
453
454 static int fs_path_add(struct fs_path *p, const char *name, int name_len)
455 {
456 int ret;
457 char *prepared;
458
459 ret = fs_path_prepare_for_add(p, name_len, &prepared);
460 if (ret < 0)
461 goto out;
462 memcpy(prepared, name, name_len);
463
464 out:
465 return ret;
466 }
467
468 static int fs_path_add_path(struct fs_path *p, struct fs_path *p2)
469 {
470 int ret;
471 char *prepared;
472
473 ret = fs_path_prepare_for_add(p, p2->end - p2->start, &prepared);
474 if (ret < 0)
475 goto out;
476 memcpy(prepared, p2->start, p2->end - p2->start);
477
478 out:
479 return ret;
480 }
481
482 static int fs_path_add_from_extent_buffer(struct fs_path *p,
483 struct extent_buffer *eb,
484 unsigned long off, int len)
485 {
486 int ret;
487 char *prepared;
488
489 ret = fs_path_prepare_for_add(p, len, &prepared);
490 if (ret < 0)
491 goto out;
492
493 read_extent_buffer(eb, prepared, off, len);
494
495 out:
496 return ret;
497 }
498
499 static int fs_path_copy(struct fs_path *p, struct fs_path *from)
500 {
501 int ret;
502
503 p->reversed = from->reversed;
504 fs_path_reset(p);
505
506 ret = fs_path_add_path(p, from);
507
508 return ret;
509 }
510
511
512 static void fs_path_unreverse(struct fs_path *p)
513 {
514 char *tmp;
515 int len;
516
517 if (!p->reversed)
518 return;
519
520 tmp = p->start;
521 len = p->end - p->start;
522 p->start = p->buf;
523 p->end = p->start + len;
524 memmove(p->start, tmp, len + 1);
525 p->reversed = 0;
526 }
527
528 static struct btrfs_path *alloc_path_for_send(void)
529 {
530 struct btrfs_path *path;
531
532 path = btrfs_alloc_path();
533 if (!path)
534 return NULL;
535 path->search_commit_root = 1;
536 path->skip_locking = 1;
537 path->need_commit_sem = 1;
538 return path;
539 }
540
541 static int write_buf(struct file *filp, const void *buf, u32 len, loff_t *off)
542 {
543 int ret;
544 u32 pos = 0;
545
546 while (pos < len) {
547 ret = kernel_write(filp, buf + pos, len - pos, off);
548 /* TODO handle that correctly */
549 /*if (ret == -ERESTARTSYS) {
550 continue;
551 }*/
552 if (ret < 0)
553 return ret;
554 if (ret == 0) {
555 return -EIO;
556 }
557 pos += ret;
558 }
559
560 return 0;
561 }
562
563 static int tlv_put(struct send_ctx *sctx, u16 attr, const void *data, int len)
564 {
565 struct btrfs_tlv_header *hdr;
566 int total_len = sizeof(*hdr) + len;
567 int left = sctx->send_max_size - sctx->send_size;
568
569 if (unlikely(left < total_len))
570 return -EOVERFLOW;
571
572 hdr = (struct btrfs_tlv_header *) (sctx->send_buf + sctx->send_size);
573 hdr->tlv_type = cpu_to_le16(attr);
574 hdr->tlv_len = cpu_to_le16(len);
575 memcpy(hdr + 1, data, len);
576 sctx->send_size += total_len;
577
578 return 0;
579 }
580
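/*
 * Illustrative example (not part of the original sources): tlv_put()
 * appends a little-endian type/length header followed by the raw payload
 * to send_buf. E.g. a 64 bit attribute with value 4096 ends up as:
 *
 *	struct btrfs_tlv_header { __le16 tlv_type; __le16 tlv_len; }
 *	[ tlv_type ][ tlv_len = 8 ][ 00 10 00 00 00 00 00 00 ]
 *
 * send_size is advanced past header and payload, so the next TLV is
 * packed directly behind it.
 */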
581 #define TLV_PUT_DEFINE_INT(bits) \
582 static int tlv_put_u##bits(struct send_ctx *sctx, \
583 u##bits attr, u##bits value) \
584 { \
585 __le##bits __tmp = cpu_to_le##bits(value); \
586 return tlv_put(sctx, attr, &__tmp, sizeof(__tmp)); \
587 }
588
589 TLV_PUT_DEFINE_INT(64)
590
591 static int tlv_put_string(struct send_ctx *sctx, u16 attr,
592 const char *str, int len)
593 {
594 if (len == -1)
595 len = strlen(str);
596 return tlv_put(sctx, attr, str, len);
597 }
598
599 static int tlv_put_uuid(struct send_ctx *sctx, u16 attr,
600 const u8 *uuid)
601 {
602 return tlv_put(sctx, attr, uuid, BTRFS_UUID_SIZE);
603 }
604
605 static int tlv_put_btrfs_timespec(struct send_ctx *sctx, u16 attr,
606 struct extent_buffer *eb,
607 struct btrfs_timespec *ts)
608 {
609 struct btrfs_timespec bts;
610 read_extent_buffer(eb, &bts, (unsigned long)ts, sizeof(bts));
611 return tlv_put(sctx, attr, &bts, sizeof(bts));
612 }
613
614
615 #define TLV_PUT(sctx, attrtype, data, attrlen) \
616 do { \
617 ret = tlv_put(sctx, attrtype, data, attrlen); \
618 if (ret < 0) \
619 goto tlv_put_failure; \
620 } while (0)
621
622 #define TLV_PUT_INT(sctx, attrtype, bits, value) \
623 do { \
624 ret = tlv_put_u##bits(sctx, attrtype, value); \
625 if (ret < 0) \
626 goto tlv_put_failure; \
627 } while (0)
628
629 #define TLV_PUT_U8(sctx, attrtype, data) TLV_PUT_INT(sctx, attrtype, 8, data)
630 #define TLV_PUT_U16(sctx, attrtype, data) TLV_PUT_INT(sctx, attrtype, 16, data)
631 #define TLV_PUT_U32(sctx, attrtype, data) TLV_PUT_INT(sctx, attrtype, 32, data)
632 #define TLV_PUT_U64(sctx, attrtype, data) TLV_PUT_INT(sctx, attrtype, 64, data)
633 #define TLV_PUT_STRING(sctx, attrtype, str, len) \
634 do { \
635 ret = tlv_put_string(sctx, attrtype, str, len); \
636 if (ret < 0) \
637 goto tlv_put_failure; \
638 } while (0)
639 #define TLV_PUT_PATH(sctx, attrtype, p) \
640 do { \
641 ret = tlv_put_string(sctx, attrtype, p->start, \
642 p->end - p->start); \
643 if (ret < 0) \
644 goto tlv_put_failure; \
645 } while(0)
646 #define TLV_PUT_UUID(sctx, attrtype, uuid) \
647 do { \
648 ret = tlv_put_uuid(sctx, attrtype, uuid); \
649 if (ret < 0) \
650 goto tlv_put_failure; \
651 } while (0)
652 #define TLV_PUT_BTRFS_TIMESPEC(sctx, attrtype, eb, ts) \
653 do { \
654 ret = tlv_put_btrfs_timespec(sctx, attrtype, eb, ts); \
655 if (ret < 0) \
656 goto tlv_put_failure; \
657 } while (0)
658
659 static int send_header(struct send_ctx *sctx)
660 {
661 struct btrfs_stream_header hdr;
662
663 strcpy(hdr.magic, BTRFS_SEND_STREAM_MAGIC);
664 hdr.version = cpu_to_le32(BTRFS_SEND_STREAM_VERSION);
665
666 return write_buf(sctx->send_filp, &hdr, sizeof(hdr),
667 &sctx->send_off);
668 }
669
670 /*
671 * For each command/item we want to send to userspace, we call this function.
672 */
673 static int begin_cmd(struct send_ctx *sctx, int cmd)
674 {
675 struct btrfs_cmd_header *hdr;
676
677 if (WARN_ON(!sctx->send_buf))
678 return -EINVAL;
679
680 BUG_ON(sctx->send_size);
681
682 sctx->send_size += sizeof(*hdr);
683 hdr = (struct btrfs_cmd_header *)sctx->send_buf;
684 hdr->cmd = cpu_to_le16(cmd);
685
686 return 0;
687 }
688
689 static int send_cmd(struct send_ctx *sctx)
690 {
691 int ret;
692 struct btrfs_cmd_header *hdr;
693 u32 crc;
694
695 hdr = (struct btrfs_cmd_header *)sctx->send_buf;
696 hdr->len = cpu_to_le32(sctx->send_size - sizeof(*hdr));
697 hdr->crc = 0;
698
699 crc = crc32c(0, (unsigned char *)sctx->send_buf, sctx->send_size);
700 hdr->crc = cpu_to_le32(crc);
701
702 ret = write_buf(sctx->send_filp, sctx->send_buf, sctx->send_size,
703 &sctx->send_off);
704
705 sctx->total_send_size += sctx->send_size;
706 sctx->cmd_send_size[le16_to_cpu(hdr->cmd)] += sctx->send_size;
707 sctx->send_size = 0;
708
709 return ret;
710 }
711
712 /*
713 * Sends a move instruction to user space
714 */
715 static int send_rename(struct send_ctx *sctx,
716 struct fs_path *from, struct fs_path *to)
717 {
718 struct btrfs_fs_info *fs_info = sctx->send_root->fs_info;
719 int ret;
720
721 btrfs_debug(fs_info, "send_rename %s -> %s", from->start, to->start);
722
723 ret = begin_cmd(sctx, BTRFS_SEND_C_RENAME);
724 if (ret < 0)
725 goto out;
726
727 TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, from);
728 TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH_TO, to);
729
730 ret = send_cmd(sctx);
731
732 tlv_put_failure:
733 out:
734 return ret;
735 }
736
737 /*
738 * Sends a link instruction to user space
739 */
740 static int send_link(struct send_ctx *sctx,
741 struct fs_path *path, struct fs_path *lnk)
742 {
743 struct btrfs_fs_info *fs_info = sctx->send_root->fs_info;
744 int ret;
745
746 btrfs_debug(fs_info, "send_link %s -> %s", path->start, lnk->start);
747
748 ret = begin_cmd(sctx, BTRFS_SEND_C_LINK);
749 if (ret < 0)
750 goto out;
751
752 TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, path);
753 TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH_LINK, lnk);
754
755 ret = send_cmd(sctx);
756
757 tlv_put_failure:
758 out:
759 return ret;
760 }
761
762 /*
763 * Sends an unlink instruction to user space
764 */
765 static int send_unlink(struct send_ctx *sctx, struct fs_path *path)
766 {
767 struct btrfs_fs_info *fs_info = sctx->send_root->fs_info;
768 int ret;
769
770 btrfs_debug(fs_info, "send_unlink %s", path->start);
771
772 ret = begin_cmd(sctx, BTRFS_SEND_C_UNLINK);
773 if (ret < 0)
774 goto out;
775
776 TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, path);
777
778 ret = send_cmd(sctx);
779
780 tlv_put_failure:
781 out:
782 return ret;
783 }
784
785 /*
786 * Sends a rmdir instruction to user space
787 */
788 static int send_rmdir(struct send_ctx *sctx, struct fs_path *path)
789 {
790 struct btrfs_fs_info *fs_info = sctx->send_root->fs_info;
791 int ret;
792
793 btrfs_debug(fs_info, "send_rmdir %s", path->start);
794
795 ret = begin_cmd(sctx, BTRFS_SEND_C_RMDIR);
796 if (ret < 0)
797 goto out;
798
799 TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, path);
800
801 ret = send_cmd(sctx);
802
803 tlv_put_failure:
804 out:
805 return ret;
806 }
807
808 /*
809 * Helper function to retrieve some fields from an inode item.
810 */
811 static int __get_inode_info(struct btrfs_root *root, struct btrfs_path *path,
812 u64 ino, u64 *size, u64 *gen, u64 *mode, u64 *uid,
813 u64 *gid, u64 *rdev)
814 {
815 int ret;
816 struct btrfs_inode_item *ii;
817 struct btrfs_key key;
818
819 key.objectid = ino;
820 key.type = BTRFS_INODE_ITEM_KEY;
821 key.offset = 0;
822 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
823 if (ret) {
824 if (ret > 0)
825 ret = -ENOENT;
826 return ret;
827 }
828
829 ii = btrfs_item_ptr(path->nodes[0], path->slots[0],
830 struct btrfs_inode_item);
831 if (size)
832 *size = btrfs_inode_size(path->nodes[0], ii);
833 if (gen)
834 *gen = btrfs_inode_generation(path->nodes[0], ii);
835 if (mode)
836 *mode = btrfs_inode_mode(path->nodes[0], ii);
837 if (uid)
838 *uid = btrfs_inode_uid(path->nodes[0], ii);
839 if (gid)
840 *gid = btrfs_inode_gid(path->nodes[0], ii);
841 if (rdev)
842 *rdev = btrfs_inode_rdev(path->nodes[0], ii);
843
844 return ret;
845 }
846
847 static int get_inode_info(struct btrfs_root *root,
848 u64 ino, u64 *size, u64 *gen,
849 u64 *mode, u64 *uid, u64 *gid,
850 u64 *rdev)
851 {
852 struct btrfs_path *path;
853 int ret;
854
855 path = alloc_path_for_send();
856 if (!path)
857 return -ENOMEM;
858 ret = __get_inode_info(root, path, ino, size, gen, mode, uid, gid,
859 rdev);
860 btrfs_free_path(path);
861 return ret;
862 }
863
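/*
 * Usage sketch (illustrative): all out parameters of get_inode_info() are
 * optional, so callers that only need a single field pass NULL for the
 * rest, e.g. to read just the inode generation:
 *
 *	u64 gen;
 *	int ret = get_inode_info(root, ino, NULL, &gen, NULL, NULL, NULL,
 *				 NULL);
 */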
864 typedef int (*iterate_inode_ref_t)(int num, u64 dir, int index,
865 struct fs_path *p,
866 void *ctx);
867
868 /*
869 * Helper function to iterate the entries in ONE btrfs_inode_ref or
870 * btrfs_inode_extref.
871 * The iterate callback may return a non-zero value to stop iteration. This can
872 * be a negative value for error codes or 1 to simply stop it.
873 *
874 * path must point to the INODE_REF or INODE_EXTREF when called.
875 */
876 static int iterate_inode_ref(struct btrfs_root *root, struct btrfs_path *path,
877 struct btrfs_key *found_key, int resolve,
878 iterate_inode_ref_t iterate, void *ctx)
879 {
880 struct extent_buffer *eb = path->nodes[0];
881 struct btrfs_item *item;
882 struct btrfs_inode_ref *iref;
883 struct btrfs_inode_extref *extref;
884 struct btrfs_path *tmp_path;
885 struct fs_path *p;
886 u32 cur = 0;
887 u32 total;
888 int slot = path->slots[0];
889 u32 name_len;
890 char *start;
891 int ret = 0;
892 int num = 0;
893 int index;
894 u64 dir;
895 unsigned long name_off;
896 unsigned long elem_size;
897 unsigned long ptr;
898
899 p = fs_path_alloc_reversed();
900 if (!p)
901 return -ENOMEM;
902
903 tmp_path = alloc_path_for_send();
904 if (!tmp_path) {
905 fs_path_free(p);
906 return -ENOMEM;
907 }
908
909
910 if (found_key->type == BTRFS_INODE_REF_KEY) {
911 ptr = (unsigned long)btrfs_item_ptr(eb, slot,
912 struct btrfs_inode_ref);
913 item = btrfs_item_nr(slot);
914 total = btrfs_item_size(eb, item);
915 elem_size = sizeof(*iref);
916 } else {
917 ptr = btrfs_item_ptr_offset(eb, slot);
918 total = btrfs_item_size_nr(eb, slot);
919 elem_size = sizeof(*extref);
920 }
921
922 while (cur < total) {
923 fs_path_reset(p);
924
925 if (found_key->type == BTRFS_INODE_REF_KEY) {
926 iref = (struct btrfs_inode_ref *)(ptr + cur);
927 name_len = btrfs_inode_ref_name_len(eb, iref);
928 name_off = (unsigned long)(iref + 1);
929 index = btrfs_inode_ref_index(eb, iref);
930 dir = found_key->offset;
931 } else {
932 extref = (struct btrfs_inode_extref *)(ptr + cur);
933 name_len = btrfs_inode_extref_name_len(eb, extref);
934 name_off = (unsigned long)&extref->name;
935 index = btrfs_inode_extref_index(eb, extref);
936 dir = btrfs_inode_extref_parent(eb, extref);
937 }
938
939 if (resolve) {
940 start = btrfs_ref_to_path(root, tmp_path, name_len,
941 name_off, eb, dir,
942 p->buf, p->buf_len);
943 if (IS_ERR(start)) {
944 ret = PTR_ERR(start);
945 goto out;
946 }
947 if (start < p->buf) {
948 /* overflow, try again with a larger buffer */
949 ret = fs_path_ensure_buf(p,
950 p->buf_len + p->buf - start);
951 if (ret < 0)
952 goto out;
953 start = btrfs_ref_to_path(root, tmp_path,
954 name_len, name_off,
955 eb, dir,
956 p->buf, p->buf_len);
957 if (IS_ERR(start)) {
958 ret = PTR_ERR(start);
959 goto out;
960 }
961 BUG_ON(start < p->buf);
962 }
963 p->start = start;
964 } else {
965 ret = fs_path_add_from_extent_buffer(p, eb, name_off,
966 name_len);
967 if (ret < 0)
968 goto out;
969 }
970
971 cur += elem_size + name_len;
972 ret = iterate(num, dir, index, p, ctx);
973 if (ret)
974 goto out;
975 num++;
976 }
977
978 out:
979 btrfs_free_path(tmp_path);
980 fs_path_free(p);
981 return ret;
982 }
983
984 typedef int (*iterate_dir_item_t)(int num, struct btrfs_key *di_key,
985 const char *name, int name_len,
986 const char *data, int data_len,
987 u8 type, void *ctx);
988
989 /*
990 * Helper function to iterate the entries in ONE btrfs_dir_item.
991 * The iterate callback may return a non-zero value to stop iteration. This can
992 * be a negative value for error codes or 1 to simply stop it.
993 *
994 * path must point to the dir item when called.
995 */
996 static int iterate_dir_item(struct btrfs_root *root, struct btrfs_path *path,
997 iterate_dir_item_t iterate, void *ctx)
998 {
999 int ret = 0;
1000 struct extent_buffer *eb;
1001 struct btrfs_item *item;
1002 struct btrfs_dir_item *di;
1003 struct btrfs_key di_key;
1004 char *buf = NULL;
1005 int buf_len;
1006 u32 name_len;
1007 u32 data_len;
1008 u32 cur;
1009 u32 len;
1010 u32 total;
1011 int slot;
1012 int num;
1013 u8 type;
1014
1015 /*
1016 * Start with a small buffer (1 page). If later we end up needing more
1017 * space, which can happen for xattrs on a fs with a leaf size greater
1018 * than the page size, attempt to increase the buffer. Typically xattr
1019 * values are small.
1020 */
1021 buf_len = PATH_MAX;
1022 buf = kmalloc(buf_len, GFP_KERNEL);
1023 if (!buf) {
1024 ret = -ENOMEM;
1025 goto out;
1026 }
1027
1028 eb = path->nodes[0];
1029 slot = path->slots[0];
1030 item = btrfs_item_nr(slot);
1031 di = btrfs_item_ptr(eb, slot, struct btrfs_dir_item);
1032 cur = 0;
1033 len = 0;
1034 total = btrfs_item_size(eb, item);
1035
1036 num = 0;
1037 while (cur < total) {
1038 name_len = btrfs_dir_name_len(eb, di);
1039 data_len = btrfs_dir_data_len(eb, di);
1040 type = btrfs_dir_type(eb, di);
1041 btrfs_dir_item_key_to_cpu(eb, di, &di_key);
1042
1043 if (type == BTRFS_FT_XATTR) {
1044 if (name_len > XATTR_NAME_MAX) {
1045 ret = -ENAMETOOLONG;
1046 goto out;
1047 }
1048 if (name_len + data_len >
1049 BTRFS_MAX_XATTR_SIZE(root->fs_info)) {
1050 ret = -E2BIG;
1051 goto out;
1052 }
1053 } else {
1054 /*
1055 * Path too long
1056 */
1057 if (name_len + data_len > PATH_MAX) {
1058 ret = -ENAMETOOLONG;
1059 goto out;
1060 }
1061 }
1062
1063 if (name_len + data_len > buf_len) {
1064 buf_len = name_len + data_len;
1065 if (is_vmalloc_addr(buf)) {
1066 vfree(buf);
1067 buf = NULL;
1068 } else {
1069 char *tmp = krealloc(buf, buf_len,
1070 GFP_KERNEL | __GFP_NOWARN);
1071
1072 if (!tmp)
1073 kfree(buf);
1074 buf = tmp;
1075 }
1076 if (!buf) {
1077 buf = kvmalloc(buf_len, GFP_KERNEL);
1078 if (!buf) {
1079 ret = -ENOMEM;
1080 goto out;
1081 }
1082 }
1083 }
1084
1085 read_extent_buffer(eb, buf, (unsigned long)(di + 1),
1086 name_len + data_len);
1087
1088 len = sizeof(*di) + name_len + data_len;
1089 di = (struct btrfs_dir_item *)((char *)di + len);
1090 cur += len;
1091
1092 ret = iterate(num, &di_key, buf, name_len, buf + name_len,
1093 data_len, type, ctx);
1094 if (ret < 0)
1095 goto out;
1096 if (ret) {
1097 ret = 0;
1098 goto out;
1099 }
1100
1101 num++;
1102 }
1103
1104 out:
1105 kvfree(buf);
1106 return ret;
1107 }
1108
1109 static int __copy_first_ref(int num, u64 dir, int index,
1110 struct fs_path *p, void *ctx)
1111 {
1112 int ret;
1113 struct fs_path *pt = ctx;
1114
1115 ret = fs_path_copy(pt, p);
1116 if (ret < 0)
1117 return ret;
1118
1119 /* we want the first only */
1120 return 1;
1121 }
1122
1123 /*
1124 * Retrieve the first path of an inode. If an inode has more than one
1125 * ref/hardlink, the additional ones are ignored.
1126 */
1127 static int get_inode_path(struct btrfs_root *root,
1128 u64 ino, struct fs_path *path)
1129 {
1130 int ret;
1131 struct btrfs_key key, found_key;
1132 struct btrfs_path *p;
1133
1134 p = alloc_path_for_send();
1135 if (!p)
1136 return -ENOMEM;
1137
1138 fs_path_reset(path);
1139
1140 key.objectid = ino;
1141 key.type = BTRFS_INODE_REF_KEY;
1142 key.offset = 0;
1143
1144 ret = btrfs_search_slot_for_read(root, &key, p, 1, 0);
1145 if (ret < 0)
1146 goto out;
1147 if (ret) {
1148 ret = 1;
1149 goto out;
1150 }
1151 btrfs_item_key_to_cpu(p->nodes[0], &found_key, p->slots[0]);
1152 if (found_key.objectid != ino ||
1153 (found_key.type != BTRFS_INODE_REF_KEY &&
1154 found_key.type != BTRFS_INODE_EXTREF_KEY)) {
1155 ret = -ENOENT;
1156 goto out;
1157 }
1158
1159 ret = iterate_inode_ref(root, p, &found_key, 1,
1160 __copy_first_ref, path);
1161 if (ret < 0)
1162 goto out;
1163 ret = 0;
1164
1165 out:
1166 btrfs_free_path(p);
1167 return ret;
1168 }
1169
1170 struct backref_ctx {
1171 struct send_ctx *sctx;
1172
1173 struct btrfs_path *path;
1174 /* number of total found references */
1175 u64 found;
1176
1177 /*
1178 * Used for clones found in send_root. Clones found behind cur_objectid
1179 * and cur_offset are not considered allowed clones.
1180 */
1181 u64 cur_objectid;
1182 u64 cur_offset;
1183
1184 /* may be truncated in case it's the last extent in a file */
1185 u64 extent_len;
1186
1187 /* data offset in the file extent item */
1188 u64 data_offset;
1189
1190 /* Just to check for bugs in backref resolving */
1191 int found_itself;
1192 };
1193
1194 static int __clone_root_cmp_bsearch(const void *key, const void *elt)
1195 {
1196 u64 root = (u64)(uintptr_t)key;
1197 struct clone_root *cr = (struct clone_root *)elt;
1198
1199 if (root < cr->root->objectid)
1200 return -1;
1201 if (root > cr->root->objectid)
1202 return 1;
1203 return 0;
1204 }
1205
1206 static int __clone_root_cmp_sort(const void *e1, const void *e2)
1207 {
1208 struct clone_root *cr1 = (struct clone_root *)e1;
1209 struct clone_root *cr2 = (struct clone_root *)e2;
1210
1211 if (cr1->root->objectid < cr2->root->objectid)
1212 return -1;
1213 if (cr1->root->objectid > cr2->root->objectid)
1214 return 1;
1215 return 0;
1216 }
1217
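/*
 * Sketch of how the two comparators above are used (the sort itself is
 * done when the send ioctl sets up sctx->clone_roots): the array is
 * sorted once by root objectid and then looked up with bsearch() for
 * every backref that is found:
 *
 *	sort(sctx->clone_roots, sctx->clone_roots_cnt,
 *	     sizeof(struct clone_root), __clone_root_cmp_sort, NULL);
 *	...
 *	found = bsearch((void *)(uintptr_t)root, sctx->clone_roots,
 *			sctx->clone_roots_cnt, sizeof(struct clone_root),
 *			__clone_root_cmp_bsearch);
 */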
1218 /*
1219 * Called for every backref that is found for the current extent.
1220 * Results are collected in sctx->clone_roots->ino/offset/found_refs
1221 */
1222 static int __iterate_backrefs(u64 ino, u64 offset, u64 root, void *ctx_)
1223 {
1224 struct backref_ctx *bctx = ctx_;
1225 struct clone_root *found;
1226 int ret;
1227 u64 i_size;
1228
1229 /* First check if the root is in the list of accepted clone sources */
1230 found = bsearch((void *)(uintptr_t)root, bctx->sctx->clone_roots,
1231 bctx->sctx->clone_roots_cnt,
1232 sizeof(struct clone_root),
1233 __clone_root_cmp_bsearch);
1234 if (!found)
1235 return 0;
1236
1237 if (found->root == bctx->sctx->send_root &&
1238 ino == bctx->cur_objectid &&
1239 offset == bctx->cur_offset) {
1240 bctx->found_itself = 1;
1241 }
1242
1243 /*
1244 * There are inodes that have extents that lie beyond their i_size. Don't
1245 * accept clones from these extents.
1246 */
1247 ret = __get_inode_info(found->root, bctx->path, ino, &i_size, NULL, NULL,
1248 NULL, NULL, NULL);
1249 btrfs_release_path(bctx->path);
1250 if (ret < 0)
1251 return ret;
1252
1253 if (offset + bctx->data_offset + bctx->extent_len > i_size)
1254 return 0;
1255
1256 /*
1257 * Make sure we don't consider clones from send_root that are
1258 * behind the current inode/offset.
1259 */
1260 if (found->root == bctx->sctx->send_root) {
1261 /*
1262 * TODO for the moment we don't accept clones from the inode
1263 * that is currently being sent. We may change this when
1264 * BTRFS_IOC_CLONE_RANGE supports cloning from and to the same
1265 * file.
1266 */
1267 if (ino >= bctx->cur_objectid)
1268 return 0;
1269 }
1270
1271 bctx->found++;
1272 found->found_refs++;
1273 if (ino < found->ino) {
1274 found->ino = ino;
1275 found->offset = offset;
1276 } else if (found->ino == ino) {
1277 /*
1278 * same extent found more than once in the same file.
1279 */
1280 if (found->offset > offset + bctx->extent_len)
1281 found->offset = offset;
1282 }
1283
1284 return 0;
1285 }
1286
1287 /*
1288 * Given an inode, offset and extent item, it finds a good clone for a clone
1289 * instruction. Returns -ENOENT when none could be found. The function makes
1290 * sure that the returned clone is usable at the point where sending is at the
1291 * moment. This means, that no clones are accepted which lie behind the current
1292 * inode+offset.
1293 *
1294 * path must point to the extent item when called.
1295 */
1296 static int find_extent_clone(struct send_ctx *sctx,
1297 struct btrfs_path *path,
1298 u64 ino, u64 data_offset,
1299 u64 ino_size,
1300 struct clone_root **found)
1301 {
1302 struct btrfs_fs_info *fs_info = sctx->send_root->fs_info;
1303 int ret;
1304 int extent_type;
1305 u64 logical;
1306 u64 disk_byte;
1307 u64 num_bytes;
1308 u64 extent_item_pos;
1309 u64 extent_refs;
1310 u64 flags = 0;
1311 struct btrfs_file_extent_item *fi;
1312 struct extent_buffer *eb = path->nodes[0];
1313 struct backref_ctx *backref_ctx = NULL;
1314 struct clone_root *cur_clone_root;
1315 struct btrfs_key found_key;
1316 struct btrfs_path *tmp_path;
1317 struct btrfs_extent_item *ei;
1318 int compressed;
1319 u32 i;
1320
1321 tmp_path = alloc_path_for_send();
1322 if (!tmp_path)
1323 return -ENOMEM;
1324
1325 /* We only use this path under the commit sem */
1326 tmp_path->need_commit_sem = 0;
1327
1328 backref_ctx = kmalloc(sizeof(*backref_ctx), GFP_KERNEL);
1329 if (!backref_ctx) {
1330 ret = -ENOMEM;
1331 goto out;
1332 }
1333
1334 backref_ctx->path = tmp_path;
1335
1336 if (data_offset >= ino_size) {
1337 /*
1338 * There may be extents that lie beyond the file's size.
1339 * I at least had this in combination with snapshotting while
1340 * writing large files.
1341 */
1342 ret = 0;
1343 goto out;
1344 }
1345
1346 fi = btrfs_item_ptr(eb, path->slots[0],
1347 struct btrfs_file_extent_item);
1348 extent_type = btrfs_file_extent_type(eb, fi);
1349 if (extent_type == BTRFS_FILE_EXTENT_INLINE) {
1350 ret = -ENOENT;
1351 goto out;
1352 }
1353 compressed = btrfs_file_extent_compression(eb, fi);
1354
1355 num_bytes = btrfs_file_extent_num_bytes(eb, fi);
1356 disk_byte = btrfs_file_extent_disk_bytenr(eb, fi);
1357 if (disk_byte == 0) {
1358 ret = -ENOENT;
1359 goto out;
1360 }
1361 logical = disk_byte + btrfs_file_extent_offset(eb, fi);
1362
1363 down_read(&fs_info->commit_root_sem);
1364 ret = extent_from_logical(fs_info, disk_byte, tmp_path,
1365 &found_key, &flags);
1366 up_read(&fs_info->commit_root_sem);
1367
1368 if (ret < 0)
1369 goto out;
1370 if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) {
1371 ret = -EIO;
1372 goto out;
1373 }
1374
1375 ei = btrfs_item_ptr(tmp_path->nodes[0], tmp_path->slots[0],
1376 struct btrfs_extent_item);
1377 extent_refs = btrfs_extent_refs(tmp_path->nodes[0], ei);
1378 /*
1379 * Backreference walking (iterate_extent_inodes() below) is currently
1380 * too expensive when an extent has a large number of references, both
1381 * in time spent and used memory. So for now just fallback to write
1382 * operations instead of clone operations when an extent has more than
1383 * a certain amount of references.
1384 *
1385 * Also, if we have only one reference and only the send root as a clone
1386 * source - meaning no clone roots were given in the struct
1387 * btrfs_ioctl_send_args passed to the send ioctl - then it's our
1388 * reference and there's no point in doing backref walking which is
1389 * expensive, so exit early.
1390 */
1391 if ((extent_refs == 1 && sctx->clone_roots_cnt == 1) ||
1392 extent_refs > SEND_MAX_EXTENT_REFS) {
1393 ret = -ENOENT;
1394 goto out;
1395 }
1396 btrfs_release_path(tmp_path);
1397
1398 /*
1399 * Setup the clone roots.
1400 */
1401 for (i = 0; i < sctx->clone_roots_cnt; i++) {
1402 cur_clone_root = sctx->clone_roots + i;
1403 cur_clone_root->ino = (u64)-1;
1404 cur_clone_root->offset = 0;
1405 cur_clone_root->found_refs = 0;
1406 }
1407
1408 backref_ctx->sctx = sctx;
1409 backref_ctx->found = 0;
1410 backref_ctx->cur_objectid = ino;
1411 backref_ctx->cur_offset = data_offset;
1412 backref_ctx->found_itself = 0;
1413 backref_ctx->extent_len = num_bytes;
1414 /*
1415 * For non-compressed extents iterate_extent_inodes() gives us extent
1416 * offsets that already take into account the data offset, but not for
1417 * compressed extents, since the offset is logical and not relative to
1418 * the physical extent locations. We must take this into account to
1419 * avoid sending clone offsets that go beyond the source file's size,
1420 * which would result in the clone ioctl failing with -EINVAL on the
1421 * receiving end.
1422 */
1423 if (compressed == BTRFS_COMPRESS_NONE)
1424 backref_ctx->data_offset = 0;
1425 else
1426 backref_ctx->data_offset = btrfs_file_extent_offset(eb, fi);
1427
1428 /*
1429 * The last extent of a file may be too large due to page alignment.
1430 * We need to adjust extent_len in this case so that the checks in
1431 * __iterate_backrefs work.
1432 */
1433 if (data_offset + num_bytes >= ino_size)
1434 backref_ctx->extent_len = ino_size - data_offset;
1435
1436 /*
1437 * Now collect all backrefs.
1438 */
1439 if (compressed == BTRFS_COMPRESS_NONE)
1440 extent_item_pos = logical - found_key.objectid;
1441 else
1442 extent_item_pos = 0;
1443 ret = iterate_extent_inodes(fs_info, found_key.objectid,
1444 extent_item_pos, 1, __iterate_backrefs,
1445 backref_ctx, false);
1446
1447 if (ret < 0)
1448 goto out;
1449
1450 if (!backref_ctx->found_itself) {
1451 /* found a bug in backref code? */
1452 ret = -EIO;
1453 btrfs_err(fs_info,
1454 "did not find backref in send_root. inode=%llu, offset=%llu, disk_byte=%llu found extent=%llu",
1455 ino, data_offset, disk_byte, found_key.objectid);
1456 goto out;
1457 }
1458
1459 btrfs_debug(fs_info,
1460 "find_extent_clone: data_offset=%llu, ino=%llu, num_bytes=%llu, logical=%llu",
1461 data_offset, ino, num_bytes, logical);
1462
1463 if (!backref_ctx->found)
1464 btrfs_debug(fs_info, "no clones found");
1465
1466 cur_clone_root = NULL;
1467 for (i = 0; i < sctx->clone_roots_cnt; i++) {
1468 if (sctx->clone_roots[i].found_refs) {
1469 if (!cur_clone_root)
1470 cur_clone_root = sctx->clone_roots + i;
1471 else if (sctx->clone_roots[i].root == sctx->send_root)
1472 /* prefer clones from send_root over others */
1473 cur_clone_root = sctx->clone_roots + i;
1474 }
1475
1476 }
1477
1478 if (cur_clone_root) {
1479 *found = cur_clone_root;
1480 ret = 0;
1481 } else {
1482 ret = -ENOENT;
1483 }
1484
1485 out:
1486 btrfs_free_path(tmp_path);
1487 kfree(backref_ctx);
1488 return ret;
1489 }
1490
1491 static int read_symlink(struct btrfs_root *root,
1492 u64 ino,
1493 struct fs_path *dest)
1494 {
1495 int ret;
1496 struct btrfs_path *path;
1497 struct btrfs_key key;
1498 struct btrfs_file_extent_item *ei;
1499 u8 type;
1500 u8 compression;
1501 unsigned long off;
1502 int len;
1503
1504 path = alloc_path_for_send();
1505 if (!path)
1506 return -ENOMEM;
1507
1508 key.objectid = ino;
1509 key.type = BTRFS_EXTENT_DATA_KEY;
1510 key.offset = 0;
1511 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
1512 if (ret < 0)
1513 goto out;
1514 if (ret) {
1515 /*
1516 * An empty symlink inode. Can happen in rare error paths when
1517 * creating a symlink (transaction committed before the inode
1518 * eviction handler removed the symlink inode items and a crash
1519 * happened in between or the subvol was snapshotted in between).
1520 * Print an informative message to dmesg/syslog so that the user
1521 * can delete the symlink.
1522 */
1523 btrfs_err(root->fs_info,
1524 "Found empty symlink inode %llu at root %llu",
1525 ino, root->root_key.objectid);
1526 ret = -EIO;
1527 goto out;
1528 }
1529
1530 ei = btrfs_item_ptr(path->nodes[0], path->slots[0],
1531 struct btrfs_file_extent_item);
1532 type = btrfs_file_extent_type(path->nodes[0], ei);
1533 compression = btrfs_file_extent_compression(path->nodes[0], ei);
1534 BUG_ON(type != BTRFS_FILE_EXTENT_INLINE);
1535 BUG_ON(compression);
1536
1537 off = btrfs_file_extent_inline_start(ei);
1538 len = btrfs_file_extent_ram_bytes(path->nodes[0], ei);
1539
1540 ret = fs_path_add_from_extent_buffer(dest, path->nodes[0], off, len);
1541
1542 out:
1543 btrfs_free_path(path);
1544 return ret;
1545 }
1546
1547 /*
1548 * Helper function to generate a file name that is unique in the root of
1549 * send_root and parent_root. This is used to generate names for orphan inodes.
1550 */
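/*
 * Illustrative example (not from the original comment): the candidate
 * names have the form "o<ino>-<gen>-<idx>", e.g. for inode 261 with
 * generation 5:
 *
 *	snprintf(tmp, sizeof(tmp), "o%llu-%llu-%llu", 261ULL, 5ULL, 0ULL);
 *
 * yields "o261-5-0"; if that name already exists in send_root or
 * parent_root, idx is bumped and "o261-5-1", "o261-5-2", ... are tried
 * until an unused name is found.
 */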
1551 static int gen_unique_name(struct send_ctx *sctx,
1552 u64 ino, u64 gen,
1553 struct fs_path *dest)
1554 {
1555 int ret = 0;
1556 struct btrfs_path *path;
1557 struct btrfs_dir_item *di;
1558 char tmp[64];
1559 int len;
1560 u64 idx = 0;
1561
1562 path = alloc_path_for_send();
1563 if (!path)
1564 return -ENOMEM;
1565
1566 while (1) {
1567 len = snprintf(tmp, sizeof(tmp), "o%llu-%llu-%llu",
1568 ino, gen, idx);
1569 ASSERT(len < sizeof(tmp));
1570
1571 di = btrfs_lookup_dir_item(NULL, sctx->send_root,
1572 path, BTRFS_FIRST_FREE_OBJECTID,
1573 tmp, strlen(tmp), 0);
1574 btrfs_release_path(path);
1575 if (IS_ERR(di)) {
1576 ret = PTR_ERR(di);
1577 goto out;
1578 }
1579 if (di) {
1580 /* not unique, try again */
1581 idx++;
1582 continue;
1583 }
1584
1585 if (!sctx->parent_root) {
1586 /* unique */
1587 ret = 0;
1588 break;
1589 }
1590
1591 di = btrfs_lookup_dir_item(NULL, sctx->parent_root,
1592 path, BTRFS_FIRST_FREE_OBJECTID,
1593 tmp, strlen(tmp), 0);
1594 btrfs_release_path(path);
1595 if (IS_ERR(di)) {
1596 ret = PTR_ERR(di);
1597 goto out;
1598 }
1599 if (di) {
1600 /* not unique, try again */
1601 idx++;
1602 continue;
1603 }
1604 /* unique */
1605 break;
1606 }
1607
1608 ret = fs_path_add(dest, tmp, strlen(tmp));
1609
1610 out:
1611 btrfs_free_path(path);
1612 return ret;
1613 }
1614
1615 enum inode_state {
1616 inode_state_no_change,
1617 inode_state_will_create,
1618 inode_state_did_create,
1619 inode_state_will_delete,
1620 inode_state_did_delete,
1621 };
1622
1623 static int get_cur_inode_state(struct send_ctx *sctx, u64 ino, u64 gen)
1624 {
1625 int ret;
1626 int left_ret;
1627 int right_ret;
1628 u64 left_gen;
1629 u64 right_gen;
1630
1631 ret = get_inode_info(sctx->send_root, ino, NULL, &left_gen, NULL, NULL,
1632 NULL, NULL);
1633 if (ret < 0 && ret != -ENOENT)
1634 goto out;
1635 left_ret = ret;
1636
1637 if (!sctx->parent_root) {
1638 right_ret = -ENOENT;
1639 } else {
1640 ret = get_inode_info(sctx->parent_root, ino, NULL, &right_gen,
1641 NULL, NULL, NULL, NULL);
1642 if (ret < 0 && ret != -ENOENT)
1643 goto out;
1644 right_ret = ret;
1645 }
1646
1647 if (!left_ret && !right_ret) {
1648 if (left_gen == gen && right_gen == gen) {
1649 ret = inode_state_no_change;
1650 } else if (left_gen == gen) {
1651 if (ino < sctx->send_progress)
1652 ret = inode_state_did_create;
1653 else
1654 ret = inode_state_will_create;
1655 } else if (right_gen == gen) {
1656 if (ino < sctx->send_progress)
1657 ret = inode_state_did_delete;
1658 else
1659 ret = inode_state_will_delete;
1660 } else {
1661 ret = -ENOENT;
1662 }
1663 } else if (!left_ret) {
1664 if (left_gen == gen) {
1665 if (ino < sctx->send_progress)
1666 ret = inode_state_did_create;
1667 else
1668 ret = inode_state_will_create;
1669 } else {
1670 ret = -ENOENT;
1671 }
1672 } else if (!right_ret) {
1673 if (right_gen == gen) {
1674 if (ino < sctx->send_progress)
1675 ret = inode_state_did_delete;
1676 else
1677 ret = inode_state_will_delete;
1678 } else {
1679 ret = -ENOENT;
1680 }
1681 } else {
1682 ret = -ENOENT;
1683 }
1684
1685 out:
1686 return ret;
1687 }
1688
1689 static int is_inode_existent(struct send_ctx *sctx, u64 ino, u64 gen)
1690 {
1691 int ret;
1692
1693 if (ino == BTRFS_FIRST_FREE_OBJECTID)
1694 return 1;
1695
1696 ret = get_cur_inode_state(sctx, ino, gen);
1697 if (ret < 0)
1698 goto out;
1699
1700 if (ret == inode_state_no_change ||
1701 ret == inode_state_did_create ||
1702 ret == inode_state_will_delete)
1703 ret = 1;
1704 else
1705 ret = 0;
1706
1707 out:
1708 return ret;
1709 }
1710
1711 /*
1712 * Helper function to lookup a dir item in a dir.
1713 */
1714 static int lookup_dir_item_inode(struct btrfs_root *root,
1715 u64 dir, const char *name, int name_len,
1716 u64 *found_inode,
1717 u8 *found_type)
1718 {
1719 int ret = 0;
1720 struct btrfs_dir_item *di;
1721 struct btrfs_key key;
1722 struct btrfs_path *path;
1723
1724 path = alloc_path_for_send();
1725 if (!path)
1726 return -ENOMEM;
1727
1728 di = btrfs_lookup_dir_item(NULL, root, path,
1729 dir, name, name_len, 0);
1730 if (!di) {
1731 ret = -ENOENT;
1732 goto out;
1733 }
1734 if (IS_ERR(di)) {
1735 ret = PTR_ERR(di);
1736 goto out;
1737 }
1738 btrfs_dir_item_key_to_cpu(path->nodes[0], di, &key);
1739 if (key.type == BTRFS_ROOT_ITEM_KEY) {
1740 ret = -ENOENT;
1741 goto out;
1742 }
1743 *found_inode = key.objectid;
1744 *found_type = btrfs_dir_type(path->nodes[0], di);
1745
1746 out:
1747 btrfs_free_path(path);
1748 return ret;
1749 }
1750
1751 /*
1752 * Looks up the first btrfs_inode_ref of a given ino. It returns the parent dir,
1753 * generation of the parent dir and the name of the dir entry.
1754 */
1755 static int get_first_ref(struct btrfs_root *root, u64 ino,
1756 u64 *dir, u64 *dir_gen, struct fs_path *name)
1757 {
1758 int ret;
1759 struct btrfs_key key;
1760 struct btrfs_key found_key;
1761 struct btrfs_path *path;
1762 int len;
1763 u64 parent_dir;
1764
1765 path = alloc_path_for_send();
1766 if (!path)
1767 return -ENOMEM;
1768
1769 key.objectid = ino;
1770 key.type = BTRFS_INODE_REF_KEY;
1771 key.offset = 0;
1772
1773 ret = btrfs_search_slot_for_read(root, &key, path, 1, 0);
1774 if (ret < 0)
1775 goto out;
1776 if (!ret)
1777 btrfs_item_key_to_cpu(path->nodes[0], &found_key,
1778 path->slots[0]);
1779 if (ret || found_key.objectid != ino ||
1780 (found_key.type != BTRFS_INODE_REF_KEY &&
1781 found_key.type != BTRFS_INODE_EXTREF_KEY)) {
1782 ret = -ENOENT;
1783 goto out;
1784 }
1785
1786 if (found_key.type == BTRFS_INODE_REF_KEY) {
1787 struct btrfs_inode_ref *iref;
1788 iref = btrfs_item_ptr(path->nodes[0], path->slots[0],
1789 struct btrfs_inode_ref);
1790 len = btrfs_inode_ref_name_len(path->nodes[0], iref);
1791 ret = fs_path_add_from_extent_buffer(name, path->nodes[0],
1792 (unsigned long)(iref + 1),
1793 len);
1794 parent_dir = found_key.offset;
1795 } else {
1796 struct btrfs_inode_extref *extref;
1797 extref = btrfs_item_ptr(path->nodes[0], path->slots[0],
1798 struct btrfs_inode_extref);
1799 len = btrfs_inode_extref_name_len(path->nodes[0], extref);
1800 ret = fs_path_add_from_extent_buffer(name, path->nodes[0],
1801 (unsigned long)&extref->name, len);
1802 parent_dir = btrfs_inode_extref_parent(path->nodes[0], extref);
1803 }
1804 if (ret < 0)
1805 goto out;
1806 btrfs_release_path(path);
1807
1808 if (dir_gen) {
1809 ret = get_inode_info(root, parent_dir, NULL, dir_gen, NULL,
1810 NULL, NULL, NULL);
1811 if (ret < 0)
1812 goto out;
1813 }
1814
1815 *dir = parent_dir;
1816
1817 out:
1818 btrfs_free_path(path);
1819 return ret;
1820 }
1821
1822 static int is_first_ref(struct btrfs_root *root,
1823 u64 ino, u64 dir,
1824 const char *name, int name_len)
1825 {
1826 int ret;
1827 struct fs_path *tmp_name;
1828 u64 tmp_dir;
1829
1830 tmp_name = fs_path_alloc();
1831 if (!tmp_name)
1832 return -ENOMEM;
1833
1834 ret = get_first_ref(root, ino, &tmp_dir, NULL, tmp_name);
1835 if (ret < 0)
1836 goto out;
1837
1838 if (dir != tmp_dir || name_len != fs_path_len(tmp_name)) {
1839 ret = 0;
1840 goto out;
1841 }
1842
1843 ret = !memcmp(tmp_name->start, name, name_len);
1844
1845 out:
1846 fs_path_free(tmp_name);
1847 return ret;
1848 }
1849
1850 /*
1851 * Used by process_recorded_refs to determine if a new ref would overwrite an
1852 * already existing ref. In case it detects an overwrite, it returns the
1853 * inode/gen in who_ino/who_gen.
1854 * When an overwrite is detected, process_recorded_refs does proper orphanizing
1855 * to make sure later references to the overwritten inode are possible.
1856 * Orphanizing is however only required for the first ref of an inode.
1857 * process_recorded_refs does an additional is_first_ref check to see if
1858 * orphanizing is really required.
1859 */
1860 static int will_overwrite_ref(struct send_ctx *sctx, u64 dir, u64 dir_gen,
1861 const char *name, int name_len,
1862 u64 *who_ino, u64 *who_gen, u64 *who_mode)
1863 {
1864 int ret = 0;
1865 u64 gen;
1866 u64 other_inode = 0;
1867 u8 other_type = 0;
1868
1869 if (!sctx->parent_root)
1870 goto out;
1871
1872 ret = is_inode_existent(sctx, dir, dir_gen);
1873 if (ret <= 0)
1874 goto out;
1875
1876 /*
1877 * If we have a parent root we need to verify that the parent dir was
1878 * not deleted and then re-created; if it was, then we have no overwrite
1879 * and we can just unlink this entry.
1880 */
1881 if (sctx->parent_root && dir != BTRFS_FIRST_FREE_OBJECTID) {
1882 ret = get_inode_info(sctx->parent_root, dir, NULL, &gen, NULL,
1883 NULL, NULL, NULL);
1884 if (ret < 0 && ret != -ENOENT)
1885 goto out;
1886 if (ret) {
1887 ret = 0;
1888 goto out;
1889 }
1890 if (gen != dir_gen)
1891 goto out;
1892 }
1893
1894 ret = lookup_dir_item_inode(sctx->parent_root, dir, name, name_len,
1895 &other_inode, &other_type);
1896 if (ret < 0 && ret != -ENOENT)
1897 goto out;
1898 if (ret) {
1899 ret = 0;
1900 goto out;
1901 }
1902
1903 /*
1904 * Check if the overwritten ref was already processed. If yes, the ref
1905 * was already unlinked/moved, so we can safely assume that we will not
1906 * overwrite anything at this point in time.
1907 */
1908 if (other_inode > sctx->send_progress ||
1909 is_waiting_for_move(sctx, other_inode)) {
1910 ret = get_inode_info(sctx->parent_root, other_inode, NULL,
1911 who_gen, who_mode, NULL, NULL, NULL);
1912 if (ret < 0)
1913 goto out;
1914
1915 ret = 1;
1916 *who_ino = other_inode;
1917 } else {
1918 ret = 0;
1919 }
1920
1921 out:
1922 return ret;
1923 }
1924
1925 /*
1926 * Checks if the ref was overwritten by an already processed inode. This is
1927 * used by __get_cur_name_and_parent to find out if the ref was orphanized and
1928 * thus the orphan name needs to be used.
1929 * process_recorded_refs also uses it to avoid unlinking of refs that were
1930 * overwritten.
1931 */
1932 static int did_overwrite_ref(struct send_ctx *sctx,
1933 u64 dir, u64 dir_gen,
1934 u64 ino, u64 ino_gen,
1935 const char *name, int name_len)
1936 {
1937 int ret = 0;
1938 u64 gen;
1939 u64 ow_inode;
1940 u8 other_type;
1941
1942 if (!sctx->parent_root)
1943 goto out;
1944
1945 ret = is_inode_existent(sctx, dir, dir_gen);
1946 if (ret <= 0)
1947 goto out;
1948
1949 if (dir != BTRFS_FIRST_FREE_OBJECTID) {
1950 ret = get_inode_info(sctx->send_root, dir, NULL, &gen, NULL,
1951 NULL, NULL, NULL);
1952 if (ret < 0 && ret != -ENOENT)
1953 goto out;
1954 if (ret) {
1955 ret = 0;
1956 goto out;
1957 }
1958 if (gen != dir_gen)
1959 goto out;
1960 }
1961
1962 /* check if the ref was overwritten by another ref */
1963 ret = lookup_dir_item_inode(sctx->send_root, dir, name, name_len,
1964 &ow_inode, &other_type);
1965 if (ret < 0 && ret != -ENOENT)
1966 goto out;
1967 if (ret) {
1968 /* was never and will never be overwritten */
1969 ret = 0;
1970 goto out;
1971 }
1972
1973 ret = get_inode_info(sctx->send_root, ow_inode, NULL, &gen, NULL, NULL,
1974 NULL, NULL);
1975 if (ret < 0)
1976 goto out;
1977
1978 if (ow_inode == ino && gen == ino_gen) {
1979 ret = 0;
1980 goto out;
1981 }
1982
1983 /*
1984 * We know that it is or will be overwritten. Check this now.
1985 * The current inode being processed might have been the one that caused
1986 * inode 'ino' to be orphanized, therefore check if ow_inode matches
1987 * the current inode being processed.
1988 */
1989 if ((ow_inode < sctx->send_progress) ||
1990 (ino != sctx->cur_ino && ow_inode == sctx->cur_ino &&
1991 gen == sctx->cur_inode_gen))
1992 ret = 1;
1993 else
1994 ret = 0;
1995
1996 out:
1997 return ret;
1998 }
1999
2000 /*
2001 * Same as did_overwrite_ref, but also checks if it is the first ref of an inode
2002 * that got overwritten. This is used by process_recorded_refs to determine
2003 * if it has to use the path as returned by get_cur_path or the orphan name.
2004 */
2005 static int did_overwrite_first_ref(struct send_ctx *sctx, u64 ino, u64 gen)
2006 {
2007 int ret = 0;
2008 struct fs_path *name = NULL;
2009 u64 dir;
2010 u64 dir_gen;
2011
2012 if (!sctx->parent_root)
2013 goto out;
2014
2015 name = fs_path_alloc();
2016 if (!name)
2017 return -ENOMEM;
2018
2019 ret = get_first_ref(sctx->parent_root, ino, &dir, &dir_gen, name);
2020 if (ret < 0)
2021 goto out;
2022
2023 ret = did_overwrite_ref(sctx, dir, dir_gen, ino, gen,
2024 name->start, fs_path_len(name));
2025
2026 out:
2027 fs_path_free(name);
2028 return ret;
2029 }
2030
2031 /*
2032 * Insert a name cache entry. On 32bit kernels the radix tree index is 32bit,
2033 * so we need to do some special handling in case we have clashes. This function
2034 * takes care of this with the help of name_cache_entry::radix_list.
2035 * In case of error, nce is kfreed.
2036 */
2037 static int name_cache_insert(struct send_ctx *sctx,
2038 struct name_cache_entry *nce)
2039 {
2040 int ret = 0;
2041 struct list_head *nce_head;
2042
2043 nce_head = radix_tree_lookup(&sctx->name_cache,
2044 (unsigned long)nce->ino);
2045 if (!nce_head) {
2046 nce_head = kmalloc(sizeof(*nce_head), GFP_KERNEL);
2047 if (!nce_head) {
2048 kfree(nce);
2049 return -ENOMEM;
2050 }
2051 INIT_LIST_HEAD(nce_head);
2052
2053 ret = radix_tree_insert(&sctx->name_cache, nce->ino, nce_head);
2054 if (ret < 0) {
2055 kfree(nce_head);
2056 kfree(nce);
2057 return ret;
2058 }
2059 }
2060 list_add_tail(&nce->radix_list, nce_head);
2061 list_add_tail(&nce->list, &sctx->name_cache_list);
2062 sctx->name_cache_size++;
2063
2064 return ret;
2065 }
2066
2067 static void name_cache_delete(struct send_ctx *sctx,
2068 struct name_cache_entry *nce)
2069 {
2070 struct list_head *nce_head;
2071
2072 nce_head = radix_tree_lookup(&sctx->name_cache,
2073 (unsigned long)nce->ino);
2074 if (!nce_head) {
2075 btrfs_err(sctx->send_root->fs_info,
2076 "name_cache_delete lookup failed ino %llu cache size %d, leaking memory",
2077 nce->ino, sctx->name_cache_size);
2078 }
2079
2080 list_del(&nce->radix_list);
2081 list_del(&nce->list);
2082 sctx->name_cache_size--;
2083
2084 /*
2085 * We may not get to the final release of nce_head if the lookup fails
2086 */
2087 if (nce_head && list_empty(nce_head)) {
2088 radix_tree_delete(&sctx->name_cache, (unsigned long)nce->ino);
2089 kfree(nce_head);
2090 }
2091 }
2092
2093 static struct name_cache_entry *name_cache_search(struct send_ctx *sctx,
2094 u64 ino, u64 gen)
2095 {
2096 struct list_head *nce_head;
2097 struct name_cache_entry *cur;
2098
2099 nce_head = radix_tree_lookup(&sctx->name_cache, (unsigned long)ino);
2100 if (!nce_head)
2101 return NULL;
2102
2103 list_for_each_entry(cur, nce_head, radix_list) {
2104 if (cur->ino == ino && cur->gen == gen)
2105 return cur;
2106 }
2107 return NULL;
2108 }
2109
2110 /*
2111 * Removes the entry from the list and adds it back to the end. This marks the
2112 * entry as recently used so that name_cache_clean_unused does not remove it.
2113 */
2114 static void name_cache_used(struct send_ctx *sctx, struct name_cache_entry *nce)
2115 {
2116 list_del(&nce->list);
2117 list_add_tail(&nce->list, &sctx->name_cache_list);
2118 }
2119
2120 /*
2121 * Remove some entries from the beginning of name_cache_list.
2122 */
2123 static void name_cache_clean_unused(struct send_ctx *sctx)
2124 {
2125 struct name_cache_entry *nce;
2126
2127 if (sctx->name_cache_size < SEND_CTX_NAME_CACHE_CLEAN_SIZE)
2128 return;
2129
2130 while (sctx->name_cache_size > SEND_CTX_MAX_NAME_CACHE_SIZE) {
2131 nce = list_entry(sctx->name_cache_list.next,
2132 struct name_cache_entry, list);
2133 name_cache_delete(sctx, nce);
2134 kfree(nce);
2135 }
2136 }
2137
2138 static void name_cache_free(struct send_ctx *sctx)
2139 {
2140 struct name_cache_entry *nce;
2141
2142 while (!list_empty(&sctx->name_cache_list)) {
2143 nce = list_entry(sctx->name_cache_list.next,
2144 struct name_cache_entry, list);
2145 name_cache_delete(sctx, nce);
2146 kfree(nce);
2147 }
2148 }
2149
2150 /*
2151 * Used by get_cur_path for each ref up to the root.
2152 * Returns 0 if it succeeded.
2153 * Returns 1 if the inode is not existent or got overwritten. In that case, the
2154 * name is an orphan name. This instructs get_cur_path to stop iterating. If 1
2155 * is returned, parent_ino/parent_gen are not guaranteed to be valid.
2156 * Returns <0 in case of error.
2157 */
2158 static int __get_cur_name_and_parent(struct send_ctx *sctx,
2159 u64 ino, u64 gen,
2160 u64 *parent_ino,
2161 u64 *parent_gen,
2162 struct fs_path *dest)
2163 {
2164 int ret;
2165 int nce_ret;
2166 struct name_cache_entry *nce = NULL;
2167
2168 /*
2169 * First check if we already did a call to this function with the same
2170 * ino/gen. If yes, check if the cache entry is still up-to-date. If yes
2171 * return the cached result.
2172 */
2173 nce = name_cache_search(sctx, ino, gen);
2174 if (nce) {
2175 if (ino < sctx->send_progress && nce->need_later_update) {
2176 name_cache_delete(sctx, nce);
2177 kfree(nce);
2178 nce = NULL;
2179 } else {
2180 name_cache_used(sctx, nce);
2181 *parent_ino = nce->parent_ino;
2182 *parent_gen = nce->parent_gen;
2183 ret = fs_path_add(dest, nce->name, nce->name_len);
2184 if (ret < 0)
2185 goto out;
2186 ret = nce->ret;
2187 goto out;
2188 }
2189 }
2190
2191 /*
2192 * If the inode is not existent yet, add the orphan name and return 1.
2193 * This should only happen for the parent dir that we determine in
2194 * __record_new_ref
2195 */
2196 ret = is_inode_existent(sctx, ino, gen);
2197 if (ret < 0)
2198 goto out;
2199
2200 if (!ret) {
2201 ret = gen_unique_name(sctx, ino, gen, dest);
2202 if (ret < 0)
2203 goto out;
2204 ret = 1;
2205 goto out_cache;
2206 }
2207
2208 /*
2209 * Depending on whether the inode was already processed or not, use
2210 * send_root or parent_root for ref lookup.
2211 */
2212 if (ino < sctx->send_progress)
2213 ret = get_first_ref(sctx->send_root, ino,
2214 parent_ino, parent_gen, dest);
2215 else
2216 ret = get_first_ref(sctx->parent_root, ino,
2217 parent_ino, parent_gen, dest);
2218 if (ret < 0)
2219 goto out;
2220
2221 /*
2222 * Check if the ref was overwritten by an inode's ref that was processed
2223 * earlier. If yes, treat as orphan and return 1.
2224 */
2225 ret = did_overwrite_ref(sctx, *parent_ino, *parent_gen, ino, gen,
2226 dest->start, dest->end - dest->start);
2227 if (ret < 0)
2228 goto out;
2229 if (ret) {
2230 fs_path_reset(dest);
2231 ret = gen_unique_name(sctx, ino, gen, dest);
2232 if (ret < 0)
2233 goto out;
2234 ret = 1;
2235 }
2236
2237 out_cache:
2238 /*
2239 * Store the result of the lookup in the name cache.
2240 */
2241 nce = kmalloc(sizeof(*nce) + fs_path_len(dest) + 1, GFP_KERNEL);
2242 if (!nce) {
2243 ret = -ENOMEM;
2244 goto out;
2245 }
2246
2247 nce->ino = ino;
2248 nce->gen = gen;
2249 nce->parent_ino = *parent_ino;
2250 nce->parent_gen = *parent_gen;
2251 nce->name_len = fs_path_len(dest);
2252 nce->ret = ret;
2253 strcpy(nce->name, dest->start);
2254
2255 if (ino < sctx->send_progress)
2256 nce->need_later_update = 0;
2257 else
2258 nce->need_later_update = 1;
2259
2260 nce_ret = name_cache_insert(sctx, nce);
2261 if (nce_ret < 0)
2262 ret = nce_ret;
2263 name_cache_clean_unused(sctx);
2264
2265 out:
2266 return ret;
2267 }
2268
2269 /*
2270 * Magic happens here. This function returns the first ref to an inode as it
2271 * would look like while receiving the stream at this point in time.
2272 * We walk the path up to the root. For every inode in between, we check if it
2273 * was already processed/sent. If yes, we continue with the parent as found
2274 * in send_root. If not, we continue with the parent as found in parent_root.
2275 * If we encounter an inode that was deleted at this point in time, we use the
2276 * inode's "orphan" name instead of the real name and stop. Same with new inodes
2277 * that were not created yet and overwritten inodes/refs.
2278 *
2279 * When do we have orphan inodes:
2280 * 1. When an inode is freshly created and thus no valid refs are available yet
2281 * 2. When a directory lost all its refs (deleted) but still has dir items
2282 * inside which were not processed yet (pending for move/delete). If anyone
2283 * tried to get the path to the dir items, it would get a path inside that
2284 * orphan directory.
2285 * 3. When an inode is moved around or gets new links, it may overwrite the ref
2286 * of an unprocessed inode. If in that case the first ref would be
2287 * overwritten, the overwritten inode gets "orphanized". Later when we
2288 * process this overwritten inode, it is restored at a new place by moving
2289 * the orphan inode.
2290 *
2291 * sctx->send_progress tells this function at which point in time receiving
2292 * would be.
2293 */
2294 static int get_cur_path(struct send_ctx *sctx, u64 ino, u64 gen,
2295 struct fs_path *dest)
2296 {
2297 int ret = 0;
2298 struct fs_path *name = NULL;
2299 u64 parent_inode = 0;
2300 u64 parent_gen = 0;
2301 int stop = 0;
2302
2303 name = fs_path_alloc();
2304 if (!name) {
2305 ret = -ENOMEM;
2306 goto out;
2307 }
2308
2309 dest->reversed = 1;
2310 fs_path_reset(dest);
2311
2312 while (!stop && ino != BTRFS_FIRST_FREE_OBJECTID) {
2313 struct waiting_dir_move *wdm;
2314
2315 fs_path_reset(name);
2316
2317 if (is_waiting_for_rm(sctx, ino, gen)) {
2318 ret = gen_unique_name(sctx, ino, gen, name);
2319 if (ret < 0)
2320 goto out;
2321 ret = fs_path_add_path(dest, name);
2322 break;
2323 }
2324
2325 wdm = get_waiting_dir_move(sctx, ino);
2326 if (wdm && wdm->orphanized) {
2327 ret = gen_unique_name(sctx, ino, gen, name);
2328 stop = 1;
2329 } else if (wdm) {
2330 ret = get_first_ref(sctx->parent_root, ino,
2331 &parent_inode, &parent_gen, name);
2332 } else {
2333 ret = __get_cur_name_and_parent(sctx, ino, gen,
2334 &parent_inode,
2335 &parent_gen, name);
2336 if (ret)
2337 stop = 1;
2338 }
2339
2340 if (ret < 0)
2341 goto out;
2342
2343 ret = fs_path_add_path(dest, name);
2344 if (ret < 0)
2345 goto out;
2346
2347 ino = parent_inode;
2348 gen = parent_gen;
2349 }
2350
2351 out:
2352 fs_path_free(name);
2353 if (!ret)
2354 fs_path_unreverse(dest);
2355 return ret;
2356 }
2357
2358 /*
2359 * Sends a BTRFS_SEND_C_SUBVOL command/item to userspace
2360 */
2361 static int send_subvol_begin(struct send_ctx *sctx)
2362 {
2363 int ret;
2364 struct btrfs_root *send_root = sctx->send_root;
2365 struct btrfs_root *parent_root = sctx->parent_root;
2366 struct btrfs_path *path;
2367 struct btrfs_key key;
2368 struct btrfs_root_ref *ref;
2369 struct extent_buffer *leaf;
2370 char *name = NULL;
2371 int namelen;
2372
2373 path = btrfs_alloc_path();
2374 if (!path)
2375 return -ENOMEM;
2376
2377 name = kmalloc(BTRFS_PATH_NAME_MAX, GFP_KERNEL);
2378 if (!name) {
2379 btrfs_free_path(path);
2380 return -ENOMEM;
2381 }
2382
2383 key.objectid = send_root->objectid;
2384 key.type = BTRFS_ROOT_BACKREF_KEY;
2385 key.offset = 0;
2386
2387 ret = btrfs_search_slot_for_read(send_root->fs_info->tree_root,
2388 &key, path, 1, 0);
2389 if (ret < 0)
2390 goto out;
2391 if (ret) {
2392 ret = -ENOENT;
2393 goto out;
2394 }
2395
2396 leaf = path->nodes[0];
2397 btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
2398 if (key.type != BTRFS_ROOT_BACKREF_KEY ||
2399 key.objectid != send_root->objectid) {
2400 ret = -ENOENT;
2401 goto out;
2402 }
2403 ref = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_root_ref);
2404 namelen = btrfs_root_ref_name_len(leaf, ref);
2405 read_extent_buffer(leaf, name, (unsigned long)(ref + 1), namelen);
2406 btrfs_release_path(path);
2407
2408 if (parent_root) {
2409 ret = begin_cmd(sctx, BTRFS_SEND_C_SNAPSHOT);
2410 if (ret < 0)
2411 goto out;
2412 } else {
2413 ret = begin_cmd(sctx, BTRFS_SEND_C_SUBVOL);
2414 if (ret < 0)
2415 goto out;
2416 }
2417
2418 TLV_PUT_STRING(sctx, BTRFS_SEND_A_PATH, name, namelen);
2419
2420 if (!btrfs_is_empty_uuid(sctx->send_root->root_item.received_uuid))
2421 TLV_PUT_UUID(sctx, BTRFS_SEND_A_UUID,
2422 sctx->send_root->root_item.received_uuid);
2423 else
2424 TLV_PUT_UUID(sctx, BTRFS_SEND_A_UUID,
2425 sctx->send_root->root_item.uuid);
2426
2427 TLV_PUT_U64(sctx, BTRFS_SEND_A_CTRANSID,
2428 le64_to_cpu(sctx->send_root->root_item.ctransid));
2429 if (parent_root) {
2430 if (!btrfs_is_empty_uuid(parent_root->root_item.received_uuid))
2431 TLV_PUT_UUID(sctx, BTRFS_SEND_A_CLONE_UUID,
2432 parent_root->root_item.received_uuid);
2433 else
2434 TLV_PUT_UUID(sctx, BTRFS_SEND_A_CLONE_UUID,
2435 parent_root->root_item.uuid);
2436 TLV_PUT_U64(sctx, BTRFS_SEND_A_CLONE_CTRANSID,
2437 le64_to_cpu(sctx->parent_root->root_item.ctransid));
2438 }
2439
2440 ret = send_cmd(sctx);
2441
2442 tlv_put_failure:
2443 out:
2444 btrfs_free_path(path);
2445 kfree(name);
2446 return ret;
2447 }
2448
2449 static int send_truncate(struct send_ctx *sctx, u64 ino, u64 gen, u64 size)
2450 {
2451 struct btrfs_fs_info *fs_info = sctx->send_root->fs_info;
2452 int ret = 0;
2453 struct fs_path *p;
2454
2455 btrfs_debug(fs_info, "send_truncate %llu size=%llu", ino, size);
2456
2457 p = fs_path_alloc();
2458 if (!p)
2459 return -ENOMEM;
2460
2461 ret = begin_cmd(sctx, BTRFS_SEND_C_TRUNCATE);
2462 if (ret < 0)
2463 goto out;
2464
2465 ret = get_cur_path(sctx, ino, gen, p);
2466 if (ret < 0)
2467 goto out;
2468 TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, p);
2469 TLV_PUT_U64(sctx, BTRFS_SEND_A_SIZE, size);
2470
2471 ret = send_cmd(sctx);
2472
2473 tlv_put_failure:
2474 out:
2475 fs_path_free(p);
2476 return ret;
2477 }
2478
2479 static int send_chmod(struct send_ctx *sctx, u64 ino, u64 gen, u64 mode)
2480 {
2481 struct btrfs_fs_info *fs_info = sctx->send_root->fs_info;
2482 int ret = 0;
2483 struct fs_path *p;
2484
2485 btrfs_debug(fs_info, "send_chmod %llu mode=%llu", ino, mode);
2486
2487 p = fs_path_alloc();
2488 if (!p)
2489 return -ENOMEM;
2490
2491 ret = begin_cmd(sctx, BTRFS_SEND_C_CHMOD);
2492 if (ret < 0)
2493 goto out;
2494
2495 ret = get_cur_path(sctx, ino, gen, p);
2496 if (ret < 0)
2497 goto out;
2498 TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, p);
2499 TLV_PUT_U64(sctx, BTRFS_SEND_A_MODE, mode & 07777);
2500
2501 ret = send_cmd(sctx);
2502
2503 tlv_put_failure:
2504 out:
2505 fs_path_free(p);
2506 return ret;
2507 }
2508
2509 static int send_chown(struct send_ctx *sctx, u64 ino, u64 gen, u64 uid, u64 gid)
2510 {
2511 struct btrfs_fs_info *fs_info = sctx->send_root->fs_info;
2512 int ret = 0;
2513 struct fs_path *p;
2514
2515 btrfs_debug(fs_info, "send_chown %llu uid=%llu, gid=%llu",
2516 ino, uid, gid);
2517
2518 p = fs_path_alloc();
2519 if (!p)
2520 return -ENOMEM;
2521
2522 ret = begin_cmd(sctx, BTRFS_SEND_C_CHOWN);
2523 if (ret < 0)
2524 goto out;
2525
2526 ret = get_cur_path(sctx, ino, gen, p);
2527 if (ret < 0)
2528 goto out;
2529 TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, p);
2530 TLV_PUT_U64(sctx, BTRFS_SEND_A_UID, uid);
2531 TLV_PUT_U64(sctx, BTRFS_SEND_A_GID, gid);
2532
2533 ret = send_cmd(sctx);
2534
2535 tlv_put_failure:
2536 out:
2537 fs_path_free(p);
2538 return ret;
2539 }
2540
2541 static int send_utimes(struct send_ctx *sctx, u64 ino, u64 gen)
2542 {
2543 struct btrfs_fs_info *fs_info = sctx->send_root->fs_info;
2544 int ret = 0;
2545 struct fs_path *p = NULL;
2546 struct btrfs_inode_item *ii;
2547 struct btrfs_path *path = NULL;
2548 struct extent_buffer *eb;
2549 struct btrfs_key key;
2550 int slot;
2551
2552 btrfs_debug(fs_info, "send_utimes %llu", ino);
2553
2554 p = fs_path_alloc();
2555 if (!p)
2556 return -ENOMEM;
2557
2558 path = alloc_path_for_send();
2559 if (!path) {
2560 ret = -ENOMEM;
2561 goto out;
2562 }
2563
2564 key.objectid = ino;
2565 key.type = BTRFS_INODE_ITEM_KEY;
2566 key.offset = 0;
2567 ret = btrfs_search_slot(NULL, sctx->send_root, &key, path, 0, 0);
2568 if (ret > 0)
2569 ret = -ENOENT;
2570 if (ret < 0)
2571 goto out;
2572
2573 eb = path->nodes[0];
2574 slot = path->slots[0];
2575 ii = btrfs_item_ptr(eb, slot, struct btrfs_inode_item);
2576
2577 ret = begin_cmd(sctx, BTRFS_SEND_C_UTIMES);
2578 if (ret < 0)
2579 goto out;
2580
2581 ret = get_cur_path(sctx, ino, gen, p);
2582 if (ret < 0)
2583 goto out;
2584 TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, p);
2585 TLV_PUT_BTRFS_TIMESPEC(sctx, BTRFS_SEND_A_ATIME, eb, &ii->atime);
2586 TLV_PUT_BTRFS_TIMESPEC(sctx, BTRFS_SEND_A_MTIME, eb, &ii->mtime);
2587 TLV_PUT_BTRFS_TIMESPEC(sctx, BTRFS_SEND_A_CTIME, eb, &ii->ctime);
2588 /* TODO Add otime support when the otime patches get into upstream */
2589
2590 ret = send_cmd(sctx);
2591
2592 tlv_put_failure:
2593 out:
2594 fs_path_free(p);
2595 btrfs_free_path(path);
2596 return ret;
2597 }
2598
2599 /*
2600 * Sends a BTRFS_SEND_C_MKXXX or SYMLINK command to user space. We don't have
2601 * a valid path yet because we did not process the refs yet. So, the inode
2602 * is created as orphan.
2603 */
2604 static int send_create_inode(struct send_ctx *sctx, u64 ino)
2605 {
2606 struct btrfs_fs_info *fs_info = sctx->send_root->fs_info;
2607 int ret = 0;
2608 struct fs_path *p;
2609 int cmd;
2610 u64 gen;
2611 u64 mode;
2612 u64 rdev;
2613
2614 btrfs_debug(fs_info, "send_create_inode %llu", ino);
2615
2616 p = fs_path_alloc();
2617 if (!p)
2618 return -ENOMEM;
2619
2620 if (ino != sctx->cur_ino) {
2621 ret = get_inode_info(sctx->send_root, ino, NULL, &gen, &mode,
2622 NULL, NULL, &rdev);
2623 if (ret < 0)
2624 goto out;
2625 } else {
2626 gen = sctx->cur_inode_gen;
2627 mode = sctx->cur_inode_mode;
2628 rdev = sctx->cur_inode_rdev;
2629 }
2630
2631 if (S_ISREG(mode)) {
2632 cmd = BTRFS_SEND_C_MKFILE;
2633 } else if (S_ISDIR(mode)) {
2634 cmd = BTRFS_SEND_C_MKDIR;
2635 } else if (S_ISLNK(mode)) {
2636 cmd = BTRFS_SEND_C_SYMLINK;
2637 } else if (S_ISCHR(mode) || S_ISBLK(mode)) {
2638 cmd = BTRFS_SEND_C_MKNOD;
2639 } else if (S_ISFIFO(mode)) {
2640 cmd = BTRFS_SEND_C_MKFIFO;
2641 } else if (S_ISSOCK(mode)) {
2642 cmd = BTRFS_SEND_C_MKSOCK;
2643 } else {
2644 btrfs_warn(sctx->send_root->fs_info, "unexpected inode type %o",
2645 (int)(mode & S_IFMT));
2646 ret = -EOPNOTSUPP;
2647 goto out;
2648 }
2649
2650 ret = begin_cmd(sctx, cmd);
2651 if (ret < 0)
2652 goto out;
2653
2654 ret = gen_unique_name(sctx, ino, gen, p);
2655 if (ret < 0)
2656 goto out;
2657
2658 TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, p);
2659 TLV_PUT_U64(sctx, BTRFS_SEND_A_INO, ino);
2660
2661 if (S_ISLNK(mode)) {
2662 fs_path_reset(p);
2663 ret = read_symlink(sctx->send_root, ino, p);
2664 if (ret < 0)
2665 goto out;
2666 TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH_LINK, p);
2667 } else if (S_ISCHR(mode) || S_ISBLK(mode) ||
2668 S_ISFIFO(mode) || S_ISSOCK(mode)) {
2669 TLV_PUT_U64(sctx, BTRFS_SEND_A_RDEV, new_encode_dev(rdev));
2670 TLV_PUT_U64(sctx, BTRFS_SEND_A_MODE, mode);
2671 }
2672
2673 ret = send_cmd(sctx);
2674 if (ret < 0)
2675 goto out;
2676
2677
2678 tlv_put_failure:
2679 out:
2680 fs_path_free(p);
2681 return ret;
2682 }
2683
2684 /*
2685 * We need some special handling for inodes that get processed before the parent
2686 * directory got created. See process_recorded_refs for details.
2687 * This function checks if we already created the dir out of order.
2688 */
2689 static int did_create_dir(struct send_ctx *sctx, u64 dir)
2690 {
2691 int ret = 0;
2692 struct btrfs_path *path = NULL;
2693 struct btrfs_key key;
2694 struct btrfs_key found_key;
2695 struct btrfs_key di_key;
2696 struct extent_buffer *eb;
2697 struct btrfs_dir_item *di;
2698 int slot;
2699
2700 path = alloc_path_for_send();
2701 if (!path) {
2702 ret = -ENOMEM;
2703 goto out;
2704 }
2705
2706 key.objectid = dir;
2707 key.type = BTRFS_DIR_INDEX_KEY;
2708 key.offset = 0;
2709 ret = btrfs_search_slot(NULL, sctx->send_root, &key, path, 0, 0);
2710 if (ret < 0)
2711 goto out;
2712
2713 while (1) {
2714 eb = path->nodes[0];
2715 slot = path->slots[0];
2716 if (slot >= btrfs_header_nritems(eb)) {
2717 ret = btrfs_next_leaf(sctx->send_root, path);
2718 if (ret < 0) {
2719 goto out;
2720 } else if (ret > 0) {
2721 ret = 0;
2722 break;
2723 }
2724 continue;
2725 }
2726
2727 btrfs_item_key_to_cpu(eb, &found_key, slot);
2728 if (found_key.objectid != key.objectid ||
2729 found_key.type != key.type) {
2730 ret = 0;
2731 goto out;
2732 }
2733
2734 di = btrfs_item_ptr(eb, slot, struct btrfs_dir_item);
2735 btrfs_dir_item_key_to_cpu(eb, di, &di_key);
2736
2737 if (di_key.type != BTRFS_ROOT_ITEM_KEY &&
2738 di_key.objectid < sctx->send_progress) {
2739 ret = 1;
2740 goto out;
2741 }
2742
2743 path->slots[0]++;
2744 }
2745
2746 out:
2747 btrfs_free_path(path);
2748 return ret;
2749 }
2750
2751 /*
2752 * Only creates the inode if it is:
2753 * 1. Not a directory
2754 * 2. Or a directory which was not created already due to out of order
2755 * directories. See did_create_dir and process_recorded_refs for details.
2756 */
2757 static int send_create_inode_if_needed(struct send_ctx *sctx)
2758 {
2759 int ret;
2760
2761 if (S_ISDIR(sctx->cur_inode_mode)) {
2762 ret = did_create_dir(sctx, sctx->cur_ino);
2763 if (ret < 0)
2764 goto out;
2765 if (ret) {
2766 ret = 0;
2767 goto out;
2768 }
2769 }
2770
2771 ret = send_create_inode(sctx, sctx->cur_ino);
2772 if (ret < 0)
2773 goto out;
2774
2775 out:
2776 return ret;
2777 }
2778
2779 struct recorded_ref {
2780 struct list_head list;
2781 char *name;
2782 struct fs_path *full_path;
2783 u64 dir;
2784 u64 dir_gen;
2785 int name_len;
2786 };
2787
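/*
 * Point a recorded_ref at the given full path and derive the ref's base name
 * and name length from the last path component.
 */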
2788 static void set_ref_path(struct recorded_ref *ref, struct fs_path *path)
2789 {
2790 ref->full_path = path;
2791 ref->name = (char *)kbasename(ref->full_path->start);
2792 ref->name_len = ref->full_path->end - ref->name;
2793 }
2794
2795 /*
2796 * We need to process new refs before deleted refs, but compare_tree gives us
2797 * everything mixed. So we first record all refs and later process them.
2798 * This function is a helper to record one ref.
2799 */
2800 static int __record_ref(struct list_head *head, u64 dir,
2801 u64 dir_gen, struct fs_path *path)
2802 {
2803 struct recorded_ref *ref;
2804
2805 ref = kmalloc(sizeof(*ref), GFP_KERNEL);
2806 if (!ref)
2807 return -ENOMEM;
2808
2809 ref->dir = dir;
2810 ref->dir_gen = dir_gen;
2811 set_ref_path(ref, path);
2812 list_add_tail(&ref->list, head);
2813 return 0;
2814 }
2815
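/*
 * Duplicate the dir/dir_gen of a recorded ref onto the given list. The full
 * path is not copied, callers only need the parent directory information for
 * later utimes updates.
 */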
2816 static int dup_ref(struct recorded_ref *ref, struct list_head *list)
2817 {
2818 struct recorded_ref *new;
2819
2820 new = kmalloc(sizeof(*ref), GFP_KERNEL);
2821 if (!new)
2822 return -ENOMEM;
2823
2824 new->dir = ref->dir;
2825 new->dir_gen = ref->dir_gen;
2826 new->full_path = NULL;
2827 INIT_LIST_HEAD(&new->list);
2828 list_add_tail(&new->list, list);
2829 return 0;
2830 }
2831
2832 static void __free_recorded_refs(struct list_head *head)
2833 {
2834 struct recorded_ref *cur;
2835
2836 while (!list_empty(head)) {
2837 cur = list_entry(head->next, struct recorded_ref, list);
2838 fs_path_free(cur->full_path);
2839 list_del(&cur->list);
2840 kfree(cur);
2841 }
2842 }
2843
2844 static void free_recorded_refs(struct send_ctx *sctx)
2845 {
2846 __free_recorded_refs(&sctx->new_refs);
2847 __free_recorded_refs(&sctx->deleted_refs);
2848 }
2849
2850 /*
2851 * Renames/moves a file/dir to its orphan name. Used when the first
2852 * ref of an unprocessed inode gets overwritten and for all non-empty
2853 * directories.
2854 */
2855 static int orphanize_inode(struct send_ctx *sctx, u64 ino, u64 gen,
2856 struct fs_path *path)
2857 {
2858 int ret;
2859 struct fs_path *orphan;
2860
2861 orphan = fs_path_alloc();
2862 if (!orphan)
2863 return -ENOMEM;
2864
2865 ret = gen_unique_name(sctx, ino, gen, orphan);
2866 if (ret < 0)
2867 goto out;
2868
2869 ret = send_rename(sctx, path, orphan);
2870
2871 out:
2872 fs_path_free(orphan);
2873 return ret;
2874 }
2875
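/*
 * Find or insert an orphan_dir_info entry for the given directory inode and
 * generation in the sctx->orphan_dirs rbtree.
 */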
2876 static struct orphan_dir_info *add_orphan_dir_info(struct send_ctx *sctx,
2877 u64 dir_ino, u64 dir_gen)
2878 {
2879 struct rb_node **p = &sctx->orphan_dirs.rb_node;
2880 struct rb_node *parent = NULL;
2881 struct orphan_dir_info *entry, *odi;
2882
2883 while (*p) {
2884 parent = *p;
2885 entry = rb_entry(parent, struct orphan_dir_info, node);
2886 if (dir_ino < entry->ino)
2887 p = &(*p)->rb_left;
2888 else if (dir_ino > entry->ino)
2889 p = &(*p)->rb_right;
2890 else if (dir_gen < entry->gen)
2891 p = &(*p)->rb_left;
2892 else if (dir_gen > entry->gen)
2893 p = &(*p)->rb_right;
2894 else
2895 return entry;
2896 }
2897
2898 odi = kmalloc(sizeof(*odi), GFP_KERNEL);
2899 if (!odi)
2900 return ERR_PTR(-ENOMEM);
2901 odi->ino = dir_ino;
2902 odi->gen = dir_gen;
2903 odi->last_dir_index_offset = 0;
2904
2905 rb_link_node(&odi->node, parent, p);
2906 rb_insert_color(&odi->node, &sctx->orphan_dirs);
2907 return odi;
2908 }
2909
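/*
 * Look up the orphan_dir_info entry for the given directory inode and
 * generation, or return NULL if there is none.
 */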
2910 static struct orphan_dir_info *get_orphan_dir_info(struct send_ctx *sctx,
2911 u64 dir_ino, u64 gen)
2912 {
2913 struct rb_node *n = sctx->orphan_dirs.rb_node;
2914 struct orphan_dir_info *entry;
2915
2916 while (n) {
2917 entry = rb_entry(n, struct orphan_dir_info, node);
2918 if (dir_ino < entry->ino)
2919 n = n->rb_left;
2920 else if (dir_ino > entry->ino)
2921 n = n->rb_right;
2922 else if (gen < entry->gen)
2923 n = n->rb_left;
2924 else if (gen > entry->gen)
2925 n = n->rb_right;
2926 else
2927 return entry;
2928 }
2929 return NULL;
2930 }
2931
2932 static int is_waiting_for_rm(struct send_ctx *sctx, u64 dir_ino, u64 gen)
2933 {
2934 struct orphan_dir_info *odi = get_orphan_dir_info(sctx, dir_ino, gen);
2935
2936 return odi != NULL;
2937 }
2938
2939 static void free_orphan_dir_info(struct send_ctx *sctx,
2940 struct orphan_dir_info *odi)
2941 {
2942 if (!odi)
2943 return;
2944 rb_erase(&odi->node, &sctx->orphan_dirs);
2945 kfree(odi);
2946 }
2947
2948 /*
2949 * Returns 1 if a directory can be removed at this point in time.
2950 * We check this by iterating all dir items and checking if the inode behind
2951 * the dir item was already processed.
2952 */
2953 static int can_rmdir(struct send_ctx *sctx, u64 dir, u64 dir_gen,
2954 u64 send_progress)
2955 {
2956 int ret = 0;
2957 struct btrfs_root *root = sctx->parent_root;
2958 struct btrfs_path *path;
2959 struct btrfs_key key;
2960 struct btrfs_key found_key;
2961 struct btrfs_key loc;
2962 struct btrfs_dir_item *di;
2963 struct orphan_dir_info *odi = NULL;
2964
2965 /*
2966 * Don't try to rmdir the top/root subvolume dir.
2967 */
2968 if (dir == BTRFS_FIRST_FREE_OBJECTID)
2969 return 0;
2970
2971 path = alloc_path_for_send();
2972 if (!path)
2973 return -ENOMEM;
2974
2975 key.objectid = dir;
2976 key.type = BTRFS_DIR_INDEX_KEY;
2977 key.offset = 0;
2978
2979 odi = get_orphan_dir_info(sctx, dir, dir_gen);
2980 if (odi)
2981 key.offset = odi->last_dir_index_offset;
2982
2983 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
2984 if (ret < 0)
2985 goto out;
2986
2987 while (1) {
2988 struct waiting_dir_move *dm;
2989
2990 if (path->slots[0] >= btrfs_header_nritems(path->nodes[0])) {
2991 ret = btrfs_next_leaf(root, path);
2992 if (ret < 0)
2993 goto out;
2994 else if (ret > 0)
2995 break;
2996 continue;
2997 }
2998 btrfs_item_key_to_cpu(path->nodes[0], &found_key,
2999 path->slots[0]);
3000 if (found_key.objectid != key.objectid ||
3001 found_key.type != key.type)
3002 break;
3003
3004 di = btrfs_item_ptr(path->nodes[0], path->slots[0],
3005 struct btrfs_dir_item);
3006 btrfs_dir_item_key_to_cpu(path->nodes[0], di, &loc);
3007
3008 dm = get_waiting_dir_move(sctx, loc.objectid);
3009 if (dm) {
3010 odi = add_orphan_dir_info(sctx, dir, dir_gen);
3011 if (IS_ERR(odi)) {
3012 ret = PTR_ERR(odi);
3013 goto out;
3014 }
3015 odi->gen = dir_gen;
3016 odi->last_dir_index_offset = found_key.offset;
3017 dm->rmdir_ino = dir;
3018 dm->rmdir_gen = dir_gen;
3019 ret = 0;
3020 goto out;
3021 }
3022
3023 if (loc.objectid > send_progress) {
3024 odi = add_orphan_dir_info(sctx, dir, dir_gen);
3025 if (IS_ERR(odi)) {
3026 ret = PTR_ERR(odi);
3027 goto out;
3028 }
3029 odi->gen = dir_gen;
3030 odi->last_dir_index_offset = found_key.offset;
3031 ret = 0;
3032 goto out;
3033 }
3034
3035 path->slots[0]++;
3036 }
3037 free_orphan_dir_info(sctx, odi);
3038
3039 ret = 1;
3040
3041 out:
3042 btrfs_free_path(path);
3043 return ret;
3044 }
3045
3046 static int is_waiting_for_move(struct send_ctx *sctx, u64 ino)
3047 {
3048 struct waiting_dir_move *entry = get_waiting_dir_move(sctx, ino);
3049
3050 return entry != NULL;
3051 }
3052
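/*
 * Record that directory inode 'ino' has a pending rename/move. Returns
 * -EEXIST if an entry for the inode already exists.
 */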
3053 static int add_waiting_dir_move(struct send_ctx *sctx, u64 ino, bool orphanized)
3054 {
3055 struct rb_node **p = &sctx->waiting_dir_moves.rb_node;
3056 struct rb_node *parent = NULL;
3057 struct waiting_dir_move *entry, *dm;
3058
3059 dm = kmalloc(sizeof(*dm), GFP_KERNEL);
3060 if (!dm)
3061 return -ENOMEM;
3062 dm->ino = ino;
3063 dm->rmdir_ino = 0;
3064 dm->rmdir_gen = 0;
3065 dm->orphanized = orphanized;
3066
3067 while (*p) {
3068 parent = *p;
3069 entry = rb_entry(parent, struct waiting_dir_move, node);
3070 if (ino < entry->ino) {
3071 p = &(*p)->rb_left;
3072 } else if (ino > entry->ino) {
3073 p = &(*p)->rb_right;
3074 } else {
3075 kfree(dm);
3076 return -EEXIST;
3077 }
3078 }
3079
3080 rb_link_node(&dm->node, parent, p);
3081 rb_insert_color(&dm->node, &sctx->waiting_dir_moves);
3082 return 0;
3083 }
3084
3085 static struct waiting_dir_move *
3086 get_waiting_dir_move(struct send_ctx *sctx, u64 ino)
3087 {
3088 struct rb_node *n = sctx->waiting_dir_moves.rb_node;
3089 struct waiting_dir_move *entry;
3090
3091 while (n) {
3092 entry = rb_entry(n, struct waiting_dir_move, node);
3093 if (ino < entry->ino)
3094 n = n->rb_left;
3095 else if (ino > entry->ino)
3096 n = n->rb_right;
3097 else
3098 return entry;
3099 }
3100 return NULL;
3101 }
3102
3103 static void free_waiting_dir_move(struct send_ctx *sctx,
3104 struct waiting_dir_move *dm)
3105 {
3106 if (!dm)
3107 return;
3108 rb_erase(&dm->node, &sctx->waiting_dir_moves);
3109 kfree(dm);
3110 }
3111
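/*
 * Queue a delayed rename/move of inode 'ino', keyed by the inode
 * (parent_ino) that has to be processed first. The new and deleted refs are
 * duplicated into the pending move so the utimes of the affected parent
 * directories can be updated once the move is applied.
 */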
3112 static int add_pending_dir_move(struct send_ctx *sctx,
3113 u64 ino,
3114 u64 ino_gen,
3115 u64 parent_ino,
3116 struct list_head *new_refs,
3117 struct list_head *deleted_refs,
3118 const bool is_orphan)
3119 {
3120 struct rb_node **p = &sctx->pending_dir_moves.rb_node;
3121 struct rb_node *parent = NULL;
3122 struct pending_dir_move *entry = NULL, *pm;
3123 struct recorded_ref *cur;
3124 int exists = 0;
3125 int ret;
3126
3127 pm = kmalloc(sizeof(*pm), GFP_KERNEL);
3128 if (!pm)
3129 return -ENOMEM;
3130 pm->parent_ino = parent_ino;
3131 pm->ino = ino;
3132 pm->gen = ino_gen;
3133 INIT_LIST_HEAD(&pm->list);
3134 INIT_LIST_HEAD(&pm->update_refs);
3135 RB_CLEAR_NODE(&pm->node);
3136
3137 while (*p) {
3138 parent = *p;
3139 entry = rb_entry(parent, struct pending_dir_move, node);
3140 if (parent_ino < entry->parent_ino) {
3141 p = &(*p)->rb_left;
3142 } else if (parent_ino > entry->parent_ino) {
3143 p = &(*p)->rb_right;
3144 } else {
3145 exists = 1;
3146 break;
3147 }
3148 }
3149
3150 list_for_each_entry(cur, deleted_refs, list) {
3151 ret = dup_ref(cur, &pm->update_refs);
3152 if (ret < 0)
3153 goto out;
3154 }
3155 list_for_each_entry(cur, new_refs, list) {
3156 ret = dup_ref(cur, &pm->update_refs);
3157 if (ret < 0)
3158 goto out;
3159 }
3160
3161 ret = add_waiting_dir_move(sctx, pm->ino, is_orphan);
3162 if (ret)
3163 goto out;
3164
3165 if (exists) {
3166 list_add_tail(&pm->list, &entry->list);
3167 } else {
3168 rb_link_node(&pm->node, parent, p);
3169 rb_insert_color(&pm->node, &sctx->pending_dir_moves);
3170 }
3171 ret = 0;
3172 out:
3173 if (ret) {
3174 __free_recorded_refs(&pm->update_refs);
3175 kfree(pm);
3176 }
3177 return ret;
3178 }
3179
3180 static struct pending_dir_move *get_pending_dir_moves(struct send_ctx *sctx,
3181 u64 parent_ino)
3182 {
3183 struct rb_node *n = sctx->pending_dir_moves.rb_node;
3184 struct pending_dir_move *entry;
3185
3186 while (n) {
3187 entry = rb_entry(n, struct pending_dir_move, node);
3188 if (parent_ino < entry->parent_ino)
3189 n = n->rb_left;
3190 else if (parent_ino > entry->parent_ino)
3191 n = n->rb_right;
3192 else
3193 return entry;
3194 }
3195 return NULL;
3196 }
3197
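/*
 * Walk up from inode 'ino' towards the subvolume root and check if building
 * its current path would loop back to the starting inode because of pending
 * directory moves. Returns 1 (and the offending ancestor in *ancestor_ino)
 * if a loop is found, 0 if not and < 0 on error.
 */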
3198 static int path_loop(struct send_ctx *sctx, struct fs_path *name,
3199 u64 ino, u64 gen, u64 *ancestor_ino)
3200 {
3201 int ret = 0;
3202 u64 parent_inode = 0;
3203 u64 parent_gen = 0;
3204 u64 start_ino = ino;
3205
3206 *ancestor_ino = 0;
3207 while (ino != BTRFS_FIRST_FREE_OBJECTID) {
3208 fs_path_reset(name);
3209
3210 if (is_waiting_for_rm(sctx, ino, gen))
3211 break;
3212 if (is_waiting_for_move(sctx, ino)) {
3213 if (*ancestor_ino == 0)
3214 *ancestor_ino = ino;
3215 ret = get_first_ref(sctx->parent_root, ino,
3216 &parent_inode, &parent_gen, name);
3217 } else {
3218 ret = __get_cur_name_and_parent(sctx, ino, gen,
3219 &parent_inode,
3220 &parent_gen, name);
3221 if (ret > 0) {
3222 ret = 0;
3223 break;
3224 }
3225 }
3226 if (ret < 0)
3227 break;
3228 if (parent_inode == start_ino) {
3229 ret = 1;
3230 if (*ancestor_ino == 0)
3231 *ancestor_ino = ino;
3232 break;
3233 }
3234 ino = parent_inode;
3235 gen = parent_gen;
3236 }
3237 return ret;
3238 }
3239
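/*
 * Issue the delayed rename for a pending directory move, then do any rmdir
 * that was waiting on it and update the utimes of the affected parent
 * directories.
 */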
3240 static int apply_dir_move(struct send_ctx *sctx, struct pending_dir_move *pm)
3241 {
3242 struct fs_path *from_path = NULL;
3243 struct fs_path *to_path = NULL;
3244 struct fs_path *name = NULL;
3245 u64 orig_progress = sctx->send_progress;
3246 struct recorded_ref *cur;
3247 u64 parent_ino, parent_gen;
3248 struct waiting_dir_move *dm = NULL;
3249 u64 rmdir_ino = 0;
3250 u64 rmdir_gen;
3251 u64 ancestor;
3252 bool is_orphan;
3253 int ret;
3254
3255 name = fs_path_alloc();
3256 from_path = fs_path_alloc();
3257 if (!name || !from_path) {
3258 ret = -ENOMEM;
3259 goto out;
3260 }
3261
3262 dm = get_waiting_dir_move(sctx, pm->ino);
3263 ASSERT(dm);
3264 rmdir_ino = dm->rmdir_ino;
3265 rmdir_gen = dm->rmdir_gen;
3266 is_orphan = dm->orphanized;
3267 free_waiting_dir_move(sctx, dm);
3268
3269 if (is_orphan) {
3270 ret = gen_unique_name(sctx, pm->ino,
3271 pm->gen, from_path);
3272 } else {
3273 ret = get_first_ref(sctx->parent_root, pm->ino,
3274 &parent_ino, &parent_gen, name);
3275 if (ret < 0)
3276 goto out;
3277 ret = get_cur_path(sctx, parent_ino, parent_gen,
3278 from_path);
3279 if (ret < 0)
3280 goto out;
3281 ret = fs_path_add_path(from_path, name);
3282 }
3283 if (ret < 0)
3284 goto out;
3285
3286 sctx->send_progress = sctx->cur_ino + 1;
3287 ret = path_loop(sctx, name, pm->ino, pm->gen, &ancestor);
3288 if (ret < 0)
3289 goto out;
3290 if (ret) {
3291 LIST_HEAD(deleted_refs);
3292 ASSERT(ancestor > BTRFS_FIRST_FREE_OBJECTID);
3293 ret = add_pending_dir_move(sctx, pm->ino, pm->gen, ancestor,
3294 &pm->update_refs, &deleted_refs,
3295 is_orphan);
3296 if (ret < 0)
3297 goto out;
3298 if (rmdir_ino) {
3299 dm = get_waiting_dir_move(sctx, pm->ino);
3300 ASSERT(dm);
3301 dm->rmdir_ino = rmdir_ino;
3302 dm->rmdir_gen = rmdir_gen;
3303 }
3304 goto out;
3305 }
3306 fs_path_reset(name);
3307 to_path = name;
3308 name = NULL;
3309 ret = get_cur_path(sctx, pm->ino, pm->gen, to_path);
3310 if (ret < 0)
3311 goto out;
3312
3313 ret = send_rename(sctx, from_path, to_path);
3314 if (ret < 0)
3315 goto out;
3316
3317 if (rmdir_ino) {
3318 struct orphan_dir_info *odi;
3319 u64 gen;
3320
3321 odi = get_orphan_dir_info(sctx, rmdir_ino, rmdir_gen);
3322 if (!odi) {
3323 /* already deleted */
3324 goto finish;
3325 }
3326 gen = odi->gen;
3327
3328 ret = can_rmdir(sctx, rmdir_ino, gen, sctx->cur_ino);
3329 if (ret < 0)
3330 goto out;
3331 if (!ret)
3332 goto finish;
3333
3334 name = fs_path_alloc();
3335 if (!name) {
3336 ret = -ENOMEM;
3337 goto out;
3338 }
3339 ret = get_cur_path(sctx, rmdir_ino, gen, name);
3340 if (ret < 0)
3341 goto out;
3342 ret = send_rmdir(sctx, name);
3343 if (ret < 0)
3344 goto out;
3345 }
3346
3347 finish:
3348 ret = send_utimes(sctx, pm->ino, pm->gen);
3349 if (ret < 0)
3350 goto out;
3351
3352 /*
3353 * After rename/move, need to update the utimes of both new parent(s)
3354 * and old parent(s).
3355 */
3356 list_for_each_entry(cur, &pm->update_refs, list) {
3357 /*
3358 * The parent inode might have been deleted in the send snapshot
3359 */
3360 ret = get_inode_info(sctx->send_root, cur->dir, NULL,
3361 NULL, NULL, NULL, NULL, NULL);
3362 if (ret == -ENOENT) {
3363 ret = 0;
3364 continue;
3365 }
3366 if (ret < 0)
3367 goto out;
3368
3369 ret = send_utimes(sctx, cur->dir, cur->dir_gen);
3370 if (ret < 0)
3371 goto out;
3372 }
3373
3374 out:
3375 fs_path_free(name);
3376 fs_path_free(from_path);
3377 fs_path_free(to_path);
3378 sctx->send_progress = orig_progress;
3379
3380 return ret;
3381 }
3382
3383 static void free_pending_move(struct send_ctx *sctx, struct pending_dir_move *m)
3384 {
3385 if (!list_empty(&m->list))
3386 list_del(&m->list);
3387 if (!RB_EMPTY_NODE(&m->node))
3388 rb_erase(&m->node, &sctx->pending_dir_moves);
3389 __free_recorded_refs(&m->update_refs);
3390 kfree(m);
3391 }
3392
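/*
 * Append a pending move, and any moves chained to it, to the tail of the
 * processing stack and remove it from the pending_dir_moves rbtree.
 */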
3393 static void tail_append_pending_moves(struct send_ctx *sctx,
3394 struct pending_dir_move *moves,
3395 struct list_head *stack)
3396 {
3397 if (list_empty(&moves->list)) {
3398 list_add_tail(&moves->list, stack);
3399 } else {
3400 LIST_HEAD(list);
3401 list_splice_init(&moves->list, &list);
3402 list_add_tail(&moves->list, stack);
3403 list_splice_tail(&list, stack);
3404 }
3405 if (!RB_EMPTY_NODE(&moves->node)) {
3406 rb_erase(&moves->node, &sctx->pending_dir_moves);
3407 RB_CLEAR_NODE(&moves->node);
3408 }
3409 }
3410
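/*
 * Apply all pending directory moves that were waiting for the current inode
 * to be processed, iteratively picking up moves that were in turn waiting for
 * the directories we just moved.
 */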
3411 static int apply_children_dir_moves(struct send_ctx *sctx)
3412 {
3413 struct pending_dir_move *pm;
3414 struct list_head stack;
3415 u64 parent_ino = sctx->cur_ino;
3416 int ret = 0;
3417
3418 pm = get_pending_dir_moves(sctx, parent_ino);
3419 if (!pm)
3420 return 0;
3421
3422 INIT_LIST_HEAD(&stack);
3423 tail_append_pending_moves(sctx, pm, &stack);
3424
3425 while (!list_empty(&stack)) {
3426 pm = list_first_entry(&stack, struct pending_dir_move, list);
3427 parent_ino = pm->ino;
3428 ret = apply_dir_move(sctx, pm);
3429 free_pending_move(sctx, pm);
3430 if (ret)
3431 goto out;
3432 pm = get_pending_dir_moves(sctx, parent_ino);
3433 if (pm)
3434 tail_append_pending_moves(sctx, pm, &stack);
3435 }
3436 return 0;
3437
3438 out:
3439 while (!list_empty(&stack)) {
3440 pm = list_first_entry(&stack, struct pending_dir_move, list);
3441 free_pending_move(sctx, pm);
3442 }
3443 return ret;
3444 }
3445
3446 /*
3447 * We might need to delay a directory rename even when no ancestor directory
3448 * (in the send root) with a higher inode number than ours (sctx->cur_ino) was
3449 * renamed. This happens when we rename a directory to the old name (the name
3450 * in the parent root) of some other unrelated directory that got its rename
3451 * delayed due to some ancestor with higher number that got renamed.
3452 *
3453 * Example:
3454 *
3455 * Parent snapshot:
3456 * . (ino 256)
3457 * |---- a/ (ino 257)
3458 * | |---- file (ino 260)
3459 * |
3460 * |---- b/ (ino 258)
3461 * |---- c/ (ino 259)
3462 *
3463 * Send snapshot:
3464 * . (ino 256)
3465 * |---- a/ (ino 258)
3466 * |---- x/ (ino 259)
3467 * |---- y/ (ino 257)
3468 * |----- file (ino 260)
3469 *
3470 * Here we can not rename 258 from 'b' to 'a' without the rename of inode 257
3471 * from 'a' to 'x/y' happening first, which in turn depends on the rename of
3472 * inode 259 from 'c' to 'x'. So the order of rename commands the send stream
3473 * must issue is:
3474 *
3475 * 1 - rename 259 from 'c' to 'x'
3476 * 2 - rename 257 from 'a' to 'x/y'
3477 * 3 - rename 258 from 'b' to 'a'
3478 *
3479 * Returns 1 if the rename of sctx->cur_ino needs to be delayed, 0 if it can
3480 * be done right away and < 0 on error.
3481 */
3482 static int wait_for_dest_dir_move(struct send_ctx *sctx,
3483 struct recorded_ref *parent_ref,
3484 const bool is_orphan)
3485 {
3486 struct btrfs_fs_info *fs_info = sctx->parent_root->fs_info;
3487 struct btrfs_path *path;
3488 struct btrfs_key key;
3489 struct btrfs_key di_key;
3490 struct btrfs_dir_item *di;
3491 u64 left_gen;
3492 u64 right_gen;
3493 int ret = 0;
3494 struct waiting_dir_move *wdm;
3495
3496 if (RB_EMPTY_ROOT(&sctx->waiting_dir_moves))
3497 return 0;
3498
3499 path = alloc_path_for_send();
3500 if (!path)
3501 return -ENOMEM;
3502
3503 key.objectid = parent_ref->dir;
3504 key.type = BTRFS_DIR_ITEM_KEY;
3505 key.offset = btrfs_name_hash(parent_ref->name, parent_ref->name_len);
3506
3507 ret = btrfs_search_slot(NULL, sctx->parent_root, &key, path, 0, 0);
3508 if (ret < 0) {
3509 goto out;
3510 } else if (ret > 0) {
3511 ret = 0;
3512 goto out;
3513 }
3514
3515 di = btrfs_match_dir_item_name(fs_info, path, parent_ref->name,
3516 parent_ref->name_len);
3517 if (!di) {
3518 ret = 0;
3519 goto out;
3520 }
3521 /*
3522 * di_key.objectid has the number of the inode that has a dentry in the
3523 * parent directory with the same name that sctx->cur_ino is being
3524 * renamed to. We need to check if that inode is in the send root as
3525 * well and if it is currently marked as an inode with a pending rename.
3526 * If it is, we need to delay the rename of sctx->cur_ino as well, so
3527 * that it happens after that other inode is renamed.
3528 */
3529 btrfs_dir_item_key_to_cpu(path->nodes[0], di, &di_key);
3530 if (di_key.type != BTRFS_INODE_ITEM_KEY) {
3531 ret = 0;
3532 goto out;
3533 }
3534
3535 ret = get_inode_info(sctx->parent_root, di_key.objectid, NULL,
3536 &left_gen, NULL, NULL, NULL, NULL);
3537 if (ret < 0)
3538 goto out;
3539 ret = get_inode_info(sctx->send_root, di_key.objectid, NULL,
3540 &right_gen, NULL, NULL, NULL, NULL);
3541 if (ret < 0) {
3542 if (ret == -ENOENT)
3543 ret = 0;
3544 goto out;
3545 }
3546
3547 /* Different inode, no need to delay the rename of sctx->cur_ino */
3548 if (right_gen != left_gen) {
3549 ret = 0;
3550 goto out;
3551 }
3552
3553 wdm = get_waiting_dir_move(sctx, di_key.objectid);
3554 if (wdm && !wdm->orphanized) {
3555 ret = add_pending_dir_move(sctx,
3556 sctx->cur_ino,
3557 sctx->cur_inode_gen,
3558 di_key.objectid,
3559 &sctx->new_refs,
3560 &sctx->deleted_refs,
3561 is_orphan);
3562 if (!ret)
3563 ret = 1;
3564 }
3565 out:
3566 btrfs_free_path(path);
3567 return ret;
3568 }
3569
3570 /*
3571 * Check if inode ino2, or any of its ancestors, is inode ino1.
3572 * Return 1 if true, 0 if false and < 0 on error.
3573 */
3574 static int check_ino_in_path(struct btrfs_root *root,
3575 const u64 ino1,
3576 const u64 ino1_gen,
3577 const u64 ino2,
3578 const u64 ino2_gen,
3579 struct fs_path *fs_path)
3580 {
3581 u64 ino = ino2;
3582
3583 if (ino1 == ino2)
3584 return ino1_gen == ino2_gen;
3585
3586 while (ino > BTRFS_FIRST_FREE_OBJECTID) {
3587 u64 parent;
3588 u64 parent_gen;
3589 int ret;
3590
3591 fs_path_reset(fs_path);
3592 ret = get_first_ref(root, ino, &parent, &parent_gen, fs_path);
3593 if (ret < 0)
3594 return ret;
3595 if (parent == ino1)
3596 return parent_gen == ino1_gen;
3597 ino = parent;
3598 }
3599 return 0;
3600 }
3601
3602 /*
3603 * Check if ino ino1 is an ancestor of inode ino2 in the given root for any
3604 * possible path (in case ino2 is not a directory and has multiple hard links).
3605 * Return 1 if true, 0 if false and < 0 on error.
3606 */
3607 static int is_ancestor(struct btrfs_root *root,
3608 const u64 ino1,
3609 const u64 ino1_gen,
3610 const u64 ino2,
3611 struct fs_path *fs_path)
3612 {
3613 bool free_fs_path = false;
3614 int ret = 0;
3615 struct btrfs_path *path = NULL;
3616 struct btrfs_key key;
3617
3618 if (!fs_path) {
3619 fs_path = fs_path_alloc();
3620 if (!fs_path)
3621 return -ENOMEM;
3622 free_fs_path = true;
3623 }
3624
3625 path = alloc_path_for_send();
3626 if (!path) {
3627 ret = -ENOMEM;
3628 goto out;
3629 }
3630
3631 key.objectid = ino2;
3632 key.type = BTRFS_INODE_REF_KEY;
3633 key.offset = 0;
3634
3635 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
3636 if (ret < 0)
3637 goto out;
3638
3639 while (true) {
3640 struct extent_buffer *leaf = path->nodes[0];
3641 int slot = path->slots[0];
3642 u32 cur_offset = 0;
3643 u32 item_size;
3644
3645 if (slot >= btrfs_header_nritems(leaf)) {
3646 ret = btrfs_next_leaf(root, path);
3647 if (ret < 0)
3648 goto out;
3649 if (ret > 0)
3650 break;
3651 continue;
3652 }
3653
3654 btrfs_item_key_to_cpu(leaf, &key, slot);
3655 if (key.objectid != ino2)
3656 break;
3657 if (key.type != BTRFS_INODE_REF_KEY &&
3658 key.type != BTRFS_INODE_EXTREF_KEY)
3659 break;
3660
3661 item_size = btrfs_item_size_nr(leaf, slot);
3662 while (cur_offset < item_size) {
3663 u64 parent;
3664 u64 parent_gen;
3665
3666 if (key.type == BTRFS_INODE_EXTREF_KEY) {
3667 unsigned long ptr;
3668 struct btrfs_inode_extref *extref;
3669
3670 ptr = btrfs_item_ptr_offset(leaf, slot);
3671 extref = (struct btrfs_inode_extref *)
3672 (ptr + cur_offset);
3673 parent = btrfs_inode_extref_parent(leaf,
3674 extref);
3675 cur_offset += sizeof(*extref);
3676 cur_offset += btrfs_inode_extref_name_len(leaf,
3677 extref);
3678 } else {
3679 parent = key.offset;
3680 cur_offset = item_size;
3681 }
3682
3683 ret = get_inode_info(root, parent, NULL, &parent_gen,
3684 NULL, NULL, NULL, NULL);
3685 if (ret < 0)
3686 goto out;
3687 ret = check_ino_in_path(root, ino1, ino1_gen,
3688 parent, parent_gen, fs_path);
3689 if (ret)
3690 goto out;
3691 }
3692 path->slots[0]++;
3693 }
3694 ret = 0;
3695 out:
3696 btrfs_free_path(path);
3697 if (free_fs_path)
3698 fs_path_free(fs_path);
3699 return ret;
3700 }
3701
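/*
 * Check if the rename/move of the current inode needs to wait until some
 * ancestor (in the parent root) is renamed/moved first. Returns 1 if a
 * pending dir move was queued and the rename has to be delayed, 0 if it can
 * be done right away and < 0 on error.
 */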
3702 static int wait_for_parent_move(struct send_ctx *sctx,
3703 struct recorded_ref *parent_ref,
3704 const bool is_orphan)
3705 {
3706 int ret = 0;
3707 u64 ino = parent_ref->dir;
3708 u64 ino_gen = parent_ref->dir_gen;
3709 u64 parent_ino_before, parent_ino_after;
3710 struct fs_path *path_before = NULL;
3711 struct fs_path *path_after = NULL;
3712 int len1, len2;
3713
3714 path_after = fs_path_alloc();
3715 path_before = fs_path_alloc();
3716 if (!path_after || !path_before) {
3717 ret = -ENOMEM;
3718 goto out;
3719 }
3720
3721 /*
3722 * Our current directory inode may not yet be renamed/moved because some
3723 * ancestor (immediate or not) has to be renamed/moved first. So find if
3724 * such ancestor exists and make sure our own rename/move happens after
3725 * that ancestor is processed to avoid path build infinite loops (done
3726 * at get_cur_path()).
3727 */
3728 while (ino > BTRFS_FIRST_FREE_OBJECTID) {
3729 u64 parent_ino_after_gen;
3730
3731 if (is_waiting_for_move(sctx, ino)) {
3732 /*
3733 * If the current inode is an ancestor of ino in the
3734 * parent root, we need to delay the rename of the
3735 * current inode, otherwise don't delay the rename
3736 * because we can end up with a circular dependency
3737 * of renames, resulting in some directories never
3738 * getting the respective rename operations issued in
3739 * the send stream or getting into infinite path build
3740 * loops.
3741 */
3742 ret = is_ancestor(sctx->parent_root,
3743 sctx->cur_ino, sctx->cur_inode_gen,
3744 ino, path_before);
3745 if (ret)
3746 break;
3747 }
3748
3749 fs_path_reset(path_before);
3750 fs_path_reset(path_after);
3751
3752 ret = get_first_ref(sctx->send_root, ino, &parent_ino_after,
3753 &parent_ino_after_gen, path_after);
3754 if (ret < 0)
3755 goto out;
3756 ret = get_first_ref(sctx->parent_root, ino, &parent_ino_before,
3757 NULL, path_before);
3758 if (ret < 0 && ret != -ENOENT) {
3759 goto out;
3760 } else if (ret == -ENOENT) {
3761 ret = 0;
3762 break;
3763 }
3764
3765 len1 = fs_path_len(path_before);
3766 len2 = fs_path_len(path_after);
3767 if (ino > sctx->cur_ino &&
3768 (parent_ino_before != parent_ino_after || len1 != len2 ||
3769 memcmp(path_before->start, path_after->start, len1))) {
3770 u64 parent_ino_gen;
3771
3772 ret = get_inode_info(sctx->parent_root, ino, NULL,
3773 &parent_ino_gen, NULL, NULL, NULL,
3774 NULL);
3775 if (ret < 0)
3776 goto out;
3777 if (ino_gen == parent_ino_gen) {
3778 ret = 1;
3779 break;
3780 }
3781 }
3782 ino = parent_ino_after;
3783 ino_gen = parent_ino_after_gen;
3784 }
3785
3786 out:
3787 fs_path_free(path_before);
3788 fs_path_free(path_after);
3789
3790 if (ret == 1) {
3791 ret = add_pending_dir_move(sctx,
3792 sctx->cur_ino,
3793 sctx->cur_inode_gen,
3794 ino,
3795 &sctx->new_refs,
3796 &sctx->deleted_refs,
3797 is_orphan);
3798 if (!ret)
3799 ret = 1;
3800 }
3801
3802 return ret;
3803 }
3804
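/*
 * Rebuild the full path of a recorded reference from the current path of its
 * parent directory plus the reference's base name.
 */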
3805 static int update_ref_path(struct send_ctx *sctx, struct recorded_ref *ref)
3806 {
3807 int ret;
3808 struct fs_path *new_path;
3809
3810 /*
3811 * Our reference's name member points to its full_path member string, so
3812 * we use a new path here.
3813 */
3814 new_path = fs_path_alloc();
3815 if (!new_path)
3816 return -ENOMEM;
3817
3818 ret = get_cur_path(sctx, ref->dir, ref->dir_gen, new_path);
3819 if (ret < 0) {
3820 fs_path_free(new_path);
3821 return ret;
3822 }
3823 ret = fs_path_add(new_path, ref->name, ref->name_len);
3824 if (ret < 0) {
3825 fs_path_free(new_path);
3826 return ret;
3827 }
3828
3829 fs_path_free(ref->full_path);
3830 set_ref_path(ref, new_path);
3831
3832 return 0;
3833 }
3834
3835 /*
3836 * When processing the new references for an inode we may orphanize an existing
3837 * directory inode because its old name conflicts with one of the new references
3838 * of the current inode. Later, when processing another new reference of our
3839 * inode, we might need to orphanize another inode, but the path we have in the
3840 * reference reflects the pre-orphanization name of the directory we previously
3841 * orphanized. For example:
3842 *
3843 * parent snapshot looks like:
3844 *
3845 * . (ino 256)
3846 * |----- f1 (ino 257)
3847 * |----- f2 (ino 258)
3848 * |----- d1/ (ino 259)
3849 * |----- d2/ (ino 260)
3850 *
3851 * send snapshot looks like:
3852 *
3853 * . (ino 256)
3854 * |----- d1 (ino 258)
3855 * |----- f2/ (ino 259)
3856 * |----- f2_link/ (ino 260)
3857 * | |----- f1 (ino 257)
3858 * |
3859 * |----- d2 (ino 258)
3860 *
3861 * When processing inode 257 we compute the name for inode 259 as "d1", and we
3862 * cache it in the name cache. Later when we start processing inode 258, when
3863 * collecting all its new references we set a full path of "d1/d2" for its new
3864 * reference with name "d2". When we start processing the new references we
3865 * start by processing the new reference with name "d1", and this results in
3866 * orphanizing inode 259, since its old reference causes a conflict. Then we
3867 * move on the next new reference, with name "d2", and we find out we must
3868 * orphanize inode 260, as its old reference conflicts with ours - but for the
3869 * orphanization we use a source path corresponding to the path we stored in the
3870 * new reference, which is "d1/d2" and not "o259-6-0/d2" - this makes the
3871 * receiver fail since the path component "d1/" no longer exists, it was renamed
3872 * to "o259-6-0/" when processing the previous new reference. So in this case we
3873 * must recompute the path in the new reference and use it for the new
3874 * orphanization operation.
3875 */
3876 static int refresh_ref_path(struct send_ctx *sctx, struct recorded_ref *ref)
3877 {
3878 char *name;
3879 int ret;
3880
3881 name = kmemdup(ref->name, ref->name_len, GFP_KERNEL);
3882 if (!name)
3883 return -ENOMEM;
3884
3885 fs_path_reset(ref->full_path);
3886 ret = get_cur_path(sctx, ref->dir, ref->dir_gen, ref->full_path);
3887 if (ret < 0)
3888 goto out;
3889
3890 ret = fs_path_add(ref->full_path, name, ref->name_len);
3891 if (ret < 0)
3892 goto out;
3893
3894 /* Update the reference's base name pointer. */
3895 set_ref_path(ref, ref->full_path);
3896 out:
3897 kfree(name);
3898 return ret;
3899 }
3900
3901 /*
3902 * This does all the move/link/unlink/rmdir magic.
3903 */
3904 static int process_recorded_refs(struct send_ctx *sctx, int *pending_move)
3905 {
3906 struct btrfs_fs_info *fs_info = sctx->send_root->fs_info;
3907 int ret = 0;
3908 struct recorded_ref *cur;
3909 struct recorded_ref *cur2;
3910 struct list_head check_dirs;
3911 struct fs_path *valid_path = NULL;
3912 u64 ow_inode = 0;
3913 u64 ow_gen;
3914 u64 ow_mode;
3915 int did_overwrite = 0;
3916 int is_orphan = 0;
3917 u64 last_dir_ino_rm = 0;
3918 bool can_rename = true;
3919 bool orphanized_dir = false;
3920 bool orphanized_ancestor = false;
3921
3922 btrfs_debug(fs_info, "process_recorded_refs %llu", sctx->cur_ino);
3923
3924 /*
3925 * This should never happen as the root dir always has the same ref
3926 * which is always '..'
3927 */
3928 BUG_ON(sctx->cur_ino <= BTRFS_FIRST_FREE_OBJECTID);
3929 INIT_LIST_HEAD(&check_dirs);
3930
3931 valid_path = fs_path_alloc();
3932 if (!valid_path) {
3933 ret = -ENOMEM;
3934 goto out;
3935 }
3936
3937 /*
3938 * First, check if the first ref of the current inode was overwritten
3939 * before. If yes, we know that the current inode was already orphanized
3940 * and thus use the orphan name. If not, we can use get_cur_path to
3941 * get the path of the first ref as it would look like while receiving at
3942 * this point in time.
3943 * New inodes are always orphan at the beginning, so force to use the
3944 * orphan name in this case.
3945 * The first ref is stored in valid_path and will be updated if it
3946 * gets moved around.
3947 */
3948 if (!sctx->cur_inode_new) {
3949 ret = did_overwrite_first_ref(sctx, sctx->cur_ino,
3950 sctx->cur_inode_gen);
3951 if (ret < 0)
3952 goto out;
3953 if (ret)
3954 did_overwrite = 1;
3955 }
3956 if (sctx->cur_inode_new || did_overwrite) {
3957 ret = gen_unique_name(sctx, sctx->cur_ino,
3958 sctx->cur_inode_gen, valid_path);
3959 if (ret < 0)
3960 goto out;
3961 is_orphan = 1;
3962 } else {
3963 ret = get_cur_path(sctx, sctx->cur_ino, sctx->cur_inode_gen,
3964 valid_path);
3965 if (ret < 0)
3966 goto out;
3967 }
3968
3969 list_for_each_entry(cur, &sctx->new_refs, list) {
3970 /*
3971 * We may have refs where the parent directory does not exist
3972 * yet. This happens if the parent directory's inum is higher
3973 * than the current inum. To handle this case, we create the
3974 * parent directory out of order. But we need to check if this
3975 * did already happen before due to other refs in the same dir.
3976 */
3977 ret = get_cur_inode_state(sctx, cur->dir, cur->dir_gen);
3978 if (ret < 0)
3979 goto out;
3980 if (ret == inode_state_will_create) {
3981 ret = 0;
3982 /*
3983 * First check if any of the current inodes refs did
3984 * already create the dir.
3985 */
3986 list_for_each_entry(cur2, &sctx->new_refs, list) {
3987 if (cur == cur2)
3988 break;
3989 if (cur2->dir == cur->dir) {
3990 ret = 1;
3991 break;
3992 }
3993 }
3994
3995 /*
3996 * If that did not happen, check if a previous inode
3997 * did already create the dir.
3998 */
3999 if (!ret)
4000 ret = did_create_dir(sctx, cur->dir);
4001 if (ret < 0)
4002 goto out;
4003 if (!ret) {
4004 ret = send_create_inode(sctx, cur->dir);
4005 if (ret < 0)
4006 goto out;
4007 }
4008 }
4009
4010 /*
4011 * Check if this new ref would overwrite the first ref of
4012 * another unprocessed inode. If yes, orphanize the
4013 * overwritten inode. If we find an overwritten ref that is
4014 * not the first ref, simply unlink it.
4015 */
4016 ret = will_overwrite_ref(sctx, cur->dir, cur->dir_gen,
4017 cur->name, cur->name_len,
4018 &ow_inode, &ow_gen, &ow_mode);
4019 if (ret < 0)
4020 goto out;
4021 if (ret) {
4022 ret = is_first_ref(sctx->parent_root,
4023 ow_inode, cur->dir, cur->name,
4024 cur->name_len);
4025 if (ret < 0)
4026 goto out;
4027 if (ret) {
4028 struct name_cache_entry *nce;
4029 struct waiting_dir_move *wdm;
4030
4031 if (orphanized_dir) {
4032 ret = refresh_ref_path(sctx, cur);
4033 if (ret < 0)
4034 goto out;
4035 }
4036
4037 ret = orphanize_inode(sctx, ow_inode, ow_gen,
4038 cur->full_path);
4039 if (ret < 0)
4040 goto out;
4041 if (S_ISDIR(ow_mode))
4042 orphanized_dir = true;
4043
4044 /*
4045 * If ow_inode has its rename operation delayed
4046 * make sure that its orphanized name is used in
4047 * the source path when performing its rename
4048 * operation.
4049 */
4050 if (is_waiting_for_move(sctx, ow_inode)) {
4051 wdm = get_waiting_dir_move(sctx,
4052 ow_inode);
4053 ASSERT(wdm);
4054 wdm->orphanized = true;
4055 }
4056
4057 /*
4058 * Make sure we clear our orphanized inode's
4059 * name from the name cache. This is because the
4060 * inode ow_inode might be an ancestor of some
4061 * other inode that will be orphanized as well
4062 * later and has an inode number greater than
4063 * sctx->send_progress. We need to prevent
4064 * future name lookups from using the old name
4065 * and get instead the orphan name.
4066 */
4067 nce = name_cache_search(sctx, ow_inode, ow_gen);
4068 if (nce) {
4069 name_cache_delete(sctx, nce);
4070 kfree(nce);
4071 }
4072
4073 /*
4074 * ow_inode might currently be an ancestor of
4075 * cur_ino, therefore compute valid_path (the
4076 * current path of cur_ino) again because it
4077 * might contain the pre-orphanization name of
4078 * ow_inode, which is no longer valid.
4079 */
4080 ret = is_ancestor(sctx->parent_root,
4081 ow_inode, ow_gen,
4082 sctx->cur_ino, NULL);
4083 if (ret > 0) {
4084 orphanized_ancestor = true;
4085 fs_path_reset(valid_path);
4086 ret = get_cur_path(sctx, sctx->cur_ino,
4087 sctx->cur_inode_gen,
4088 valid_path);
4089 }
4090 if (ret < 0)
4091 goto out;
4092 } else {
4093 /*
4094 * If we previously orphanized a directory that
4095 * collided with a new reference that we already
4096 * processed, recompute the current path because
4097 * that directory may be part of the path.
4098 */
4099 if (orphanized_dir) {
4100 ret = refresh_ref_path(sctx, cur);
4101 if (ret < 0)
4102 goto out;
4103 }
4104 ret = send_unlink(sctx, cur->full_path);
4105 if (ret < 0)
4106 goto out;
4107 }
4108 }
4109
4110 if (S_ISDIR(sctx->cur_inode_mode) && sctx->parent_root) {
4111 ret = wait_for_dest_dir_move(sctx, cur, is_orphan);
4112 if (ret < 0)
4113 goto out;
4114 if (ret == 1) {
4115 can_rename = false;
4116 *pending_move = 1;
4117 }
4118 }
4119
4120 if (S_ISDIR(sctx->cur_inode_mode) && sctx->parent_root &&
4121 can_rename) {
4122 ret = wait_for_parent_move(sctx, cur, is_orphan);
4123 if (ret < 0)
4124 goto out;
4125 if (ret == 1) {
4126 can_rename = false;
4127 *pending_move = 1;
4128 }
4129 }
4130
4131 /*
4132 * link/move the ref to the new place. If we have an orphan
4133 * inode, move it and update valid_path. If not, link or move
4134 * it depending on the inode mode.
4135 */
4136 if (is_orphan && can_rename) {
4137 ret = send_rename(sctx, valid_path, cur->full_path);
4138 if (ret < 0)
4139 goto out;
4140 is_orphan = 0;
4141 ret = fs_path_copy(valid_path, cur->full_path);
4142 if (ret < 0)
4143 goto out;
4144 } else if (can_rename) {
4145 if (S_ISDIR(sctx->cur_inode_mode)) {
4146 /*
4147 * Dirs can't be linked, so move it. For moved
4148 * dirs, we always have one new and one deleted
4149 * ref. The deleted ref is ignored later.
4150 */
4151 ret = send_rename(sctx, valid_path,
4152 cur->full_path);
4153 if (!ret)
4154 ret = fs_path_copy(valid_path,
4155 cur->full_path);
4156 if (ret < 0)
4157 goto out;
4158 } else {
4159 /*
4160 * We might have previously orphanized an inode
4161 * which is an ancestor of our current inode,
4162 * so our reference's full path, which was
4163 * computed before any such orphanizations, must
4164 * be updated.
4165 */
4166 if (orphanized_dir) {
4167 ret = update_ref_path(sctx, cur);
4168 if (ret < 0)
4169 goto out;
4170 }
4171 ret = send_link(sctx, cur->full_path,
4172 valid_path);
4173 if (ret < 0)
4174 goto out;
4175 }
4176 }
4177 ret = dup_ref(cur, &check_dirs);
4178 if (ret < 0)
4179 goto out;
4180 }
4181
4182 if (S_ISDIR(sctx->cur_inode_mode) && sctx->cur_inode_deleted) {
4183 /*
4184 * Check if we can already rmdir the directory. If not,
4185 * orphanize it. For every dir item inside that gets deleted
4186 * later, we do this check again and rmdir it then if possible.
4187 * See the use of check_dirs for more details.
4188 */
4189 ret = can_rmdir(sctx, sctx->cur_ino, sctx->cur_inode_gen,
4190 sctx->cur_ino);
4191 if (ret < 0)
4192 goto out;
4193 if (ret) {
4194 ret = send_rmdir(sctx, valid_path);
4195 if (ret < 0)
4196 goto out;
4197 } else if (!is_orphan) {
4198 ret = orphanize_inode(sctx, sctx->cur_ino,
4199 sctx->cur_inode_gen, valid_path);
4200 if (ret < 0)
4201 goto out;
4202 is_orphan = 1;
4203 }
4204
4205 list_for_each_entry(cur, &sctx->deleted_refs, list) {
4206 ret = dup_ref(cur, &check_dirs);
4207 if (ret < 0)
4208 goto out;
4209 }
4210 } else if (S_ISDIR(sctx->cur_inode_mode) &&
4211 !list_empty(&sctx->deleted_refs)) {
4212 /*
4213 * We have a moved dir. Add the old parent to check_dirs
4214 */
4215 cur = list_entry(sctx->deleted_refs.next, struct recorded_ref,
4216 list);
4217 ret = dup_ref(cur, &check_dirs);
4218 if (ret < 0)
4219 goto out;
4220 } else if (!S_ISDIR(sctx->cur_inode_mode)) {
4221 /*
4222 * We have a non dir inode. Go through all deleted refs and
4223 * unlink them if they were not already overwritten by other
4224 * inodes.
4225 */
4226 list_for_each_entry(cur, &sctx->deleted_refs, list) {
4227 ret = did_overwrite_ref(sctx, cur->dir, cur->dir_gen,
4228 sctx->cur_ino, sctx->cur_inode_gen,
4229 cur->name, cur->name_len);
4230 if (ret < 0)
4231 goto out;
4232 if (!ret) {
4233 /*
4234 * If we orphanized any ancestor before, we need
4235 * to recompute the full path for deleted names,
4236 * since any such path was computed before we
4237 * processed any references and orphanized any
4238 * ancestor inode.
4239 */
4240 if (orphanized_ancestor) {
4241 ret = update_ref_path(sctx, cur);
4242 if (ret < 0)
4243 goto out;
4244 }
4245 ret = send_unlink(sctx, cur->full_path);
4246 if (ret < 0)
4247 goto out;
4248 }
4249 ret = dup_ref(cur, &check_dirs);
4250 if (ret < 0)
4251 goto out;
4252 }
4253 /*
4254 * If the inode is still orphan, unlink the orphan. This may
4255 * happen when a previous inode did overwrite the first ref
4256 * of this inode and no new refs were added for the current
4257 * inode. Unlinking does not mean that the inode is deleted in
4258 * all cases. There may still be links to this inode in other
4259 * places.
4260 */
4261 if (is_orphan) {
4262 ret = send_unlink(sctx, valid_path);
4263 if (ret < 0)
4264 goto out;
4265 }
4266 }
4267
4268 /*
4269 * We did collect all parent dirs where cur_inode was once located. We
4270 * now go through all these dirs and check if they are pending for
4271 * deletion and if it's finally possible to perform the rmdir now.
4272 * We also update the inode stats of the parent dirs here.
4273 */
4274 list_for_each_entry(cur, &check_dirs, list) {
4275 /*
4276 * In case we had refs into dirs that were not processed yet,
4277 * we don't need to do the utime and rmdir logic for these dirs.
4278 * The dir will be processed later.
4279 */
4280 if (cur->dir > sctx->cur_ino)
4281 continue;
4282
4283 ret = get_cur_inode_state(sctx, cur->dir, cur->dir_gen);
4284 if (ret < 0)
4285 goto out;
4286
4287 if (ret == inode_state_did_create ||
4288 ret == inode_state_no_change) {
4289 /* TODO delayed utimes */
4290 ret = send_utimes(sctx, cur->dir, cur->dir_gen);
4291 if (ret < 0)
4292 goto out;
4293 } else if (ret == inode_state_did_delete &&
4294 cur->dir != last_dir_ino_rm) {
4295 ret = can_rmdir(sctx, cur->dir, cur->dir_gen,
4296 sctx->cur_ino);
4297 if (ret < 0)
4298 goto out;
4299 if (ret) {
4300 ret = get_cur_path(sctx, cur->dir,
4301 cur->dir_gen, valid_path);
4302 if (ret < 0)
4303 goto out;
4304 ret = send_rmdir(sctx, valid_path);
4305 if (ret < 0)
4306 goto out;
4307 last_dir_ino_rm = cur->dir;
4308 }
4309 }
4310 }
4311
4312 ret = 0;
4313
4314 out:
4315 __free_recorded_refs(&check_dirs);
4316 free_recorded_refs(sctx);
4317 fs_path_free(valid_path);
4318 return ret;
4319 }
4320
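/*
 * Helper for the ref recording callbacks below: build the full path of a
 * ref (the current path of @dir plus @name) and add it to the given @refs
 * list via __record_ref().
 */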
4321 static int record_ref(struct btrfs_root *root, u64 dir, struct fs_path *name,
4322 void *ctx, struct list_head *refs)
4323 {
4324 int ret = 0;
4325 struct send_ctx *sctx = ctx;
4326 struct fs_path *p;
4327 u64 gen;
4328
4329 p = fs_path_alloc();
4330 if (!p)
4331 return -ENOMEM;
4332
4333 ret = get_inode_info(root, dir, NULL, &gen, NULL, NULL,
4334 NULL, NULL);
4335 if (ret < 0)
4336 goto out;
4337
4338 ret = get_cur_path(sctx, dir, gen, p);
4339 if (ret < 0)
4340 goto out;
4341 ret = fs_path_add_path(p, name);
4342 if (ret < 0)
4343 goto out;
4344
4345 ret = __record_ref(refs, dir, gen, p);
4346
4347 out:
4348 if (ret)
4349 fs_path_free(p);
4350 return ret;
4351 }
4352
4353 static int __record_new_ref(int num, u64 dir, int index,
4354 struct fs_path *name,
4355 void *ctx)
4356 {
4357 struct send_ctx *sctx = ctx;
4358 return record_ref(sctx->send_root, dir, name, ctx, &sctx->new_refs);
4359 }
4360
4361
4362 static int __record_deleted_ref(int num, u64 dir, int index,
4363 struct fs_path *name,
4364 void *ctx)
4365 {
4366 struct send_ctx *sctx = ctx;
4367 return record_ref(sctx->parent_root, dir, name, ctx,
4368 &sctx->deleted_refs);
4369 }
4370
4371 static int record_new_ref(struct send_ctx *sctx)
4372 {
4373 int ret;
4374
4375 ret = iterate_inode_ref(sctx->send_root, sctx->left_path,
4376 sctx->cmp_key, 0, __record_new_ref, sctx);
4377 if (ret < 0)
4378 goto out;
4379 ret = 0;
4380
4381 out:
4382 return ret;
4383 }
4384
4385 static int record_deleted_ref(struct send_ctx *sctx)
4386 {
4387 int ret;
4388
4389 ret = iterate_inode_ref(sctx->parent_root, sctx->right_path,
4390 sctx->cmp_key, 0, __record_deleted_ref, sctx);
4391 if (ret < 0)
4392 goto out;
4393 ret = 0;
4394
4395 out:
4396 return ret;
4397 }
4398
4399 struct find_ref_ctx {
4400 u64 dir;
4401 u64 dir_gen;
4402 struct btrfs_root *root;
4403 struct fs_path *name;
4404 int found_idx;
4405 };
4406
4407 static int __find_iref(int num, u64 dir, int index,
4408 struct fs_path *name,
4409 void *ctx_)
4410 {
4411 struct find_ref_ctx *ctx = ctx_;
4412 u64 dir_gen;
4413 int ret;
4414
4415 if (dir == ctx->dir && fs_path_len(name) == fs_path_len(ctx->name) &&
4416 strncmp(name->start, ctx->name->start, fs_path_len(name)) == 0) {
4417 /*
4418 * To avoid doing extra lookups we'll only do this if everything
4419 * else matches.
4420 */
4421 ret = get_inode_info(ctx->root, dir, NULL, &dir_gen, NULL,
4422 NULL, NULL, NULL);
4423 if (ret)
4424 return ret;
4425 if (dir_gen != ctx->dir_gen)
4426 return 0;
4427 ctx->found_idx = num;
4428 return 1;
4429 }
4430 return 0;
4431 }
4432
4433 static int find_iref(struct btrfs_root *root,
4434 struct btrfs_path *path,
4435 struct btrfs_key *key,
4436 u64 dir, u64 dir_gen, struct fs_path *name)
4437 {
4438 int ret;
4439 struct find_ref_ctx ctx;
4440
4441 ctx.dir = dir;
4442 ctx.name = name;
4443 ctx.dir_gen = dir_gen;
4444 ctx.found_idx = -1;
4445 ctx.root = root;
4446
4447 ret = iterate_inode_ref(root, path, key, 0, __find_iref, &ctx);
4448 if (ret < 0)
4449 return ret;
4450
4451 if (ctx.found_idx == -1)
4452 return -ENOENT;
4453
4454 return ctx.found_idx;
4455 }
4456
4457 static int __record_changed_new_ref(int num, u64 dir, int index,
4458 struct fs_path *name,
4459 void *ctx)
4460 {
4461 u64 dir_gen;
4462 int ret;
4463 struct send_ctx *sctx = ctx;
4464
4465 ret = get_inode_info(sctx->send_root, dir, NULL, &dir_gen, NULL,
4466 NULL, NULL, NULL);
4467 if (ret)
4468 return ret;
4469
4470 ret = find_iref(sctx->parent_root, sctx->right_path,
4471 sctx->cmp_key, dir, dir_gen, name);
4472 if (ret == -ENOENT)
4473 ret = __record_new_ref(num, dir, index, name, sctx);
4474 else if (ret > 0)
4475 ret = 0;
4476
4477 return ret;
4478 }
4479
4480 static int __record_changed_deleted_ref(int num, u64 dir, int index,
4481 struct fs_path *name,
4482 void *ctx)
4483 {
4484 u64 dir_gen;
4485 int ret;
4486 struct send_ctx *sctx = ctx;
4487
4488 ret = get_inode_info(sctx->parent_root, dir, NULL, &dir_gen, NULL,
4489 NULL, NULL, NULL);
4490 if (ret)
4491 return ret;
4492
4493 ret = find_iref(sctx->send_root, sctx->left_path, sctx->cmp_key,
4494 dir, dir_gen, name);
4495 if (ret == -ENOENT)
4496 ret = __record_deleted_ref(num, dir, index, name, sctx);
4497 else if (ret > 0)
4498 ret = 0;
4499
4500 return ret;
4501 }
4502
4503 static int record_changed_ref(struct send_ctx *sctx)
4504 {
4505 int ret = 0;
4506
4507 ret = iterate_inode_ref(sctx->send_root, sctx->left_path,
4508 sctx->cmp_key, 0, __record_changed_new_ref, sctx);
4509 if (ret < 0)
4510 goto out;
4511 ret = iterate_inode_ref(sctx->parent_root, sctx->right_path,
4512 sctx->cmp_key, 0, __record_changed_deleted_ref, sctx);
4513 if (ret < 0)
4514 goto out;
4515 ret = 0;
4516
4517 out:
4518 return ret;
4519 }
4520
4521 /*
4522 * Record and process all refs at once. Needed when an inode changes the
4523 * generation number, which means that it was deleted and recreated.
4524 */
4525 static int process_all_refs(struct send_ctx *sctx,
4526 enum btrfs_compare_tree_result cmd)
4527 {
4528 int ret;
4529 struct btrfs_root *root;
4530 struct btrfs_path *path;
4531 struct btrfs_key key;
4532 struct btrfs_key found_key;
4533 struct extent_buffer *eb;
4534 int slot;
4535 iterate_inode_ref_t cb;
4536 int pending_move = 0;
4537
4538 path = alloc_path_for_send();
4539 if (!path)
4540 return -ENOMEM;
4541
4542 if (cmd == BTRFS_COMPARE_TREE_NEW) {
4543 root = sctx->send_root;
4544 cb = __record_new_ref;
4545 } else if (cmd == BTRFS_COMPARE_TREE_DELETED) {
4546 root = sctx->parent_root;
4547 cb = __record_deleted_ref;
4548 } else {
4549 btrfs_err(sctx->send_root->fs_info,
4550 "Wrong command %d in process_all_refs", cmd);
4551 ret = -EINVAL;
4552 goto out;
4553 }
4554
4555 key.objectid = sctx->cmp_key->objectid;
4556 key.type = BTRFS_INODE_REF_KEY;
4557 key.offset = 0;
4558 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
4559 if (ret < 0)
4560 goto out;
4561
4562 while (1) {
4563 eb = path->nodes[0];
4564 slot = path->slots[0];
4565 if (slot >= btrfs_header_nritems(eb)) {
4566 ret = btrfs_next_leaf(root, path);
4567 if (ret < 0)
4568 goto out;
4569 else if (ret > 0)
4570 break;
4571 continue;
4572 }
4573
4574 btrfs_item_key_to_cpu(eb, &found_key, slot);
4575
4576 if (found_key.objectid != key.objectid ||
4577 (found_key.type != BTRFS_INODE_REF_KEY &&
4578 found_key.type != BTRFS_INODE_EXTREF_KEY))
4579 break;
4580
4581 ret = iterate_inode_ref(root, path, &found_key, 0, cb, sctx);
4582 if (ret < 0)
4583 goto out;
4584
4585 path->slots[0]++;
4586 }
4587 btrfs_release_path(path);
4588
4589 /*
4590 * We don't actually care about pending_move as we are simply
4591 * re-creating this inode and will be rename'ing it into place once we
4592 * rename the parent directory.
4593 */
4594 ret = process_recorded_refs(sctx, &pending_move);
4595 out:
4596 btrfs_free_path(path);
4597 return ret;
4598 }
4599
4600 static int send_set_xattr(struct send_ctx *sctx,
4601 struct fs_path *path,
4602 const char *name, int name_len,
4603 const char *data, int data_len)
4604 {
4605 int ret = 0;
4606
4607 ret = begin_cmd(sctx, BTRFS_SEND_C_SET_XATTR);
4608 if (ret < 0)
4609 goto out;
4610
4611 TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, path);
4612 TLV_PUT_STRING(sctx, BTRFS_SEND_A_XATTR_NAME, name, name_len);
4613 TLV_PUT(sctx, BTRFS_SEND_A_XATTR_DATA, data, data_len);
4614
4615 ret = send_cmd(sctx);
4616
4617 tlv_put_failure:
4618 out:
4619 return ret;
4620 }
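/*
 * Note: begin_cmd()/TLV_PUT*()/send_cmd() serialize the command into the
 * send stream as a command header followed by TLV encoded attributes (see
 * struct btrfs_cmd_header and struct btrfs_tlv_header in send.h).
 */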
4621
4622 static int send_remove_xattr(struct send_ctx *sctx,
4623 struct fs_path *path,
4624 const char *name, int name_len)
4625 {
4626 int ret = 0;
4627
4628 ret = begin_cmd(sctx, BTRFS_SEND_C_REMOVE_XATTR);
4629 if (ret < 0)
4630 goto out;
4631
4632 TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, path);
4633 TLV_PUT_STRING(sctx, BTRFS_SEND_A_XATTR_NAME, name, name_len);
4634
4635 ret = send_cmd(sctx);
4636
4637 tlv_put_failure:
4638 out:
4639 return ret;
4640 }
4641
4642 static int __process_new_xattr(int num, struct btrfs_key *di_key,
4643 const char *name, int name_len,
4644 const char *data, int data_len,
4645 u8 type, void *ctx)
4646 {
4647 int ret;
4648 struct send_ctx *sctx = ctx;
4649 struct fs_path *p;
4650 struct posix_acl_xattr_header dummy_acl;
4651
4652 /* Capabilities are emitted by finish_inode_if_needed */
4653 if (!strncmp(name, XATTR_NAME_CAPS, name_len))
4654 return 0;
4655
4656 p = fs_path_alloc();
4657 if (!p)
4658 return -ENOMEM;
4659
4660 /*
4661 * This hack is needed because empty acls are stored as zero byte
4662 * data in xattrs. The problem is that receiving these zero byte
4663 * acls will fail later. To fix this, we send a dummy acl list that
4664 * only contains the version number and no entries.
4665 */
4666 if (!strncmp(name, XATTR_NAME_POSIX_ACL_ACCESS, name_len) ||
4667 !strncmp(name, XATTR_NAME_POSIX_ACL_DEFAULT, name_len)) {
4668 if (data_len == 0) {
4669 dummy_acl.a_version =
4670 cpu_to_le32(POSIX_ACL_XATTR_VERSION);
4671 data = (char *)&dummy_acl;
4672 data_len = sizeof(dummy_acl);
4673 }
4674 }
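/*
 * The dummy acl above is just the 4 byte posix_acl_xattr_header with
 * a_version set to POSIX_ACL_XATTR_VERSION and no entries following it,
 * i.e. the encoding of an empty ACL that the receiver can apply.
 */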
4675
4676 ret = get_cur_path(sctx, sctx->cur_ino, sctx->cur_inode_gen, p);
4677 if (ret < 0)
4678 goto out;
4679
4680 ret = send_set_xattr(sctx, p, name, name_len, data, data_len);
4681
4682 out:
4683 fs_path_free(p);
4684 return ret;
4685 }
4686
4687 static int __process_deleted_xattr(int num, struct btrfs_key *di_key,
4688 const char *name, int name_len,
4689 const char *data, int data_len,
4690 u8 type, void *ctx)
4691 {
4692 int ret;
4693 struct send_ctx *sctx = ctx;
4694 struct fs_path *p;
4695
4696 p = fs_path_alloc();
4697 if (!p)
4698 return -ENOMEM;
4699
4700 ret = get_cur_path(sctx, sctx->cur_ino, sctx->cur_inode_gen, p);
4701 if (ret < 0)
4702 goto out;
4703
4704 ret = send_remove_xattr(sctx, p, name, name_len);
4705
4706 out:
4707 fs_path_free(p);
4708 return ret;
4709 }
4710
4711 static int process_new_xattr(struct send_ctx *sctx)
4712 {
4713 int ret = 0;
4714
4715 ret = iterate_dir_item(sctx->send_root, sctx->left_path,
4716 __process_new_xattr, sctx);
4717
4718 return ret;
4719 }
4720
4721 static int process_deleted_xattr(struct send_ctx *sctx)
4722 {
4723 return iterate_dir_item(sctx->parent_root, sctx->right_path,
4724 __process_deleted_xattr, sctx);
4725 }
4726
4727 struct find_xattr_ctx {
4728 const char *name;
4729 int name_len;
4730 int found_idx;
4731 char *found_data;
4732 int found_data_len;
4733 };
4734
4735 static int __find_xattr(int num, struct btrfs_key *di_key,
4736 const char *name, int name_len,
4737 const char *data, int data_len,
4738 u8 type, void *vctx)
4739 {
4740 struct find_xattr_ctx *ctx = vctx;
4741
4742 if (name_len == ctx->name_len &&
4743 strncmp(name, ctx->name, name_len) == 0) {
4744 ctx->found_idx = num;
4745 ctx->found_data_len = data_len;
4746 ctx->found_data = kmemdup(data, data_len, GFP_KERNEL);
4747 if (!ctx->found_data)
4748 return -ENOMEM;
4749 return 1;
4750 }
4751 return 0;
4752 }
4753
4754 static int find_xattr(struct btrfs_root *root,
4755 struct btrfs_path *path,
4756 struct btrfs_key *key,
4757 const char *name, int name_len,
4758 char **data, int *data_len)
4759 {
4760 int ret;
4761 struct find_xattr_ctx ctx;
4762
4763 ctx.name = name;
4764 ctx.name_len = name_len;
4765 ctx.found_idx = -1;
4766 ctx.found_data = NULL;
4767 ctx.found_data_len = 0;
4768
4769 ret = iterate_dir_item(root, path, __find_xattr, &ctx);
4770 if (ret < 0)
4771 return ret;
4772
4773 if (ctx.found_idx == -1)
4774 return -ENOENT;
4775 if (data) {
4776 *data = ctx.found_data;
4777 *data_len = ctx.found_data_len;
4778 } else {
4779 kfree(ctx.found_data);
4780 }
4781 return ctx.found_idx;
4782 }
4783
4784
4785 static int __process_changed_new_xattr(int num, struct btrfs_key *di_key,
4786 const char *name, int name_len,
4787 const char *data, int data_len,
4788 u8 type, void *ctx)
4789 {
4790 int ret;
4791 struct send_ctx *sctx = ctx;
4792 char *found_data = NULL;
4793 int found_data_len = 0;
4794
4795 ret = find_xattr(sctx->parent_root, sctx->right_path,
4796 sctx->cmp_key, name, name_len, &found_data,
4797 &found_data_len);
4798 if (ret == -ENOENT) {
4799 ret = __process_new_xattr(num, di_key, name, name_len, data,
4800 data_len, type, ctx);
4801 } else if (ret >= 0) {
4802 if (data_len != found_data_len ||
4803 memcmp(data, found_data, data_len)) {
4804 ret = __process_new_xattr(num, di_key, name, name_len,
4805 data, data_len, type, ctx);
4806 } else {
4807 ret = 0;
4808 }
4809 }
4810
4811 kfree(found_data);
4812 return ret;
4813 }
4814
4815 static int __process_changed_deleted_xattr(int num, struct btrfs_key *di_key,
4816 const char *name, int name_len,
4817 const char *data, int data_len,
4818 u8 type, void *ctx)
4819 {
4820 int ret;
4821 struct send_ctx *sctx = ctx;
4822
4823 ret = find_xattr(sctx->send_root, sctx->left_path, sctx->cmp_key,
4824 name, name_len, NULL, NULL);
4825 if (ret == -ENOENT)
4826 ret = __process_deleted_xattr(num, di_key, name, name_len, data,
4827 data_len, type, ctx);
4828 else if (ret >= 0)
4829 ret = 0;
4830
4831 return ret;
4832 }
4833
4834 static int process_changed_xattr(struct send_ctx *sctx)
4835 {
4836 int ret = 0;
4837
4838 ret = iterate_dir_item(sctx->send_root, sctx->left_path,
4839 __process_changed_new_xattr, sctx);
4840 if (ret < 0)
4841 goto out;
4842 ret = iterate_dir_item(sctx->parent_root, sctx->right_path,
4843 __process_changed_deleted_xattr, sctx);
4844
4845 out:
4846 return ret;
4847 }
4848
4849 static int process_all_new_xattrs(struct send_ctx *sctx)
4850 {
4851 int ret;
4852 struct btrfs_root *root;
4853 struct btrfs_path *path;
4854 struct btrfs_key key;
4855 struct btrfs_key found_key;
4856 struct extent_buffer *eb;
4857 int slot;
4858
4859 path = alloc_path_for_send();
4860 if (!path)
4861 return -ENOMEM;
4862
4863 root = sctx->send_root;
4864
4865 key.objectid = sctx->cmp_key->objectid;
4866 key.type = BTRFS_XATTR_ITEM_KEY;
4867 key.offset = 0;
4868 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
4869 if (ret < 0)
4870 goto out;
4871
4872 while (1) {
4873 eb = path->nodes[0];
4874 slot = path->slots[0];
4875 if (slot >= btrfs_header_nritems(eb)) {
4876 ret = btrfs_next_leaf(root, path);
4877 if (ret < 0) {
4878 goto out;
4879 } else if (ret > 0) {
4880 ret = 0;
4881 break;
4882 }
4883 continue;
4884 }
4885
4886 btrfs_item_key_to_cpu(eb, &found_key, slot);
4887 if (found_key.objectid != key.objectid ||
4888 found_key.type != key.type) {
4889 ret = 0;
4890 goto out;
4891 }
4892
4893 ret = iterate_dir_item(root, path, __process_new_xattr, sctx);
4894 if (ret < 0)
4895 goto out;
4896
4897 path->slots[0]++;
4898 }
4899
4900 out:
4901 btrfs_free_path(path);
4902 return ret;
4903 }
4904
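/*
 * Read up to @len bytes starting at file offset @offset of the inode being
 * processed into sctx->read_buf, going through the page cache and clamping
 * the range to the inode's i_size. Returns the number of bytes copied or a
 * negative error.
 */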
4905 static ssize_t fill_read_buf(struct send_ctx *sctx, u64 offset, u32 len)
4906 {
4907 struct btrfs_root *root = sctx->send_root;
4908 struct btrfs_fs_info *fs_info = root->fs_info;
4909 struct inode *inode;
4910 struct page *page;
4911 char *addr;
4912 struct btrfs_key key;
4913 pgoff_t index = offset >> PAGE_SHIFT;
4914 pgoff_t last_index;
4915 unsigned pg_offset = offset & ~PAGE_MASK;
4916 ssize_t ret = 0;
4917
4918 key.objectid = sctx->cur_ino;
4919 key.type = BTRFS_INODE_ITEM_KEY;
4920 key.offset = 0;
4921
4922 inode = btrfs_iget(fs_info->sb, &key, root, NULL);
4923 if (IS_ERR(inode))
4924 return PTR_ERR(inode);
4925
4926 if (offset + len > i_size_read(inode)) {
4927 if (offset > i_size_read(inode))
4928 len = 0;
4929 else
4930 len = i_size_read(inode) - offset;
4931 }
4932 if (len == 0)
4933 goto out;
4934
4935 last_index = (offset + len - 1) >> PAGE_SHIFT;
4936
4937 /* initial readahead */
4938 memset(&sctx->ra, 0, sizeof(struct file_ra_state));
4939 file_ra_state_init(&sctx->ra, inode->i_mapping);
4940
4941 while (index <= last_index) {
4942 unsigned cur_len = min_t(unsigned, len,
4943 PAGE_SIZE - pg_offset);
4944
4945 page = find_lock_page(inode->i_mapping, index);
4946 if (!page) {
4947 page_cache_sync_readahead(inode->i_mapping, &sctx->ra,
4948 NULL, index, last_index + 1 - index);
4949
4950 page = find_or_create_page(inode->i_mapping, index,
4951 GFP_KERNEL);
4952 if (!page) {
4953 ret = -ENOMEM;
4954 break;
4955 }
4956 }
4957
4958 if (PageReadahead(page)) {
4959 page_cache_async_readahead(inode->i_mapping, &sctx->ra,
4960 NULL, page, index, last_index + 1 - index);
4961 }
4962
4963 if (!PageUptodate(page)) {
4964 btrfs_readpage(NULL, page);
4965 lock_page(page);
4966 if (!PageUptodate(page)) {
4967 unlock_page(page);
4968 btrfs_err(fs_info,
4969 "send: IO error at offset %llu for inode %llu root %llu",
4970 page_offset(page), sctx->cur_ino,
4971 sctx->send_root->root_key.objectid);
4972 put_page(page);
4973 ret = -EIO;
4974 break;
4975 }
4976 }
4977
4978 addr = kmap(page);
4979 memcpy(sctx->read_buf + ret, addr + pg_offset, cur_len);
4980 kunmap(page);
4981 unlock_page(page);
4982 put_page(page);
4983 index++;
4984 pg_offset = 0;
4985 len -= cur_len;
4986 ret += cur_len;
4987 }
4988 out:
4989 iput(inode);
4990 return ret;
4991 }
4992
4993 /*
4994 * Read some bytes from the current inode/file and send a write command to
4995 * user space.
4996 */
4997 static int send_write(struct send_ctx *sctx, u64 offset, u32 len)
4998 {
4999 struct btrfs_fs_info *fs_info = sctx->send_root->fs_info;
5000 int ret = 0;
5001 struct fs_path *p;
5002 ssize_t num_read = 0;
5003
5004 p = fs_path_alloc();
5005 if (!p)
5006 return -ENOMEM;
5007
5008 btrfs_debug(fs_info, "send_write offset=%llu, len=%d", offset, len);
5009
5010 num_read = fill_read_buf(sctx, offset, len);
5011 if (num_read <= 0) {
5012 if (num_read < 0)
5013 ret = num_read;
5014 goto out;
5015 }
5016
5017 ret = begin_cmd(sctx, BTRFS_SEND_C_WRITE);
5018 if (ret < 0)
5019 goto out;
5020
5021 ret = get_cur_path(sctx, sctx->cur_ino, sctx->cur_inode_gen, p);
5022 if (ret < 0)
5023 goto out;
5024
5025 TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, p);
5026 TLV_PUT_U64(sctx, BTRFS_SEND_A_FILE_OFFSET, offset);
5027 TLV_PUT(sctx, BTRFS_SEND_A_DATA, sctx->read_buf, num_read);
5028
5029 ret = send_cmd(sctx);
5030
5031 tlv_put_failure:
5032 out:
5033 fs_path_free(p);
5034 if (ret < 0)
5035 return ret;
5036 return num_read;
5037 }
5038
5039 /*
5040 * Send a clone command to user space.
5041 */
5042 static int send_clone(struct send_ctx *sctx,
5043 u64 offset, u32 len,
5044 struct clone_root *clone_root)
5045 {
5046 int ret = 0;
5047 struct fs_path *p;
5048 u64 gen;
5049
5050 btrfs_debug(sctx->send_root->fs_info,
5051 "send_clone offset=%llu, len=%d, clone_root=%llu, clone_inode=%llu, clone_offset=%llu",
5052 offset, len, clone_root->root->objectid, clone_root->ino,
5053 clone_root->offset);
5054
5055 p = fs_path_alloc();
5056 if (!p)
5057 return -ENOMEM;
5058
5059 ret = begin_cmd(sctx, BTRFS_SEND_C_CLONE);
5060 if (ret < 0)
5061 goto out;
5062
5063 ret = get_cur_path(sctx, sctx->cur_ino, sctx->cur_inode_gen, p);
5064 if (ret < 0)
5065 goto out;
5066
5067 TLV_PUT_U64(sctx, BTRFS_SEND_A_FILE_OFFSET, offset);
5068 TLV_PUT_U64(sctx, BTRFS_SEND_A_CLONE_LEN, len);
5069 TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, p);
5070
5071 if (clone_root->root == sctx->send_root) {
5072 ret = get_inode_info(sctx->send_root, clone_root->ino, NULL,
5073 &gen, NULL, NULL, NULL, NULL);
5074 if (ret < 0)
5075 goto out;
5076 ret = get_cur_path(sctx, clone_root->ino, gen, p);
5077 } else {
5078 ret = get_inode_path(clone_root->root, clone_root->ino, p);
5079 }
5080 if (ret < 0)
5081 goto out;
5082
5083 /*
5084 * If the parent we're using has a received_uuid set then use that as
5085 * our clone source as that is what we will look for when doing a
5086 * receive.
5087 *
5088 * This covers the case that we create a snapshot off of a received
5089 * subvolume and then use that as the parent and try to receive on a
5090 * different host.
5091 */
5092 if (!btrfs_is_empty_uuid(clone_root->root->root_item.received_uuid))
5093 TLV_PUT_UUID(sctx, BTRFS_SEND_A_CLONE_UUID,
5094 clone_root->root->root_item.received_uuid);
5095 else
5096 TLV_PUT_UUID(sctx, BTRFS_SEND_A_CLONE_UUID,
5097 clone_root->root->root_item.uuid);
5098 TLV_PUT_U64(sctx, BTRFS_SEND_A_CLONE_CTRANSID,
5099 le64_to_cpu(clone_root->root->root_item.ctransid));
5100 TLV_PUT_PATH(sctx, BTRFS_SEND_A_CLONE_PATH, p);
5101 TLV_PUT_U64(sctx, BTRFS_SEND_A_CLONE_OFFSET,
5102 clone_root->offset);
5103
5104 ret = send_cmd(sctx);
5105
5106 tlv_put_failure:
5107 out:
5108 fs_path_free(p);
5109 return ret;
5110 }
5111
5112 /*
5113 * Send an update extent command to user space.
5114 */
5115 static int send_update_extent(struct send_ctx *sctx,
5116 u64 offset, u32 len)
5117 {
5118 int ret = 0;
5119 struct fs_path *p;
5120
5121 p = fs_path_alloc();
5122 if (!p)
5123 return -ENOMEM;
5124
5125 ret = begin_cmd(sctx, BTRFS_SEND_C_UPDATE_EXTENT);
5126 if (ret < 0)
5127 goto out;
5128
5129 ret = get_cur_path(sctx, sctx->cur_ino, sctx->cur_inode_gen, p);
5130 if (ret < 0)
5131 goto out;
5132
5133 TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, p);
5134 TLV_PUT_U64(sctx, BTRFS_SEND_A_FILE_OFFSET, offset);
5135 TLV_PUT_U64(sctx, BTRFS_SEND_A_SIZE, len);
5136
5137 ret = send_cmd(sctx);
5138
5139 tlv_put_failure:
5140 out:
5141 fs_path_free(p);
5142 return ret;
5143 }
5144
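/*
 * Emit a hole spanning from the end of the last processed extent up to @end,
 * either as a single update extent command (when file data is not sent) or
 * as a series of zero filled write commands of at most BTRFS_SEND_READ_SIZE
 * bytes each.
 */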
5145 static int send_hole(struct send_ctx *sctx, u64 end)
5146 {
5147 struct fs_path *p = NULL;
5148 u64 offset = sctx->cur_inode_last_extent;
5149 u64 len;
5150 int ret = 0;
5151
5152 /*
5153 * A hole that starts at EOF or beyond it. Since we do not yet support
5154 * fallocate (for extent preallocation and hole punching), sending a
5155 * write of zeroes starting at EOF or beyond would later require issuing
5156 * a truncate operation which would undo the write and achieve nothing.
5157 */
5158 if (offset >= sctx->cur_inode_size)
5159 return 0;
5160
5161 /*
5162 * Don't go beyond the inode's i_size due to prealloc extents that start
5163 * after the i_size.
5164 */
5165 end = min_t(u64, end, sctx->cur_inode_size);
5166
5167 if (sctx->flags & BTRFS_SEND_FLAG_NO_FILE_DATA)
5168 return send_update_extent(sctx, offset, end - offset);
5169
5170 p = fs_path_alloc();
5171 if (!p)
5172 return -ENOMEM;
5173 ret = get_cur_path(sctx, sctx->cur_ino, sctx->cur_inode_gen, p);
5174 if (ret < 0)
5175 goto tlv_put_failure;
5176 memset(sctx->read_buf, 0, BTRFS_SEND_READ_SIZE);
5177 while (offset < end) {
5178 len = min_t(u64, end - offset, BTRFS_SEND_READ_SIZE);
5179
5180 ret = begin_cmd(sctx, BTRFS_SEND_C_WRITE);
5181 if (ret < 0)
5182 break;
5183 TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, p);
5184 TLV_PUT_U64(sctx, BTRFS_SEND_A_FILE_OFFSET, offset);
5185 TLV_PUT(sctx, BTRFS_SEND_A_DATA, sctx->read_buf, len);
5186 ret = send_cmd(sctx);
5187 if (ret < 0)
5188 break;
5189 offset += len;
5190 }
5191 sctx->cur_inode_next_write_offset = offset;
5192 tlv_put_failure:
5193 fs_path_free(p);
5194 return ret;
5195 }
5196
5197 static int send_extent_data(struct send_ctx *sctx,
5198 const u64 offset,
5199 const u64 len)
5200 {
5201 u64 sent = 0;
5202
5203 if (sctx->flags & BTRFS_SEND_FLAG_NO_FILE_DATA)
5204 return send_update_extent(sctx, offset, len);
5205
5206 while (sent < len) {
5207 u64 size = len - sent;
5208 int ret;
5209
5210 if (size > BTRFS_SEND_READ_SIZE)
5211 size = BTRFS_SEND_READ_SIZE;
5212 ret = send_write(sctx, offset + sent, size);
5213 if (ret < 0)
5214 return ret;
5215 if (!ret)
5216 break;
5217 sent += ret;
5218 }
5219 return 0;
5220 }
5221
5222 /*
5223 * Search for a capability xattr related to sctx->cur_ino. If the capability is
5224 * found, call send_set_xattr function to emit it.
5225 *
5226 * Return 0 if there isn't a capability, or when the capability was emitted
5227 * successfully, or < 0 if an error occurred.
5228 */
5229 static int send_capabilities(struct send_ctx *sctx)
5230 {
5231 struct fs_path *fspath = NULL;
5232 struct btrfs_path *path;
5233 struct btrfs_dir_item *di;
5234 struct extent_buffer *leaf;
5235 unsigned long data_ptr;
5236 char *buf = NULL;
5237 int buf_len;
5238 int ret = 0;
5239
5240 path = alloc_path_for_send();
5241 if (!path)
5242 return -ENOMEM;
5243
5244 di = btrfs_lookup_xattr(NULL, sctx->send_root, path, sctx->cur_ino,
5245 XATTR_NAME_CAPS, strlen(XATTR_NAME_CAPS), 0);
5246 if (!di) {
5247 /* There is no xattr for this inode */
5248 goto out;
5249 } else if (IS_ERR(di)) {
5250 ret = PTR_ERR(di);
5251 goto out;
5252 }
5253
5254 leaf = path->nodes[0];
5255 buf_len = btrfs_dir_data_len(leaf, di);
5256
5257 fspath = fs_path_alloc();
5258 buf = kmalloc(buf_len, GFP_KERNEL);
5259 if (!fspath || !buf) {
5260 ret = -ENOMEM;
5261 goto out;
5262 }
5263
5264 ret = get_cur_path(sctx, sctx->cur_ino, sctx->cur_inode_gen, fspath);
5265 if (ret < 0)
5266 goto out;
5267
5268 data_ptr = (unsigned long)(di + 1) + btrfs_dir_name_len(leaf, di);
5269 read_extent_buffer(leaf, buf, data_ptr, buf_len);
5270
5271 ret = send_set_xattr(sctx, fspath, XATTR_NAME_CAPS,
5272 strlen(XATTR_NAME_CAPS), buf, buf_len);
5273 out:
5274 kfree(buf);
5275 fs_path_free(fspath);
5276 btrfs_free_path(path);
5277 return ret;
5278 }
5279
5280 static int clone_range(struct send_ctx *sctx,
5281 struct clone_root *clone_root,
5282 const u64 disk_byte,
5283 u64 data_offset,
5284 u64 offset,
5285 u64 len)
5286 {
5287 struct btrfs_path *path;
5288 struct btrfs_key key;
5289 int ret;
5290
5291 /*
5292 * Prevent cloning from a zero offset with a length matching the sector
5293 * size because in some scenarios this will make the receiver fail.
5294 *
5295 * For example, if in the source filesystem the extent at offset 0
5296 * has a length of sectorsize and it was written using direct IO, then
5297 * it can never be an inline extent (even if compression is enabled).
5298 * Then this extent can be cloned in the original filesystem to a non
5299 * zero file offset, but it may not be possible to clone in the
5300 * destination filesystem because it can be inlined due to compression
5301 * on the destination filesystem (as the receiver's write operations are
5302 * always done using buffered IO). The same happens when the original
5303 * filesystem does not have compression enabled but the destination
5304 * filesystem has.
5305 */
5306 if (clone_root->offset == 0 &&
5307 len == sctx->send_root->fs_info->sectorsize)
5308 return send_extent_data(sctx, offset, len);
5309
5310 path = alloc_path_for_send();
5311 if (!path)
5312 return -ENOMEM;
5313
5314 /*
5315 * We can't send a clone operation for the entire range if we find
5316 * extent items in the respective range in the source file that
5317 * refer to different extents or if we find holes.
5318 * So check for that and do a mix of clone and regular write/copy
5319 * operations if needed.
5320 *
5321 * Example:
5322 *
5323 * mkfs.btrfs -f /dev/sda
5324 * mount /dev/sda /mnt
5325 * xfs_io -f -c "pwrite -S 0xaa 0K 100K" /mnt/foo
5326 * cp --reflink=always /mnt/foo /mnt/bar
5327 * xfs_io -c "pwrite -S 0xbb 50K 50K" /mnt/foo
5328 * btrfs subvolume snapshot -r /mnt /mnt/snap
5329 *
5330 * If when we send the snapshot and we are processing file bar (which
5331 * has a higher inode number than foo) we blindly send a clone operation
5332 * for the [0, 100K[ range from foo to bar, the receiver ends up getting
5333 * a file bar that matches the content of file foo - iow, doesn't match
5334 * the content from bar in the original filesystem.
5335 */
5336 key.objectid = clone_root->ino;
5337 key.type = BTRFS_EXTENT_DATA_KEY;
5338 key.offset = clone_root->offset;
5339 ret = btrfs_search_slot(NULL, clone_root->root, &key, path, 0, 0);
5340 if (ret < 0)
5341 goto out;
5342 if (ret > 0 && path->slots[0] > 0) {
5343 btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0] - 1);
5344 if (key.objectid == clone_root->ino &&
5345 key.type == BTRFS_EXTENT_DATA_KEY)
5346 path->slots[0]--;
5347 }
5348
5349 while (true) {
5350 struct extent_buffer *leaf = path->nodes[0];
5351 int slot = path->slots[0];
5352 struct btrfs_file_extent_item *ei;
5353 u8 type;
5354 u64 ext_len;
5355 u64 clone_len;
5356
5357 if (slot >= btrfs_header_nritems(leaf)) {
5358 ret = btrfs_next_leaf(clone_root->root, path);
5359 if (ret < 0)
5360 goto out;
5361 else if (ret > 0)
5362 break;
5363 continue;
5364 }
5365
5366 btrfs_item_key_to_cpu(leaf, &key, slot);
5367
5368 /*
5369 * We might have an implicit trailing hole (NO_HOLES feature
5370 * enabled). We deal with it after leaving this loop.
5371 */
5372 if (key.objectid != clone_root->ino ||
5373 key.type != BTRFS_EXTENT_DATA_KEY)
5374 break;
5375
5376 ei = btrfs_item_ptr(leaf, slot, struct btrfs_file_extent_item);
5377 type = btrfs_file_extent_type(leaf, ei);
5378 if (type == BTRFS_FILE_EXTENT_INLINE) {
5379 ext_len = btrfs_file_extent_ram_bytes(leaf, ei);
5380 ext_len = PAGE_ALIGN(ext_len);
5381 } else {
5382 ext_len = btrfs_file_extent_num_bytes(leaf, ei);
5383 }
5384
5385 if (key.offset + ext_len <= clone_root->offset)
5386 goto next;
5387
5388 if (key.offset > clone_root->offset) {
5389 /* Implicit hole, NO_HOLES feature enabled. */
5390 u64 hole_len = key.offset - clone_root->offset;
5391
5392 if (hole_len > len)
5393 hole_len = len;
5394 ret = send_extent_data(sctx, offset, hole_len);
5395 if (ret < 0)
5396 goto out;
5397
5398 len -= hole_len;
5399 if (len == 0)
5400 break;
5401 offset += hole_len;
5402 clone_root->offset += hole_len;
5403 data_offset += hole_len;
5404 }
5405
5406 if (key.offset >= clone_root->offset + len)
5407 break;
5408
5409 clone_len = min_t(u64, ext_len, len);
5410
5411 if (btrfs_file_extent_disk_bytenr(leaf, ei) == disk_byte &&
5412 btrfs_file_extent_offset(leaf, ei) == data_offset)
5413 ret = send_clone(sctx, offset, clone_len, clone_root);
5414 else
5415 ret = send_extent_data(sctx, offset, clone_len);
5416
5417 if (ret < 0)
5418 goto out;
5419
5420 len -= clone_len;
5421 if (len == 0)
5422 break;
5423 offset += clone_len;
5424 clone_root->offset += clone_len;
5425 data_offset += clone_len;
5426 next:
5427 path->slots[0]++;
5428 }
5429
5430 if (len > 0)
5431 ret = send_extent_data(sctx, offset, len);
5432 else
5433 ret = 0;
5434 out:
5435 btrfs_free_path(path);
5436 return ret;
5437 }
5438
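/*
 * Send the data of the file extent item at @key either through clone
 * operations (when a clone source was found and the end of the range is
 * block aligned) or through plain write commands, with the length clamped
 * to the current inode size.
 */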
5439 static int send_write_or_clone(struct send_ctx *sctx,
5440 struct btrfs_path *path,
5441 struct btrfs_key *key,
5442 struct clone_root *clone_root)
5443 {
5444 int ret = 0;
5445 struct btrfs_file_extent_item *ei;
5446 u64 offset = key->offset;
5447 u64 len;
5448 u8 type;
5449 u64 bs = sctx->send_root->fs_info->sb->s_blocksize;
5450
5451 ei = btrfs_item_ptr(path->nodes[0], path->slots[0],
5452 struct btrfs_file_extent_item);
5453 type = btrfs_file_extent_type(path->nodes[0], ei);
5454 if (type == BTRFS_FILE_EXTENT_INLINE) {
5455 len = btrfs_file_extent_ram_bytes(path->nodes[0], ei);
5456 /*
5457 * it is possible the inline item won't cover the whole page,
5458 * but there may be items after this page. Make
5459 * sure to send the whole thing
5460 */
5461 len = PAGE_ALIGN(len);
5462 } else {
5463 len = btrfs_file_extent_num_bytes(path->nodes[0], ei);
5464 }
5465
5466 if (offset >= sctx->cur_inode_size) {
5467 ret = 0;
5468 goto out;
5469 }
5470 if (offset + len > sctx->cur_inode_size)
5471 len = sctx->cur_inode_size - offset;
5472 if (len == 0) {
5473 ret = 0;
5474 goto out;
5475 }
5476
5477 if (clone_root && IS_ALIGNED(offset + len, bs)) {
5478 u64 disk_byte;
5479 u64 data_offset;
5480
5481 disk_byte = btrfs_file_extent_disk_bytenr(path->nodes[0], ei);
5482 data_offset = btrfs_file_extent_offset(path->nodes[0], ei);
5483 ret = clone_range(sctx, clone_root, disk_byte, data_offset,
5484 offset, len);
5485 } else {
5486 ret = send_extent_data(sctx, offset, len);
5487 }
5488 sctx->cur_inode_next_write_offset = offset + len;
5489 out:
5490 return ret;
5491 }
5492
5493 static int is_extent_unchanged(struct send_ctx *sctx,
5494 struct btrfs_path *left_path,
5495 struct btrfs_key *ekey)
5496 {
5497 int ret = 0;
5498 struct btrfs_key key;
5499 struct btrfs_path *path = NULL;
5500 struct extent_buffer *eb;
5501 int slot;
5502 struct btrfs_key found_key;
5503 struct btrfs_file_extent_item *ei;
5504 u64 left_disknr;
5505 u64 right_disknr;
5506 u64 left_offset;
5507 u64 right_offset;
5508 u64 left_offset_fixed;
5509 u64 left_len;
5510 u64 right_len;
5511 u64 left_gen;
5512 u64 right_gen;
5513 u8 left_type;
5514 u8 right_type;
5515
5516 path = alloc_path_for_send();
5517 if (!path)
5518 return -ENOMEM;
5519
5520 eb = left_path->nodes[0];
5521 slot = left_path->slots[0];
5522 ei = btrfs_item_ptr(eb, slot, struct btrfs_file_extent_item);
5523 left_type = btrfs_file_extent_type(eb, ei);
5524
5525 if (left_type != BTRFS_FILE_EXTENT_REG) {
5526 ret = 0;
5527 goto out;
5528 }
5529 left_disknr = btrfs_file_extent_disk_bytenr(eb, ei);
5530 left_len = btrfs_file_extent_num_bytes(eb, ei);
5531 left_offset = btrfs_file_extent_offset(eb, ei);
5532 left_gen = btrfs_file_extent_generation(eb, ei);
5533
5534 /*
5535 * Following comments will refer to these graphics. L is the left
5536 * extents which we are checking at the moment. 1-8 are the right
5537 * extents that we iterate.
5538 *
5539 * |-----L-----|
5540 * |-1-|-2a-|-3-|-4-|-5-|-6-|
5541 *
5542 * |-----L-----|
5543 * |--1--|-2b-|...(same as above)
5544 *
5545 * Alternative situation. Happens on files where extents got split.
5546 * |-----L-----|
5547 * |-----------7-----------|-6-|
5548 *
5549 * Alternative situation. Happens on files which got larger.
5550 * |-----L-----|
5551 * |-8-|
5552 * Nothing follows after 8.
5553 */
5554
5555 key.objectid = ekey->objectid;
5556 key.type = BTRFS_EXTENT_DATA_KEY;
5557 key.offset = ekey->offset;
5558 ret = btrfs_search_slot_for_read(sctx->parent_root, &key, path, 0, 0);
5559 if (ret < 0)
5560 goto out;
5561 if (ret) {
5562 ret = 0;
5563 goto out;
5564 }
5565
5566 /*
5567 * Handle special case where the right side has no extents at all.
5568 */
5569 eb = path->nodes[0];
5570 slot = path->slots[0];
5571 btrfs_item_key_to_cpu(eb, &found_key, slot);
5572 if (found_key.objectid != key.objectid ||
5573 found_key.type != key.type) {
5574 /* If we're a hole then just pretend nothing changed */
5575 ret = (left_disknr) ? 0 : 1;
5576 goto out;
5577 }
5578
5579 /*
5580 * We're now on 2a, 2b or 7.
5581 */
5582 key = found_key;
5583 while (key.offset < ekey->offset + left_len) {
5584 ei = btrfs_item_ptr(eb, slot, struct btrfs_file_extent_item);
5585 right_type = btrfs_file_extent_type(eb, ei);
5586 if (right_type != BTRFS_FILE_EXTENT_REG &&
5587 right_type != BTRFS_FILE_EXTENT_INLINE) {
5588 ret = 0;
5589 goto out;
5590 }
5591
5592 if (right_type == BTRFS_FILE_EXTENT_INLINE) {
5593 right_len = btrfs_file_extent_ram_bytes(eb, ei);
5594 right_len = PAGE_ALIGN(right_len);
5595 } else {
5596 right_len = btrfs_file_extent_num_bytes(eb, ei);
5597 }
5598
5599 /*
5600 * Are we at extent 8? If yes, we know the extent is changed.
5601 * This may only happen on the first iteration.
5602 */
5603 if (found_key.offset + right_len <= ekey->offset) {
5604 /* If we're a hole just pretend nothing changed */
5605 ret = (left_disknr) ? 0 : 1;
5606 goto out;
5607 }
5608
5609 /*
5610 * We just wanted to see, when we have an inline extent, whether
5611 * what follows it is a regular extent (wanted to check the above
5612 * condition for inline extents too). This should normally not
5613 * happen but it's possible for example when we have an inline
5614 * compressed extent representing data with a size matching
5615 * the page size (currently the same as sector size).
5616 */
5617 if (right_type == BTRFS_FILE_EXTENT_INLINE) {
5618 ret = 0;
5619 goto out;
5620 }
5621
5622 right_disknr = btrfs_file_extent_disk_bytenr(eb, ei);
5623 right_offset = btrfs_file_extent_offset(eb, ei);
5624 right_gen = btrfs_file_extent_generation(eb, ei);
5625
5626 left_offset_fixed = left_offset;
5627 if (key.offset < ekey->offset) {
5628 /* Fix the right offset for 2a and 7. */
5629 right_offset += ekey->offset - key.offset;
5630 } else {
5631 /* Fix the left offset for all behind 2a and 2b */
5632 left_offset_fixed += key.offset - ekey->offset;
5633 }
5634
5635 /*
5636 * Check if we have the same extent.
5637 */
5638 if (left_disknr != right_disknr ||
5639 left_offset_fixed != right_offset ||
5640 left_gen != right_gen) {
5641 ret = 0;
5642 goto out;
5643 }
5644
5645 /*
5646 * Go to the next extent.
5647 */
5648 ret = btrfs_next_item(sctx->parent_root, path);
5649 if (ret < 0)
5650 goto out;
5651 if (!ret) {
5652 eb = path->nodes[0];
5653 slot = path->slots[0];
5654 btrfs_item_key_to_cpu(eb, &found_key, slot);
5655 }
5656 if (ret || found_key.objectid != key.objectid ||
5657 found_key.type != key.type) {
5658 key.offset += right_len;
5659 break;
5660 }
5661 if (found_key.offset != key.offset + right_len) {
5662 ret = 0;
5663 goto out;
5664 }
5665 key = found_key;
5666 }
5667
5668 /*
5669 * We're now behind the left extent (treat as unchanged) or at the end
5670 * of the right side (treat as changed).
5671 */
5672 if (key.offset >= ekey->offset + left_len)
5673 ret = 1;
5674 else
5675 ret = 0;
5676
5677
5678 out:
5679 btrfs_free_path(path);
5680 return ret;
5681 }
5682
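/*
 * Find the file extent item of the current inode that starts at or before
 * @offset and record where it ends in sctx->cur_inode_last_extent, which
 * maybe_send_hole() uses to detect gaps between consecutive extent items.
 */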
5683 static int get_last_extent(struct send_ctx *sctx, u64 offset)
5684 {
5685 struct btrfs_path *path;
5686 struct btrfs_root *root = sctx->send_root;
5687 struct btrfs_file_extent_item *fi;
5688 struct btrfs_key key;
5689 u64 extent_end;
5690 u8 type;
5691 int ret;
5692
5693 path = alloc_path_for_send();
5694 if (!path)
5695 return -ENOMEM;
5696
5697 sctx->cur_inode_last_extent = 0;
5698
5699 key.objectid = sctx->cur_ino;
5700 key.type = BTRFS_EXTENT_DATA_KEY;
5701 key.offset = offset;
5702 ret = btrfs_search_slot_for_read(root, &key, path, 0, 1);
5703 if (ret < 0)
5704 goto out;
5705 ret = 0;
5706 btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);
5707 if (key.objectid != sctx->cur_ino || key.type != BTRFS_EXTENT_DATA_KEY)
5708 goto out;
5709
5710 fi = btrfs_item_ptr(path->nodes[0], path->slots[0],
5711 struct btrfs_file_extent_item);
5712 type = btrfs_file_extent_type(path->nodes[0], fi);
5713 if (type == BTRFS_FILE_EXTENT_INLINE) {
5714 u64 size = btrfs_file_extent_ram_bytes(path->nodes[0], fi);
5715 extent_end = ALIGN(key.offset + size,
5716 sctx->send_root->fs_info->sectorsize);
5717 } else {
5718 extent_end = key.offset +
5719 btrfs_file_extent_num_bytes(path->nodes[0], fi);
5720 }
5721 sctx->cur_inode_last_extent = extent_end;
5722 out:
5723 btrfs_free_path(path);
5724 return ret;
5725 }
5726
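/*
 * Check whether the range [start, end) of the current inode is a hole in
 * the parent snapshot. Returns 1 if only holes (or nothing) cover the
 * range, 0 if an extent with a real disk bytenr overlaps it, and < 0 on
 * error.
 */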
5727 static int range_is_hole_in_parent(struct send_ctx *sctx,
5728 const u64 start,
5729 const u64 end)
5730 {
5731 struct btrfs_path *path;
5732 struct btrfs_key key;
5733 struct btrfs_root *root = sctx->parent_root;
5734 u64 search_start = start;
5735 int ret;
5736
5737 path = alloc_path_for_send();
5738 if (!path)
5739 return -ENOMEM;
5740
5741 key.objectid = sctx->cur_ino;
5742 key.type = BTRFS_EXTENT_DATA_KEY;
5743 key.offset = search_start;
5744 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
5745 if (ret < 0)
5746 goto out;
5747 if (ret > 0 && path->slots[0] > 0)
5748 path->slots[0]--;
5749
5750 while (search_start < end) {
5751 struct extent_buffer *leaf = path->nodes[0];
5752 int slot = path->slots[0];
5753 struct btrfs_file_extent_item *fi;
5754 u64 extent_end;
5755
5756 if (slot >= btrfs_header_nritems(leaf)) {
5757 ret = btrfs_next_leaf(root, path);
5758 if (ret < 0)
5759 goto out;
5760 else if (ret > 0)
5761 break;
5762 continue;
5763 }
5764
5765 btrfs_item_key_to_cpu(leaf, &key, slot);
5766 if (key.objectid < sctx->cur_ino ||
5767 key.type < BTRFS_EXTENT_DATA_KEY)
5768 goto next;
5769 if (key.objectid > sctx->cur_ino ||
5770 key.type > BTRFS_EXTENT_DATA_KEY ||
5771 key.offset >= end)
5772 break;
5773
5774 fi = btrfs_item_ptr(leaf, slot, struct btrfs_file_extent_item);
5775 if (btrfs_file_extent_type(leaf, fi) ==
5776 BTRFS_FILE_EXTENT_INLINE) {
5777 u64 size = btrfs_file_extent_ram_bytes(leaf, fi);
5778
5779 extent_end = ALIGN(key.offset + size,
5780 root->fs_info->sectorsize);
5781 } else {
5782 extent_end = key.offset +
5783 btrfs_file_extent_num_bytes(leaf, fi);
5784 }
5785 if (extent_end <= start)
5786 goto next;
5787 if (btrfs_file_extent_disk_bytenr(leaf, fi) == 0) {
5788 search_start = extent_end;
5789 goto next;
5790 }
5791 ret = 0;
5792 goto out;
5793 next:
5794 path->slots[0]++;
5795 }
5796 ret = 1;
5797 out:
5798 btrfs_free_path(path);
5799 return ret;
5800 }
5801
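/*
 * If there is a gap between the end of the previously processed extent and
 * the start of the extent at @key, and that range is not already a hole in
 * the parent snapshot, emit the hole now. Also keeps
 * sctx->cur_inode_last_extent up to date.
 */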
5802 static int maybe_send_hole(struct send_ctx *sctx, struct btrfs_path *path,
5803 struct btrfs_key *key)
5804 {
5805 struct btrfs_file_extent_item *fi;
5806 u64 extent_end;
5807 u8 type;
5808 int ret = 0;
5809
5810 if (sctx->cur_ino != key->objectid || !need_send_hole(sctx))
5811 return 0;
5812
5813 if (sctx->cur_inode_last_extent == (u64)-1) {
5814 ret = get_last_extent(sctx, key->offset - 1);
5815 if (ret)
5816 return ret;
5817 }
5818
5819 fi = btrfs_item_ptr(path->nodes[0], path->slots[0],
5820 struct btrfs_file_extent_item);
5821 type = btrfs_file_extent_type(path->nodes[0], fi);
5822 if (type == BTRFS_FILE_EXTENT_INLINE) {
5823 u64 size = btrfs_file_extent_ram_bytes(path->nodes[0], fi);
5824 extent_end = ALIGN(key->offset + size,
5825 sctx->send_root->fs_info->sectorsize);
5826 } else {
5827 extent_end = key->offset +
5828 btrfs_file_extent_num_bytes(path->nodes[0], fi);
5829 }
5830
5831 if (path->slots[0] == 0 &&
5832 sctx->cur_inode_last_extent < key->offset) {
5833 /*
5834 * We might have skipped entire leafs that contained only
5835 * file extent items for our current inode. These leafs have
5836 * a generation number smaller (older) than the one in the
5837 * current leaf and the leaf our last extent came from, and
5838 * are located between these 2 leafs.
5839 */
5840 ret = get_last_extent(sctx, key->offset - 1);
5841 if (ret)
5842 return ret;
5843 }
5844
5845 if (sctx->cur_inode_last_extent < key->offset) {
5846 ret = range_is_hole_in_parent(sctx,
5847 sctx->cur_inode_last_extent,
5848 key->offset);
5849 if (ret < 0)
5850 return ret;
5851 else if (ret == 0)
5852 ret = send_hole(sctx, key->offset);
5853 else
5854 ret = 0;
5855 }
5856 sctx->cur_inode_last_extent = extent_end;
5857 return ret;
5858 }
5859
5860 static int process_extent(struct send_ctx *sctx,
5861 struct btrfs_path *path,
5862 struct btrfs_key *key)
5863 {
5864 struct clone_root *found_clone = NULL;
5865 int ret = 0;
5866
5867 if (S_ISLNK(sctx->cur_inode_mode))
5868 return 0;
5869
5870 if (sctx->parent_root && !sctx->cur_inode_new) {
5871 ret = is_extent_unchanged(sctx, path, key);
5872 if (ret < 0)
5873 goto out;
5874 if (ret) {
5875 ret = 0;
5876 goto out_hole;
5877 }
5878 } else {
5879 struct btrfs_file_extent_item *ei;
5880 u8 type;
5881
5882 ei = btrfs_item_ptr(path->nodes[0], path->slots[0],
5883 struct btrfs_file_extent_item);
5884 type = btrfs_file_extent_type(path->nodes[0], ei);
5885 if (type == BTRFS_FILE_EXTENT_PREALLOC ||
5886 type == BTRFS_FILE_EXTENT_REG) {
5887 /*
5888 * The send spec does not have a prealloc command yet,
5889 * so just leave a hole for prealloc'ed extents until
5890 * we have enough commands queued up to justify rev'ing
5891 * the send spec.
5892 */
5893 if (type == BTRFS_FILE_EXTENT_PREALLOC) {
5894 ret = 0;
5895 goto out;
5896 }
5897
5898 /* Have a hole, just skip it. */
5899 if (btrfs_file_extent_disk_bytenr(path->nodes[0], ei) == 0) {
5900 ret = 0;
5901 goto out;
5902 }
5903 }
5904 }
5905
5906 ret = find_extent_clone(sctx, path, key->objectid, key->offset,
5907 sctx->cur_inode_size, &found_clone);
5908 if (ret != -ENOENT && ret < 0)
5909 goto out;
5910
5911 ret = send_write_or_clone(sctx, path, key, found_clone);
5912 if (ret)
5913 goto out;
5914 out_hole:
5915 ret = maybe_send_hole(sctx, path, key);
5916 out:
5917 return ret;
5918 }
5919
5920 static int process_all_extents(struct send_ctx *sctx)
5921 {
5922 int ret;
5923 struct btrfs_root *root;
5924 struct btrfs_path *path;
5925 struct btrfs_key key;
5926 struct btrfs_key found_key;
5927 struct extent_buffer *eb;
5928 int slot;
5929
5930 root = sctx->send_root;
5931 path = alloc_path_for_send();
5932 if (!path)
5933 return -ENOMEM;
5934
5935 key.objectid = sctx->cmp_key->objectid;
5936 key.type = BTRFS_EXTENT_DATA_KEY;
5937 key.offset = 0;
5938 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
5939 if (ret < 0)
5940 goto out;
5941
5942 while (1) {
5943 eb = path->nodes[0];
5944 slot = path->slots[0];
5945
5946 if (slot >= btrfs_header_nritems(eb)) {
5947 ret = btrfs_next_leaf(root, path);
5948 if (ret < 0) {
5949 goto out;
5950 } else if (ret > 0) {
5951 ret = 0;
5952 break;
5953 }
5954 continue;
5955 }
5956
5957 btrfs_item_key_to_cpu(eb, &found_key, slot);
5958
5959 if (found_key.objectid != key.objectid ||
5960 found_key.type != key.type) {
5961 ret = 0;
5962 goto out;
5963 }
5964
5965 ret = process_extent(sctx, path, &found_key);
5966 if (ret < 0)
5967 goto out;
5968
5969 path->slots[0]++;
5970 }
5971
5972 out:
5973 btrfs_free_path(path);
5974 return ret;
5975 }
5976
5977 static int process_recorded_refs_if_needed(struct send_ctx *sctx, int at_end,
5978 int *pending_move,
5979 int *refs_processed)
5980 {
5981 int ret = 0;
5982
5983 if (sctx->cur_ino == 0)
5984 goto out;
5985 if (!at_end && sctx->cur_ino == sctx->cmp_key->objectid &&
5986 sctx->cmp_key->type <= BTRFS_INODE_EXTREF_KEY)
5987 goto out;
5988 if (list_empty(&sctx->new_refs) && list_empty(&sctx->deleted_refs))
5989 goto out;
5990
5991 ret = process_recorded_refs(sctx, pending_move);
5992 if (ret < 0)
5993 goto out;
5994
5995 *refs_processed = 1;
5996 out:
5997 return ret;
5998 }
5999
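/*
 * Called once all items of the current inode were processed (or when we are
 * about to move to the next one): process any pending refs and emit the
 * remaining metadata commands for the inode (truncate, chown, chmod,
 * capabilities), then apply directory moves that were waiting on it.
 */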
6000 static int finish_inode_if_needed(struct send_ctx *sctx, int at_end)
6001 {
6002 int ret = 0;
6003 u64 left_mode;
6004 u64 left_uid;
6005 u64 left_gid;
6006 u64 right_mode;
6007 u64 right_uid;
6008 u64 right_gid;
6009 int need_chmod = 0;
6010 int need_chown = 0;
6011 int need_truncate = 1;
6012 int pending_move = 0;
6013 int refs_processed = 0;
6014
6015 if (sctx->ignore_cur_inode)
6016 return 0;
6017
6018 ret = process_recorded_refs_if_needed(sctx, at_end, &pending_move,
6019 &refs_processed);
6020 if (ret < 0)
6021 goto out;
6022
6023 /*
6024 * We have processed the refs and thus need to advance send_progress.
6025 * Now, calls to get_cur_xxx will take the updated refs of the current
6026 * inode into account.
6027 *
6028 * On the other hand, if our current inode is a directory and couldn't
6029 * be moved/renamed because its parent was renamed/moved too and it has
6030 * a higher inode number, we can only move/rename our current inode
6031 * after we moved/renamed its parent. Therefore in this case operate on
6032 * the old path (pre move/rename) of our current inode, and the
6033 * move/rename will be performed later.
6034 */
6035 if (refs_processed && !pending_move)
6036 sctx->send_progress = sctx->cur_ino + 1;
6037
6038 if (sctx->cur_ino == 0 || sctx->cur_inode_deleted)
6039 goto out;
6040 if (!at_end && sctx->cmp_key->objectid == sctx->cur_ino)
6041 goto out;
6042
6043 ret = get_inode_info(sctx->send_root, sctx->cur_ino, NULL, NULL,
6044 &left_mode, &left_uid, &left_gid, NULL);
6045 if (ret < 0)
6046 goto out;
6047
6048 if (!sctx->parent_root || sctx->cur_inode_new) {
6049 need_chown = 1;
6050 if (!S_ISLNK(sctx->cur_inode_mode))
6051 need_chmod = 1;
6052 if (sctx->cur_inode_next_write_offset == sctx->cur_inode_size)
6053 need_truncate = 0;
6054 } else {
6055 u64 old_size;
6056
6057 ret = get_inode_info(sctx->parent_root, sctx->cur_ino,
6058 &old_size, NULL, &right_mode, &right_uid,
6059 &right_gid, NULL);
6060 if (ret < 0)
6061 goto out;
6062
6063 if (left_uid != right_uid || left_gid != right_gid)
6064 need_chown = 1;
6065 if (!S_ISLNK(sctx->cur_inode_mode) && left_mode != right_mode)
6066 need_chmod = 1;
6067 if ((old_size == sctx->cur_inode_size) ||
6068 (sctx->cur_inode_size > old_size &&
6069 sctx->cur_inode_next_write_offset == sctx->cur_inode_size))
6070 need_truncate = 0;
6071 }
6072
6073 if (S_ISREG(sctx->cur_inode_mode)) {
6074 if (need_send_hole(sctx)) {
6075 if (sctx->cur_inode_last_extent == (u64)-1 ||
6076 sctx->cur_inode_last_extent <
6077 sctx->cur_inode_size) {
6078 ret = get_last_extent(sctx, (u64)-1);
6079 if (ret)
6080 goto out;
6081 }
6082 if (sctx->cur_inode_last_extent <
6083 sctx->cur_inode_size) {
6084 ret = send_hole(sctx, sctx->cur_inode_size);
6085 if (ret)
6086 goto out;
6087 }
6088 }
6089 if (need_truncate) {
6090 ret = send_truncate(sctx, sctx->cur_ino,
6091 sctx->cur_inode_gen,
6092 sctx->cur_inode_size);
6093 if (ret < 0)
6094 goto out;
6095 }
6096 }
6097
6098 if (need_chown) {
6099 ret = send_chown(sctx, sctx->cur_ino, sctx->cur_inode_gen,
6100 left_uid, left_gid);
6101 if (ret < 0)
6102 goto out;
6103 }
6104 if (need_chmod) {
6105 ret = send_chmod(sctx, sctx->cur_ino, sctx->cur_inode_gen,
6106 left_mode);
6107 if (ret < 0)
6108 goto out;
6109 }
6110
6111 ret = send_capabilities(sctx);
6112 if (ret < 0)
6113 goto out;
6114
6115 /*
6116 * If other directory inodes depended on our current directory
6117 * inode's move/rename, now do their move/rename operations.
6118 */
6119 if (!is_waiting_for_move(sctx, sctx->cur_ino)) {
6120 ret = apply_children_dir_moves(sctx);
6121 if (ret)
6122 goto out;
6123 /*
6124 	 * Need to send that every time, regardless of whether it actually
6125 	 * changed between the two trees, as we have made changes to
6126 	 * the inode before. If our inode is a directory and it's
6127 * waiting to be moved/renamed, we will send its utimes when
6128 * it's moved/renamed, therefore we don't need to do it here.
6129 */
6130 sctx->send_progress = sctx->cur_ino + 1;
6131 ret = send_utimes(sctx, sctx->cur_ino, sctx->cur_inode_gen);
6132 if (ret < 0)
6133 goto out;
6134 }
6135
6136 out:
6137 return ret;
6138 }
6139
6140 struct parent_paths_ctx {
6141 struct list_head *refs;
6142 struct send_ctx *sctx;
6143 };
6144
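/*
 * Callback for iterate_inode_ref(), used by btrfs_unlink_all_paths() to
 * record every path of the current inode found in the parent snapshot.
 */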
6145 static int record_parent_ref(int num, u64 dir, int index, struct fs_path *name,
6146 void *ctx)
6147 {
6148 struct parent_paths_ctx *ppctx = ctx;
6149
6150 return record_ref(ppctx->sctx->parent_root, dir, name, ppctx->sctx,
6151 ppctx->refs);
6152 }
6153
6154 /*
6155 * Issue unlink operations for all paths of the current inode found in the
6156 * parent snapshot.
6157 */
6158 static int btrfs_unlink_all_paths(struct send_ctx *sctx)
6159 {
6160 LIST_HEAD(deleted_refs);
6161 struct btrfs_path *path;
6162 struct btrfs_key key;
6163 struct parent_paths_ctx ctx;
6164 int ret;
6165
6166 path = alloc_path_for_send();
6167 if (!path)
6168 return -ENOMEM;
6169
6170 key.objectid = sctx->cur_ino;
6171 key.type = BTRFS_INODE_REF_KEY;
6172 key.offset = 0;
6173 ret = btrfs_search_slot(NULL, sctx->parent_root, &key, path, 0, 0);
6174 if (ret < 0)
6175 goto out;
6176
6177 ctx.refs = &deleted_refs;
6178 ctx.sctx = sctx;
6179
6180 while (true) {
6181 struct extent_buffer *eb = path->nodes[0];
6182 int slot = path->slots[0];
6183
6184 if (slot >= btrfs_header_nritems(eb)) {
6185 ret = btrfs_next_leaf(sctx->parent_root, path);
6186 if (ret < 0)
6187 goto out;
6188 else if (ret > 0)
6189 break;
6190 continue;
6191 }
6192
6193 btrfs_item_key_to_cpu(eb, &key, slot);
6194 if (key.objectid != sctx->cur_ino)
6195 break;
6196 if (key.type != BTRFS_INODE_REF_KEY &&
6197 key.type != BTRFS_INODE_EXTREF_KEY)
6198 break;
6199
6200 ret = iterate_inode_ref(sctx->parent_root, path, &key, 1,
6201 record_parent_ref, &ctx);
6202 if (ret < 0)
6203 goto out;
6204
6205 path->slots[0]++;
6206 }
6207
6208 while (!list_empty(&deleted_refs)) {
6209 struct recorded_ref *ref;
6210
6211 ref = list_first_entry(&deleted_refs, struct recorded_ref, list);
6212 ret = send_unlink(sctx, ref->full_path);
6213 if (ret < 0)
6214 goto out;
6215 fs_path_free(ref->full_path);
6216 list_del(&ref->list);
6217 kfree(ref);
6218 }
6219 ret = 0;
6220 out:
6221 btrfs_free_path(path);
6222 if (ret)
6223 __free_recorded_refs(&deleted_refs);
6224 return ret;
6225 }
6226
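/*
 * Handle a new, deleted or changed inode item found by the tree comparison.
 * Sets up the state for the current inode in sctx (generation, size, mode,
 * etc). If the inode changed generation, it is treated as a deleted inode
 * followed by a new one, and all its refs, extents and xattrs are processed
 * right away.
 */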
6227 static int changed_inode(struct send_ctx *sctx,
6228 enum btrfs_compare_tree_result result)
6229 {
6230 int ret = 0;
6231 struct btrfs_key *key = sctx->cmp_key;
6232 struct btrfs_inode_item *left_ii = NULL;
6233 struct btrfs_inode_item *right_ii = NULL;
6234 u64 left_gen = 0;
6235 u64 right_gen = 0;
6236
6237 sctx->cur_ino = key->objectid;
6238 sctx->cur_inode_new_gen = 0;
6239 sctx->cur_inode_last_extent = (u64)-1;
6240 sctx->cur_inode_next_write_offset = 0;
6241 sctx->ignore_cur_inode = false;
6242
6243 /*
6244 * Set send_progress to current inode. This will tell all get_cur_xxx
6245 * functions that the current inode's refs are not updated yet. Later,
6246 * when process_recorded_refs is finished, it is set to cur_ino + 1.
6247 */
6248 sctx->send_progress = sctx->cur_ino;
6249
6250 if (result == BTRFS_COMPARE_TREE_NEW ||
6251 result == BTRFS_COMPARE_TREE_CHANGED) {
6252 left_ii = btrfs_item_ptr(sctx->left_path->nodes[0],
6253 sctx->left_path->slots[0],
6254 struct btrfs_inode_item);
6255 left_gen = btrfs_inode_generation(sctx->left_path->nodes[0],
6256 left_ii);
6257 } else {
6258 right_ii = btrfs_item_ptr(sctx->right_path->nodes[0],
6259 sctx->right_path->slots[0],
6260 struct btrfs_inode_item);
6261 right_gen = btrfs_inode_generation(sctx->right_path->nodes[0],
6262 right_ii);
6263 }
6264 if (result == BTRFS_COMPARE_TREE_CHANGED) {
6265 right_ii = btrfs_item_ptr(sctx->right_path->nodes[0],
6266 sctx->right_path->slots[0],
6267 struct btrfs_inode_item);
6268
6269 right_gen = btrfs_inode_generation(sctx->right_path->nodes[0],
6270 right_ii);
6271
6272 /*
6273 * The cur_ino = root dir case is special here. We can't treat
6274 * the inode as deleted+reused because it would generate a
6275 * stream that tries to delete/mkdir the root dir.
6276 */
6277 if (left_gen != right_gen &&
6278 sctx->cur_ino != BTRFS_FIRST_FREE_OBJECTID)
6279 sctx->cur_inode_new_gen = 1;
6280 }
6281
6282 	/*
6283 	 * Normally we do not find inodes with a link count of zero (orphans)
6284 	 * because the most common case is to create a snapshot and use it
6285 	 * for a send operation. However other less common use cases involve
6286 	 * using a subvolume and sending it after turning it to RO mode just
6287 	 * after deleting all hard links of a file while holding an open
6288 	 * file descriptor against it, or turning a RO snapshot into RW mode,
6289 	 * keeping an open file descriptor against a file, deleting it and
6290 	 * then turning the snapshot back to RO mode before using it for a
6291 	 * send operation. So if we find such cases, ignore the inode and all
6292 	 * its items completely if it's a new inode, or if it's a changed
6293 	 * inode make sure all its previous paths (from the parent snapshot)
6294 	 * are unlinked and all its other items are ignored.
6295 	 */
6296 if (result == BTRFS_COMPARE_TREE_NEW ||
6297 result == BTRFS_COMPARE_TREE_CHANGED) {
6298 u32 nlinks;
6299
6300 nlinks = btrfs_inode_nlink(sctx->left_path->nodes[0], left_ii);
6301 if (nlinks == 0) {
6302 sctx->ignore_cur_inode = true;
6303 if (result == BTRFS_COMPARE_TREE_CHANGED)
6304 ret = btrfs_unlink_all_paths(sctx);
6305 goto out;
6306 }
6307 }
6308
6309 if (result == BTRFS_COMPARE_TREE_NEW) {
6310 sctx->cur_inode_gen = left_gen;
6311 sctx->cur_inode_new = 1;
6312 sctx->cur_inode_deleted = 0;
6313 sctx->cur_inode_size = btrfs_inode_size(
6314 sctx->left_path->nodes[0], left_ii);
6315 sctx->cur_inode_mode = btrfs_inode_mode(
6316 sctx->left_path->nodes[0], left_ii);
6317 sctx->cur_inode_rdev = btrfs_inode_rdev(
6318 sctx->left_path->nodes[0], left_ii);
6319 if (sctx->cur_ino != BTRFS_FIRST_FREE_OBJECTID)
6320 ret = send_create_inode_if_needed(sctx);
6321 } else if (result == BTRFS_COMPARE_TREE_DELETED) {
6322 sctx->cur_inode_gen = right_gen;
6323 sctx->cur_inode_new = 0;
6324 sctx->cur_inode_deleted = 1;
6325 sctx->cur_inode_size = btrfs_inode_size(
6326 sctx->right_path->nodes[0], right_ii);
6327 sctx->cur_inode_mode = btrfs_inode_mode(
6328 sctx->right_path->nodes[0], right_ii);
6329 } else if (result == BTRFS_COMPARE_TREE_CHANGED) {
6330 /*
6331 * We need to do some special handling in case the inode was
6332 * reported as changed with a changed generation number. This
6333 		 * means that the original inode was deleted and a new inode
6334 		 * reused the same inum. So we have to treat the old inode as
6335 * deleted and the new one as new.
6336 */
6337 if (sctx->cur_inode_new_gen) {
6338 /*
6339 * First, process the inode as if it was deleted.
6340 */
6341 sctx->cur_inode_gen = right_gen;
6342 sctx->cur_inode_new = 0;
6343 sctx->cur_inode_deleted = 1;
6344 sctx->cur_inode_size = btrfs_inode_size(
6345 sctx->right_path->nodes[0], right_ii);
6346 sctx->cur_inode_mode = btrfs_inode_mode(
6347 sctx->right_path->nodes[0], right_ii);
6348 ret = process_all_refs(sctx,
6349 BTRFS_COMPARE_TREE_DELETED);
6350 if (ret < 0)
6351 goto out;
6352
6353 /*
6354 * Now process the inode as if it was new.
6355 */
6356 sctx->cur_inode_gen = left_gen;
6357 sctx->cur_inode_new = 1;
6358 sctx->cur_inode_deleted = 0;
6359 sctx->cur_inode_size = btrfs_inode_size(
6360 sctx->left_path->nodes[0], left_ii);
6361 sctx->cur_inode_mode = btrfs_inode_mode(
6362 sctx->left_path->nodes[0], left_ii);
6363 sctx->cur_inode_rdev = btrfs_inode_rdev(
6364 sctx->left_path->nodes[0], left_ii);
6365 ret = send_create_inode_if_needed(sctx);
6366 if (ret < 0)
6367 goto out;
6368
6369 ret = process_all_refs(sctx, BTRFS_COMPARE_TREE_NEW);
6370 if (ret < 0)
6371 goto out;
6372 /*
6373 * Advance send_progress now as we did not get into
6374 * process_recorded_refs_if_needed in the new_gen case.
6375 */
6376 sctx->send_progress = sctx->cur_ino + 1;
6377
6378 /*
6379 * Now process all extents and xattrs of the inode as if
6380 * they were all new.
6381 */
6382 ret = process_all_extents(sctx);
6383 if (ret < 0)
6384 goto out;
6385 ret = process_all_new_xattrs(sctx);
6386 if (ret < 0)
6387 goto out;
6388 } else {
6389 sctx->cur_inode_gen = left_gen;
6390 sctx->cur_inode_new = 0;
6391 sctx->cur_inode_new_gen = 0;
6392 sctx->cur_inode_deleted = 0;
6393 sctx->cur_inode_size = btrfs_inode_size(
6394 sctx->left_path->nodes[0], left_ii);
6395 sctx->cur_inode_mode = btrfs_inode_mode(
6396 sctx->left_path->nodes[0], left_ii);
6397 }
6398 }
6399
6400 out:
6401 return ret;
6402 }
6403
6404 /*
6405 * We have to process new refs before deleted refs, but compare_trees gives us
6406 * the new and deleted refs mixed. To fix this, we record the new/deleted refs
6407 * first and later process them in process_recorded_refs.
6408 * For the cur_inode_new_gen case, we skip recording completely because
6409  * changed_inode already initiated processing of refs. The reason is that
6410  * in this case, compare_tree actually compares the refs of two different
6411  * inodes. To handle this, process_all_refs is used in changed_inode to treat
6412  * all refs of the right tree as deleted and all refs of the left tree as new.
6413 */
6414 static int changed_ref(struct send_ctx *sctx,
6415 enum btrfs_compare_tree_result result)
6416 {
6417 int ret = 0;
6418
6419 if (sctx->cur_ino != sctx->cmp_key->objectid) {
6420 inconsistent_snapshot_error(sctx, result, "reference");
6421 return -EIO;
6422 }
6423
6424 if (!sctx->cur_inode_new_gen &&
6425 sctx->cur_ino != BTRFS_FIRST_FREE_OBJECTID) {
6426 if (result == BTRFS_COMPARE_TREE_NEW)
6427 ret = record_new_ref(sctx);
6428 else if (result == BTRFS_COMPARE_TREE_DELETED)
6429 ret = record_deleted_ref(sctx);
6430 else if (result == BTRFS_COMPARE_TREE_CHANGED)
6431 ret = record_changed_ref(sctx);
6432 }
6433
6434 return ret;
6435 }
6436
6437 /*
6438 * Process new/deleted/changed xattrs. We skip processing in the
6439  * cur_inode_new_gen case because changed_inode already initiated processing
6440  * of xattrs. The reason is the same as in changed_ref.
6441 */
6442 static int changed_xattr(struct send_ctx *sctx,
6443 enum btrfs_compare_tree_result result)
6444 {
6445 int ret = 0;
6446
6447 if (sctx->cur_ino != sctx->cmp_key->objectid) {
6448 inconsistent_snapshot_error(sctx, result, "xattr");
6449 return -EIO;
6450 }
6451
6452 if (!sctx->cur_inode_new_gen && !sctx->cur_inode_deleted) {
6453 if (result == BTRFS_COMPARE_TREE_NEW)
6454 ret = process_new_xattr(sctx);
6455 else if (result == BTRFS_COMPARE_TREE_DELETED)
6456 ret = process_deleted_xattr(sctx);
6457 else if (result == BTRFS_COMPARE_TREE_CHANGED)
6458 ret = process_changed_xattr(sctx);
6459 }
6460
6461 return ret;
6462 }
6463
6464 /*
6465 * Process new/deleted/changed extents. We skip processing in the
6466  * cur_inode_new_gen case because changed_inode already initiated processing
6467  * of extents. The reason is the same as in changed_ref.
6468 */
6469 static int changed_extent(struct send_ctx *sctx,
6470 enum btrfs_compare_tree_result result)
6471 {
6472 int ret = 0;
6473
6474 /*
6475 * We have found an extent item that changed without the inode item
6476 * having changed. This can happen either after relocation (where the
6477 * disk_bytenr of an extent item is replaced at
6478 * relocation.c:replace_file_extents()) or after deduplication into a
6479 * file in both the parent and send snapshots (where an extent item can
6480 * get modified or replaced with a new one). Note that deduplication
6481 * updates the inode item, but it only changes the iversion (sequence
6482 * field in the inode item) of the inode, so if a file is deduplicated
6483 	 * the same number of times in both the parent and send snapshots, its
6484 	 * iversion becomes the same in both snapshots, and hence the inode
6485 	 * item is the same in both snapshots.
6486 */
6487 if (sctx->cur_ino != sctx->cmp_key->objectid)
6488 return 0;
6489
6490 if (!sctx->cur_inode_new_gen && !sctx->cur_inode_deleted) {
6491 if (result != BTRFS_COMPARE_TREE_DELETED)
6492 ret = process_extent(sctx, sctx->left_path,
6493 sctx->cmp_key);
6494 }
6495
6496 return ret;
6497 }
6498
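/*
 * Return 1 if the generation of directory @dir differs between the send and
 * parent snapshots (the directory was deleted and recreated), 0 if it is the
 * same, or a negative errno on failure.
 */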
6499 static int dir_changed(struct send_ctx *sctx, u64 dir)
6500 {
6501 u64 orig_gen, new_gen;
6502 int ret;
6503
6504 ret = get_inode_info(sctx->send_root, dir, NULL, &new_gen, NULL, NULL,
6505 NULL, NULL);
6506 if (ret)
6507 return ret;
6508
6509 ret = get_inode_info(sctx->parent_root, dir, NULL, &orig_gen, NULL,
6510 NULL, NULL, NULL);
6511 if (ret)
6512 return ret;
6513
6514 return (orig_gen != new_gen) ? 1 : 0;
6515 }
6516
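/*
 * For an inode ref/extref item that is identical in both snapshots, check
 * whether any of the parent directories it points to changed generation, in
 * which case the ref still needs to be processed.
 */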
6517 static int compare_refs(struct send_ctx *sctx, struct btrfs_path *path,
6518 struct btrfs_key *key)
6519 {
6520 struct btrfs_inode_extref *extref;
6521 struct extent_buffer *leaf;
6522 u64 dirid = 0, last_dirid = 0;
6523 unsigned long ptr;
6524 u32 item_size;
6525 u32 cur_offset = 0;
6526 int ref_name_len;
6527 int ret = 0;
6528
6529 /* Easy case, just check this one dirid */
6530 if (key->type == BTRFS_INODE_REF_KEY) {
6531 dirid = key->offset;
6532
6533 ret = dir_changed(sctx, dirid);
6534 goto out;
6535 }
6536
6537 leaf = path->nodes[0];
6538 item_size = btrfs_item_size_nr(leaf, path->slots[0]);
6539 ptr = btrfs_item_ptr_offset(leaf, path->slots[0]);
6540 while (cur_offset < item_size) {
6541 extref = (struct btrfs_inode_extref *)(ptr +
6542 cur_offset);
6543 dirid = btrfs_inode_extref_parent(leaf, extref);
6544 ref_name_len = btrfs_inode_extref_name_len(leaf, extref);
6545 cur_offset += ref_name_len + sizeof(*extref);
6546 if (dirid == last_dirid)
6547 continue;
6548 ret = dir_changed(sctx, dirid);
6549 if (ret)
6550 break;
6551 last_dirid = dirid;
6552 }
6553 out:
6554 return ret;
6555 }
6556
6557 /*
6558  * Updates compare-related fields in sctx and simply forwards to the actual
6559 * changed_xxx functions.
6560 */
6561 static int changed_cb(struct btrfs_path *left_path,
6562 struct btrfs_path *right_path,
6563 struct btrfs_key *key,
6564 enum btrfs_compare_tree_result result,
6565 void *ctx)
6566 {
6567 int ret = 0;
6568 struct send_ctx *sctx = ctx;
6569
6570 if (result == BTRFS_COMPARE_TREE_SAME) {
6571 if (key->type == BTRFS_INODE_REF_KEY ||
6572 key->type == BTRFS_INODE_EXTREF_KEY) {
6573 ret = compare_refs(sctx, left_path, key);
6574 if (!ret)
6575 return 0;
6576 if (ret < 0)
6577 return ret;
6578 } else if (key->type == BTRFS_EXTENT_DATA_KEY) {
6579 return maybe_send_hole(sctx, left_path, key);
6580 } else {
6581 return 0;
6582 }
6583 result = BTRFS_COMPARE_TREE_CHANGED;
6584 ret = 0;
6585 }
6586
6587 sctx->left_path = left_path;
6588 sctx->right_path = right_path;
6589 sctx->cmp_key = key;
6590
6591 ret = finish_inode_if_needed(sctx, 0);
6592 if (ret < 0)
6593 goto out;
6594
6595 /* Ignore non-FS objects */
6596 if (key->objectid == BTRFS_FREE_INO_OBJECTID ||
6597 key->objectid == BTRFS_FREE_SPACE_OBJECTID)
6598 goto out;
6599
6600 if (key->type == BTRFS_INODE_ITEM_KEY) {
6601 ret = changed_inode(sctx, result);
6602 } else if (!sctx->ignore_cur_inode) {
6603 if (key->type == BTRFS_INODE_REF_KEY ||
6604 key->type == BTRFS_INODE_EXTREF_KEY)
6605 ret = changed_ref(sctx, result);
6606 else if (key->type == BTRFS_XATTR_ITEM_KEY)
6607 ret = changed_xattr(sctx, result);
6608 else if (key->type == BTRFS_EXTENT_DATA_KEY)
6609 ret = changed_extent(sctx, result);
6610 }
6611
6612 out:
6613 return ret;
6614 }
6615
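/*
 * Full send mode (no parent snapshot): walk all items of the send root and
 * feed each one to changed_cb() as a BTRFS_COMPARE_TREE_NEW result.
 */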
6616 static int full_send_tree(struct send_ctx *sctx)
6617 {
6618 int ret;
6619 struct btrfs_root *send_root = sctx->send_root;
6620 struct btrfs_key key;
6621 struct btrfs_path *path;
6622 struct extent_buffer *eb;
6623 int slot;
6624
6625 path = alloc_path_for_send();
6626 if (!path)
6627 return -ENOMEM;
6628
6629 key.objectid = BTRFS_FIRST_FREE_OBJECTID;
6630 key.type = BTRFS_INODE_ITEM_KEY;
6631 key.offset = 0;
6632
6633 ret = btrfs_search_slot_for_read(send_root, &key, path, 1, 0);
6634 if (ret < 0)
6635 goto out;
6636 if (ret)
6637 goto out_finish;
6638
6639 while (1) {
6640 eb = path->nodes[0];
6641 slot = path->slots[0];
6642 btrfs_item_key_to_cpu(eb, &key, slot);
6643
6644 ret = changed_cb(path, NULL, &key,
6645 BTRFS_COMPARE_TREE_NEW, sctx);
6646 if (ret < 0)
6647 goto out;
6648
6649 ret = btrfs_next_item(send_root, path);
6650 if (ret < 0)
6651 goto out;
6652 if (ret) {
6653 ret = 0;
6654 break;
6655 }
6656 }
6657
6658 out_finish:
6659 ret = finish_inode_if_needed(sctx, 1);
6660
6661 out:
6662 btrfs_free_path(path);
6663 return ret;
6664 }
6665
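/*
 * Send one subvolume: emit the stream header (unless omitted) and the
 * subvolume/snapshot begin command, then either compare against the parent
 * root (incremental send) or do a full send of the tree.
 */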
6666 static int send_subvol(struct send_ctx *sctx)
6667 {
6668 int ret;
6669
6670 if (!(sctx->flags & BTRFS_SEND_FLAG_OMIT_STREAM_HEADER)) {
6671 ret = send_header(sctx);
6672 if (ret < 0)
6673 goto out;
6674 }
6675
6676 ret = send_subvol_begin(sctx);
6677 if (ret < 0)
6678 goto out;
6679
6680 if (sctx->parent_root) {
6681 ret = btrfs_compare_trees(sctx->send_root, sctx->parent_root,
6682 changed_cb, sctx);
6683 if (ret < 0)
6684 goto out;
6685 ret = finish_inode_if_needed(sctx, 1);
6686 if (ret < 0)
6687 goto out;
6688 } else {
6689 ret = full_send_tree(sctx);
6690 if (ret < 0)
6691 goto out;
6692 }
6693
6694 out:
6695 free_recorded_refs(sctx);
6696 return ret;
6697 }
6698
6699 /*
6700 * If orphan cleanup did remove any orphans from a root, it means the tree
6701 * was modified and therefore the commit root is not the same as the current
6702 * root anymore. This is a problem, because send uses the commit root and
6703 * therefore can see inode items that don't exist in the current root anymore,
6704 * and for example make calls to btrfs_iget, which will do tree lookups based
6705 * on the current root and not on the commit root. Those lookups will fail,
6706 * returning a -ESTALE error, and making send fail with that error. So make
6707 * sure a send does not see any orphans we have just removed, and that it will
6708 * see the same inodes regardless of whether a transaction commit happened
6709 * before it started (meaning that the commit root will be the same as the
6710 * current root) or not.
6711 */
6712 static int ensure_commit_roots_uptodate(struct send_ctx *sctx)
6713 {
6714 int i;
6715 struct btrfs_trans_handle *trans = NULL;
6716
6717 again:
6718 if (sctx->parent_root &&
6719 sctx->parent_root->node != sctx->parent_root->commit_root)
6720 goto commit_trans;
6721
6722 for (i = 0; i < sctx->clone_roots_cnt; i++)
6723 if (sctx->clone_roots[i].root->node !=
6724 sctx->clone_roots[i].root->commit_root)
6725 goto commit_trans;
6726
6727 if (trans)
6728 return btrfs_end_transaction(trans);
6729
6730 return 0;
6731
6732 commit_trans:
6733 	/* Use any root; all fs roots will get their commit roots updated. */
6734 if (!trans) {
6735 trans = btrfs_join_transaction(sctx->send_root);
6736 if (IS_ERR(trans))
6737 return PTR_ERR(trans);
6738 goto again;
6739 }
6740
6741 return btrfs_commit_transaction(trans);
6742 }
6743
6744 /*
6745  * Make sure any existing delalloc is flushed for any root used by a send
6746 * operation so that we do not miss any data and we do not race with writeback
6747 * finishing and changing a tree while send is using the tree. This could
6748 * happen if a subvolume is in RW mode, has delalloc, is turned to RO mode and
6749 * a send operation then uses the subvolume.
6750 * After flushing delalloc ensure_commit_roots_uptodate() must be called.
6751 */
6752 static int flush_delalloc_roots(struct send_ctx *sctx)
6753 {
6754 struct btrfs_root *root = sctx->parent_root;
6755 int ret;
6756 int i;
6757
6758 if (root) {
6759 ret = btrfs_start_delalloc_snapshot(root);
6760 if (ret)
6761 return ret;
6762 btrfs_wait_ordered_extents(root, U64_MAX, 0, U64_MAX);
6763 }
6764
6765 for (i = 0; i < sctx->clone_roots_cnt; i++) {
6766 root = sctx->clone_roots[i].root;
6767 ret = btrfs_start_delalloc_snapshot(root);
6768 if (ret)
6769 return ret;
6770 btrfs_wait_ordered_extents(root, U64_MAX, 0, U64_MAX);
6771 }
6772
6773 return 0;
6774 }
6775
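/* Drop the send_in_progress counter of a root, warning if it goes negative. */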
6776 static void btrfs_root_dec_send_in_progress(struct btrfs_root *root)
6777 {
6778 spin_lock(&root->root_item_lock);
6779 root->send_in_progress--;
6780 /*
6781 	 * Not much left to do; we don't know why it's unbalanced and
6782 * can't blindly reset it to 0.
6783 */
6784 if (root->send_in_progress < 0)
6785 btrfs_err(root->fs_info,
6786 "send_in_progress unbalanced %d root %llu",
6787 root->send_in_progress, root->root_key.objectid);
6788 spin_unlock(&root->root_item_lock);
6789 }
6790
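/*
 * Entry point for the send ioctl: validate the arguments, look up the parent
 * and clone roots, flush delalloc and make the commit roots up to date, run
 * the send stream generation and finally release all resources.
 */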
6791 long btrfs_ioctl_send(struct file *mnt_file, struct btrfs_ioctl_send_args *arg)
6792 {
6793 int ret = 0;
6794 struct btrfs_root *send_root = BTRFS_I(file_inode(mnt_file))->root;
6795 struct btrfs_fs_info *fs_info = send_root->fs_info;
6796 struct btrfs_root *clone_root;
6797 struct btrfs_key key;
6798 struct send_ctx *sctx = NULL;
6799 u32 i;
6800 u64 *clone_sources_tmp = NULL;
6801 int clone_sources_to_rollback = 0;
6802 unsigned alloc_size;
6803 int sort_clone_roots = 0;
6804 int index;
6805
6806 if (!capable(CAP_SYS_ADMIN))
6807 return -EPERM;
6808
6809 /*
6810 	 * The subvolume must remain read-only during send, so protect against
6811 * making it RW. This also protects against deletion.
6812 */
6813 spin_lock(&send_root->root_item_lock);
6814 send_root->send_in_progress++;
6815 spin_unlock(&send_root->root_item_lock);
6816
6817 /*
6818 * Userspace tools do the checks and warn the user if it's
6819 * not RO.
6820 */
6821 if (!btrfs_root_readonly(send_root)) {
6822 ret = -EPERM;
6823 goto out;
6824 }
6825
6826 /*
6827 	 * Check that we don't overflow at later allocations: we request
6828 * clone_sources_count + 1 items, and compare to unsigned long inside
6829 * access_ok. Also set an upper limit for allocation size so this can't
6830 * easily exhaust memory. Max number of clone sources is about 200K.
6831 */
6832 if (arg->clone_sources_count > SZ_8M / sizeof(struct clone_root)) {
6833 ret = -EINVAL;
6834 goto out;
6835 }
6836
6837 if (!access_ok(VERIFY_READ, arg->clone_sources,
6838 sizeof(*arg->clone_sources) *
6839 arg->clone_sources_count)) {
6840 ret = -EFAULT;
6841 goto out;
6842 }
6843
6844 if (arg->flags & ~BTRFS_SEND_FLAG_MASK) {
6845 ret = -EINVAL;
6846 goto out;
6847 }
6848
6849 sctx = kzalloc(sizeof(struct send_ctx), GFP_KERNEL);
6850 if (!sctx) {
6851 ret = -ENOMEM;
6852 goto out;
6853 }
6854
6855 INIT_LIST_HEAD(&sctx->new_refs);
6856 INIT_LIST_HEAD(&sctx->deleted_refs);
6857 INIT_RADIX_TREE(&sctx->name_cache, GFP_KERNEL);
6858 INIT_LIST_HEAD(&sctx->name_cache_list);
6859
6860 sctx->flags = arg->flags;
6861
6862 sctx->send_filp = fget(arg->send_fd);
6863 if (!sctx->send_filp) {
6864 ret = -EBADF;
6865 goto out;
6866 }
6867
6868 sctx->send_root = send_root;
6869 /*
6870 	 * Unlikely but possible: if the subvolume is marked for deletion but
6871 	 * removal of its directory entry is slow, a send can still be started.
6872 */
6873 if (btrfs_root_dead(sctx->send_root)) {
6874 ret = -EPERM;
6875 goto out;
6876 }
6877
6878 sctx->clone_roots_cnt = arg->clone_sources_count;
6879
6880 sctx->send_max_size = BTRFS_SEND_BUF_SIZE;
6881 sctx->send_buf = kvmalloc(sctx->send_max_size, GFP_KERNEL);
6882 if (!sctx->send_buf) {
6883 ret = -ENOMEM;
6884 goto out;
6885 }
6886
6887 sctx->read_buf = kvmalloc(BTRFS_SEND_READ_SIZE, GFP_KERNEL);
6888 if (!sctx->read_buf) {
6889 ret = -ENOMEM;
6890 goto out;
6891 }
6892
6893 sctx->pending_dir_moves = RB_ROOT;
6894 sctx->waiting_dir_moves = RB_ROOT;
6895 sctx->orphan_dirs = RB_ROOT;
6896
6897 alloc_size = sizeof(struct clone_root) * (arg->clone_sources_count + 1);
6898
6899 sctx->clone_roots = kvzalloc(alloc_size, GFP_KERNEL);
6900 if (!sctx->clone_roots) {
6901 ret = -ENOMEM;
6902 goto out;
6903 }
6904
6905 alloc_size = arg->clone_sources_count * sizeof(*arg->clone_sources);
6906
6907 if (arg->clone_sources_count) {
6908 clone_sources_tmp = kvmalloc(alloc_size, GFP_KERNEL);
6909 if (!clone_sources_tmp) {
6910 ret = -ENOMEM;
6911 goto out;
6912 }
6913
6914 ret = copy_from_user(clone_sources_tmp, arg->clone_sources,
6915 alloc_size);
6916 if (ret) {
6917 ret = -EFAULT;
6918 goto out;
6919 }
6920
6921 for (i = 0; i < arg->clone_sources_count; i++) {
6922 key.objectid = clone_sources_tmp[i];
6923 key.type = BTRFS_ROOT_ITEM_KEY;
6924 key.offset = (u64)-1;
6925
6926 index = srcu_read_lock(&fs_info->subvol_srcu);
6927
6928 clone_root = btrfs_read_fs_root_no_name(fs_info, &key);
6929 if (IS_ERR(clone_root)) {
6930 srcu_read_unlock(&fs_info->subvol_srcu, index);
6931 ret = PTR_ERR(clone_root);
6932 goto out;
6933 }
6934 spin_lock(&clone_root->root_item_lock);
6935 if (!btrfs_root_readonly(clone_root) ||
6936 btrfs_root_dead(clone_root)) {
6937 spin_unlock(&clone_root->root_item_lock);
6938 srcu_read_unlock(&fs_info->subvol_srcu, index);
6939 ret = -EPERM;
6940 goto out;
6941 }
6942 clone_root->send_in_progress++;
6943 spin_unlock(&clone_root->root_item_lock);
6944 srcu_read_unlock(&fs_info->subvol_srcu, index);
6945
6946 sctx->clone_roots[i].root = clone_root;
6947 clone_sources_to_rollback = i + 1;
6948 }
6949 kvfree(clone_sources_tmp);
6950 clone_sources_tmp = NULL;
6951 }
6952
6953 if (arg->parent_root) {
6954 key.objectid = arg->parent_root;
6955 key.type = BTRFS_ROOT_ITEM_KEY;
6956 key.offset = (u64)-1;
6957
6958 index = srcu_read_lock(&fs_info->subvol_srcu);
6959
6960 sctx->parent_root = btrfs_read_fs_root_no_name(fs_info, &key);
6961 if (IS_ERR(sctx->parent_root)) {
6962 srcu_read_unlock(&fs_info->subvol_srcu, index);
6963 ret = PTR_ERR(sctx->parent_root);
6964 goto out;
6965 }
6966
6967 spin_lock(&sctx->parent_root->root_item_lock);
6968 sctx->parent_root->send_in_progress++;
6969 if (!btrfs_root_readonly(sctx->parent_root) ||
6970 btrfs_root_dead(sctx->parent_root)) {
6971 spin_unlock(&sctx->parent_root->root_item_lock);
6972 srcu_read_unlock(&fs_info->subvol_srcu, index);
6973 ret = -EPERM;
6974 goto out;
6975 }
6976 spin_unlock(&sctx->parent_root->root_item_lock);
6977
6978 srcu_read_unlock(&fs_info->subvol_srcu, index);
6979 }
6980
6981 /*
6982 * Clones from send_root are allowed, but only if the clone source
6983 * is behind the current send position. This is checked while searching
6984 * for possible clone sources.
6985 */
6986 sctx->clone_roots[sctx->clone_roots_cnt++].root = sctx->send_root;
6987
6988 /* We do a bsearch later */
6989 sort(sctx->clone_roots, sctx->clone_roots_cnt,
6990 sizeof(*sctx->clone_roots), __clone_root_cmp_sort,
6991 NULL);
6992 sort_clone_roots = 1;
6993
6994 ret = flush_delalloc_roots(sctx);
6995 if (ret)
6996 goto out;
6997
6998 ret = ensure_commit_roots_uptodate(sctx);
6999 if (ret)
7000 goto out;
7001
7002 current->journal_info = BTRFS_SEND_TRANS_STUB;
7003 ret = send_subvol(sctx);
7004 current->journal_info = NULL;
7005 if (ret < 0)
7006 goto out;
7007
7008 if (!(sctx->flags & BTRFS_SEND_FLAG_OMIT_END_CMD)) {
7009 ret = begin_cmd(sctx, BTRFS_SEND_C_END);
7010 if (ret < 0)
7011 goto out;
7012 ret = send_cmd(sctx);
7013 if (ret < 0)
7014 goto out;
7015 }
7016
7017 out:
7018 WARN_ON(sctx && !ret && !RB_EMPTY_ROOT(&sctx->pending_dir_moves));
7019 while (sctx && !RB_EMPTY_ROOT(&sctx->pending_dir_moves)) {
7020 struct rb_node *n;
7021 struct pending_dir_move *pm;
7022
7023 n = rb_first(&sctx->pending_dir_moves);
7024 pm = rb_entry(n, struct pending_dir_move, node);
7025 while (!list_empty(&pm->list)) {
7026 struct pending_dir_move *pm2;
7027
7028 pm2 = list_first_entry(&pm->list,
7029 struct pending_dir_move, list);
7030 free_pending_move(sctx, pm2);
7031 }
7032 free_pending_move(sctx, pm);
7033 }
7034
7035 WARN_ON(sctx && !ret && !RB_EMPTY_ROOT(&sctx->waiting_dir_moves));
7036 while (sctx && !RB_EMPTY_ROOT(&sctx->waiting_dir_moves)) {
7037 struct rb_node *n;
7038 struct waiting_dir_move *dm;
7039
7040 n = rb_first(&sctx->waiting_dir_moves);
7041 dm = rb_entry(n, struct waiting_dir_move, node);
7042 rb_erase(&dm->node, &sctx->waiting_dir_moves);
7043 kfree(dm);
7044 }
7045
7046 WARN_ON(sctx && !ret && !RB_EMPTY_ROOT(&sctx->orphan_dirs));
7047 while (sctx && !RB_EMPTY_ROOT(&sctx->orphan_dirs)) {
7048 struct rb_node *n;
7049 struct orphan_dir_info *odi;
7050
7051 n = rb_first(&sctx->orphan_dirs);
7052 odi = rb_entry(n, struct orphan_dir_info, node);
7053 free_orphan_dir_info(sctx, odi);
7054 }
7055
7056 if (sort_clone_roots) {
7057 for (i = 0; i < sctx->clone_roots_cnt; i++)
7058 btrfs_root_dec_send_in_progress(
7059 sctx->clone_roots[i].root);
7060 } else {
7061 for (i = 0; sctx && i < clone_sources_to_rollback; i++)
7062 btrfs_root_dec_send_in_progress(
7063 sctx->clone_roots[i].root);
7064
7065 btrfs_root_dec_send_in_progress(send_root);
7066 }
7067 if (sctx && !IS_ERR_OR_NULL(sctx->parent_root))
7068 btrfs_root_dec_send_in_progress(sctx->parent_root);
7069
7070 kvfree(clone_sources_tmp);
7071
7072 if (sctx) {
7073 if (sctx->send_filp)
7074 fput(sctx->send_filp);
7075
7076 kvfree(sctx->clone_roots);
7077 kvfree(sctx->send_buf);
7078 kvfree(sctx->read_buf);
7079
7080 name_cache_free(sctx);
7081
7082 kfree(sctx);
7083 }
7084
7085 return ret;
7086 }
7087