// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2007 Oracle.  All rights reserved.
 */

#include <linux/bio.h>
#include <linux/slab.h>
#include <linux/pagemap.h>
#include <linux/highmem.h>
#include "ctree.h"
#include "disk-io.h"
#include "transaction.h"
#include "volumes.h"
#include "print-tree.h"
#include "compression.h"

#define __MAX_CSUM_ITEMS(r, size) ((unsigned long)(((BTRFS_LEAF_DATA_SIZE(r) - \
				   sizeof(struct btrfs_item) * 2) / \
				  size) - 1))

#define MAX_CSUM_ITEMS(r, size) (min_t(u32, __MAX_CSUM_ITEMS(r, size), \
				       PAGE_SIZE))

#define MAX_ORDERED_SUM_BYTES(fs_info) ((PAGE_SIZE - \
				   sizeof(struct btrfs_ordered_sum)) / \
				   sizeof(u32) * (fs_info)->sectorsize)

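/*
 * insert a new file extent item (BTRFS_EXTENT_DATA_KEY) for the given
 * inode/offset, describing an on-disk extent that starts at disk_offset.
 * Returns 0 on success or a negative errno.
 */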
int btrfs_insert_file_extent(struct btrfs_trans_handle *trans,
			     struct btrfs_root *root,
			     u64 objectid, u64 pos,
			     u64 disk_offset, u64 disk_num_bytes,
			     u64 num_bytes, u64 offset, u64 ram_bytes,
			     u8 compression, u8 encryption, u16 other_encoding)
{
	int ret = 0;
	struct btrfs_file_extent_item *item;
	struct btrfs_key file_key;
	struct btrfs_path *path;
	struct extent_buffer *leaf;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;
	file_key.objectid = objectid;
	file_key.offset = pos;
	file_key.type = BTRFS_EXTENT_DATA_KEY;

	path->leave_spinning = 1;
	ret = btrfs_insert_empty_item(trans, root, path, &file_key,
				      sizeof(*item));
	if (ret < 0)
		goto out;
	BUG_ON(ret); /* Can't happen */
	leaf = path->nodes[0];
	item = btrfs_item_ptr(leaf, path->slots[0],
			      struct btrfs_file_extent_item);
	btrfs_set_file_extent_disk_bytenr(leaf, item, disk_offset);
	btrfs_set_file_extent_disk_num_bytes(leaf, item, disk_num_bytes);
	btrfs_set_file_extent_offset(leaf, item, offset);
	btrfs_set_file_extent_num_bytes(leaf, item, num_bytes);
	btrfs_set_file_extent_ram_bytes(leaf, item, ram_bytes);
	btrfs_set_file_extent_generation(leaf, item, trans->transid);
	btrfs_set_file_extent_type(leaf, item, BTRFS_FILE_EXTENT_REG);
	btrfs_set_file_extent_compression(leaf, item, compression);
	btrfs_set_file_extent_encryption(leaf, item, encryption);
	btrfs_set_file_extent_other_encoding(leaf, item, other_encoding);

	btrfs_mark_buffer_dirty(leaf);
out:
	btrfs_free_path(path);
	return ret;
}

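/*
 * find the checksum item in the csum tree that covers bytenr and return a
 * pointer to the csum bytes for that block inside the item.  On failure an
 * ERR_PTR is returned: -ENOENT if no item covers bytenr, -EFBIG if the
 * preceding item ends exactly at bytenr (it could still be extended).
 */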
static struct btrfs_csum_item *
btrfs_lookup_csum(struct btrfs_trans_handle *trans,
		  struct btrfs_root *root,
		  struct btrfs_path *path,
		  u64 bytenr, int cow)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	int ret;
	struct btrfs_key file_key;
	struct btrfs_key found_key;
	struct btrfs_csum_item *item;
	struct extent_buffer *leaf;
	u64 csum_offset = 0;
	u16 csum_size = btrfs_super_csum_size(fs_info->super_copy);
	int csums_in_item;

	file_key.objectid = BTRFS_EXTENT_CSUM_OBJECTID;
	file_key.offset = bytenr;
	file_key.type = BTRFS_EXTENT_CSUM_KEY;
	ret = btrfs_search_slot(trans, root, &file_key, path, 0, cow);
	if (ret < 0)
		goto fail;
	leaf = path->nodes[0];
	if (ret > 0) {
		ret = 1;
		if (path->slots[0] == 0)
			goto fail;
		path->slots[0]--;
		btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
		if (found_key.type != BTRFS_EXTENT_CSUM_KEY)
			goto fail;

		csum_offset = (bytenr - found_key.offset) >>
			      fs_info->sb->s_blocksize_bits;
		csums_in_item = btrfs_item_size_nr(leaf, path->slots[0]);
		csums_in_item /= csum_size;

		if (csum_offset == csums_in_item) {
			ret = -EFBIG;
			goto fail;
		} else if (csum_offset > csums_in_item) {
			goto fail;
		}
	}
	item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_csum_item);
	item = (struct btrfs_csum_item *)((unsigned char *)item +
					  csum_offset * csum_size);
	return item;
fail:
	if (ret > 0)
		ret = -ENOENT;
	return ERR_PTR(ret);
}

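/*
 * search for the file extent item of the given inode at the given offset.
 * mod < 0 prepares the path for a deletion and any non-zero mod requests
 * COW of the leaf; the raw btrfs_search_slot() result is returned.
 */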
int btrfs_lookup_file_extent(struct btrfs_trans_handle *trans,
			     struct btrfs_root *root,
			     struct btrfs_path *path, u64 objectid,
			     u64 offset, int mod)
{
	int ret;
	struct btrfs_key file_key;
	int ins_len = mod < 0 ? -1 : 0;
	int cow = mod != 0;

	file_key.objectid = objectid;
	file_key.offset = offset;
	file_key.type = BTRFS_EXTENT_DATA_KEY;
	ret = btrfs_search_slot(trans, root, &file_key, path, ins_len, cow);
	return ret;
}

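/*
 * end_io hook used when the csum array was allocated separately: free it
 * once the read bio completes.
 */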
static void btrfs_io_bio_endio_readpage(struct btrfs_io_bio *bio, int err)
{
	kfree(bio->csum_allocated);
}

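/*
 * look up the data checksums for every block covered by a read bio and copy
 * them either into the caller supplied buffer (dst) or into the btrfs_io_bio
 * itself.  Checksums are taken from in-memory ordered sums when possible,
 * otherwise from the csum tree; blocks without a checksum get a zeroed
 * entry.  Returns BLK_STS_RESOURCE on allocation failure, 0 otherwise.
 */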
static blk_status_t __btrfs_lookup_bio_sums(struct inode *inode, struct bio *bio,
				   u64 logical_offset, u32 *dst, int dio)
{
	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
	struct bio_vec bvec;
	struct bvec_iter iter;
	struct btrfs_io_bio *btrfs_bio = btrfs_io_bio(bio);
	struct btrfs_csum_item *item = NULL;
	struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
	struct btrfs_path *path;
	u8 *csum;
	u64 offset = 0;
	u64 item_start_offset = 0;
	u64 item_last_offset = 0;
	u64 disk_bytenr;
	u64 page_bytes_left;
	u32 diff;
	int nblocks;
	int count = 0;
	u16 csum_size = btrfs_super_csum_size(fs_info->super_copy);

	path = btrfs_alloc_path();
	if (!path)
		return BLK_STS_RESOURCE;

	nblocks = bio->bi_iter.bi_size >> inode->i_sb->s_blocksize_bits;
	if (!dst) {
		if (nblocks * csum_size > BTRFS_BIO_INLINE_CSUM_SIZE) {
			btrfs_bio->csum_allocated = kmalloc_array(nblocks,
					csum_size, GFP_NOFS);
			if (!btrfs_bio->csum_allocated) {
				btrfs_free_path(path);
				return BLK_STS_RESOURCE;
			}
			btrfs_bio->csum = btrfs_bio->csum_allocated;
			btrfs_bio->end_io = btrfs_io_bio_endio_readpage;
		} else {
			btrfs_bio->csum = btrfs_bio->csum_inline;
		}
		csum = btrfs_bio->csum;
	} else {
		csum = (u8 *)dst;
	}

	if (bio->bi_iter.bi_size > PAGE_SIZE * 8)
		path->reada = READA_FORWARD;

	/*
	 * the free space stuff is only read when it hasn't been
	 * updated in the current transaction.  So, we can safely
	 * read from the commit root and sidestep a nasty deadlock
	 * between reading the free space cache and updating the csum tree.
	 */
	if (btrfs_is_free_space_inode(BTRFS_I(inode))) {
		path->search_commit_root = 1;
		path->skip_locking = 1;
	}

	disk_bytenr = (u64)bio->bi_iter.bi_sector << 9;
	if (dio)
		offset = logical_offset;

	bio_for_each_segment(bvec, bio, iter) {
		page_bytes_left = bvec.bv_len;
		if (count)
			goto next;

		if (!dio)
			offset = page_offset(bvec.bv_page) + bvec.bv_offset;
		count = btrfs_find_ordered_sum(inode, offset, disk_bytenr,
					       (u32 *)csum, nblocks);
		if (count)
			goto found;

		if (!item || disk_bytenr < item_start_offset ||
		    disk_bytenr >= item_last_offset) {
			struct btrfs_key found_key;
			u32 item_size;

			if (item)
				btrfs_release_path(path);
			item = btrfs_lookup_csum(NULL, fs_info->csum_root,
						 path, disk_bytenr, 0);
			if (IS_ERR(item)) {
				count = 1;
				memset(csum, 0, csum_size);
				if (BTRFS_I(inode)->root->root_key.objectid ==
				    BTRFS_DATA_RELOC_TREE_OBJECTID) {
					set_extent_bits(io_tree, offset,
						offset + fs_info->sectorsize - 1,
						EXTENT_NODATASUM);
				} else {
					btrfs_info_rl(fs_info,
						"no csum found for inode %llu start %llu",
						btrfs_ino(BTRFS_I(inode)), offset);
				}
				item = NULL;
				btrfs_release_path(path);
				goto found;
			}
			btrfs_item_key_to_cpu(path->nodes[0], &found_key,
					      path->slots[0]);

			item_start_offset = found_key.offset;
			item_size = btrfs_item_size_nr(path->nodes[0],
						       path->slots[0]);
			item_last_offset = item_start_offset +
				(item_size / csum_size) *
				fs_info->sectorsize;
			item = btrfs_item_ptr(path->nodes[0], path->slots[0],
					      struct btrfs_csum_item);
		}
		/*
		 * this byte range must be able to fit inside
		 * a single leaf so it will also fit inside a u32
		 */
		diff = disk_bytenr - item_start_offset;
		diff = diff / fs_info->sectorsize;
		diff = diff * csum_size;
		count = min_t(int, nblocks, (item_last_offset - disk_bytenr) >>
					    inode->i_sb->s_blocksize_bits);
		read_extent_buffer(path->nodes[0], csum,
				   ((unsigned long)item) + diff,
				   csum_size * count);
found:
		csum += count * csum_size;
		nblocks -= count;
next:
		while (count > 0) {
			count--;
			disk_bytenr += fs_info->sectorsize;
			offset += fs_info->sectorsize;
			page_bytes_left -= fs_info->sectorsize;
			if (!page_bytes_left)
				break; /* move to next bio */
		}
	}

	WARN_ON_ONCE(count);
	btrfs_free_path(path);
	return 0;
}

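/*
 * thin wrappers around __btrfs_lookup_bio_sums() for buffered reads and for
 * direct I/O reads (which pass an explicit file offset) respectively.
 */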
blk_status_t btrfs_lookup_bio_sums(struct inode *inode, struct bio *bio, u32 *dst)
{
	return __btrfs_lookup_bio_sums(inode, bio, 0, dst, 0);
}

blk_status_t btrfs_lookup_bio_sums_dio(struct inode *inode, struct bio *bio, u64 offset)
{
	return __btrfs_lookup_bio_sums(inode, bio, offset, NULL, 1);
}

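/*
 * collect every checksum stored in the csum tree for the byte range
 * [start, end] into a list of btrfs_ordered_sum structures.  Both ends of
 * the range must be sectorsize aligned.  On error the partially built list
 * is freed and a negative errno is returned.
 */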
int btrfs_lookup_csums_range(struct btrfs_root *root, u64 start, u64 end,
			     struct list_head *list, int search_commit)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_key key;
	struct btrfs_path *path;
	struct extent_buffer *leaf;
	struct btrfs_ordered_sum *sums;
	struct btrfs_csum_item *item;
	LIST_HEAD(tmplist);
	unsigned long offset;
	int ret;
	size_t size;
	u64 csum_end;
	u16 csum_size = btrfs_super_csum_size(fs_info->super_copy);

	ASSERT(IS_ALIGNED(start, fs_info->sectorsize) &&
	       IS_ALIGNED(end + 1, fs_info->sectorsize));

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	if (search_commit) {
		path->skip_locking = 1;
		path->reada = READA_FORWARD;
		path->search_commit_root = 1;
	}

	key.objectid = BTRFS_EXTENT_CSUM_OBJECTID;
	key.offset = start;
	key.type = BTRFS_EXTENT_CSUM_KEY;

	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	if (ret < 0)
		goto fail;
	if (ret > 0 && path->slots[0] > 0) {
		leaf = path->nodes[0];
		btrfs_item_key_to_cpu(leaf, &key, path->slots[0] - 1);
		if (key.objectid == BTRFS_EXTENT_CSUM_OBJECTID &&
		    key.type == BTRFS_EXTENT_CSUM_KEY) {
			offset = (start - key.offset) >>
				 fs_info->sb->s_blocksize_bits;
			if (offset * csum_size <
			    btrfs_item_size_nr(leaf, path->slots[0] - 1))
				path->slots[0]--;
		}
	}

	while (start <= end) {
		leaf = path->nodes[0];
		if (path->slots[0] >= btrfs_header_nritems(leaf)) {
			ret = btrfs_next_leaf(root, path);
			if (ret < 0)
				goto fail;
			if (ret > 0)
				break;
			leaf = path->nodes[0];
		}

		btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
		if (key.objectid != BTRFS_EXTENT_CSUM_OBJECTID ||
		    key.type != BTRFS_EXTENT_CSUM_KEY ||
		    key.offset > end)
			break;

		if (key.offset > start)
			start = key.offset;

		size = btrfs_item_size_nr(leaf, path->slots[0]);
		csum_end = key.offset + (size / csum_size) * fs_info->sectorsize;
		if (csum_end <= start) {
			path->slots[0]++;
			continue;
		}

		csum_end = min(csum_end, end + 1);
		item = btrfs_item_ptr(path->nodes[0], path->slots[0],
				      struct btrfs_csum_item);
		while (start < csum_end) {
			size = min_t(size_t, csum_end - start,
				     MAX_ORDERED_SUM_BYTES(fs_info));
			sums = kzalloc(btrfs_ordered_sum_size(fs_info, size),
				       GFP_NOFS);
			if (!sums) {
				ret = -ENOMEM;
				goto fail;
			}

			sums->bytenr = start;
			sums->len = (int)size;

			offset = (start - key.offset) >>
				 fs_info->sb->s_blocksize_bits;
			offset *= csum_size;
			size >>= fs_info->sb->s_blocksize_bits;

			read_extent_buffer(path->nodes[0],
					   sums->sums,
					   ((unsigned long)item) + offset,
					   csum_size * size);

			start += fs_info->sectorsize * size;
			list_add_tail(&sums->list, &tmplist);
		}
		path->slots[0]++;
	}
	ret = 0;
fail:
	while (ret < 0 && !list_empty(&tmplist)) {
		sums = list_entry(tmplist.next, struct btrfs_ordered_sum, list);
		list_del(&sums->list);
		kfree(sums);
	}
	list_splice_tail(&tmplist, list);

	btrfs_free_path(path);
	return ret;
}

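/*
 * compute the data checksum for every sector of a write bio and attach the
 * results, as btrfs_ordered_sum structures, to the ordered extent(s) that
 * cover the bio.  If contig is set the file offset of the data is
 * file_start, otherwise it is taken from the page cache pages themselves.
 */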
blk_status_t btrfs_csum_one_bio(struct inode *inode, struct bio *bio,
		       u64 file_start, int contig)
{
	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
	struct btrfs_ordered_sum *sums;
	struct btrfs_ordered_extent *ordered = NULL;
	char *data;
	struct bvec_iter iter;
	struct bio_vec bvec;
	int index;
	int nr_sectors;
	unsigned long total_bytes = 0;
	unsigned long this_sum_bytes = 0;
	int i;
	u64 offset;

	sums = kzalloc(btrfs_ordered_sum_size(fs_info, bio->bi_iter.bi_size),
		       GFP_NOFS);
	if (!sums)
		return BLK_STS_RESOURCE;

	sums->len = bio->bi_iter.bi_size;
	INIT_LIST_HEAD(&sums->list);

	if (contig)
		offset = file_start;
	else
		offset = 0; /* shut up gcc */

	sums->bytenr = (u64)bio->bi_iter.bi_sector << 9;
	index = 0;

	bio_for_each_segment(bvec, bio, iter) {
		if (!contig)
			offset = page_offset(bvec.bv_page) + bvec.bv_offset;

		if (!ordered) {
			ordered = btrfs_lookup_ordered_extent(inode, offset);
			BUG_ON(!ordered); /* Logic error */
		}

		data = kmap_atomic(bvec.bv_page);

		nr_sectors = BTRFS_BYTES_TO_BLKS(fs_info,
						 bvec.bv_len + fs_info->sectorsize
						 - 1);

		for (i = 0; i < nr_sectors; i++) {
			if (offset >= ordered->file_offset + ordered->len ||
			    offset < ordered->file_offset) {
				unsigned long bytes_left;

				kunmap_atomic(data);
				sums->len = this_sum_bytes;
				this_sum_bytes = 0;
				btrfs_add_ordered_sum(inode, ordered, sums);
				btrfs_put_ordered_extent(ordered);

				bytes_left = bio->bi_iter.bi_size - total_bytes;

				sums = kzalloc(btrfs_ordered_sum_size(fs_info, bytes_left),
					       GFP_NOFS);
				BUG_ON(!sums); /* -ENOMEM */
				sums->len = bytes_left;
				ordered = btrfs_lookup_ordered_extent(inode,
								      offset);
				ASSERT(ordered); /* Logic error */
				sums->bytenr = ((u64)bio->bi_iter.bi_sector << 9)
					+ total_bytes;
				index = 0;

				data = kmap_atomic(bvec.bv_page);
			}

			sums->sums[index] = ~(u32)0;
			sums->sums[index]
				= btrfs_csum_data(data + bvec.bv_offset
						  + (i * fs_info->sectorsize),
						  sums->sums[index],
						  fs_info->sectorsize);
			btrfs_csum_final(sums->sums[index],
					 (char *)(sums->sums + index));
			index++;
			offset += fs_info->sectorsize;
			this_sum_bytes += fs_info->sectorsize;
			total_bytes += fs_info->sectorsize;
		}

		kunmap_atomic(data);
	}
	this_sum_bytes = 0;
	btrfs_add_ordered_sum(inode, ordered, sums);
	btrfs_put_ordered_extent(ordered);
	return 0;
}

/*
 * helper function for csum removal, this expects the
 * key to describe the csum pointed to by the path, and it expects
 * the csum to overlap the range [bytenr, bytenr + len)
 *
 * The csum should not be entirely contained in the range and the
 * range should not be entirely contained in the csum.
 *
 * This calls btrfs_truncate_item with the correct args based on the
 * overlap, and fixes up the key as required.
 */
static noinline void truncate_one_csum(struct btrfs_fs_info *fs_info,
				       struct btrfs_path *path,
				       struct btrfs_key *key,
				       u64 bytenr, u64 len)
{
	struct extent_buffer *leaf;
	u16 csum_size = btrfs_super_csum_size(fs_info->super_copy);
	u64 csum_end;
	u64 end_byte = bytenr + len;
	u32 blocksize_bits = fs_info->sb->s_blocksize_bits;

	leaf = path->nodes[0];
	csum_end = btrfs_item_size_nr(leaf, path->slots[0]) / csum_size;
	csum_end <<= fs_info->sb->s_blocksize_bits;
	csum_end += key->offset;

	if (key->offset < bytenr && csum_end <= end_byte) {
		/*
		 *         [ bytenr - len ]
		 *         [   ]
		 *   [csum     ]
		 *   A simple truncate off the end of the item
		 */
		u32 new_size = (bytenr - key->offset) >> blocksize_bits;
		new_size *= csum_size;
		btrfs_truncate_item(fs_info, path, new_size, 1);
	} else if (key->offset >= bytenr && csum_end > end_byte &&
		   end_byte > key->offset) {
		/*
		 *         [ bytenr - len ]
		 *                 [ ]
		 *                 [csum     ]
		 * we need to truncate from the beginning of the csum
		 */
		u32 new_size = (csum_end - end_byte) >> blocksize_bits;
		new_size *= csum_size;

		btrfs_truncate_item(fs_info, path, new_size, 0);

		key->offset = end_byte;
		btrfs_set_item_key_safe(fs_info, path, key);
	} else {
		BUG();
	}
}

/*
 * deletes the csum items from the csum tree for a given
 * range of bytes.
 */
int btrfs_del_csums(struct btrfs_trans_handle *trans,
		    struct btrfs_root *root, u64 bytenr, u64 len)
{
	struct btrfs_fs_info *fs_info = trans->fs_info;
	struct btrfs_path *path;
	struct btrfs_key key;
	u64 end_byte = bytenr + len;
	u64 csum_end;
	struct extent_buffer *leaf;
	int ret = 0;
	u16 csum_size = btrfs_super_csum_size(fs_info->super_copy);
	int blocksize_bits = fs_info->sb->s_blocksize_bits;

	ASSERT(root == fs_info->csum_root ||
	       root->root_key.objectid == BTRFS_TREE_LOG_OBJECTID);

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	while (1) {
		key.objectid = BTRFS_EXTENT_CSUM_OBJECTID;
		key.offset = end_byte - 1;
		key.type = BTRFS_EXTENT_CSUM_KEY;

		path->leave_spinning = 1;
		ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
		if (ret > 0) {
			ret = 0;
			if (path->slots[0] == 0)
				break;
			path->slots[0]--;
		} else if (ret < 0) {
			break;
		}

		leaf = path->nodes[0];
		btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);

		if (key.objectid != BTRFS_EXTENT_CSUM_OBJECTID ||
		    key.type != BTRFS_EXTENT_CSUM_KEY) {
			break;
		}

		if (key.offset >= end_byte)
			break;

		csum_end = btrfs_item_size_nr(leaf, path->slots[0]) / csum_size;
		csum_end <<= blocksize_bits;
		csum_end += key.offset;

		/* this csum ends before we start, we're done */
		if (csum_end <= bytenr)
			break;

		/* delete the entire item, it is inside our range */
		if (key.offset >= bytenr && csum_end <= end_byte) {
			int del_nr = 1;

			/*
			 * Check how many csum items preceding this one in this
			 * leaf correspond to our range and then delete them all
			 * at once.
			 */
			if (key.offset > bytenr && path->slots[0] > 0) {
				int slot = path->slots[0] - 1;

				while (slot >= 0) {
					struct btrfs_key pk;

					btrfs_item_key_to_cpu(leaf, &pk, slot);
					if (pk.offset < bytenr ||
					    pk.type != BTRFS_EXTENT_CSUM_KEY ||
					    pk.objectid !=
					    BTRFS_EXTENT_CSUM_OBJECTID)
						break;
					path->slots[0] = slot;
					del_nr++;
					key.offset = pk.offset;
					slot--;
				}
			}
			ret = btrfs_del_items(trans, root, path,
					      path->slots[0], del_nr);
			if (ret)
				break;
			if (key.offset == bytenr)
				break;
		} else if (key.offset < bytenr && csum_end > end_byte) {
			unsigned long offset;
			unsigned long shift_len;
			unsigned long item_offset;
			/*
			 *        [ bytenr - len ]
			 *     [csum                ]
			 *
			 * Our bytes are in the middle of the csum,
			 * we need to split this item and insert a new one.
			 *
			 * But we can't drop the path because the
			 * csum could change, get removed, extended etc.
			 *
			 * The trick here is the max size of a csum item leaves
			 * enough room in the tree block for a single
			 * item header.  So, we split the item in place,
			 * adding a new header pointing to the existing
			 * bytes.  Then we loop around again and we have
			 * a nicely formed csum item that we can neatly
			 * truncate.
			 */
			offset = (bytenr - key.offset) >> blocksize_bits;
			offset *= csum_size;

			shift_len = (len >> blocksize_bits) * csum_size;

			item_offset = btrfs_item_ptr_offset(leaf,
							    path->slots[0]);

			memzero_extent_buffer(leaf, item_offset + offset,
					      shift_len);
			key.offset = bytenr;

			/*
			 * btrfs_split_item returns -EAGAIN when the
			 * item changed size or key
			 */
			ret = btrfs_split_item(trans, root, path, &key, offset);
			if (ret && ret != -EAGAIN) {
				btrfs_abort_transaction(trans, ret);
				break;
			}
			ret = 0;

			key.offset = end_byte - 1;
		} else {
			truncate_one_csum(fs_info, path, &key, bytenr, len);
			if (key.offset < bytenr)
				break;
		}
		btrfs_release_path(path);
	}
	btrfs_free_path(path);
	return ret;
}

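/*
 * write the checksums carried by a btrfs_ordered_sum into the csum tree.
 * An existing csum item covering sums->bytenr is reused or extended where
 * possible, otherwise new items are inserted, looping until all of
 * sums->len has been written.  Returns 0 on success or a negative errno.
 */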
int btrfs_csum_file_blocks(struct btrfs_trans_handle *trans,
			   struct btrfs_root *root,
			   struct btrfs_ordered_sum *sums)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_key file_key;
	struct btrfs_key found_key;
	struct btrfs_path *path;
	struct btrfs_csum_item *item;
	struct btrfs_csum_item *item_end;
	struct extent_buffer *leaf = NULL;
	u64 next_offset;
	u64 total_bytes = 0;
	u64 csum_offset;
	u64 bytenr;
	u32 nritems;
	u32 ins_size;
	int index = 0;
	int found_next;
	int ret;
	u16 csum_size = btrfs_super_csum_size(fs_info->super_copy);

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;
again:
	next_offset = (u64)-1;
	found_next = 0;
	bytenr = sums->bytenr + total_bytes;
	file_key.objectid = BTRFS_EXTENT_CSUM_OBJECTID;
	file_key.offset = bytenr;
	file_key.type = BTRFS_EXTENT_CSUM_KEY;

	item = btrfs_lookup_csum(trans, root, path, bytenr, 1);
	if (!IS_ERR(item)) {
		ret = 0;
		leaf = path->nodes[0];
		item_end = btrfs_item_ptr(leaf, path->slots[0],
					  struct btrfs_csum_item);
		item_end = (struct btrfs_csum_item *)((char *)item_end +
			   btrfs_item_size_nr(leaf, path->slots[0]));
		goto found;
	}
	ret = PTR_ERR(item);
	if (ret != -EFBIG && ret != -ENOENT)
		goto fail_unlock;

	if (ret == -EFBIG) {
		u32 item_size;
		/* we found one, but it isn't big enough yet */
		leaf = path->nodes[0];
		item_size = btrfs_item_size_nr(leaf, path->slots[0]);
		if ((item_size / csum_size) >=
		    MAX_CSUM_ITEMS(fs_info, csum_size)) {
			/* already at max size, make a new one */
			goto insert;
		}
	} else {
		int slot = path->slots[0] + 1;
		/* we didn't find a csum item, insert one */
		nritems = btrfs_header_nritems(path->nodes[0]);
		if (!nritems || (path->slots[0] >= nritems - 1)) {
			ret = btrfs_next_leaf(root, path);
			if (ret < 0) {
				goto out;
			} else if (ret > 0) {
				found_next = 1;
				goto insert;
			}
			slot = path->slots[0];
		}
		btrfs_item_key_to_cpu(path->nodes[0], &found_key, slot);
		if (found_key.objectid != BTRFS_EXTENT_CSUM_OBJECTID ||
		    found_key.type != BTRFS_EXTENT_CSUM_KEY) {
			found_next = 1;
			goto insert;
		}
		next_offset = found_key.offset;
		found_next = 1;
		goto insert;
	}

	/*
	 * at this point, we know the tree has an item, but it isn't big
	 * enough yet to put our csum in.  Grow it
	 */
	btrfs_release_path(path);
	ret = btrfs_search_slot(trans, root, &file_key, path,
				csum_size, 1);
	if (ret < 0)
		goto fail_unlock;

	if (ret > 0) {
		if (path->slots[0] == 0)
			goto insert;
		path->slots[0]--;
	}

	leaf = path->nodes[0];
	btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
	csum_offset = (bytenr - found_key.offset) >>
		      fs_info->sb->s_blocksize_bits;

	if (found_key.type != BTRFS_EXTENT_CSUM_KEY ||
	    found_key.objectid != BTRFS_EXTENT_CSUM_OBJECTID ||
	    csum_offset >= MAX_CSUM_ITEMS(fs_info, csum_size)) {
		goto insert;
	}

	if (csum_offset == btrfs_item_size_nr(leaf, path->slots[0]) /
	    csum_size) {
		int extend_nr;
		u64 tmp;
		u32 diff;
		u32 free_space;

		if (btrfs_leaf_free_space(fs_info, leaf) <
		    sizeof(struct btrfs_item) + csum_size * 2)
			goto insert;

		free_space = btrfs_leaf_free_space(fs_info, leaf) -
			     sizeof(struct btrfs_item) - csum_size;
		tmp = sums->len - total_bytes;
		tmp >>= fs_info->sb->s_blocksize_bits;
		WARN_ON(tmp < 1);

		extend_nr = max_t(int, 1, (int)tmp);
		diff = (csum_offset + extend_nr) * csum_size;
		diff = min(diff,
			   MAX_CSUM_ITEMS(fs_info, csum_size) * csum_size);

		diff = diff - btrfs_item_size_nr(leaf, path->slots[0]);
		diff = min(free_space, diff);
		diff /= csum_size;
		diff *= csum_size;

		btrfs_extend_item(fs_info, path, diff);
		ret = 0;
		goto csum;
	}

insert:
	btrfs_release_path(path);
	csum_offset = 0;
	if (found_next) {
		u64 tmp;

		tmp = sums->len - total_bytes;
		tmp >>= fs_info->sb->s_blocksize_bits;
		tmp = min(tmp, (next_offset - file_key.offset) >>
			       fs_info->sb->s_blocksize_bits);

		tmp = max_t(u64, 1, tmp);
		tmp = min_t(u64, tmp, MAX_CSUM_ITEMS(fs_info, csum_size));
		ins_size = csum_size * tmp;
	} else {
		ins_size = csum_size;
	}
	path->leave_spinning = 1;
	ret = btrfs_insert_empty_item(trans, root, path, &file_key,
				      ins_size);
	path->leave_spinning = 0;
	if (ret < 0)
		goto fail_unlock;
	if (WARN_ON(ret != 0))
		goto fail_unlock;
	leaf = path->nodes[0];
csum:
	item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_csum_item);
	item_end = (struct btrfs_csum_item *)((unsigned char *)item +
				      btrfs_item_size_nr(leaf, path->slots[0]));
	item = (struct btrfs_csum_item *)((unsigned char *)item +
					  csum_offset * csum_size);
found:
	ins_size = (u32)(sums->len - total_bytes) >>
		   fs_info->sb->s_blocksize_bits;
	ins_size *= csum_size;
	ins_size = min_t(u32, (unsigned long)item_end - (unsigned long)item,
			      ins_size);
	write_extent_buffer(leaf, sums->sums + index, (unsigned long)item,
			    ins_size);

	ins_size /= csum_size;
	total_bytes += ins_size * fs_info->sectorsize;
	index += ins_size;

	btrfs_mark_buffer_dirty(path->nodes[0]);
	if (total_bytes < sums->len) {
		btrfs_release_path(path);
		cond_resched();
		goto again;
	}
out:
	btrfs_free_path(path);
	return ret;

fail_unlock:
	goto out;
}

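/*
 * fill an extent_map from an on-disk file extent item.  Handles regular,
 * preallocated and inline extents, including the compressed variants, and
 * maps a zero disk_bytenr to a hole.
 */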
void btrfs_extent_item_to_extent_map(struct btrfs_inode *inode,
				     const struct btrfs_path *path,
				     struct btrfs_file_extent_item *fi,
				     const bool new_inline,
				     struct extent_map *em)
{
	struct btrfs_fs_info *fs_info = inode->root->fs_info;
	struct btrfs_root *root = inode->root;
	struct extent_buffer *leaf = path->nodes[0];
	const int slot = path->slots[0];
	struct btrfs_key key;
	u64 extent_start, extent_end;
	u64 bytenr;
	u8 type = btrfs_file_extent_type(leaf, fi);
	int compress_type = btrfs_file_extent_compression(leaf, fi);

	em->bdev = fs_info->fs_devices->latest_bdev;
	btrfs_item_key_to_cpu(leaf, &key, slot);
	extent_start = key.offset;

	if (type == BTRFS_FILE_EXTENT_REG ||
	    type == BTRFS_FILE_EXTENT_PREALLOC) {
		extent_end = extent_start +
			btrfs_file_extent_num_bytes(leaf, fi);
	} else if (type == BTRFS_FILE_EXTENT_INLINE) {
		size_t size;
		size = btrfs_file_extent_ram_bytes(leaf, fi);
		extent_end = ALIGN(extent_start + size,
				   fs_info->sectorsize);
	}

	em->ram_bytes = btrfs_file_extent_ram_bytes(leaf, fi);
	if (type == BTRFS_FILE_EXTENT_REG ||
	    type == BTRFS_FILE_EXTENT_PREALLOC) {
		em->start = extent_start;
		em->len = extent_end - extent_start;
		em->orig_start = extent_start -
			btrfs_file_extent_offset(leaf, fi);
		em->orig_block_len = btrfs_file_extent_disk_num_bytes(leaf, fi);
		bytenr = btrfs_file_extent_disk_bytenr(leaf, fi);
		if (bytenr == 0) {
			em->block_start = EXTENT_MAP_HOLE;
			return;
		}
		if (compress_type != BTRFS_COMPRESS_NONE) {
			set_bit(EXTENT_FLAG_COMPRESSED, &em->flags);
			em->compress_type = compress_type;
			em->block_start = bytenr;
			em->block_len = em->orig_block_len;
		} else {
			bytenr += btrfs_file_extent_offset(leaf, fi);
			em->block_start = bytenr;
			em->block_len = em->len;
			if (type == BTRFS_FILE_EXTENT_PREALLOC)
				set_bit(EXTENT_FLAG_PREALLOC, &em->flags);
		}
	} else if (type == BTRFS_FILE_EXTENT_INLINE) {
		em->block_start = EXTENT_MAP_INLINE;
		em->start = extent_start;
		em->len = extent_end - extent_start;
		/*
		 * Initialize orig_start and block_len with the same values
		 * as in inode.c:btrfs_get_extent().
		 */
		em->orig_start = EXTENT_MAP_HOLE;
		em->block_len = (u64)-1;
		if (!new_inline && compress_type != BTRFS_COMPRESS_NONE) {
			set_bit(EXTENT_FLAG_COMPRESSED, &em->flags);
			em->compress_type = compress_type;
		}
	} else {
		btrfs_err(fs_info,
			  "unknown file extent item type %d, inode %llu, offset %llu, root %llu",
			  type, btrfs_ino(inode), extent_start,
			  root->root_key.objectid);
	}
}