Linux kernel mirror (for testing)
git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel
os
linux
1// SPDX-License-Identifier: GPL-2.0
2/*
3 * Copyright (C) 2007 Oracle. All rights reserved.
4 */
5
6#include <linux/bio.h>
7#include <linux/slab.h>
8#include <linux/pagemap.h>
9#include <linux/highmem.h>
10#include "ctree.h"
11#include "disk-io.h"
12#include "transaction.h"
13#include "volumes.h"
14#include "print-tree.h"
15#include "compression.h"
16
/*
 * Upper bound on the number of checksums one csum item can hold, derived
 * from the leaf data size.  Room for two item headers is subtracted (and
 * one extra csum slot) so a maximally sized csum item can still be split
 * in place inside its leaf — see the split trick in btrfs_del_csums().
 */
#define __MAX_CSUM_ITEMS(r, size) ((unsigned long)(((BTRFS_LEAF_DATA_SIZE(r) - \
				   sizeof(struct btrfs_item) * 2) / \
				  size) - 1))

/* Additionally cap a csum item at PAGE_SIZE checksums */
#define MAX_CSUM_ITEMS(r, size) (min_t(u32, __MAX_CSUM_ITEMS(r, size), \
					PAGE_SIZE))

/*
 * Number of data bytes covered by the u32 checksums that fit into one
 * page-sized btrfs_ordered_sum allocation (one checksum per sector).
 */
#define MAX_ORDERED_SUM_BYTES(fs_info) ((PAGE_SIZE - \
				   sizeof(struct btrfs_ordered_sum)) / \
				   sizeof(u32) * (fs_info)->sectorsize)
27
28int btrfs_insert_file_extent(struct btrfs_trans_handle *trans,
29 struct btrfs_root *root,
30 u64 objectid, u64 pos,
31 u64 disk_offset, u64 disk_num_bytes,
32 u64 num_bytes, u64 offset, u64 ram_bytes,
33 u8 compression, u8 encryption, u16 other_encoding)
34{
35 int ret = 0;
36 struct btrfs_file_extent_item *item;
37 struct btrfs_key file_key;
38 struct btrfs_path *path;
39 struct extent_buffer *leaf;
40
41 path = btrfs_alloc_path();
42 if (!path)
43 return -ENOMEM;
44 file_key.objectid = objectid;
45 file_key.offset = pos;
46 file_key.type = BTRFS_EXTENT_DATA_KEY;
47
48 path->leave_spinning = 1;
49 ret = btrfs_insert_empty_item(trans, root, path, &file_key,
50 sizeof(*item));
51 if (ret < 0)
52 goto out;
53 BUG_ON(ret); /* Can't happen */
54 leaf = path->nodes[0];
55 item = btrfs_item_ptr(leaf, path->slots[0],
56 struct btrfs_file_extent_item);
57 btrfs_set_file_extent_disk_bytenr(leaf, item, disk_offset);
58 btrfs_set_file_extent_disk_num_bytes(leaf, item, disk_num_bytes);
59 btrfs_set_file_extent_offset(leaf, item, offset);
60 btrfs_set_file_extent_num_bytes(leaf, item, num_bytes);
61 btrfs_set_file_extent_ram_bytes(leaf, item, ram_bytes);
62 btrfs_set_file_extent_generation(leaf, item, trans->transid);
63 btrfs_set_file_extent_type(leaf, item, BTRFS_FILE_EXTENT_REG);
64 btrfs_set_file_extent_compression(leaf, item, compression);
65 btrfs_set_file_extent_encryption(leaf, item, encryption);
66 btrfs_set_file_extent_other_encoding(leaf, item, other_encoding);
67
68 btrfs_mark_buffer_dirty(leaf);
69out:
70 btrfs_free_path(path);
71 return ret;
72}
73
/*
 * Find the csum item covering disk byte @bytenr in the csum tree @root.
 *
 * On success @path points at the item and the returned pointer addresses
 * the checksum slot for @bytenr inside it.  On failure an ERR_PTR is
 * returned:
 *   -ENOENT  no csum item covers @bytenr
 *   -EFBIG   the preceding item ends exactly at @bytenr, i.e. it could be
 *            extended to cover it (used by btrfs_csum_file_blocks())
 *   other    error from btrfs_search_slot()
 *
 * @cow: non-zero to COW the path so the caller may modify the item.
 */
static struct btrfs_csum_item *
btrfs_lookup_csum(struct btrfs_trans_handle *trans,
		  struct btrfs_root *root,
		  struct btrfs_path *path,
		  u64 bytenr, int cow)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	int ret;
	struct btrfs_key file_key;
	struct btrfs_key found_key;
	struct btrfs_csum_item *item;
	struct extent_buffer *leaf;
	u64 csum_offset = 0;
	u16 csum_size = btrfs_super_csum_size(fs_info->super_copy);
	int csums_in_item;

	file_key.objectid = BTRFS_EXTENT_CSUM_OBJECTID;
	file_key.offset = bytenr;
	file_key.type = BTRFS_EXTENT_CSUM_KEY;
	ret = btrfs_search_slot(trans, root, &file_key, path, 0, cow);
	if (ret < 0)
		goto fail;
	leaf = path->nodes[0];
	if (ret > 0) {
		/* No exact match: the previous item may still cover bytenr */
		ret = 1;
		if (path->slots[0] == 0)
			goto fail;
		path->slots[0]--;
		btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
		if (found_key.type != BTRFS_EXTENT_CSUM_KEY)
			goto fail;

		/* Index of bytenr's checksum within the previous item */
		csum_offset = (bytenr - found_key.offset) >>
				fs_info->sb->s_blocksize_bits;
		csums_in_item = btrfs_item_size_nr(leaf, path->slots[0]);
		csums_in_item /= csum_size;

		if (csum_offset == csums_in_item) {
			/* Item ends right at bytenr: it could be extended */
			ret = -EFBIG;
			goto fail;
		} else if (csum_offset > csums_in_item) {
			/* Item ends before bytenr: no csum present */
			goto fail;
		}
	}
	item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_csum_item);
	item = (struct btrfs_csum_item *)((unsigned char *)item +
					  csum_offset * csum_size);
	return item;
fail:
	if (ret > 0)
		ret = -ENOENT;
	return ERR_PTR(ret);
}
127
128int btrfs_lookup_file_extent(struct btrfs_trans_handle *trans,
129 struct btrfs_root *root,
130 struct btrfs_path *path, u64 objectid,
131 u64 offset, int mod)
132{
133 int ret;
134 struct btrfs_key file_key;
135 int ins_len = mod < 0 ? -1 : 0;
136 int cow = mod != 0;
137
138 file_key.objectid = objectid;
139 file_key.offset = offset;
140 file_key.type = BTRFS_EXTENT_DATA_KEY;
141 ret = btrfs_search_slot(trans, root, &file_key, path, ins_len, cow);
142 return ret;
143}
144
/*
 * Look up the on-disk checksums for every sector of @bio and store them
 * into @dst or, when @dst is NULL, into the btrfs_io_bio's csum array
 * (allocated here if the inline buffer is too small).
 *
 * @logical_offset: file offset of the bio data; only used when @dio is
 *                  non-zero — for buffered reads the offset is derived
 *                  from each page instead.
 *
 * Checksums are taken first from a matching ordered extent (csums not yet
 * written to the csum tree) and otherwise from the csum tree.  A sector
 * with no checksum gets a zeroed slot; for the data relocation tree the
 * range is also flagged EXTENT_NODATASUM, otherwise a rate-limited
 * message is logged.
 *
 * Returns 0 or BLK_STS_RESOURCE on allocation failure.
 */
static blk_status_t __btrfs_lookup_bio_sums(struct inode *inode, struct bio *bio,
				   u64 logical_offset, u32 *dst, int dio)
{
	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
	struct bio_vec bvec;
	struct bvec_iter iter;
	struct btrfs_io_bio *btrfs_bio = btrfs_io_bio(bio);
	struct btrfs_csum_item *item = NULL;
	struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
	struct btrfs_path *path;
	u8 *csum;			/* next slot in the output array */
	u64 offset = 0;			/* file offset of current sector */
	u64 item_start_offset = 0;	/* disk range covered by 'item'... */
	u64 item_last_offset = 0;	/* ...cached to avoid re-searching */
	u64 disk_bytenr;
	u64 page_bytes_left;
	u32 diff;
	int nblocks;
	int count = 0;
	u16 csum_size = btrfs_super_csum_size(fs_info->super_copy);

	path = btrfs_alloc_path();
	if (!path)
		return BLK_STS_RESOURCE;

	nblocks = bio->bi_iter.bi_size >> inode->i_sb->s_blocksize_bits;
	if (!dst) {
		/* Store into the io_bio, inline when it fits */
		if (nblocks * csum_size > BTRFS_BIO_INLINE_CSUM_SIZE) {
			btrfs_bio->csum = kmalloc_array(nblocks, csum_size,
							GFP_NOFS);
			if (!btrfs_bio->csum) {
				btrfs_free_path(path);
				return BLK_STS_RESOURCE;
			}
		} else {
			btrfs_bio->csum = btrfs_bio->csum_inline;
		}
		csum = btrfs_bio->csum;
	} else {
		csum = (u8 *)dst;
	}

	if (bio->bi_iter.bi_size > PAGE_SIZE * 8)
		path->reada = READA_FORWARD;

	/*
	 * the free space stuff is only read when it hasn't been
	 * updated in the current transaction. So, we can safely
	 * read from the commit root and sidestep a nasty deadlock
	 * between reading the free space cache and updating the csum tree.
	 */
	if (btrfs_is_free_space_inode(BTRFS_I(inode))) {
		path->search_commit_root = 1;
		path->skip_locking = 1;
	}

	disk_bytenr = (u64)bio->bi_iter.bi_sector << 9;
	if (dio)
		offset = logical_offset;

	bio_for_each_segment(bvec, bio, iter) {
		page_bytes_left = bvec.bv_len;
		if (count)
			goto next;	/* previous lookup spans this bvec */

		if (!dio)
			offset = page_offset(bvec.bv_page) + bvec.bv_offset;
		/* Try csums still held by an in-flight ordered extent */
		count = btrfs_find_ordered_sum(inode, offset, disk_bytenr,
					       (u32 *)csum, nblocks);
		if (count)
			goto found;

		/* Need a (different) csum item from the csum tree? */
		if (!item || disk_bytenr < item_start_offset ||
		    disk_bytenr >= item_last_offset) {
			struct btrfs_key found_key;
			u32 item_size;

			if (item)
				btrfs_release_path(path);
			item = btrfs_lookup_csum(NULL, fs_info->csum_root,
						 path, disk_bytenr, 0);
			if (IS_ERR(item)) {
				/* No csum: emit one zeroed slot and move on */
				count = 1;
				memset(csum, 0, csum_size);
				if (BTRFS_I(inode)->root->root_key.objectid ==
				    BTRFS_DATA_RELOC_TREE_OBJECTID) {
					set_extent_bits(io_tree, offset,
						offset + fs_info->sectorsize - 1,
						EXTENT_NODATASUM);
				} else {
					btrfs_info_rl(fs_info,
						"no csum found for inode %llu start %llu",
						btrfs_ino(BTRFS_I(inode)), offset);
				}
				item = NULL;
				btrfs_release_path(path);
				goto found;
			}
			btrfs_item_key_to_cpu(path->nodes[0], &found_key,
					      path->slots[0]);

			/* Cache the disk byte range this item covers */
			item_start_offset = found_key.offset;
			item_size = btrfs_item_size_nr(path->nodes[0],
						       path->slots[0]);
			item_last_offset = item_start_offset +
				(item_size / csum_size) *
				fs_info->sectorsize;
			item = btrfs_item_ptr(path->nodes[0], path->slots[0],
					      struct btrfs_csum_item);
		}
		/*
		 * this byte range must be able to fit inside
		 * a single leaf so it will also fit inside a u32
		 */
		diff = disk_bytenr - item_start_offset;
		diff = diff / fs_info->sectorsize;
		diff = diff * csum_size;
		count = min_t(int, nblocks, (item_last_offset - disk_bytenr) >>
					    inode->i_sb->s_blocksize_bits);
		read_extent_buffer(path->nodes[0], csum,
				   ((unsigned long)item) + diff,
				   csum_size * count);
found:
		csum += count * csum_size;
		nblocks -= count;
next:
		/* Walk 'count' sectors forward, possibly across bvecs */
		while (count--) {
			disk_bytenr += fs_info->sectorsize;
			offset += fs_info->sectorsize;
			page_bytes_left -= fs_info->sectorsize;
			if (!page_bytes_left)
				break; /* move to next bio */
		}
	}

	WARN_ON_ONCE(count);
	btrfs_free_path(path);
	return 0;
}
284
/* Look up csums for a buffered-read bio; see __btrfs_lookup_bio_sums(). */
blk_status_t btrfs_lookup_bio_sums(struct inode *inode, struct bio *bio, u32 *dst)
{
	return __btrfs_lookup_bio_sums(inode, bio, 0, dst, 0);
}
289
/*
 * Look up csums for a direct-I/O bio starting at file @offset; results
 * are stored in the btrfs_io_bio.  See __btrfs_lookup_bio_sums().
 */
blk_status_t btrfs_lookup_bio_sums_dio(struct inode *inode, struct bio *bio, u64 offset)
{
	return __btrfs_lookup_bio_sums(inode, bio, offset, NULL, 1);
}
294
/*
 * Collect all checksums for the disk byte range [@start, @end] from the
 * csum tree @root into newly allocated btrfs_ordered_sum structures and
 * append them to @list.
 *
 * @search_commit: non-zero to search the commit root without locking
 *                 (and with forward readahead).
 *
 * @start and @end + 1 must be sectorsize aligned.  Each ordered_sum
 * covers at most MAX_ORDERED_SUM_BYTES of data.  On error everything
 * allocated here is freed and nothing is added to @list.
 *
 * Returns 0 on success or a negative errno.
 */
int btrfs_lookup_csums_range(struct btrfs_root *root, u64 start, u64 end,
			     struct list_head *list, int search_commit)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_key key;
	struct btrfs_path *path;
	struct extent_buffer *leaf;
	struct btrfs_ordered_sum *sums;
	struct btrfs_csum_item *item;
	LIST_HEAD(tmplist);
	unsigned long offset;
	int ret;
	size_t size;
	u64 csum_end;
	u16 csum_size = btrfs_super_csum_size(fs_info->super_copy);

	ASSERT(IS_ALIGNED(start, fs_info->sectorsize) &&
	       IS_ALIGNED(end + 1, fs_info->sectorsize));

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	if (search_commit) {
		path->skip_locking = 1;
		path->reada = READA_FORWARD;
		path->search_commit_root = 1;
	}

	key.objectid = BTRFS_EXTENT_CSUM_OBJECTID;
	key.offset = start;
	key.type = BTRFS_EXTENT_CSUM_KEY;

	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	if (ret < 0)
		goto fail;
	if (ret > 0 && path->slots[0] > 0) {
		/* The previous item may already cover 'start'; back up if so */
		leaf = path->nodes[0];
		btrfs_item_key_to_cpu(leaf, &key, path->slots[0] - 1);
		if (key.objectid == BTRFS_EXTENT_CSUM_OBJECTID &&
		    key.type == BTRFS_EXTENT_CSUM_KEY) {
			offset = (start - key.offset) >>
				 fs_info->sb->s_blocksize_bits;
			if (offset * csum_size <
			    btrfs_item_size_nr(leaf, path->slots[0] - 1))
				path->slots[0]--;
		}
	}

	while (start <= end) {
		leaf = path->nodes[0];
		if (path->slots[0] >= btrfs_header_nritems(leaf)) {
			ret = btrfs_next_leaf(root, path);
			if (ret < 0)
				goto fail;
			if (ret > 0)
				break;		/* ran out of tree */
			leaf = path->nodes[0];
		}

		btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
		if (key.objectid != BTRFS_EXTENT_CSUM_OBJECTID ||
		    key.type != BTRFS_EXTENT_CSUM_KEY ||
		    key.offset > end)
			break;

		if (key.offset > start)
			start = key.offset;	/* hole before this item */

		size = btrfs_item_size_nr(leaf, path->slots[0]);
		csum_end = key.offset + (size / csum_size) * fs_info->sectorsize;
		if (csum_end <= start) {
			/* Item ends before our range; skip it */
			path->slots[0]++;
			continue;
		}

		csum_end = min(csum_end, end + 1);
		item = btrfs_item_ptr(path->nodes[0], path->slots[0],
				      struct btrfs_csum_item);
		/* Chop this item's overlap into ordered_sum-sized chunks */
		while (start < csum_end) {
			size = min_t(size_t, csum_end - start,
				     MAX_ORDERED_SUM_BYTES(fs_info));
			sums = kzalloc(btrfs_ordered_sum_size(fs_info, size),
				       GFP_NOFS);
			if (!sums) {
				ret = -ENOMEM;
				goto fail;
			}

			sums->bytenr = start;
			sums->len = (int)size;

			offset = (start - key.offset) >>
				fs_info->sb->s_blocksize_bits;
			offset *= csum_size;
			size >>= fs_info->sb->s_blocksize_bits;

			read_extent_buffer(path->nodes[0],
					   sums->sums,
					   ((unsigned long)item) + offset,
					   csum_size * size);

			start += fs_info->sectorsize * size;
			list_add_tail(&sums->list, &tmplist);
		}
		path->slots[0]++;
	}
	ret = 0;
fail:
	/* On error drop everything we collected; on success hand it over */
	while (ret < 0 && !list_empty(&tmplist)) {
		sums = list_entry(tmplist.next, struct btrfs_ordered_sum, list);
		list_del(&sums->list);
		kfree(sums);
	}
	list_splice_tail(&tmplist, list);

	btrfs_free_path(path);
	return ret;
}
414
/*
 * Compute checksums for every sector of @bio's data and attach them, as
 * btrfs_ordered_sum structures, to the matching ordered extent(s).
 *
 * @file_start: file offset of the bio when @contig is non-zero (the bio
 *              is known to be file-contiguous); otherwise offsets are
 *              derived per page.
 *
 * When a sector falls outside the current ordered extent, the sums built
 * so far are handed to it and a fresh ordered_sum is started for the
 * next ordered extent.
 *
 * Returns 0 or BLK_STS_RESOURCE if the first allocation fails.
 */
blk_status_t btrfs_csum_one_bio(struct inode *inode, struct bio *bio,
		       u64 file_start, int contig)
{
	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
	struct btrfs_ordered_sum *sums;
	struct btrfs_ordered_extent *ordered = NULL;
	char *data;
	struct bvec_iter iter;
	struct bio_vec bvec;
	int index;			/* next free slot in sums->sums */
	int nr_sectors;
	unsigned long total_bytes = 0;	/* bytes checksummed overall */
	unsigned long this_sum_bytes = 0; /* bytes in the current 'sums' */
	int i;
	u64 offset;

	sums = kzalloc(btrfs_ordered_sum_size(fs_info, bio->bi_iter.bi_size),
		       GFP_NOFS);
	if (!sums)
		return BLK_STS_RESOURCE;

	sums->len = bio->bi_iter.bi_size;
	INIT_LIST_HEAD(&sums->list);

	if (contig)
		offset = file_start;
	else
		offset = 0; /* shut up gcc */

	sums->bytenr = (u64)bio->bi_iter.bi_sector << 9;
	index = 0;

	bio_for_each_segment(bvec, bio, iter) {
		if (!contig)
			offset = page_offset(bvec.bv_page) + bvec.bv_offset;

		if (!ordered) {
			ordered = btrfs_lookup_ordered_extent(inode, offset);
			BUG_ON(!ordered); /* Logic error */
		}

		data = kmap_atomic(bvec.bv_page);

		nr_sectors = BTRFS_BYTES_TO_BLKS(fs_info,
						 bvec.bv_len + fs_info->sectorsize
						 - 1);

		for (i = 0; i < nr_sectors; i++) {
			if (offset >= ordered->file_offset + ordered->len ||
				offset < ordered->file_offset) {
				/*
				 * Sector belongs to a different ordered
				 * extent: finish the current sums, hand it
				 * to its ordered extent, and start a new
				 * one sized for the remaining bio bytes.
				 */
				unsigned long bytes_left;

				kunmap_atomic(data);
				sums->len = this_sum_bytes;
				this_sum_bytes = 0;
				btrfs_add_ordered_sum(inode, ordered, sums);
				btrfs_put_ordered_extent(ordered);

				bytes_left = bio->bi_iter.bi_size - total_bytes;

				sums = kzalloc(btrfs_ordered_sum_size(fs_info, bytes_left),
					       GFP_NOFS);
				BUG_ON(!sums); /* -ENOMEM */
				sums->len = bytes_left;
				ordered = btrfs_lookup_ordered_extent(inode,
								offset);
				ASSERT(ordered); /* Logic error */
				sums->bytenr = ((u64)bio->bi_iter.bi_sector << 9)
					+ total_bytes;
				index = 0;

				/* remap: kunmap'ed above before the handoff */
				data = kmap_atomic(bvec.bv_page);
			}

			/* Seed with ~0 then checksum this sector's bytes */
			sums->sums[index] = ~(u32)0;
			sums->sums[index]
				= btrfs_csum_data(data + bvec.bv_offset
						+ (i * fs_info->sectorsize),
						sums->sums[index],
						fs_info->sectorsize);
			btrfs_csum_final(sums->sums[index],
					(char *)(sums->sums + index));
			index++;
			offset += fs_info->sectorsize;
			this_sum_bytes += fs_info->sectorsize;
			total_bytes += fs_info->sectorsize;
		}

		kunmap_atomic(data);
	}
	this_sum_bytes = 0;
	btrfs_add_ordered_sum(inode, ordered, sums);
	btrfs_put_ordered_extent(ordered);
	return 0;
}
510
/*
 * helper function for csum removal, this expects the
 * key to describe the csum pointed to by the path, and it expects
 * the csum to overlap the range [bytenr, bytenr + len)
 *
 * The csum should not be entirely contained in the range and the
 * range should not be entirely contained in the csum.
 *
 * This calls btrfs_truncate_item with the correct args based on the
 * overlap, and fixes up the key as required.
 */
static noinline void truncate_one_csum(struct btrfs_fs_info *fs_info,
				       struct btrfs_path *path,
				       struct btrfs_key *key,
				       u64 bytenr, u64 len)
{
	struct extent_buffer *leaf;
	u16 csum_size = btrfs_super_csum_size(fs_info->super_copy);
	u64 csum_end;
	u64 end_byte = bytenr + len;
	u32 blocksize_bits = fs_info->sb->s_blocksize_bits;

	leaf = path->nodes[0];
	/* First disk byte past the range this csum item covers */
	csum_end = btrfs_item_size_nr(leaf, path->slots[0]) / csum_size;
	csum_end <<= fs_info->sb->s_blocksize_bits;
	csum_end += key->offset;

	if (key->offset < bytenr && csum_end <= end_byte) {
		/*
		 *         [ bytenr - len ]
		 *         [   ]
		 *   [csum     ]
		 *   A simple truncate off the end of the item
		 */
		u32 new_size = (bytenr - key->offset) >> blocksize_bits;
		new_size *= csum_size;
		btrfs_truncate_item(fs_info, path, new_size, 1);
	} else if (key->offset >= bytenr && csum_end > end_byte &&
		   end_byte > key->offset) {
		/*
		 *         [ bytenr - len ]
		 *                 [ ]
		 *                 [csum     ]
		 * we need to truncate from the beginning of the csum
		 */
		u32 new_size = (csum_end - end_byte) >> blocksize_bits;
		new_size *= csum_size;

		btrfs_truncate_item(fs_info, path, new_size, 0);

		/* The item now starts at end_byte; fix its key in place */
		key->offset = end_byte;
		btrfs_set_item_key_safe(fs_info, path, key);
	} else {
		/* Caller guaranteed partial overlap; anything else is a bug */
		BUG();
	}
}
567
568/*
569 * deletes the csum items from the csum tree for a given
570 * range of bytes.
571 */
572int btrfs_del_csums(struct btrfs_trans_handle *trans,
573 struct btrfs_fs_info *fs_info, u64 bytenr, u64 len)
574{
575 struct btrfs_root *root = fs_info->csum_root;
576 struct btrfs_path *path;
577 struct btrfs_key key;
578 u64 end_byte = bytenr + len;
579 u64 csum_end;
580 struct extent_buffer *leaf;
581 int ret;
582 u16 csum_size = btrfs_super_csum_size(fs_info->super_copy);
583 int blocksize_bits = fs_info->sb->s_blocksize_bits;
584
585 path = btrfs_alloc_path();
586 if (!path)
587 return -ENOMEM;
588
589 while (1) {
590 key.objectid = BTRFS_EXTENT_CSUM_OBJECTID;
591 key.offset = end_byte - 1;
592 key.type = BTRFS_EXTENT_CSUM_KEY;
593
594 path->leave_spinning = 1;
595 ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
596 if (ret > 0) {
597 if (path->slots[0] == 0)
598 break;
599 path->slots[0]--;
600 } else if (ret < 0) {
601 break;
602 }
603
604 leaf = path->nodes[0];
605 btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
606
607 if (key.objectid != BTRFS_EXTENT_CSUM_OBJECTID ||
608 key.type != BTRFS_EXTENT_CSUM_KEY) {
609 break;
610 }
611
612 if (key.offset >= end_byte)
613 break;
614
615 csum_end = btrfs_item_size_nr(leaf, path->slots[0]) / csum_size;
616 csum_end <<= blocksize_bits;
617 csum_end += key.offset;
618
619 /* this csum ends before we start, we're done */
620 if (csum_end <= bytenr)
621 break;
622
623 /* delete the entire item, it is inside our range */
624 if (key.offset >= bytenr && csum_end <= end_byte) {
625 int del_nr = 1;
626
627 /*
628 * Check how many csum items preceding this one in this
629 * leaf correspond to our range and then delete them all
630 * at once.
631 */
632 if (key.offset > bytenr && path->slots[0] > 0) {
633 int slot = path->slots[0] - 1;
634
635 while (slot >= 0) {
636 struct btrfs_key pk;
637
638 btrfs_item_key_to_cpu(leaf, &pk, slot);
639 if (pk.offset < bytenr ||
640 pk.type != BTRFS_EXTENT_CSUM_KEY ||
641 pk.objectid !=
642 BTRFS_EXTENT_CSUM_OBJECTID)
643 break;
644 path->slots[0] = slot;
645 del_nr++;
646 key.offset = pk.offset;
647 slot--;
648 }
649 }
650 ret = btrfs_del_items(trans, root, path,
651 path->slots[0], del_nr);
652 if (ret)
653 goto out;
654 if (key.offset == bytenr)
655 break;
656 } else if (key.offset < bytenr && csum_end > end_byte) {
657 unsigned long offset;
658 unsigned long shift_len;
659 unsigned long item_offset;
660 /*
661 * [ bytenr - len ]
662 * [csum ]
663 *
664 * Our bytes are in the middle of the csum,
665 * we need to split this item and insert a new one.
666 *
667 * But we can't drop the path because the
668 * csum could change, get removed, extended etc.
669 *
670 * The trick here is the max size of a csum item leaves
671 * enough room in the tree block for a single
672 * item header. So, we split the item in place,
673 * adding a new header pointing to the existing
674 * bytes. Then we loop around again and we have
675 * a nicely formed csum item that we can neatly
676 * truncate.
677 */
678 offset = (bytenr - key.offset) >> blocksize_bits;
679 offset *= csum_size;
680
681 shift_len = (len >> blocksize_bits) * csum_size;
682
683 item_offset = btrfs_item_ptr_offset(leaf,
684 path->slots[0]);
685
686 memzero_extent_buffer(leaf, item_offset + offset,
687 shift_len);
688 key.offset = bytenr;
689
690 /*
691 * btrfs_split_item returns -EAGAIN when the
692 * item changed size or key
693 */
694 ret = btrfs_split_item(trans, root, path, &key, offset);
695 if (ret && ret != -EAGAIN) {
696 btrfs_abort_transaction(trans, ret);
697 goto out;
698 }
699
700 key.offset = end_byte - 1;
701 } else {
702 truncate_one_csum(fs_info, path, &key, bytenr, len);
703 if (key.offset < bytenr)
704 break;
705 }
706 btrfs_release_path(path);
707 }
708 ret = 0;
709out:
710 btrfs_free_path(path);
711 return ret;
712}
713
/*
 * Write the checksums in @sums into the csum tree @root, looping until
 * all of @sums->len bytes worth of csums are stored.
 *
 * For each chunk this either:
 *   - finds an existing item already covering the bytenr (writes in place),
 *   - extends an item that ends exactly at the bytenr (-EFBIG from
 *     btrfs_lookup_csum()), or
 *   - inserts a new item, sized up to the next existing csum item / the
 *     per-item maximum.
 *
 * Returns 0 on success or a negative errno.
 */
int btrfs_csum_file_blocks(struct btrfs_trans_handle *trans,
			   struct btrfs_root *root,
			   struct btrfs_ordered_sum *sums)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_key file_key;
	struct btrfs_key found_key;
	struct btrfs_path *path;
	struct btrfs_csum_item *item;
	struct btrfs_csum_item *item_end;
	struct extent_buffer *leaf = NULL;
	u64 next_offset;	/* bytenr of the next existing csum item */
	u64 total_bytes = 0;	/* data bytes whose csums are stored so far */
	u64 csum_offset;
	u64 bytenr;
	u32 nritems;
	u32 ins_size;
	int index = 0;		/* next source slot in sums->sums */
	int found_next;
	int ret;
	u16 csum_size = btrfs_super_csum_size(fs_info->super_copy);

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;
again:
	next_offset = (u64)-1;
	found_next = 0;
	bytenr = sums->bytenr + total_bytes;
	file_key.objectid = BTRFS_EXTENT_CSUM_OBJECTID;
	file_key.offset = bytenr;
	file_key.type = BTRFS_EXTENT_CSUM_KEY;

	item = btrfs_lookup_csum(trans, root, path, bytenr, 1);
	if (!IS_ERR(item)) {
		/* Existing item covers bytenr: write into it directly */
		ret = 0;
		leaf = path->nodes[0];
		item_end = btrfs_item_ptr(leaf, path->slots[0],
					  struct btrfs_csum_item);
		item_end = (struct btrfs_csum_item *)((char *)item_end +
			   btrfs_item_size_nr(leaf, path->slots[0]));
		goto found;
	}
	ret = PTR_ERR(item);
	if (ret != -EFBIG && ret != -ENOENT)
		goto fail_unlock;

	if (ret == -EFBIG) {
		u32 item_size;
		/* we found one, but it isn't big enough yet */
		leaf = path->nodes[0];
		item_size = btrfs_item_size_nr(leaf, path->slots[0]);
		if ((item_size / csum_size) >=
		    MAX_CSUM_ITEMS(fs_info, csum_size)) {
			/* already at max size, make a new one */
			goto insert;
		}
	} else {
		int slot = path->slots[0] + 1;
		/* we didn't find a csum item, insert one */
		nritems = btrfs_header_nritems(path->nodes[0]);
		if (!nritems || (path->slots[0] >= nritems - 1)) {
			ret = btrfs_next_leaf(root, path);
			if (ret == 1)
				found_next = 1;
			if (ret != 0)
				goto insert;
			slot = path->slots[0];
		}
		/* Remember the next item so the insert doesn't overlap it */
		btrfs_item_key_to_cpu(path->nodes[0], &found_key, slot);
		if (found_key.objectid != BTRFS_EXTENT_CSUM_OBJECTID ||
		    found_key.type != BTRFS_EXTENT_CSUM_KEY) {
			found_next = 1;
			goto insert;
		}
		next_offset = found_key.offset;
		found_next = 1;
		goto insert;
	}

	/*
	 * at this point, we know the tree has an item, but it isn't big
	 * enough yet to put our csum in. Grow it
	 */
	btrfs_release_path(path);
	ret = btrfs_search_slot(trans, root, &file_key, path,
				csum_size, 1);
	if (ret < 0)
		goto fail_unlock;

	if (ret > 0) {
		if (path->slots[0] == 0)
			goto insert;
		path->slots[0]--;
	}

	leaf = path->nodes[0];
	btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
	csum_offset = (bytenr - found_key.offset) >>
			fs_info->sb->s_blocksize_bits;

	if (found_key.type != BTRFS_EXTENT_CSUM_KEY ||
	    found_key.objectid != BTRFS_EXTENT_CSUM_OBJECTID ||
	    csum_offset >= MAX_CSUM_ITEMS(fs_info, csum_size)) {
		goto insert;
	}

	if (csum_offset == btrfs_item_size_nr(leaf, path->slots[0]) /
	    csum_size) {
		/* Item ends right at bytenr: extend it in place */
		int extend_nr;
		u64 tmp;
		u32 diff;
		u32 free_space;

		if (btrfs_leaf_free_space(fs_info, leaf) <
				 sizeof(struct btrfs_item) + csum_size * 2)
			goto insert;

		free_space = btrfs_leaf_free_space(fs_info, leaf) -
					 sizeof(struct btrfs_item) - csum_size;
		tmp = sums->len - total_bytes;
		tmp >>= fs_info->sb->s_blocksize_bits;
		WARN_ON(tmp < 1);

		/* Grow by as much as fits: remaining csums, item cap, leaf */
		extend_nr = max_t(int, 1, (int)tmp);
		diff = (csum_offset + extend_nr) * csum_size;
		diff = min(diff,
			   MAX_CSUM_ITEMS(fs_info, csum_size) * csum_size);

		diff = diff - btrfs_item_size_nr(leaf, path->slots[0]);
		diff = min(free_space, diff);
		diff /= csum_size;
		diff *= csum_size;

		btrfs_extend_item(fs_info, path, diff);
		ret = 0;
		goto csum;
	}

insert:
	btrfs_release_path(path);
	csum_offset = 0;
	if (found_next) {
		/* Size the new item up to the next csum item's start */
		u64 tmp;

		tmp = sums->len - total_bytes;
		tmp >>= fs_info->sb->s_blocksize_bits;
		tmp = min(tmp, (next_offset - file_key.offset) >>
					 fs_info->sb->s_blocksize_bits);

		tmp = max_t(u64, 1, tmp);
		tmp = min_t(u64, tmp, MAX_CSUM_ITEMS(fs_info, csum_size));
		ins_size = csum_size * tmp;
	} else {
		ins_size = csum_size;
	}
	path->leave_spinning = 1;
	ret = btrfs_insert_empty_item(trans, root, path, &file_key,
				      ins_size);
	path->leave_spinning = 0;
	if (ret < 0)
		goto fail_unlock;
	if (WARN_ON(ret != 0))
		goto fail_unlock;
	leaf = path->nodes[0];
csum:
	item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_csum_item);
	item_end = (struct btrfs_csum_item *)((unsigned char *)item +
				      btrfs_item_size_nr(leaf, path->slots[0]));
	item = (struct btrfs_csum_item *)((unsigned char *)item +
					  csum_offset * csum_size);
found:
	/* Copy as many csums as fit between item and item_end */
	ins_size = (u32)(sums->len - total_bytes) >>
		   fs_info->sb->s_blocksize_bits;
	ins_size *= csum_size;
	ins_size = min_t(u32, (unsigned long)item_end - (unsigned long)item,
			      ins_size);
	write_extent_buffer(leaf, sums->sums + index, (unsigned long)item,
			    ins_size);

	ins_size /= csum_size;
	total_bytes += ins_size * fs_info->sectorsize;
	index += ins_size;

	btrfs_mark_buffer_dirty(path->nodes[0]);
	if (total_bytes < sums->len) {
		/* More csums left: restart the search at the new bytenr */
		btrfs_release_path(path);
		cond_resched();
		goto again;
	}
out:
	btrfs_free_path(path);
	return ret;

fail_unlock:
	goto out;
}
911
/*
 * Fill the extent map @em from the file extent item @fi located at
 * @path (leaf path->nodes[0], slot path->slots[0]) of @inode.
 *
 * Handles REG, PREALLOC and INLINE extents; REG/PREALLOC with a zero
 * disk bytenr become EXTENT_MAP_HOLE.  @new_inline suppresses the
 * compressed flag for inline extents.  Unknown extent types are only
 * logged — @em is then left partially initialized.
 */
void btrfs_extent_item_to_extent_map(struct btrfs_inode *inode,
				     const struct btrfs_path *path,
				     struct btrfs_file_extent_item *fi,
				     const bool new_inline,
				     struct extent_map *em)
{
	struct btrfs_fs_info *fs_info = inode->root->fs_info;
	struct btrfs_root *root = inode->root;
	struct extent_buffer *leaf = path->nodes[0];
	const int slot = path->slots[0];
	struct btrfs_key key;
	u64 extent_start, extent_end;
	u64 bytenr;
	u8 type = btrfs_file_extent_type(leaf, fi);
	int compress_type = btrfs_file_extent_compression(leaf, fi);

	em->bdev = fs_info->fs_devices->latest_bdev;
	btrfs_item_key_to_cpu(leaf, &key, slot);
	extent_start = key.offset;

	if (type == BTRFS_FILE_EXTENT_REG ||
	    type == BTRFS_FILE_EXTENT_PREALLOC) {
		extent_end = extent_start +
			btrfs_file_extent_num_bytes(leaf, fi);
	} else if (type == BTRFS_FILE_EXTENT_INLINE) {
		/* Inline extents are sectorsize-aligned in the extent map */
		size_t size;
		size = btrfs_file_extent_ram_bytes(leaf, fi);
		extent_end = ALIGN(extent_start + size,
				   fs_info->sectorsize);
	}

	em->ram_bytes = btrfs_file_extent_ram_bytes(leaf, fi);
	if (type == BTRFS_FILE_EXTENT_REG ||
	    type == BTRFS_FILE_EXTENT_PREALLOC) {
		em->start = extent_start;
		em->len = extent_end - extent_start;
		em->orig_start = extent_start -
			btrfs_file_extent_offset(leaf, fi);
		em->orig_block_len = btrfs_file_extent_disk_num_bytes(leaf, fi);
		bytenr = btrfs_file_extent_disk_bytenr(leaf, fi);
		if (bytenr == 0) {
			/* Zero disk bytenr marks a hole */
			em->block_start = EXTENT_MAP_HOLE;
			return;
		}
		if (compress_type != BTRFS_COMPRESS_NONE) {
			set_bit(EXTENT_FLAG_COMPRESSED, &em->flags);
			em->compress_type = compress_type;
			em->block_start = bytenr;
			em->block_len = em->orig_block_len;
		} else {
			/* Uncompressed: skip 'offset' bytes into the extent */
			bytenr += btrfs_file_extent_offset(leaf, fi);
			em->block_start = bytenr;
			em->block_len = em->len;
			if (type == BTRFS_FILE_EXTENT_PREALLOC)
				set_bit(EXTENT_FLAG_PREALLOC, &em->flags);
		}
	} else if (type == BTRFS_FILE_EXTENT_INLINE) {
		em->block_start = EXTENT_MAP_INLINE;
		em->start = extent_start;
		em->len = extent_end - extent_start;
		/*
		 * Initialize orig_start and block_len with the same values
		 * as in inode.c:btrfs_get_extent().
		 */
		em->orig_start = EXTENT_MAP_HOLE;
		em->block_len = (u64)-1;
		if (!new_inline && compress_type != BTRFS_COMPRESS_NONE) {
			set_bit(EXTENT_FLAG_COMPRESSED, &em->flags);
			em->compress_type = compress_type;
		}
	} else {
		btrfs_err(fs_info,
			  "unknown file extent item type %d, inode %llu, offset %llu, "
			  "root %llu", type, btrfs_ino(inode), extent_start,
			  root->root_key.objectid);
	}
}