]> git.proxmox.com Git - mirror_ubuntu-bionic-kernel.git/blob - fs/btrfs/file-item.c
btrfs: refactor __btrfs_lookup_bio_sums to use bio_for_each_segment_all
[mirror_ubuntu-bionic-kernel.git] / fs / btrfs / file-item.c
1 /*
2 * Copyright (C) 2007 Oracle. All rights reserved.
3 *
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public
6 * License v2 as published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
11 * General Public License for more details.
12 *
13 * You should have received a copy of the GNU General Public
14 * License along with this program; if not, write to the
15 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
16 * Boston, MA 021110-1307, USA.
17 */
18
19 #include <linux/bio.h>
20 #include <linux/slab.h>
21 #include <linux/pagemap.h>
22 #include <linux/highmem.h>
23 #include "ctree.h"
24 #include "disk-io.h"
25 #include "transaction.h"
26 #include "volumes.h"
27 #include "print-tree.h"
28 #include "compression.h"
29
/*
 * Maximum number of checksum slots one csum item may hold while still
 * leaving room in the leaf for one extra item header.  That headroom is what
 * lets btrfs_del_csums() split a max-sized csum item in place (see the
 * comment in that function).
 */
#define __MAX_CSUM_ITEMS(r, size) ((unsigned long)(((BTRFS_LEAF_DATA_SIZE(r) - \
				   sizeof(struct btrfs_item) * 2) / \
				  size) - 1))

/* Additionally cap the slot count at PAGE_SIZE. */
#define MAX_CSUM_ITEMS(r, size) (min_t(u32, __MAX_CSUM_ITEMS(r, size), \
				       PAGE_SIZE))

/* Max number of data bytes a single page-sized btrfs_ordered_sum can cover. */
#define MAX_ORDERED_SUM_BYTES(r) ((PAGE_SIZE - \
				   sizeof(struct btrfs_ordered_sum)) / \
				  sizeof(u32) * (r)->sectorsize)
40
/*
 * btrfs_insert_file_extent - insert a regular EXTENT_DATA item for a file
 * @trans:          running transaction handle
 * @root:           tree to insert into
 * @objectid:       inode number owning the extent
 * @pos:            file offset of the extent (becomes the key offset)
 * @disk_offset:    logical disk address of the extent
 * @disk_num_bytes: on-disk size of the extent
 * @num_bytes:      number of bytes referenced by the file
 * @offset:         offset into the extent where the referenced data starts
 * @ram_bytes:      uncompressed size of the extent
 * @compression:    compression type stored in the item
 * @encryption:     encryption type stored in the item
 * @other_encoding: other encoding type stored in the item
 *
 * Creates a BTRFS_FILE_EXTENT_REG item at (@objectid, EXTENT_DATA, @pos),
 * fills in all of its fields and marks the leaf dirty.  The generation is
 * taken from @trans.  Returns 0 on success or a negative errno.
 */
int btrfs_insert_file_extent(struct btrfs_trans_handle *trans,
			     struct btrfs_root *root,
			     u64 objectid, u64 pos,
			     u64 disk_offset, u64 disk_num_bytes,
			     u64 num_bytes, u64 offset, u64 ram_bytes,
			     u8 compression, u8 encryption, u16 other_encoding)
{
	int ret = 0;
	struct btrfs_file_extent_item *item;
	struct btrfs_key file_key;
	struct btrfs_path *path;
	struct extent_buffer *leaf;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;
	file_key.objectid = objectid;
	file_key.offset = pos;
	file_key.type = BTRFS_EXTENT_DATA_KEY;

	path->leave_spinning = 1;
	ret = btrfs_insert_empty_item(trans, root, path, &file_key,
				      sizeof(*item));
	if (ret < 0)
		goto out;
	BUG_ON(ret); /* Can't happen */
	leaf = path->nodes[0];
	item = btrfs_item_ptr(leaf, path->slots[0],
			      struct btrfs_file_extent_item);
	btrfs_set_file_extent_disk_bytenr(leaf, item, disk_offset);
	btrfs_set_file_extent_disk_num_bytes(leaf, item, disk_num_bytes);
	btrfs_set_file_extent_offset(leaf, item, offset);
	btrfs_set_file_extent_num_bytes(leaf, item, num_bytes);
	btrfs_set_file_extent_ram_bytes(leaf, item, ram_bytes);
	btrfs_set_file_extent_generation(leaf, item, trans->transid);
	btrfs_set_file_extent_type(leaf, item, BTRFS_FILE_EXTENT_REG);
	btrfs_set_file_extent_compression(leaf, item, compression);
	btrfs_set_file_extent_encryption(leaf, item, encryption);
	btrfs_set_file_extent_other_encoding(leaf, item, other_encoding);

	btrfs_mark_buffer_dirty(leaf);
out:
	btrfs_free_path(path);
	return ret;
}
86
/*
 * btrfs_lookup_csum - find the csum slot covering a disk byte number
 * @trans:  transaction handle, or NULL for a read-only search
 * @root:   csum tree root to search
 * @path:   path to search with; on success it points at the covering item
 * @bytenr: disk byte number whose checksum slot is wanted
 * @cow:    nonzero to COW blocks on the way down (for modification)
 *
 * Returns a pointer into the leaf at the checksum slot for @bytenr, or an
 * ERR_PTR on failure:
 *   -EFBIG  - @bytenr lands exactly one slot past the end of an existing
 *             item (the caller may be able to extend that item, see
 *             btrfs_csum_file_blocks)
 *   -ENOENT - no csum item covers @bytenr
 *   other negative errno - the tree search itself failed
 */
static struct btrfs_csum_item *
btrfs_lookup_csum(struct btrfs_trans_handle *trans,
		  struct btrfs_root *root,
		  struct btrfs_path *path,
		  u64 bytenr, int cow)
{
	int ret;
	struct btrfs_key file_key;
	struct btrfs_key found_key;
	struct btrfs_csum_item *item;
	struct extent_buffer *leaf;
	u64 csum_offset = 0;
	u16 csum_size = btrfs_super_csum_size(root->fs_info->super_copy);
	int csums_in_item;

	file_key.objectid = BTRFS_EXTENT_CSUM_OBJECTID;
	file_key.offset = bytenr;
	file_key.type = BTRFS_EXTENT_CSUM_KEY;
	ret = btrfs_search_slot(trans, root, &file_key, path, 0, cow);
	if (ret < 0)
		goto fail;
	leaf = path->nodes[0];
	if (ret > 0) {
		ret = 1;
		if (path->slots[0] == 0)
			goto fail;
		/* No exact match; check whether the previous item covers us */
		path->slots[0]--;
		btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
		if (found_key.type != BTRFS_EXTENT_CSUM_KEY)
			goto fail;

		/* Index of @bytenr's slot within the previous item */
		csum_offset = (bytenr - found_key.offset) >>
			      root->fs_info->sb->s_blocksize_bits;
		csums_in_item = btrfs_item_size_nr(leaf, path->slots[0]);
		csums_in_item /= csum_size;

		if (csum_offset == csums_in_item) {
			/* One past the end: item could be grown to fit */
			ret = -EFBIG;
			goto fail;
		} else if (csum_offset > csums_in_item) {
			goto fail;
		}
	}
	item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_csum_item);
	item = (struct btrfs_csum_item *)((unsigned char *)item +
					  csum_offset * csum_size);
	return item;
fail:
	if (ret > 0)
		ret = -ENOENT;
	return ERR_PTR(ret);
}
139
140 int btrfs_lookup_file_extent(struct btrfs_trans_handle *trans,
141 struct btrfs_root *root,
142 struct btrfs_path *path, u64 objectid,
143 u64 offset, int mod)
144 {
145 int ret;
146 struct btrfs_key file_key;
147 int ins_len = mod < 0 ? -1 : 0;
148 int cow = mod != 0;
149
150 file_key.objectid = objectid;
151 file_key.offset = offset;
152 file_key.type = BTRFS_EXTENT_DATA_KEY;
153 ret = btrfs_search_slot(trans, root, &file_key, path, ins_len, cow);
154 return ret;
155 }
156
/*
 * End-io hook installed by __btrfs_lookup_bio_sums when the checksums did
 * not fit in the btrfs_io_bio's inline buffer; frees the out-of-line
 * allocation.  @err is unused.
 */
static void btrfs_io_bio_endio_readpage(struct btrfs_io_bio *bio, int err)
{
	kfree(bio->csum_allocated);
}
161
/*
 * __btrfs_lookup_bio_sums - collect data checksums for every block of a bio
 * @root:           root of the fs tree @inode belongs to
 * @inode:          inode the bio reads from
 * @bio:            read bio; bi_sector gives the starting disk bytenr
 * @logical_offset: file offset of the first byte, used only when @dio is set
 * @dst:            if non-NULL, csums are copied into this caller buffer;
 *                  otherwise they go into the bio's btrfs_io_bio (inline
 *                  buffer, or a kmalloc'ed one released by
 *                  btrfs_io_bio_endio_readpage)
 * @dio:            nonzero for direct I/O, where file offsets come from
 *                  @logical_offset rather than the page cache pages
 *
 * For each sectorsize block covered by the bio, the csum is looked up first
 * in the in-memory ordered sums and then in the csum tree.  Blocks with no
 * csum on disk get a zeroed slot; for the data reloc tree the range is also
 * tagged EXTENT_NODATASUM, otherwise a ratelimited message is logged.
 *
 * Returns 0 on success or -ENOMEM.
 */
static int __btrfs_lookup_bio_sums(struct btrfs_root *root,
				   struct inode *inode, struct bio *bio,
				   u64 logical_offset, u32 *dst, int dio)
{
	struct bio_vec *bvec;
	struct btrfs_io_bio *btrfs_bio = btrfs_io_bio(bio);
	struct btrfs_csum_item *item = NULL;
	struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
	struct btrfs_path *path;
	u8 *csum;
	u64 offset = 0;
	u64 item_start_offset = 0;
	u64 item_last_offset = 0;
	u64 disk_bytenr;
	u64 page_bytes_left;
	u32 diff;
	int nblocks;
	int count = 0, i;
	u16 csum_size = btrfs_super_csum_size(root->fs_info->super_copy);

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	/* One csum slot per sectorsize block of the bio */
	nblocks = bio->bi_iter.bi_size >> inode->i_sb->s_blocksize_bits;
	if (!dst) {
		if (nblocks * csum_size > BTRFS_BIO_INLINE_CSUM_SIZE) {
			btrfs_bio->csum_allocated = kmalloc_array(nblocks,
					csum_size, GFP_NOFS);
			if (!btrfs_bio->csum_allocated) {
				btrfs_free_path(path);
				return -ENOMEM;
			}
			btrfs_bio->csum = btrfs_bio->csum_allocated;
			btrfs_bio->end_io = btrfs_io_bio_endio_readpage;
		} else {
			btrfs_bio->csum = btrfs_bio->csum_inline;
		}
		csum = btrfs_bio->csum;
	} else {
		csum = (u8 *)dst;
	}

	/* Large bios walk many csum items; read ahead in the csum tree */
	if (bio->bi_iter.bi_size > PAGE_SIZE * 8)
		path->reada = READA_FORWARD;

	WARN_ON(bio->bi_vcnt <= 0);

	/*
	 * the free space stuff is only read when it hasn't been
	 * updated in the current transaction. So, we can safely
	 * read from the commit root and sidestep a nasty deadlock
	 * between reading the free space cache and updating the csum tree.
	 */
	if (btrfs_is_free_space_inode(inode)) {
		path->search_commit_root = 1;
		path->skip_locking = 1;
	}

	disk_bytenr = (u64)bio->bi_iter.bi_sector << 9;
	if (dio)
		offset = logical_offset;

	bio_for_each_segment_all(bvec, bio, i) {
		page_bytes_left = bvec->bv_len;
		/* Previous lookup still covers blocks of this bvec */
		if (count)
			goto next;

		if (!dio)
			offset = page_offset(bvec->bv_page) + bvec->bv_offset;
		/* Fast path: csum may still be in an in-memory ordered sum */
		count = btrfs_find_ordered_sum(inode, offset, disk_bytenr,
					       (u32 *)csum, nblocks);
		if (count)
			goto found;

		/* Cached item does not cover disk_bytenr; look up a new one */
		if (!item || disk_bytenr < item_start_offset ||
		    disk_bytenr >= item_last_offset) {
			struct btrfs_key found_key;
			u32 item_size;

			if (item)
				btrfs_release_path(path);
			item = btrfs_lookup_csum(NULL, root->fs_info->csum_root,
						 path, disk_bytenr, 0);
			if (IS_ERR(item)) {
				/* No csum on disk: hand back a zeroed slot */
				count = 1;
				memset(csum, 0, csum_size);
				if (BTRFS_I(inode)->root->root_key.objectid ==
				    BTRFS_DATA_RELOC_TREE_OBJECTID) {
					set_extent_bits(io_tree, offset,
						offset + root->sectorsize - 1,
						EXTENT_NODATASUM);
				} else {
					btrfs_info_rl(BTRFS_I(inode)->root->fs_info,
						   "no csum found for inode %llu start %llu",
					       btrfs_ino(inode), offset);
				}
				item = NULL;
				btrfs_release_path(path);
				goto found;
			}
			btrfs_item_key_to_cpu(path->nodes[0], &found_key,
					      path->slots[0]);

			/* Cache the byte range this item covers */
			item_start_offset = found_key.offset;
			item_size = btrfs_item_size_nr(path->nodes[0],
						       path->slots[0]);
			item_last_offset = item_start_offset +
				(item_size / csum_size) *
				root->sectorsize;
			item = btrfs_item_ptr(path->nodes[0], path->slots[0],
					      struct btrfs_csum_item);
		}
		/*
		 * this byte range must be able to fit inside
		 * a single leaf so it will also fit inside a u32
		 */
		diff = disk_bytenr - item_start_offset;
		diff = diff / root->sectorsize;
		diff = diff * csum_size;
		count = min_t(int, nblocks, (item_last_offset - disk_bytenr) >>
					    inode->i_sb->s_blocksize_bits);
		read_extent_buffer(path->nodes[0], csum,
				   ((unsigned long)item) + diff,
				   csum_size * count);
found:
		csum += count * csum_size;
		nblocks -= count;
next:
		/*
		 * Consume @count blocks, advancing the disk and file cursors.
		 * NOTE(review): when count blocks end exactly at a bvec
		 * boundary we break with count == 0; if the loop instead runs
		 * out naturally, count ends at -1.  With sectorsize equal to
		 * the page size the break always fires first — verify the
		 * exhaustion path before relying on this for subpage blocks.
		 */
		while (count--) {
			disk_bytenr += root->sectorsize;
			offset += root->sectorsize;
			page_bytes_left -= root->sectorsize;
			if (!page_bytes_left)
				break; /* move to next bio */
		}
	}

	WARN_ON_ONCE(count);
	btrfs_free_path(path);
	return 0;
}
304
305 int btrfs_lookup_bio_sums(struct btrfs_root *root, struct inode *inode,
306 struct bio *bio, u32 *dst)
307 {
308 return __btrfs_lookup_bio_sums(root, inode, bio, 0, dst, 0);
309 }
310
311 int btrfs_lookup_bio_sums_dio(struct btrfs_root *root, struct inode *inode,
312 struct bio *bio, u64 offset)
313 {
314 return __btrfs_lookup_bio_sums(root, inode, bio, offset, NULL, 1);
315 }
316
/*
 * btrfs_lookup_csums_range - gather all csums stored for a disk byte range
 * @root:          csum tree root to search
 * @start:         first disk bytenr; must be sectorsize aligned
 * @end:           last disk bytenr of the range; end + 1 must be aligned
 * @list:          collected btrfs_ordered_sum structures are appended here
 * @search_commit: nonzero to search the commit root without locking
 *
 * Walks the csum tree and packages the checksums covering
 * [@start, @end] into freshly allocated btrfs_ordered_sum structures, each
 * capped at MAX_ORDERED_SUM_BYTES of data.  On success returns 0 and splices
 * the sums onto @list; on error returns a negative errno and frees all
 * partially built sums, leaving @list untouched.  The caller owns (and must
 * free) the sums on @list.
 */
int btrfs_lookup_csums_range(struct btrfs_root *root, u64 start, u64 end,
			     struct list_head *list, int search_commit)
{
	struct btrfs_key key;
	struct btrfs_path *path;
	struct extent_buffer *leaf;
	struct btrfs_ordered_sum *sums;
	struct btrfs_csum_item *item;
	LIST_HEAD(tmplist);
	unsigned long offset;
	int ret;
	size_t size;
	u64 csum_end;
	u16 csum_size = btrfs_super_csum_size(root->fs_info->super_copy);

	ASSERT(IS_ALIGNED(start, root->sectorsize) &&
	       IS_ALIGNED(end + 1, root->sectorsize));

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	if (search_commit) {
		path->skip_locking = 1;
		path->reada = READA_FORWARD;
		path->search_commit_root = 1;
	}

	key.objectid = BTRFS_EXTENT_CSUM_OBJECTID;
	key.offset = start;
	key.type = BTRFS_EXTENT_CSUM_KEY;

	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	if (ret < 0)
		goto fail;
	if (ret > 0 && path->slots[0] > 0) {
		/* The previous item may still cover @start; step back if so */
		leaf = path->nodes[0];
		btrfs_item_key_to_cpu(leaf, &key, path->slots[0] - 1);
		if (key.objectid == BTRFS_EXTENT_CSUM_OBJECTID &&
		    key.type == BTRFS_EXTENT_CSUM_KEY) {
			offset = (start - key.offset) >>
				 root->fs_info->sb->s_blocksize_bits;
			if (offset * csum_size <
			    btrfs_item_size_nr(leaf, path->slots[0] - 1))
				path->slots[0]--;
		}
	}

	while (start <= end) {
		leaf = path->nodes[0];
		if (path->slots[0] >= btrfs_header_nritems(leaf)) {
			ret = btrfs_next_leaf(root, path);
			if (ret < 0)
				goto fail;
			if (ret > 0)
				break;
			leaf = path->nodes[0];
		}

		btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
		if (key.objectid != BTRFS_EXTENT_CSUM_OBJECTID ||
		    key.type != BTRFS_EXTENT_CSUM_KEY ||
		    key.offset > end)
			break;

		if (key.offset > start)
			start = key.offset;

		size = btrfs_item_size_nr(leaf, path->slots[0]);
		csum_end = key.offset + (size / csum_size) * root->sectorsize;
		if (csum_end <= start) {
			path->slots[0]++;
			continue;
		}

		csum_end = min(csum_end, end + 1);
		item = btrfs_item_ptr(path->nodes[0], path->slots[0],
				      struct btrfs_csum_item);
		/* Chop the item's coverage into MAX_ORDERED_SUM_BYTES pieces */
		while (start < csum_end) {
			size = min_t(size_t, csum_end - start,
				     MAX_ORDERED_SUM_BYTES(root));
			sums = kzalloc(btrfs_ordered_sum_size(root, size),
				       GFP_NOFS);
			if (!sums) {
				ret = -ENOMEM;
				goto fail;
			}

			sums->bytenr = start;
			sums->len = (int)size;

			offset = (start - key.offset) >>
				root->fs_info->sb->s_blocksize_bits;
			offset *= csum_size;
			/* Convert byte count to number of csum slots */
			size >>= root->fs_info->sb->s_blocksize_bits;

			read_extent_buffer(path->nodes[0],
					   sums->sums,
					   ((unsigned long)item) + offset,
					   csum_size * size);

			start += root->sectorsize * size;
			list_add_tail(&sums->list, &tmplist);
		}
		path->slots[0]++;
	}
	ret = 0;
fail:
	while (ret < 0 && !list_empty(&tmplist)) {
		sums = list_entry(tmplist.next, struct btrfs_ordered_sum, list);
		list_del(&sums->list);
		kfree(sums);
	}
	list_splice_tail(&tmplist, list);

	btrfs_free_path(path);
	return ret;
}
435
/*
 * btrfs_csum_one_bio - compute checksums for every block of a write bio
 * @root:       fs tree root of the inode
 * @inode:      inode being written
 * @bio:        write bio whose pages hold the data to checksum
 * @file_start: file offset of the bio's first byte, used when @contig is set
 * @contig:     nonzero if the bio is file-contiguous starting at @file_start;
 *              otherwise per-page offsets are taken from the pages themselves
 *
 * Checksums each sectorsize block via btrfs_csum_data()/btrfs_csum_final()
 * and attaches the resulting btrfs_ordered_sum to the matching ordered
 * extent.  A bio may cross an ordered extent boundary, in which case the
 * current sum is closed out and a new one is started for the next ordered
 * extent.  Returns 0 on success or -ENOMEM for the initial allocation
 * (allocations at a boundary BUG on failure).
 */
int btrfs_csum_one_bio(struct btrfs_root *root, struct inode *inode,
		       struct bio *bio, u64 file_start, int contig)
{
	struct btrfs_ordered_sum *sums;
	struct btrfs_ordered_extent *ordered = NULL;
	char *data;
	struct bio_vec *bvec;
	int index;
	int nr_sectors;
	int i, j;
	unsigned long total_bytes = 0;
	unsigned long this_sum_bytes = 0;
	u64 offset;

	WARN_ON(bio->bi_vcnt <= 0);
	sums = kzalloc(btrfs_ordered_sum_size(root, bio->bi_iter.bi_size),
		       GFP_NOFS);
	if (!sums)
		return -ENOMEM;

	sums->len = bio->bi_iter.bi_size;
	INIT_LIST_HEAD(&sums->list);

	if (contig)
		offset = file_start;
	else
		offset = 0; /* shut up gcc */

	sums->bytenr = (u64)bio->bi_iter.bi_sector << 9;
	index = 0;

	bio_for_each_segment_all(bvec, bio, j) {
		if (!contig)
			offset = page_offset(bvec->bv_page) + bvec->bv_offset;

		if (!ordered) {
			ordered = btrfs_lookup_ordered_extent(inode, offset);
			BUG_ON(!ordered); /* Logic error */
		}

		data = kmap_atomic(bvec->bv_page);

		/* Number of sectorsize blocks in this bvec (rounded up) */
		nr_sectors = BTRFS_BYTES_TO_BLKS(root->fs_info,
						 bvec->bv_len + root->sectorsize
						 - 1);

		for (i = 0; i < nr_sectors; i++) {
			if (offset >= ordered->file_offset + ordered->len ||
			    offset < ordered->file_offset) {
				/*
				 * Crossed into another ordered extent: close
				 * out the current sum and start a fresh one.
				 */
				unsigned long bytes_left;

				kunmap_atomic(data);
				sums->len = this_sum_bytes;
				this_sum_bytes = 0;
				btrfs_add_ordered_sum(inode, ordered, sums);
				btrfs_put_ordered_extent(ordered);

				bytes_left = bio->bi_iter.bi_size - total_bytes;

				sums = kzalloc(btrfs_ordered_sum_size(root, bytes_left),
					       GFP_NOFS);
				BUG_ON(!sums); /* -ENOMEM */
				sums->len = bytes_left;
				ordered = btrfs_lookup_ordered_extent(inode,
								      offset);
				ASSERT(ordered); /* Logic error */
				sums->bytenr = ((u64)bio->bi_iter.bi_sector << 9)
					+ total_bytes;
				index = 0;

				data = kmap_atomic(bvec->bv_page);
			}

			/* Seed, then checksum this sectorsize block */
			sums->sums[index] = ~(u32)0;
			sums->sums[index]
				= btrfs_csum_data(data + bvec->bv_offset
						+ (i * root->sectorsize),
						sums->sums[index],
						root->sectorsize);
			btrfs_csum_final(sums->sums[index],
					(char *)(sums->sums + index));
			index++;
			offset += root->sectorsize;
			this_sum_bytes += root->sectorsize;
			total_bytes += root->sectorsize;
		}

		kunmap_atomic(data);
	}
	this_sum_bytes = 0;
	btrfs_add_ordered_sum(inode, ordered, sums);
	btrfs_put_ordered_extent(ordered);
	return 0;
}
530
/*
 * helper function for csum removal, this expects the
 * key to describe the csum pointed to by the path, and it expects
 * the csum to overlap the range [bytenr, len]
 *
 * The csum should not be entirely contained in the range and the
 * range should not be entirely contained in the csum.
 *
 * This calls btrfs_truncate_item with the correct args based on the
 * overlap, and fixes up the key as required.
 *
 * @root:   the csum tree root
 * @path:   path pointing at the csum item to truncate
 * @key:    key of that item; updated in place when the item's start moves
 * @bytenr: first disk byte of the range being removed
 * @len:    length of the range being removed
 */
static noinline void truncate_one_csum(struct btrfs_root *root,
				       struct btrfs_path *path,
				       struct btrfs_key *key,
				       u64 bytenr, u64 len)
{
	struct extent_buffer *leaf;
	u16 csum_size = btrfs_super_csum_size(root->fs_info->super_copy);
	u64 csum_end;
	u64 end_byte = bytenr + len;
	u32 blocksize_bits = root->fs_info->sb->s_blocksize_bits;

	leaf = path->nodes[0];
	/* Last disk byte (exclusive) covered by this csum item */
	csum_end = btrfs_item_size_nr(leaf, path->slots[0]) / csum_size;
	csum_end <<= root->fs_info->sb->s_blocksize_bits;
	csum_end += key->offset;

	if (key->offset < bytenr && csum_end <= end_byte) {
		/*
		 *         [ bytenr - len ]
		 *         [   ]
		 *   [csum     ]
		 *   A simple truncate off the end of the item
		 */
		u32 new_size = (bytenr - key->offset) >> blocksize_bits;
		new_size *= csum_size;
		btrfs_truncate_item(root, path, new_size, 1);
	} else if (key->offset >= bytenr && csum_end > end_byte &&
		   end_byte > key->offset) {
		/*
		 *         [ bytenr - len ]
		 *                 [ ]
		 *                 [csum     ]
		 * we need to truncate from the beginning of the csum
		 */
		u32 new_size = (csum_end - end_byte) >> blocksize_bits;
		new_size *= csum_size;

		btrfs_truncate_item(root, path, new_size, 0);

		/* The item's first byte moved; keep its key in sync */
		key->offset = end_byte;
		btrfs_set_item_key_safe(root->fs_info, path, key);
	} else {
		BUG();
	}
}
587
588 /*
589 * deletes the csum items from the csum tree for a given
590 * range of bytes.
591 */
592 int btrfs_del_csums(struct btrfs_trans_handle *trans,
593 struct btrfs_root *root, u64 bytenr, u64 len)
594 {
595 struct btrfs_path *path;
596 struct btrfs_key key;
597 u64 end_byte = bytenr + len;
598 u64 csum_end;
599 struct extent_buffer *leaf;
600 int ret;
601 u16 csum_size = btrfs_super_csum_size(root->fs_info->super_copy);
602 int blocksize_bits = root->fs_info->sb->s_blocksize_bits;
603
604 root = root->fs_info->csum_root;
605
606 path = btrfs_alloc_path();
607 if (!path)
608 return -ENOMEM;
609
610 while (1) {
611 key.objectid = BTRFS_EXTENT_CSUM_OBJECTID;
612 key.offset = end_byte - 1;
613 key.type = BTRFS_EXTENT_CSUM_KEY;
614
615 path->leave_spinning = 1;
616 ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
617 if (ret > 0) {
618 if (path->slots[0] == 0)
619 break;
620 path->slots[0]--;
621 } else if (ret < 0) {
622 break;
623 }
624
625 leaf = path->nodes[0];
626 btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
627
628 if (key.objectid != BTRFS_EXTENT_CSUM_OBJECTID ||
629 key.type != BTRFS_EXTENT_CSUM_KEY) {
630 break;
631 }
632
633 if (key.offset >= end_byte)
634 break;
635
636 csum_end = btrfs_item_size_nr(leaf, path->slots[0]) / csum_size;
637 csum_end <<= blocksize_bits;
638 csum_end += key.offset;
639
640 /* this csum ends before we start, we're done */
641 if (csum_end <= bytenr)
642 break;
643
644 /* delete the entire item, it is inside our range */
645 if (key.offset >= bytenr && csum_end <= end_byte) {
646 ret = btrfs_del_item(trans, root, path);
647 if (ret)
648 goto out;
649 if (key.offset == bytenr)
650 break;
651 } else if (key.offset < bytenr && csum_end > end_byte) {
652 unsigned long offset;
653 unsigned long shift_len;
654 unsigned long item_offset;
655 /*
656 * [ bytenr - len ]
657 * [csum ]
658 *
659 * Our bytes are in the middle of the csum,
660 * we need to split this item and insert a new one.
661 *
662 * But we can't drop the path because the
663 * csum could change, get removed, extended etc.
664 *
665 * The trick here is the max size of a csum item leaves
666 * enough room in the tree block for a single
667 * item header. So, we split the item in place,
668 * adding a new header pointing to the existing
669 * bytes. Then we loop around again and we have
670 * a nicely formed csum item that we can neatly
671 * truncate.
672 */
673 offset = (bytenr - key.offset) >> blocksize_bits;
674 offset *= csum_size;
675
676 shift_len = (len >> blocksize_bits) * csum_size;
677
678 item_offset = btrfs_item_ptr_offset(leaf,
679 path->slots[0]);
680
681 memzero_extent_buffer(leaf, item_offset + offset,
682 shift_len);
683 key.offset = bytenr;
684
685 /*
686 * btrfs_split_item returns -EAGAIN when the
687 * item changed size or key
688 */
689 ret = btrfs_split_item(trans, root, path, &key, offset);
690 if (ret && ret != -EAGAIN) {
691 btrfs_abort_transaction(trans, ret);
692 goto out;
693 }
694
695 key.offset = end_byte - 1;
696 } else {
697 truncate_one_csum(root, path, &key, bytenr, len);
698 if (key.offset < bytenr)
699 break;
700 }
701 btrfs_release_path(path);
702 }
703 ret = 0;
704 out:
705 btrfs_free_path(path);
706 return ret;
707 }
708
/*
 * btrfs_csum_file_blocks - write a batch of data checksums into the csum tree
 * @trans: running transaction handle
 * @root:  the csum tree root
 * @sums:  ordered sum holding the csums and the disk range they cover
 *
 * Stores @sums->len bytes worth of checksums starting at disk byte
 * @sums->bytenr.  For each chunk it either writes into an existing csum
 * item, extends an adjacent item in place, or inserts a new item (sized up
 * to MAX_CSUM_ITEMS or up to the next existing item, whichever is smaller),
 * looping until all of @sums has been written.  Returns 0 on success or a
 * negative errno.
 */
int btrfs_csum_file_blocks(struct btrfs_trans_handle *trans,
			   struct btrfs_root *root,
			   struct btrfs_ordered_sum *sums)
{
	struct btrfs_key file_key;
	struct btrfs_key found_key;
	struct btrfs_path *path;
	struct btrfs_csum_item *item;
	struct btrfs_csum_item *item_end;
	struct extent_buffer *leaf = NULL;
	u64 next_offset;
	u64 total_bytes = 0;
	u64 csum_offset;
	u64 bytenr;
	u32 nritems;
	u32 ins_size;
	int index = 0;
	int found_next;
	int ret;
	u16 csum_size = btrfs_super_csum_size(root->fs_info->super_copy);

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;
again:
	next_offset = (u64)-1;
	found_next = 0;
	bytenr = sums->bytenr + total_bytes;
	file_key.objectid = BTRFS_EXTENT_CSUM_OBJECTID;
	file_key.offset = bytenr;
	file_key.type = BTRFS_EXTENT_CSUM_KEY;

	item = btrfs_lookup_csum(trans, root, path, bytenr, 1);
	if (!IS_ERR(item)) {
		/* Existing item already covers bytenr; write into it */
		ret = 0;
		leaf = path->nodes[0];
		item_end = btrfs_item_ptr(leaf, path->slots[0],
					  struct btrfs_csum_item);
		item_end = (struct btrfs_csum_item *)((char *)item_end +
			   btrfs_item_size_nr(leaf, path->slots[0]));
		goto found;
	}
	ret = PTR_ERR(item);
	if (ret != -EFBIG && ret != -ENOENT)
		goto fail_unlock;

	if (ret == -EFBIG) {
		u32 item_size;
		/* we found one, but it isn't big enough yet */
		leaf = path->nodes[0];
		item_size = btrfs_item_size_nr(leaf, path->slots[0]);
		if ((item_size / csum_size) >=
		    MAX_CSUM_ITEMS(root, csum_size)) {
			/* already at max size, make a new one */
			goto insert;
		}
	} else {
		/* -ENOENT: find the next csum key so a new insert can be
		 * sized to stop right before it
		 */
		int slot = path->slots[0] + 1;
		/* we didn't find a csum item, insert one */
		nritems = btrfs_header_nritems(path->nodes[0]);
		if (!nritems || (path->slots[0] >= nritems - 1)) {
			ret = btrfs_next_leaf(root, path);
			if (ret == 1)
				found_next = 1;
			if (ret != 0)
				goto insert;
			slot = path->slots[0];
		}
		btrfs_item_key_to_cpu(path->nodes[0], &found_key, slot);
		if (found_key.objectid != BTRFS_EXTENT_CSUM_OBJECTID ||
		    found_key.type != BTRFS_EXTENT_CSUM_KEY) {
			found_next = 1;
			goto insert;
		}
		next_offset = found_key.offset;
		found_next = 1;
		goto insert;
	}

	/*
	 * at this point, we know the tree has an item, but it isn't big
	 * enough yet to put our csum in. Grow it
	 */
	btrfs_release_path(path);
	ret = btrfs_search_slot(trans, root, &file_key, path,
				csum_size, 1);
	if (ret < 0)
		goto fail_unlock;

	if (ret > 0) {
		if (path->slots[0] == 0)
			goto insert;
		path->slots[0]--;
	}

	leaf = path->nodes[0];
	btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
	csum_offset = (bytenr - found_key.offset) >>
			root->fs_info->sb->s_blocksize_bits;

	if (found_key.type != BTRFS_EXTENT_CSUM_KEY ||
	    found_key.objectid != BTRFS_EXTENT_CSUM_OBJECTID ||
	    csum_offset >= MAX_CSUM_ITEMS(root, csum_size)) {
		goto insert;
	}

	if (csum_offset == btrfs_item_size_nr(leaf, path->slots[0]) /
	    csum_size) {
		/* bytenr is exactly one slot past the item's end: extend it */
		int extend_nr;
		u64 tmp;
		u32 diff;
		u32 free_space;

		if (btrfs_leaf_free_space(root, leaf) <
				 sizeof(struct btrfs_item) + csum_size * 2)
			goto insert;

		free_space = btrfs_leaf_free_space(root, leaf) -
					 sizeof(struct btrfs_item) - csum_size;
		tmp = sums->len - total_bytes;
		tmp >>= root->fs_info->sb->s_blocksize_bits;
		WARN_ON(tmp < 1);

		extend_nr = max_t(int, 1, (int)tmp);
		diff = (csum_offset + extend_nr) * csum_size;
		diff = min(diff, MAX_CSUM_ITEMS(root, csum_size) * csum_size);

		diff = diff - btrfs_item_size_nr(leaf, path->slots[0]);
		diff = min(free_space, diff);
		/* Round down to a whole number of csum slots */
		diff /= csum_size;
		diff *= csum_size;

		btrfs_extend_item(root, path, diff);
		ret = 0;
		goto csum;
	}

insert:
	btrfs_release_path(path);
	csum_offset = 0;
	if (found_next) {
		/* Size the new item to stop before the next existing csum */
		u64 tmp;

		tmp = sums->len - total_bytes;
		tmp >>= root->fs_info->sb->s_blocksize_bits;
		tmp = min(tmp, (next_offset - file_key.offset) >>
			       root->fs_info->sb->s_blocksize_bits);

		tmp = max((u64)1, tmp);
		tmp = min(tmp, (u64)MAX_CSUM_ITEMS(root, csum_size));
		ins_size = csum_size * tmp;
	} else {
		ins_size = csum_size;
	}
	path->leave_spinning = 1;
	ret = btrfs_insert_empty_item(trans, root, path, &file_key,
				      ins_size);
	path->leave_spinning = 0;
	if (ret < 0)
		goto fail_unlock;
	if (WARN_ON(ret != 0))
		goto fail_unlock;
	leaf = path->nodes[0];
csum:
	item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_csum_item);
	item_end = (struct btrfs_csum_item *)((unsigned char *)item +
				      btrfs_item_size_nr(leaf, path->slots[0]));
	item = (struct btrfs_csum_item *)((unsigned char *)item +
					  csum_offset * csum_size);
found:
	/* Copy as many csums as fit between item and item_end */
	ins_size = (u32)(sums->len - total_bytes) >>
		   root->fs_info->sb->s_blocksize_bits;
	ins_size *= csum_size;
	ins_size = min_t(u32, (unsigned long)item_end - (unsigned long)item,
			      ins_size);
	write_extent_buffer(leaf, sums->sums + index, (unsigned long)item,
			    ins_size);

	ins_size /= csum_size;
	total_bytes += ins_size * root->sectorsize;
	index += ins_size;

	btrfs_mark_buffer_dirty(path->nodes[0]);
	if (total_bytes < sums->len) {
		/* More csums left; restart the lookup at the new bytenr */
		btrfs_release_path(path);
		cond_resched();
		goto again;
	}
out:
	btrfs_free_path(path);
	return ret;

fail_unlock:
	goto out;
}
904
/*
 * btrfs_extent_item_to_extent_map - fill an extent_map from a file extent item
 * @inode:      inode the extent belongs to
 * @path:       path pointing at the file extent item
 * @fi:         the file extent item in the leaf
 * @new_inline: for inline extents, nonzero suppresses the compressed flag
 *              (same convention as inode.c:btrfs_get_extent())
 * @em:         extent map to fill in
 *
 * Translates an on-disk btrfs_file_extent_item (regular, prealloc or inline)
 * into the in-memory extent_map representation.  A disk_bytenr of 0 on a
 * regular/prealloc extent maps to EXTENT_MAP_HOLE; unknown extent types are
 * reported via btrfs_err() and leave @em only partially initialized.
 */
void btrfs_extent_item_to_extent_map(struct inode *inode,
				     const struct btrfs_path *path,
				     struct btrfs_file_extent_item *fi,
				     const bool new_inline,
				     struct extent_map *em)
{
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct extent_buffer *leaf = path->nodes[0];
	const int slot = path->slots[0];
	struct btrfs_key key;
	u64 extent_start, extent_end;
	u64 bytenr;
	u8 type = btrfs_file_extent_type(leaf, fi);
	int compress_type = btrfs_file_extent_compression(leaf, fi);

	em->bdev = root->fs_info->fs_devices->latest_bdev;
	btrfs_item_key_to_cpu(leaf, &key, slot);
	extent_start = key.offset;

	if (type == BTRFS_FILE_EXTENT_REG ||
	    type == BTRFS_FILE_EXTENT_PREALLOC) {
		extent_end = extent_start +
			btrfs_file_extent_num_bytes(leaf, fi);
	} else if (type == BTRFS_FILE_EXTENT_INLINE) {
		size_t size;
		size = btrfs_file_extent_inline_len(leaf, slot, fi);
		extent_end = ALIGN(extent_start + size, root->sectorsize);
	}

	em->ram_bytes = btrfs_file_extent_ram_bytes(leaf, fi);
	if (type == BTRFS_FILE_EXTENT_REG ||
	    type == BTRFS_FILE_EXTENT_PREALLOC) {
		em->start = extent_start;
		em->len = extent_end - extent_start;
		em->orig_start = extent_start -
			btrfs_file_extent_offset(leaf, fi);
		em->orig_block_len = btrfs_file_extent_disk_num_bytes(leaf, fi);
		bytenr = btrfs_file_extent_disk_bytenr(leaf, fi);
		if (bytenr == 0) {
			/* disk_bytenr 0 means a hole */
			em->block_start = EXTENT_MAP_HOLE;
			return;
		}
		if (compress_type != BTRFS_COMPRESS_NONE) {
			set_bit(EXTENT_FLAG_COMPRESSED, &em->flags);
			em->compress_type = compress_type;
			em->block_start = bytenr;
			em->block_len = em->orig_block_len;
		} else {
			bytenr += btrfs_file_extent_offset(leaf, fi);
			em->block_start = bytenr;
			em->block_len = em->len;
			if (type == BTRFS_FILE_EXTENT_PREALLOC)
				set_bit(EXTENT_FLAG_PREALLOC, &em->flags);
		}
	} else if (type == BTRFS_FILE_EXTENT_INLINE) {
		em->block_start = EXTENT_MAP_INLINE;
		em->start = extent_start;
		em->len = extent_end - extent_start;
		/*
		 * Initialize orig_start and block_len with the same values
		 * as in inode.c:btrfs_get_extent().
		 */
		em->orig_start = EXTENT_MAP_HOLE;
		em->block_len = (u64)-1;
		if (!new_inline && compress_type != BTRFS_COMPRESS_NONE) {
			set_bit(EXTENT_FLAG_COMPRESSED, &em->flags);
			em->compress_type = compress_type;
		}
	} else {
		btrfs_err(root->fs_info,
			  "unknown file extent item type %d, inode %llu, offset %llu, root %llu",
			  type, btrfs_ino(inode), extent_start,
			  root->root_key.objectid);
	}
}