// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2008 Oracle.  All rights reserved.
 */

#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/blkdev.h>
#include <linux/list_sort.h>
#include <linux/iversion.h>
#include "ctree.h"
#include "tree-log.h"
#include "disk-io.h"
#include "locking.h"
#include "print-tree.h"
#include "backref.h"
#include "compression.h"
#include "qgroup.h"
#include "inode-map.h"
/*
 * magic values for the inode_only field in btrfs_log_inode:
 *
 * LOG_INODE_ALL means to log everything
 * LOG_INODE_EXISTS means to log just enough to recreate the inode
 * during log replay
 */
#define LOG_INODE_ALL 0
#define LOG_INODE_EXISTS 1
#define LOG_OTHER_INODE 2
#define LOG_OTHER_INODE_ALL 3
/*
 * directory trouble cases
 *
 * 1) on rename or unlink, if the inode being unlinked isn't in the fsync
 * log, we must force a full commit before doing an fsync of the directory
 * where the unlink was done.
 * ---> record transid of last unlink/rename per directory
 *
 * mkdir foo/some_dir
 * normal commit
 * rename foo/some_dir foo2/some_dir
 * mkdir foo/some_dir
 * fsync foo/some_dir/some_file
 *
 * The fsync above will unlink the original some_dir without recording
 * it in its new location (foo2).  After a crash, some_dir will be gone
 * unless the fsync of some_file forces a full commit
 *
 * 2) we must log any new names for any file or dir that is in the fsync
 * log. ---> check inode while renaming/linking.
 *
 * 2a) we must log any new names for any file or dir during rename
 * when the directory they are being removed from was logged.
 * ---> check inode and old parent dir during rename
 *
 * 2a is actually the more important variant.  With the extra logging
 * a crash might unlink the old name without recreating the new one
 *
 * 3) after a crash, we must go through any directories with a link count
 * of zero and redo the rm -rf
 *
 * mkdir f1/foo
 * normal commit
 * rm -rf f1/foo
 * fsync(f1)
 *
 * The directory f1 was fully removed from the FS, but fsync was never
 * called on f1, only its parent dir.  After a crash the rm -rf must
 * be replayed.  This must be able to recurse down the entire
 * directory tree.  The inode link count fixup code takes care of the
 * ugly details.  A userspace sketch of case 1 follows.
 */
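/*
 * Illustrative userspace sketch (not kernel code): trouble case 1 above
 * expressed as syscalls. All paths are hypothetical. Without the full
 * commit the kernel forces here, a crash right after the fsync() could
 * lose some_dir's new location under foo2.
 */
#if 0
#include <fcntl.h>
#include <stdio.h>
#include <sys/stat.h>
#include <unistd.h>

int main(void)
{
	int fd;

	mkdir("foo", 0755);
	mkdir("foo2", 0755);
	mkdir("foo/some_dir", 0755);
	/* ... a normal transaction commit happens here ... */
	rename("foo/some_dir", "foo2/some_dir");
	mkdir("foo/some_dir", 0755);

	fd = open("foo/some_dir/some_file", O_CREAT | O_WRONLY, 0644);
	if (fd < 0) {
		perror("open");
		return 1;
	}
	fsync(fd);	/* must not make foo2/some_dir vanish on crash */
	close(fd);
	return 0;
}
#endif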
/*
 * stages for the tree walking.  The first stage (0) is to only pin down
 * the blocks we find, the second stage (1) is to make sure that all the
 * inodes we find in the log are created in the subvolume.
 *
 * The last stage is to deal with directories and links and extents
 * and all the other fun semantics
 */
#define LOG_WALK_PIN_ONLY 0
#define LOG_WALK_REPLAY_INODES 1
#define LOG_WALK_REPLAY_DIR_INDEX 2
#define LOG_WALK_REPLAY_ALL 3
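/*
 * Illustrative sketch (not kernel code): replay visits the log once per
 * stage, in the order the constants above define. walk_one_pass() is a
 * hypothetical stand-in for the real tree walk driven by walk_control.
 */
#if 0
static void walk_one_pass(int stage);	/* hypothetical helper */

static void replay_all_stages(void)
{
	int stage;

	for (stage = LOG_WALK_PIN_ONLY; stage <= LOG_WALK_REPLAY_ALL; stage++)
		walk_one_pass(stage);
}
#endif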
static int btrfs_log_inode(struct btrfs_trans_handle *trans,
			   struct btrfs_root *root, struct btrfs_inode *inode,
			   int inode_only,
			   const loff_t start,
			   const loff_t end,
			   struct btrfs_log_ctx *ctx);
static int link_to_fixup_dir(struct btrfs_trans_handle *trans,
			     struct btrfs_root *root,
			     struct btrfs_path *path, u64 objectid);
static noinline int replay_dir_deletes(struct btrfs_trans_handle *trans,
				       struct btrfs_root *root,
				       struct btrfs_root *log,
				       struct btrfs_path *path,
				       u64 dirid, int del_all);
/*
 * tree logging is a special write ahead log used to make sure that
 * fsyncs and O_SYNCs can happen without doing full tree commits.
 *
 * Full tree commits are expensive because they require commonly
 * modified blocks to be recowed, creating many dirty pages in the
 * extent tree and a 4x-6x higher write load than ext3.
 *
 * Instead of doing a tree commit on every fsync, we use the
 * key ranges and transaction ids to find items for a given file or directory
 * that have changed in this transaction.  Those items are copied into
 * a special tree (one per subvolume root), that tree is written to disk
 * and then the fsync is considered complete.
 *
 * After a crash, items are copied out of the log-tree back into the
 * subvolume tree.  Any file data extents found are recorded in the extent
 * allocation tree, and the log-tree freed.
 *
 * The log tree is read three times: once to pin down all the extents it is
 * using in ram, once to create all the inodes logged in the tree,
 * and once to do all the other items.
 */
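/*
 * Illustrative userspace sketch (not kernel code): the operation the tree
 * log optimizes. One fsync() on a file must be durable on its own, without
 * the filesystem having to commit every other dirty tree.
 */
#if 0
#include <fcntl.h>
#include <unistd.h>

static int write_durably(const char *path, const void *buf, size_t len)
{
	int fd = open(path, O_CREAT | O_WRONLY | O_TRUNC, 0644);

	if (fd < 0)
		return -1;
	if (write(fd, buf, len) != (ssize_t)len || fsync(fd) != 0) {
		close(fd);
		return -1;
	}
	return close(fd);
}
#endif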
/*
 * start a sub transaction and setup the log tree
 * this increments the log tree writer count to make the people
 * syncing the tree wait for us to finish
 */
static int start_log_trans(struct btrfs_trans_handle *trans,
			   struct btrfs_root *root,
			   struct btrfs_log_ctx *ctx)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	int ret = 0;

	mutex_lock(&root->log_mutex);

	if (root->log_root) {
		if (btrfs_need_log_full_commit(fs_info, trans)) {
			ret = -EAGAIN;
			goto out;
		}

		if (!root->log_start_pid) {
			clear_bit(BTRFS_ROOT_MULTI_LOG_TASKS, &root->state);
			root->log_start_pid = current->pid;
		} else if (root->log_start_pid != current->pid) {
			set_bit(BTRFS_ROOT_MULTI_LOG_TASKS, &root->state);
		}
	} else {
		mutex_lock(&fs_info->tree_log_mutex);
		if (!fs_info->log_root_tree)
			ret = btrfs_init_log_root_tree(trans, fs_info);
		mutex_unlock(&fs_info->tree_log_mutex);
		if (ret)
			goto out;

		ret = btrfs_add_log_tree(trans, root);
		if (ret)
			goto out;

		clear_bit(BTRFS_ROOT_MULTI_LOG_TASKS, &root->state);
		root->log_start_pid = current->pid;
	}

	atomic_inc(&root->log_batch);
	atomic_inc(&root->log_writers);
	if (ctx) {
		int index = root->log_transid % 2;

		list_add_tail(&ctx->list, &root->log_ctxs[index]);
		ctx->log_transid = root->log_transid;
	}

out:
	mutex_unlock(&root->log_mutex);
	return ret;
}
/*
 * returns 0 if there was a log transaction running and we were able
 * to join, or returns -ENOENT if there were no transactions
 * in progress
 */
static int join_running_log_trans(struct btrfs_root *root)
{
	int ret = -ENOENT;

	mutex_lock(&root->log_mutex);
	if (root->log_root) {
		ret = 0;
		atomic_inc(&root->log_writers);
	}
	mutex_unlock(&root->log_mutex);
	return ret;
}
/*
 * This either makes the current running log transaction wait
 * until you call btrfs_end_log_trans() or it makes any future
 * log transactions wait until you call btrfs_end_log_trans()
 */
void btrfs_pin_log_trans(struct btrfs_root *root)
{
	mutex_lock(&root->log_mutex);
	atomic_inc(&root->log_writers);
	mutex_unlock(&root->log_mutex);
}
/*
 * indicate we're done making changes to the log tree
 * and wake up anyone waiting to do a sync
 */
void btrfs_end_log_trans(struct btrfs_root *root)
{
	if (atomic_dec_and_test(&root->log_writers)) {
		/* atomic_dec_and_test implies a barrier */
		cond_wake_up_nomb(&root->log_writer_wait);
	}
}
/*
 * the walk control struct is used to pass state down the chain when
 * processing the log tree.  The stage field tells us which part
 * of the log tree processing we are currently doing.  The others
 * are state fields used for that specific part
 */
struct walk_control {
	/* should we free the extent on disk when done?  This is used
	 * at transaction commit time while freeing a log tree
	 */
	int free;

	/* should we write out the extent buffer?  This is used
	 * while flushing the log tree to disk during a sync
	 */
	int write;

	/* should we wait for the extent buffer io to finish?  Also used
	 * while flushing the log tree to disk for a sync
	 */
	int wait;

	/* pin only walk, we record which extents on disk belong to the
	 * log trees
	 */
	int pin;

	/* what stage of the replay code we're currently in */
	int stage;

	/*
	 * Ignore any items from the inode currently being processed. Needs
	 * to be set every time we find a BTRFS_INODE_ITEM_KEY and we are in
	 * the LOG_WALK_REPLAY_INODES stage.
	 */
	bool ignore_cur_inode;

	/* the root we are currently replaying */
	struct btrfs_root *replay_dest;

	/* the trans handle for the current replay */
	struct btrfs_trans_handle *trans;

	/* the function that gets used to process blocks we find in the
	 * tree.  Note the extent_buffer might not be up to date when it is
	 * passed in, and it must be checked or read if you need the data
	 * inside it
	 */
	int (*process_func)(struct btrfs_root *log, struct extent_buffer *eb,
			    struct walk_control *wc, u64 gen, int level);
};
/*
 * process_func used to pin down extents, write them or wait on them
 */
static int process_one_buffer(struct btrfs_root *log,
			      struct extent_buffer *eb,
			      struct walk_control *wc, u64 gen, int level)
{
	struct btrfs_fs_info *fs_info = log->fs_info;
	int ret = 0;

	/*
	 * If this fs is mixed then we need to be able to process the leaves to
	 * pin down any logged extents, so we have to read the block.
	 */
	if (btrfs_fs_incompat(fs_info, MIXED_GROUPS)) {
		ret = btrfs_read_buffer(eb, gen, level, NULL);
		if (ret)
			return ret;
	}

	if (wc->pin)
		ret = btrfs_pin_extent_for_log_replay(fs_info, eb->start,
						      eb->len);

	if (!ret && btrfs_buffer_uptodate(eb, gen, 0)) {
		if (wc->pin && btrfs_header_level(eb) == 0)
			ret = btrfs_exclude_logged_extents(fs_info, eb);
		if (wc->write)
			btrfs_write_tree_block(eb);
		if (wc->wait)
			btrfs_wait_tree_block_writeback(eb);
	}
	return ret;
}
/*
 * Item overwrite used by replay and tree logging.  eb, slot and key all refer
 * to the src data we are copying out.
 *
 * root is the tree we are copying into, and path is a scratch
 * path for use in this function (it should be released on entry and
 * will be released on exit).
 *
 * If the key is already in the destination tree the existing item is
 * overwritten.  If the existing item isn't big enough, it is extended.
 * If it is too large, it is truncated.
 *
 * If the key isn't in the destination yet, a new item is inserted.
 */
static noinline int overwrite_item(struct btrfs_trans_handle *trans,
				   struct btrfs_root *root,
				   struct btrfs_path *path,
				   struct extent_buffer *eb, int slot,
				   struct btrfs_key *key)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	int ret;
	u32 item_size;
	u64 saved_i_size = 0;
	int save_old_i_size = 0;
	unsigned long src_ptr;
	unsigned long dst_ptr;
	int overwrite_root = 0;
	bool inode_item = key->type == BTRFS_INODE_ITEM_KEY;

	if (root->root_key.objectid != BTRFS_TREE_LOG_OBJECTID)
		overwrite_root = 1;

	item_size = btrfs_item_size_nr(eb, slot);
	src_ptr = btrfs_item_ptr_offset(eb, slot);

	/* look for the key in the destination tree */
	ret = btrfs_search_slot(NULL, root, key, path, 0, 0);
	if (ret < 0)
		return ret;

	if (ret == 0) {
		char *src_copy;
		char *dst_copy;
		u32 dst_size = btrfs_item_size_nr(path->nodes[0],
						  path->slots[0]);
		if (dst_size != item_size)
			goto insert;

		if (item_size == 0) {
			btrfs_release_path(path);
			return 0;
		}
		dst_copy = kmalloc(item_size, GFP_NOFS);
		src_copy = kmalloc(item_size, GFP_NOFS);
		if (!dst_copy || !src_copy) {
			btrfs_release_path(path);
			kfree(dst_copy);
			kfree(src_copy);
			return -ENOMEM;
		}

		read_extent_buffer(eb, src_copy, src_ptr, item_size);

		dst_ptr = btrfs_item_ptr_offset(path->nodes[0], path->slots[0]);
		read_extent_buffer(path->nodes[0], dst_copy, dst_ptr,
				   item_size);
		ret = memcmp(dst_copy, src_copy, item_size);

		kfree(dst_copy);
		kfree(src_copy);
		/*
		 * they have the same contents, just return, this saves
		 * us from cowing blocks in the destination tree and doing
		 * extra writes that may not have been done by a previous
		 * sync
		 */
		if (ret == 0) {
			btrfs_release_path(path);
			return 0;
		}

		/*
		 * We need to load the old nbytes into the inode so when we
		 * replay the extents we've logged we get the right nbytes.
		 */
		if (inode_item) {
			struct btrfs_inode_item *item;
			u64 nbytes;
			u32 mode;

			item = btrfs_item_ptr(path->nodes[0], path->slots[0],
					      struct btrfs_inode_item);
			nbytes = btrfs_inode_nbytes(path->nodes[0], item);
			item = btrfs_item_ptr(eb, slot,
					      struct btrfs_inode_item);
			btrfs_set_inode_nbytes(eb, item, nbytes);

			/*
			 * If this is a directory we need to reset the i_size to
			 * 0 so that we can set it up properly when replaying
			 * the rest of the items in this log.
			 */
			mode = btrfs_inode_mode(eb, item);
			if (S_ISDIR(mode))
				btrfs_set_inode_size(eb, item, 0);
		}
	} else if (inode_item) {
		struct btrfs_inode_item *item;
		u32 mode;

		/*
		 * New inode, set nbytes to 0 so that the nbytes comes out
		 * properly when we replay the extents.
		 */
		item = btrfs_item_ptr(eb, slot, struct btrfs_inode_item);
		btrfs_set_inode_nbytes(eb, item, 0);

		/*
		 * If this is a directory we need to reset the i_size to 0 so
		 * that we can set it up properly when replaying the rest of
		 * the items in this log.
		 */
		mode = btrfs_inode_mode(eb, item);
		if (S_ISDIR(mode))
			btrfs_set_inode_size(eb, item, 0);
	}
insert:
	btrfs_release_path(path);
	/* try to insert the key into the destination tree */
	path->skip_release_on_error = 1;
	ret = btrfs_insert_empty_item(trans, root, path,
				      key, item_size);
	path->skip_release_on_error = 0;

	/* make sure any existing item is the correct size */
	if (ret == -EEXIST || ret == -EOVERFLOW) {
		u32 found_size;

		found_size = btrfs_item_size_nr(path->nodes[0],
						path->slots[0]);
		if (found_size > item_size)
			btrfs_truncate_item(fs_info, path, item_size, 1);
		else if (found_size < item_size)
			btrfs_extend_item(fs_info, path,
					  item_size - found_size);
	} else if (ret) {
		return ret;
	}
	dst_ptr = btrfs_item_ptr_offset(path->nodes[0],
					path->slots[0]);

	/* don't overwrite an existing inode if the generation number
	 * was logged as zero.  This is done when the tree logging code
	 * is just logging an inode to make sure it exists after recovery.
	 *
	 * Also, don't overwrite i_size on directories during replay.
	 * log replay inserts and removes directory items based on the
	 * state of the tree found in the subvolume, and i_size is modified
	 * as it goes
	 */
	if (key->type == BTRFS_INODE_ITEM_KEY && ret == -EEXIST) {
		struct btrfs_inode_item *src_item;
		struct btrfs_inode_item *dst_item;

		src_item = (struct btrfs_inode_item *)src_ptr;
		dst_item = (struct btrfs_inode_item *)dst_ptr;

		if (btrfs_inode_generation(eb, src_item) == 0) {
			struct extent_buffer *dst_eb = path->nodes[0];
			const u64 ino_size = btrfs_inode_size(eb, src_item);

			/*
			 * For regular files an ino_size == 0 is used only when
			 * logging that an inode exists, as part of a directory
			 * fsync, and the inode wasn't fsynced before. In this
			 * case don't set the size of the inode in the fs/subvol
			 * tree, otherwise we would be throwing valid data away.
			 */
			if (S_ISREG(btrfs_inode_mode(eb, src_item)) &&
			    S_ISREG(btrfs_inode_mode(dst_eb, dst_item)) &&
			    ino_size != 0) {
				struct btrfs_map_token token;

				btrfs_init_map_token(&token);
				btrfs_set_token_inode_size(dst_eb, dst_item,
							   ino_size, &token);
			}
			goto no_copy;
		}

		if (overwrite_root &&
		    S_ISDIR(btrfs_inode_mode(eb, src_item)) &&
		    S_ISDIR(btrfs_inode_mode(path->nodes[0], dst_item))) {
			save_old_i_size = 1;
			saved_i_size = btrfs_inode_size(path->nodes[0],
							dst_item);
		}
	}

	copy_extent_buffer(path->nodes[0], eb, dst_ptr,
			   src_ptr, item_size);

	if (save_old_i_size) {
		struct btrfs_inode_item *dst_item;

		dst_item = (struct btrfs_inode_item *)dst_ptr;
		btrfs_set_inode_size(path->nodes[0], dst_item, saved_i_size);
	}

	/* make sure the generation is filled in */
	if (key->type == BTRFS_INODE_ITEM_KEY) {
		struct btrfs_inode_item *dst_item;

		dst_item = (struct btrfs_inode_item *)dst_ptr;
		if (btrfs_inode_generation(path->nodes[0], dst_item) == 0) {
			btrfs_set_inode_generation(path->nodes[0], dst_item,
						   trans->transid);
		}
	}
no_copy:
	btrfs_mark_buffer_dirty(path->nodes[0]);
	btrfs_release_path(path);
	return 0;
}
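/*
 * Illustrative sketch (not kernel code): the overwrite contract described
 * above, reduced to plain memory. The destination slot is resized to the
 * source size and then the bytes are copied, mirroring what overwrite_item()
 * does with btrfs_extend_item()/btrfs_truncate_item().
 */
#if 0
#include <stdlib.h>
#include <string.h>

struct demo_item {
	void *data;
	size_t size;
};

static int demo_overwrite(struct demo_item *dst, const void *src, size_t len)
{
	if (dst->size != len) {
		void *p = realloc(dst->data, len);

		if (!p && len)
			return -1;	/* extend failed */
		dst->data = p;
		dst->size = len;	/* extend or truncate to fit */
	}
	memcpy(dst->data, src, len);
	return 0;
}
#endif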
/*
 * simple helper to read an inode off the disk from a given root
 * This can only be called for subvolume roots and not for the log
 */
static noinline struct inode *read_one_inode(struct btrfs_root *root,
					     u64 objectid)
{
	struct btrfs_key key;
	struct inode *inode;

	key.objectid = objectid;
	key.type = BTRFS_INODE_ITEM_KEY;
	key.offset = 0;
	inode = btrfs_iget(root->fs_info->sb, &key, root, NULL);
	if (IS_ERR(inode))
		inode = NULL;
	return inode;
}
/* replays a single extent in 'eb' at 'slot' with 'key' into the
 * subvolume 'root'.  path is released on entry and should be released
 * on exit.
 *
 * extents in the log tree have not been allocated out of the extent
 * tree yet.  So, this completes the allocation, taking a reference
 * as required if the extent already exists or creating a new extent
 * if it isn't in the extent allocation tree yet.
 *
 * The extent is inserted into the file, dropping any existing extents
 * from the file that overlap the new one.
 */
static noinline int replay_one_extent(struct btrfs_trans_handle *trans,
				      struct btrfs_root *root,
				      struct btrfs_path *path,
				      struct extent_buffer *eb, int slot,
				      struct btrfs_key *key)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	int found_type;
	u64 extent_end;
	u64 start = key->offset;
	u64 nbytes = 0;
	struct btrfs_file_extent_item *item;
	struct inode *inode = NULL;
	unsigned long size;
	int ret = 0;

	item = btrfs_item_ptr(eb, slot, struct btrfs_file_extent_item);
	found_type = btrfs_file_extent_type(eb, item);

	if (found_type == BTRFS_FILE_EXTENT_REG ||
	    found_type == BTRFS_FILE_EXTENT_PREALLOC) {
		nbytes = btrfs_file_extent_num_bytes(eb, item);
		extent_end = start + nbytes;

		/*
		 * We don't add to the inodes nbytes if we are prealloc or a
		 * hole.
		 */
		if (btrfs_file_extent_disk_bytenr(eb, item) == 0)
			nbytes = 0;
	} else if (found_type == BTRFS_FILE_EXTENT_INLINE) {
		size = btrfs_file_extent_ram_bytes(eb, item);
		nbytes = btrfs_file_extent_ram_bytes(eb, item);
		extent_end = ALIGN(start + size,
				   fs_info->sectorsize);
	} else {
		ret = 0;
		goto out;
	}

	inode = read_one_inode(root, key->objectid);
	if (!inode) {
		ret = -EIO;
		goto out;
	}

	/*
	 * first check to see if we already have this extent in the
	 * file.  This must be done before the btrfs_drop_extents run
	 * so we don't try to drop this extent.
	 */
	ret = btrfs_lookup_file_extent(trans, root, path,
			btrfs_ino(BTRFS_I(inode)), start, 0);

	if (ret == 0 &&
	    (found_type == BTRFS_FILE_EXTENT_REG ||
	     found_type == BTRFS_FILE_EXTENT_PREALLOC)) {
		struct btrfs_file_extent_item cmp1;
		struct btrfs_file_extent_item cmp2;
		struct btrfs_file_extent_item *existing;
		struct extent_buffer *leaf;

		leaf = path->nodes[0];
		existing = btrfs_item_ptr(leaf, path->slots[0],
					  struct btrfs_file_extent_item);

		read_extent_buffer(eb, &cmp1, (unsigned long)item,
				   sizeof(cmp1));
		read_extent_buffer(leaf, &cmp2, (unsigned long)existing,
				   sizeof(cmp2));

		/*
		 * we already have a pointer to this exact extent,
		 * we don't have to do anything
		 */
		if (memcmp(&cmp1, &cmp2, sizeof(cmp1)) == 0) {
			btrfs_release_path(path);
			goto out;
		}
	}
	btrfs_release_path(path);

	/* drop any overlapping extents */
	ret = btrfs_drop_extents(trans, root, inode, start, extent_end, 1);
	if (ret)
		goto out;

	if (found_type == BTRFS_FILE_EXTENT_REG ||
	    found_type == BTRFS_FILE_EXTENT_PREALLOC) {
		u64 offset;
		unsigned long dest_offset;
		struct btrfs_key ins;

		if (btrfs_file_extent_disk_bytenr(eb, item) == 0 &&
		    btrfs_fs_incompat(fs_info, NO_HOLES))
			goto update_inode;

		ret = btrfs_insert_empty_item(trans, root, path, key,
					      sizeof(*item));
		if (ret)
			goto out;
		dest_offset = btrfs_item_ptr_offset(path->nodes[0],
						    path->slots[0]);
		copy_extent_buffer(path->nodes[0], eb, dest_offset,
				   (unsigned long)item, sizeof(*item));

		ins.objectid = btrfs_file_extent_disk_bytenr(eb, item);
		ins.offset = btrfs_file_extent_disk_num_bytes(eb, item);
		ins.type = BTRFS_EXTENT_ITEM_KEY;
		offset = key->offset - btrfs_file_extent_offset(eb, item);

		/*
		 * Manually record dirty extent, as here we did a shallow
		 * file extent item copy and skip normal backref update,
		 * but modifying extent tree all by ourselves.
		 * So need to manually record dirty extent for qgroup,
		 * as the owner of the file extent changed from log tree
		 * (doesn't affect qgroup) to fs/file tree (affects qgroup)
		 */
		ret = btrfs_qgroup_trace_extent(trans,
				btrfs_file_extent_disk_bytenr(eb, item),
				btrfs_file_extent_disk_num_bytes(eb, item),
				GFP_NOFS);
		if (ret < 0)
			goto out;

		if (ins.objectid > 0) {
			u64 csum_start;
			u64 csum_end;
			LIST_HEAD(ordered_sums);
			/*
			 * is this extent already allocated in the extent
			 * allocation tree?  If so, just add a reference
			 */
			ret = btrfs_lookup_data_extent(fs_info, ins.objectid,
						ins.offset);
			if (ret == 0) {
				ret = btrfs_inc_extent_ref(trans, root,
						ins.objectid, ins.offset,
						0, root->root_key.objectid,
						key->objectid, offset);
				if (ret)
					goto out;
			} else {
				/*
				 * insert the extent pointer in the extent
				 * allocation tree
				 */
				ret = btrfs_alloc_logged_file_extent(trans,
						root->root_key.objectid,
						key->objectid, offset, &ins);
				if (ret)
					goto out;
			}
			btrfs_release_path(path);

			if (btrfs_file_extent_compression(eb, item)) {
				csum_start = ins.objectid;
				csum_end = csum_start + ins.offset;
			} else {
				csum_start = ins.objectid +
					btrfs_file_extent_offset(eb, item);
				csum_end = csum_start +
					btrfs_file_extent_num_bytes(eb, item);
			}

			ret = btrfs_lookup_csums_range(root->log_root,
						csum_start, csum_end - 1,
						&ordered_sums, 0);
			if (ret)
				goto out;
			/*
			 * Now delete all existing csums in the csum root that
			 * cover our range. We do this because we can have an
			 * extent that is completely referenced by one file
			 * extent item and partially referenced by another
			 * file extent item (like after using the clone or
			 * extent_same ioctls). In this case if we end up doing
			 * the replay of the one that partially references the
			 * extent first, and we do not do the csum deletion
			 * below, we can get 2 csum items in the csum tree that
			 * overlap each other. For example, imagine our log has
			 * the two following file extent items:
			 *
			 * key (257 EXTENT_DATA 409600)
			 *     extent data disk byte 12845056 nr 102400
			 *     extent data offset 20480 nr 20480 ram 102400
			 *
			 * key (257 EXTENT_DATA 819200)
			 *     extent data disk byte 12845056 nr 102400
			 *     extent data offset 0 nr 102400 ram 102400
			 *
			 * Where the second one fully references the 100K extent
			 * that starts at disk byte 12845056, and the log tree
			 * has a single csum item that covers the entire range
			 * of the extent:
			 *
			 * key (EXTENT_CSUM EXTENT_CSUM 12845056) itemsize 100
			 *
			 * After the first file extent item is replayed, the
			 * csum tree gets the following csum item:
			 *
			 * key (EXTENT_CSUM EXTENT_CSUM 12865536) itemsize 20
			 *
			 * Which covers the 20K sub-range starting at offset 20K
			 * of our extent. Now when we replay the second file
			 * extent item, if we do not delete existing csum items
			 * that cover any of its blocks, we end up getting two
			 * csum items in our csum tree that overlap each other:
			 *
			 * key (EXTENT_CSUM EXTENT_CSUM 12845056) itemsize 100
			 * key (EXTENT_CSUM EXTENT_CSUM 12865536) itemsize 20
			 *
			 * Which is a problem, because after this anyone trying
			 * to look up the checksum of any block of our extent
			 * starting at an offset of 40K or higher, will end up
			 * looking at the second csum item only, which does not
			 * contain the checksum for any block starting at
			 * offset 40K or higher of our extent.
			 */
			while (!list_empty(&ordered_sums)) {
				struct btrfs_ordered_sum *sums;

				sums = list_entry(ordered_sums.next,
						struct btrfs_ordered_sum,
						list);
				if (!ret)
					ret = btrfs_del_csums(trans, fs_info,
							      sums->bytenr,
							      sums->len);
				if (!ret)
					ret = btrfs_csum_file_blocks(trans,
						fs_info->csum_root, sums);
				list_del(&sums->list);
				kfree(sums);
			}
			if (ret)
				goto out;
		} else {
			btrfs_release_path(path);
		}
	} else if (found_type == BTRFS_FILE_EXTENT_INLINE) {
		/* inline extents are easy, we just overwrite them */
		ret = overwrite_item(trans, root, path, eb, slot, key);
		if (ret)
			goto out;
	}

	inode_add_bytes(inode, nbytes);
update_inode:
	ret = btrfs_update_inode(trans, root, inode);
out:
	if (inode)
		iput(inode);
	return ret;
}
/*
 * when cleaning up conflicts between the directory names in the
 * subvolume, directory names in the log and directory names in the
 * inode back references, we may have to unlink inodes from directories.
 *
 * This is a helper function to do the unlink of a specific directory
 * item
 */
static noinline int drop_one_dir_item(struct btrfs_trans_handle *trans,
				      struct btrfs_root *root,
				      struct btrfs_path *path,
				      struct btrfs_inode *dir,
				      struct btrfs_dir_item *di)
{
	struct inode *inode;
	char *name;
	int name_len;
	struct extent_buffer *leaf;
	struct btrfs_key location;
	int ret;

	leaf = path->nodes[0];

	btrfs_dir_item_key_to_cpu(leaf, di, &location);
	name_len = btrfs_dir_name_len(leaf, di);
	name = kmalloc(name_len, GFP_NOFS);
	if (!name)
		return -ENOMEM;

	read_extent_buffer(leaf, name, (unsigned long)(di + 1), name_len);
	btrfs_release_path(path);

	inode = read_one_inode(root, location.objectid);
	if (!inode) {
		ret = -EIO;
		goto out;
	}

	ret = link_to_fixup_dir(trans, root, path, location.objectid);
	if (ret)
		goto out;

	ret = btrfs_unlink_inode(trans, root, dir, BTRFS_I(inode), name,
				 name_len);
	if (ret)
		goto out;
	else
		ret = btrfs_run_delayed_items(trans);
out:
	kfree(name);
	iput(inode);
	return ret;
}
/*
 * helper function to see if a given name and sequence number found
 * in an inode back reference are already in a directory and correctly
 * point to this inode
 */
static noinline int inode_in_dir(struct btrfs_root *root,
				 struct btrfs_path *path,
				 u64 dirid, u64 objectid, u64 index,
				 const char *name, int name_len)
{
	struct btrfs_dir_item *di;
	struct btrfs_key location;
	int match = 0;

	di = btrfs_lookup_dir_index_item(NULL, root, path, dirid,
					 index, name, name_len, 0);
	if (di && !IS_ERR(di)) {
		btrfs_dir_item_key_to_cpu(path->nodes[0], di, &location);
		if (location.objectid != objectid)
			goto out;
	} else
		goto out;
	btrfs_release_path(path);

	di = btrfs_lookup_dir_item(NULL, root, path, dirid, name, name_len, 0);
	if (di && !IS_ERR(di)) {
		btrfs_dir_item_key_to_cpu(path->nodes[0], di, &location);
		if (location.objectid != objectid)
			goto out;
	} else
		goto out;
	match = 1;
out:
	btrfs_release_path(path);
	return match;
}
/*
 * helper function to check a log tree for a named back reference in
 * an inode.  This is used to decide if a back reference that is
 * found in the subvolume conflicts with what we find in the log.
 *
 * inode backreferences may have multiple refs in a single item,
 * during replay we process one reference at a time, and we don't
 * want to delete valid links to a file from the subvolume if that
 * link is also in the log.
 */
static noinline int backref_in_log(struct btrfs_root *log,
				   struct btrfs_key *key,
				   u64 ref_objectid,
				   const char *name, int namelen)
{
	struct btrfs_path *path;
	struct btrfs_inode_ref *ref;
	unsigned long ptr;
	unsigned long ptr_end;
	unsigned long name_ptr;
	int found_name_len;
	int item_size;
	int ret;
	int match = 0;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	ret = btrfs_search_slot(NULL, log, key, path, 0, 0);
	if (ret != 0)
		goto out;

	ptr = btrfs_item_ptr_offset(path->nodes[0], path->slots[0]);

	if (key->type == BTRFS_INODE_EXTREF_KEY) {
		if (btrfs_find_name_in_ext_backref(path->nodes[0],
						   path->slots[0],
						   ref_objectid,
						   name, namelen, NULL))
			match = 1;

		goto out;
	}

	item_size = btrfs_item_size_nr(path->nodes[0], path->slots[0]);
	ptr_end = ptr + item_size;
	while (ptr < ptr_end) {
		ref = (struct btrfs_inode_ref *)ptr;
		found_name_len = btrfs_inode_ref_name_len(path->nodes[0], ref);
		if (found_name_len == namelen) {
			name_ptr = (unsigned long)(ref + 1);
			ret = memcmp_extent_buffer(path->nodes[0], name,
						   name_ptr, namelen);
			if (ret == 0) {
				match = 1;
				goto out;
			}
		}
		ptr = (unsigned long)(ref + 1) + found_name_len;
	}
out:
	btrfs_free_path(path);
	return match;
}
static inline int __add_inode_ref(struct btrfs_trans_handle *trans,
				  struct btrfs_root *root,
				  struct btrfs_path *path,
				  struct btrfs_root *log_root,
				  struct btrfs_inode *dir,
				  struct btrfs_inode *inode,
				  u64 inode_objectid, u64 parent_objectid,
				  u64 ref_index, char *name, int namelen,
				  int *search_done)
{
	int ret;
	char *victim_name;
	int victim_name_len;
	struct extent_buffer *leaf;
	struct btrfs_dir_item *di;
	struct btrfs_key search_key;
	struct btrfs_inode_extref *extref;

again:
	/* Search old style refs */
	search_key.objectid = inode_objectid;
	search_key.type = BTRFS_INODE_REF_KEY;
	search_key.offset = parent_objectid;
	ret = btrfs_search_slot(NULL, root, &search_key, path, 0, 0);
	if (ret == 0) {
		struct btrfs_inode_ref *victim_ref;
		unsigned long ptr;
		unsigned long ptr_end;

		leaf = path->nodes[0];

		/* are we trying to overwrite a back ref for the root directory
		 * if so, just jump out, we're done
		 */
		if (search_key.objectid == search_key.offset)
			return 1;

		/* check all the names in this back reference to see
		 * if they are in the log.  if so, we allow them to stay
		 * otherwise they must be unlinked as a conflict
		 */
		ptr = btrfs_item_ptr_offset(leaf, path->slots[0]);
		ptr_end = ptr + btrfs_item_size_nr(leaf, path->slots[0]);
		while (ptr < ptr_end) {
			victim_ref = (struct btrfs_inode_ref *)ptr;
			victim_name_len = btrfs_inode_ref_name_len(leaf,
								   victim_ref);
			victim_name = kmalloc(victim_name_len, GFP_NOFS);
			if (!victim_name)
				return -ENOMEM;

			read_extent_buffer(leaf, victim_name,
					   (unsigned long)(victim_ref + 1),
					   victim_name_len);

			if (!backref_in_log(log_root, &search_key,
					    parent_objectid, victim_name,
					    victim_name_len)) {
				inc_nlink(&inode->vfs_inode);
				btrfs_release_path(path);

				ret = btrfs_unlink_inode(trans, root, dir, inode,
						victim_name, victim_name_len);
				kfree(victim_name);
				if (ret)
					return ret;
				ret = btrfs_run_delayed_items(trans);
				if (ret)
					return ret;
				*search_done = 1;
				goto again;
			}
			kfree(victim_name);

			ptr = (unsigned long)(victim_ref + 1) + victim_name_len;
		}

		/*
		 * NOTE: we have searched root tree and checked the
		 * corresponding ref, it does not need to check again.
		 */
		*search_done = 1;
	}
	btrfs_release_path(path);

	/* Same search but for extended refs */
	extref = btrfs_lookup_inode_extref(NULL, root, path, name, namelen,
					   inode_objectid, parent_objectid, 0,
					   0);
	if (!IS_ERR_OR_NULL(extref)) {
		u32 item_size;
		u32 cur_offset = 0;
		unsigned long base;
		struct inode *victim_parent;

		leaf = path->nodes[0];

		item_size = btrfs_item_size_nr(leaf, path->slots[0]);
		base = btrfs_item_ptr_offset(leaf, path->slots[0]);

		while (cur_offset < item_size) {
			extref = (struct btrfs_inode_extref *)(base + cur_offset);

			victim_name_len = btrfs_inode_extref_name_len(leaf, extref);

			if (btrfs_inode_extref_parent(leaf, extref) != parent_objectid)
				goto next;

			victim_name = kmalloc(victim_name_len, GFP_NOFS);
			if (!victim_name)
				return -ENOMEM;
			read_extent_buffer(leaf, victim_name, (unsigned long)&extref->name,
					   victim_name_len);

			search_key.objectid = inode_objectid;
			search_key.type = BTRFS_INODE_EXTREF_KEY;
			search_key.offset = btrfs_extref_hash(parent_objectid,
							      victim_name,
							      victim_name_len);
			ret = 0;
			if (!backref_in_log(log_root, &search_key,
					    parent_objectid, victim_name,
					    victim_name_len)) {
				ret = -ENOENT;
				victim_parent = read_one_inode(root,
						parent_objectid);
				if (victim_parent) {
					inc_nlink(&inode->vfs_inode);
					btrfs_release_path(path);

					ret = btrfs_unlink_inode(trans, root,
							BTRFS_I(victim_parent),
							inode,
							victim_name,
							victim_name_len);
					if (!ret)
						ret = btrfs_run_delayed_items(
								  trans);
				}
				iput(victim_parent);
				kfree(victim_name);
				if (ret)
					return ret;
				*search_done = 1;
				goto again;
			}
			kfree(victim_name);
next:
			cur_offset += victim_name_len + sizeof(*extref);
		}
		*search_done = 1;
	}
	btrfs_release_path(path);

	/* look for a conflicting sequence number */
	di = btrfs_lookup_dir_index_item(trans, root, path, btrfs_ino(dir),
					 ref_index, name, namelen, 0);
	if (di && !IS_ERR(di)) {
		ret = drop_one_dir_item(trans, root, path, dir, di);
		if (ret)
			return ret;
	}
	btrfs_release_path(path);

	/* look for a conflicting name */
	di = btrfs_lookup_dir_item(trans, root, path, btrfs_ino(dir),
				   name, namelen, 0);
	if (di && !IS_ERR(di)) {
		ret = drop_one_dir_item(trans, root, path, dir, di);
		if (ret)
			return ret;
	}
	btrfs_release_path(path);

	return 0;
}
static int extref_get_fields(struct extent_buffer *eb, unsigned long ref_ptr,
			     u32 *namelen, char **name, u64 *index,
			     u64 *parent_objectid)
{
	struct btrfs_inode_extref *extref;

	extref = (struct btrfs_inode_extref *)ref_ptr;

	*namelen = btrfs_inode_extref_name_len(eb, extref);
	*name = kmalloc(*namelen, GFP_NOFS);
	if (*name == NULL)
		return -ENOMEM;

	read_extent_buffer(eb, *name, (unsigned long)&extref->name,
			   *namelen);

	if (index)
		*index = btrfs_inode_extref_index(eb, extref);
	if (parent_objectid)
		*parent_objectid = btrfs_inode_extref_parent(eb, extref);

	return 0;
}
static int ref_get_fields(struct extent_buffer *eb, unsigned long ref_ptr,
			  u32 *namelen, char **name, u64 *index)
{
	struct btrfs_inode_ref *ref;

	ref = (struct btrfs_inode_ref *)ref_ptr;

	*namelen = btrfs_inode_ref_name_len(eb, ref);
	*name = kmalloc(*namelen, GFP_NOFS);
	if (*name == NULL)
		return -ENOMEM;

	read_extent_buffer(eb, *name, (unsigned long)(ref + 1), *namelen);

	if (index)
		*index = btrfs_inode_ref_index(eb, ref);

	return 0;
}
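/*
 * Illustrative sketch (not kernel code): inode refs pack a fixed header
 * followed by an inline, non-terminated name, so callers advance by the
 * header size plus the name length, exactly as ref_get_fields() and
 * extref_get_fields() do. The record layout below is a stand-in, not the
 * on-disk btrfs format.
 */
#if 0
#include <stddef.h>
#include <stdint.h>

struct demo_ref {
	uint64_t index;
	uint16_t name_len;
	/* name bytes follow the header, not NUL terminated */
};

static size_t demo_ref_total_len(const struct demo_ref *ref)
{
	return sizeof(*ref) + ref->name_len;
}

static const struct demo_ref *demo_ref_next(const struct demo_ref *ref)
{
	/* step over the header and the inline name to the next record */
	return (const struct demo_ref *)((const char *)ref +
					 demo_ref_total_len(ref));
}
#endif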
/*
 * Take an inode reference item from the log tree and iterate all names from
 * the inode reference item in the subvolume tree with the same key (if it
 * exists). For any name that is not in the inode reference item from the log
 * tree, do a proper unlink of that name (that is, remove its entry from the
 * inode reference item and both dir index keys).
 */
static int unlink_old_inode_refs(struct btrfs_trans_handle *trans,
				 struct btrfs_root *root,
				 struct btrfs_path *path,
				 struct btrfs_inode *inode,
				 struct extent_buffer *log_eb,
				 int log_slot,
				 struct btrfs_key *key)
{
	int ret;
	unsigned long ref_ptr;
	unsigned long ref_end;
	struct extent_buffer *eb;

again:
	btrfs_release_path(path);
	ret = btrfs_search_slot(NULL, root, key, path, 0, 0);
	if (ret > 0) {
		ret = 0;
		goto out;
	}
	if (ret < 0)
		goto out;

	eb = path->nodes[0];
	ref_ptr = btrfs_item_ptr_offset(eb, path->slots[0]);
	ref_end = ref_ptr + btrfs_item_size_nr(eb, path->slots[0]);
	while (ref_ptr < ref_end) {
		char *name = NULL;
		int namelen;
		u64 parent_id;

		if (key->type == BTRFS_INODE_EXTREF_KEY) {
			ret = extref_get_fields(eb, ref_ptr, &namelen, &name,
						NULL, &parent_id);
		} else {
			parent_id = key->offset;
			ret = ref_get_fields(eb, ref_ptr, &namelen, &name,
					     NULL);
		}
		if (ret)
			goto out;

		if (key->type == BTRFS_INODE_EXTREF_KEY)
			ret = btrfs_find_name_in_ext_backref(log_eb, log_slot,
							     parent_id, name,
							     namelen, NULL);
		else
			ret = btrfs_find_name_in_backref(log_eb, log_slot, name,
							 namelen, NULL);

		if (!ret) {
			struct inode *dir;

			btrfs_release_path(path);
			dir = read_one_inode(root, parent_id);
			if (!dir) {
				ret = -ENOENT;
				kfree(name);
				goto out;
			}
			ret = btrfs_unlink_inode(trans, root, BTRFS_I(dir),
						 inode, name, namelen);
			kfree(name);
			iput(dir);
			if (ret)
				goto out;
			goto again;
		}

		kfree(name);
		ref_ptr += namelen;
		if (key->type == BTRFS_INODE_EXTREF_KEY)
			ref_ptr += sizeof(struct btrfs_inode_extref);
		else
			ref_ptr += sizeof(struct btrfs_inode_ref);
	}
	ret = 0;
out:
	btrfs_release_path(path);
	return ret;
}
static int btrfs_inode_ref_exists(struct inode *inode, struct inode *dir,
				  const u8 ref_type, const char *name,
				  const int namelen)
{
	struct btrfs_key key;
	struct btrfs_path *path;
	const u64 parent_id = btrfs_ino(BTRFS_I(dir));
	int ret;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	key.objectid = btrfs_ino(BTRFS_I(inode));
	key.type = ref_type;
	if (key.type == BTRFS_INODE_REF_KEY)
		key.offset = parent_id;
	else
		key.offset = btrfs_extref_hash(parent_id, name, namelen);

	ret = btrfs_search_slot(NULL, BTRFS_I(inode)->root, &key, path, 0, 0);
	if (ret < 0)
		goto out;
	if (ret > 0) {
		ret = 0;
		goto out;
	}
	if (key.type == BTRFS_INODE_EXTREF_KEY)
		ret = btrfs_find_name_in_ext_backref(path->nodes[0],
						     path->slots[0], parent_id,
						     name, namelen, NULL);
	else
		ret = btrfs_find_name_in_backref(path->nodes[0], path->slots[0],
						 name, namelen, NULL);

out:
	btrfs_free_path(path);
	return ret;
}
static int add_link(struct btrfs_trans_handle *trans, struct btrfs_root *root,
		    struct inode *dir, struct inode *inode, const char *name,
		    int namelen, u64 ref_index)
{
	struct btrfs_dir_item *dir_item;
	struct btrfs_key key;
	struct btrfs_path *path;
	struct inode *other_inode = NULL;
	int ret;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	dir_item = btrfs_lookup_dir_item(NULL, root, path,
					 btrfs_ino(BTRFS_I(dir)),
					 name, namelen, 0);
	if (!dir_item) {
		btrfs_release_path(path);
		goto add_link;
	} else if (IS_ERR(dir_item)) {
		ret = PTR_ERR(dir_item);
		goto out;
	}

	/*
	 * Our inode's dentry collides with the dentry of another inode which is
	 * in the log but not yet processed since it has a higher inode number.
	 * So delete that other dentry.
	 */
	btrfs_dir_item_key_to_cpu(path->nodes[0], dir_item, &key);
	btrfs_release_path(path);
	other_inode = read_one_inode(root, key.objectid);
	if (!other_inode) {
		ret = -ENOENT;
		goto out;
	}
	ret = btrfs_unlink_inode(trans, root, BTRFS_I(dir), BTRFS_I(other_inode),
				 name, namelen);
	if (ret)
		goto out;
	/*
	 * If we dropped the link count to 0, bump it so that later the iput()
	 * on the inode will not free it. We will fixup the link count later.
	 */
	if (other_inode->i_nlink == 0)
		inc_nlink(other_inode);

	ret = btrfs_run_delayed_items(trans);
	if (ret)
		goto out;
add_link:
	ret = btrfs_add_link(trans, BTRFS_I(dir), BTRFS_I(inode),
			     name, namelen, 0, ref_index);
out:
	iput(other_inode);
	btrfs_free_path(path);

	return ret;
}
/*
 * replay one inode back reference item found in the log tree.
 * eb, slot and key refer to the buffer and key found in the log tree.
 * root is the destination we are replaying into, and path is for temp
 * use by this function.  (it should be released on return).
 */
static noinline int add_inode_ref(struct btrfs_trans_handle *trans,
				  struct btrfs_root *root,
				  struct btrfs_root *log,
				  struct btrfs_path *path,
				  struct extent_buffer *eb, int slot,
				  struct btrfs_key *key)
{
	struct inode *dir = NULL;
	struct inode *inode = NULL;
	unsigned long ref_ptr;
	unsigned long ref_end;
	char *name = NULL;
	int namelen;
	int ret;
	int search_done = 0;
	int log_ref_ver = 0;
	u64 parent_objectid;
	u64 inode_objectid;
	u64 ref_index = 0;
	int ref_struct_size;

	ref_ptr = btrfs_item_ptr_offset(eb, slot);
	ref_end = ref_ptr + btrfs_item_size_nr(eb, slot);

	if (key->type == BTRFS_INODE_EXTREF_KEY) {
		struct btrfs_inode_extref *r;

		ref_struct_size = sizeof(struct btrfs_inode_extref);
		log_ref_ver = 1;
		r = (struct btrfs_inode_extref *)ref_ptr;
		parent_objectid = btrfs_inode_extref_parent(eb, r);
	} else {
		ref_struct_size = sizeof(struct btrfs_inode_ref);
		parent_objectid = key->offset;
	}
	inode_objectid = key->objectid;

	/*
	 * it is possible that we didn't log all the parent directories
	 * for a given inode.  If we don't find the dir, just don't
	 * copy the back ref in.  The link count fixup code will take
	 * care of the rest
	 */
	dir = read_one_inode(root, parent_objectid);
	if (!dir) {
		ret = -ENOENT;
		goto out;
	}

	inode = read_one_inode(root, inode_objectid);
	if (!inode) {
		ret = -EIO;
		goto out;
	}

	while (ref_ptr < ref_end) {
		if (log_ref_ver) {
			ret = extref_get_fields(eb, ref_ptr, &namelen, &name,
						&ref_index, &parent_objectid);
			/*
			 * parent object can change from one array
			 * item to another.
			 */
			if (!dir)
				dir = read_one_inode(root, parent_objectid);
			if (!dir) {
				ret = -ENOENT;
				goto out;
			}
		} else {
			ret = ref_get_fields(eb, ref_ptr, &namelen, &name,
					     &ref_index);
		}
		if (ret)
			goto out;

		/* if we already have a perfect match, we're done */
		if (!inode_in_dir(root, path, btrfs_ino(BTRFS_I(dir)),
				  btrfs_ino(BTRFS_I(inode)), ref_index,
				  name, namelen)) {
			/*
			 * look for a conflicting back reference in the
			 * metadata. if we find one we have to unlink that name
			 * of the file before we add our new link.  Later on, we
			 * overwrite any existing back reference, and we don't
			 * want to create dangling pointers in the directory.
			 */

			if (!search_done) {
				ret = __add_inode_ref(trans, root, path, log,
						      BTRFS_I(dir),
						      BTRFS_I(inode),
						      inode_objectid,
						      parent_objectid,
						      ref_index, name, namelen,
						      &search_done);
				if (ret) {
					if (ret == 1)
						ret = 0;
					goto out;
				}
			}

			/*
			 * If a reference item already exists for this inode
			 * with the same parent and name, but different index,
			 * drop it and the corresponding directory index entries
			 * from the parent before adding the new reference item
			 * and dir index entries, otherwise we would fail with
			 * -EEXIST returned from btrfs_add_link() below.
			 */
			ret = btrfs_inode_ref_exists(inode, dir, key->type,
						     name, namelen);
			if (ret > 0) {
				ret = btrfs_unlink_inode(trans, root,
							 BTRFS_I(dir),
							 BTRFS_I(inode),
							 name, namelen);
				/*
				 * If we dropped the link count to 0, bump it so
				 * that later the iput() on the inode will not
				 * free it. We will fixup the link count later.
				 */
				if (!ret && inode->i_nlink == 0)
					inc_nlink(inode);
			}
			if (ret < 0)
				goto out;

			/* insert our name */
			ret = add_link(trans, root, dir, inode, name, namelen,
				       ref_index);
			if (ret)
				goto out;

			btrfs_update_inode(trans, root, inode);
		}

		ref_ptr = (unsigned long)(ref_ptr + ref_struct_size) + namelen;
		kfree(name);
		name = NULL;
		if (log_ref_ver) {
			iput(dir);
			dir = NULL;
		}
	}

	/*
	 * Before we overwrite the inode reference item in the subvolume tree
	 * with the item from the log tree, we must unlink all names from the
	 * parent directory that are in the subvolume's tree inode reference
	 * item, otherwise we end up with an inconsistent subvolume tree where
	 * dir index entries exist for a name but there is no inode reference
	 * item with the same name.
	 */
	ret = unlink_old_inode_refs(trans, root, path, BTRFS_I(inode), eb, slot,
				    key);
	if (ret)
		goto out;

	/* finally write the back reference in the inode */
	ret = overwrite_item(trans, root, path, eb, slot, key);
out:
	btrfs_release_path(path);
	kfree(name);
	iput(dir);
	iput(inode);
	return ret;
}
static int insert_orphan_item(struct btrfs_trans_handle *trans,
			      struct btrfs_root *root, u64 ino)
{
	int ret;

	ret = btrfs_insert_orphan_item(trans, root, ino);
	if (ret == -EEXIST)
		ret = 0;

	return ret;
}
static int count_inode_extrefs(struct btrfs_root *root,
			       struct btrfs_inode *inode, struct btrfs_path *path)
{
	int ret = 0;
	int name_len;
	unsigned int nlink = 0;
	u32 item_size;
	u32 cur_offset = 0;
	u64 inode_objectid = btrfs_ino(inode);
	u64 offset = 0;
	unsigned long ptr;
	struct btrfs_inode_extref *extref;
	struct extent_buffer *leaf;

	while (1) {
		ret = btrfs_find_one_extref(root, inode_objectid, offset, path,
					    &extref, &offset);
		if (ret)
			break;

		leaf = path->nodes[0];
		item_size = btrfs_item_size_nr(leaf, path->slots[0]);
		ptr = btrfs_item_ptr_offset(leaf, path->slots[0]);
		cur_offset = 0;

		while (cur_offset < item_size) {
			extref = (struct btrfs_inode_extref *) (ptr + cur_offset);
			name_len = btrfs_inode_extref_name_len(leaf, extref);

			nlink++;

			cur_offset += name_len + sizeof(*extref);
		}

		offset++;
		btrfs_release_path(path);
	}
	btrfs_release_path(path);

	if (ret < 0 && ret != -ENOENT)
		return ret;
	return nlink;
}
static int count_inode_refs(struct btrfs_root *root,
			    struct btrfs_inode *inode, struct btrfs_path *path)
{
	int ret;
	struct btrfs_key key;
	unsigned int nlink = 0;
	unsigned long ptr;
	unsigned long ptr_end;
	int name_len;
	u64 ino = btrfs_ino(inode);

	key.objectid = ino;
	key.type = BTRFS_INODE_REF_KEY;
	key.offset = (u64)-1;

	while (1) {
		ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
		if (ret < 0)
			break;
		if (ret > 0) {
			if (path->slots[0] == 0)
				break;
			path->slots[0]--;
		}
process_slot:
		btrfs_item_key_to_cpu(path->nodes[0], &key,
				      path->slots[0]);
		if (key.objectid != ino ||
		    key.type != BTRFS_INODE_REF_KEY)
			break;
		ptr = btrfs_item_ptr_offset(path->nodes[0], path->slots[0]);
		ptr_end = ptr + btrfs_item_size_nr(path->nodes[0],
						   path->slots[0]);
		while (ptr < ptr_end) {
			struct btrfs_inode_ref *ref;

			ref = (struct btrfs_inode_ref *)ptr;
			name_len = btrfs_inode_ref_name_len(path->nodes[0],
							    ref);
			ptr = (unsigned long)(ref + 1) + name_len;
			nlink++;
		}

		if (key.offset == 0)
			break;
		if (path->slots[0] > 0) {
			path->slots[0]--;
			goto process_slot;
		}
		key.offset--;
		btrfs_release_path(path);
	}
	btrfs_release_path(path);

	return nlink;
}
/*
 * There are a few corners where the link count of the file can't
 * be properly maintained during replay.  So, instead of adding
 * lots of complexity to the log code, we just scan the backrefs
 * for any file that has been through replay.
 *
 * The scan will update the link count on the inode to reflect the
 * number of back refs found.  If it goes down to zero, the iput
 * will free the inode.
 */
static noinline int fixup_inode_link_count(struct btrfs_trans_handle *trans,
					   struct btrfs_root *root,
					   struct inode *inode)
{
	struct btrfs_path *path;
	int ret;
	u64 nlink = 0;
	u64 ino = btrfs_ino(BTRFS_I(inode));

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	ret = count_inode_refs(root, BTRFS_I(inode), path);
	if (ret < 0)
		goto out;

	nlink = ret;

	ret = count_inode_extrefs(root, BTRFS_I(inode), path);
	if (ret < 0)
		goto out;

	nlink += ret;

	ret = 0;

	if (nlink != inode->i_nlink) {
		set_nlink(inode, nlink);
		btrfs_update_inode(trans, root, inode);
	}
	BTRFS_I(inode)->index_cnt = (u64)-1;

	if (inode->i_nlink == 0) {
		if (S_ISDIR(inode->i_mode)) {
			ret = replay_dir_deletes(trans, root, NULL, path,
						 ino, 1);
			if (ret)
				goto out;
		}
		ret = insert_orphan_item(trans, root, ino);
	}

out:
	btrfs_free_path(path);
	return ret;
}
static noinline int fixup_inode_link_counts(struct btrfs_trans_handle *trans,
					    struct btrfs_root *root,
					    struct btrfs_path *path)
{
	int ret;
	struct btrfs_key key;
	struct inode *inode;

	key.objectid = BTRFS_TREE_LOG_FIXUP_OBJECTID;
	key.type = BTRFS_ORPHAN_ITEM_KEY;
	key.offset = (u64)-1;
	while (1) {
		ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
		if (ret < 0)
			break;

		if (ret == 1) {
			if (path->slots[0] == 0)
				break;
			path->slots[0]--;
		}

		btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);
		if (key.objectid != BTRFS_TREE_LOG_FIXUP_OBJECTID ||
		    key.type != BTRFS_ORPHAN_ITEM_KEY)
			break;

		ret = btrfs_del_item(trans, root, path);
		if (ret)
			goto out;

		btrfs_release_path(path);
		inode = read_one_inode(root, key.offset);
		if (!inode)
			return -EIO;

		ret = fixup_inode_link_count(trans, root, inode);
		iput(inode);
		if (ret)
			goto out;

		/*
		 * fixup on a directory may create new entries,
		 * make sure we always look for the highest possible
		 * offset
		 */
		key.offset = (u64)-1;
	}
	ret = 0;
out:
	btrfs_release_path(path);
	return ret;
}
/*
 * record a given inode in the fixup dir so we can check its link
 * count when replay is done.  The link count is incremented here
 * so the inode won't go away until we check it
 */
static noinline int link_to_fixup_dir(struct btrfs_trans_handle *trans,
				      struct btrfs_root *root,
				      struct btrfs_path *path,
				      u64 objectid)
{
	struct btrfs_key key;
	int ret = 0;
	struct inode *inode;

	inode = read_one_inode(root, objectid);
	if (!inode)
		return -EIO;

	key.objectid = BTRFS_TREE_LOG_FIXUP_OBJECTID;
	key.type = BTRFS_ORPHAN_ITEM_KEY;
	key.offset = objectid;

	ret = btrfs_insert_empty_item(trans, root, path, &key, 0);

	btrfs_release_path(path);
	if (ret == 0) {
		if (!inode->i_nlink)
			set_nlink(inode, 1);
		else
			inc_nlink(inode);
		ret = btrfs_update_inode(trans, root, inode);
	} else if (ret == -EEXIST) {
		ret = 0;
	} else {
		BUG(); /* Logic Error */
	}
	iput(inode);

	return ret;
}
/*
 * when replaying the log for a directory, we only insert names
 * for inodes that actually exist.  This means an fsync on a directory
 * does not implicitly fsync all the new files in it
 */
static noinline int insert_one_name(struct btrfs_trans_handle *trans,
				    struct btrfs_root *root,
				    u64 dirid, u64 index,
				    char *name, int name_len,
				    struct btrfs_key *location)
{
	struct inode *inode;
	struct inode *dir;
	int ret;

	inode = read_one_inode(root, location->objectid);
	if (!inode)
		return -ENOENT;

	dir = read_one_inode(root, dirid);
	if (!dir) {
		iput(inode);
		return -EIO;
	}

	ret = btrfs_add_link(trans, BTRFS_I(dir), BTRFS_I(inode), name,
			     name_len, 1, index);

	/* FIXME, put inode into FIXUP list */

	iput(inode);
	iput(dir);
	return ret;
}
/*
 * Return true if an inode reference exists in the log for the given name,
 * inode and parent inode.
 */
static bool name_in_log_ref(struct btrfs_root *log_root,
			    const char *name, const int name_len,
			    const u64 dirid, const u64 ino)
{
	struct btrfs_key search_key;

	search_key.objectid = ino;
	search_key.type = BTRFS_INODE_REF_KEY;
	search_key.offset = dirid;
	if (backref_in_log(log_root, &search_key, dirid, name, name_len))
		return true;

	search_key.type = BTRFS_INODE_EXTREF_KEY;
	search_key.offset = btrfs_extref_hash(dirid, name, name_len);
	if (backref_in_log(log_root, &search_key, dirid, name, name_len))
		return true;

	return false;
}
/*
 * take a single entry in a log directory item and replay it into
 * the subvolume.
 *
 * if a conflicting item exists in the subdirectory already,
 * the inode it points to is unlinked and put into the link count
 * fix up tree.
 *
 * If a name from the log points to a file or directory that does
 * not exist in the FS, it is skipped.  fsyncs on directories
 * do not force down inodes inside that directory, just changes to the
 * names or unlinks in a directory.
 *
 * Returns < 0 on error, 0 if the name wasn't replayed (dentry points to a
 * non-existing inode) and 1 if the name was replayed.
 */
static noinline int replay_one_name(struct btrfs_trans_handle *trans,
				    struct btrfs_root *root,
				    struct btrfs_path *path,
				    struct extent_buffer *eb,
				    struct btrfs_dir_item *di,
				    struct btrfs_key *key)
{
	char *name;
	int name_len;
	struct btrfs_dir_item *dst_di;
	struct btrfs_key found_key;
	struct btrfs_key log_key;
	struct inode *dir;
	u8 log_type;
	int exists;
	int ret = 0;
	bool update_size = (key->type == BTRFS_DIR_INDEX_KEY);
	bool name_added = false;

	dir = read_one_inode(root, key->objectid);
	if (!dir)
		return -EIO;

	name_len = btrfs_dir_name_len(eb, di);
	name = kmalloc(name_len, GFP_NOFS);
	if (!name) {
		ret = -ENOMEM;
		goto out;
	}

	log_type = btrfs_dir_type(eb, di);
	read_extent_buffer(eb, name, (unsigned long)(di + 1),
			   name_len);

	btrfs_dir_item_key_to_cpu(eb, di, &log_key);
	exists = btrfs_lookup_inode(trans, root, path, &log_key, 0);
	if (exists == 0)
		exists = 1;
	else
		exists = 0;
	btrfs_release_path(path);

	if (key->type == BTRFS_DIR_ITEM_KEY) {
		dst_di = btrfs_lookup_dir_item(trans, root, path, key->objectid,
					       name, name_len, 1);
	} else if (key->type == BTRFS_DIR_INDEX_KEY) {
		dst_di = btrfs_lookup_dir_index_item(trans, root, path,
						     key->objectid,
						     key->offset, name,
						     name_len, 1);
	} else {
		/* Corruption */
		ret = -EINVAL;
		goto out;
	}
	if (IS_ERR_OR_NULL(dst_di)) {
		/* we need a sequence number to insert, so we only
		 * do inserts for the BTRFS_DIR_INDEX_KEY types
		 */
		if (key->type != BTRFS_DIR_INDEX_KEY)
			goto out;
		goto insert;
	}

	btrfs_dir_item_key_to_cpu(path->nodes[0], dst_di, &found_key);
	/* the existing item matches the logged item */
	if (found_key.objectid == log_key.objectid &&
	    found_key.type == log_key.type &&
	    found_key.offset == log_key.offset &&
	    btrfs_dir_type(path->nodes[0], dst_di) == log_type) {
		update_size = false;
		goto out;
	}

	/*
	 * don't drop the conflicting directory entry if the inode
	 * for the new entry doesn't exist
	 */
	if (!exists)
		goto out;

	ret = drop_one_dir_item(trans, root, path, BTRFS_I(dir), dst_di);
	if (ret)
		goto out;

	if (key->type == BTRFS_DIR_INDEX_KEY)
		goto insert;
out:
	btrfs_release_path(path);
	if (!ret && update_size) {
		btrfs_i_size_write(BTRFS_I(dir), dir->i_size + name_len * 2);
		ret = btrfs_update_inode(trans, root, dir);
	}
	kfree(name);
	iput(dir);
	if (!ret && name_added)
		ret = 1;
	return ret;

insert:
	if (name_in_log_ref(root->log_root, name, name_len,
			    key->objectid, log_key.objectid)) {
		/* The dentry will be added later. */
		ret = 0;
		update_size = false;
		goto out;
	}
	btrfs_release_path(path);
	ret = insert_one_name(trans, root, key->objectid, key->offset,
			      name, name_len, &log_key);
	if (ret && ret != -ENOENT && ret != -EEXIST)
		goto out;
	if (!ret)
		name_added = true;
	update_size = false;
	ret = 0;
	goto out;
}
/*
 * find all the names in a directory item and reconcile them into
 * the subvolume.  Only BTRFS_DIR_ITEM_KEY types will have more than
 * one name in a directory item, but the same code gets used for
 * both directory index types
 */
static noinline int replay_one_dir_item(struct btrfs_trans_handle *trans,
					struct btrfs_root *root,
					struct btrfs_path *path,
					struct extent_buffer *eb, int slot,
					struct btrfs_key *key)
{
	int ret = 0;
	u32 item_size = btrfs_item_size_nr(eb, slot);
	struct btrfs_dir_item *di;
	int name_len;
	unsigned long ptr;
	unsigned long ptr_end;
	struct btrfs_path *fixup_path = NULL;

	ptr = btrfs_item_ptr_offset(eb, slot);
	ptr_end = ptr + item_size;
	while (ptr < ptr_end) {
		di = (struct btrfs_dir_item *)ptr;
		name_len = btrfs_dir_name_len(eb, di);
		ret = replay_one_name(trans, root, path, eb, di, key);
		if (ret < 0)
			break;
		ptr = (unsigned long)(di + 1);
		ptr += name_len;

		/*
		 * If this entry refers to a non-directory (directories can not
		 * have a link count > 1) and it was added in the transaction
		 * that was not committed, make sure we fixup the link count of
		 * the inode the entry points to. Otherwise something like
		 * the following would result in a directory pointing to an
		 * inode with a wrong link count that does not account for this
		 * dir entry:
		 *
		 * mkdir testdir
		 * touch testdir/foo
		 * touch testdir/bar
		 * sync
		 *
		 * ln testdir/bar testdir/bar_link
		 * ln testdir/foo testdir/foo_link
		 * xfs_io -c "fsync" testdir/bar
		 *
		 * <power failure>
		 *
		 * mount fs, log replay happens
		 *
		 * File foo would remain with a link count of 1 when it has two
		 * entries pointing to it in the directory testdir. This would
		 * make it impossible to ever delete the parent directory, as
		 * it would result in stale dentries that can never be deleted.
		 */
		if (ret == 1 && btrfs_dir_type(eb, di) != BTRFS_FT_DIR) {
			struct btrfs_key di_key;

			if (!fixup_path) {
				fixup_path = btrfs_alloc_path();
				if (!fixup_path) {
					ret = -ENOMEM;
					break;
				}
			}

			btrfs_dir_item_key_to_cpu(eb, di, &di_key);
			ret = link_to_fixup_dir(trans, root, fixup_path,
						di_key.objectid);
			if (ret)
				break;
			ret = 0;
		}
	}
	btrfs_free_path(fixup_path);
	return ret;
}
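/*
 * Illustrative userspace sketch (not kernel code): the scenario from the
 * comment in replay_one_dir_item() above. Both files gain a second name,
 * but only one of them is fsync'ed before the crash; replay must still fix
 * foo's link count.
 */
#if 0
#include <fcntl.h>
#include <unistd.h>

static void reproduce(void)
{
	int fd;

	/* mkdir/touch/sync steps elided; see the comment above */
	link("testdir/bar", "testdir/bar_link");
	link("testdir/foo", "testdir/foo_link");

	fd = open("testdir/bar", O_RDONLY);
	if (fd >= 0) {
		fsync(fd);	/* logs testdir, and with it foo's new name */
		close(fd);
	}
	/* <power failure>, then mounting triggers log replay */
}
#endif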
/*
 * directory replay has two parts.  There are the standard directory
 * items in the log copied from the subvolume, and range items
 * created in the log while the subvolume was logged.
 *
 * The range items tell us which parts of the key space the log
 * is authoritative for.  During replay, if a key in the subvolume
 * directory is in a logged range item, but not actually in the log
 * that means it was deleted from the directory before the fsync
 * and should be removed.
 */
static noinline int find_dir_range(struct btrfs_root *root,
				   struct btrfs_path *path,
				   u64 dirid, int key_type,
				   u64 *start_ret, u64 *end_ret)
{
	struct btrfs_key key;
	u64 found_end;
	struct btrfs_dir_log_item *item;
	int ret;
	int nritems;

	if (*start_ret == (u64)-1)
		return 1;

	key.objectid = dirid;
	key.type = key_type;
	key.offset = *start_ret;

	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	if (ret < 0)
		goto out;
	if (ret > 0) {
		if (path->slots[0] == 0)
			goto out;
		path->slots[0]--;
	}

	btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);

	if (key.type != key_type || key.objectid != dirid) {
		ret = 1;
		goto next;
	}
	item = btrfs_item_ptr(path->nodes[0], path->slots[0],
			      struct btrfs_dir_log_item);
	found_end = btrfs_dir_log_end(path->nodes[0], item);

	if (*start_ret >= key.offset && *start_ret <= found_end) {
		ret = 0;
		*start_ret = key.offset;
		*end_ret = found_end;
		goto out;
	}
	ret = 1;
next:
	/* check the next slot in the tree to see if it is a valid item */
	nritems = btrfs_header_nritems(path->nodes[0]);
	path->slots[0]++;
	if (path->slots[0] >= nritems) {
		ret = btrfs_next_leaf(root, path);
		if (ret)
			goto out;
	}

	btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);

	if (key.type != key_type || key.objectid != dirid) {
		ret = 1;
		goto out;
	}
	item = btrfs_item_ptr(path->nodes[0], path->slots[0],
			      struct btrfs_dir_log_item);
	found_end = btrfs_dir_log_end(path->nodes[0], item);
	*start_ret = key.offset;
	*end_ret = found_end;
	ret = 0;
out:
	btrfs_release_path(path);
	return ret;
}
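/*
 * Illustrative sketch (not kernel code): the contract find_dir_range()
 * implements, reduced to plain ranges. Given [start, end] intervals the
 * log is authoritative for, a directory key is only a candidate for
 * deletion replay if some interval covers it.
 */
#if 0
#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

struct demo_range { uint64_t start, end; };

static bool demo_key_in_logged_range(const struct demo_range *ranges,
				     size_t nr, uint64_t key)
{
	size_t i;

	for (i = 0; i < nr; i++)
		if (key >= ranges[i].start && key <= ranges[i].end)
			return true;
	return false;
}
#endif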
/*
 * this looks for a given directory item in the log.  If the directory
 * item is not in the log, the item is removed and the inode it points
 * to is unlinked
 */
static noinline int check_item_in_log(struct btrfs_trans_handle *trans,
				      struct btrfs_root *root,
				      struct btrfs_root *log,
				      struct btrfs_path *path,
				      struct btrfs_path *log_path,
				      struct inode *dir,
				      struct btrfs_key *dir_key)
{
	int ret;
	struct extent_buffer *eb;
	int slot;
	u32 item_size;
	struct btrfs_dir_item *di;
	struct btrfs_dir_item *log_di;
	int name_len;
	unsigned long ptr;
	unsigned long ptr_end;
	char *name;
	struct inode *inode;
	struct btrfs_key location;

again:
	eb = path->nodes[0];
	slot = path->slots[0];
	item_size = btrfs_item_size_nr(eb, slot);
	ptr = btrfs_item_ptr_offset(eb, slot);
	ptr_end = ptr + item_size;
	while (ptr < ptr_end) {
		di = (struct btrfs_dir_item *)ptr;
		name_len = btrfs_dir_name_len(eb, di);
		name = kmalloc(name_len, GFP_NOFS);
		if (!name) {
			ret = -ENOMEM;
			goto out;
		}
		read_extent_buffer(eb, name, (unsigned long)(di + 1),
				   name_len);
		log_di = NULL;
		if (log && dir_key->type == BTRFS_DIR_ITEM_KEY) {
			log_di = btrfs_lookup_dir_item(trans, log, log_path,
						       dir_key->objectid,
						       name, name_len, 0);
		} else if (log && dir_key->type == BTRFS_DIR_INDEX_KEY) {
			log_di = btrfs_lookup_dir_index_item(trans, log,
							     log_path,
							     dir_key->objectid,
							     dir_key->offset,
							     name, name_len, 0);
		}
		if (!log_di || log_di == ERR_PTR(-ENOENT)) {
			btrfs_dir_item_key_to_cpu(eb, di, &location);
			btrfs_release_path(path);
			btrfs_release_path(log_path);
			inode = read_one_inode(root, location.objectid);
			if (!inode) {
				kfree(name);
				return -EIO;
			}

			ret = link_to_fixup_dir(trans, root,
						path, location.objectid);
			if (ret) {
				kfree(name);
				iput(inode);
				goto out;
			}

			inc_nlink(inode);
			ret = btrfs_unlink_inode(trans, root, BTRFS_I(dir),
						 BTRFS_I(inode), name, name_len);
			if (!ret)
				ret = btrfs_run_delayed_items(trans);
			kfree(name);
			iput(inode);
			if (ret)
				goto out;

			/* there might still be more names under this key
			 * check and repeat if required
			 */
			ret = btrfs_search_slot(NULL, root, dir_key, path,
						0, 0);
			if (ret == 0)
				goto again;
			ret = 0;
			goto out;
		} else if (IS_ERR(log_di)) {
			kfree(name);
			return PTR_ERR(log_di);
		}
		btrfs_release_path(log_path);
		kfree(name);

		ptr = (unsigned long)(di + 1);
		ptr += name_len;
	}
	ret = 0;
out:
	btrfs_release_path(path);
	btrfs_release_path(log_path);
	return ret;
}
static int replay_xattr_deletes(struct btrfs_trans_handle *trans,
				struct btrfs_root *root,
				struct btrfs_root *log,
				struct btrfs_path *path,
				const u64 ino)
{
	struct btrfs_key search_key;
	struct btrfs_path *log_path;
	int i;
	int nritems;
	int ret;

	log_path = btrfs_alloc_path();
	if (!log_path)
		return -ENOMEM;

	search_key.objectid = ino;
	search_key.type = BTRFS_XATTR_ITEM_KEY;
	search_key.offset = 0;
again:
	ret = btrfs_search_slot(NULL, root, &search_key, path, 0, 0);
	if (ret < 0)
		goto out;
process_leaf:
	nritems = btrfs_header_nritems(path->nodes[0]);
	for (i = path->slots[0]; i < nritems; i++) {
		struct btrfs_key key;
		struct btrfs_dir_item *di;
		struct btrfs_dir_item *log_di;
		u32 total_size;
		u32 cur;

		btrfs_item_key_to_cpu(path->nodes[0], &key, i);
		if (key.objectid != ino || key.type != BTRFS_XATTR_ITEM_KEY) {
			ret = 0;
			goto out;
		}

		di = btrfs_item_ptr(path->nodes[0], i, struct btrfs_dir_item);
		total_size = btrfs_item_size_nr(path->nodes[0], i);
		cur = 0;
		while (cur < total_size) {
			u16 name_len = btrfs_dir_name_len(path->nodes[0], di);
			u16 data_len = btrfs_dir_data_len(path->nodes[0], di);
			u32 this_len = sizeof(*di) + name_len + data_len;
			char *name;

			name = kmalloc(name_len, GFP_NOFS);
			if (!name) {
				ret = -ENOMEM;
				goto out;
			}
			read_extent_buffer(path->nodes[0], name,
					   (unsigned long)(di + 1), name_len);

			log_di = btrfs_lookup_xattr(NULL, log, log_path, ino,
						    name, name_len, 0);
			btrfs_release_path(log_path);
			if (!log_di) {
				/* Doesn't exist in log tree, so delete it. */
				btrfs_release_path(path);
				di = btrfs_lookup_xattr(trans, root, path, ino,
							name, name_len, -1);
				kfree(name);
				if (IS_ERR(di)) {
					ret = PTR_ERR(di);
					goto out;
				}
				ret = btrfs_delete_one_dir_name(trans, root,
								path, di);
				if (ret)
					goto out;
				btrfs_release_path(path);
				search_key = key;
				goto again;
			}
			if (IS_ERR(log_di)) {
				ret = PTR_ERR(log_di);
				kfree(name);
				goto out;
			}
			cur += this_len;
			kfree(name);
			di = (struct btrfs_dir_item *)((char *)di + this_len);
		}
	}
	ret = btrfs_next_leaf(root, path);
	if (ret > 0)
		ret = 0;
	else if (ret == 0)
		goto process_leaf;
out:
	btrfs_free_path(log_path);
	btrfs_release_path(path);
	return ret;
}
/*
 * deletion replay happens before we copy any new directory items
 * out of the log or out of backreferences from inodes.  It
 * scans the log to find ranges of keys that log is authoritative for,
 * and then scans the directory to find items in those ranges that are
 * not present in the log.
 *
 * Anything we don't find in the log is unlinked and removed from the
 * directory.
 */
static noinline int replay_dir_deletes(struct btrfs_trans_handle *trans,
				       struct btrfs_root *root,
				       struct btrfs_root *log,
				       struct btrfs_path *path,
				       u64 dirid, int del_all)
{
	u64 range_start;
	u64 range_end;
	int key_type = BTRFS_DIR_LOG_ITEM_KEY;
	int ret = 0;
	struct btrfs_key dir_key;
	struct btrfs_key found_key;
	struct btrfs_path *log_path;
	struct inode *dir;

	dir_key.objectid = dirid;
	dir_key.type = BTRFS_DIR_ITEM_KEY;
	log_path = btrfs_alloc_path();
	if (!log_path)
		return -ENOMEM;

	dir = read_one_inode(root, dirid);
	/* it isn't an error if the inode isn't there, that can happen
	 * because we replay the deletes before we copy in the inode item
	 * from the log
	 */
	if (!dir) {
		btrfs_free_path(log_path);
		return 0;
	}
again:
	range_start = 0;
	range_end = 0;
	while (1) {
		if (del_all)
			range_end = (u64)-1;
		else {
			ret = find_dir_range(log, path, dirid, key_type,
					     &range_start, &range_end);
			if (ret != 0)
				break;
		}

		dir_key.offset = range_start;
		while (1) {
			int nritems;
			ret = btrfs_search_slot(NULL, root, &dir_key, path,
						0, 0);
			if (ret < 0)
				goto out;

			nritems = btrfs_header_nritems(path->nodes[0]);
			if (path->slots[0] >= nritems) {
				ret = btrfs_next_leaf(root, path);
				if (ret == 1)
					break;
				else if (ret < 0)
					goto out;
			}
			btrfs_item_key_to_cpu(path->nodes[0], &found_key,
					      path->slots[0]);
			if (found_key.objectid != dirid ||
			    found_key.type != dir_key.type)
				goto next_type;

			if (found_key.offset > range_end)
				break;

			ret = check_item_in_log(trans, root, log, path,
						log_path, dir,
						&found_key);
			if (ret)
				goto out;
			if (found_key.offset == (u64)-1)
				break;
			dir_key.offset = found_key.offset + 1;
		}
		btrfs_release_path(path);
		if (range_end == (u64)-1)
			break;
		range_start = range_end + 1;
	}

next_type:
	ret = 0;
	if (key_type == BTRFS_DIR_LOG_ITEM_KEY) {
		key_type = BTRFS_DIR_LOG_INDEX_KEY;
		dir_key.type = BTRFS_DIR_INDEX_KEY;
		btrfs_release_path(path);
		goto again;
	}
out:
	btrfs_release_path(path);
	btrfs_free_path(log_path);
	iput(dir);
	return ret;
}
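
/*
 * Worked example (illustrative numbers, not from the sources): suppose
 * the log holds a dir log item for directory 256 covering offsets
 * [2, 10], and the subvolume directory has entries at offsets 3, 5 and
 * 8.  If only offsets 3 and 8 exist in the log, replay_dir_deletes()
 * treats the entry at offset 5 as deleted before the fsync and unlinks
 * it.  Entries outside [2, 10] are left alone, because the log is not
 * authoritative for them.
 */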
/*
 * the process_func used to replay items from the log tree.  This
 * gets called in two different stages.  The first stage just looks
 * for inodes and makes sure they are all copied into the subvolume.
 *
 * The second stage copies all the other item types from the log into
 * the subvolume.  The two stage approach is slower, but gets rid of
 * lots of complexity around inodes referencing other inodes that exist
 * only in the log (references come from either directory items or inode
 * back refs).
 */
static int replay_one_buffer(struct btrfs_root *log, struct extent_buffer *eb,
			     struct walk_control *wc, u64 gen, int level)
{
	int nritems;
	struct btrfs_path *path;
	struct btrfs_root *root = wc->replay_dest;
	struct btrfs_key key;
	int i;
	int ret;

	ret = btrfs_read_buffer(eb, gen, level, NULL);
	if (ret)
		return ret;

	level = btrfs_header_level(eb);

	if (level != 0)
		return 0;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	nritems = btrfs_header_nritems(eb);
	for (i = 0; i < nritems; i++) {
		btrfs_item_key_to_cpu(eb, &key, i);

		/* inode keys are done during the first stage */
		if (key.type == BTRFS_INODE_ITEM_KEY &&
		    wc->stage == LOG_WALK_REPLAY_INODES) {
			struct btrfs_inode_item *inode_item;
			u32 mode;

			inode_item = btrfs_item_ptr(eb, i,
					    struct btrfs_inode_item);
			/*
			 * If we have a tmpfile (O_TMPFILE) that got fsync'ed
			 * and never got linked before the fsync, skip it, as
			 * replaying it is pointless since it would be deleted
			 * later. We skip logging tmpfiles, but it's always
			 * possible we are replaying a log created with a kernel
			 * that used to log tmpfiles.
			 */
			if (btrfs_inode_nlink(eb, inode_item) == 0) {
				wc->ignore_cur_inode = true;
				continue;
			} else {
				wc->ignore_cur_inode = false;
			}
			ret = replay_xattr_deletes(wc->trans, root, log,
						   path, key.objectid);
			if (ret)
				break;
			mode = btrfs_inode_mode(eb, inode_item);
			if (S_ISDIR(mode)) {
				ret = replay_dir_deletes(wc->trans,
					 root, log, path, key.objectid, 0);
				if (ret)
					break;
			}
			ret = overwrite_item(wc->trans, root, path,
					     eb, i, &key);
			if (ret)
				break;

			/*
			 * Before replaying extents, truncate the inode to its
			 * size. We need to do it now and not after log replay
			 * because before an fsync we can have prealloc extents
			 * added beyond the inode's i_size. If we did it after,
			 * through orphan cleanup for example, we would drop
			 * those prealloc extents just after replaying them.
			 */
			if (S_ISREG(mode)) {
				struct inode *inode;
				u64 from;

				inode = read_one_inode(root, key.objectid);
				if (!inode) {
					ret = -EIO;
					break;
				}
				from = ALIGN(i_size_read(inode),
					     root->fs_info->sectorsize);
				ret = btrfs_drop_extents(wc->trans, root, inode,
							 from, (u64)-1, 1);
				if (!ret) {
					/* Update the inode's nbytes. */
					ret = btrfs_update_inode(wc->trans,
								 root, inode);
				}
				iput(inode);
				if (ret)
					break;
			}

			ret = link_to_fixup_dir(wc->trans, root,
						path, key.objectid);
			if (ret)
				break;
		}

		if (wc->ignore_cur_inode)
			continue;

		if (key.type == BTRFS_DIR_INDEX_KEY &&
		    wc->stage == LOG_WALK_REPLAY_DIR_INDEX) {
			ret = replay_one_dir_item(wc->trans, root, path,
						  eb, i, &key);
			if (ret)
				break;
		}

		if (wc->stage < LOG_WALK_REPLAY_ALL)
			continue;

		/* these keys are simply copied */
		if (key.type == BTRFS_XATTR_ITEM_KEY) {
			ret = overwrite_item(wc->trans, root, path,
					     eb, i, &key);
			if (ret)
				break;
		} else if (key.type == BTRFS_INODE_REF_KEY ||
			   key.type == BTRFS_INODE_EXTREF_KEY) {
			ret = add_inode_ref(wc->trans, root, log, path,
					    eb, i, &key);
			if (ret && ret != -ENOENT)
				break;
			ret = 0;
		} else if (key.type == BTRFS_EXTENT_DATA_KEY) {
			ret = replay_one_extent(wc->trans, root, path,
						eb, i, &key);
			if (ret)
				break;
		} else if (key.type == BTRFS_DIR_ITEM_KEY) {
			ret = replay_one_dir_item(wc->trans, root, path,
						  eb, i, &key);
			if (ret)
				break;
		}
	}
	btrfs_free_path(path);
	return ret;
}
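
/*
 * Minimal sketch (illustrative; replay_inode(), replay_dir_index() and
 * replay_anything_else() are hypothetical helpers) of the stage-gated
 * dispatch replay_one_buffer() performs for each key it visits:
 *
 *	switch (stage) {
 *	case LOG_WALK_REPLAY_INODES:
 *		if (key.type == BTRFS_INODE_ITEM_KEY)
 *			replay_inode(&key);
 *		break;
 *	case LOG_WALK_REPLAY_DIR_INDEX:
 *		if (key.type == BTRFS_DIR_INDEX_KEY)
 *			replay_dir_index(&key);
 *		break;
 *	case LOG_WALK_REPLAY_ALL:
 *		replay_anything_else(&key);
 *		break;
 *	}
 */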
static noinline int walk_down_log_tree(struct btrfs_trans_handle *trans,
				   struct btrfs_root *root,
				   struct btrfs_path *path, int *level,
				   struct walk_control *wc)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	u64 root_owner;
	u64 bytenr;
	u64 ptr_gen;
	struct extent_buffer *next;
	struct extent_buffer *cur;
	struct extent_buffer *parent;
	u32 blocksize;
	int ret = 0;

	WARN_ON(*level < 0);
	WARN_ON(*level >= BTRFS_MAX_LEVEL);

	while (*level > 0) {
		struct btrfs_key first_key;

		WARN_ON(*level < 0);
		WARN_ON(*level >= BTRFS_MAX_LEVEL);
		cur = path->nodes[*level];

		WARN_ON(btrfs_header_level(cur) != *level);

		if (path->slots[*level] >=
		    btrfs_header_nritems(cur))
			break;

		bytenr = btrfs_node_blockptr(cur, path->slots[*level]);
		ptr_gen = btrfs_node_ptr_generation(cur, path->slots[*level]);
		btrfs_node_key_to_cpu(cur, &first_key, path->slots[*level]);
		blocksize = fs_info->nodesize;

		parent = path->nodes[*level];
		root_owner = btrfs_header_owner(parent);

		next = btrfs_find_create_tree_block(fs_info, bytenr);
		if (IS_ERR(next))
			return PTR_ERR(next);

		if (*level == 1) {
			ret = wc->process_func(root, next, wc, ptr_gen,
					       *level - 1);
			if (ret) {
				free_extent_buffer(next);
				return ret;
			}

			path->slots[*level]++;
			if (wc->free) {
				ret = btrfs_read_buffer(next, ptr_gen,
							*level - 1, &first_key);
				if (ret) {
					free_extent_buffer(next);
					return ret;
				}

				if (trans) {
					btrfs_tree_lock(next);
					btrfs_set_lock_blocking_write(next);
					clean_tree_block(fs_info, next);
					btrfs_wait_tree_block_writeback(next);
					btrfs_tree_unlock(next);
				} else {
					if (test_and_clear_bit(EXTENT_BUFFER_DIRTY, &next->bflags))
						clear_extent_buffer_dirty(next);
				}

				WARN_ON(root_owner !=
					BTRFS_TREE_LOG_OBJECTID);
				ret = btrfs_free_and_pin_reserved_extent(
							fs_info, bytenr,
							blocksize);
				if (ret) {
					free_extent_buffer(next);
					return ret;
				}
			}
			free_extent_buffer(next);
			continue;
		}
		ret = btrfs_read_buffer(next, ptr_gen, *level - 1, &first_key);
		if (ret) {
			free_extent_buffer(next);
			return ret;
		}

		WARN_ON(*level <= 0);
		if (path->nodes[*level-1])
			free_extent_buffer(path->nodes[*level-1]);
		path->nodes[*level-1] = next;
		*level = btrfs_header_level(next);
		path->slots[*level] = 0;
		cond_resched();
	}
	WARN_ON(*level < 0);
	WARN_ON(*level >= BTRFS_MAX_LEVEL);

	path->slots[*level] = btrfs_header_nritems(path->nodes[*level]);

	cond_resched();
	return 0;
}
static noinline int walk_up_log_tree(struct btrfs_trans_handle *trans,
				 struct btrfs_root *root,
				 struct btrfs_path *path, int *level,
				 struct walk_control *wc)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	u64 root_owner;
	int i;
	int slot;
	int ret;

	for (i = *level; i < BTRFS_MAX_LEVEL - 1 && path->nodes[i]; i++) {
		slot = path->slots[i];
		if (slot + 1 < btrfs_header_nritems(path->nodes[i])) {
			path->slots[i]++;
			*level = i;
			WARN_ON(*level == 0);
			return 0;
		} else {
			struct extent_buffer *parent;
			if (path->nodes[*level] == root->node)
				parent = path->nodes[*level];
			else
				parent = path->nodes[*level + 1];

			root_owner = btrfs_header_owner(parent);
			ret = wc->process_func(root, path->nodes[*level], wc,
				 btrfs_header_generation(path->nodes[*level]),
				 *level);
			if (ret)
				return ret;

			if (wc->free) {
				struct extent_buffer *next;

				next = path->nodes[*level];

				if (trans) {
					btrfs_tree_lock(next);
					btrfs_set_lock_blocking_write(next);
					clean_tree_block(fs_info, next);
					btrfs_wait_tree_block_writeback(next);
					btrfs_tree_unlock(next);
				} else {
					if (test_and_clear_bit(EXTENT_BUFFER_DIRTY, &next->bflags))
						clear_extent_buffer_dirty(next);
				}

				WARN_ON(root_owner != BTRFS_TREE_LOG_OBJECTID);
				ret = btrfs_free_and_pin_reserved_extent(
						fs_info,
						path->nodes[*level]->start,
						path->nodes[*level]->len);
				if (ret)
					return ret;
			}
			free_extent_buffer(path->nodes[*level]);
			path->nodes[*level] = NULL;
			*level = i + 1;
		}
	}
	return 1;
}
/*
 * drop the reference count on the tree rooted at 'snap'.  This traverses
 * the tree freeing any blocks that have a ref count of zero after being
 * decremented.
 */
static int walk_log_tree(struct btrfs_trans_handle *trans,
			 struct btrfs_root *log, struct walk_control *wc)
{
	struct btrfs_fs_info *fs_info = log->fs_info;
	int ret = 0;
	int wret;
	int level;
	struct btrfs_path *path;
	int orig_level;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	level = btrfs_header_level(log->node);
	orig_level = level;
	path->nodes[level] = log->node;
	extent_buffer_get(log->node);
	path->slots[level] = 0;

	while (1) {
		wret = walk_down_log_tree(trans, log, path, &level, wc);
		if (wret > 0)
			break;
		if (wret < 0) {
			ret = wret;
			goto out;
		}

		wret = walk_up_log_tree(trans, log, path, &level, wc);
		if (wret > 0)
			break;
		if (wret < 0) {
			ret = wret;
			goto out;
		}
	}

	/* was the root node processed? if not, catch it here */
	if (path->nodes[orig_level]) {
		ret = wc->process_func(log, path->nodes[orig_level], wc,
			 btrfs_header_generation(path->nodes[orig_level]),
			 orig_level);
		if (ret)
			goto out;
		if (wc->free) {
			struct extent_buffer *next;

			next = path->nodes[orig_level];

			if (trans) {
				btrfs_tree_lock(next);
				btrfs_set_lock_blocking_write(next);
				clean_tree_block(fs_info, next);
				btrfs_wait_tree_block_writeback(next);
				btrfs_tree_unlock(next);
			} else {
				if (test_and_clear_bit(EXTENT_BUFFER_DIRTY, &next->bflags))
					clear_extent_buffer_dirty(next);
			}

			WARN_ON(log->root_key.objectid !=
				BTRFS_TREE_LOG_OBJECTID);
			ret = btrfs_free_and_pin_reserved_extent(fs_info,
							next->start, next->len);
			if (ret)
				goto out;
		}
	}

out:
	btrfs_free_path(path);
	return ret;
}
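
/*
 * Sketch of the traversal above (illustrative): walk_log_tree() is a
 * classic iterative depth-first walk split in two halves.
 * walk_down_log_tree() descends along node pointers, running
 * wc->process_func on each block on the way down; walk_up_log_tree()
 * pops finished levels and steps to the next sibling when one exists:
 *
 *	while (1) {
 *		if (walk_down(...) > 0)		// hit the leaf level
 *			break;
 *		if (walk_up(...) > 0)		// popped past the root
 *			break;
 *	}
 */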
/*
 * helper function to update the item for a given subvolumes log root
 * in the tree of log roots
 */
static int update_log_root(struct btrfs_trans_handle *trans,
			   struct btrfs_root *log)
{
	struct btrfs_fs_info *fs_info = log->fs_info;
	int ret;

	if (log->log_transid == 1) {
		/* insert root item on the first sync */
		ret = btrfs_insert_root(trans, fs_info->log_root_tree,
				&log->root_key, &log->root_item);
	} else {
		ret = btrfs_update_root(trans, fs_info->log_root_tree,
				&log->root_key, &log->root_item);
	}
	return ret;
}
static void wait_log_commit(struct btrfs_root *root, int transid)
{
	DEFINE_WAIT(wait);
	int index = transid % 2;

	/*
	 * we only allow two pending log transactions at a time,
	 * so we know that if ours is more than 2 older than the
	 * current transaction, we're done
	 */
	for (;;) {
		prepare_to_wait(&root->log_commit_wait[index],
				&wait, TASK_UNINTERRUPTIBLE);

		if (!(root->log_transid_committed < transid &&
		      atomic_read(&root->log_commit[index])))
			break;

		mutex_unlock(&root->log_mutex);
		schedule();
		mutex_lock(&root->log_mutex);
	}
	finish_wait(&root->log_commit_wait[index], &wait);
}
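
/*
 * Example of the two-slot scheme (illustrative): log transids N and
 * N + 1 can be in flight at once, and transid % 2 picks the slot, so
 * consecutive log commits ping-pong between log_commit[0] and
 * log_commit[1]:
 *
 *	transid 4 -> index 0
 *	transid 5 -> index 1
 *	transid 6 -> index 0	(slot reused only after transid 4 committed)
 */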
static void wait_for_writer(struct btrfs_root *root)
{
	DEFINE_WAIT(wait);

	for (;;) {
		prepare_to_wait(&root->log_writer_wait, &wait,
				TASK_UNINTERRUPTIBLE);
		if (!atomic_read(&root->log_writers))
			break;

		mutex_unlock(&root->log_mutex);
		schedule();
		mutex_lock(&root->log_mutex);
	}
	finish_wait(&root->log_writer_wait, &wait);
}
static inline void btrfs_remove_log_ctx(struct btrfs_root *root,
					struct btrfs_log_ctx *ctx)
{
	if (!ctx)
		return;

	mutex_lock(&root->log_mutex);
	list_del_init(&ctx->list);
	mutex_unlock(&root->log_mutex);
}

/*
 * Invoked in log mutex context, or be sure there is no other task which
 * can access the list.
 */
static inline void btrfs_remove_all_log_ctxs(struct btrfs_root *root,
					     int index, int error)
{
	struct btrfs_log_ctx *ctx;
	struct btrfs_log_ctx *safe;

	list_for_each_entry_safe(ctx, safe, &root->log_ctxs[index], list) {
		list_del_init(&ctx->list);
		ctx->log_ret = error;
	}

	INIT_LIST_HEAD(&root->log_ctxs[index]);
}
/*
 * btrfs_sync_log sends a given tree log down to the disk and
 * updates the super blocks to record it.  When this call is done,
 * you know that any inodes previously logged are safely on disk only
 * if it returns 0.
 *
 * Any other return value means you need to call btrfs_commit_transaction.
 * Some of the edge cases for fsyncing directories that have had unlinks
 * or renames done in the past mean that sometimes the only safe
 * fsync is to commit the whole FS.  When btrfs_sync_log returns -EAGAIN,
 * that has happened.
 */
int btrfs_sync_log(struct btrfs_trans_handle *trans,
		   struct btrfs_root *root, struct btrfs_log_ctx *ctx)
{
	int index1;
	int index2;
	int mark;
	int ret;
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_root *log = root->log_root;
	struct btrfs_root *log_root_tree = fs_info->log_root_tree;
	int log_transid = 0;
	struct btrfs_log_ctx root_log_ctx;
	struct blk_plug plug;

	mutex_lock(&root->log_mutex);
	log_transid = ctx->log_transid;
	if (root->log_transid_committed >= log_transid) {
		mutex_unlock(&root->log_mutex);
		return ctx->log_ret;
	}

	index1 = log_transid % 2;
	if (atomic_read(&root->log_commit[index1])) {
		wait_log_commit(root, log_transid);
		mutex_unlock(&root->log_mutex);
		return ctx->log_ret;
	}
	ASSERT(log_transid == root->log_transid);
	atomic_set(&root->log_commit[index1], 1);

	/* wait for previous tree log sync to complete */
	if (atomic_read(&root->log_commit[(index1 + 1) % 2]))
		wait_log_commit(root, log_transid - 1);

	while (1) {
		int batch = atomic_read(&root->log_batch);
		/* when we're on an ssd, just kick the log commit out */
		if (!btrfs_test_opt(fs_info, SSD) &&
		    test_bit(BTRFS_ROOT_MULTI_LOG_TASKS, &root->state)) {
			mutex_unlock(&root->log_mutex);
			schedule_timeout_uninterruptible(1);
			mutex_lock(&root->log_mutex);
		}
		wait_for_writer(root);
		if (batch == atomic_read(&root->log_batch))
			break;
	}

	/* bail out if we need to do a full commit */
	if (btrfs_need_log_full_commit(fs_info, trans)) {
		ret = -EAGAIN;
		mutex_unlock(&root->log_mutex);
		goto out;
	}

	if (log_transid % 2 == 0)
		mark = EXTENT_DIRTY;
	else
		mark = EXTENT_NEW;

	/* we start IO on all the marked extents here, but we don't actually
	 * wait for them until later.
	 */
	blk_start_plug(&plug);
	ret = btrfs_write_marked_extents(fs_info, &log->dirty_log_pages, mark);
	if (ret) {
		blk_finish_plug(&plug);
		btrfs_abort_transaction(trans, ret);
		btrfs_set_log_full_commit(fs_info, trans);
		mutex_unlock(&root->log_mutex);
		goto out;
	}

	btrfs_set_root_node(&log->root_item, log->node);

	root->log_transid++;
	log->log_transid = root->log_transid;
	root->log_start_pid = 0;
	/*
	 * IO has been started, blocks of the log tree have WRITTEN flag set
	 * in their headers. new modifications of the log will be written to
	 * new positions. so it's safe to allow log writers to go in.
	 */
	mutex_unlock(&root->log_mutex);

	btrfs_init_log_ctx(&root_log_ctx, NULL);

	mutex_lock(&log_root_tree->log_mutex);
	atomic_inc(&log_root_tree->log_batch);
	atomic_inc(&log_root_tree->log_writers);

	index2 = log_root_tree->log_transid % 2;
	list_add_tail(&root_log_ctx.list, &log_root_tree->log_ctxs[index2]);
	root_log_ctx.log_transid = log_root_tree->log_transid;

	mutex_unlock(&log_root_tree->log_mutex);

	ret = update_log_root(trans, log);

	mutex_lock(&log_root_tree->log_mutex);
	if (atomic_dec_and_test(&log_root_tree->log_writers)) {
		/* atomic_dec_and_test implies a barrier */
		cond_wake_up_nomb(&log_root_tree->log_writer_wait);
	}

	if (ret) {
		if (!list_empty(&root_log_ctx.list))
			list_del_init(&root_log_ctx.list);

		blk_finish_plug(&plug);
		btrfs_set_log_full_commit(fs_info, trans);

		if (ret != -ENOSPC) {
			btrfs_abort_transaction(trans, ret);
			mutex_unlock(&log_root_tree->log_mutex);
			goto out;
		}
		btrfs_wait_tree_log_extents(log, mark);
		mutex_unlock(&log_root_tree->log_mutex);
		ret = -EAGAIN;
		goto out;
	}

	if (log_root_tree->log_transid_committed >= root_log_ctx.log_transid) {
		blk_finish_plug(&plug);
		list_del_init(&root_log_ctx.list);
		mutex_unlock(&log_root_tree->log_mutex);
		ret = root_log_ctx.log_ret;
		goto out;
	}

	index2 = root_log_ctx.log_transid % 2;
	if (atomic_read(&log_root_tree->log_commit[index2])) {
		blk_finish_plug(&plug);
		ret = btrfs_wait_tree_log_extents(log, mark);
		wait_log_commit(log_root_tree,
				root_log_ctx.log_transid);
		mutex_unlock(&log_root_tree->log_mutex);
		if (!ret)
			ret = root_log_ctx.log_ret;
		goto out;
	}
	ASSERT(root_log_ctx.log_transid == log_root_tree->log_transid);
	atomic_set(&log_root_tree->log_commit[index2], 1);

	if (atomic_read(&log_root_tree->log_commit[(index2 + 1) % 2])) {
		wait_log_commit(log_root_tree,
				root_log_ctx.log_transid - 1);
	}

	wait_for_writer(log_root_tree);

	/*
	 * now that we've moved on to the tree of log tree roots,
	 * check the full commit flag again
	 */
	if (btrfs_need_log_full_commit(fs_info, trans)) {
		blk_finish_plug(&plug);
		btrfs_wait_tree_log_extents(log, mark);
		mutex_unlock(&log_root_tree->log_mutex);
		ret = -EAGAIN;
		goto out_wake_log_root;
	}

	ret = btrfs_write_marked_extents(fs_info,
					 &log_root_tree->dirty_log_pages,
					 EXTENT_DIRTY | EXTENT_NEW);
	blk_finish_plug(&plug);
	if (ret) {
		btrfs_set_log_full_commit(fs_info, trans);
		btrfs_abort_transaction(trans, ret);
		mutex_unlock(&log_root_tree->log_mutex);
		goto out_wake_log_root;
	}
	ret = btrfs_wait_tree_log_extents(log, mark);
	if (!ret)
		ret = btrfs_wait_tree_log_extents(log_root_tree,
						  EXTENT_NEW | EXTENT_DIRTY);
	if (ret) {
		btrfs_set_log_full_commit(fs_info, trans);
		mutex_unlock(&log_root_tree->log_mutex);
		goto out_wake_log_root;
	}

	btrfs_set_super_log_root(fs_info->super_for_commit,
				 log_root_tree->node->start);
	btrfs_set_super_log_root_level(fs_info->super_for_commit,
				       btrfs_header_level(log_root_tree->node));

	log_root_tree->log_transid++;
	mutex_unlock(&log_root_tree->log_mutex);

	/*
	 * Nobody else is going to jump in and write the ctree
	 * super here because the log_commit atomic below is protecting
	 * us.  We must be called with a transaction handle pinning
	 * the running transaction open, so a full commit can't hop
	 * in and cause problems either.
	 */
	ret = write_all_supers(fs_info, 1);
	if (ret) {
		btrfs_set_log_full_commit(fs_info, trans);
		btrfs_abort_transaction(trans, ret);
		goto out_wake_log_root;
	}

	mutex_lock(&root->log_mutex);
	if (root->last_log_commit < log_transid)
		root->last_log_commit = log_transid;
	mutex_unlock(&root->log_mutex);

out_wake_log_root:
	mutex_lock(&log_root_tree->log_mutex);
	btrfs_remove_all_log_ctxs(log_root_tree, index2, ret);

	log_root_tree->log_transid_committed++;
	atomic_set(&log_root_tree->log_commit[index2], 0);
	mutex_unlock(&log_root_tree->log_mutex);

	/*
	 * The barrier before waitqueue_active (in cond_wake_up) is needed so
	 * all the updates above are seen by the woken threads. It might not be
	 * necessary, but proving that seems to be hard.
	 */
	cond_wake_up(&log_root_tree->log_commit_wait[index2]);
out:
	mutex_lock(&root->log_mutex);
	btrfs_remove_all_log_ctxs(root, index1, ret);
	root->log_transid_committed++;
	atomic_set(&root->log_commit[index1], 0);
	mutex_unlock(&root->log_mutex);

	/*
	 * The barrier before waitqueue_active (in cond_wake_up) is needed so
	 * all the updates above are seen by the woken threads. It might not be
	 * necessary, but proving that seems to be hard.
	 */
	cond_wake_up(&root->log_commit_wait[index1]);
	return ret;
}
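
/*
 * Note on the mark selection above (illustrative): since at most two
 * log transactions are in flight, the parity of log_transid is enough
 * to keep their dirty pages apart in the shared dirty_log_pages tree:
 *
 *	log_transid even -> EXTENT_DIRTY
 *	log_transid odd  -> EXTENT_NEW
 *
 * so writing out and waiting on one log transaction's extents never
 * touches the pages marked by the other.
 */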
static void free_log_tree(struct btrfs_trans_handle *trans,
			  struct btrfs_root *log)
{
	int ret;
	struct walk_control wc = {
		.free = 1,
		.process_func = process_one_buffer
	};

	ret = walk_log_tree(trans, log, &wc);
	if (ret) {
		if (trans)
			btrfs_abort_transaction(trans, ret);
		else
			btrfs_handle_fs_error(log->fs_info, ret, NULL);
	}

	clear_extent_bits(&log->dirty_log_pages, 0, (u64)-1,
			  EXTENT_DIRTY | EXTENT_NEW | EXTENT_NEED_WAIT);
	free_extent_buffer(log->node);
	kfree(log);
}
/*
 * free all the extents used by the tree log.  This should be called
 * at commit time of the full transaction
 */
int btrfs_free_log(struct btrfs_trans_handle *trans, struct btrfs_root *root)
{
	if (root->log_root) {
		free_log_tree(trans, root->log_root);
		root->log_root = NULL;
	}
	return 0;
}

int btrfs_free_log_root_tree(struct btrfs_trans_handle *trans,
			     struct btrfs_fs_info *fs_info)
{
	if (fs_info->log_root_tree) {
		free_log_tree(trans, fs_info->log_root_tree);
		fs_info->log_root_tree = NULL;
	}
	return 0;
}
/*
 * If both a file and directory are logged, and unlinks or renames are
 * mixed in, we have a few interesting corners:
 *
 * create file X in dir Y
 * link file X to X.link in dir Y
 * fsync file X
 * unlink file X but leave X.link
 * fsync dir Y
 *
 * After a crash we would expect only X.link to exist.  But file X
 * didn't get fsync'd again so the log has back refs for X and X.link.
 *
 * We solve this by removing directory entries and inode backrefs from the
 * log when a file that was logged in the current transaction is
 * unlinked.  Any later fsync will include the updated log entries, and
 * we'll be able to reconstruct the proper directory items from backrefs.
 *
 * This optimization allows us to avoid relogging the entire inode
 * or the entire directory.
 */
int btrfs_del_dir_entries_in_log(struct btrfs_trans_handle *trans,
				 struct btrfs_root *root,
				 const char *name, int name_len,
				 struct btrfs_inode *dir, u64 index)
{
	struct btrfs_root *log;
	struct btrfs_dir_item *di;
	struct btrfs_path *path;
	int ret;
	int err = 0;
	int bytes_del = 0;
	u64 dir_ino = btrfs_ino(dir);

	if (dir->logged_trans < trans->transid)
		return 0;

	ret = join_running_log_trans(root);
	if (ret)
		return 0;

	mutex_lock(&dir->log_mutex);

	log = root->log_root;
	path = btrfs_alloc_path();
	if (!path) {
		err = -ENOMEM;
		goto out_unlock;
	}

	di = btrfs_lookup_dir_item(trans, log, path, dir_ino,
				   name, name_len, -1);
	if (IS_ERR(di)) {
		err = PTR_ERR(di);
		goto fail;
	}
	if (di) {
		ret = btrfs_delete_one_dir_name(trans, log, path, di);
		bytes_del += name_len;
		if (ret) {
			err = ret;
			goto fail;
		}
	}
	btrfs_release_path(path);
	di = btrfs_lookup_dir_index_item(trans, log, path, dir_ino,
					 index, name, name_len, -1);
	if (IS_ERR(di)) {
		err = PTR_ERR(di);
		goto fail;
	}
	if (di) {
		ret = btrfs_delete_one_dir_name(trans, log, path, di);
		bytes_del += name_len;
		if (ret) {
			err = ret;
			goto fail;
		}
	}

	/* update the directory size in the log to reflect the names
	 * we have removed
	 */
	if (bytes_del) {
		struct btrfs_key key;

		key.objectid = dir_ino;
		key.offset = 0;
		key.type = BTRFS_INODE_ITEM_KEY;
		btrfs_release_path(path);

		ret = btrfs_search_slot(trans, log, &key, path, 0, 1);
		if (ret < 0) {
			err = ret;
			goto fail;
		}
		if (ret == 0) {
			struct btrfs_inode_item *item;
			u64 i_size;

			item = btrfs_item_ptr(path->nodes[0], path->slots[0],
					      struct btrfs_inode_item);
			i_size = btrfs_inode_size(path->nodes[0], item);
			if (i_size > bytes_del)
				i_size -= bytes_del;
			else
				i_size = 0;
			btrfs_set_inode_size(path->nodes[0], item, i_size);
			btrfs_mark_buffer_dirty(path->nodes[0]);
		} else
			ret = 0;
		btrfs_release_path(path);
	}
fail:
	btrfs_free_path(path);
out_unlock:
	mutex_unlock(&dir->log_mutex);
	if (ret == -ENOSPC) {
		btrfs_set_log_full_commit(root->fs_info, trans);
		ret = 0;
	} else if (ret < 0)
		btrfs_abort_transaction(trans, ret);

	btrfs_end_log_trans(root);

	return err;
}
/* see comments for btrfs_del_dir_entries_in_log */
int btrfs_del_inode_ref_in_log(struct btrfs_trans_handle *trans,
			       struct btrfs_root *root,
			       const char *name, int name_len,
			       struct btrfs_inode *inode, u64 dirid)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_root *log;
	u64 index;
	int ret;

	if (inode->logged_trans < trans->transid)
		return 0;

	ret = join_running_log_trans(root);
	if (ret)
		return 0;
	log = root->log_root;
	mutex_lock(&inode->log_mutex);

	ret = btrfs_del_inode_ref(trans, log, name, name_len, btrfs_ino(inode),
				  dirid, &index);
	mutex_unlock(&inode->log_mutex);
	if (ret == -ENOSPC) {
		btrfs_set_log_full_commit(fs_info, trans);
		ret = 0;
	} else if (ret < 0 && ret != -ENOENT)
		btrfs_abort_transaction(trans, ret);
	btrfs_end_log_trans(root);

	return ret;
}
/*
 * creates a range item in the log for 'dirid'.  first_offset and
 * last_offset tell us which parts of the key space the log should
 * be considered authoritative for.
 */
static noinline int insert_dir_log_key(struct btrfs_trans_handle *trans,
				       struct btrfs_root *log,
				       struct btrfs_path *path,
				       int key_type, u64 dirid,
				       u64 first_offset, u64 last_offset)
{
	int ret;
	struct btrfs_key key;
	struct btrfs_dir_log_item *item;

	key.objectid = dirid;
	key.offset = first_offset;
	if (key_type == BTRFS_DIR_ITEM_KEY)
		key.type = BTRFS_DIR_LOG_ITEM_KEY;
	else
		key.type = BTRFS_DIR_LOG_INDEX_KEY;
	ret = btrfs_insert_empty_item(trans, log, path, &key, sizeof(*item));
	if (ret)
		return ret;

	item = btrfs_item_ptr(path->nodes[0], path->slots[0],
			      struct btrfs_dir_log_item);
	btrfs_set_dir_log_end(path->nodes[0], item, last_offset);
	btrfs_mark_buffer_dirty(path->nodes[0]);
	btrfs_release_path(path);
	return 0;
}
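
/*
 * Example (illustrative numbers): after logging dir index items at
 * offsets 7 and 9 for directory 256, a call like
 *
 *	insert_dir_log_key(trans, log, path, BTRFS_DIR_INDEX_KEY,
 *			   256, 5, 12);
 *
 * records that the log is authoritative for index offsets [5, 12] of
 * that directory.  During replay, an index in that range found in the
 * subvolume but not in the log is deleted; indexes outside the range
 * are left untouched.
 */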
/*
 * log all the items included in the current transaction for a given
 * directory.  This also creates the range items in the log tree required
 * to replay anything deleted before the fsync
 */
static noinline int log_dir_items(struct btrfs_trans_handle *trans,
			  struct btrfs_root *root, struct btrfs_inode *inode,
			  struct btrfs_path *path,
			  struct btrfs_path *dst_path, int key_type,
			  struct btrfs_log_ctx *ctx,
			  u64 min_offset, u64 *last_offset_ret)
{
	struct btrfs_key min_key;
	struct btrfs_root *log = root->log_root;
	struct extent_buffer *src;
	int err = 0;
	int ret;
	int i;
	int nritems;
	u64 first_offset = min_offset;
	u64 last_offset = (u64)-1;
	u64 ino = btrfs_ino(inode);

	log = root->log_root;

	min_key.objectid = ino;
	min_key.type = key_type;
	min_key.offset = min_offset;

	ret = btrfs_search_forward(root, &min_key, path, trans->transid);

	/*
	 * we didn't find anything from this transaction, see if there
	 * is anything at all
	 */
	if (ret != 0 || min_key.objectid != ino || min_key.type != key_type) {
		min_key.objectid = ino;
		min_key.type = key_type;
		min_key.offset = (u64)-1;
		btrfs_release_path(path);
		ret = btrfs_search_slot(NULL, root, &min_key, path, 0, 0);
		if (ret < 0) {
			btrfs_release_path(path);
			return ret;
		}
		ret = btrfs_previous_item(root, path, ino, key_type);

		/* if ret == 0 there are items for this type,
		 * create a range to tell us the last key of this type.
		 * otherwise, there are no items in this directory after
		 * *min_offset, and we create a range to indicate that.
		 */
		if (ret == 0) {
			struct btrfs_key tmp;
			btrfs_item_key_to_cpu(path->nodes[0], &tmp,
					      path->slots[0]);
			if (key_type == tmp.type)
				first_offset = max(min_offset, tmp.offset) + 1;
		}
		goto done;
	}

	/* go backward to find any previous key */
	ret = btrfs_previous_item(root, path, ino, key_type);
	if (ret == 0) {
		struct btrfs_key tmp;
		btrfs_item_key_to_cpu(path->nodes[0], &tmp, path->slots[0]);
		if (key_type == tmp.type) {
			first_offset = tmp.offset;
			ret = overwrite_item(trans, log, dst_path,
					     path->nodes[0], path->slots[0],
					     &tmp);
			if (ret) {
				err = ret;
				goto done;
			}
		}
	}
	btrfs_release_path(path);

	/* find the first key from this transaction again */
	ret = btrfs_search_slot(NULL, root, &min_key, path, 0, 0);
	if (WARN_ON(ret != 0))
		goto done;

	/*
	 * we have a block from this transaction, log every item in it
	 * from our directory
	 */
	while (1) {
		struct btrfs_key tmp;
		src = path->nodes[0];
		nritems = btrfs_header_nritems(src);
		for (i = path->slots[0]; i < nritems; i++) {
			struct btrfs_dir_item *di;

			btrfs_item_key_to_cpu(src, &min_key, i);

			if (min_key.objectid != ino || min_key.type != key_type)
				goto done;
			ret = overwrite_item(trans, log, dst_path, src, i,
					     &min_key);
			if (ret) {
				err = ret;
				goto done;
			}

			/*
			 * We must make sure that when we log a directory entry,
			 * the corresponding inode, after log replay, has a
			 * matching link count. For example:
			 *
			 * touch foo
			 * mkdir mydir
			 * sync
			 * ln foo mydir/bar
			 * xfs_io -c "fsync" mydir
			 * <crash>
			 * <mount fs and log replay>
			 *
			 * Would result in a fsync log that when replayed, our
			 * file inode would have a link count of 1, but we get
			 * two directory entries pointing to the same inode.
			 * After removing one of the names, it would not be
			 * possible to remove the other name, which resulted
			 * always in stale file handle errors, and would not
			 * be possible to rmdir the parent directory, since
			 * its i_size could never decrement to the value
			 * BTRFS_EMPTY_DIR_SIZE, resulting in -ENOTEMPTY errors.
			 */
			di = btrfs_item_ptr(src, i, struct btrfs_dir_item);
			btrfs_dir_item_key_to_cpu(src, di, &tmp);
			if (ctx &&
			    (btrfs_dir_transid(src, di) == trans->transid ||
			     btrfs_dir_type(src, di) == BTRFS_FT_DIR) &&
			    tmp.type != BTRFS_ROOT_ITEM_KEY)
				ctx->log_new_dentries = true;
		}
		path->slots[0] = nritems;

		/*
		 * look ahead to the next item and see if it is also
		 * from this directory and from this transaction
		 */
		ret = btrfs_next_leaf(root, path);
		if (ret) {
			if (ret == 1)
				last_offset = (u64)-1;
			else
				err = ret;
			goto done;
		}
		btrfs_item_key_to_cpu(path->nodes[0], &tmp, path->slots[0]);
		if (tmp.objectid != ino || tmp.type != key_type) {
			last_offset = (u64)-1;
			goto done;
		}
		if (btrfs_header_generation(path->nodes[0]) != trans->transid) {
			ret = overwrite_item(trans, log, dst_path,
					     path->nodes[0], path->slots[0],
					     &tmp);
			if (ret)
				err = ret;
			else
				last_offset = tmp.offset;
			goto done;
		}
	}
done:
	btrfs_release_path(path);
	btrfs_release_path(dst_path);

	if (err == 0) {
		*last_offset_ret = last_offset;
		/*
		 * insert the log range keys to indicate where the log
		 * is valid
		 */
		ret = insert_dir_log_key(trans, log, path, key_type,
					 ino, first_offset, last_offset);
		if (ret)
			err = ret;
	}
	return err;
}
/*
 * logging directories is very similar to logging inodes.  We find all
 * the items from the current transaction and write them to the log.
 *
 * The recovery code scans the directory in the subvolume, and if it finds a
 * key in the range logged that is not present in the log tree, then it means
 * that dir entry was unlinked during the transaction.
 *
 * In order for that scan to work, we must include one key smaller than
 * the smallest logged by this transaction and one key larger than the largest
 * key logged by this transaction.
 */
static noinline int log_directory_changes(struct btrfs_trans_handle *trans,
			  struct btrfs_root *root, struct btrfs_inode *inode,
			  struct btrfs_path *path,
			  struct btrfs_path *dst_path,
			  struct btrfs_log_ctx *ctx)
{
	u64 min_key;
	u64 max_key;
	int ret;
	int key_type = BTRFS_DIR_ITEM_KEY;

again:
	min_key = 0;
	max_key = 0;
	while (1) {
		ret = log_dir_items(trans, root, inode, path, dst_path, key_type,
				ctx, min_key, &max_key);
		if (ret)
			return ret;
		if (max_key == (u64)-1)
			break;
		min_key = max_key + 1;
	}

	if (key_type == BTRFS_DIR_ITEM_KEY) {
		key_type = BTRFS_DIR_INDEX_KEY;
		goto again;
	}
	return 0;
}
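
/*
 * Note (illustrative): the loop above runs twice per directory fsync,
 * once for BTRFS_DIR_ITEM_KEY and once for BTRFS_DIR_INDEX_KEY,
 * advancing min_key past each logged range until log_dir_items()
 * reports a last offset of (u64)-1 for that key type, i.e. the log is
 * authoritative all the way to the end of the key space.
 */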
/*
 * a helper function to drop items from the log before we relog an
 * inode.  max_key_type indicates the highest item type to remove.
 * This cannot be run for file data extents because it does not
 * free the extents they point to.
 */
static int drop_objectid_items(struct btrfs_trans_handle *trans,
				  struct btrfs_root *log,
				  struct btrfs_path *path,
				  u64 objectid, int max_key_type)
{
	int ret;
	struct btrfs_key key;
	struct btrfs_key found_key;
	int start_slot;

	key.objectid = objectid;
	key.type = max_key_type;
	key.offset = (u64)-1;

	while (1) {
		ret = btrfs_search_slot(trans, log, &key, path, -1, 1);
		BUG_ON(ret == 0); /* Logic error */
		if (ret < 0)
			break;

		if (path->slots[0] == 0)
			break;

		path->slots[0]--;
		btrfs_item_key_to_cpu(path->nodes[0], &found_key,
				      path->slots[0]);

		if (found_key.objectid != objectid)
			break;

		found_key.offset = 0;
		found_key.type = 0;
		ret = btrfs_bin_search(path->nodes[0], &found_key, 0,
				       &start_slot);
		if (ret < 0)
			break;

		ret = btrfs_del_items(trans, log, path, start_slot,
				      path->slots[0] - start_slot + 1);
		/*
		 * If start slot isn't 0 then we don't need to re-search, we've
		 * found the last guy with the objectid in this tree.
		 */
		if (ret || start_slot != 0)
			break;
		btrfs_release_path(path);
	}
	btrfs_release_path(path);
	if (ret > 0)
		ret = 0;
	return ret;
}
static void fill_inode_item(struct btrfs_trans_handle *trans,
			    struct extent_buffer *leaf,
			    struct btrfs_inode_item *item,
			    struct inode *inode, int log_inode_only,
			    u64 logged_isize)
{
	struct btrfs_map_token token;

	btrfs_init_map_token(&token);

	if (log_inode_only) {
		/* set the generation to zero so the recover code
		 * can tell the difference between a logging
		 * just to say 'this inode exists' and a logging
		 * to say 'update this inode with these values'
		 */
		btrfs_set_token_inode_generation(leaf, item, 0, &token);
		btrfs_set_token_inode_size(leaf, item, logged_isize, &token);
	} else {
		btrfs_set_token_inode_generation(leaf, item,
						 BTRFS_I(inode)->generation,
						 &token);
		btrfs_set_token_inode_size(leaf, item, inode->i_size, &token);
	}

	btrfs_set_token_inode_uid(leaf, item, i_uid_read(inode), &token);
	btrfs_set_token_inode_gid(leaf, item, i_gid_read(inode), &token);
	btrfs_set_token_inode_mode(leaf, item, inode->i_mode, &token);
	btrfs_set_token_inode_nlink(leaf, item, inode->i_nlink, &token);

	btrfs_set_token_timespec_sec(leaf, &item->atime,
				     inode->i_atime.tv_sec, &token);
	btrfs_set_token_timespec_nsec(leaf, &item->atime,
				      inode->i_atime.tv_nsec, &token);

	btrfs_set_token_timespec_sec(leaf, &item->mtime,
				     inode->i_mtime.tv_sec, &token);
	btrfs_set_token_timespec_nsec(leaf, &item->mtime,
				      inode->i_mtime.tv_nsec, &token);

	btrfs_set_token_timespec_sec(leaf, &item->ctime,
				     inode->i_ctime.tv_sec, &token);
	btrfs_set_token_timespec_nsec(leaf, &item->ctime,
				      inode->i_ctime.tv_nsec, &token);

	btrfs_set_token_inode_nbytes(leaf, item, inode_get_bytes(inode),
				     &token);

	btrfs_set_token_inode_sequence(leaf, item,
				       inode_peek_iversion(inode), &token);
	btrfs_set_token_inode_transid(leaf, item, trans->transid, &token);
	btrfs_set_token_inode_rdev(leaf, item, inode->i_rdev, &token);
	btrfs_set_token_inode_flags(leaf, item, BTRFS_I(inode)->flags, &token);
	btrfs_set_token_inode_block_group(leaf, item, 0, &token);
}
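
/*
 * Sketch of the recovery-side distinction enabled above (illustrative,
 * not the exact replay code): a generation of zero marks an inode item
 * logged only for existence (LOG_INODE_EXISTS), so replay must not
 * overwrite the full inode state with it:
 *
 *	if (btrfs_inode_generation(eb, item) == 0) {
 *		// "this inode exists": keep the existing metadata
 *	} else {
 *		// full update: copy all fields from the log
 *	}
 */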
static int log_inode_item(struct btrfs_trans_handle *trans,
			  struct btrfs_root *log, struct btrfs_path *path,
			  struct btrfs_inode *inode)
{
	struct btrfs_inode_item *inode_item;
	int ret;

	ret = btrfs_insert_empty_item(trans, log, path,
				      &inode->location, sizeof(*inode_item));
	if (ret && ret != -EEXIST)
		return ret;
	inode_item = btrfs_item_ptr(path->nodes[0], path->slots[0],
				    struct btrfs_inode_item);
	fill_inode_item(trans, path->nodes[0], inode_item, &inode->vfs_inode,
			0, 0);
	btrfs_release_path(path);
	return 0;
}
static noinline int copy_items(struct btrfs_trans_handle *trans,
			       struct btrfs_inode *inode,
			       struct btrfs_path *dst_path,
			       struct btrfs_path *src_path, u64 *last_extent,
			       int start_slot, int nr, int inode_only,
			       u64 logged_isize)
{
	struct btrfs_fs_info *fs_info = trans->fs_info;
	unsigned long src_offset;
	unsigned long dst_offset;
	struct btrfs_root *log = inode->root->log_root;
	struct btrfs_file_extent_item *extent;
	struct btrfs_inode_item *inode_item;
	struct extent_buffer *src = src_path->nodes[0];
	struct btrfs_key first_key, last_key, key;
	int ret;
	struct btrfs_key *ins_keys;
	u32 *ins_sizes;
	char *ins_data;
	int i;
	struct list_head ordered_sums;
	int skip_csum = inode->flags & BTRFS_INODE_NODATASUM;
	bool has_extents = false;
	bool need_find_last_extent = true;
	bool done = false;

	INIT_LIST_HEAD(&ordered_sums);

	ins_data = kmalloc(nr * sizeof(struct btrfs_key) +
			   nr * sizeof(u32), GFP_NOFS);
	if (!ins_data)
		return -ENOMEM;

	first_key.objectid = (u64)-1;

	ins_sizes = (u32 *)ins_data;
	ins_keys = (struct btrfs_key *)(ins_data + nr * sizeof(u32));

	for (i = 0; i < nr; i++) {
		ins_sizes[i] = btrfs_item_size_nr(src, i + start_slot);
		btrfs_item_key_to_cpu(src, ins_keys + i, i + start_slot);
	}
	ret = btrfs_insert_empty_items(trans, log, dst_path,
				       ins_keys, ins_sizes, nr);
	if (ret) {
		kfree(ins_data);
		return ret;
	}

	for (i = 0; i < nr; i++, dst_path->slots[0]++) {
		dst_offset = btrfs_item_ptr_offset(dst_path->nodes[0],
						   dst_path->slots[0]);

		src_offset = btrfs_item_ptr_offset(src, start_slot + i);

		if (i == nr - 1)
			last_key = ins_keys[i];

		if (ins_keys[i].type == BTRFS_INODE_ITEM_KEY) {
			inode_item = btrfs_item_ptr(dst_path->nodes[0],
						    dst_path->slots[0],
						    struct btrfs_inode_item);
			fill_inode_item(trans, dst_path->nodes[0], inode_item,
					&inode->vfs_inode,
					inode_only == LOG_INODE_EXISTS,
					logged_isize);
		} else {
			copy_extent_buffer(dst_path->nodes[0], src, dst_offset,
					   src_offset, ins_sizes[i]);
		}

		/*
		 * We set need_find_last_extent here in case we know we were
		 * processing other items and then walk into the first extent in
		 * the inode.  If we don't hit an extent then nothing changes,
		 * we'll do the last search the next time around.
		 */
		if (ins_keys[i].type == BTRFS_EXTENT_DATA_KEY) {
			has_extents = true;
			if (first_key.objectid == (u64)-1)
				first_key = ins_keys[i];
		} else {
			need_find_last_extent = false;
		}

		/* take a reference on file data extents so that truncates
		 * or deletes of this inode don't have to relog the inode
		 * again
		 */
		if (ins_keys[i].type == BTRFS_EXTENT_DATA_KEY &&
		    !skip_csum) {
			int found_type;
			extent = btrfs_item_ptr(src, start_slot + i,
						struct btrfs_file_extent_item);

			if (btrfs_file_extent_generation(src, extent) < trans->transid)
				continue;

			found_type = btrfs_file_extent_type(src, extent);
			if (found_type == BTRFS_FILE_EXTENT_REG) {
				u64 ds, dl, cs, cl;
				ds = btrfs_file_extent_disk_bytenr(src,
								extent);
				/* ds == 0 is a hole */
				if (ds == 0)
					continue;

				dl = btrfs_file_extent_disk_num_bytes(src,
								extent);
				cs = btrfs_file_extent_offset(src, extent);
				cl = btrfs_file_extent_num_bytes(src,
								extent);
				if (btrfs_file_extent_compression(src,
								  extent)) {
					cs = 0;
					cl = dl;
				}

				ret = btrfs_lookup_csums_range(
						fs_info->csum_root,
						ds + cs, ds + cs + cl - 1,
						&ordered_sums, 0);
				if (ret) {
					btrfs_release_path(dst_path);
					kfree(ins_data);
					return ret;
				}
			}
		}
	}

	btrfs_mark_buffer_dirty(dst_path->nodes[0]);
	btrfs_release_path(dst_path);
	kfree(ins_data);

	/*
	 * we have to do this after the loop above to avoid changing the
	 * log tree while trying to change the log tree.
	 */
	ret = 0;
	while (!list_empty(&ordered_sums)) {
		struct btrfs_ordered_sum *sums = list_entry(ordered_sums.next,
						   struct btrfs_ordered_sum,
						   list);
		if (!ret)
			ret = btrfs_csum_file_blocks(trans, log, sums);
		list_del(&sums->list);
		kfree(sums);
	}

	if (!has_extents)
		return ret;

	if (need_find_last_extent && *last_extent == first_key.offset) {
		/*
		 * We don't have any leafs between our current one and the one
		 * we processed before that can have file extent items for our
		 * inode (and have a generation number smaller than our current
		 * transaction id).
		 */
		need_find_last_extent = false;
	}

	/*
	 * Because we use btrfs_search_forward we could skip leaves that were
	 * not modified and then assume *last_extent is valid when it really
	 * isn't.  So back up to the previous leaf and read the end of the last
	 * extent before we go and fill in holes.
	 */
	if (need_find_last_extent) {
		u64 len;

		ret = btrfs_prev_leaf(inode->root, src_path);
		if (ret < 0)
			return ret;
		if (ret)
			goto fill_holes;
		if (src_path->slots[0])
			src_path->slots[0]--;
		src = src_path->nodes[0];
		btrfs_item_key_to_cpu(src, &key, src_path->slots[0]);
		if (key.objectid != btrfs_ino(inode) ||
		    key.type != BTRFS_EXTENT_DATA_KEY)
			goto fill_holes;
		extent = btrfs_item_ptr(src, src_path->slots[0],
					struct btrfs_file_extent_item);
		if (btrfs_file_extent_type(src, extent) ==
		    BTRFS_FILE_EXTENT_INLINE) {
			len = btrfs_file_extent_ram_bytes(src, extent);
			*last_extent = ALIGN(key.offset + len,
					     fs_info->sectorsize);
		} else {
			len = btrfs_file_extent_num_bytes(src, extent);
			*last_extent = key.offset + len;
		}
	}
fill_holes:
	/* So we did prev_leaf, now we need to move to the next leaf, but a few
	 * things could have happened
	 *
	 * 1) A merge could have happened, so we could currently be on a leaf
	 * that holds what we were copying in the first place.
	 * 2) A split could have happened, and now not all of the items we want
	 * are on the same leaf.
	 *
	 * So we need to adjust how we search for holes, we need to drop the
	 * path and re-search for the first extent key we found, and then walk
	 * forward until we hit the last one we copied.
	 */
	if (need_find_last_extent) {
		/* btrfs_prev_leaf could return 1 without releasing the path */
		btrfs_release_path(src_path);
		ret = btrfs_search_slot(NULL, inode->root, &first_key,
				src_path, 0, 0);
		if (ret < 0)
			return ret;
		ASSERT(ret == 0);
		src = src_path->nodes[0];
		i = src_path->slots[0];
	} else {
		i = start_slot;
	}

	/*
	 * Ok so here we need to go through and fill in any holes we may have
	 * to make sure that holes are punched for those areas in case they had
	 * extents previously.
	 */
	while (!done) {
		u64 offset, len;
		u64 extent_end;

		if (i >= btrfs_header_nritems(src_path->nodes[0])) {
			ret = btrfs_next_leaf(inode->root, src_path);
			if (ret < 0)
				return ret;
			ASSERT(ret == 0);
			src = src_path->nodes[0];
			i = 0;
			need_find_last_extent = true;
		}

		btrfs_item_key_to_cpu(src, &key, i);
		if (!btrfs_comp_cpu_keys(&key, &last_key))
			done = true;
		if (key.objectid != btrfs_ino(inode) ||
		    key.type != BTRFS_EXTENT_DATA_KEY) {
			i++;
			continue;
		}
		extent = btrfs_item_ptr(src, i, struct btrfs_file_extent_item);
		if (btrfs_file_extent_type(src, extent) ==
		    BTRFS_FILE_EXTENT_INLINE) {
			len = btrfs_file_extent_ram_bytes(src, extent);
			extent_end = ALIGN(key.offset + len,
					   fs_info->sectorsize);
		} else {
			len = btrfs_file_extent_num_bytes(src, extent);
			extent_end = key.offset + len;
		}
		i++;

		if (*last_extent == key.offset) {
			*last_extent = extent_end;
			continue;
		}
		offset = *last_extent;
		len = key.offset - *last_extent;
		ret = btrfs_insert_file_extent(trans, log, btrfs_ino(inode),
					       offset, 0, 0, len, 0, len, 0, 0, 0);
		if (ret)
			break;
		*last_extent = extent_end;
	}

	/*
	 * Check if there is a hole between the last extent found in our leaf
	 * and the first extent in the next leaf. If there is one, we need to
	 * log an explicit hole so that at replay time we can punch the hole.
	 */
	if (ret == 0 &&
	    key.objectid == btrfs_ino(inode) &&
	    key.type == BTRFS_EXTENT_DATA_KEY &&
	    i == btrfs_header_nritems(src_path->nodes[0])) {
		ret = btrfs_next_leaf(inode->root, src_path);
		need_find_last_extent = true;
		if (ret > 0) {
			ret = 0;
		} else if (ret == 0) {
			btrfs_item_key_to_cpu(src_path->nodes[0], &key,
					      src_path->slots[0]);
			if (key.objectid == btrfs_ino(inode) &&
			    key.type == BTRFS_EXTENT_DATA_KEY &&
			    *last_extent < key.offset) {
				const u64 len = key.offset - *last_extent;

				ret = btrfs_insert_file_extent(trans, log,
							       btrfs_ino(inode),
							       *last_extent, 0,
							       0, len, 0, len,
							       0, 0, 0);
				*last_extent += len;
			}
		}
	}
	/*
	 * Need to let the callers know we dropped the path so they should
	 * re-search.
	 */
	if (!ret && need_find_last_extent)
		ret = 1;
	return ret;
}
, struct list_head
*a
, struct list_head
*b
)
4179 struct extent_map
*em1
, *em2
;
4181 em1
= list_entry(a
, struct extent_map
, list
);
4182 em2
= list_entry(b
, struct extent_map
, list
);
4184 if (em1
->start
< em2
->start
)
4186 else if (em1
->start
> em2
->start
)
static int log_extent_csums(struct btrfs_trans_handle *trans,
			    struct btrfs_inode *inode,
			    struct btrfs_root *log_root,
			    const struct extent_map *em)
{
	u64 csum_offset;
	u64 csum_len;
	LIST_HEAD(ordered_sums);
	int ret = 0;

	if (inode->flags & BTRFS_INODE_NODATASUM ||
	    test_bit(EXTENT_FLAG_PREALLOC, &em->flags) ||
	    em->block_start == EXTENT_MAP_HOLE)
		return 0;

	/* If we're compressed we have to save the entire range of csums. */
	if (em->compress_type) {
		csum_offset = 0;
		csum_len = max(em->block_len, em->orig_block_len);
	} else {
		csum_offset = em->mod_start - em->start;
		csum_len = em->mod_len;
	}

	/* block start is already adjusted for the file extent offset. */
	ret = btrfs_lookup_csums_range(trans->fs_info->csum_root,
				       em->block_start + csum_offset,
				       em->block_start + csum_offset +
				       csum_len - 1, &ordered_sums, 0);
	if (ret)
		return ret;

	while (!list_empty(&ordered_sums)) {
		struct btrfs_ordered_sum *sums = list_entry(ordered_sums.next,
						   struct btrfs_ordered_sum,
						   list);
		if (!ret)
			ret = btrfs_csum_file_blocks(trans, log_root, sums);
		list_del(&sums->list);
		kfree(sums);
	}

	return ret;
}
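
/*
 * Worked example for the non-compressed branch above (made-up
 * numbers): an extent map with start == 64K, mod_start == 96K,
 * mod_len == 32K and block_start == 1M gives
 *
 *	csum_offset = 96K - 64K = 32K
 *	csum range  = [1M + 32K, 1M + 64K - 1]
 *
 * so only the checksums of the modified portion are copied into the
 * log.  For compressed extents the whole on-disk range is covered,
 * since the checksums cover compressed bytes.
 */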
static int log_one_extent(struct btrfs_trans_handle *trans,
			  struct btrfs_inode *inode, struct btrfs_root *root,
			  const struct extent_map *em,
			  struct btrfs_path *path,
			  struct btrfs_log_ctx *ctx)
{
	struct btrfs_root *log = root->log_root;
	struct btrfs_file_extent_item *fi;
	struct extent_buffer *leaf;
	struct btrfs_map_token token;
	struct btrfs_key key;
	u64 extent_offset = em->start - em->orig_start;
	u64 block_len;
	int ret;
	int extent_inserted = 0;

	ret = log_extent_csums(trans, inode, log, em);
	if (ret)
		return ret;

	btrfs_init_map_token(&token);

	ret = __btrfs_drop_extents(trans, log, &inode->vfs_inode, path, em->start,
				   em->start + em->len, NULL, 0, 1,
				   sizeof(*fi), &extent_inserted);
	if (ret)
		return ret;

	if (!extent_inserted) {
		key.objectid = btrfs_ino(inode);
		key.type = BTRFS_EXTENT_DATA_KEY;
		key.offset = em->start;

		ret = btrfs_insert_empty_item(trans, log, path, &key,
					      sizeof(*fi));
		if (ret)
			return ret;
	}
	leaf = path->nodes[0];
	fi = btrfs_item_ptr(leaf, path->slots[0],
			    struct btrfs_file_extent_item);

	btrfs_set_token_file_extent_generation(leaf, fi, trans->transid,
					       &token);
	if (test_bit(EXTENT_FLAG_PREALLOC, &em->flags))
		btrfs_set_token_file_extent_type(leaf, fi,
						 BTRFS_FILE_EXTENT_PREALLOC,
						 &token);
	else
		btrfs_set_token_file_extent_type(leaf, fi,
						 BTRFS_FILE_EXTENT_REG,
						 &token);

	block_len = max(em->block_len, em->orig_block_len);
	if (em->compress_type != BTRFS_COMPRESS_NONE) {
		btrfs_set_token_file_extent_disk_bytenr(leaf, fi,
							em->block_start,
							&token);
		btrfs_set_token_file_extent_disk_num_bytes(leaf, fi, block_len,
							   &token);
	} else if (em->block_start < EXTENT_MAP_LAST_BYTE) {
		btrfs_set_token_file_extent_disk_bytenr(leaf, fi,
							em->block_start -
							extent_offset, &token);
		btrfs_set_token_file_extent_disk_num_bytes(leaf, fi, block_len,
							   &token);
	} else {
		btrfs_set_token_file_extent_disk_bytenr(leaf, fi, 0, &token);
		btrfs_set_token_file_extent_disk_num_bytes(leaf, fi, 0,
							   &token);
	}

	btrfs_set_token_file_extent_offset(leaf, fi, extent_offset, &token);
	btrfs_set_token_file_extent_num_bytes(leaf, fi, em->len, &token);
	btrfs_set_token_file_extent_ram_bytes(leaf, fi, em->ram_bytes, &token);
	btrfs_set_token_file_extent_compression(leaf, fi, em->compress_type,
						&token);
	btrfs_set_token_file_extent_encryption(leaf, fi, 0, &token);
	btrfs_set_token_file_extent_other_encoding(leaf, fi, 0, &token);
	btrfs_mark_buffer_dirty(leaf);

	btrfs_release_path(path);

	return ret;
}
/*
 * Log all prealloc extents beyond the inode's i_size to make sure we do not
 * lose them after doing a fast fsync and replaying the log. We scan the
 * subvolume's root instead of iterating the inode's extent map tree because
 * otherwise we can log incorrect extent items based on extent map conversion.
 * That can happen due to the fact that extent maps are merged when they
 * are not in the extent map tree's list of modified extents.
 */
static int btrfs_log_prealloc_extents(struct btrfs_trans_handle *trans,
				      struct btrfs_inode *inode,
				      struct btrfs_path *path)
{
	struct btrfs_root *root = inode->root;
	struct btrfs_key key;
	const u64 i_size = i_size_read(&inode->vfs_inode);
	const u64 ino = btrfs_ino(inode);
	struct btrfs_path *dst_path = NULL;
	u64 last_extent = (u64)-1;
	int ins_nr = 0;
	int start_slot = 0;
	int ret;

	if (!(inode->flags & BTRFS_INODE_PREALLOC))
		return 0;

	key.objectid = ino;
	key.type = BTRFS_EXTENT_DATA_KEY;
	key.offset = i_size;
	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	if (ret < 0)
		goto out;

	while (true) {
		struct extent_buffer *leaf = path->nodes[0];
		int slot = path->slots[0];

		if (slot >= btrfs_header_nritems(leaf)) {
			if (ins_nr > 0) {
				ret = copy_items(trans, inode, dst_path, path,
						 &last_extent, start_slot,
						 ins_nr, 1, 0);
				if (ret < 0)
					goto out;
				ins_nr = 0;
			}
			ret = btrfs_next_leaf(root, path);
			if (ret < 0)
				goto out;
			if (ret > 0) {
				ret = 0;
				break;
			}
			continue;
		}

		btrfs_item_key_to_cpu(leaf, &key, slot);
		if (key.objectid > ino)
			break;
		if (WARN_ON_ONCE(key.objectid < ino) ||
		    key.type < BTRFS_EXTENT_DATA_KEY ||
		    key.offset < i_size) {
			path->slots[0]++;
			continue;
		}
		if (last_extent == (u64)-1) {
			last_extent = key.offset;
			/*
			 * Avoid logging extent items logged in past fsync calls
			 * and leading to duplicate keys in the log tree.
			 */
			do {
				ret = btrfs_truncate_inode_items(trans,
							 root->log_root,
							 &inode->vfs_inode,
							 i_size,
							 BTRFS_EXTENT_DATA_KEY);
			} while (ret == -EAGAIN);
			if (ret)
				goto out;
		}
		if (ins_nr == 0)
			start_slot = slot;
		ins_nr++;
		path->slots[0]++;
		if (!dst_path) {
			dst_path = btrfs_alloc_path();
			if (!dst_path) {
				ret = -ENOMEM;
				goto out;
			}
		}
	}
	if (ins_nr > 0) {
		ret = copy_items(trans, inode, dst_path, path, &last_extent,
				 start_slot, ins_nr, 1, 0);
		if (ret > 0)
			ret = 0;
	}
out:
	btrfs_release_path(path);
	btrfs_free_path(dst_path);
	return ret;
}
static int btrfs_log_changed_extents(struct btrfs_trans_handle *trans,
				     struct btrfs_root *root,
				     struct btrfs_inode *inode,
				     struct btrfs_path *path,
				     struct btrfs_log_ctx *ctx,
				     const u64 start,
				     const u64 end)
{
	struct extent_map *em, *n;
	struct list_head extents;
	struct extent_map_tree *tree = &inode->extent_tree;
	u64 test_gen;
	int ret = 0;
	int num = 0;

	INIT_LIST_HEAD(&extents);

	write_lock(&tree->lock);
	test_gen = root->fs_info->last_trans_committed;

	list_for_each_entry_safe(em, n, &tree->modified_extents, list) {
		/*
		 * Skip extents outside our logging range. It's important to do
		 * it for correctness because if we don't ignore them, we may
		 * log them before their ordered extent completes, and therefore
		 * we could log them without logging their respective checksums
		 * (the checksum items are added to the csum tree at the very
		 * end of btrfs_finish_ordered_io()). Also leave such extents
		 * outside of our range in the list, since we may have another
		 * ranged fsync in the near future that needs them. If an extent
		 * outside our range corresponds to a hole, log it to avoid
		 * leaving gaps between extents (fsck will complain when we are
		 * not using the NO_HOLES feature).
		 */
		if ((em->start > end || em->start + em->len <= start) &&
		    em->block_start != EXTENT_MAP_HOLE)
			continue;

		list_del_init(&em->list);
		/*
		 * Just an arbitrary number, this can be really CPU intensive
		 * once we start getting a lot of extents, and really once we
		 * have a bunch of extents we just want to commit since it will
		 * be faster.
		 */
		if (++num > 32768) {
			list_del_init(&tree->modified_extents);
			ret = -EFBIG;
			goto process;
		}

		if (em->generation <= test_gen)
			continue;

		/* We log prealloc extents beyond eof later. */
		if (test_bit(EXTENT_FLAG_PREALLOC, &em->flags) &&
		    em->start >= i_size_read(&inode->vfs_inode))
			continue;

		/* Need a ref to keep it from getting evicted from cache */
		refcount_inc(&em->refs);
		set_bit(EXTENT_FLAG_LOGGING, &em->flags);
		list_add_tail(&em->list, &extents);
	}

	list_sort(NULL, &extents, extent_cmp);
process:
	while (!list_empty(&extents)) {
		em = list_entry(extents.next, struct extent_map, list);

		list_del_init(&em->list);

		/*
		 * If we had an error we just need to delete everybody from our
		 * private list.
		 */
		if (ret) {
			clear_em_logging(tree, em);
			free_extent_map(em);
			continue;
		}

		write_unlock(&tree->lock);

		ret = log_one_extent(trans, inode, root, em, path, ctx);
		write_lock(&tree->lock);
		clear_em_logging(tree, em);
		free_extent_map(em);
	}
	WARN_ON(!list_empty(&extents));
	write_unlock(&tree->lock);

	btrfs_release_path(path);
	if (!ret)
		ret = btrfs_log_prealloc_extents(trans, inode, path);

	return ret;
}
static int logged_inode_size(struct btrfs_root *log, struct btrfs_inode *inode,
			     struct btrfs_path *path, u64 *size_ret)
{
	struct btrfs_key key;
	int ret;

	key.objectid = btrfs_ino(inode);
	key.type = BTRFS_INODE_ITEM_KEY;
	key.offset = 0;

	ret = btrfs_search_slot(NULL, log, &key, path, 0, 0);
	if (ret < 0) {
		return ret;
	} else if (ret > 0) {
		*size_ret = 0;
	} else {
		struct btrfs_inode_item *item;

		item = btrfs_item_ptr(path->nodes[0], path->slots[0],
				      struct btrfs_inode_item);
		*size_ret = btrfs_inode_size(path->nodes[0], item);
	}

	btrfs_release_path(path);
	return 0;
}
/*
 * At the moment we always log all xattrs. This is to figure out at log replay
 * time which xattrs must have their deletion replayed. If a xattr is missing
 * in the log tree and exists in the fs/subvol tree, we delete it. This is
 * because if a xattr is deleted, the inode is fsynced and a power failure
 * happens, causing the log to be replayed the next time the fs is mounted,
 * we want the xattr to not exist anymore (same behaviour as other filesystems
 * with a journal, ext3/4, xfs, f2fs, etc).
 */
static int btrfs_log_all_xattrs(struct btrfs_trans_handle *trans,
				struct btrfs_root *root,
				struct btrfs_inode *inode,
				struct btrfs_path *path,
				struct btrfs_path *dst_path)
{
	int ret;
	struct btrfs_key key;
	const u64 ino = btrfs_ino(inode);
	int ins_nr = 0;
	int start_slot = 0;

	key.objectid = ino;
	key.type = BTRFS_XATTR_ITEM_KEY;
	key.offset = 0;

	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	if (ret < 0)
		return ret;

	while (true) {
		int slot = path->slots[0];
		struct extent_buffer *leaf = path->nodes[0];
		int nritems = btrfs_header_nritems(leaf);

		if (slot >= nritems) {
			if (ins_nr > 0) {
				u64 last_extent = 0;

				ret = copy_items(trans, inode, dst_path, path,
						 &last_extent, start_slot,
						 ins_nr, 1, 0);
				/* can't be 1, extent items aren't processed */
				ASSERT(ret <= 0);
				if (ret < 0)
					return ret;
				ins_nr = 0;
			}
			ret = btrfs_next_leaf(root, path);
			if (ret < 0)
				return ret;
			else if (ret > 0)
				break;
			continue;
		}

		btrfs_item_key_to_cpu(leaf, &key, slot);
		if (key.objectid != ino || key.type != BTRFS_XATTR_ITEM_KEY)
			break;

		if (ins_nr == 0)
			start_slot = slot;
		ins_nr++;
		path->slots[0]++;
		cond_resched();
	}
	if (ins_nr > 0) {
		u64 last_extent = 0;

		ret = copy_items(trans, inode, dst_path, path,
				 &last_extent, start_slot,
				 ins_nr, 1, 0);
		/* can't be 1, extent items aren't processed */
		ASSERT(ret <= 0);
		if (ret < 0)
			return ret;
	}

	return 0;
}
/*
 * If the no holes feature is enabled we need to make sure any hole between the
 * last extent and the i_size of our inode is explicitly marked in the log. This
 * is to make sure that doing something like:
 *
 *      1) create file with 128Kb of data
 *      2) truncate file to 64Kb
 *      3) truncate file to 256Kb
 *      4) fsync file
 *      5) <crash/power failure>
 *      6) mount fs and trigger log replay
 *
 * Will give us a file with a size of 256Kb, the first 64Kb of data match what
 * the file had in its first 64Kb of data at step 1 and the last 192Kb of the
 * file correspond to a hole. The presence of explicit holes in a log tree is
 * what guarantees that log replay will remove/adjust file extent items in the
 * fs/subvol tree.
 *
 * Here we do not need to care about holes between extents, that is already done
 * by copy_items(). We also only need to do this in the full sync path, where we
 * lookup for extents from the fs/subvol tree only. In the fast path case, we
 * lookup the list of modified extent maps and if any represents a hole, we
 * insert a corresponding extent representing a hole in the log tree.
 */
static int btrfs_log_trailing_hole(struct btrfs_trans_handle *trans,
				   struct btrfs_root *root,
				   struct btrfs_inode *inode,
				   struct btrfs_path *path)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	int ret;
	struct btrfs_key key;
	u64 hole_start;
	u64 hole_size;
	struct extent_buffer *leaf;
	struct btrfs_root *log = root->log_root;
	const u64 ino = btrfs_ino(inode);
	const u64 i_size = i_size_read(&inode->vfs_inode);

	if (!btrfs_fs_incompat(fs_info, NO_HOLES))
		return 0;

	key.objectid = ino;
	key.type = BTRFS_EXTENT_DATA_KEY;
	key.offset = (u64)-1;

	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	if (ret < 0)
		return ret;
	ASSERT(ret != 0);

	ASSERT(path->slots[0] > 0);
	path->slots[0]--;
	leaf = path->nodes[0];
	btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);

	if (key.objectid != ino || key.type != BTRFS_EXTENT_DATA_KEY) {
		/* inode does not have any extents */
		hole_start = 0;
		hole_size = i_size;
	} else {
		struct btrfs_file_extent_item *extent;
		u64 len;

		/*
		 * If there's an extent beyond i_size, an explicit hole was
		 * already inserted by copy_items().
		 */
		if (key.offset >= i_size)
			return 0;

		extent = btrfs_item_ptr(leaf, path->slots[0],
					struct btrfs_file_extent_item);

		if (btrfs_file_extent_type(leaf, extent) ==
		    BTRFS_FILE_EXTENT_INLINE) {
			len = btrfs_file_extent_ram_bytes(leaf, extent);
			ASSERT(len == i_size ||
			       (len == fs_info->sectorsize &&
				btrfs_file_extent_compression(leaf, extent) !=
				BTRFS_COMPRESS_NONE) ||
			       (len < i_size && i_size < fs_info->sectorsize));
			return 0;
		}

		len = btrfs_file_extent_num_bytes(leaf, extent);
		/* Last extent goes beyond i_size, no need to log a hole. */
		if (key.offset + len > i_size)
			return 0;
		hole_start = key.offset + len;
		hole_size = i_size - hole_start;
	}

	btrfs_release_path(path);

	/* Last extent ends at i_size. */
	if (hole_size == 0)
		return 0;

	hole_size = ALIGN(hole_size, fs_info->sectorsize);
	ret = btrfs_insert_file_extent(trans, log, ino, hole_start, 0, 0,
				       hole_size, 0, hole_size, 0, 0, 0);
	return ret;
}
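
/*
 * Worked example (made-up numbers, 4K sector size): if the last extent
 * of the inode covers [0, 64K) and i_size is 256K, then
 *
 *	hole_start = 0 + 64K    = 64K
 *	hole_size  = 256K - 64K = 192K
 *
 * and a 192K hole extent (disk_bytenr == 0) is inserted at offset 64K,
 * matching the truncate example in the comment above this function.
 */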
/*
 * When we are logging a new inode X, check if it doesn't have a reference that
 * matches the reference from some other inode Y created in a past transaction
 * and that was renamed in the current transaction. If we don't do this, then at
 * log replay time we can lose inode Y (and all its files if it's a directory):
 *
 * mkdir /mnt/x
 * echo "hello world" > /mnt/x/foobar
 * sync
 * mv /mnt/x /mnt/y
 * mkdir /mnt/x                 # or touch /mnt/x
 * xfs_io -c fsync /mnt/x
 * <power fail>
 * mount fs, trigger log replay
 *
 * After the log replay procedure, we would lose the first directory and all its
 * files (file foobar).
 * For the case where inode Y is not a directory we simply end up losing it:
 *
 * echo "123" > /mnt/foo
 * sync
 * mv /mnt/foo /mnt/bar
 * echo "abc" > /mnt/foo
 * xfs_io -c fsync /mnt/foo
 * <power fail>
 *
 * We also need this for cases where a snapshot entry is replaced by some other
 * entry (file or directory) otherwise we end up with an unreplayable log due to
 * attempts to delete the snapshot entry (entry of type BTRFS_ROOT_ITEM_KEY) as
 * if it were a regular entry:
 *
 * mkdir /mnt/x
 * btrfs subvolume snapshot /mnt /mnt/x/snap
 * btrfs subvolume delete /mnt/x/snap
 * rmdir /mnt/x
 * mkdir /mnt/x
 * fsync /mnt/x or fsync some new file inside it
 * <power fail>
 *
 * The snapshot delete, rmdir of x, mkdir of a new x and the fsync all happen in
 * the same transaction.
 */
static int btrfs_check_ref_name_override(struct extent_buffer *eb,
					 const int slot,
					 const struct btrfs_key *key,
					 struct btrfs_inode *inode,
					 u64 *other_ino, u64 *other_parent)
{
	int ret;
	struct btrfs_path *search_path;
	char *name = NULL;
	u32 name_len = 0;
	u32 item_size = btrfs_item_size_nr(eb, slot);
	u32 cur_offset = 0;
	unsigned long ptr = btrfs_item_ptr_offset(eb, slot);

	search_path = btrfs_alloc_path();
	if (!search_path)
		return -ENOMEM;
	search_path->search_commit_root = 1;
	search_path->skip_locking = 1;

	while (cur_offset < item_size) {
		u64 parent;
		u32 this_name_len;
		u32 this_len;
		unsigned long name_ptr;
		struct btrfs_dir_item *di;

		if (key->type == BTRFS_INODE_REF_KEY) {
			struct btrfs_inode_ref *iref;

			iref = (struct btrfs_inode_ref *)(ptr + cur_offset);
			parent = key->offset;
			this_name_len = btrfs_inode_ref_name_len(eb, iref);
			name_ptr = (unsigned long)(iref + 1);
			this_len = sizeof(*iref) + this_name_len;
		} else {
			struct btrfs_inode_extref *extref;

			extref = (struct btrfs_inode_extref *)(ptr +
							       cur_offset);
			parent = btrfs_inode_extref_parent(eb, extref);
			this_name_len = btrfs_inode_extref_name_len(eb, extref);
			name_ptr = (unsigned long)&extref->name;
			this_len = sizeof(*extref) + this_name_len;
		}

		if (this_name_len > name_len) {
			char *new_name;

			new_name = krealloc(name, this_name_len, GFP_NOFS);
			if (!new_name) {
				ret = -ENOMEM;
				goto out;
			}
			name_len = this_name_len;
			name = new_name;
		}

		read_extent_buffer(eb, name, name_ptr, this_name_len);
		di = btrfs_lookup_dir_item(NULL, inode->root, search_path,
					   parent, name, this_name_len, 0);
		if (di && !IS_ERR(di)) {
			struct btrfs_key di_key;

			btrfs_dir_item_key_to_cpu(search_path->nodes[0],
						  di, &di_key);
			if (di_key.type == BTRFS_INODE_ITEM_KEY) {
				if (di_key.objectid != key->objectid) {
					ret = 1;
					*other_ino = di_key.objectid;
					*other_parent = parent;
				} else {
					ret = 0;
				}
			} else {
				ret = -EAGAIN;
			}
			goto out;
		} else if (IS_ERR(di)) {
			ret = PTR_ERR(di);
			goto out;
		}
		btrfs_release_path(search_path);

		cur_offset += this_len;
	}
	ret = 0;
out:
	btrfs_free_path(search_path);
	kfree(name);
	return ret;
}
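/*
 * Illustrative userspace reproducer for the first scenario in the comment
 * above (a sketch, not part of the kernel sources); "/mnt" is assumed to be a
 * btrfs mount.
 */
#include <fcntl.h>
#include <stdio.h>
#include <sys/stat.h>
#include <unistd.h>

int reproduce_ref_override(void)
{
	int fd;

	mkdir("/mnt/x", 0755);			/* mkdir /mnt/x */
	fd = open("/mnt/x/foobar", O_CREAT | O_WRONLY, 0644);
	dprintf(fd, "hello world\n");		/* echo "hello world" > ... */
	close(fd);
	sync();					/* sync */
	rename("/mnt/x", "/mnt/y");		/* mv /mnt/x /mnt/y */
	mkdir("/mnt/x", 0755);			/* mkdir /mnt/x */
	fd = open("/mnt/x", O_RDONLY | O_DIRECTORY);
	fsync(fd);				/* xfs_io -c fsync /mnt/x */
	close(fd);
	/* <power fail>, then mount and trigger log replay */
	return 0;
}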
struct btrfs_ino_list {
	u64 ino;
	u64 parent;
	struct list_head list;
};
static int log_conflicting_inodes(struct btrfs_trans_handle *trans,
				  struct btrfs_root *root,
				  struct btrfs_path *path,
				  struct btrfs_log_ctx *ctx,
				  u64 ino, u64 parent)
{
	struct btrfs_ino_list *ino_elem;
	LIST_HEAD(inode_list);
	int ret = 0;

	ino_elem = kmalloc(sizeof(*ino_elem), GFP_NOFS);
	if (!ino_elem)
		return -ENOMEM;
	ino_elem->ino = ino;
	ino_elem->parent = parent;
	list_add_tail(&ino_elem->list, &inode_list);

	while (!list_empty(&inode_list)) {
		struct btrfs_fs_info *fs_info = root->fs_info;
		struct btrfs_key key;
		struct inode *inode;

		ino_elem = list_first_entry(&inode_list, struct btrfs_ino_list,
					    list);
		ino = ino_elem->ino;
		parent = ino_elem->parent;
		list_del(&ino_elem->list);
		kfree(ino_elem);
		if (ret)
			continue;

		btrfs_release_path(path);

		key.objectid = ino;
		key.type = BTRFS_INODE_ITEM_KEY;
		key.offset = 0;
		inode = btrfs_iget(fs_info->sb, &key, root, NULL);
		/*
		 * If the other inode that had a conflicting dir entry was
		 * deleted in the current transaction, we need to log its parent
		 * directory.
		 */
		if (IS_ERR(inode)) {
			ret = PTR_ERR(inode);
			if (ret == -ENOENT) {
				key.objectid = parent;
				inode = btrfs_iget(fs_info->sb, &key, root,
						   NULL);
				if (IS_ERR(inode)) {
					ret = PTR_ERR(inode);
				} else {
					ret = btrfs_log_inode(trans, root,
						      BTRFS_I(inode),
						      LOG_OTHER_INODE_ALL,
						      0, LLONG_MAX, ctx);
					iput(inode);
				}
			}
			continue;
		}
		/*
		 * We are safe logging the other inode without acquiring its
		 * lock as long as we log with the LOG_INODE_EXISTS mode. We
		 * are safe against concurrent renames of the other inode as
		 * well because during a rename we pin the log and update the
		 * log with the new name before we unpin it.
		 */
		ret = btrfs_log_inode(trans, root, BTRFS_I(inode),
				      LOG_OTHER_INODE, 0, LLONG_MAX, ctx);
		if (ret) {
			iput(inode);
			continue;
		}

		key.objectid = ino;
		key.type = BTRFS_INODE_REF_KEY;
		key.offset = 0;
		ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
		if (ret < 0) {
			iput(inode);
			continue;
		}

		while (true) {
			struct extent_buffer *leaf = path->nodes[0];
			int slot = path->slots[0];
			u64 other_ino = 0;
			u64 other_parent = 0;

			if (slot >= btrfs_header_nritems(leaf)) {
				ret = btrfs_next_leaf(root, path);
				if (ret < 0) {
					break;
				} else if (ret > 0) {
					ret = 0;
					break;
				}
				continue;
			}

			btrfs_item_key_to_cpu(leaf, &key, slot);
			if (key.objectid != ino ||
			    (key.type != BTRFS_INODE_REF_KEY &&
			     key.type != BTRFS_INODE_EXTREF_KEY)) {
				ret = 0;
				break;
			}

			ret = btrfs_check_ref_name_override(leaf, slot, &key,
					BTRFS_I(inode), &other_ino,
					&other_parent);
			if (ret < 0)
				break;
			if (ret > 0) {
				ino_elem = kmalloc(sizeof(*ino_elem), GFP_NOFS);
				if (!ino_elem) {
					ret = -ENOMEM;
					break;
				}
				ino_elem->ino = other_ino;
				ino_elem->parent = other_parent;
				list_add_tail(&ino_elem->list, &inode_list);
				ret = 0;
			}
			path->slots[0]++;
		}
		iput(inode);
	}

	return ret;
}
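/*
 * The loop above is a breadth-first traversal driven by a plain worklist of
 * heap-allocated nodes. A minimal self-contained sketch of the same pattern
 * (kernel context assumed for <linux/list.h>, kmalloc() and kfree(); the
 * struct and function names here are illustrative only):
 */
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/types.h>

struct work_node {
	u64 val;
	struct list_head list;
};

static void worklist_demo(u64 first)
{
	LIST_HEAD(todo);
	struct work_node *node;

	node = kmalloc(sizeof(*node), GFP_NOFS);
	if (!node)
		return;
	node->val = first;
	list_add_tail(&node->list, &todo);

	while (!list_empty(&todo)) {
		node = list_first_entry(&todo, struct work_node, list);
		list_del(&node->list);
		/* processing a node may list_add_tail() more nodes to &todo */
		kfree(node);
	}
}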
/* log a single inode in the tree log.
 * At least one parent directory for this inode must exist in the tree
 * or be logged already.
 *
 * Any items from this inode changed by the current transaction are copied
 * to the log tree. An extra reference is taken on any extents in this
 * file, allowing us to avoid a whole pile of corner cases around logging
 * blocks that have been removed from the tree.
 *
 * See LOG_INODE_ALL and related defines for a description of what inode_only
 * does.
 *
 * This handles both files and directories.
 */
static int btrfs_log_inode(struct btrfs_trans_handle *trans,
			   struct btrfs_root *root, struct btrfs_inode *inode,
			   int inode_only,
			   const loff_t start,
			   const loff_t end,
			   struct btrfs_log_ctx *ctx)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_path *path;
	struct btrfs_path *dst_path;
	struct btrfs_key min_key;
	struct btrfs_key max_key;
	struct btrfs_root *log = root->log_root;
	u64 last_extent = 0;
	int err = 0;
	int ret;
	int nritems;
	int ins_start_slot = 0;
	int ins_nr;
	bool fast_search = false;
	u64 ino = btrfs_ino(inode);
	struct extent_map_tree *em_tree = &inode->extent_tree;
	u64 logged_isize = 0;
	bool need_log_inode_item = true;
	bool xattrs_logged = false;
	bool recursive_logging = false;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;
	dst_path = btrfs_alloc_path();
	if (!dst_path) {
		btrfs_free_path(path);
		return -ENOMEM;
	}

	min_key.objectid = ino;
	min_key.type = BTRFS_INODE_ITEM_KEY;
	min_key.offset = 0;

	max_key.objectid = ino;

	/* today the code can only do partial logging of directories */
	if (S_ISDIR(inode->vfs_inode.i_mode) ||
	    (!test_bit(BTRFS_INODE_NEEDS_FULL_SYNC,
		       &inode->runtime_flags) &&
	     inode_only >= LOG_INODE_EXISTS))
		max_key.type = BTRFS_XATTR_ITEM_KEY;
	else
		max_key.type = (u8)-1;
	max_key.offset = (u64)-1;

	/*
	 * Only run delayed items if we are a dir or a new file.
	 * Otherwise commit the delayed inode only, which is needed in
	 * order for the log replay code to mark inodes for link count
	 * fixup (create temporary BTRFS_TREE_LOG_FIXUP_OBJECTID items).
	 */
	if (S_ISDIR(inode->vfs_inode.i_mode) ||
	    inode->generation > fs_info->last_trans_committed)
		ret = btrfs_commit_inode_delayed_items(trans, inode);
	else
		ret = btrfs_commit_inode_delayed_inode(inode);

	if (ret) {
		btrfs_free_path(path);
		btrfs_free_path(dst_path);
		return ret;
	}

	if (inode_only == LOG_OTHER_INODE || inode_only == LOG_OTHER_INODE_ALL) {
		recursive_logging = true;
		if (inode_only == LOG_OTHER_INODE)
			inode_only = LOG_INODE_EXISTS;
		else
			inode_only = LOG_INODE_ALL;
		mutex_lock_nested(&inode->log_mutex, SINGLE_DEPTH_NESTING);
	} else {
		mutex_lock(&inode->log_mutex);
	}

	/*
	 * a brute force approach to making sure we get the most uptodate
	 * copies of everything.
	 */
	if (S_ISDIR(inode->vfs_inode.i_mode)) {
		int max_key_type = BTRFS_DIR_LOG_INDEX_KEY;

		if (inode_only == LOG_INODE_EXISTS)
			max_key_type = BTRFS_XATTR_ITEM_KEY;
		ret = drop_objectid_items(trans, log, path, ino, max_key_type);
	} else {
		if (inode_only == LOG_INODE_EXISTS) {
			/*
			 * Make sure the new inode item we write to the log has
			 * the same isize as the current one (if it exists).
			 * This is necessary to prevent data loss after log
			 * replay, and also to prevent doing a wrong expanding
			 * truncate - for e.g. create file, write 4K into offset
			 * 0, fsync, write 4K into offset 4096, add hard link,
			 * fsync some other file (to sync log), power fail - if
			 * we use the inode's current i_size, after log replay
			 * we get a 8Kb file, with the last 4Kb extent as a hole
			 * (zeroes), as if an expanding truncate happened,
			 * instead of getting a file of 4Kb only.
			 */
			err = logged_inode_size(log, inode, path, &logged_isize);
			if (err)
				goto out_unlock;
		}
		if (test_bit(BTRFS_INODE_NEEDS_FULL_SYNC,
			     &inode->runtime_flags)) {
			if (inode_only == LOG_INODE_EXISTS) {
				max_key.type = BTRFS_XATTR_ITEM_KEY;
				ret = drop_objectid_items(trans, log, path, ino,
							  max_key.type);
			} else {
				clear_bit(BTRFS_INODE_NEEDS_FULL_SYNC,
					  &inode->runtime_flags);
				clear_bit(BTRFS_INODE_COPY_EVERYTHING,
					  &inode->runtime_flags);
				while (1) {
					ret = btrfs_truncate_inode_items(trans,
						log, &inode->vfs_inode, 0, 0);
					if (ret != -EAGAIN)
						break;
				}
			}
		} else if (test_and_clear_bit(BTRFS_INODE_COPY_EVERYTHING,
					      &inode->runtime_flags) ||
			   inode_only == LOG_INODE_EXISTS) {
			if (inode_only == LOG_INODE_ALL)
				fast_search = true;
			max_key.type = BTRFS_XATTR_ITEM_KEY;
			ret = drop_objectid_items(trans, log, path, ino,
						  max_key.type);
		} else {
			if (inode_only == LOG_INODE_ALL)
				fast_search = true;
			goto log_extents;
		}

	}
	if (ret) {
		err = ret;
		goto out_unlock;
	}

	while (1) {
		ins_nr = 0;
		ret = btrfs_search_forward(root, &min_key,
					   path, trans->transid);
		if (ret < 0) {
			err = ret;
			goto out_unlock;
		}
		if (ret != 0)
			break;
again:
		/* note, ins_nr might be > 0 here, cleanup outside the loop */
		if (min_key.objectid != ino)
			break;
		if (min_key.type > max_key.type)
			break;

		if (min_key.type == BTRFS_INODE_ITEM_KEY)
			need_log_inode_item = false;

		if ((min_key.type == BTRFS_INODE_REF_KEY ||
		     min_key.type == BTRFS_INODE_EXTREF_KEY) &&
		    inode->generation == trans->transid &&
		    !recursive_logging) {
			u64 other_ino = 0;
			u64 other_parent = 0;

			ret = btrfs_check_ref_name_override(path->nodes[0],
					path->slots[0], &min_key, inode,
					&other_ino, &other_parent);
			if (ret < 0) {
				err = ret;
				goto out_unlock;
			} else if (ret > 0 && ctx &&
				   other_ino != btrfs_ino(BTRFS_I(ctx->inode))) {
				if (ins_nr > 0) {
					ins_nr++;
				} else {
					ins_nr = 1;
					ins_start_slot = path->slots[0];
				}
				ret = copy_items(trans, inode, dst_path, path,
						 &last_extent, ins_start_slot,
						 ins_nr, inode_only,
						 logged_isize);
				if (ret < 0) {
					err = ret;
					goto out_unlock;
				}
				ins_nr = 0;

				err = log_conflicting_inodes(trans, root, path,
						ctx, other_ino, other_parent);
				if (err)
					goto out_unlock;
				btrfs_release_path(path);
				goto next_key;
			}
		}

		/* Skip xattrs, we log them later with btrfs_log_all_xattrs() */
		if (min_key.type == BTRFS_XATTR_ITEM_KEY) {
			if (ins_nr == 0)
				goto next_slot;
			ret = copy_items(trans, inode, dst_path, path,
					 &last_extent, ins_start_slot,
					 ins_nr, inode_only, logged_isize);
			if (ret < 0) {
				err = ret;
				goto out_unlock;
			}
			ins_nr = 0;
			if (ret) {
				btrfs_release_path(path);
				continue;
			}
			goto next_slot;
		}

		if (ins_nr && ins_start_slot + ins_nr == path->slots[0]) {
			ins_nr++;
			goto next_slot;
		} else if (!ins_nr) {
			ins_start_slot = path->slots[0];
			ins_nr = 1;
			goto next_slot;
		}

		ret = copy_items(trans, inode, dst_path, path, &last_extent,
				 ins_start_slot, ins_nr, inode_only,
				 logged_isize);
		if (ret < 0) {
			err = ret;
			goto out_unlock;
		}
		if (ret) {
			ins_nr = 0;
			btrfs_release_path(path);
			continue;
		}
		ins_nr = 1;
		ins_start_slot = path->slots[0];
next_slot:

		nritems = btrfs_header_nritems(path->nodes[0]);
		path->slots[0]++;
		if (path->slots[0] < nritems) {
			btrfs_item_key_to_cpu(path->nodes[0], &min_key,
					      path->slots[0]);
			goto again;
		}
		if (ins_nr) {
			ret = copy_items(trans, inode, dst_path, path,
					 &last_extent, ins_start_slot,
					 ins_nr, inode_only, logged_isize);
			if (ret < 0) {
				err = ret;
				goto out_unlock;
			}
			ret = 0;
			ins_nr = 0;
		}
		btrfs_release_path(path);
next_key:
		if (min_key.offset < (u64)-1) {
			min_key.offset++;
		} else if (min_key.type < max_key.type) {
			min_key.type++;
			min_key.offset = 0;
		} else {
			break;
		}
	}
	if (ins_nr) {
		ret = copy_items(trans, inode, dst_path, path, &last_extent,
				 ins_start_slot, ins_nr, inode_only,
				 logged_isize);
		if (ret < 0) {
			err = ret;
			goto out_unlock;
		}
		ret = 0;
		ins_nr = 0;
	}

	btrfs_release_path(path);
	btrfs_release_path(dst_path);
	err = btrfs_log_all_xattrs(trans, root, inode, path, dst_path);
	if (err)
		goto out_unlock;
	xattrs_logged = true;
	if (max_key.type >= BTRFS_EXTENT_DATA_KEY && !fast_search) {
		btrfs_release_path(path);
		btrfs_release_path(dst_path);
		err = btrfs_log_trailing_hole(trans, root, inode, path);
		if (err)
			goto out_unlock;
	}
log_extents:
	btrfs_release_path(path);
	btrfs_release_path(dst_path);
	if (need_log_inode_item) {
		err = log_inode_item(trans, log, dst_path, inode);
		if (!err && !xattrs_logged) {
			err = btrfs_log_all_xattrs(trans, root, inode, path,
						   dst_path);
			btrfs_release_path(path);
		}
		if (err)
			goto out_unlock;
	}
	if (fast_search) {
		ret = btrfs_log_changed_extents(trans, root, inode, dst_path,
						ctx, start, end);
		if (ret) {
			err = ret;
			goto out_unlock;
		}
	} else if (inode_only == LOG_INODE_ALL) {
		struct extent_map *em, *n;

		write_lock(&em_tree->lock);
		/*
		 * We can't just remove every em if we're called for a ranged
		 * fsync - that is, one that doesn't cover the whole possible
		 * file range (0 to LLONG_MAX). This is because we can have
		 * em's that fall outside the range we're logging and therefore
		 * their ordered operations haven't completed yet
		 * (btrfs_finish_ordered_io() not invoked yet). This means we
		 * didn't get their respective file extent item in the fs/subvol
		 * tree yet, and need to let the next fast fsync (one which
		 * consults the list of modified extent maps) find the em so
		 * that it logs a matching file extent item and waits for the
		 * respective ordered operation to complete (if it's still
		 * running).
		 *
		 * Removing every em outside the range we're logging would make
		 * the next fast fsync not log their matching file extent items,
		 * therefore making us lose data after a log replay.
		 */
		list_for_each_entry_safe(em, n, &em_tree->modified_extents,
					 list) {
			const u64 mod_end = em->mod_start + em->mod_len - 1;

			if (em->mod_start >= start && mod_end <= end)
				list_del_init(&em->list);
		}
		write_unlock(&em_tree->lock);
	}

	if (inode_only == LOG_INODE_ALL && S_ISDIR(inode->vfs_inode.i_mode)) {
		ret = log_directory_changes(trans, root, inode, path, dst_path,
					    ctx);
		if (ret) {
			err = ret;
			goto out_unlock;
		}
	}

	spin_lock(&inode->lock);
	inode->logged_trans = trans->transid;
	inode->last_log_commit = inode->last_sub_trans;
	spin_unlock(&inode->lock);
out_unlock:
	mutex_unlock(&inode->log_mutex);

	btrfs_free_path(path);
	btrfs_free_path(dst_path);
	return err;
}
/*
 * Check if we must fallback to a transaction commit when logging an inode.
 * This must be called after logging the inode and is used only in the context
 * when fsyncing an inode requires logging some other inode - in which case we
 * can't lock the i_mutex of each other inode we need to log as that can lead
 * to deadlocks with concurrent fsync against other inodes (as we can log
 * inodes up or down in the hierarchy) or rename operations for example. So we
 * take the log_mutex of the inode after we have logged it and then check for
 * its last_unlink_trans value - this is safe because any task setting
 * last_unlink_trans must take the log_mutex and it must do this before it does
 * the actual unlink operation, so if we do this check before a concurrent task
 * sets last_unlink_trans it means we've logged a consistent version/state of
 * all the inode items, otherwise we are not sure and must do a transaction
 * commit (the concurrent task might have only updated last_unlink_trans before
 * we logged the inode or it might have also done the unlink).
 */
static bool btrfs_must_commit_transaction(struct btrfs_trans_handle *trans,
					  struct btrfs_inode *inode)
{
	struct btrfs_fs_info *fs_info = inode->root->fs_info;
	bool ret = false;

	mutex_lock(&inode->log_mutex);
	if (inode->last_unlink_trans > fs_info->last_trans_committed) {
		/*
		 * Make sure any commits to the log are forced to be full
		 * commits.
		 */
		btrfs_set_log_full_commit(fs_info, trans);
		ret = true;
	}
	mutex_unlock(&inode->log_mutex);

	return ret;
}
/*
 * follow the dentry parent pointers up the chain and see if any
 * of the directories in it require a full commit before they can
 * be logged. Returns zero if nothing special needs to be done or 1 if
 * a full commit is required.
 */
static noinline int check_parent_dirs_for_sync(struct btrfs_trans_handle *trans,
					       struct btrfs_inode *inode,
					       struct dentry *parent,
					       struct super_block *sb,
					       u64 last_committed)
{
	int ret = 0;
	struct dentry *old_parent = NULL;
	struct btrfs_inode *orig_inode = inode;

	/*
	 * for regular files, if its inode is already on disk, we don't
	 * have to worry about the parents at all. This is because
	 * we can use the last_unlink_trans field to record renames
	 * and other fun in this file.
	 */
	if (S_ISREG(inode->vfs_inode.i_mode) &&
	    inode->generation <= last_committed &&
	    inode->last_unlink_trans <= last_committed)
		goto out;

	if (!S_ISDIR(inode->vfs_inode.i_mode)) {
		if (!parent || d_really_is_negative(parent) || sb != parent->d_sb)
			goto out;
		inode = BTRFS_I(d_inode(parent));
	}

	while (1) {
		/*
		 * If we are logging a directory then we start with our inode,
		 * not our parent's inode, so we need to skip setting the
		 * logged_trans so that further down in the log code we don't
		 * think this inode has already been logged.
		 */
		if (inode != orig_inode)
			inode->logged_trans = trans->transid;
		smp_mb();

		if (btrfs_must_commit_transaction(trans, inode)) {
			ret = 1;
			break;
		}

		if (!parent || d_really_is_negative(parent) || sb != parent->d_sb)
			break;

		if (IS_ROOT(parent)) {
			inode = BTRFS_I(d_inode(parent));
			if (btrfs_must_commit_transaction(trans, inode))
				ret = 1;
			break;
		}

		parent = dget_parent(parent);
		dput(old_parent);
		old_parent = parent;
		inode = BTRFS_I(d_inode(parent));

	}
	dput(old_parent);
out:
	return ret;
}
struct btrfs_dir_list {
	u64 ino;
	struct list_head list;
};
/*
 * Log the inodes of the new dentries of a directory. See log_dir_items() for
 * details about why it is needed.
 * This is a recursive operation - if an existing dentry corresponds to a
 * directory, that directory's new entries are logged too (same behaviour as
 * ext3/4, xfs, f2fs, reiserfs, nilfs2). Note that when logging the inodes
 * the dentries point to we do not lock their i_mutex, otherwise lockdep
 * complains about the following circular lock dependency / possible deadlock:
 *
 *           CPU0                                        CPU1
 *           ----                                        ----
 * lock(&type->i_mutex_dir_key#3/2);
 *                                            lock(sb_internal#2);
 *                                            lock(&type->i_mutex_dir_key#3/2);
 *                                            lock(&sb->s_type->i_mutex_key#14);
 *
 * Where sb_internal is the lock (a counter that works as a lock) acquired by
 * sb_start_intwrite() in btrfs_start_transaction().
 * Not locking i_mutex of the inodes is still safe because:
 *
 * 1) For regular files we log with a mode of LOG_INODE_EXISTS. It's possible
 * that while logging the inode new references (names) are added or removed
 * from the inode, leaving the logged inode item with a link count that does
 * not match the number of logged inode reference items. This is fine because
 * at log replay time we compute the real number of links and correct the
 * link count in the inode item (see replay_one_buffer() and
 * link_to_fixup_dir());
 *
 * 2) For directories we log with a mode of LOG_INODE_ALL. It's possible that
 * while logging the inode's items new items with keys BTRFS_DIR_ITEM_KEY and
 * BTRFS_DIR_INDEX_KEY are added to fs/subvol tree and the logged inode item
 * has a size that doesn't match the sum of the lengths of all the logged
 * names. This does not result in a problem because if a dir_item key is
 * logged but its matching dir_index key is not logged, at log replay time we
 * don't use it to replay the respective name (see replay_one_name()). On the
 * other hand if only the dir_index key ends up being logged, the respective
 * name is added to the fs/subvol tree with both the dir_item and dir_index
 * keys created (see replay_one_name()).
 * The directory's inode item with a wrong i_size is not a problem as well,
 * since we don't use it at log replay time to set the i_size in the inode
 * item of the fs/subvol tree (see overwrite_item()).
 */
static int log_new_dir_dentries(struct btrfs_trans_handle *trans,
				struct btrfs_root *root,
				struct btrfs_inode *start_inode,
				struct btrfs_log_ctx *ctx)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_root *log = root->log_root;
	struct btrfs_path *path;
	LIST_HEAD(dir_list);
	struct btrfs_dir_list *dir_elem;
	int ret = 0;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	dir_elem = kmalloc(sizeof(*dir_elem), GFP_NOFS);
	if (!dir_elem) {
		btrfs_free_path(path);
		return -ENOMEM;
	}
	dir_elem->ino = btrfs_ino(start_inode);
	list_add_tail(&dir_elem->list, &dir_list);

	while (!list_empty(&dir_list)) {
		struct extent_buffer *leaf;
		struct btrfs_key min_key;
		int nritems;
		int i;

		dir_elem = list_first_entry(&dir_list, struct btrfs_dir_list,
					    list);
		if (ret)
			goto next_dir_inode;

		min_key.objectid = dir_elem->ino;
		min_key.type = BTRFS_DIR_ITEM_KEY;
		min_key.offset = 0;
again:
		btrfs_release_path(path);
		ret = btrfs_search_forward(log, &min_key, path, trans->transid);
		if (ret < 0) {
			goto next_dir_inode;
		} else if (ret > 0) {
			ret = 0;
			goto next_dir_inode;
		}

process_leaf:
		leaf = path->nodes[0];
		nritems = btrfs_header_nritems(leaf);
		for (i = path->slots[0]; i < nritems; i++) {
			struct btrfs_dir_item *di;
			struct btrfs_key di_key;
			struct inode *di_inode;
			struct btrfs_dir_list *new_dir_elem;
			int log_mode = LOG_INODE_EXISTS;
			int type;

			btrfs_item_key_to_cpu(leaf, &min_key, i);
			if (min_key.objectid != dir_elem->ino ||
			    min_key.type != BTRFS_DIR_ITEM_KEY)
				goto next_dir_inode;

			di = btrfs_item_ptr(leaf, i, struct btrfs_dir_item);
			type = btrfs_dir_type(leaf, di);
			if (btrfs_dir_transid(leaf, di) < trans->transid &&
			    type != BTRFS_FT_DIR)
				continue;
			btrfs_dir_item_key_to_cpu(leaf, di, &di_key);
			if (di_key.type == BTRFS_ROOT_ITEM_KEY)
				continue;

			btrfs_release_path(path);
			di_inode = btrfs_iget(fs_info->sb, &di_key, root, NULL);
			if (IS_ERR(di_inode)) {
				ret = PTR_ERR(di_inode);
				goto next_dir_inode;
			}

			if (btrfs_inode_in_log(BTRFS_I(di_inode), trans->transid)) {
				iput(di_inode);
				break;
			}

			ctx->log_new_dentries = false;
			if (type == BTRFS_FT_DIR || type == BTRFS_FT_SYMLINK)
				log_mode = LOG_INODE_ALL;
			ret = btrfs_log_inode(trans, root, BTRFS_I(di_inode),
					      log_mode, 0, LLONG_MAX, ctx);
			if (!ret &&
			    btrfs_must_commit_transaction(trans, BTRFS_I(di_inode)))
				ret = 1;
			iput(di_inode);
			if (ret)
				goto next_dir_inode;
			if (ctx->log_new_dentries) {
				new_dir_elem = kmalloc(sizeof(*new_dir_elem),
						       GFP_NOFS);
				if (!new_dir_elem) {
					ret = -ENOMEM;
					goto next_dir_inode;
				}
				new_dir_elem->ino = di_key.objectid;
				list_add_tail(&new_dir_elem->list, &dir_list);
			}
			break;
		}
		if (i == nritems) {
			ret = btrfs_next_leaf(log, path);
			if (ret < 0) {
				goto next_dir_inode;
			} else if (ret > 0) {
				ret = 0;
				goto next_dir_inode;
			}
			goto process_leaf;
		}
		if (min_key.offset < (u64)-1) {
			min_key.offset++;
			goto again;
		}
next_dir_inode:
		list_del(&dir_elem->list);
		kfree(dir_elem);
	}

	btrfs_free_path(path);
	return ret;
}
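/*
 * Illustrative userspace scenario for the recursive logging above (a sketch,
 * not part of the kernel sources): fsyncing a directory must also make the
 * inodes behind its new dentries, including nested new directories, survive
 * log replay. "/mnt" is assumed to be a btrfs mount.
 */
#include <fcntl.h>
#include <sys/stat.h>
#include <unistd.h>

int fsync_new_dentries(void)
{
	int fd;

	mkdir("/mnt/dir", 0755);
	mkdir("/mnt/dir/subdir", 0755);
	fd = open("/mnt/dir/subdir/file", O_CREAT | O_WRONLY, 0644);
	close(fd);
	fd = open("/mnt/dir", O_RDONLY | O_DIRECTORY);
	fsync(fd);	/* must also log "subdir" and "file" */
	close(fd);
	/* <power fail>; after log replay the whole hierarchy must exist */
	return 0;
}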
static int btrfs_log_all_parents(struct btrfs_trans_handle *trans,
				 struct btrfs_inode *inode,
				 struct btrfs_log_ctx *ctx)
{
	struct btrfs_fs_info *fs_info = trans->fs_info;
	int ret;
	struct btrfs_path *path;
	struct btrfs_key key;
	struct btrfs_root *root = inode->root;
	const u64 ino = btrfs_ino(inode);

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;
	path->skip_locking = 1;
	path->search_commit_root = 1;

	key.objectid = ino;
	key.type = BTRFS_INODE_REF_KEY;
	key.offset = 0;
	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	if (ret < 0)
		goto out;

	while (true) {
		struct extent_buffer *leaf = path->nodes[0];
		int slot = path->slots[0];
		u32 cur_offset = 0;
		u32 item_size;
		unsigned long ptr;

		if (slot >= btrfs_header_nritems(leaf)) {
			ret = btrfs_next_leaf(root, path);
			if (ret < 0)
				goto out;
			else if (ret > 0)
				break;
			continue;
		}

		btrfs_item_key_to_cpu(leaf, &key, slot);
		/* BTRFS_INODE_EXTREF_KEY is BTRFS_INODE_REF_KEY + 1 */
		if (key.objectid != ino || key.type > BTRFS_INODE_EXTREF_KEY)
			break;

		item_size = btrfs_item_size_nr(leaf, slot);
		ptr = btrfs_item_ptr_offset(leaf, slot);
		while (cur_offset < item_size) {
			struct btrfs_key inode_key;
			struct inode *dir_inode;

			inode_key.type = BTRFS_INODE_ITEM_KEY;
			inode_key.offset = 0;

			if (key.type == BTRFS_INODE_EXTREF_KEY) {
				struct btrfs_inode_extref *extref;

				extref = (struct btrfs_inode_extref *)
					(ptr + cur_offset);
				inode_key.objectid = btrfs_inode_extref_parent(
					leaf, extref);
				cur_offset += sizeof(*extref);
				cur_offset += btrfs_inode_extref_name_len(leaf,
					extref);
			} else {
				inode_key.objectid = key.offset;
				cur_offset = item_size;
			}

			dir_inode = btrfs_iget(fs_info->sb, &inode_key,
					       root, NULL);
			/*
			 * If the parent inode was deleted, return an error to
			 * fallback to a transaction commit. This is to prevent
			 * getting an inode that was moved from one parent A to
			 * a parent B, got its former parent A deleted and then
			 * it got fsync'ed, from existing at both parents after
			 * a log replay (and the old parent still existing).
			 * Example:
			 *
			 * mkdir /mnt/A
			 * mkdir /mnt/B
			 * touch /mnt/B/bar
			 * sync
			 * mv /mnt/B/bar /mnt/A/bar
			 * mv -T /mnt/A /mnt/B
			 * fsync /mnt/B/bar
			 * <power fail>
			 *
			 * If we ignore the old parent B which got deleted,
			 * after a log replay we would have file bar linked
			 * at both parents and the old parent B would still
			 * exist.
			 */
			if (IS_ERR(dir_inode)) {
				ret = PTR_ERR(dir_inode);
				goto out;
			}

			if (ctx)
				ctx->log_new_dentries = false;
			ret = btrfs_log_inode(trans, root, BTRFS_I(dir_inode),
					      LOG_INODE_ALL, 0, LLONG_MAX, ctx);
			if (!ret &&
			    btrfs_must_commit_transaction(trans, BTRFS_I(dir_inode)))
				ret = 1;
			if (!ret && ctx && ctx->log_new_dentries)
				ret = log_new_dir_dentries(trans, root,
						   BTRFS_I(dir_inode), ctx);
			iput(dir_inode);
			if (ret)
				goto out;
		}
		path->slots[0]++;
	}
	ret = 0;
out:
	btrfs_free_path(path);
	return ret;
}
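/*
 * Illustrative userspace reproducer for the parent-deleted example in the
 * comment above (a sketch, not part of the kernel sources); "/mnt" is assumed
 * to be a btrfs mount.
 */
#include <fcntl.h>
#include <stdio.h>
#include <sys/stat.h>
#include <unistd.h>

int reproduce_old_parent_deleted(void)
{
	int fd;

	mkdir("/mnt/A", 0755);
	mkdir("/mnt/B", 0755);
	fd = open("/mnt/B/bar", O_CREAT | O_WRONLY, 0644);
	close(fd);
	sync();
	rename("/mnt/B/bar", "/mnt/A/bar");	/* mv /mnt/B/bar /mnt/A/bar */
	rename("/mnt/A", "/mnt/B");		/* mv -T /mnt/A /mnt/B */
	fd = open("/mnt/B/bar", O_WRONLY);
	fsync(fd);				/* fsync /mnt/B/bar */
	close(fd);
	/* <power fail>; bar must exist only under the new /mnt/B */
	return 0;
}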
/*
 * helper function around btrfs_log_inode to make sure newly created
 * parent directories also end up in the log. Only a minimal inode and
 * backref logging is done for any parent directories that are older than
 * the last committed transaction.
 */
static int btrfs_log_inode_parent(struct btrfs_trans_handle *trans,
				  struct btrfs_inode *inode,
				  struct dentry *parent,
				  const loff_t start,
				  const loff_t end,
				  int inode_only,
				  struct btrfs_log_ctx *ctx)
{
	struct btrfs_root *root = inode->root;
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct super_block *sb;
	struct dentry *old_parent = NULL;
	int ret = 0;
	u64 last_committed = fs_info->last_trans_committed;
	bool log_dentries = false;
	struct btrfs_inode *orig_inode = inode;

	sb = inode->vfs_inode.i_sb;

	if (btrfs_test_opt(fs_info, NOTREELOG)) {
		ret = 1;
		goto end_no_trans;
	}

	/*
	 * The prev transaction commit doesn't complete, we need do
	 * full commit by ourselves.
	 */
	if (fs_info->last_trans_log_full_commit >
	    fs_info->last_trans_committed) {
		ret = 1;
		goto end_no_trans;
	}

	if (btrfs_root_refs(&root->root_item) == 0) {
		ret = 1;
		goto end_no_trans;
	}

	ret = check_parent_dirs_for_sync(trans, inode, parent, sb,
					 last_committed);
	if (ret)
		goto end_no_trans;

	/*
	 * Skip already logged inodes or inodes corresponding to tmpfiles
	 * (since logging them is pointless, a link count of 0 means they
	 * will never be accessible).
	 */
	if (btrfs_inode_in_log(inode, trans->transid) ||
	    inode->vfs_inode.i_nlink == 0) {
		ret = BTRFS_NO_LOG_SYNC;
		goto end_no_trans;
	}

	ret = start_log_trans(trans, root, ctx);
	if (ret)
		goto end_no_trans;

	ret = btrfs_log_inode(trans, root, inode, inode_only, start, end, ctx);
	if (ret)
		goto end_trans;

	/*
	 * for regular files, if its inode is already on disk, we don't
	 * have to worry about the parents at all. This is because
	 * we can use the last_unlink_trans field to record renames
	 * and other fun in this file.
	 */
	if (S_ISREG(inode->vfs_inode.i_mode) &&
	    inode->generation <= last_committed &&
	    inode->last_unlink_trans <= last_committed) {
		ret = 0;
		goto end_trans;
	}

	if (S_ISDIR(inode->vfs_inode.i_mode) && ctx && ctx->log_new_dentries)
		log_dentries = true;

	/*
	 * On unlink we must make sure all our current and old parent directory
	 * inodes are fully logged. This is to prevent leaving dangling
	 * directory index entries in directories that were our parents but are
	 * not anymore. Not doing this results in old parent directory being
	 * impossible to delete after log replay (rmdir will always fail with
	 * error -ENOTEMPTY).
	 *
	 * Example 1:
	 *
	 * mkdir testdir
	 * touch testdir/foo
	 * ln testdir/foo testdir/bar
	 * sync
	 * unlink testdir/bar
	 * xfs_io -c fsync testdir/foo
	 * <power failure>
	 * mount fs, triggers log replay
	 *
	 * If we don't log the parent directory (testdir), after log replay the
	 * directory still has an entry pointing to the file inode using the bar
	 * name, but a matching BTRFS_INODE_[REF|EXTREF]_KEY does not exist and
	 * the file inode has a link count of 1.
	 *
	 * Example 2:
	 *
	 * mkdir testdir
	 * touch foo
	 * ln foo testdir/foo2
	 * ln foo testdir/foo3
	 * sync
	 * unlink testdir/foo3
	 * xfs_io -c fsync foo
	 * <power failure>
	 * mount fs, triggers log replay
	 *
	 * Similar as the first example, after log replay the parent directory
	 * testdir still has an entry pointing to the inode file with name foo3
	 * but the file inode does not have a matching BTRFS_INODE_REF_KEY item
	 * and has a link count of 2.
	 */
	if (inode->last_unlink_trans > last_committed) {
		ret = btrfs_log_all_parents(trans, orig_inode, ctx);
		if (ret)
			goto end_trans;
	}

	/*
	 * If a new hard link was added to the inode in the current transaction
	 * and its link count is now greater than 1, we need to fallback to a
	 * transaction commit, otherwise we can end up not logging all its new
	 * parents for all the hard links. Here just from the dentry used to
	 * fsync, we can not visit the ancestor inodes for all the other hard
	 * links to figure out if any is new, so we fallback to a transaction
	 * commit (instead of adding a lot of complexity of scanning a btree,
	 * since this scenario is not a common use case).
	 */
	if (inode->vfs_inode.i_nlink > 1 &&
	    inode->last_link_trans > last_committed) {
		ret = -EMLINK;
		goto end_trans;
	}

	while (1) {
		if (!parent || d_really_is_negative(parent) || sb != parent->d_sb)
			break;

		inode = BTRFS_I(d_inode(parent));
		if (root != inode->root)
			break;

		if (inode->generation > last_committed) {
			ret = btrfs_log_inode(trans, root, inode,
					      LOG_INODE_EXISTS, 0, LLONG_MAX, ctx);
			if (ret)
				goto end_trans;
		}
		if (IS_ROOT(parent))
			break;

		parent = dget_parent(parent);
		dput(old_parent);
		old_parent = parent;
	}
	if (log_dentries)
		ret = log_new_dir_dentries(trans, root, orig_inode, ctx);
	else
		ret = 0;
end_trans:
	dput(old_parent);
	if (ret < 0) {
		btrfs_set_log_full_commit(fs_info, trans);
		ret = 1;
	}

	if (ret)
		btrfs_remove_log_ctx(root, ctx);
	btrfs_end_log_trans(root);
end_no_trans:
	return ret;
}
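/*
 * Illustrative userspace reproducer for "Example 1" above (a sketch, not part
 * of the kernel sources); the current directory is assumed to be on btrfs.
 */
#include <fcntl.h>
#include <sys/stat.h>
#include <unistd.h>

int reproduce_dangling_dir_index(void)
{
	int fd;

	mkdir("testdir", 0755);
	fd = open("testdir/foo", O_CREAT | O_WRONLY, 0644);
	close(fd);
	link("testdir/foo", "testdir/bar");	/* ln testdir/foo testdir/bar */
	sync();
	unlink("testdir/bar");
	fd = open("testdir/foo", O_WRONLY);
	fsync(fd);				/* xfs_io -c fsync testdir/foo */
	close(fd);
	/* <power failure>; after replay, no dangling "bar" entry may remain */
	return 0;
}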
/*
 * it is not safe to log dentry if the chunk root has added new
 * chunks. This returns 0 if the dentry was logged, and 1 otherwise.
 * If this returns 1, you must commit the transaction to safely get your
 * data on disk.
 */
int btrfs_log_dentry_safe(struct btrfs_trans_handle *trans,
			  struct dentry *dentry,
			  const loff_t start,
			  const loff_t end,
			  struct btrfs_log_ctx *ctx)
{
	struct dentry *parent = dget_parent(dentry);
	int ret;

	ret = btrfs_log_inode_parent(trans, BTRFS_I(d_inode(dentry)), parent,
				     start, end, LOG_INODE_ALL, ctx);
	dput(parent);

	return ret;
}
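/*
 * Sketch of the caller pattern implied by the comment above (assumptions: the
 * real caller is the fsync path in file.c; error handling and the
 * BTRFS_NO_LOG_SYNC case are heavily condensed here for illustration):
 */
static int fsync_sketch(struct btrfs_trans_handle *trans,
			struct btrfs_root *root, struct dentry *dentry,
			loff_t start, loff_t end, struct btrfs_log_ctx *ctx)
{
	int ret = btrfs_log_dentry_safe(trans, dentry, start, end, ctx);

	if (ret == 0) {
		/* dentry was logged; syncing the log is enough */
		return btrfs_sync_log(trans, root, ctx);
	}
	/* logging was not safe (or failed): fall back to a full commit */
	return btrfs_commit_transaction(trans);
}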
/*
 * should be called during mount to recover and replay any log trees
 * from the FS
 */
int btrfs_recover_log_trees(struct btrfs_root *log_root_tree)
{
	int ret;
	struct btrfs_path *path;
	struct btrfs_trans_handle *trans;
	struct btrfs_key key;
	struct btrfs_key found_key;
	struct btrfs_key tmp_key;
	struct btrfs_root *log;
	struct btrfs_fs_info *fs_info = log_root_tree->fs_info;
	struct walk_control wc = {
		.process_func = process_one_buffer,
		.stage = LOG_WALK_PIN_ONLY,
	};

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	set_bit(BTRFS_FS_LOG_RECOVERING, &fs_info->flags);

	trans = btrfs_start_transaction(fs_info->tree_root, 0);
	if (IS_ERR(trans)) {
		ret = PTR_ERR(trans);
		goto error;
	}

	wc.trans = trans;
	wc.pin = 1;

	ret = walk_log_tree(trans, log_root_tree, &wc);
	if (ret) {
		btrfs_handle_fs_error(fs_info, ret,
			"Failed to pin buffers while recovering log root tree.");
		goto error;
	}

again:
	key.objectid = BTRFS_TREE_LOG_OBJECTID;
	key.offset = (u64)-1;
	key.type = BTRFS_ROOT_ITEM_KEY;

	while (1) {
		ret = btrfs_search_slot(NULL, log_root_tree, &key, path, 0, 0);

		if (ret < 0) {
			btrfs_handle_fs_error(fs_info, ret,
				    "Couldn't find tree log root.");
			goto error;
		}
		if (ret > 0) {
			if (path->slots[0] == 0)
				break;
			path->slots[0]--;
		}
		btrfs_item_key_to_cpu(path->nodes[0], &found_key,
				      path->slots[0]);
		btrfs_release_path(path);
		if (found_key.objectid != BTRFS_TREE_LOG_OBJECTID)
			break;

		log = btrfs_read_fs_root(log_root_tree, &found_key);
		if (IS_ERR(log)) {
			ret = PTR_ERR(log);
			btrfs_handle_fs_error(fs_info, ret,
				    "Couldn't read tree log root.");
			goto error;
		}

		tmp_key.objectid = found_key.offset;
		tmp_key.type = BTRFS_ROOT_ITEM_KEY;
		tmp_key.offset = (u64)-1;

		wc.replay_dest = btrfs_read_fs_root_no_name(fs_info, &tmp_key);
		if (IS_ERR(wc.replay_dest)) {
			ret = PTR_ERR(wc.replay_dest);
			free_extent_buffer(log->node);
			free_extent_buffer(log->commit_root);
			kfree(log);
			btrfs_handle_fs_error(fs_info, ret,
				"Couldn't read target root for tree log recovery.");
			goto error;
		}

		wc.replay_dest->log_root = log;
		btrfs_record_root_in_trans(trans, wc.replay_dest);
		ret = walk_log_tree(trans, log, &wc);

		if (!ret && wc.stage == LOG_WALK_REPLAY_ALL) {
			ret = fixup_inode_link_counts(trans, wc.replay_dest,
						      path);
		}

		if (!ret && wc.stage == LOG_WALK_REPLAY_ALL) {
			struct btrfs_root *root = wc.replay_dest;

			btrfs_release_path(path);

			/*
			 * We have just replayed everything, and the highest
			 * objectid of fs roots probably has changed in case
			 * some inode_item's got replayed.
			 *
			 * root->objectid_mutex is not acquired as log replay
			 * could only happen during mount.
			 */
			ret = btrfs_find_highest_objectid(root,
						  &root->highest_objectid);
		}

		key.offset = found_key.offset - 1;
		wc.replay_dest->log_root = NULL;
		free_extent_buffer(log->node);
		free_extent_buffer(log->commit_root);
		kfree(log);

		if (ret)
			goto error;

		if (found_key.offset == 0)
			break;
	}
	btrfs_release_path(path);

	/* step one is to pin it all, step two is to replay just inodes */
	if (wc.pin) {
		wc.pin = 0;
		wc.process_func = replay_one_buffer;
		wc.stage = LOG_WALK_REPLAY_INODES;
		goto again;
	}
	/* step three is to replay everything */
	if (wc.stage < LOG_WALK_REPLAY_ALL) {
		wc.stage++;
		goto again;
	}

	btrfs_free_path(path);

	/* step 4: commit the transaction, which also unpins the blocks */
	ret = btrfs_commit_transaction(trans);
	if (ret)
		return ret;

	free_extent_buffer(log_root_tree->node);
	log_root_tree->log_root = NULL;
	clear_bit(BTRFS_FS_LOG_RECOVERING, &fs_info->flags);
	kfree(log_root_tree);

	return 0;
error:
	if (wc.trans)
		btrfs_end_transaction(wc.trans);
	btrfs_free_path(path);
	return ret;
}
/*
 * there are some corner cases where we want to force a full
 * commit instead of allowing a directory to be logged.
 *
 * They revolve around files that were unlinked from the directory, and
 * this function updates the parent directory so that a full commit is
 * properly done if it is fsync'd later after the unlinks are done.
 *
 * Must be called before the unlink operations (updates to the subvolume tree,
 * inodes, etc) are done.
 */
void btrfs_record_unlink_dir(struct btrfs_trans_handle *trans,
			     struct btrfs_inode *dir, struct btrfs_inode *inode,
			     int for_rename)
{
	/*
	 * when we're logging a file, if it hasn't been renamed
	 * or unlinked, and its inode is fully committed on disk,
	 * we don't have to worry about walking up the directory chain
	 * to log its parents.
	 *
	 * So, we use the last_unlink_trans field to put this transid
	 * into the file. When the file is logged we check it and
	 * don't log the parents if the file is fully on disk.
	 */
	mutex_lock(&inode->log_mutex);
	inode->last_unlink_trans = trans->transid;
	mutex_unlock(&inode->log_mutex);

	/*
	 * if this directory was already logged any new
	 * names for this file/dir will get recorded
	 */
	smp_mb();
	if (dir->logged_trans == trans->transid)
		return;

	/*
	 * if the inode we're about to unlink was logged,
	 * the log will be properly updated for any new names
	 */
	if (inode->logged_trans == trans->transid)
		return;

	/*
	 * when renaming files across directories, if the directory
	 * we're unlinking from gets fsync'd later on, there's
	 * no way to find the destination directory later and fsync it
	 * properly. So, we have to be conservative and force commits
	 * so the new name gets discovered.
	 */
	if (for_rename)
		goto record;

	/* we can safely do the unlink without any special recording */
	return;

record:
	mutex_lock(&dir->log_mutex);
	dir->last_unlink_trans = trans->transid;
	mutex_unlock(&dir->log_mutex);
}
/*
 * Make sure that if someone attempts to fsync the parent directory of a deleted
 * snapshot, it ends up triggering a transaction commit. This is to guarantee
 * that after replaying the log tree of the parent directory's root we will not
 * see the snapshot anymore and at log replay time we will not see any log tree
 * corresponding to the deleted snapshot's root, which could lead to replaying
 * it after replaying the log tree of the parent directory (which would replay
 * the snapshot delete operation).
 *
 * Must be called before the actual snapshot destroy operation (updates to the
 * parent root and tree of tree roots trees, etc) are done.
 */
void btrfs_record_snapshot_destroy(struct btrfs_trans_handle *trans,
				   struct btrfs_inode *dir)
{
	mutex_lock(&dir->log_mutex);
	dir->last_unlink_trans = trans->transid;
	mutex_unlock(&dir->log_mutex);
}
/*
 * Call this after adding a new name for a file and it will properly
 * update the log to reflect the new name.
 *
 * @ctx can not be NULL when @sync_log is false, and should be NULL when it's
 * true (because it's not used).
 *
 * Return value depends on whether @sync_log is true or false.
 * When true:     returns BTRFS_NEED_TRANS_COMMIT if the transaction needs to be
 *                committed by the caller, and BTRFS_DONT_NEED_TRANS_COMMIT
 *                otherwise.
 * When false:    returns BTRFS_DONT_NEED_LOG_SYNC if the caller does not need
 *                to sync the log, BTRFS_NEED_LOG_SYNC if it needs to sync the
 *                log, or BTRFS_NEED_TRANS_COMMIT if the transaction needs to be
 *                committed (without attempting to sync the log).
 */
int btrfs_log_new_name(struct btrfs_trans_handle *trans,
		       struct btrfs_inode *inode, struct btrfs_inode *old_dir,
		       struct dentry *parent,
		       bool sync_log, struct btrfs_log_ctx *ctx)
{
	struct btrfs_fs_info *fs_info = trans->fs_info;
	int ret;

	/*
	 * this will force the logging code to walk the dentry chain
	 * up to the root
	 */
	if (!S_ISDIR(inode->vfs_inode.i_mode))
		inode->last_unlink_trans = trans->transid;

	/*
	 * if this inode hasn't been logged and directory we're renaming it
	 * from hasn't been logged, we don't need to log it
	 */
	if (inode->logged_trans <= fs_info->last_trans_committed &&
	    (!old_dir || old_dir->logged_trans <= fs_info->last_trans_committed))
		return sync_log ? BTRFS_DONT_NEED_TRANS_COMMIT :
			BTRFS_DONT_NEED_LOG_SYNC;

	if (sync_log) {
		struct btrfs_log_ctx ctx2;

		btrfs_init_log_ctx(&ctx2, &inode->vfs_inode);
		ret = btrfs_log_inode_parent(trans, inode, parent, 0, LLONG_MAX,
					     LOG_INODE_EXISTS, &ctx2);
		if (ret == BTRFS_NO_LOG_SYNC)
			return BTRFS_DONT_NEED_TRANS_COMMIT;
		else if (ret)
			return BTRFS_NEED_TRANS_COMMIT;

		ret = btrfs_sync_log(trans, inode->root, &ctx2);
		if (ret)
			return BTRFS_NEED_TRANS_COMMIT;
		return BTRFS_DONT_NEED_TRANS_COMMIT;
	}

	ASSERT(ctx);
	ret = btrfs_log_inode_parent(trans, inode, parent, 0, LLONG_MAX,
				     LOG_INODE_EXISTS, ctx);
	if (ret == BTRFS_NO_LOG_SYNC)
		return BTRFS_DONT_NEED_LOG_SYNC;
	else if (ret)
		return BTRFS_NEED_TRANS_COMMIT;

	return BTRFS_NEED_LOG_SYNC;
}
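/*
 * Sketch of how a caller is expected to act on the return values documented
 * above btrfs_log_new_name() (assumptions: modelled on the rename/link paths;
 * the function name and condensed error handling here are illustrative only):
 */
static void new_name_logged_sketch(struct btrfs_trans_handle *trans,
				   struct btrfs_inode *inode,
				   struct btrfs_inode *old_dir,
				   struct dentry *parent)
{
	/* sync_log == true: ask btrfs_log_new_name() to sync the log itself */
	int ret = btrfs_log_new_name(trans, inode, old_dir, parent, true, NULL);

	if (ret == BTRFS_NEED_TRANS_COMMIT)
		btrfs_commit_transaction(trans);
	/* BTRFS_DONT_NEED_TRANS_COMMIT: nothing left to do */
}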