/*
 * Copyright (C) 2008 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 021110-1307, USA.
 */
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/blkdev.h>
#include <linux/list_sort.h>
#include "tree-log.h"
#include "disk-io.h"
#include "locking.h"
#include "print-tree.h"
#include "backref.h"
#include "hash.h"
#include "compression.h"
#include "qgroup.h"
#include "inode-map.h"
/*
 * magic values for the inode_only field in btrfs_log_inode:
 *
 * LOG_INODE_ALL means to log everything
 * LOG_INODE_EXISTS means to log just enough to recreate the inode
 * during log replay
 */
#define LOG_INODE_ALL 0
#define LOG_INODE_EXISTS 1
#define LOG_OTHER_INODE 2
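/*
 * LOG_OTHER_INODE is presumably used when logging an inode other than the
 * one fsync was called on (for example a conflicting inode discovered while
 * logging), so that only a reduced amount of its state is logged; this
 * description is inferred from the callers, not from an original comment.
 */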
/*
 * directory trouble cases
 *
 * 1) on rename or unlink, if the inode being unlinked isn't in the fsync
 * log, we must force a full commit before doing an fsync of the directory
 * where the unlink was done.
 * ---> record transid of last unlink/rename per directory
 *
 * mkdir foo/some_dir
 * normal commit
 * rename foo/some_dir foo2/some_dir
 * mkdir foo/some_dir
 * fsync foo/some_dir/some_file
 *
 * The fsync above will unlink the original some_dir without recording
 * it in its new location (foo2).  After a crash, some_dir will be gone
 * unless the fsync of some_file forces a full commit
 *
 * 2) we must log any new names for any file or dir that is in the fsync
 * log. ---> check inode while renaming/linking.
 *
 * 2a) we must log any new names for any file or dir during rename
 * when the directory they are being removed from was logged.
 * ---> check inode and old parent dir during rename
 *
 * 2a is actually the more important variant.  Without the extra logging
 * a crash might unlink the old name without recreating the new one
 *
 * 3) after a crash, we must go through any directories with a link count
 * of zero and redo the rm -rf
 *
 * mkdir f1/foo
 * normal commit
 * rm -rf f1/foo
 * fsync(f1)
 *
 * The directory f1 was fully removed from the FS, but fsync was never
 * called on f1, only its parent dir.  After a crash the rm -rf must
 * be replayed.  This must be able to recurse down the entire
 * directory tree.  The inode link count fixup code takes care of the
 * ugly details.
 */
/*
 * stages for the tree walking.  The first
 * stage (0) is to only pin down the blocks we find
 * the second stage (1) is to make sure that all the inodes
 * we find in the log are created in the subvolume.
 *
 * The last stage is to deal with directories and links and extents
 * and all the other fun semantics
 */
#define LOG_WALK_PIN_ONLY	0
#define LOG_WALK_REPLAY_INODES	1
#define LOG_WALK_REPLAY_DIR_INDEX	2
#define LOG_WALK_REPLAY_ALL	3
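/*
 * Note: the dir-index stage was added after the overview comment further
 * below was written; replay appears to make one walk over the log per
 * stage above rather than the three passes that comment describes.
 */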
static int btrfs_log_inode(struct btrfs_trans_handle *trans,
			   struct btrfs_root *root, struct btrfs_inode *inode,
			   int inode_only,
			   const loff_t start,
			   const loff_t end,
			   struct btrfs_log_ctx *ctx);
static int link_to_fixup_dir(struct btrfs_trans_handle *trans,
			     struct btrfs_root *root,
			     struct btrfs_path *path, u64 objectid);
static noinline int replay_dir_deletes(struct btrfs_trans_handle *trans,
				       struct btrfs_root *root,
				       struct btrfs_root *log,
				       struct btrfs_path *path,
				       u64 dirid, int del_all);
/*
 * tree logging is a special write ahead log used to make sure that
 * fsyncs and O_SYNCs can happen without doing full tree commits.
 *
 * Full tree commits are expensive because they require commonly
 * modified blocks to be recowed, creating many dirty pages in the
 * extent tree and a 4x-6x higher write load than ext3.
 *
 * Instead of doing a tree commit on every fsync, we use the
 * key ranges and transaction ids to find items for a given file or directory
 * that have changed in this transaction.  Those items are copied into
 * a special tree (one per subvolume root), that tree is written to disk
 * and then the fsync is considered complete.
 *
 * After a crash, items are copied out of the log-tree back into the
 * subvolume tree.  Any file data extents found are recorded in the extent
 * allocation tree, and the log-tree is freed.
 *
 * The log tree is read three times: once to pin down all the extents it is
 * using in ram, once to create all the inodes logged in the tree
 * and once to do all the other items.
 */
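/*
 * For example, an fsync of a single file only copies that file's inode
 * item, extent items and checksums into the per-subvolume log tree; none
 * of the other metadata blocks dirtied by the transaction need to be
 * recowed and written out for the fsync to complete.
 */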
/*
 * start a sub transaction and setup the log tree
 * this increments the log tree writer count to make the people
 * syncing the tree wait for us to finish
 */
static int start_log_trans(struct btrfs_trans_handle *trans,
			   struct btrfs_root *root,
			   struct btrfs_log_ctx *ctx)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	int ret = 0;

	mutex_lock(&root->log_mutex);

	if (root->log_root) {
		if (btrfs_need_log_full_commit(fs_info, trans)) {
			ret = -EAGAIN;
			goto out;
		}

		if (!root->log_start_pid) {
			clear_bit(BTRFS_ROOT_MULTI_LOG_TASKS, &root->state);
			root->log_start_pid = current->pid;
		} else if (root->log_start_pid != current->pid) {
			set_bit(BTRFS_ROOT_MULTI_LOG_TASKS, &root->state);
		}
	} else {
		mutex_lock(&fs_info->tree_log_mutex);
		if (!fs_info->log_root_tree)
			ret = btrfs_init_log_root_tree(trans, fs_info);
		mutex_unlock(&fs_info->tree_log_mutex);
		if (ret)
			goto out;

		ret = btrfs_add_log_tree(trans, root);
		if (ret)
			goto out;

		clear_bit(BTRFS_ROOT_MULTI_LOG_TASKS, &root->state);
		root->log_start_pid = current->pid;
	}

	atomic_inc(&root->log_batch);
	atomic_inc(&root->log_writers);
	if (ctx) {
		int index = root->log_transid % 2;
		list_add_tail(&ctx->list, &root->log_ctxs[index]);
		ctx->log_transid = root->log_transid;
	}

out:
	mutex_unlock(&root->log_mutex);
	return ret;
}
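/*
 * Note: root->log_ctxs[] is indexed by log_transid % 2, i.e. there are two
 * ctx lists, so that waiters on the log transaction currently being synced
 * are kept separate from waiters on the next one being populated.
 */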
/*
 * returns 0 if there was a log transaction running and we were able
 * to join, or returns -ENOENT if there were no transactions
 * in progress
 */
static int join_running_log_trans(struct btrfs_root *root)
{
	int ret = -ENOENT;

	smp_mb();
	if (!root->log_root)
		return -ENOENT;

	mutex_lock(&root->log_mutex);
	if (root->log_root) {
		ret = 0;
		atomic_inc(&root->log_writers);
	}
	mutex_unlock(&root->log_mutex);
	return ret;
}
/*
 * This either makes the current running log transaction wait
 * until you call btrfs_end_log_trans() or it makes any future
 * log transactions wait until you call btrfs_end_log_trans()
 */
int btrfs_pin_log_trans(struct btrfs_root *root)
{
	int ret = -ENOENT;

	mutex_lock(&root->log_mutex);
	atomic_inc(&root->log_writers);
	mutex_unlock(&root->log_mutex);

	return ret;
}
/*
 * indicate we're done making changes to the log tree
 * and wake up anyone waiting to do a sync
 */
void btrfs_end_log_trans(struct btrfs_root *root)
{
	if (atomic_dec_and_test(&root->log_writers)) {
		/*
		 * Implicit memory barrier after atomic_dec_and_test
		 */
		if (waitqueue_active(&root->log_writer_wait))
			wake_up(&root->log_writer_wait);
	}
}
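/*
 * The atomic_dec_and_test() above implies a full memory barrier, which is
 * what makes the unlocked waitqueue_active() check safe against a task
 * that is concurrently adding itself to the wait queue.
 */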
/*
 * the walk control struct is used to pass state down the chain when
 * processing the log tree.  The stage field tells us which part
 * of the log tree processing we are currently doing.  The others
 * are state fields used for that specific part
 */
struct walk_control {
	/* should we free the extent on disk when done?  This is used
	 * at transaction commit time while freeing a log tree
	 */
	int free;

	/* should we write out the extent buffer?  This is used
	 * while flushing the log tree to disk during a sync
	 */
	int write;

	/* should we wait for the extent buffer io to finish?  Also used
	 * while flushing the log tree to disk for a sync
	 */
	int wait;

	/* pin only walk, we record which extents on disk belong to the
	 * log trees
	 */
	int pin;

	/* what stage of the replay code we're currently in */
	int stage;

	/*
	 * Ignore any items from the inode currently being processed. Needs
	 * to be set every time we find a BTRFS_INODE_ITEM_KEY and we are in
	 * the LOG_WALK_REPLAY_INODES stage.
	 */
	bool ignore_cur_inode;

	/* the root we are currently replaying */
	struct btrfs_root *replay_dest;

	/* the trans handle for the current replay */
	struct btrfs_trans_handle *trans;

	/* the function that gets used to process blocks we find in the
	 * tree.  Note the extent_buffer might not be up to date when it is
	 * passed in, and it must be checked or read if you need the data
	 * inside it
	 */
	int (*process_func)(struct btrfs_root *log, struct extent_buffer *eb,
			    struct walk_control *wc, u64 gen);
};
/*
 * process_func used to pin down extents, write them or wait on them
 */
static int process_one_buffer(struct btrfs_root *log,
			      struct extent_buffer *eb,
			      struct walk_control *wc, u64 gen)
{
	struct btrfs_fs_info *fs_info = log->fs_info;
	int ret = 0;

	/*
	 * If this fs is mixed then we need to be able to process the leaves to
	 * pin down any logged extents, so we have to read the block.
	 */
	if (btrfs_fs_incompat(fs_info, MIXED_GROUPS)) {
		ret = btrfs_read_buffer(eb, gen);
		if (ret)
			return ret;
	}

	if (wc->pin)
		ret = btrfs_pin_extent_for_log_replay(fs_info, eb->start,
						      eb->len);

	if (!ret && btrfs_buffer_uptodate(eb, gen, 0)) {
		if (wc->pin && btrfs_header_level(eb) == 0)
			ret = btrfs_exclude_logged_extents(fs_info, eb);
		if (wc->write)
			btrfs_write_tree_block(eb);
		if (wc->wait)
			btrfs_wait_tree_block_writeback(eb);
	}
	return ret;
}
/*
 * Item overwrite used by replay and tree logging.  eb, slot and key all refer
 * to the src data we are copying out.
 *
 * root is the tree we are copying into, and path is a scratch
 * path for use in this function (it should be released on entry and
 * will be released on exit).
 *
 * If the key is already in the destination tree the existing item is
 * overwritten.  If the existing item isn't big enough, it is extended.
 * If it is too large, it is truncated.
 *
 * If the key isn't in the destination yet, a new item is inserted.
 */
static noinline int overwrite_item(struct btrfs_trans_handle *trans,
				   struct btrfs_root *root,
				   struct btrfs_path *path,
				   struct extent_buffer *eb, int slot,
				   struct btrfs_key *key)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	int ret;
	u32 item_size;
	u64 saved_i_size = 0;
	int save_old_i_size = 0;
	unsigned long src_ptr;
	unsigned long dst_ptr;
	int overwrite_root = 0;
	bool inode_item = key->type == BTRFS_INODE_ITEM_KEY;

	if (root->root_key.objectid != BTRFS_TREE_LOG_OBJECTID)
		overwrite_root = 1;

	item_size = btrfs_item_size_nr(eb, slot);
	src_ptr = btrfs_item_ptr_offset(eb, slot);

	/* look for the key in the destination tree */
	ret = btrfs_search_slot(NULL, root, key, path, 0, 0);
	if (ret < 0)
		return ret;

	if (ret == 0) {
		char *src_copy;
		char *dst_copy;
		u32 dst_size = btrfs_item_size_nr(path->nodes[0],
						  path->slots[0]);
		if (dst_size != item_size)
			goto insert;

		if (item_size == 0) {
			btrfs_release_path(path);
			return 0;
		}
		dst_copy = kmalloc(item_size, GFP_NOFS);
		src_copy = kmalloc(item_size, GFP_NOFS);
		if (!dst_copy || !src_copy) {
			btrfs_release_path(path);
			kfree(dst_copy);
			kfree(src_copy);
			return -ENOMEM;
		}

		read_extent_buffer(eb, src_copy, src_ptr, item_size);

		dst_ptr = btrfs_item_ptr_offset(path->nodes[0], path->slots[0]);
		read_extent_buffer(path->nodes[0], dst_copy, dst_ptr,
				   item_size);
		ret = memcmp(dst_copy, src_copy, item_size);

		kfree(dst_copy);
		kfree(src_copy);
		/*
		 * they have the same contents, just return, this saves
		 * us from cowing blocks in the destination tree and doing
		 * extra writes that may not have been done by a previous
		 * sync
		 */
		if (ret == 0) {
			btrfs_release_path(path);
			return 0;
		}

		/*
		 * We need to load the old nbytes into the inode so when we
		 * replay the extents we've logged we get the right nbytes.
		 */
		if (inode_item) {
			struct btrfs_inode_item *item;
			u64 nbytes;
			u32 mode;

			item = btrfs_item_ptr(path->nodes[0], path->slots[0],
					      struct btrfs_inode_item);
			nbytes = btrfs_inode_nbytes(path->nodes[0], item);
			item = btrfs_item_ptr(eb, slot,
					      struct btrfs_inode_item);
			btrfs_set_inode_nbytes(eb, item, nbytes);

			/*
			 * If this is a directory we need to reset the i_size to
			 * 0 so that we can set it up properly when replaying
			 * the rest of the items in this log.
			 */
			mode = btrfs_inode_mode(eb, item);
			if (S_ISDIR(mode))
				btrfs_set_inode_size(eb, item, 0);
		}
	} else if (inode_item) {
		struct btrfs_inode_item *item;
		u32 mode;

		/*
		 * New inode, set nbytes to 0 so that the nbytes comes out
		 * properly when we replay the extents.
		 */
		item = btrfs_item_ptr(eb, slot, struct btrfs_inode_item);
		btrfs_set_inode_nbytes(eb, item, 0);

		/*
		 * If this is a directory we need to reset the i_size to 0 so
		 * that we can set it up properly when replaying the rest of
		 * the items in this log.
		 */
		mode = btrfs_inode_mode(eb, item);
		if (S_ISDIR(mode))
			btrfs_set_inode_size(eb, item, 0);
	}
insert:
	btrfs_release_path(path);
	/* try to insert the key into the destination tree */
	path->skip_release_on_error = 1;
	ret = btrfs_insert_empty_item(trans, root, path,
				      key, item_size);
	path->skip_release_on_error = 0;

	/* make sure any existing item is the correct size */
	if (ret == -EEXIST || ret == -EOVERFLOW) {
		u32 found_size;
		found_size = btrfs_item_size_nr(path->nodes[0],
						path->slots[0]);
		if (found_size > item_size)
			btrfs_truncate_item(fs_info, path, item_size, 1);
		else if (found_size < item_size)
			btrfs_extend_item(fs_info, path,
					  item_size - found_size);
	} else if (ret) {
		return ret;
	}
	dst_ptr = btrfs_item_ptr_offset(path->nodes[0],
					path->slots[0]);

	/* don't overwrite an existing inode if the generation number
	 * was logged as zero.  This is done when the tree logging code
	 * is just logging an inode to make sure it exists after recovery.
	 *
	 * Also, don't overwrite i_size on directories during replay.
	 * log replay inserts and removes directory items based on the
	 * state of the tree found in the subvolume, and i_size is modified
	 * as it goes
	 */
	if (key->type == BTRFS_INODE_ITEM_KEY && ret == -EEXIST) {
		struct btrfs_inode_item *src_item;
		struct btrfs_inode_item *dst_item;

		src_item = (struct btrfs_inode_item *)src_ptr;
		dst_item = (struct btrfs_inode_item *)dst_ptr;

		if (btrfs_inode_generation(eb, src_item) == 0) {
			struct extent_buffer *dst_eb = path->nodes[0];
			const u64 ino_size = btrfs_inode_size(eb, src_item);

			/*
			 * For regular files an ino_size == 0 is used only when
			 * logging that an inode exists, as part of a directory
			 * fsync, and the inode wasn't fsynced before. In this
			 * case don't set the size of the inode in the fs/subvol
			 * tree, otherwise we would be throwing valid data away.
			 */
			if (S_ISREG(btrfs_inode_mode(eb, src_item)) &&
			    S_ISREG(btrfs_inode_mode(dst_eb, dst_item)) &&
			    ino_size != 0) {
				struct btrfs_map_token token;

				btrfs_init_map_token(&token);
				btrfs_set_token_inode_size(dst_eb, dst_item,
							   ino_size, &token);
			}
			goto no_copy;
		}

		if (overwrite_root &&
		    S_ISDIR(btrfs_inode_mode(eb, src_item)) &&
		    S_ISDIR(btrfs_inode_mode(path->nodes[0], dst_item))) {
			save_old_i_size = 1;
			saved_i_size = btrfs_inode_size(path->nodes[0],
							dst_item);
		}
	}

	copy_extent_buffer(path->nodes[0], eb, dst_ptr,
			   src_ptr, item_size);

	if (save_old_i_size) {
		struct btrfs_inode_item *dst_item;
		dst_item = (struct btrfs_inode_item *)dst_ptr;
		btrfs_set_inode_size(path->nodes[0], dst_item, saved_i_size);
	}

	/* make sure the generation is filled in */
	if (key->type == BTRFS_INODE_ITEM_KEY) {
		struct btrfs_inode_item *dst_item;
		dst_item = (struct btrfs_inode_item *)dst_ptr;
		if (btrfs_inode_generation(path->nodes[0], dst_item) == 0) {
			btrfs_set_inode_generation(path->nodes[0], dst_item,
						   trans->transid);
		}
	}
no_copy:
	btrfs_mark_buffer_dirty(path->nodes[0]);
	btrfs_release_path(path);
	return 0;
}
/*
 * simple helper to read an inode off the disk from a given root
 * This can only be called for subvolume roots and not for the log
 */
static noinline struct inode *read_one_inode(struct btrfs_root *root,
					     u64 objectid)
{
	struct btrfs_key key;
	struct inode *inode;

	key.objectid = objectid;
	key.type = BTRFS_INODE_ITEM_KEY;
	key.offset = 0;
	inode = btrfs_iget(root->fs_info->sb, &key, root, NULL);
	if (IS_ERR(inode)) {
		inode = NULL;
	} else if (is_bad_inode(inode)) {
		iput(inode);
		inode = NULL;
	}
	return inode;
}
/* replays a single extent in 'eb' at 'slot' with 'key' into the
 * subvolume 'root'.  path is released on entry and should be released
 * on exit.
 *
 * extents in the log tree have not been allocated out of the extent
 * tree yet.  So, this completes the allocation, taking a reference
 * as required if the extent already exists or creating a new extent
 * if it isn't in the extent allocation tree yet.
 *
 * The extent is inserted into the file, dropping any existing extents
 * from the file that overlap the new one.
 */
static noinline int replay_one_extent(struct btrfs_trans_handle *trans,
				      struct btrfs_root *root,
				      struct btrfs_path *path,
				      struct extent_buffer *eb, int slot,
				      struct btrfs_key *key)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	int found_type;
	u64 extent_end;
	u64 start = key->offset;
	u64 nbytes = 0;
	struct btrfs_file_extent_item *item;
	struct inode *inode = NULL;
	unsigned long size;
	int ret = 0;

	item = btrfs_item_ptr(eb, slot, struct btrfs_file_extent_item);
	found_type = btrfs_file_extent_type(eb, item);

	if (found_type == BTRFS_FILE_EXTENT_REG ||
	    found_type == BTRFS_FILE_EXTENT_PREALLOC) {
		nbytes = btrfs_file_extent_num_bytes(eb, item);
		extent_end = start + nbytes;

		/*
		 * We don't add to the inodes nbytes if we are prealloc or a
		 * hole.
		 */
		if (btrfs_file_extent_disk_bytenr(eb, item) == 0)
			nbytes = 0;
	} else if (found_type == BTRFS_FILE_EXTENT_INLINE) {
		size = btrfs_file_extent_inline_len(eb, slot, item);
		nbytes = btrfs_file_extent_ram_bytes(eb, item);
		extent_end = ALIGN(start + size,
				   fs_info->sectorsize);
	} else {
		ret = 0;
		goto out;
	}

	inode = read_one_inode(root, key->objectid);
	if (!inode) {
		ret = -EIO;
		goto out;
	}

	/*
	 * first check to see if we already have this extent in the
	 * file.  This must be done before the btrfs_drop_extents run
	 * so we don't try to drop this extent.
	 */
	ret = btrfs_lookup_file_extent(trans, root, path,
			btrfs_ino(BTRFS_I(inode)), start, 0);

	if (ret == 0 &&
	    (found_type == BTRFS_FILE_EXTENT_REG ||
	     found_type == BTRFS_FILE_EXTENT_PREALLOC)) {
		struct btrfs_file_extent_item cmp1;
		struct btrfs_file_extent_item cmp2;
		struct btrfs_file_extent_item *existing;
		struct extent_buffer *leaf;

		leaf = path->nodes[0];
		existing = btrfs_item_ptr(leaf, path->slots[0],
					  struct btrfs_file_extent_item);

		read_extent_buffer(eb, &cmp1, (unsigned long)item,
				   sizeof(cmp1));
		read_extent_buffer(leaf, &cmp2, (unsigned long)existing,
				   sizeof(cmp2));

		/*
		 * we already have a pointer to this exact extent,
		 * we don't have to do anything
		 */
		if (memcmp(&cmp1, &cmp2, sizeof(cmp1)) == 0) {
			btrfs_release_path(path);
			goto out;
		}
	}
	btrfs_release_path(path);

	/* drop any overlapping extents */
	ret = btrfs_drop_extents(trans, root, inode, start, extent_end, 1);
	if (ret)
		goto out;

	if (found_type == BTRFS_FILE_EXTENT_REG ||
	    found_type == BTRFS_FILE_EXTENT_PREALLOC) {
		u64 offset;
		unsigned long dest_offset;
		struct btrfs_key ins;

		if (btrfs_file_extent_disk_bytenr(eb, item) == 0 &&
		    btrfs_fs_incompat(fs_info, NO_HOLES))
			goto update_inode;

		ret = btrfs_insert_empty_item(trans, root, path, key,
					      sizeof(*item));
		if (ret)
			goto out;
		dest_offset = btrfs_item_ptr_offset(path->nodes[0],
						    path->slots[0]);
		copy_extent_buffer(path->nodes[0], eb, dest_offset,
				(unsigned long)item,  sizeof(*item));

		ins.objectid = btrfs_file_extent_disk_bytenr(eb, item);
		ins.offset = btrfs_file_extent_disk_num_bytes(eb, item);
		ins.type = BTRFS_EXTENT_ITEM_KEY;
		offset = key->offset - btrfs_file_extent_offset(eb, item);

		/*
		 * Manually record dirty extent, as here we did a shallow
		 * file extent item copy and skip normal backref update,
		 * but modifying extent tree all by ourselves.
		 * So need to manually record dirty extent for qgroup,
		 * as the owner of the file extent changed from log tree
		 * (doesn't affect qgroup) to fs/file tree (affects qgroup)
		 */
		ret = btrfs_qgroup_trace_extent(trans, fs_info,
				btrfs_file_extent_disk_bytenr(eb, item),
				btrfs_file_extent_disk_num_bytes(eb, item),
				GFP_NOFS);
		if (ret < 0)
			goto out;

		if (ins.objectid > 0) {
			u64 csum_start;
			u64 csum_end;
			LIST_HEAD(ordered_sums);
			/*
			 * is this extent already allocated in the extent
			 * allocation tree?  If so, just add a reference
			 */
			ret = btrfs_lookup_data_extent(fs_info, ins.objectid,
						ins.offset);
			if (ret == 0) {
				ret = btrfs_inc_extent_ref(trans, root,
						ins.objectid, ins.offset,
						0, root->root_key.objectid,
						key->objectid, offset);
				if (ret)
					goto out;
			} else {
				/*
				 * insert the extent pointer in the extent
				 * allocation tree
				 */
				ret = btrfs_alloc_logged_file_extent(trans,
						fs_info,
						root->root_key.objectid,
						key->objectid, offset, &ins);
				if (ret)
					goto out;
			}
			btrfs_release_path(path);

			if (btrfs_file_extent_compression(eb, item)) {
				csum_start = ins.objectid;
				csum_end = csum_start + ins.offset;
			} else {
				csum_start = ins.objectid +
					btrfs_file_extent_offset(eb, item);
				csum_end = csum_start +
					btrfs_file_extent_num_bytes(eb, item);
			}

			ret = btrfs_lookup_csums_range(root->log_root,
						csum_start, csum_end - 1,
						&ordered_sums, 0);
			if (ret)
				goto out;
			/*
			 * Now delete all existing csums in the csum root that
			 * cover our range. We do this because we can have an
			 * extent that is completely referenced by one file
			 * extent item and partially referenced by another
			 * file extent item (like after using the clone or
			 * extent_same ioctls). In this case if we end up doing
			 * the replay of the one that partially references the
			 * extent first, and we do not do the csum deletion
			 * below, we can get 2 csum items in the csum tree that
			 * overlap each other. For example, imagine our log has
			 * the two following file extent items:
			 *
			 * key (257 EXTENT_DATA 409600)
			 *     extent data disk byte 12845056 nr 102400
			 *     extent data offset 20480 nr 20480 ram 102400
			 *
			 * key (257 EXTENT_DATA 819200)
			 *     extent data disk byte 12845056 nr 102400
			 *     extent data offset 0 nr 102400 ram 102400
			 *
			 * Where the second one fully references the 100K extent
			 * that starts at disk byte 12845056, and the log tree
			 * has a single csum item that covers the entire range
			 * of the extent:
			 *
			 * key (EXTENT_CSUM EXTENT_CSUM 12845056) itemsize 100
			 *
			 * After the first file extent item is replayed, the
			 * csum tree gets the following csum item:
			 *
			 * key (EXTENT_CSUM EXTENT_CSUM 12865536) itemsize 20
			 *
			 * Which covers the 20K sub-range starting at offset 20K
			 * of our extent. Now when we replay the second file
			 * extent item, if we do not delete existing csum items
			 * that cover any of its blocks, we end up getting two
			 * csum items in our csum tree that overlap each other:
			 *
			 * key (EXTENT_CSUM EXTENT_CSUM 12845056) itemsize 100
			 * key (EXTENT_CSUM EXTENT_CSUM 12865536) itemsize 20
			 *
			 * Which is a problem, because after this anyone trying
			 * to look up the checksum of any block of our
			 * extent starting at an offset of 40K or higher, will
			 * end up looking at the second csum item only, which
			 * does not contain the checksum for any block starting
			 * at offset 40K or higher of our extent.
			 */
			while (!list_empty(&ordered_sums)) {
				struct btrfs_ordered_sum *sums;
				sums = list_entry(ordered_sums.next,
						struct btrfs_ordered_sum,
						list);
				if (!ret)
					ret = btrfs_del_csums(trans, fs_info,
							      sums->bytenr,
							      sums->len);
				if (!ret)
					ret = btrfs_csum_file_blocks(trans,
						fs_info->csum_root, sums);
				list_del(&sums->list);
				kfree(sums);
			}
			if (ret)
				goto out;
		} else {
			btrfs_release_path(path);
		}
	} else if (found_type == BTRFS_FILE_EXTENT_INLINE) {
		/* inline extents are easy, we just overwrite them */
		ret = overwrite_item(trans, root, path, eb, slot, key);
		if (ret)
			goto out;
	}

	inode_add_bytes(inode, nbytes);
update_inode:
	ret = btrfs_update_inode(trans, root, inode);
out:
	if (inode)
		iput(inode);
	return ret;
}
/*
 * when cleaning up conflicts between the directory names in the
 * subvolume, directory names in the log and directory names in the
 * inode back references, we may have to unlink inodes from directories.
 *
 * This is a helper function to do the unlink of a specific directory
 * item
 */
static noinline int drop_one_dir_item(struct btrfs_trans_handle *trans,
				      struct btrfs_root *root,
				      struct btrfs_path *path,
				      struct btrfs_inode *dir,
				      struct btrfs_dir_item *di)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct inode *inode;
	char *name;
	int name_len;
	struct extent_buffer *leaf;
	struct btrfs_key location;
	int ret;

	leaf = path->nodes[0];

	btrfs_dir_item_key_to_cpu(leaf, di, &location);
	name_len = btrfs_dir_name_len(leaf, di);
	name = kmalloc(name_len, GFP_NOFS);
	if (!name)
		return -ENOMEM;

	read_extent_buffer(leaf, name, (unsigned long)(di + 1), name_len);
	btrfs_release_path(path);

	inode = read_one_inode(root, location.objectid);
	if (!inode) {
		ret = -EIO;
		goto out;
	}

	ret = link_to_fixup_dir(trans, root, path, location.objectid);
	if (ret)
		goto out;

	ret = btrfs_unlink_inode(trans, root, dir, BTRFS_I(inode), name,
			name_len);
	if (ret)
		goto out;
	else
		ret = btrfs_run_delayed_items(trans, fs_info);
out:
	kfree(name);
	iput(inode);
	return ret;
}
/*
 * helper function to see if a given name and sequence number found
 * in an inode back reference are already in a directory and correctly
 * point to this inode
 */
static noinline int inode_in_dir(struct btrfs_root *root,
				 struct btrfs_path *path,
				 u64 dirid, u64 objectid, u64 index,
				 const char *name, int name_len)
{
	struct btrfs_dir_item *di;
	struct btrfs_key location;
	int match = 0;

	di = btrfs_lookup_dir_index_item(NULL, root, path, dirid,
					 index, name, name_len, 0);
	if (di && !IS_ERR(di)) {
		btrfs_dir_item_key_to_cpu(path->nodes[0], di, &location);
		if (location.objectid != objectid)
			goto out;
	} else
		goto out;
	btrfs_release_path(path);

	di = btrfs_lookup_dir_item(NULL, root, path, dirid, name, name_len, 0);
	if (di && !IS_ERR(di)) {
		btrfs_dir_item_key_to_cpu(path->nodes[0], di, &location);
		if (location.objectid != objectid)
			goto out;
	} else
		goto out;
	match = 1;
out:
	btrfs_release_path(path);
	return match;
}
/*
 * helper function to check a log tree for a named back reference in
 * an inode.  This is used to decide if a back reference that is
 * found in the subvolume conflicts with what we find in the log.
 *
 * inode backreferences may have multiple refs in a single item,
 * during replay we process one reference at a time, and we don't
 * want to delete valid links to a file from the subvolume if that
 * link is also in the log.
 */
static noinline int backref_in_log(struct btrfs_root *log,
				   struct btrfs_key *key,
				   u64 ref_objectid,
				   const char *name, int namelen)
{
	struct btrfs_path *path;
	struct btrfs_inode_ref *ref;
	unsigned long ptr;
	unsigned long ptr_end;
	unsigned long name_ptr;
	int found_name_len;
	int item_size;
	int ret;
	int match = 0;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	ret = btrfs_search_slot(NULL, log, key, path, 0, 0);
	if (ret != 0)
		goto out;

	ptr = btrfs_item_ptr_offset(path->nodes[0], path->slots[0]);

	if (key->type == BTRFS_INODE_EXTREF_KEY) {
		if (btrfs_find_name_in_ext_backref(path, ref_objectid,
						   name, namelen, NULL))
			match = 1;

		goto out;
	}

	item_size = btrfs_item_size_nr(path->nodes[0], path->slots[0]);
	ptr_end = ptr + item_size;
	while (ptr < ptr_end) {
		ref = (struct btrfs_inode_ref *)ptr;
		found_name_len = btrfs_inode_ref_name_len(path->nodes[0], ref);
		if (found_name_len == namelen) {
			name_ptr = (unsigned long)(ref + 1);
			ret = memcmp_extent_buffer(path->nodes[0], name,
						   name_ptr, namelen);
			if (ret == 0) {
				match = 1;
				goto out;
			}
		}
		ptr = (unsigned long)(ref + 1) + found_name_len;
	}
out:
	btrfs_free_path(path);
	return match;
}
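/*
 * Roughly: unlink every conflicting name (old style inode refs, extended
 * refs and matching directory entries) found in the subvolume for the name
 * being replayed, unless the log still contains that name too.  This is a
 * summary inferred from the loop bodies below.
 */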
static inline int __add_inode_ref(struct btrfs_trans_handle *trans,
				  struct btrfs_root *root,
				  struct btrfs_path *path,
				  struct btrfs_root *log_root,
				  struct btrfs_inode *dir,
				  struct btrfs_inode *inode,
				  u64 inode_objectid, u64 parent_objectid,
				  u64 ref_index, char *name, int namelen,
				  int *search_done)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	int ret;
	char *victim_name;
	int victim_name_len;
	struct extent_buffer *leaf;
	struct btrfs_dir_item *di;
	struct btrfs_key search_key;
	struct btrfs_inode_extref *extref;

again:
	/* Search old style refs */
	search_key.objectid = inode_objectid;
	search_key.type = BTRFS_INODE_REF_KEY;
	search_key.offset = parent_objectid;
	ret = btrfs_search_slot(NULL, root, &search_key, path, 0, 0);
	if (ret == 0) {
		struct btrfs_inode_ref *victim_ref;
		unsigned long ptr;
		unsigned long ptr_end;

		leaf = path->nodes[0];

		/* are we trying to overwrite a back ref for the root directory
		 * if so, just jump out, we're done
		 */
		if (search_key.objectid == search_key.offset)
			return 1;

		/* check all the names in this back reference to see
		 * if they are in the log.  if so, we allow them to stay
		 * otherwise they must be unlinked as a conflict
		 */
		ptr = btrfs_item_ptr_offset(leaf, path->slots[0]);
		ptr_end = ptr + btrfs_item_size_nr(leaf, path->slots[0]);
		while (ptr < ptr_end) {
			victim_ref = (struct btrfs_inode_ref *)ptr;
			victim_name_len = btrfs_inode_ref_name_len(leaf,
								   victim_ref);
			victim_name = kmalloc(victim_name_len, GFP_NOFS);
			if (!victim_name)
				return -ENOMEM;

			read_extent_buffer(leaf, victim_name,
					   (unsigned long)(victim_ref + 1),
					   victim_name_len);

			if (!backref_in_log(log_root, &search_key,
					    parent_objectid,
					    victim_name,
					    victim_name_len)) {
				inc_nlink(&inode->vfs_inode);
				btrfs_release_path(path);

				ret = btrfs_unlink_inode(trans, root, dir, inode,
						victim_name, victim_name_len);
				kfree(victim_name);
				if (ret)
					return ret;
				ret = btrfs_run_delayed_items(trans, fs_info);
				if (ret)
					return ret;
				*search_done = 1;
				goto again;
			}
			kfree(victim_name);

			ptr = (unsigned long)(victim_ref + 1) + victim_name_len;
		}

		/*
		 * NOTE: we have searched root tree and checked the
		 * corresponding ref, it does not need to check again.
		 */
		*search_done = 1;
	}
	btrfs_release_path(path);

	/* Same search but for extended refs */
	extref = btrfs_lookup_inode_extref(NULL, root, path, name, namelen,
					   inode_objectid, parent_objectid, 0,
					   0);
	if (!IS_ERR_OR_NULL(extref)) {
		u32 item_size;
		u32 cur_offset = 0;
		unsigned long base;
		struct inode *victim_parent;

		leaf = path->nodes[0];

		item_size = btrfs_item_size_nr(leaf, path->slots[0]);
		base = btrfs_item_ptr_offset(leaf, path->slots[0]);

		while (cur_offset < item_size) {
			extref = (struct btrfs_inode_extref *)(base + cur_offset);

			victim_name_len = btrfs_inode_extref_name_len(leaf, extref);

			if (btrfs_inode_extref_parent(leaf, extref) != parent_objectid)
				goto next;

			victim_name = kmalloc(victim_name_len, GFP_NOFS);
			if (!victim_name)
				return -ENOMEM;
			read_extent_buffer(leaf, victim_name, (unsigned long)&extref->name,
					   victim_name_len);

			search_key.objectid = inode_objectid;
			search_key.type = BTRFS_INODE_EXTREF_KEY;
			search_key.offset = btrfs_extref_hash(parent_objectid,
							      victim_name,
							      victim_name_len);
			ret = 0;
			if (!backref_in_log(log_root, &search_key,
					    parent_objectid, victim_name,
					    victim_name_len)) {
				ret = -ENOENT;
				victim_parent = read_one_inode(root,
						parent_objectid);
				if (victim_parent) {
					inc_nlink(&inode->vfs_inode);
					btrfs_release_path(path);

					ret = btrfs_unlink_inode(trans, root,
							BTRFS_I(victim_parent),
							inode,
							victim_name,
							victim_name_len);
					if (!ret)
						ret = btrfs_run_delayed_items(
								  trans,
								  fs_info);
				}
				iput(victim_parent);
				kfree(victim_name);
				if (ret)
					return ret;
				*search_done = 1;
				goto again;
			}
			kfree(victim_name);
next:
			cur_offset += victim_name_len + sizeof(*extref);
		}
		*search_done = 1;
	}
	btrfs_release_path(path);

	/* look for a conflicting sequence number */
	di = btrfs_lookup_dir_index_item(trans, root, path, btrfs_ino(dir),
					 ref_index, name, namelen, 0);
	if (di && !IS_ERR(di)) {
		ret = drop_one_dir_item(trans, root, path, dir, di);
		if (ret)
			return ret;
	}
	btrfs_release_path(path);

	/* look for a conflicting name */
	di = btrfs_lookup_dir_item(trans, root, path, btrfs_ino(dir),
				   name, namelen, 0);
	if (di && !IS_ERR(di)) {
		ret = drop_one_dir_item(trans, root, path, dir, di);
		if (ret)
			return ret;
	}
	btrfs_release_path(path);

	return 0;
}
static int extref_get_fields(struct extent_buffer *eb, int slot,
			     unsigned long ref_ptr, u32 *namelen, char **name,
			     u64 *index, u64 *parent_objectid)
{
	struct btrfs_inode_extref *extref;

	extref = (struct btrfs_inode_extref *)ref_ptr;

	*namelen = btrfs_inode_extref_name_len(eb, extref);
	if (!btrfs_is_name_len_valid(eb, slot, (unsigned long)&extref->name,
				     *namelen))
		return -EIO;

	*name = kmalloc(*namelen, GFP_NOFS);
	if (*name == NULL)
		return -ENOMEM;

	read_extent_buffer(eb, *name, (unsigned long)&extref->name,
			   *namelen);

	*index = btrfs_inode_extref_index(eb, extref);
	if (parent_objectid)
		*parent_objectid = btrfs_inode_extref_parent(eb, extref);

	return 0;
}
static int ref_get_fields(struct extent_buffer *eb, int slot,
			  unsigned long ref_ptr, u32 *namelen, char **name,
			  u64 *index)
{
	struct btrfs_inode_ref *ref;

	ref = (struct btrfs_inode_ref *)ref_ptr;

	*namelen = btrfs_inode_ref_name_len(eb, ref);
	if (!btrfs_is_name_len_valid(eb, slot, (unsigned long)(ref + 1),
				     *namelen))
		return -EIO;

	*name = kmalloc(*namelen, GFP_NOFS);
	if (*name == NULL)
		return -ENOMEM;

	read_extent_buffer(eb, *name, (unsigned long)(ref + 1), *namelen);

	*index = btrfs_inode_ref_index(eb, ref);

	return 0;
}
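/*
 * Both helpers above return the name in a buffer allocated with kmalloc();
 * the caller is responsible for kfree()ing *name once done with it.
 */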
/*
 * replay one inode back reference item found in the log tree.
 * eb, slot and key refer to the buffer and key found in the log tree.
 * root is the destination we are replaying into, and path is for temp
 * use by this function.  (it should be released on return).
 */
static noinline int add_inode_ref(struct btrfs_trans_handle *trans,
				  struct btrfs_root *root,
				  struct btrfs_root *log,
				  struct btrfs_path *path,
				  struct extent_buffer *eb, int slot,
				  struct btrfs_key *key)
{
	struct inode *dir = NULL;
	struct inode *inode = NULL;
	unsigned long ref_ptr;
	unsigned long ref_end;
	char *name = NULL;
	int namelen;
	int ret;
	int search_done = 0;
	int log_ref_ver = 0;
	u64 parent_objectid;
	u64 inode_objectid;
	u64 ref_index = 0;
	int ref_struct_size;

	ref_ptr = btrfs_item_ptr_offset(eb, slot);
	ref_end = ref_ptr + btrfs_item_size_nr(eb, slot);

	if (key->type == BTRFS_INODE_EXTREF_KEY) {
		struct btrfs_inode_extref *r;

		ref_struct_size = sizeof(struct btrfs_inode_extref);
		log_ref_ver = 1;
		r = (struct btrfs_inode_extref *)ref_ptr;
		parent_objectid = btrfs_inode_extref_parent(eb, r);
	} else {
		ref_struct_size = sizeof(struct btrfs_inode_ref);
		parent_objectid = key->offset;
	}
	inode_objectid = key->objectid;

	/*
	 * it is possible that we didn't log all the parent directories
	 * for a given inode.  If we don't find the dir, just don't
	 * copy the back ref in.  The link count fixup code will take
	 * care of the rest
	 */
	dir = read_one_inode(root, parent_objectid);
	if (!dir) {
		ret = -ENOENT;
		goto out;
	}

	inode = read_one_inode(root, inode_objectid);
	if (!inode) {
		ret = -EIO;
		goto out;
	}

	while (ref_ptr < ref_end) {
		if (log_ref_ver) {
			ret = extref_get_fields(eb, slot, ref_ptr, &namelen,
					  &name, &ref_index, &parent_objectid);
			/*
			 * parent object can change from one array
			 * item to another.
			 */
			if (!dir)
				dir = read_one_inode(root, parent_objectid);
			if (!dir) {
				ret = -ENOENT;
				goto out;
			}
		} else {
			ret = ref_get_fields(eb, slot, ref_ptr, &namelen,
					     &name, &ref_index);
		}
		if (ret)
			goto out;

		/* if we already have a perfect match, we're done */
		if (!inode_in_dir(root, path, btrfs_ino(BTRFS_I(dir)),
					btrfs_ino(BTRFS_I(inode)), ref_index,
					name, namelen)) {
			/*
			 * look for a conflicting back reference in the
			 * metadata. if we find one we have to unlink that name
			 * of the file before we add our new link.  Later on, we
			 * overwrite any existing back reference, and we don't
			 * want to create dangling pointers in the directory.
			 */

			if (!search_done) {
				ret = __add_inode_ref(trans, root, path, log,
						      BTRFS_I(dir),
						      BTRFS_I(inode),
						      inode_objectid,
						      parent_objectid,
						      ref_index, name, namelen,
						      &search_done);
				if (ret) {
					if (ret == 1)
						ret = 0;
					goto out;
				}
			}

			/* insert our name */
			ret = btrfs_add_link(trans, BTRFS_I(dir),
					BTRFS_I(inode),
					name, namelen, 0, ref_index);
			if (ret)
				goto out;

			btrfs_update_inode(trans, root, inode);
		}

		ref_ptr = (unsigned long)(ref_ptr + ref_struct_size) + namelen;
		kfree(name);
		name = NULL;
		if (log_ref_ver) {
			iput(dir);
			dir = NULL;
		}
	}

	/* finally write the back reference in the inode */
	ret = overwrite_item(trans, root, path, eb, slot, key);
out:
	btrfs_release_path(path);
	kfree(name);
	iput(dir);
	iput(inode);
	return ret;
}
static int insert_orphan_item(struct btrfs_trans_handle *trans,
			      struct btrfs_root *root, u64 ino)
{
	int ret;

	ret = btrfs_insert_orphan_item(trans, root, ino);
	if (ret == -EEXIST)
		ret = 0;

	return ret;
}
static int count_inode_extrefs(struct btrfs_root *root,
			       struct btrfs_inode *inode, struct btrfs_path *path)
{
	int ret = 0;
	int name_len;
	unsigned int nlink = 0;
	u32 item_size;
	u32 cur_offset = 0;
	u64 inode_objectid = btrfs_ino(inode);
	u64 offset = 0;
	unsigned long ptr;
	struct btrfs_inode_extref *extref;
	struct extent_buffer *leaf;

	while (1) {
		ret = btrfs_find_one_extref(root, inode_objectid, offset, path,
					    &extref, &offset);
		if (ret)
			break;

		leaf = path->nodes[0];
		item_size = btrfs_item_size_nr(leaf, path->slots[0]);
		ptr = btrfs_item_ptr_offset(leaf, path->slots[0]);
		cur_offset = 0;

		while (cur_offset < item_size) {
			extref = (struct btrfs_inode_extref *) (ptr + cur_offset);
			name_len = btrfs_inode_extref_name_len(leaf, extref);

			nlink++;

			cur_offset += name_len + sizeof(*extref);
		}

		offset++;
		btrfs_release_path(path);
	}
	btrfs_release_path(path);

	if (ret < 0 && ret != -ENOENT)
		return ret;
	return nlink;
}
static int count_inode_refs(struct btrfs_root *root,
			    struct btrfs_inode *inode, struct btrfs_path *path)
{
	int ret;
	struct btrfs_key key;
	unsigned int nlink = 0;
	unsigned long ptr;
	unsigned long ptr_end;
	int name_len;
	u64 ino = btrfs_ino(inode);

	key.objectid = ino;
	key.type = BTRFS_INODE_REF_KEY;
	key.offset = (u64)-1;

	while (1) {
		ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
		if (ret < 0)
			break;
		if (ret > 0) {
			if (path->slots[0] == 0)
				break;
			path->slots[0]--;
		}
process_slot:
		btrfs_item_key_to_cpu(path->nodes[0], &key,
				      path->slots[0]);
		if (key.objectid != ino ||
		    key.type != BTRFS_INODE_REF_KEY)
			break;
		ptr = btrfs_item_ptr_offset(path->nodes[0], path->slots[0]);
		ptr_end = ptr + btrfs_item_size_nr(path->nodes[0],
						   path->slots[0]);
		while (ptr < ptr_end) {
			struct btrfs_inode_ref *ref;

			ref = (struct btrfs_inode_ref *)ptr;
			name_len = btrfs_inode_ref_name_len(path->nodes[0],
							    ref);
			ptr = (unsigned long)(ref + 1) + name_len;
			nlink++;
		}

		if (key.offset == 0)
			break;
		if (path->slots[0] > 0) {
			path->slots[0]--;
			goto process_slot;
		}
		key.offset--;
		btrfs_release_path(path);
	}
	btrfs_release_path(path);

	return nlink;
}
/*
 * There are a few corners where the link count of the file can't
 * be properly maintained during replay.  So, instead of adding
 * lots of complexity to the log code, we just scan the backrefs
 * for any file that has been through replay.
 *
 * The scan will update the link count on the inode to reflect the
 * number of back refs found.  If it goes down to zero, the iput
 * will free the inode.
 */
static noinline int fixup_inode_link_count(struct btrfs_trans_handle *trans,
					   struct btrfs_root *root,
					   struct inode *inode)
{
	struct btrfs_path *path;
	int ret;
	u64 nlink = 0;
	u64 ino = btrfs_ino(BTRFS_I(inode));

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	ret = count_inode_refs(root, BTRFS_I(inode), path);
	if (ret < 0)
		goto out;

	nlink = ret;

	ret = count_inode_extrefs(root, BTRFS_I(inode), path);
	if (ret < 0)
		goto out;

	nlink += ret;

	ret = 0;

	if (nlink != inode->i_nlink) {
		set_nlink(inode, nlink);
		btrfs_update_inode(trans, root, inode);
	}
	BTRFS_I(inode)->index_cnt = (u64)-1;

	if (inode->i_nlink == 0) {
		if (S_ISDIR(inode->i_mode)) {
			ret = replay_dir_deletes(trans, root, NULL, path,
						 ino, 1);
			if (ret)
				goto out;
		}
		ret = insert_orphan_item(trans, root, ino);
	}

out:
	btrfs_free_path(path);
	return ret;
}
static noinline int fixup_inode_link_counts(struct btrfs_trans_handle *trans,
					    struct btrfs_root *root,
					    struct btrfs_path *path)
{
	int ret;
	struct btrfs_key key;
	struct inode *inode;

	key.objectid = BTRFS_TREE_LOG_FIXUP_OBJECTID;
	key.type = BTRFS_ORPHAN_ITEM_KEY;
	key.offset = (u64)-1;
	while (1) {
		ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
		if (ret < 0)
			break;

		if (ret == 1) {
			if (path->slots[0] == 0)
				break;
			path->slots[0]--;
		}

		btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);
		if (key.objectid != BTRFS_TREE_LOG_FIXUP_OBJECTID ||
		    key.type != BTRFS_ORPHAN_ITEM_KEY)
			break;

		ret = btrfs_del_item(trans, root, path);
		if (ret)
			goto out;

		btrfs_release_path(path);
		inode = read_one_inode(root, key.offset);
		if (!inode)
			return -EIO;

		ret = fixup_inode_link_count(trans, root, inode);
		iput(inode);
		if (ret)
			goto out;

		/*
		 * fixup on a directory may create new entries,
		 * make sure we always look for the highest possible
		 * offset
		 */
		key.offset = (u64)-1;
	}
	ret = 0;
out:
	btrfs_release_path(path);
	return ret;
}
/*
 * record a given inode in the fixup dir so we can check its link
 * count when replay is done.  The link count is incremented here
 * so the inode won't go away until we check it
 */
static noinline int link_to_fixup_dir(struct btrfs_trans_handle *trans,
				      struct btrfs_root *root,
				      struct btrfs_path *path,
				      u64 objectid)
{
	struct btrfs_key key;
	int ret = 0;
	struct inode *inode;

	inode = read_one_inode(root, objectid);
	if (!inode)
		return -EIO;

	key.objectid = BTRFS_TREE_LOG_FIXUP_OBJECTID;
	key.type = BTRFS_ORPHAN_ITEM_KEY;
	key.offset = objectid;

	ret = btrfs_insert_empty_item(trans, root, path, &key, 0);

	btrfs_release_path(path);
	if (ret == 0) {
		if (!inode->i_nlink)
			set_nlink(inode, 1);
		else
			inc_nlink(inode);
		ret = btrfs_update_inode(trans, root, inode);
	} else if (ret == -EEXIST) {
		ret = 0;
	} else {
		BUG(); /* Logic Error */
	}
	iput(inode);

	return ret;
}
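/*
 * The entries created above are orphan items keyed under
 * BTRFS_TREE_LOG_FIXUP_OBJECTID; fixup_inode_link_counts() walks and
 * deletes them once the rest of the log has been replayed.
 */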
/*
 * when replaying the log for a directory, we only insert names
 * for inodes that actually exist.  This means an fsync on a directory
 * does not implicitly fsync all the new files in it
 */
static noinline int insert_one_name(struct btrfs_trans_handle *trans,
				    struct btrfs_root *root,
				    u64 dirid, u64 index,
				    char *name, int name_len,
				    struct btrfs_key *location)
{
	struct inode *inode;
	struct inode *dir;
	int ret;

	inode = read_one_inode(root, location->objectid);
	if (!inode)
		return -ENOENT;

	dir = read_one_inode(root, dirid);
	if (!dir) {
		iput(inode);
		return -EIO;
	}

	ret = btrfs_add_link(trans, BTRFS_I(dir), BTRFS_I(inode), name,
			name_len, 1, index);

	/* FIXME, put inode into FIXUP list */

	iput(inode);
	iput(dir);
	return ret;
}
/*
 * Return true if an inode reference exists in the log for the given name,
 * inode and parent inode.
 */
static bool name_in_log_ref(struct btrfs_root *log_root,
			    const char *name, const int name_len,
			    const u64 dirid, const u64 ino)
{
	struct btrfs_key search_key;

	search_key.objectid = ino;
	search_key.type = BTRFS_INODE_REF_KEY;
	search_key.offset = dirid;
	if (backref_in_log(log_root, &search_key, dirid, name, name_len))
		return true;

	search_key.type = BTRFS_INODE_EXTREF_KEY;
	search_key.offset = btrfs_extref_hash(dirid, name, name_len);
	if (backref_in_log(log_root, &search_key, dirid, name, name_len))
		return true;

	return false;
}
/*
 * take a single entry in a log directory item and replay it into
 * the subvolume.
 *
 * if a conflicting item exists in the subdirectory already,
 * the inode it points to is unlinked and put into the link count
 * fix up tree.
 *
 * If a name from the log points to a file or directory that does
 * not exist in the FS, it is skipped.  fsyncs on directories
 * do not force down inodes inside that directory, just changes to the
 * names or unlinks in a directory.
 *
 * Returns < 0 on error, 0 if the name wasn't replayed (dentry points to a
 * non-existing inode) and 1 if the name was replayed.
 */
static noinline int replay_one_name(struct btrfs_trans_handle *trans,
				    struct btrfs_root *root,
				    struct btrfs_path *path,
				    struct extent_buffer *eb,
				    struct btrfs_dir_item *di,
				    struct btrfs_key *key)
{
	char *name;
	int name_len;
	struct btrfs_dir_item *dst_di;
	struct btrfs_key found_key;
	struct btrfs_key log_key;
	struct inode *dir;
	u8 log_type;
	int exists;
	int ret = 0;
	bool update_size = (key->type == BTRFS_DIR_INDEX_KEY);
	bool name_added = false;

	dir = read_one_inode(root, key->objectid);
	if (!dir)
		return -EIO;

	name_len = btrfs_dir_name_len(eb, di);
	name = kmalloc(name_len, GFP_NOFS);
	if (!name) {
		ret = -ENOMEM;
		goto out;
	}

	log_type = btrfs_dir_type(eb, di);
	read_extent_buffer(eb, name, (unsigned long)(di + 1),
		   name_len);

	btrfs_dir_item_key_to_cpu(eb, di, &log_key);
	exists = btrfs_lookup_inode(trans, root, path, &log_key, 0);
	if (exists == 0)
		exists = 1;
	else
		exists = 0;
	btrfs_release_path(path);

	if (key->type == BTRFS_DIR_ITEM_KEY) {
		dst_di = btrfs_lookup_dir_item(trans, root, path, key->objectid,
				       name, name_len, 1);
	} else if (key->type == BTRFS_DIR_INDEX_KEY) {
		dst_di = btrfs_lookup_dir_index_item(trans, root, path,
						     key->objectid,
						     key->offset, name,
						     name_len, 1);
	} else {
		/* Corruption */
		ret = -EINVAL;
		goto out;
	}
	if (IS_ERR_OR_NULL(dst_di)) {
		/* we need a sequence number to insert, so we only
		 * do inserts for the BTRFS_DIR_INDEX_KEY types
		 */
		if (key->type != BTRFS_DIR_INDEX_KEY)
			goto out;
		goto insert;
	}

	btrfs_dir_item_key_to_cpu(path->nodes[0], dst_di, &found_key);
	/* the existing item matches the logged item */
	if (found_key.objectid == log_key.objectid &&
	    found_key.type == log_key.type &&
	    found_key.offset == log_key.offset &&
	    btrfs_dir_type(path->nodes[0], dst_di) == log_type) {
		update_size = false;
		goto out;
	}

	/*
	 * don't drop the conflicting directory entry if the inode
	 * for the new entry doesn't exist
	 */
	if (!exists)
		goto out;

	ret = drop_one_dir_item(trans, root, path, BTRFS_I(dir), dst_di);
	if (ret)
		goto out;

	if (key->type == BTRFS_DIR_INDEX_KEY)
		goto insert;
out:
	btrfs_release_path(path);
	if (!ret && update_size) {
		btrfs_i_size_write(BTRFS_I(dir), dir->i_size + name_len * 2);
		ret = btrfs_update_inode(trans, root, dir);
	}
	kfree(name);
	iput(dir);
	if (!ret && name_added)
		ret = 1;
	return ret;

insert:
	if (name_in_log_ref(root->log_root, name, name_len,
			    key->objectid, log_key.objectid)) {
		/* The dentry will be added later. */
		ret = 0;
		update_size = false;
		goto out;
	}
	btrfs_release_path(path);
	ret = insert_one_name(trans, root, key->objectid, key->offset,
			      name, name_len, &log_key);
	if (ret && ret != -ENOENT && ret != -EEXIST)
		goto out;
	if (!ret)
		name_added = true;
	update_size = false;
	ret = 0;
	goto out;
}
/*
 * find all the names in a directory item and reconcile them into
 * the subvolume.  Only BTRFS_DIR_ITEM_KEY types will have more than
 * one name in a directory item, but the same code gets used for
 * both directory index types
 */
static noinline int replay_one_dir_item(struct btrfs_trans_handle *trans,
					struct btrfs_root *root,
					struct btrfs_path *path,
					struct extent_buffer *eb, int slot,
					struct btrfs_key *key)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	int ret = 0;
	u32 item_size = btrfs_item_size_nr(eb, slot);
	struct btrfs_dir_item *di;
	int name_len;
	unsigned long ptr;
	unsigned long ptr_end;
	struct btrfs_path *fixup_path = NULL;

	ptr = btrfs_item_ptr_offset(eb, slot);
	ptr_end = ptr + item_size;
	while (ptr < ptr_end) {
		di = (struct btrfs_dir_item *)ptr;
		if (verify_dir_item(fs_info, eb, slot, di))
			return -EIO;
		name_len = btrfs_dir_name_len(eb, di);
		ret = replay_one_name(trans, root, path, eb, di, key);
		if (ret < 0)
			break;
		ptr = (unsigned long)(di + 1);
		ptr += name_len;

		/*
		 * If this entry refers to a non-directory (directories can not
		 * have a link count > 1) and it was added in the transaction
		 * that was not committed, make sure we fixup the link count of
		 * the inode the entry points to. Otherwise something like
		 * the following would result in a directory pointing to an
		 * inode with a wrong link count that does not account for this
		 * directory entry:
		 *
		 * mkdir testdir
		 * touch testdir/foo
		 * touch testdir/bar
		 * sync
		 *
		 * ln testdir/bar testdir/bar_link
		 * ln testdir/foo testdir/foo_link
		 * xfs_io -c "fsync" testdir/bar
		 *
		 * <power failure>
		 *
		 * mount fs, log replay happens
		 *
		 * File foo would remain with a link count of 1 when it has two
		 * entries pointing to it in the directory testdir. This would
		 * make it impossible to ever delete the parent directory as
		 * it would result in stale dentries that can never be deleted.
		 */
		if (ret == 1 && btrfs_dir_type(eb, di) != BTRFS_FT_DIR) {
			struct btrfs_key di_key;

			if (!fixup_path) {
				fixup_path = btrfs_alloc_path();
				if (!fixup_path) {
					ret = -ENOMEM;
					break;
				}
			}

			btrfs_dir_item_key_to_cpu(eb, di, &di_key);
			ret = link_to_fixup_dir(trans, root, fixup_path,
						di_key.objectid);
			if (ret)
				break;
		}
		ret = 0;
	}
	btrfs_free_path(fixup_path);
	return ret;
}
/*
 * directory replay has two parts.  There are the standard directory
 * items in the log copied from the subvolume, and range items
 * created in the log while the subvolume was logged.
 *
 * The range items tell us which parts of the key space the log
 * is authoritative for.  During replay, if a key in the subvolume
 * directory is in a logged range item, but not actually in the log
 * that means it was deleted from the directory before the fsync
 * and should be removed.
 */
static noinline int find_dir_range(struct btrfs_root *root,
				   struct btrfs_path *path,
				   u64 dirid, int key_type,
				   u64 *start_ret, u64 *end_ret)
{
	struct btrfs_key key;
	u64 found_end;
	struct btrfs_dir_log_item *item;
	int ret;
	int nritems;

	if (*start_ret == (u64)-1)
		return 1;

	key.objectid = dirid;
	key.type = key_type;
	key.offset = *start_ret;

	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	if (ret < 0)
		goto out;
	if (ret > 0) {
		if (path->slots[0] == 0)
			goto out;
		path->slots[0]--;
	}
	if (ret != 0)
		goto next;

	btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);

	if (key.type != key_type || key.objectid != dirid) {
		ret = 1;
		goto next;
	}
	item = btrfs_item_ptr(path->nodes[0], path->slots[0],
			      struct btrfs_dir_log_item);
	found_end = btrfs_dir_log_end(path->nodes[0], item);

	if (*start_ret >= key.offset && *start_ret <= found_end) {
		ret = 0;
		*start_ret = key.offset;
		*end_ret = found_end;
		goto out;
	}
	ret = 1;
next:
	/* check the next slot in the tree to see if it is a valid item */
	nritems = btrfs_header_nritems(path->nodes[0]);
	path->slots[0]++;
	if (path->slots[0] >= nritems) {
		ret = btrfs_next_leaf(root, path);
		if (ret)
			goto out;
	}

	btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);

	if (key.type != key_type || key.objectid != dirid) {
		ret = 1;
		goto out;
	}
	item = btrfs_item_ptr(path->nodes[0], path->slots[0],
			      struct btrfs_dir_log_item);
	found_end = btrfs_dir_log_end(path->nodes[0], item);
	*start_ret = key.offset;
	*end_ret = found_end;
	ret = 0;
out:
	btrfs_release_path(path);
	return ret;
}
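/*
 * Example: a dir_log item with key (dirid, BTRFS_DIR_LOG_INDEX_KEY, 0) and
 * an end of 35 means the log is authoritative for index keys 0..35 of that
 * directory; any key in that range found in the subvolume but not in the
 * log was deleted before the fsync and must be removed during replay.
 */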
/*
 * this looks for a given directory item in the log.  If the directory
 * item is not in the log, the item is removed and the inode it points
 * to is unlinked
 */
static noinline int check_item_in_log(struct btrfs_trans_handle *trans,
				      struct btrfs_root *root,
				      struct btrfs_root *log,
				      struct btrfs_path *path,
				      struct btrfs_path *log_path,
				      struct inode *dir,
				      struct btrfs_key *dir_key)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	int ret;
	struct extent_buffer *eb;
	int slot;
	u32 item_size;
	struct btrfs_dir_item *di;
	struct btrfs_dir_item *log_di;
	int name_len;
	unsigned long ptr;
	unsigned long ptr_end;
	char *name;
	struct inode *inode;
	struct btrfs_key location;

again:
	eb = path->nodes[0];
	slot = path->slots[0];
	item_size = btrfs_item_size_nr(eb, slot);
	ptr = btrfs_item_ptr_offset(eb, slot);
	ptr_end = ptr + item_size;
	while (ptr < ptr_end) {
		di = (struct btrfs_dir_item *)ptr;
		if (verify_dir_item(fs_info, eb, slot, di)) {
			ret = -EIO;
			goto out;
		}

		name_len = btrfs_dir_name_len(eb, di);
		name = kmalloc(name_len, GFP_NOFS);
		if (!name) {
			ret = -ENOMEM;
			goto out;
		}
		read_extent_buffer(eb, name, (unsigned long)(di + 1),
				  name_len);
		log_di = NULL;
		if (log && dir_key->type == BTRFS_DIR_ITEM_KEY) {
			log_di = btrfs_lookup_dir_item(trans, log, log_path,
						       dir_key->objectid,
						       name, name_len, 0);
		} else if (log && dir_key->type == BTRFS_DIR_INDEX_KEY) {
			log_di = btrfs_lookup_dir_index_item(trans, log,
						     log_path,
						     dir_key->objectid,
						     dir_key->offset,
						     name, name_len, 0);
		}
		if (!log_di || (IS_ERR(log_di) && PTR_ERR(log_di) == -ENOENT)) {
			btrfs_dir_item_key_to_cpu(eb, di, &location);
			btrfs_release_path(path);
			btrfs_release_path(log_path);
			inode = read_one_inode(root, location.objectid);
			if (!inode) {
				kfree(name);
				return -EIO;
			}

			ret = link_to_fixup_dir(trans, root,
						path, location.objectid);
			if (ret) {
				kfree(name);
				iput(inode);
				goto out;
			}

			inc_nlink(inode);
			ret = btrfs_unlink_inode(trans, root, BTRFS_I(dir),
					BTRFS_I(inode), name, name_len);
			if (!ret)
				ret = btrfs_run_delayed_items(trans, fs_info);
			kfree(name);
			iput(inode);
			if (ret)
				goto out;

			/* there might still be more names under this key
			 * check and repeat if required
			 */
			ret = btrfs_search_slot(NULL, root, dir_key, path,
						0, 0);
			if (ret == 0)
				goto again;
			ret = 0;
			goto out;
		} else if (IS_ERR(log_di)) {
			kfree(name);
			return PTR_ERR(log_di);
		}
		btrfs_release_path(log_path);
		kfree(name);

		ptr = (unsigned long)(di + 1);
		ptr += name_len;
	}
	ret = 0;
out:
	btrfs_release_path(path);
	btrfs_release_path(log_path);
	return ret;
}
static int replay_xattr_deletes(struct btrfs_trans_handle *trans,
			      struct btrfs_root *root,
			      struct btrfs_root *log,
			      struct btrfs_path *path,
			      const u64 ino)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_key search_key;
	struct btrfs_path *log_path;
	int i;
	int nritems;
	int ret;

	log_path = btrfs_alloc_path();
	if (!log_path)
		return -ENOMEM;

	search_key.objectid = ino;
	search_key.type = BTRFS_XATTR_ITEM_KEY;
	search_key.offset = 0;
again:
	ret = btrfs_search_slot(NULL, root, &search_key, path, 0, 0);
	if (ret < 0)
		goto out;
process_leaf:
	nritems = btrfs_header_nritems(path->nodes[0]);
	for (i = path->slots[0]; i < nritems; i++) {
		struct btrfs_key key;
		struct btrfs_dir_item *di;
		struct btrfs_dir_item *log_di;
		u32 total_size;
		u32 cur;

		btrfs_item_key_to_cpu(path->nodes[0], &key, i);
		if (key.objectid != ino || key.type != BTRFS_XATTR_ITEM_KEY) {
			ret = 0;
			goto out;
		}

		di = btrfs_item_ptr(path->nodes[0], i, struct btrfs_dir_item);
		total_size = btrfs_item_size_nr(path->nodes[0], i);
		cur = 0;
		while (cur < total_size) {
			u16 name_len = btrfs_dir_name_len(path->nodes[0], di);
			u16 data_len = btrfs_dir_data_len(path->nodes[0], di);
			u32 this_len = sizeof(*di) + name_len + data_len;
			char *name;

			ret = verify_dir_item(fs_info, path->nodes[0], i, di);
			if (ret) {
				ret = -EIO;
				goto out;
			}

			name = kmalloc(name_len, GFP_NOFS);
			if (!name) {
				ret = -ENOMEM;
				goto out;
			}
			read_extent_buffer(path->nodes[0], name,
					   (unsigned long)(di + 1), name_len);

			log_di = btrfs_lookup_xattr(NULL, log, log_path, ino,
						    name, name_len, 0);
			btrfs_release_path(log_path);
			if (!log_di) {
				/* Doesn't exist in log tree, so delete it. */
				btrfs_release_path(path);
				di = btrfs_lookup_xattr(trans, root, path, ino,
							name, name_len, -1);
				kfree(name);
				if (IS_ERR(di)) {
					ret = PTR_ERR(di);
					goto out;
				}
				ASSERT(di);
				ret = btrfs_delete_one_dir_name(trans, root,
								path, di);
				if (ret)
					goto out;
				btrfs_release_path(path);
				search_key = key;
				goto again;
			}
			if (IS_ERR(log_di)) {
				ret = PTR_ERR(log_di);
				kfree(name);
				goto out;
			}
			kfree(name);
			cur += this_len;
			di = (struct btrfs_dir_item *)((char *)di + this_len);
		}
	}
	ret = btrfs_next_leaf(root, path);
	if (ret > 0)
		ret = 0;
	else if (ret == 0)
		goto process_leaf;
out:
	btrfs_free_path(log_path);
	btrfs_release_path(path);
	return ret;
}
/*
 * deletion replay happens before we copy any new directory items
 * out of the log or out of backreferences from inodes.  It
 * scans the log to find ranges of keys that log is authoritative for,
 * and then scans the directory to find items in those ranges that are
 * not present in the log.
 *
 * Anything we don't find in the log is unlinked and removed from the
 * directory.
 */
static noinline int replay_dir_deletes(struct btrfs_trans_handle *trans,
				       struct btrfs_root *root,
				       struct btrfs_root *log,
				       struct btrfs_path *path,
				       u64 dirid, int del_all)
{
	u64 range_start;
	u64 range_end;
	int key_type = BTRFS_DIR_LOG_ITEM_KEY;
	int ret = 0;
	struct btrfs_key dir_key;
	struct btrfs_key found_key;
	struct btrfs_path *log_path;
	struct inode *dir;

	dir_key.objectid = dirid;
	dir_key.type = BTRFS_DIR_ITEM_KEY;
	log_path = btrfs_alloc_path();
	if (!log_path)
		return -ENOMEM;

	dir = read_one_inode(root, dirid);
	/* it isn't an error if the inode isn't there, that can happen
	 * because we replay the deletes before we copy in the inode item
	 * from the log
	 */
	if (!dir) {
		btrfs_free_path(log_path);
		return 0;
	}
again:
	range_start = 0;
	range_end = 0;
	while (1) {
		if (del_all)
			range_end = (u64)-1;
		else {
			ret = find_dir_range(log, path, dirid, key_type,
					     &range_start, &range_end);
			if (ret != 0)
				break;
		}

		dir_key.offset = range_start;
		while (1) {
			int nritems;
			ret = btrfs_search_slot(NULL, root, &dir_key, path,
						0, 0);
			if (ret < 0)
				goto out;

			nritems = btrfs_header_nritems(path->nodes[0]);
			if (path->slots[0] >= nritems) {
				ret = btrfs_next_leaf(root, path);
				if (ret)
					break;
			}
			btrfs_item_key_to_cpu(path->nodes[0], &found_key,
					      path->slots[0]);
			if (found_key.objectid != dirid ||
			    found_key.type != dir_key.type)
				goto next_type;

			if (found_key.offset > range_end)
				break;

			ret = check_item_in_log(trans, root, log, path,
						log_path, dir,
						&found_key);
			if (ret)
				goto out;
			if (found_key.offset == (u64)-1)
				break;
			dir_key.offset = found_key.offset + 1;
		}
		btrfs_release_path(path);
		if (range_end == (u64)-1)
			break;
		range_start = range_end + 1;
	}

next_type:
	ret = 0;
	if (key_type == BTRFS_DIR_LOG_ITEM_KEY) {
		key_type = BTRFS_DIR_LOG_INDEX_KEY;
		dir_key.type = BTRFS_DIR_INDEX_KEY;
		btrfs_release_path(path);
		goto again;
	}
out:
	btrfs_release_path(path);
	btrfs_free_path(log_path);
	iput(dir);
	return ret;
}
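/*
 * Note: the deletes above run twice, once for BTRFS_DIR_LOG_ITEM_KEY
 * ranges (hashed dir items) and once for BTRFS_DIR_LOG_INDEX_KEY ranges
 * (index ordered items), since every directory entry exists under both
 * key types.
 */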
/*
 * the process_func used to replay items from the log tree.  This
 * gets called in two different stages.  The first stage just looks
 * for inodes and makes sure they are all copied into the subvolume.
 *
 * The second stage copies all the other item types from the log into
 * the subvolume.  The two stage approach is slower, but gets rid of
 * lots of complexity around inodes referencing other inodes that exist
 * only in the log (references come from either directory items or inode
 * back refs).
 */
static int replay_one_buffer(struct btrfs_root *log, struct extent_buffer *eb,
			     struct walk_control *wc, u64 gen)
{
	int nritems;
	struct btrfs_path *path;
	struct btrfs_root *root = wc->replay_dest;
	struct btrfs_key key;
	int level;
	int i;
	int ret;

	ret = btrfs_read_buffer(eb, gen);
	if (ret)
		return ret;

	level = btrfs_header_level(eb);

	if (level != 0)
		return 0;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	nritems = btrfs_header_nritems(eb);
	for (i = 0; i < nritems; i++) {
		btrfs_item_key_to_cpu(eb, &key, i);

		/* inode keys are done during the first stage */
		if (key.type == BTRFS_INODE_ITEM_KEY &&
		    wc->stage == LOG_WALK_REPLAY_INODES) {
			struct btrfs_inode_item *inode_item;
			u32 mode;

			inode_item = btrfs_item_ptr(eb, i,
					    struct btrfs_inode_item);
			/*
			 * If we have a tmpfile (O_TMPFILE) that got fsync'ed
			 * and never got linked before the fsync, skip it, as
			 * replaying it is pointless since it would be deleted
			 * later. We skip logging tmpfiles, but it's always
			 * possible we are replaying a log created with a kernel
			 * that used to log tmpfiles.
			 */
			if (btrfs_inode_nlink(eb, inode_item) == 0) {
				wc->ignore_cur_inode = true;
				continue;
			} else {
				wc->ignore_cur_inode = false;
			}
			ret = replay_xattr_deletes(wc->trans, root, log,
						   path, key.objectid);
			if (ret)
				break;
			mode = btrfs_inode_mode(eb, inode_item);
			if (S_ISDIR(mode)) {
				ret = replay_dir_deletes(wc->trans,
					 root, log, path, key.objectid, 0);
				if (ret)
					break;
			}
			ret = overwrite_item(wc->trans, root, path,
					     eb, i, &key);
			if (ret)
				break;

			/*
			 * Before replaying extents, truncate the inode to its
			 * size. We need to do it now and not after log replay
			 * because before an fsync we can have prealloc extents
			 * added beyond the inode's i_size. If we did it after,
			 * through orphan cleanup for example, we would drop
			 * those prealloc extents just after replaying them.
			 */
			if (S_ISREG(mode)) {
				struct inode *inode;
				u64 from;

				inode = read_one_inode(root, key.objectid);
				if (!inode) {
					ret = -EIO;
					break;
				}
				from = ALIGN(i_size_read(inode),
					     root->fs_info->sectorsize);
				ret = btrfs_drop_extents(wc->trans, root, inode,
							 from, (u64)-1, 1);
				if (!ret) {
					/* Update the inode's nbytes. */
					ret = btrfs_update_inode(wc->trans,
								 root, inode);
				}
				iput(inode);
				if (ret)
					break;
			}

			ret = link_to_fixup_dir(wc->trans, root,
						path, key.objectid);
			if (ret)
				break;
		}

		if (wc->ignore_cur_inode)
			continue;

		if (key.type == BTRFS_DIR_INDEX_KEY &&
		    wc->stage == LOG_WALK_REPLAY_DIR_INDEX) {
			ret = replay_one_dir_item(wc->trans, root, path,
						  eb, i, &key);
			if (ret)
				break;
		}

		if (wc->stage < LOG_WALK_REPLAY_ALL)
			continue;

		/* these keys are simply copied */
		if (key.type == BTRFS_XATTR_ITEM_KEY) {
			ret = overwrite_item(wc->trans, root, path,
					     eb, i, &key);
			if (ret)
				break;
		} else if (key.type == BTRFS_INODE_REF_KEY ||
			   key.type == BTRFS_INODE_EXTREF_KEY) {
			ret = add_inode_ref(wc->trans, root, log, path,
					    eb, i, &key);
			if (ret && ret != -ENOENT)
				break;
			ret = 0;
		} else if (key.type == BTRFS_EXTENT_DATA_KEY) {
			ret = replay_one_extent(wc->trans, root, path,
						eb, i, &key);
			if (ret)
				break;
		} else if (key.type == BTRFS_DIR_ITEM_KEY) {
			ret = replay_one_dir_item(wc->trans, root, path,
						  eb, i, &key);
			if (ret)
				break;
		}
	}
	btrfs_free_path(path);
	return ret;
}
static noinline int walk_down_log_tree(struct btrfs_trans_handle *trans,
				   struct btrfs_root *root,
				   struct btrfs_path *path, int *level,
				   struct walk_control *wc)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	u64 root_owner;
	u64 bytenr;
	u64 ptr_gen;
	struct extent_buffer *next;
	struct extent_buffer *cur;
	struct extent_buffer *parent;
	u32 blocksize;
	int ret = 0;

	WARN_ON(*level < 0);
	WARN_ON(*level >= BTRFS_MAX_LEVEL);

	while (*level > 0) {
		WARN_ON(*level < 0);
		WARN_ON(*level >= BTRFS_MAX_LEVEL);
		cur = path->nodes[*level];

		WARN_ON(btrfs_header_level(cur) != *level);

		if (path->slots[*level] >=
		    btrfs_header_nritems(cur))
			break;

		bytenr = btrfs_node_blockptr(cur, path->slots[*level]);
		ptr_gen = btrfs_node_ptr_generation(cur, path->slots[*level]);
		blocksize = fs_info->nodesize;

		parent = path->nodes[*level];
		root_owner = btrfs_header_owner(parent);

		next = btrfs_find_create_tree_block(fs_info, bytenr);
		if (IS_ERR(next))
			return PTR_ERR(next);

		if (*level == 1) {
			ret = wc->process_func(root, next, wc, ptr_gen);
			if (ret) {
				free_extent_buffer(next);
				return ret;
			}

			path->slots[*level]++;
			if (wc->free) {
				ret = btrfs_read_buffer(next, ptr_gen);
				if (ret) {
					free_extent_buffer(next);
					return ret;
				}

				if (trans) {
					btrfs_tree_lock(next);
					btrfs_set_lock_blocking(next);
					clean_tree_block(fs_info, next);
					btrfs_wait_tree_block_writeback(next);
					btrfs_tree_unlock(next);
				} else {
					if (test_and_clear_bit(EXTENT_BUFFER_DIRTY, &next->bflags))
						clear_extent_buffer_dirty(next);
				}

				WARN_ON(root_owner !=
					BTRFS_TREE_LOG_OBJECTID);
				ret = btrfs_free_and_pin_reserved_extent(
							fs_info, bytenr,
							blocksize);
				if (ret) {
					free_extent_buffer(next);
					return ret;
				}
			}
			free_extent_buffer(next);
			continue;
		}
		ret = btrfs_read_buffer(next, ptr_gen);
		if (ret) {
			free_extent_buffer(next);
			return ret;
		}

		WARN_ON(*level <= 0);
		if (path->nodes[*level-1])
			free_extent_buffer(path->nodes[*level-1]);
		path->nodes[*level-1] = next;
		*level = btrfs_header_level(next);
		path->slots[*level] = 0;
		cond_resched();
	}
	WARN_ON(*level < 0);
	WARN_ON(*level >= BTRFS_MAX_LEVEL);

	path->slots[*level] = btrfs_header_nritems(path->nodes[*level]);

	cond_resched();
	return 0;
}
static noinline int walk_up_log_tree(struct btrfs_trans_handle *trans,
				 struct btrfs_root *root,
				 struct btrfs_path *path, int *level,
				 struct walk_control *wc)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	u64 root_owner;
	int i;
	int slot;
	int ret;

	for (i = *level; i < BTRFS_MAX_LEVEL - 1 && path->nodes[i]; i++) {
		slot = path->slots[i];
		if (slot + 1 < btrfs_header_nritems(path->nodes[i])) {
			path->slots[i]++;
			*level = i;
			WARN_ON(*level == 0);
			return 0;
		} else {
			struct extent_buffer *parent;
			if (path->nodes[*level] == root->node)
				parent = path->nodes[*level];
			else
				parent = path->nodes[*level + 1];

			root_owner = btrfs_header_owner(parent);
			ret = wc->process_func(root, path->nodes[*level], wc,
				 btrfs_header_generation(path->nodes[*level]));
			if (ret)
				return ret;

			if (wc->free) {
				struct extent_buffer *next;

				next = path->nodes[*level];

				if (trans) {
					btrfs_tree_lock(next);
					btrfs_set_lock_blocking(next);
					clean_tree_block(fs_info, next);
					btrfs_wait_tree_block_writeback(next);
					btrfs_tree_unlock(next);
				} else {
					if (test_and_clear_bit(EXTENT_BUFFER_DIRTY, &next->bflags))
						clear_extent_buffer_dirty(next);
				}

				WARN_ON(root_owner != BTRFS_TREE_LOG_OBJECTID);
				ret = btrfs_free_and_pin_reserved_extent(
						fs_info,
						path->nodes[*level]->start,
						path->nodes[*level]->len);
				if (ret)
					return ret;
			}
			free_extent_buffer(path->nodes[*level]);
			path->nodes[*level] = NULL;
			*level = i + 1;
		}
	}
	return 1;
}
/*
 * drop the reference count on the tree rooted at 'snap'.  This traverses
 * the tree freeing any blocks that have a ref count of zero after being
 * decremented.
 */
static int walk_log_tree(struct btrfs_trans_handle *trans,
			 struct btrfs_root *log, struct walk_control *wc)
{
	struct btrfs_fs_info *fs_info = log->fs_info;
	int ret = 0;
	int wret;
	int level;
	struct btrfs_path *path;
	int orig_level;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	level = btrfs_header_level(log->node);
	orig_level = level;
	path->nodes[level] = log->node;
	extent_buffer_get(log->node);
	path->slots[level] = 0;

	while (1) {
		wret = walk_down_log_tree(trans, log, path, &level, wc);
		if (wret > 0)
			break;
		if (wret < 0) {
			ret = wret;
			goto out;
		}

		wret = walk_up_log_tree(trans, log, path, &level, wc);
		if (wret > 0)
			break;
		if (wret < 0) {
			ret = wret;
			goto out;
		}
	}

	/* was the root node processed? if not, catch it here */
	if (path->nodes[orig_level]) {
		ret = wc->process_func(log, path->nodes[orig_level], wc,
			 btrfs_header_generation(path->nodes[orig_level]));
		if (ret)
			goto out;
		if (wc->free) {
			struct extent_buffer *next;

			next = path->nodes[orig_level];

			if (trans) {
				btrfs_tree_lock(next);
				btrfs_set_lock_blocking(next);
				clean_tree_block(fs_info, next);
				btrfs_wait_tree_block_writeback(next);
				btrfs_tree_unlock(next);
			} else {
				if (test_and_clear_bit(EXTENT_BUFFER_DIRTY, &next->bflags))
					clear_extent_buffer_dirty(next);
			}

			WARN_ON(log->root_key.objectid !=
				BTRFS_TREE_LOG_OBJECTID);
			ret = btrfs_free_and_pin_reserved_extent(fs_info,
							next->start, next->len);
			if (ret)
				goto out;
		}
	}

out:
	btrfs_free_path(path);
	return ret;
}
/*
 * helper function to update the item for a given subvolume's log root
 * in the tree of log roots
 */
static int update_log_root(struct btrfs_trans_handle *trans,
			   struct btrfs_root *log)
{
	struct btrfs_fs_info *fs_info = log->fs_info;
	int ret;

	if (log->log_transid == 1) {
		/* insert root item on the first sync */
		ret = btrfs_insert_root(trans, fs_info->log_root_tree,
				&log->root_key, &log->root_item);
	} else {
		ret = btrfs_update_root(trans, fs_info->log_root_tree,
				&log->root_key, &log->root_item);
	}
	return ret;
}
static void wait_log_commit(struct btrfs_root *root, int transid)
{
	DEFINE_WAIT(wait);
	int index = transid % 2;

	/*
	 * we only allow two pending log transactions at a time,
	 * so we know that if ours is more than 2 older than the
	 * current transaction, we're done
	 */
	for (;;) {
		prepare_to_wait(&root->log_commit_wait[index],
				&wait, TASK_UNINTERRUPTIBLE);

		if (!(root->log_transid_committed < transid &&
		      atomic_read(&root->log_commit[index])))
			break;

		mutex_unlock(&root->log_mutex);
		schedule();
		mutex_lock(&root->log_mutex);
	}
	finish_wait(&root->log_commit_wait[index], &wait);
}
static void wait_for_writer(struct btrfs_root *root)
{
	DEFINE_WAIT(wait);

	for (;;) {
		prepare_to_wait(&root->log_writer_wait, &wait,
				TASK_UNINTERRUPTIBLE);
		if (!atomic_read(&root->log_writers))
			break;

		mutex_unlock(&root->log_mutex);
		schedule();
		mutex_lock(&root->log_mutex);
	}
	finish_wait(&root->log_writer_wait, &wait);
}
static inline void btrfs_remove_log_ctx(struct btrfs_root *root,
					struct btrfs_log_ctx *ctx)
{
	if (!ctx)
		return;

	mutex_lock(&root->log_mutex);
	list_del_init(&ctx->list);
	mutex_unlock(&root->log_mutex);
}
/*
 * Invoked in log mutex context, or be sure there is no other task which
 * can access the list.
 */
static inline void btrfs_remove_all_log_ctxs(struct btrfs_root *root,
					     int index, int error)
{
	struct btrfs_log_ctx *ctx;
	struct btrfs_log_ctx *safe;

	list_for_each_entry_safe(ctx, safe, &root->log_ctxs[index], list) {
		list_del_init(&ctx->list);
		ctx->log_ret = error;
	}

	INIT_LIST_HEAD(&root->log_ctxs[index]);
}
/*
 * btrfs_sync_log sends a given tree log down to the disk and
 * updates the super blocks to record it.  When this call is done,
 * you know that any inodes previously logged are safely on disk only
 * if they were fsynced.
 *
 * Any other return value means you need to call btrfs_commit_transaction.
 * Some of the edge cases for fsyncing directories that have had unlinks
 * or renames done in the past mean that sometimes the only safe
 * fsync is to commit the whole FS.  When btrfs_sync_log returns -EAGAIN,
 * that has happened.
 */
int btrfs_sync_log(struct btrfs_trans_handle *trans,
		   struct btrfs_root *root, struct btrfs_log_ctx *ctx)
{
	int index1;
	int index2;
	int mark;
	int ret;
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_root *log = root->log_root;
	struct btrfs_root *log_root_tree = fs_info->log_root_tree;
	int log_transid = 0;
	struct btrfs_log_ctx root_log_ctx;
	struct blk_plug plug;

	mutex_lock(&root->log_mutex);
	log_transid = ctx->log_transid;
	if (root->log_transid_committed >= log_transid) {
		mutex_unlock(&root->log_mutex);
		return ctx->log_ret;
	}

	index1 = log_transid % 2;
	if (atomic_read(&root->log_commit[index1])) {
		wait_log_commit(root, log_transid);
		mutex_unlock(&root->log_mutex);
		return ctx->log_ret;
	}
	ASSERT(log_transid == root->log_transid);
	atomic_set(&root->log_commit[index1], 1);

	/* wait for previous tree log sync to complete */
	if (atomic_read(&root->log_commit[(index1 + 1) % 2]))
		wait_log_commit(root, log_transid - 1);

	while (1) {
		int batch = atomic_read(&root->log_batch);
		/* when we're on an ssd, just kick the log commit out */
		if (!btrfs_test_opt(fs_info, SSD) &&
		    test_bit(BTRFS_ROOT_MULTI_LOG_TASKS, &root->state)) {
			mutex_unlock(&root->log_mutex);
			schedule_timeout_uninterruptible(1);
			mutex_lock(&root->log_mutex);
		}
		wait_for_writer(root);
		if (batch == atomic_read(&root->log_batch))
			break;
	}

	/* bail out if we need to do a full commit */
	if (btrfs_need_log_full_commit(fs_info, trans)) {
		ret = -EAGAIN;
		btrfs_free_logged_extents(log, log_transid);
		mutex_unlock(&root->log_mutex);
		goto out;
	}

	if (log_transid % 2 == 0)
		mark = EXTENT_DIRTY;
	else
		mark = EXTENT_NEW;

	/* we start IO on all the marked extents here, but we don't actually
	 * wait for them until later.
	 */
	blk_start_plug(&plug);
	ret = btrfs_write_marked_extents(fs_info, &log->dirty_log_pages, mark);
	if (ret) {
		blk_finish_plug(&plug);
		btrfs_abort_transaction(trans, ret);
		btrfs_free_logged_extents(log, log_transid);
		btrfs_set_log_full_commit(fs_info, trans);
		mutex_unlock(&root->log_mutex);
		goto out;
	}

	btrfs_set_root_node(&log->root_item, log->node);

	root->log_transid++;
	log->log_transid = root->log_transid;
	root->log_start_pid = 0;
	/*
	 * IO has been started, blocks of the log tree have WRITTEN flag set
	 * in their headers. new modifications of the log will be written to
	 * new positions. so it's safe to allow log writers to go in.
	 */
	mutex_unlock(&root->log_mutex);

	btrfs_init_log_ctx(&root_log_ctx, NULL);

	mutex_lock(&log_root_tree->log_mutex);
	atomic_inc(&log_root_tree->log_batch);
	atomic_inc(&log_root_tree->log_writers);

	index2 = log_root_tree->log_transid % 2;
	list_add_tail(&root_log_ctx.list, &log_root_tree->log_ctxs[index2]);
	root_log_ctx.log_transid = log_root_tree->log_transid;

	mutex_unlock(&log_root_tree->log_mutex);

	ret = update_log_root(trans, log);

	mutex_lock(&log_root_tree->log_mutex);
	if (atomic_dec_and_test(&log_root_tree->log_writers)) {
		/*
		 * Implicit memory barrier after atomic_dec_and_test
		 */
		if (waitqueue_active(&log_root_tree->log_writer_wait))
			wake_up(&log_root_tree->log_writer_wait);
	}

	if (ret) {
		if (!list_empty(&root_log_ctx.list))
			list_del_init(&root_log_ctx.list);

		blk_finish_plug(&plug);
		btrfs_set_log_full_commit(fs_info, trans);

		if (ret != -ENOSPC) {
			btrfs_abort_transaction(trans, ret);
			mutex_unlock(&log_root_tree->log_mutex);
			goto out;
		}
		btrfs_wait_tree_log_extents(log, mark);
		btrfs_free_logged_extents(log, log_transid);
		mutex_unlock(&log_root_tree->log_mutex);
		ret = -EAGAIN;
		goto out;
	}

	if (log_root_tree->log_transid_committed >= root_log_ctx.log_transid) {
		blk_finish_plug(&plug);
		list_del_init(&root_log_ctx.list);
		mutex_unlock(&log_root_tree->log_mutex);
		ret = root_log_ctx.log_ret;
		goto out;
	}

	index2 = root_log_ctx.log_transid % 2;
	if (atomic_read(&log_root_tree->log_commit[index2])) {
		blk_finish_plug(&plug);
		ret = btrfs_wait_tree_log_extents(log, mark);
		btrfs_wait_logged_extents(trans, log, log_transid);
		wait_log_commit(log_root_tree,
				root_log_ctx.log_transid);
		mutex_unlock(&log_root_tree->log_mutex);
		if (!ret)
			ret = root_log_ctx.log_ret;
		goto out;
	}
	ASSERT(root_log_ctx.log_transid == log_root_tree->log_transid);
	atomic_set(&log_root_tree->log_commit[index2], 1);

	if (atomic_read(&log_root_tree->log_commit[(index2 + 1) % 2])) {
		wait_log_commit(log_root_tree,
				root_log_ctx.log_transid - 1);
	}

	wait_for_writer(log_root_tree);

	/*
	 * now that we've moved on to the tree of log tree roots,
	 * check the full commit flag again
	 */
	if (btrfs_need_log_full_commit(fs_info, trans)) {
		blk_finish_plug(&plug);
		btrfs_wait_tree_log_extents(log, mark);
		btrfs_free_logged_extents(log, log_transid);
		mutex_unlock(&log_root_tree->log_mutex);
		ret = -EAGAIN;
		goto out_wake_log_root;
	}

	ret = btrfs_write_marked_extents(fs_info,
					 &log_root_tree->dirty_log_pages,
					 EXTENT_DIRTY | EXTENT_NEW);
	blk_finish_plug(&plug);
	if (ret) {
		btrfs_set_log_full_commit(fs_info, trans);
		btrfs_abort_transaction(trans, ret);
		btrfs_free_logged_extents(log, log_transid);
		mutex_unlock(&log_root_tree->log_mutex);
		goto out_wake_log_root;
	}
	ret = btrfs_wait_tree_log_extents(log, mark);
	if (!ret)
		ret = btrfs_wait_tree_log_extents(log_root_tree,
						  EXTENT_NEW | EXTENT_DIRTY);
	if (ret) {
		btrfs_set_log_full_commit(fs_info, trans);
		btrfs_free_logged_extents(log, log_transid);
		mutex_unlock(&log_root_tree->log_mutex);
		goto out_wake_log_root;
	}
	btrfs_wait_logged_extents(trans, log, log_transid);

	btrfs_set_super_log_root(fs_info->super_for_commit,
				 log_root_tree->node->start);
	btrfs_set_super_log_root_level(fs_info->super_for_commit,
				       btrfs_header_level(log_root_tree->node));

	log_root_tree->log_transid++;
	mutex_unlock(&log_root_tree->log_mutex);

	/*
	 * nobody else is going to jump in and write the ctree
	 * super here because the log_commit atomic below is protecting
	 * us.  We must be called with a transaction handle pinning
	 * the running transaction open, so a full commit can't hop
	 * in and cause problems either.
	 */
	ret = write_all_supers(fs_info, 1);
	if (ret) {
		btrfs_set_log_full_commit(fs_info, trans);
		btrfs_abort_transaction(trans, ret);
		goto out_wake_log_root;
	}

	mutex_lock(&root->log_mutex);
	if (root->last_log_commit < log_transid)
		root->last_log_commit = log_transid;
	mutex_unlock(&root->log_mutex);

out_wake_log_root:
	mutex_lock(&log_root_tree->log_mutex);
	btrfs_remove_all_log_ctxs(log_root_tree, index2, ret);

	log_root_tree->log_transid_committed++;
	atomic_set(&log_root_tree->log_commit[index2], 0);
	mutex_unlock(&log_root_tree->log_mutex);

	/*
	 * The barrier before waitqueue_active is needed so all the updates
	 * above are seen by the woken threads. It might not be necessary, but
	 * proving that seems to be hard.
	 */
	smp_mb();
	if (waitqueue_active(&log_root_tree->log_commit_wait[index2]))
		wake_up(&log_root_tree->log_commit_wait[index2]);
out:
	mutex_lock(&root->log_mutex);
	btrfs_remove_all_log_ctxs(root, index1, ret);
	root->log_transid_committed++;
	atomic_set(&root->log_commit[index1], 0);
	mutex_unlock(&root->log_mutex);

	/*
	 * The barrier before waitqueue_active is needed so all the updates
	 * above are seen by the woken threads. It might not be necessary, but
	 * proving that seems to be hard.
	 */
	smp_mb();
	if (waitqueue_active(&root->log_commit_wait[index1]))
		wake_up(&root->log_commit_wait[index1]);
	return ret;
}
static void free_log_tree(struct btrfs_trans_handle *trans,
			  struct btrfs_root *log)
{
	int ret;
	u64 start;
	u64 end;
	struct walk_control wc = {
		.free = 1,
		.process_func = process_one_buffer
	};

	ret = walk_log_tree(trans, log, &wc);
	if (ret) {
		if (trans)
			btrfs_abort_transaction(trans, ret);
		else
			btrfs_handle_fs_error(log->fs_info, ret, NULL);
	}

	while (1) {
		ret = find_first_extent_bit(&log->dirty_log_pages,
				0, &start, &end,
				EXTENT_DIRTY | EXTENT_NEW | EXTENT_NEED_WAIT,
				NULL);
		if (ret)
			break;

		clear_extent_bits(&log->dirty_log_pages, start, end,
				  EXTENT_DIRTY | EXTENT_NEW | EXTENT_NEED_WAIT);
	}

	/*
	 * We may have short-circuited the log tree with the full commit logic
	 * and left ordered extents on our list, so clear these out to keep us
	 * from leaking inodes and memory.
	 */
	btrfs_free_logged_extents(log, 0);
	btrfs_free_logged_extents(log, 1);

	free_extent_buffer(log->node);
	kfree(log);
}
/*
 * free all the extents used by the tree log.  This should be called
 * at commit time of the full transaction
 */
int btrfs_free_log(struct btrfs_trans_handle *trans, struct btrfs_root *root)
{
	if (root->log_root) {
		free_log_tree(trans, root->log_root);
		root->log_root = NULL;
	}
	return 0;
}
int btrfs_free_log_root_tree(struct btrfs_trans_handle *trans,
			     struct btrfs_fs_info *fs_info)
{
	if (fs_info->log_root_tree) {
		free_log_tree(trans, fs_info->log_root_tree);
		fs_info->log_root_tree = NULL;
	}
	return 0;
}
/*
 * If both a file and directory are logged, and unlinks or renames are
 * mixed in, we have a few interesting corners:
 *
 * create file X in dir Y
 * link file X to X.link in dir Y
 * fsync file X
 * unlink file X but leave X.link
 * fsync dir Y
 *
 * After a crash we would expect only X.link to exist.  But file X
 * didn't get fsync'd again so the log has back refs for X and X.link.
 *
 * We solve this by removing directory entries and inode backrefs from the
 * log when a file that was logged in the current transaction is
 * unlinked.  Any later fsync will include the updated log entries, and
 * we'll be able to reconstruct the proper directory items from backrefs.
 *
 * This optimization allows us to avoid relogging the entire inode
 * or the entire directory.
 */
int btrfs_del_dir_entries_in_log(struct btrfs_trans_handle *trans,
				 struct btrfs_root *root,
				 const char *name, int name_len,
				 struct btrfs_inode *dir, u64 index)
{
	struct btrfs_root *log;
	struct btrfs_dir_item *di;
	struct btrfs_path *path;
	int ret;
	int err = 0;
	int bytes_del = 0;
	u64 dir_ino = btrfs_ino(dir);

	if (dir->logged_trans < trans->transid)
		return 0;

	ret = join_running_log_trans(root);
	if (ret)
		return 0;

	mutex_lock(&dir->log_mutex);

	log = root->log_root;
	path = btrfs_alloc_path();
	if (!path) {
		err = -ENOMEM;
		goto out_unlock;
	}

	di = btrfs_lookup_dir_item(trans, log, path, dir_ino,
				   name, name_len, -1);
	if (IS_ERR(di)) {
		err = PTR_ERR(di);
		goto fail;
	}
	if (di) {
		ret = btrfs_delete_one_dir_name(trans, log, path, di);
		bytes_del += name_len;
		if (ret) {
			err = ret;
			goto fail;
		}
	}
	btrfs_release_path(path);
	di = btrfs_lookup_dir_index_item(trans, log, path, dir_ino,
					 index, name, name_len, -1);
	if (IS_ERR(di)) {
		err = PTR_ERR(di);
		goto fail;
	}
	if (di) {
		ret = btrfs_delete_one_dir_name(trans, log, path, di);
		bytes_del += name_len;
		if (ret) {
			err = ret;
			goto fail;
		}
	}

	/* update the directory size in the log to reflect the names
	 * we have removed
	 */
	if (bytes_del) {
		struct btrfs_key key;

		key.objectid = dir_ino;
		key.offset = 0;
		key.type = BTRFS_INODE_ITEM_KEY;
		btrfs_release_path(path);

		ret = btrfs_search_slot(trans, log, &key, path, 0, 1);
		if (ret < 0) {
			err = ret;
			goto fail;
		}
		if (ret == 0) {
			struct btrfs_inode_item *item;
			u64 i_size;

			item = btrfs_item_ptr(path->nodes[0], path->slots[0],
					      struct btrfs_inode_item);
			i_size = btrfs_inode_size(path->nodes[0], item);
			if (i_size > bytes_del)
				i_size -= bytes_del;
			else
				i_size = 0;
			btrfs_set_inode_size(path->nodes[0], item, i_size);
			btrfs_mark_buffer_dirty(path->nodes[0]);
		} else
			ret = 0;
		btrfs_release_path(path);
	}
fail:
	btrfs_free_path(path);
out_unlock:
	mutex_unlock(&dir->log_mutex);
	if (ret == -ENOSPC) {
		btrfs_set_log_full_commit(root->fs_info, trans);
		ret = 0;
	} else if (ret < 0)
		btrfs_abort_transaction(trans, ret);

	btrfs_end_log_trans(root);

	return err;
}
/* see comments for btrfs_del_dir_entries_in_log */
int btrfs_del_inode_ref_in_log(struct btrfs_trans_handle *trans,
			       struct btrfs_root *root,
			       const char *name, int name_len,
			       struct btrfs_inode *inode, u64 dirid)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_root *log;
	u64 index;
	int ret;

	if (inode->logged_trans < trans->transid)
		return 0;

	ret = join_running_log_trans(root);
	if (ret)
		return 0;
	log = root->log_root;
	mutex_lock(&inode->log_mutex);

	ret = btrfs_del_inode_ref(trans, log, name, name_len, btrfs_ino(inode),
				  dirid, &index);
	mutex_unlock(&inode->log_mutex);
	if (ret == -ENOSPC) {
		btrfs_set_log_full_commit(fs_info, trans);
		ret = 0;
	} else if (ret < 0 && ret != -ENOENT)
		btrfs_abort_transaction(trans, ret);
	btrfs_end_log_trans(root);

	return ret;
}
/*
 * creates a range item in the log for 'dirid'.  first_offset and
 * last_offset tell us which parts of the key space the log should
 * be considered authoritative for.
 */
static noinline int insert_dir_log_key(struct btrfs_trans_handle *trans,
				       struct btrfs_root *log,
				       struct btrfs_path *path,
				       int key_type, u64 dirid,
				       u64 first_offset, u64 last_offset)
{
	int ret;
	struct btrfs_key key;
	struct btrfs_dir_log_item *item;

	key.objectid = dirid;
	key.offset = first_offset;
	if (key_type == BTRFS_DIR_ITEM_KEY)
		key.type = BTRFS_DIR_LOG_ITEM_KEY;
	else
		key.type = BTRFS_DIR_LOG_INDEX_KEY;
	ret = btrfs_insert_empty_item(trans, log, path, &key, sizeof(*item));
	if (ret)
		return ret;

	item = btrfs_item_ptr(path->nodes[0], path->slots[0],
			      struct btrfs_dir_log_item);
	btrfs_set_dir_log_end(path->nodes[0], item, last_offset);
	btrfs_mark_buffer_dirty(path->nodes[0]);
	btrfs_release_path(path);
	return 0;
}
/*
 * log all the items included in the current transaction for a given
 * directory.  This also creates the range items in the log tree required
 * to replay anything deleted before the fsync
 */
static noinline int log_dir_items(struct btrfs_trans_handle *trans,
			  struct btrfs_root *root, struct btrfs_inode *inode,
			  struct btrfs_path *path,
			  struct btrfs_path *dst_path, int key_type,
			  struct btrfs_log_ctx *ctx,
			  u64 min_offset, u64 *last_offset_ret)
{
	struct btrfs_key min_key;
	struct btrfs_root *log = root->log_root;
	struct extent_buffer *src;
	int err = 0;
	int ret;
	int i;
	int nritems;
	u64 first_offset = min_offset;
	u64 last_offset = (u64)-1;
	u64 ino = btrfs_ino(inode);

	log = root->log_root;

	min_key.objectid = ino;
	min_key.type = key_type;
	min_key.offset = min_offset;

	ret = btrfs_search_forward(root, &min_key, path, trans->transid);

	/*
	 * we didn't find anything from this transaction, see if there
	 * is anything at all
	 */
	if (ret != 0 || min_key.objectid != ino || min_key.type != key_type) {
		min_key.objectid = ino;
		min_key.type = key_type;
		min_key.offset = (u64)-1;
		btrfs_release_path(path);
		ret = btrfs_search_slot(NULL, root, &min_key, path, 0, 0);
		if (ret < 0) {
			btrfs_release_path(path);
			return ret;
		}
		ret = btrfs_previous_item(root, path, ino, key_type);

		/* if ret == 0 there are items for this type,
		 * create a range to tell us the last key of this type.
		 * otherwise, there are no items in this directory after
		 * *min_offset, and we create a range to indicate that.
		 */
		if (ret == 0) {
			struct btrfs_key tmp;
			btrfs_item_key_to_cpu(path->nodes[0], &tmp,
					      path->slots[0]);
			if (key_type == tmp.type)
				first_offset = max(min_offset, tmp.offset) + 1;
		}
		goto done;
	}

	/* go backward to find any previous key */
	ret = btrfs_previous_item(root, path, ino, key_type);
	if (ret == 0) {
		struct btrfs_key tmp;
		btrfs_item_key_to_cpu(path->nodes[0], &tmp, path->slots[0]);
		if (key_type == tmp.type) {
			first_offset = tmp.offset;
			ret = overwrite_item(trans, log, dst_path,
					     path->nodes[0], path->slots[0],
					     &tmp);
			if (ret) {
				err = ret;
				goto done;
			}
		}
	}
	btrfs_release_path(path);

	/* find the first key from this transaction again */
	ret = btrfs_search_slot(NULL, root, &min_key, path, 0, 0);
	if (WARN_ON(ret != 0))
		goto done;

	/*
	 * we have a block from this transaction, log every item in it
	 * from our directory
	 */
	while (1) {
		struct btrfs_key tmp;
		src = path->nodes[0];
		nritems = btrfs_header_nritems(src);
		for (i = path->slots[0]; i < nritems; i++) {
			struct btrfs_dir_item *di;

			btrfs_item_key_to_cpu(src, &min_key, i);

			if (min_key.objectid != ino || min_key.type != key_type)
				goto done;
			ret = overwrite_item(trans, log, dst_path, src, i,
					     &min_key);
			if (ret) {
				err = ret;
				goto done;
			}

			/*
			 * We must make sure that when we log a directory entry,
			 * the corresponding inode, after log replay, has a
			 * matching link count. For example:
			 *
			 * touch foo
			 * mkdir mydir
			 * sync
			 * ln foo mydir/bar
			 * xfs_io -c "fsync" mydir
			 * <crash>
			 * <mount fs and log replay>
			 *
			 * Would result in a fsync log that when replayed, our
			 * file inode would have a link count of 1, but we get
			 * two directory entries pointing to the same inode.
			 * After removing one of the names, it would not be
			 * possible to remove the other name, which resulted
			 * always in stale file handle errors, and would not
			 * be possible to rmdir the parent directory, since
			 * its i_size could never decrement to the value
			 * BTRFS_EMPTY_DIR_SIZE, resulting in -ENOTEMPTY errors.
			 */
			di = btrfs_item_ptr(src, i, struct btrfs_dir_item);
			btrfs_dir_item_key_to_cpu(src, di, &tmp);
			if (ctx &&
			    (btrfs_dir_transid(src, di) == trans->transid ||
			     btrfs_dir_type(src, di) == BTRFS_FT_DIR) &&
			    tmp.type != BTRFS_ROOT_ITEM_KEY)
				ctx->log_new_dentries = true;
		}
		path->slots[0] = nritems;

		/*
		 * look ahead to the next item and see if it is also
		 * from this directory and from this transaction
		 */
		ret = btrfs_next_leaf(root, path);
		if (ret) {
			if (ret == 1)
				last_offset = (u64)-1;
			else
				err = ret;
			goto done;
		}
		btrfs_item_key_to_cpu(path->nodes[0], &tmp, path->slots[0]);
		if (tmp.objectid != ino || tmp.type != key_type) {
			last_offset = (u64)-1;
			goto done;
		}
		if (btrfs_header_generation(path->nodes[0]) != trans->transid) {
			ret = overwrite_item(trans, log, dst_path,
					     path->nodes[0], path->slots[0],
					     &tmp);
			if (ret)
				err = ret;
			else
				last_offset = tmp.offset;
			goto done;
		}
	}
done:
	btrfs_release_path(path);
	btrfs_release_path(dst_path);

	if (err == 0) {
		*last_offset_ret = last_offset;
		/*
		 * insert the log range keys to indicate where the log
		 * is valid
		 */
		ret = insert_dir_log_key(trans, log, path, key_type,
					 ino, first_offset, last_offset);
		if (ret)
			err = ret;
	}
	return err;
}
/*
 * logging directories is very similar to logging inodes, we find all the items
 * from the current transaction and write them to the log.
 *
 * The recovery code scans the directory in the subvolume, and if it finds a
 * key in the range logged that is not present in the log tree, then it means
 * that dir entry was unlinked during the transaction.
 *
 * In order for that scan to work, we must include one key smaller than
 * the smallest logged by this transaction and one key larger than the largest
 * key logged by this transaction.
 */
static noinline int log_directory_changes(struct btrfs_trans_handle *trans,
			  struct btrfs_root *root, struct btrfs_inode *inode,
			  struct btrfs_path *path,
			  struct btrfs_path *dst_path,
			  struct btrfs_log_ctx *ctx)
{
	u64 min_key;
	u64 max_key;
	int ret;
	int key_type = BTRFS_DIR_ITEM_KEY;

again:
	min_key = 0;
	max_key = 0;
	while (1) {
		ret = log_dir_items(trans, root, inode, path, dst_path, key_type,
				    ctx, min_key, &max_key);
		if (ret)
			return ret;
		if (max_key == (u64)-1)
			break;
		min_key = max_key + 1;
	}

	if (key_type == BTRFS_DIR_ITEM_KEY) {
		key_type = BTRFS_DIR_INDEX_KEY;
		goto again;
	}
	return 0;
}
/*
 * a helper function to drop items from the log before we relog an
 * inode.  max_key_type indicates the highest item type to remove.
 * This cannot be run for file data extents because it does not
 * free the extents they point to.
 */
static int drop_objectid_items(struct btrfs_trans_handle *trans,
				  struct btrfs_root *log,
				  struct btrfs_path *path,
				  u64 objectid, int max_key_type)
{
	int ret;
	struct btrfs_key key;
	struct btrfs_key found_key;
	int start_slot;

	key.objectid = objectid;
	key.type = max_key_type;
	key.offset = (u64)-1;

	while (1) {
		ret = btrfs_search_slot(trans, log, &key, path, -1, 1);
		BUG_ON(ret == 0); /* Logic error */
		if (ret < 0)
			break;

		if (path->slots[0] == 0)
			break;

		path->slots[0]--;
		btrfs_item_key_to_cpu(path->nodes[0], &found_key,
				      path->slots[0]);

		if (found_key.objectid != objectid)
			break;

		found_key.offset = 0;
		found_key.type = 0;
		ret = btrfs_bin_search(path->nodes[0], &found_key, 0,
				       &start_slot);

		ret = btrfs_del_items(trans, log, path, start_slot,
				      path->slots[0] - start_slot + 1);
		/*
		 * If start slot isn't 0 then we don't need to re-search, we've
		 * found the last guy with the objectid in this tree.
		 */
		if (ret || start_slot != 0)
			break;
		btrfs_release_path(path);
	}
	btrfs_release_path(path);
	if (ret > 0)
		ret = 0;
	return ret;
}
static void fill_inode_item(struct btrfs_trans_handle *trans,
			    struct extent_buffer *leaf,
			    struct btrfs_inode_item *item,
			    struct inode *inode, int log_inode_only,
			    u64 logged_isize)
{
	struct btrfs_map_token token;

	btrfs_init_map_token(&token);

	if (log_inode_only) {
		/* set the generation to zero so the recover code
		 * can tell the difference between a logging
		 * just to say 'this inode exists' and a logging
		 * to say 'update this inode with these values'
		 */
		btrfs_set_token_inode_generation(leaf, item, 0, &token);
		btrfs_set_token_inode_size(leaf, item, logged_isize, &token);
	} else {
		btrfs_set_token_inode_generation(leaf, item,
						 BTRFS_I(inode)->generation,
						 &token);
		btrfs_set_token_inode_size(leaf, item, inode->i_size, &token);
	}

	btrfs_set_token_inode_uid(leaf, item, i_uid_read(inode), &token);
	btrfs_set_token_inode_gid(leaf, item, i_gid_read(inode), &token);
	btrfs_set_token_inode_mode(leaf, item, inode->i_mode, &token);
	btrfs_set_token_inode_nlink(leaf, item, inode->i_nlink, &token);

	btrfs_set_token_timespec_sec(leaf, &item->atime,
				     inode->i_atime.tv_sec, &token);
	btrfs_set_token_timespec_nsec(leaf, &item->atime,
				      inode->i_atime.tv_nsec, &token);

	btrfs_set_token_timespec_sec(leaf, &item->mtime,
				     inode->i_mtime.tv_sec, &token);
	btrfs_set_token_timespec_nsec(leaf, &item->mtime,
				      inode->i_mtime.tv_nsec, &token);

	btrfs_set_token_timespec_sec(leaf, &item->ctime,
				     inode->i_ctime.tv_sec, &token);
	btrfs_set_token_timespec_nsec(leaf, &item->ctime,
				      inode->i_ctime.tv_nsec, &token);

	btrfs_set_token_inode_nbytes(leaf, item, inode_get_bytes(inode),
				     &token);

	btrfs_set_token_inode_sequence(leaf, item, inode->i_version, &token);
	btrfs_set_token_inode_transid(leaf, item, trans->transid, &token);
	btrfs_set_token_inode_rdev(leaf, item, inode->i_rdev, &token);
	btrfs_set_token_inode_flags(leaf, item, BTRFS_I(inode)->flags, &token);
	btrfs_set_token_inode_block_group(leaf, item, 0, &token);
}
static int log_inode_item(struct btrfs_trans_handle *trans,
			  struct btrfs_root *log, struct btrfs_path *path,
			  struct btrfs_inode *inode)
{
	struct btrfs_inode_item *inode_item;
	int ret;

	ret = btrfs_insert_empty_item(trans, log, path,
				      &inode->location, sizeof(*inode_item));
	if (ret && ret != -EEXIST)
		return ret;
	inode_item = btrfs_item_ptr(path->nodes[0], path->slots[0],
				    struct btrfs_inode_item);
	fill_inode_item(trans, path->nodes[0], inode_item, &inode->vfs_inode,
			0, 0);
	btrfs_release_path(path);
	return 0;
}
static noinline int copy_items(struct btrfs_trans_handle *trans,
			       struct btrfs_inode *inode,
			       struct btrfs_path *dst_path,
			       struct btrfs_path *src_path, u64 *last_extent,
			       int start_slot, int nr, int inode_only,
			       u64 logged_isize)
{
	struct btrfs_fs_info *fs_info = btrfs_sb(inode->vfs_inode.i_sb);
	unsigned long src_offset;
	unsigned long dst_offset;
	struct btrfs_root *log = inode->root->log_root;
	struct btrfs_file_extent_item *extent;
	struct btrfs_inode_item *inode_item;
	struct extent_buffer *src = src_path->nodes[0];
	struct btrfs_key first_key, last_key, key;
	int ret;
	struct btrfs_key *ins_keys;
	u32 *ins_sizes;
	char *ins_data;
	int i;
	struct list_head ordered_sums;
	int skip_csum = inode->flags & BTRFS_INODE_NODATASUM;
	bool has_extents = false;
	bool need_find_last_extent = true;
	bool done = false;

	INIT_LIST_HEAD(&ordered_sums);

	ins_data = kmalloc(nr * sizeof(struct btrfs_key) +
			   nr * sizeof(u32), GFP_NOFS);
	if (!ins_data)
		return -ENOMEM;

	first_key.objectid = (u64)-1;

	ins_sizes = (u32 *)ins_data;
	ins_keys = (struct btrfs_key *)(ins_data + nr * sizeof(u32));

	for (i = 0; i < nr; i++) {
		ins_sizes[i] = btrfs_item_size_nr(src, i + start_slot);
		btrfs_item_key_to_cpu(src, ins_keys + i, i + start_slot);
	}
	ret = btrfs_insert_empty_items(trans, log, dst_path,
				       ins_keys, ins_sizes, nr);
	if (ret) {
		kfree(ins_data);
		return ret;
	}

	for (i = 0; i < nr; i++, dst_path->slots[0]++) {
		dst_offset = btrfs_item_ptr_offset(dst_path->nodes[0],
						   dst_path->slots[0]);

		src_offset = btrfs_item_ptr_offset(src, start_slot + i);

		last_key = ins_keys[i];

		if (ins_keys[i].type == BTRFS_INODE_ITEM_KEY) {
			inode_item = btrfs_item_ptr(dst_path->nodes[0],
						    dst_path->slots[0],
						    struct btrfs_inode_item);
			fill_inode_item(trans, dst_path->nodes[0], inode_item,
					&inode->vfs_inode,
					inode_only == LOG_INODE_EXISTS,
					logged_isize);
		} else {
			copy_extent_buffer(dst_path->nodes[0], src, dst_offset,
					   src_offset, ins_sizes[i]);
		}

		/*
		 * We set need_find_last_extent here in case we know we were
		 * processing other items and then walk into the first extent in
		 * the inode.  If we don't hit an extent then nothing changes,
		 * we'll do the last search the next time around.
		 */
		if (ins_keys[i].type == BTRFS_EXTENT_DATA_KEY) {
			has_extents = true;
			if (first_key.objectid == (u64)-1)
				first_key = ins_keys[i];
		} else {
			need_find_last_extent = false;
		}

		/* take a reference on file data extents so that truncates
		 * or deletes of this inode don't have to relog the inode
		 * again
		 */
		if (ins_keys[i].type == BTRFS_EXTENT_DATA_KEY &&
		    !skip_csum) {
			int found_type;
			extent = btrfs_item_ptr(src, start_slot + i,
						struct btrfs_file_extent_item);

			if (btrfs_file_extent_generation(src, extent) < trans->transid)
				continue;

			found_type = btrfs_file_extent_type(src, extent);
			if (found_type == BTRFS_FILE_EXTENT_REG) {
				u64 ds, dl, cs, cl;
				ds = btrfs_file_extent_disk_bytenr(src,
								extent);
				/* ds == 0 is a hole */
				if (ds == 0)
					continue;

				dl = btrfs_file_extent_disk_num_bytes(src,
								extent);
				cs = btrfs_file_extent_offset(src, extent);
				cl = btrfs_file_extent_num_bytes(src,
								extent);
				if (btrfs_file_extent_compression(src,
								  extent)) {
					cs = 0;
					cl = dl;
				}

				ret = btrfs_lookup_csums_range(
						fs_info->csum_root,
						ds + cs, ds + cs + cl - 1,
						&ordered_sums, 0);
				if (ret) {
					btrfs_release_path(dst_path);
					kfree(ins_data);
					return ret;
				}
			}
		}
	}

	btrfs_mark_buffer_dirty(dst_path->nodes[0]);
	btrfs_release_path(dst_path);
	kfree(ins_data);

	/*
	 * we have to do this after the loop above to avoid changing the
	 * log tree while trying to change the log tree.
	 */
	ret = 0;
	while (!list_empty(&ordered_sums)) {
		struct btrfs_ordered_sum *sums = list_entry(ordered_sums.next,
						   struct btrfs_ordered_sum,
						   list);
		if (!ret)
			ret = btrfs_csum_file_blocks(trans, log, sums);
		list_del(&sums->list);
		kfree(sums);
	}

	if (!has_extents)
		return ret;

	if (need_find_last_extent && *last_extent == first_key.offset) {
		/*
		 * We don't have any leafs between our current one and the one
		 * we processed before that can have file extent items for our
		 * inode (and have a generation number smaller than our current
		 * transaction id).
		 */
		need_find_last_extent = false;
	}

	/*
	 * Because we use btrfs_search_forward we could skip leaves that were
	 * not modified and then assume *last_extent is valid when it really
	 * isn't.  So back up to the previous leaf and read the end of the last
	 * extent before we go and fill in holes.
	 */
	if (need_find_last_extent) {
		u64 len;

		ret = btrfs_prev_leaf(inode->root, src_path);
		if (ret < 0)
			return ret;
		if (ret)
			goto fill_holes;
		if (src_path->slots[0])
			src_path->slots[0]--;
		src = src_path->nodes[0];
		btrfs_item_key_to_cpu(src, &key, src_path->slots[0]);
		if (key.objectid != btrfs_ino(inode) ||
		    key.type != BTRFS_EXTENT_DATA_KEY)
			goto fill_holes;
		extent = btrfs_item_ptr(src, src_path->slots[0],
					struct btrfs_file_extent_item);
		if (btrfs_file_extent_type(src, extent) ==
		    BTRFS_FILE_EXTENT_INLINE) {
			len = btrfs_file_extent_inline_len(src,
							   src_path->slots[0],
							   extent);
			*last_extent = ALIGN(key.offset + len,
					     fs_info->sectorsize);
		} else {
			len = btrfs_file_extent_num_bytes(src, extent);
			*last_extent = key.offset + len;
		}
	}
fill_holes:
	/* So we did prev_leaf, now we need to move to the next leaf, but a few
	 * things could have happened
	 *
	 * 1) A merge could have happened, so we could currently be on a leaf
	 * that holds what we were copying in the first place.
	 * 2) A split could have happened, and now not all of the items we want
	 * are on the same leaf.
	 *
	 * So we need to adjust how we search for holes, we need to drop the
	 * path and re-search for the first extent key we found, and then walk
	 * forward until we hit the last one we copied.
	 */
	if (need_find_last_extent) {
		/* btrfs_prev_leaf could return 1 without releasing the path */
		btrfs_release_path(src_path);
		ret = btrfs_search_slot(NULL, inode->root, &first_key,
					src_path, 0, 0);
		if (ret < 0)
			return ret;
		ASSERT(ret == 0);
		src = src_path->nodes[0];
		i = src_path->slots[0];
	} else {
		i = start_slot;
	}

	/*
	 * Ok so here we need to go through and fill in any holes we may have
	 * to make sure that holes are punched for those areas in case they had
	 * extents previously.
	 */
	while (!done) {
		u64 offset, len;
		u64 extent_end;

		if (i >= btrfs_header_nritems(src_path->nodes[0])) {
			ret = btrfs_next_leaf(inode->root, src_path);
			if (ret < 0)
				return ret;
			ASSERT(ret == 0);
			src = src_path->nodes[0];
			i = 0;
			need_find_last_extent = true;
		}

		btrfs_item_key_to_cpu(src, &key, i);
		if (!btrfs_comp_cpu_keys(&key, &last_key))
			done = true;
		if (key.objectid != btrfs_ino(inode) ||
		    key.type != BTRFS_EXTENT_DATA_KEY) {
			i++;
			continue;
		}
		extent = btrfs_item_ptr(src, i, struct btrfs_file_extent_item);
		if (btrfs_file_extent_type(src, extent) ==
		    BTRFS_FILE_EXTENT_INLINE) {
			len = btrfs_file_extent_inline_len(src, i, extent);
			extent_end = ALIGN(key.offset + len,
					   fs_info->sectorsize);
		} else {
			len = btrfs_file_extent_num_bytes(src, extent);
			extent_end = key.offset + len;
		}
		i++;

		if (*last_extent == key.offset) {
			*last_extent = extent_end;
			continue;
		}
		offset = *last_extent;
		len = key.offset - *last_extent;
		ret = btrfs_insert_file_extent(trans, log, btrfs_ino(inode),
					       offset, 0, 0, len, 0, len, 0, 0, 0);
		if (ret)
			break;
		*last_extent = extent_end;
	}

	/*
	 * Check if there is a hole between the last extent found in our leaf
	 * and the first extent in the next leaf.  If there is one, we need to
	 * log an explicit hole so that at replay time we can punch the hole.
	 */
	if (ret == 0 &&
	    key.objectid == btrfs_ino(inode) &&
	    key.type == BTRFS_EXTENT_DATA_KEY &&
	    i == btrfs_header_nritems(src_path->nodes[0])) {
		ret = btrfs_next_leaf(inode->root, src_path);
		need_find_last_extent = true;
		if (ret > 0) {
			ret = 0;
		} else if (ret == 0) {
			btrfs_item_key_to_cpu(src_path->nodes[0], &key,
					      src_path->slots[0]);
			if (key.objectid == btrfs_ino(inode) &&
			    key.type == BTRFS_EXTENT_DATA_KEY &&
			    *last_extent < key.offset) {
				const u64 len = key.offset - *last_extent;

				ret = btrfs_insert_file_extent(trans, log,
							       btrfs_ino(inode),
							       *last_extent, 0,
							       0, len, 0, len,
							       0, 0, 0);
			}
		}
	}
	/*
	 * Need to let the callers know we dropped the path so they should
	 * re-search.
	 */
	if (!ret && need_find_last_extent)
		ret = 1;
	return ret;
}
static int extent_cmp(void *priv, struct list_head *a, struct list_head *b)
{
	struct extent_map *em1, *em2;

	em1 = list_entry(a, struct extent_map, list);
	em2 = list_entry(b, struct extent_map, list);

	if (em1->start < em2->start)
		return -1;
	else if (em1->start > em2->start)
		return 1;
	return 0;
}
static int wait_ordered_extents(struct btrfs_trans_handle *trans,
				struct inode *inode,
				struct btrfs_root *root,
				const struct extent_map *em,
				const struct list_head *logged_list,
				bool *ordered_io_error)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_ordered_extent *ordered;
	struct btrfs_root *log = root->log_root;
	u64 mod_start = em->mod_start;
	u64 mod_len = em->mod_len;
	const bool skip_csum = BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM;
	u64 csum_offset;
	u64 csum_len;
	LIST_HEAD(ordered_sums);
	int ret = 0;

	*ordered_io_error = false;

	if (test_bit(EXTENT_FLAG_PREALLOC, &em->flags) ||
	    em->block_start == EXTENT_MAP_HOLE)
		return 0;

	/*
	 * Wait for any ordered extent that covers our extent map. If it
	 * finishes without an error, first check and see if our csums are on
	 * our outstanding ordered extents.
	 */
	list_for_each_entry(ordered, logged_list, log_list) {
		struct btrfs_ordered_sum *sum;

		if (!mod_len)
			break;

		if (ordered->file_offset + ordered->len <= mod_start ||
		    mod_start + mod_len <= ordered->file_offset)
			continue;

		if (!test_bit(BTRFS_ORDERED_IO_DONE, &ordered->flags) &&
		    !test_bit(BTRFS_ORDERED_IOERR, &ordered->flags) &&
		    !test_bit(BTRFS_ORDERED_DIRECT, &ordered->flags)) {
			const u64 start = ordered->file_offset;
			const u64 end = ordered->file_offset + ordered->len - 1;

			WARN_ON(ordered->inode != inode);
			filemap_fdatawrite_range(inode->i_mapping, start, end);
		}

		wait_event(ordered->wait,
			   (test_bit(BTRFS_ORDERED_IO_DONE, &ordered->flags) ||
			    test_bit(BTRFS_ORDERED_IOERR, &ordered->flags)));

		if (test_bit(BTRFS_ORDERED_IOERR, &ordered->flags)) {
			/*
			 * Clear the AS_EIO/AS_ENOSPC flags from the inode's
			 * i_mapping flags, so that the next fsync won't get
			 * an outdated io error too.
			 */
			filemap_check_errors(inode->i_mapping);
			*ordered_io_error = true;
			break;
		}
		/*
		 * We are going to copy all the csums on this ordered extent, so
		 * go ahead and adjust mod_start and mod_len in case this
		 * ordered extent has already been logged.
		 */
		if (ordered->file_offset > mod_start) {
			if (ordered->file_offset + ordered->len >=
			    mod_start + mod_len)
				mod_len = ordered->file_offset - mod_start;
			/*
			 * If we have this case
			 *
			 * |--------- logged extent ---------|
			 *       |----- ordered extent ----|
			 *
			 * Just don't mess with mod_start and mod_len, we'll
			 * just end up logging more csums than we need and it
			 * will be ok.
			 */
		} else {
			if (ordered->file_offset + ordered->len <
			    mod_start + mod_len) {
				mod_len = (mod_start + mod_len) -
					(ordered->file_offset + ordered->len);
				mod_start = ordered->file_offset +
					ordered->len;
			} else {
				mod_len = 0;
			}
		}

		if (skip_csum)
			continue;

		/*
		 * To keep us from looping for the above case of an ordered
		 * extent that falls inside of the logged extent.
		 */
		if (test_and_set_bit(BTRFS_ORDERED_LOGGED_CSUM,
				     &ordered->flags))
			continue;

		list_for_each_entry(sum, &ordered->list, list) {
			ret = btrfs_csum_file_blocks(trans, log, sum);
			if (ret)
				break;
		}
	}

	if (*ordered_io_error || !mod_len || ret || skip_csum)
		return ret;

	if (em->compress_type) {
		csum_offset = 0;
		csum_len = max(em->block_len, em->orig_block_len);
	} else {
		csum_offset = mod_start - em->start;
		csum_len = mod_len;
	}

	/* block start is already adjusted for the file extent offset. */
	ret = btrfs_lookup_csums_range(fs_info->csum_root,
				       em->block_start + csum_offset,
				       em->block_start + csum_offset +
				       csum_len - 1, &ordered_sums, 0);
	if (ret)
		return ret;

	while (!list_empty(&ordered_sums)) {
		struct btrfs_ordered_sum *sums = list_entry(ordered_sums.next,
						struct btrfs_ordered_sum,
						list);
		if (!ret)
			ret = btrfs_csum_file_blocks(trans, log, sums);
		list_del(&sums->list);
		kfree(sums);
	}

	return ret;
}
static int log_one_extent(struct btrfs_trans_handle *trans,
			  struct btrfs_inode *inode, struct btrfs_root *root,
			  const struct extent_map *em,
			  struct btrfs_path *path,
			  const struct list_head *logged_list,
			  struct btrfs_log_ctx *ctx)
{
	struct btrfs_root *log = root->log_root;
	struct btrfs_file_extent_item *fi;
	struct extent_buffer *leaf;
	struct btrfs_map_token token;
	struct btrfs_key key;
	u64 extent_offset = em->start - em->orig_start;
	u64 block_len;
	int ret;
	int extent_inserted = 0;
	bool ordered_io_err = false;

	ret = wait_ordered_extents(trans, &inode->vfs_inode, root, em,
				   logged_list, &ordered_io_err);
	if (ret)
		return ret;

	if (ordered_io_err) {
		ctx->io_err = -EIO;
		return ctx->io_err;
	}

	btrfs_init_map_token(&token);

	ret = __btrfs_drop_extents(trans, log, &inode->vfs_inode, path, em->start,
				   em->start + em->len, NULL, 0, 1,
				   sizeof(*fi), &extent_inserted);
	if (ret)
		return ret;

	if (!extent_inserted) {
		key.objectid = btrfs_ino(inode);
		key.type = BTRFS_EXTENT_DATA_KEY;
		key.offset = em->start;

		ret = btrfs_insert_empty_item(trans, log, path, &key,
					      sizeof(*fi));
		if (ret)
			return ret;
	}
	leaf = path->nodes[0];
	fi = btrfs_item_ptr(leaf, path->slots[0],
			    struct btrfs_file_extent_item);

	btrfs_set_token_file_extent_generation(leaf, fi, trans->transid,
					       &token);
	if (test_bit(EXTENT_FLAG_PREALLOC, &em->flags))
		btrfs_set_token_file_extent_type(leaf, fi,
						 BTRFS_FILE_EXTENT_PREALLOC,
						 &token);
	else
		btrfs_set_token_file_extent_type(leaf, fi,
						 BTRFS_FILE_EXTENT_REG,
						 &token);

	block_len = max(em->block_len, em->orig_block_len);
	if (em->compress_type != BTRFS_COMPRESS_NONE) {
		btrfs_set_token_file_extent_disk_bytenr(leaf, fi,
							em->block_start,
							&token);
		btrfs_set_token_file_extent_disk_num_bytes(leaf, fi, block_len,
							   &token);
	} else if (em->block_start < EXTENT_MAP_LAST_BYTE) {
		btrfs_set_token_file_extent_disk_bytenr(leaf, fi,
							em->block_start -
							extent_offset, &token);
		btrfs_set_token_file_extent_disk_num_bytes(leaf, fi, block_len,
							   &token);
	} else {
		btrfs_set_token_file_extent_disk_bytenr(leaf, fi, 0, &token);
		btrfs_set_token_file_extent_disk_num_bytes(leaf, fi, 0,
							   &token);
	}

	btrfs_set_token_file_extent_offset(leaf, fi, extent_offset, &token);
	btrfs_set_token_file_extent_num_bytes(leaf, fi, em->len, &token);
	btrfs_set_token_file_extent_ram_bytes(leaf, fi, em->ram_bytes, &token);
	btrfs_set_token_file_extent_compression(leaf, fi, em->compress_type,
						&token);
	btrfs_set_token_file_extent_encryption(leaf, fi, 0, &token);
	btrfs_set_token_file_extent_other_encoding(leaf, fi, 0, &token);
	btrfs_mark_buffer_dirty(leaf);

	btrfs_release_path(path);

	return ret;
}
/*
 * Log all prealloc extents beyond the inode's i_size to make sure we do not
 * lose them after doing a fast fsync and replaying the log. We scan the
 * subvolume's root instead of iterating the inode's extent map tree because
 * otherwise we can log incorrect extent items based on extent map conversion.
 * That can happen due to the fact that extent maps are merged when they
 * are not in the extent map tree's list of modified extents.
 */
static int btrfs_log_prealloc_extents(struct btrfs_trans_handle *trans,
				      struct btrfs_inode *inode,
				      struct btrfs_path *path)
{
	struct btrfs_root *root = inode->root;
	struct btrfs_key key;
	const u64 i_size = i_size_read(&inode->vfs_inode);
	const u64 ino = btrfs_ino(inode);
	struct btrfs_path *dst_path = NULL;
	u64 last_extent = (u64)-1;
	int ins_nr = 0;
	int start_slot = 0;
	int ret;

	if (!(inode->flags & BTRFS_INODE_PREALLOC))
		return 0;

	key.objectid = ino;
	key.type = BTRFS_EXTENT_DATA_KEY;
	key.offset = i_size;
	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	if (ret < 0)
		goto out;

	while (true) {
		struct extent_buffer *leaf = path->nodes[0];
		int slot = path->slots[0];

		if (slot >= btrfs_header_nritems(leaf)) {
			if (ins_nr > 0) {
				ret = copy_items(trans, inode, dst_path, path,
						 &last_extent, start_slot,
						 ins_nr, 1, 0);
				if (ret < 0)
					goto out;
				ins_nr = 0;
			}
			ret = btrfs_next_leaf(root, path);
			if (ret < 0)
				goto out;
			if (ret > 0) {
				ret = 0;
				break;
			}
			continue;
		}

		btrfs_item_key_to_cpu(leaf, &key, slot);
		if (key.objectid > ino)
			break;
		if (WARN_ON_ONCE(key.objectid < ino) ||
		    key.type < BTRFS_EXTENT_DATA_KEY ||
		    key.offset < i_size) {
			path->slots[0]++;
			continue;
		}
		if (last_extent == (u64)-1) {
			last_extent = key.offset;
			/*
			 * Avoid logging extent items logged in past fsync calls
			 * and leading to duplicate keys in the log tree.
			 */
			do {
				ret = btrfs_truncate_inode_items(trans,
							 root->log_root,
							 &inode->vfs_inode,
							 i_size,
							 BTRFS_EXTENT_DATA_KEY);
			} while (ret == -EAGAIN);
			if (ret)
				goto out;
		}
		if (ins_nr == 0)
			start_slot = slot;
		ins_nr++;
		path->slots[0]++;
		if (!dst_path) {
			dst_path = btrfs_alloc_path();
			if (!dst_path) {
				ret = -ENOMEM;
				goto out;
			}
		}
	}
	if (ins_nr > 0) {
		ret = copy_items(trans, inode, dst_path, path, &last_extent,
				 start_slot, ins_nr, 1, 0);
		if (ret > 0)
			ret = 0;
	}
out:
	btrfs_release_path(path);
	btrfs_free_path(dst_path);
	return ret;
}
static int btrfs_log_changed_extents(struct btrfs_trans_handle *trans,
				     struct btrfs_root *root,
				     struct btrfs_inode *inode,
				     struct btrfs_path *path,
				     struct list_head *logged_list,
				     struct btrfs_log_ctx *ctx,
				     const u64 start,
				     const u64 end)
{
	struct extent_map *em, *n;
	struct list_head extents;
	struct extent_map_tree *tree = &inode->extent_tree;
	u64 logged_start, logged_end;
	u64 test_gen;
	int ret = 0;
	int num = 0;

	INIT_LIST_HEAD(&extents);

	write_lock(&tree->lock);
	test_gen = root->fs_info->last_trans_committed;
	logged_start = start;
	logged_end = end;

	list_for_each_entry_safe(em, n, &tree->modified_extents, list) {
		list_del_init(&em->list);
		/*
		 * Just an arbitrary number, this can be really CPU intensive
		 * once we start getting a lot of extents, and really once we
		 * have a bunch of extents we just want to commit since it will
		 * be faster.
		 */
		if (++num > 32768) {
			list_del_init(&tree->modified_extents);
			ret = -EFBIG;
			goto process;
		}

		if (em->generation <= test_gen)
			continue;

		/* We log prealloc extents beyond eof later. */
		if (test_bit(EXTENT_FLAG_PREALLOC, &em->flags) &&
		    em->start >= i_size_read(&inode->vfs_inode))
			continue;

		if (em->start < logged_start)
			logged_start = em->start;
		if ((em->start + em->len - 1) > logged_end)
			logged_end = em->start + em->len - 1;

		/* Need a ref to keep it from getting evicted from cache */
		refcount_inc(&em->refs);
		set_bit(EXTENT_FLAG_LOGGING, &em->flags);
		list_add_tail(&em->list, &extents);
	}

	list_sort(NULL, &extents, extent_cmp);
	btrfs_get_logged_extents(inode, logged_list, logged_start, logged_end);
	/*
	 * Some ordered extents started by fsync might have completed
	 * before we could collect them into the list logged_list, which
	 * means they're gone, not in our logged_list nor in the inode's
	 * ordered tree. We want the application/user space to know an
	 * error happened while attempting to persist file data so that
	 * it can take proper action. If such error happened, we leave
	 * without writing to the log tree and the fsync must report the
	 * file data write error and not commit the current transaction.
	 */
	ret = filemap_check_errors(inode->vfs_inode.i_mapping);
	if (ret)
		ctx->io_err = ret;
process:
	while (!list_empty(&extents)) {
		em = list_entry(extents.next, struct extent_map, list);

		list_del_init(&em->list);

		/*
		 * If we had an error we just need to delete everybody from our
		 * private list.
		 */
		if (ret) {
			clear_em_logging(tree, em);
			free_extent_map(em);
			continue;
		}

		write_unlock(&tree->lock);

		ret = log_one_extent(trans, inode, root, em, path, logged_list,
				     ctx);
		write_lock(&tree->lock);
		clear_em_logging(tree, em);
		free_extent_map(em);
	}
	WARN_ON(!list_empty(&extents));
	write_unlock(&tree->lock);

	btrfs_release_path(path);
	if (!ret)
		ret = btrfs_log_prealloc_extents(trans, inode, path);

	return ret;
}
static int logged_inode_size(struct btrfs_root *log, struct btrfs_inode *inode,
			     struct btrfs_path *path, u64 *size_ret)
{
	struct btrfs_key key;
	int ret;

	key.objectid = btrfs_ino(inode);
	key.type = BTRFS_INODE_ITEM_KEY;
	key.offset = 0;

	ret = btrfs_search_slot(NULL, log, &key, path, 0, 0);
	if (ret < 0) {
		return ret;
	} else if (ret > 0) {
		*size_ret = 0;
	} else {
		struct btrfs_inode_item *item;

		item = btrfs_item_ptr(path->nodes[0], path->slots[0],
				      struct btrfs_inode_item);
		*size_ret = btrfs_inode_size(path->nodes[0], item);
		/*
		 * If the in-memory inode's i_size is smaller than the inode
		 * size stored in the btree, return the inode's i_size, so
		 * that we get a correct inode size after replaying the log
		 * when before a power failure we had a shrinking truncate
		 * followed by addition of a new name (rename / new hard link).
		 * Otherwise return the inode size from the btree, to avoid
		 * data loss when replaying a log due to previously doing a
		 * write that expands the inode's size and logging a new name
		 * immediately after.
		 */
		if (*size_ret > inode->vfs_inode.i_size)
			*size_ret = inode->vfs_inode.i_size;
	}

	btrfs_release_path(path);
	return 0;
}
/*
 * At the moment we always log all xattrs. This is to figure out at log replay
 * time which xattrs must have their deletion replayed. If a xattr is missing
 * in the log tree and exists in the fs/subvol tree, we delete it. This is
 * because if a xattr is deleted, the inode is fsynced and a power failure
 * happens, causing the log to be replayed the next time the fs is mounted,
 * we want the xattr to not exist anymore (same behaviour as other filesystems
 * with a journal, ext3/4, xfs, f2fs, etc).
 */
static int btrfs_log_all_xattrs(struct btrfs_trans_handle *trans,
				struct btrfs_root *root,
				struct btrfs_inode *inode,
				struct btrfs_path *path,
				struct btrfs_path *dst_path)
{
	int ret;
	struct btrfs_key key;
	const u64 ino = btrfs_ino(inode);
	int ins_nr = 0;
	int start_slot = 0;

	key.objectid = ino;
	key.type = BTRFS_XATTR_ITEM_KEY;
	key.offset = 0;

	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	if (ret < 0)
		return ret;

	while (true) {
		int slot = path->slots[0];
		struct extent_buffer *leaf = path->nodes[0];
		int nritems = btrfs_header_nritems(leaf);

		if (slot >= nritems) {
			if (ins_nr > 0) {
				u64 last_extent = 0;

				ret = copy_items(trans, inode, dst_path, path,
						 &last_extent, start_slot,
						 ins_nr, 1, 0);
				/* can't be 1, extent items aren't processed */
				ASSERT(ret <= 0);
				if (ret < 0)
					return ret;
				ins_nr = 0;
			}
			ret = btrfs_next_leaf(root, path);
			if (ret < 0)
				return ret;
			else if (ret > 0)
				break;
			continue;
		}

		btrfs_item_key_to_cpu(leaf, &key, slot);
		if (key.objectid != ino || key.type != BTRFS_XATTR_ITEM_KEY)
			break;

		if (ins_nr == 0)
			start_slot = slot;
		ins_nr++;
		path->slots[0]++;
		cond_resched();
	}
	if (ins_nr > 0) {
		u64 last_extent = 0;

		ret = copy_items(trans, inode, dst_path, path,
				 &last_extent, start_slot,
				 ins_nr, 1, 0);
		/* can't be 1, extent items aren't processed */
		ASSERT(ret <= 0);
		if (ret < 0)
			return ret;
	}

	return 0;
}
4606 * If the no holes feature is enabled we need to make sure any hole between the
4607 * last extent and the i_size of our inode is explicitly marked in the log. This
4608 * is to make sure that doing something like:
4610 * 1) create file with 128Kb of data
4611 * 2) truncate file to 64Kb
4612 * 3) truncate file to 256Kb
4614 * 5) <crash/power failure>
4615 * 6) mount fs and trigger log replay
4617 * Will give us a file with a size of 256Kb, the first 64Kb of data match what
4618 * the file had in its first 64Kb of data at step 1 and the last 192Kb of the
4619 * file correspond to a hole. The presence of explicit holes in a log tree is
4620 * what guarantees that log replay will remove/adjust file extent items in the
4623 * Here we do not need to care about holes between extents, that is already done
4624 * by copy_items(). We also only need to do this in the full sync path, where we
4625 * lookup for extents from the fs/subvol tree only. In the fast path case, we
4626 * lookup the list of modified extent maps and if any represents a hole, we
4627 * insert a corresponding extent representing a hole in the log tree.
static int btrfs_log_trailing_hole(struct btrfs_trans_handle *trans,
				   struct btrfs_root *root,
				   struct btrfs_inode *inode,
				   struct btrfs_path *path)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	int ret;
	struct btrfs_key key;
	u64 hole_start;
	u64 hole_size;
	struct extent_buffer *leaf;
	struct btrfs_root *log = root->log_root;
	const u64 ino = btrfs_ino(inode);
	const u64 i_size = i_size_read(&inode->vfs_inode);

	if (!btrfs_fs_incompat(fs_info, NO_HOLES))
		return 0;

	key.objectid = ino;
	key.type = BTRFS_EXTENT_DATA_KEY;
	key.offset = (u64)-1;

	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	if (ret < 0)
		return ret;
	ASSERT(ret > 0);

	ASSERT(path->slots[0] > 0);
	path->slots[0]--;
	leaf = path->nodes[0];
	btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);

	if (key.objectid != ino || key.type != BTRFS_EXTENT_DATA_KEY) {
		/* inode does not have any extents */
		hole_start = 0;
		hole_size = i_size;
	} else {
		struct btrfs_file_extent_item *extent;
		u64 len;

		/*
		 * If there's an extent beyond i_size, an explicit hole was
		 * already inserted by copy_items().
		 */
		if (key.offset >= i_size)
			return 0;

		extent = btrfs_item_ptr(leaf, path->slots[0],
					struct btrfs_file_extent_item);

		if (btrfs_file_extent_type(leaf, extent) ==
		    BTRFS_FILE_EXTENT_INLINE) {
			len = btrfs_file_extent_inline_len(leaf,
							   path->slots[0],
							   extent);
			ASSERT(len == i_size ||
			       (len == fs_info->sectorsize &&
				btrfs_file_extent_compression(leaf, extent) !=
				BTRFS_COMPRESS_NONE) ||
			       (len < i_size && i_size < fs_info->sectorsize));
			return 0;
		}

		len = btrfs_file_extent_num_bytes(leaf, extent);
		/* Last extent goes beyond i_size, no need to log a hole. */
		if (key.offset + len > i_size)
			return 0;
		hole_start = key.offset + len;
		hole_size = i_size - hole_start;
	}
	btrfs_release_path(path);

	/* Last extent ends at i_size. */
	if (hole_size == 0)
		return 0;

	hole_size = ALIGN(hole_size, fs_info->sectorsize);
	ret = btrfs_insert_file_extent(trans, log, ino, hole_start, 0, 0,
				       hole_size, 0, hole_size, 0, 0, 0);
	return ret;
}
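
/*
 * Worked example for the hole math above (illustrative addition, assuming a
 * 4Kb sectorsize): if the last extent covers [0, 64Kb) and i_size is 256Kb,
 * then hole_start = 65536 and hole_size = ALIGN(262144 - 65536, 4096) =
 * 196608, so a 192Kb hole extent is logged. A minimal userspace sketch of the
 * truncate scenario from the comment above (error handling omitted, the path
 * is hypothetical):
 *
 *	int fd = open("/mnt/foo", O_CREAT | O_RDWR, 0644);
 *	char buf[128 * 1024] = { 0 };
 *
 *	write(fd, buf, sizeof(buf));		// 1) 128Kb of data
 *	ftruncate(fd, 64 * 1024);		// 2) truncate to 64Kb
 *	ftruncate(fd, 256 * 1024);		// 3) truncate to 256Kb
 *	fsync(fd);				// 4) fsync file
 *	// 5) <crash/power failure>  6) mount fs and trigger log replay
 */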
/*
 * When we are logging a new inode X, check if it doesn't have a reference that
 * matches the reference from some other inode Y created in a past transaction
 * and that was renamed in the current transaction. If we don't do this, then at
 * log replay time we can lose inode Y (and all its files if it's a directory):
 *
 * mkdir /mnt/x
 * echo "hello world" > /mnt/x/foobar
 * sync
 * mv /mnt/x /mnt/y
 * mkdir /mnt/x                 # or touch /mnt/x
 * xfs_io -c fsync /mnt/x
 * <power fail>
 * mount fs, trigger log replay
 *
 * After the log replay procedure, we would lose the first directory and all its
 * files (file foobar).
 * For the case where inode Y is not a directory we simply end up losing it:
 *
 * echo "123" > /mnt/foo
 * sync
 * mv /mnt/foo /mnt/bar
 * echo "abc" > /mnt/foo
 * xfs_io -c fsync /mnt/foo
 * <power fail>
 *
 * We also need this for cases where a snapshot entry is replaced by some other
 * entry (file or directory) otherwise we end up with an unreplayable log due to
 * attempts to delete the snapshot entry (entry of type BTRFS_ROOT_ITEM_KEY) as
 * if it were a regular entry:
 *
 * mkdir /mnt/x
 * btrfs subvolume snapshot /mnt /mnt/x/snap
 * btrfs subvolume delete /mnt/x/snap
 * rmdir /mnt/x
 * mkdir /mnt/x
 * fsync /mnt/x or fsync some new file inside it
 * <power fail>
 *
 * The snapshot delete, rmdir of x, mkdir of a new x and the fsync all happen in
 * the same transaction.
 */
static int btrfs_check_ref_name_override(struct extent_buffer *eb,
					 const int slot,
					 const struct btrfs_key *key,
					 struct btrfs_inode *inode,
					 u64 *other_ino)
{
	int ret;
	struct btrfs_path *search_path;
	char *name = NULL;
	u32 name_len = 0;
	u32 item_size = btrfs_item_size_nr(eb, slot);
	u32 cur_offset = 0;
	unsigned long ptr = btrfs_item_ptr_offset(eb, slot);

	search_path = btrfs_alloc_path();
	if (!search_path)
		return -ENOMEM;
	search_path->search_commit_root = 1;
	search_path->skip_locking = 1;

	while (cur_offset < item_size) {
		u64 parent;
		u32 this_name_len;
		u32 this_len;
		unsigned long name_ptr;
		struct btrfs_dir_item *di;

		if (key->type == BTRFS_INODE_REF_KEY) {
			struct btrfs_inode_ref *iref;

			iref = (struct btrfs_inode_ref *)(ptr + cur_offset);
			parent = key->offset;
			this_name_len = btrfs_inode_ref_name_len(eb, iref);
			name_ptr = (unsigned long)(iref + 1);
			this_len = sizeof(*iref) + this_name_len;
		} else {
			struct btrfs_inode_extref *extref;

			extref = (struct btrfs_inode_extref *)(ptr +
							       cur_offset);
			parent = btrfs_inode_extref_parent(eb, extref);
			this_name_len = btrfs_inode_extref_name_len(eb, extref);
			name_ptr = (unsigned long)&extref->name;
			this_len = sizeof(*extref) + this_name_len;
		}

		ret = btrfs_is_name_len_valid(eb, slot, name_ptr,
					      this_name_len);
		if (!ret) {
			ret = -EIO;
			goto out;
		}
		if (this_name_len > name_len) {
			char *new_name;

			new_name = krealloc(name, this_name_len, GFP_NOFS);
			if (!new_name) {
				ret = -ENOMEM;
				goto out;
			}
			name_len = this_name_len;
			name = new_name;
		}

		read_extent_buffer(eb, name, name_ptr, this_name_len);
		di = btrfs_lookup_dir_item(NULL, inode->root, search_path,
					   parent, name, this_name_len, 0);
		if (di && !IS_ERR(di)) {
			struct btrfs_key di_key;

			btrfs_dir_item_key_to_cpu(search_path->nodes[0],
						  di, &di_key);
			if (di_key.type == BTRFS_INODE_ITEM_KEY) {
				ret = 1;
				*other_ino = di_key.objectid;
			} else {
				ret = -EAGAIN;
			}
			goto out;
		} else if (IS_ERR(di)) {
			ret = PTR_ERR(di);
			goto out;
		}
		btrfs_release_path(search_path);

		cur_offset += this_len;
	}
	ret = 0;
out:
	btrfs_free_path(search_path);
	kfree(name);
	return ret;
}
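
/*
 * Illustrative note (added, not from the original sources): the loop above
 * walks the references packed back to back inside a single INODE_REF or
 * INODE_EXTREF item. For a hypothetical BTRFS_INODE_REF_KEY item holding the
 * single name "foobar" under parent directory 256, one iteration computes:
 *
 *	parent = 256;					// key->offset
 *	this_name_len = 6;				// strlen("foobar")
 *	this_len = sizeof(struct btrfs_inode_ref) + 6;
 *	cur_offset += this_len;				// advance to next ref
 *
 * and the walk ends with ret == 0 once cur_offset reaches item_size, meaning
 * no conflicting directory entry was found.
 */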
/* log a single inode in the tree log.
 * At least one parent directory for this inode must exist in the tree
 * or be logged already.
 *
 * Any items from this inode changed by the current transaction are copied
 * to the log tree. An extra reference is taken on any extents in this
 * file, allowing us to avoid a whole pile of corner cases around logging
 * blocks that have been removed from the tree.
 *
 * See LOG_INODE_ALL and related defines for a description of what inode_only
 * is used for.
 *
 * This handles both files and directories.
 */
static int btrfs_log_inode(struct btrfs_trans_handle *trans,
			   struct btrfs_root *root, struct btrfs_inode *inode,
			   int inode_only,
			   const loff_t start,
			   const loff_t end,
			   struct btrfs_log_ctx *ctx)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_path *path;
	struct btrfs_path *dst_path;
	struct btrfs_key min_key;
	struct btrfs_key max_key;
	struct btrfs_root *log = root->log_root;
	LIST_HEAD(logged_list);
	u64 last_extent = 0;
	int err = 0;
	int ret;
	int nritems;
	int ins_start_slot = 0;
	int ins_nr;
	bool fast_search = false;
	u64 ino = btrfs_ino(inode);
	struct extent_map_tree *em_tree = &inode->extent_tree;
	u64 logged_isize = 0;
	bool need_log_inode_item = true;
	bool xattrs_logged = false;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;
	dst_path = btrfs_alloc_path();
	if (!dst_path) {
		btrfs_free_path(path);
		return -ENOMEM;
	}

	min_key.objectid = ino;
	min_key.type = BTRFS_INODE_ITEM_KEY;
	min_key.offset = 0;

	max_key.objectid = ino;

	/* today the code can only do partial logging of directories */
	if (S_ISDIR(inode->vfs_inode.i_mode) ||
	    (!test_bit(BTRFS_INODE_NEEDS_FULL_SYNC,
		       &inode->runtime_flags) &&
	     inode_only >= LOG_INODE_EXISTS))
		max_key.type = BTRFS_XATTR_ITEM_KEY;
	else
		max_key.type = (u8)-1;
	max_key.offset = (u64)-1;

	/*
	 * Only run delayed items if we are a dir or a new file.
	 * Otherwise commit the delayed inode only, which is needed in
	 * order for the log replay code to mark inodes for link count
	 * fixup (create temporary BTRFS_TREE_LOG_FIXUP_OBJECTID items).
	 */
	if (S_ISDIR(inode->vfs_inode.i_mode) ||
	    inode->generation > fs_info->last_trans_committed)
		ret = btrfs_commit_inode_delayed_items(trans, inode);
	else
		ret = btrfs_commit_inode_delayed_inode(inode);

	if (ret) {
		btrfs_free_path(path);
		btrfs_free_path(dst_path);
		return ret;
	}

	if (inode_only == LOG_OTHER_INODE) {
		inode_only = LOG_INODE_EXISTS;
		mutex_lock_nested(&inode->log_mutex, SINGLE_DEPTH_NESTING);
	} else {
		mutex_lock(&inode->log_mutex);
	}

	/*
	 * a brute force approach to making sure we get the most uptodate
	 * copies of everything.
	 */
	if (S_ISDIR(inode->vfs_inode.i_mode)) {
		int max_key_type = BTRFS_DIR_LOG_INDEX_KEY;

		if (inode_only == LOG_INODE_EXISTS)
			max_key_type = BTRFS_XATTR_ITEM_KEY;
		ret = drop_objectid_items(trans, log, path, ino, max_key_type);
	} else {
		if (inode_only == LOG_INODE_EXISTS) {
			/*
			 * Make sure the new inode item we write to the log has
			 * the same isize as the current one (if it exists).
			 * This is necessary to prevent data loss after log
			 * replay, and also to prevent doing a wrong expanding
			 * truncate - for e.g. create file, write 4K into offset
			 * 0, fsync, write 4K into offset 4096, add hard link,
			 * fsync some other file (to sync log), power fail - if
			 * we use the inode's current i_size, after log replay
			 * we get a 8Kb file, with the last 4Kb extent as a hole
			 * (zeroes), as if an expanding truncate happened,
			 * instead of getting a file of 4Kb only.
			 */
			err = logged_inode_size(log, inode, path, &logged_isize);
			if (err)
				goto out_unlock;
		}
		if (test_bit(BTRFS_INODE_NEEDS_FULL_SYNC,
			     &inode->runtime_flags)) {
			if (inode_only == LOG_INODE_EXISTS) {
				max_key.type = BTRFS_XATTR_ITEM_KEY;
				ret = drop_objectid_items(trans, log, path, ino,
							  max_key.type);
			} else {
				clear_bit(BTRFS_INODE_NEEDS_FULL_SYNC,
					  &inode->runtime_flags);
				clear_bit(BTRFS_INODE_COPY_EVERYTHING,
					  &inode->runtime_flags);
				while (1) {
					ret = btrfs_truncate_inode_items(trans,
						log, &inode->vfs_inode, 0, 0);
					if (ret != -EAGAIN)
						break;
				}
			}
		} else if (test_and_clear_bit(BTRFS_INODE_COPY_EVERYTHING,
					      &inode->runtime_flags) ||
			   inode_only == LOG_INODE_EXISTS) {
			if (inode_only == LOG_INODE_ALL)
				fast_search = true;
			max_key.type = BTRFS_XATTR_ITEM_KEY;
			ret = drop_objectid_items(trans, log, path, ino,
						  max_key.type);
		} else {
			if (inode_only == LOG_INODE_ALL)
				fast_search = true;
			goto log_extents;
		}
	}
	if (ret) {
		err = ret;
		goto out_unlock;
	}

	while (1) {
		ins_nr = 0;
		ret = btrfs_search_forward(root, &min_key,
					   path, trans->transid);
		if (ret < 0) {
			err = ret;
			goto out_unlock;
		}
		if (ret != 0)
			break;
again:
		/* note, ins_nr might be > 0 here, cleanup outside the loop */
		if (min_key.objectid != ino)
			break;
		if (min_key.type > max_key.type)
			break;

		if (min_key.type == BTRFS_INODE_ITEM_KEY)
			need_log_inode_item = false;

		if ((min_key.type == BTRFS_INODE_REF_KEY ||
		     min_key.type == BTRFS_INODE_EXTREF_KEY) &&
		    inode->generation == trans->transid) {
			u64 other_ino = 0;

			ret = btrfs_check_ref_name_override(path->nodes[0],
					path->slots[0], &min_key, inode,
					&other_ino);
			if (ret < 0) {
				err = ret;
				goto out_unlock;
			} else if (ret > 0 && ctx &&
				   other_ino != btrfs_ino(BTRFS_I(ctx->inode))) {
				struct btrfs_key inode_key;
				struct inode *other_inode;

				if (ins_nr > 0) {
					ins_nr++;
				} else {
					ins_nr = 1;
					ins_start_slot = path->slots[0];
				}
				ret = copy_items(trans, inode, dst_path, path,
						 &last_extent, ins_start_slot,
						 ins_nr, inode_only,
						 logged_isize);
				if (ret < 0) {
					err = ret;
					goto out_unlock;
				}
				ins_nr = 0;
				btrfs_release_path(path);
				inode_key.objectid = other_ino;
				inode_key.type = BTRFS_INODE_ITEM_KEY;
				inode_key.offset = 0;
				other_inode = btrfs_iget(fs_info->sb,
							 &inode_key, root,
							 NULL);
				/*
				 * If the other inode that had a conflicting dir
				 * entry was deleted in the current transaction,
				 * we don't need to do more work nor fallback to
				 * a transaction commit.
				 */
				if (IS_ERR(other_inode) &&
				    PTR_ERR(other_inode) == -ENOENT) {
					goto next_key;
				} else if (IS_ERR(other_inode)) {
					err = PTR_ERR(other_inode);
					goto out_unlock;
				}
				/*
				 * We are safe logging the other inode without
				 * acquiring its i_mutex as long as we log with
				 * the LOG_INODE_EXISTS mode. We're safe against
				 * concurrent renames of the other inode as well
				 * because during a rename we pin the log and
				 * update the log with the new name before we
				 * unpin it.
				 */
				err = btrfs_log_inode(trans, root,
						BTRFS_I(other_inode),
						LOG_OTHER_INODE, 0, LLONG_MAX,
						ctx);
				iput(other_inode);
				if (err)
					goto out_unlock;
				else
					goto next_key;
			}
		}

		/* Skip xattrs, we log them later with btrfs_log_all_xattrs() */
		if (min_key.type == BTRFS_XATTR_ITEM_KEY) {
			if (ins_nr == 0)
				goto next_slot;
			ret = copy_items(trans, inode, dst_path, path,
					 &last_extent, ins_start_slot,
					 ins_nr, inode_only, logged_isize);
			if (ret < 0) {
				err = ret;
				goto out_unlock;
			}
			ins_nr = 0;
			if (ret) {
				btrfs_release_path(path);
				continue;
			}
			goto next_slot;
		}

		if (ins_nr && ins_start_slot + ins_nr == path->slots[0]) {
			ins_nr++;
			goto next_slot;
		} else if (!ins_nr) {
			ins_start_slot = path->slots[0];
			ins_nr = 1;
			goto next_slot;
		}

		ret = copy_items(trans, inode, dst_path, path, &last_extent,
				 ins_start_slot, ins_nr, inode_only,
				 logged_isize);
		if (ret < 0) {
			err = ret;
			goto out_unlock;
		}
		if (ret) {
			ins_nr = 0;
			btrfs_release_path(path);
			continue;
		}
		ins_nr = 1;
		ins_start_slot = path->slots[0];
next_slot:

		nritems = btrfs_header_nritems(path->nodes[0]);
		path->slots[0]++;
		if (path->slots[0] < nritems) {
			btrfs_item_key_to_cpu(path->nodes[0], &min_key,
					      path->slots[0]);
			goto again;
		}
		if (ins_nr) {
			ret = copy_items(trans, inode, dst_path, path,
					 &last_extent, ins_start_slot,
					 ins_nr, inode_only, logged_isize);
			if (ret < 0) {
				err = ret;
				goto out_unlock;
			}
			ret = 0;
			ins_nr = 0;
		}
		btrfs_release_path(path);
next_key:
		if (min_key.offset < (u64)-1) {
			min_key.offset++;
		} else if (min_key.type < max_key.type) {
			min_key.type++;
			min_key.offset = 0;
		} else {
			break;
		}
	}
	if (ins_nr) {
		ret = copy_items(trans, inode, dst_path, path, &last_extent,
				 ins_start_slot, ins_nr, inode_only,
				 logged_isize);
		if (ret < 0) {
			err = ret;
			goto out_unlock;
		}
		ret = 0;
		ins_nr = 0;
	}

	btrfs_release_path(path);
	btrfs_release_path(dst_path);
	err = btrfs_log_all_xattrs(trans, root, inode, path, dst_path);
	if (err)
		goto out_unlock;
	xattrs_logged = true;
	if (max_key.type >= BTRFS_EXTENT_DATA_KEY && !fast_search) {
		btrfs_release_path(path);
		btrfs_release_path(dst_path);
		err = btrfs_log_trailing_hole(trans, root, inode, path);
		if (err)
			goto out_unlock;
	}
log_extents:
	btrfs_release_path(path);
	btrfs_release_path(dst_path);
	if (need_log_inode_item) {
		err = log_inode_item(trans, log, dst_path, inode);
		if (!err && !xattrs_logged) {
			err = btrfs_log_all_xattrs(trans, root, inode, path,
						   dst_path);
			btrfs_release_path(path);
		}
		if (err)
			goto out_unlock;
	}
	if (fast_search) {
		ret = btrfs_log_changed_extents(trans, root, inode, dst_path,
						&logged_list, ctx, start, end);
		if (ret) {
			err = ret;
			goto out_unlock;
		}
	} else if (inode_only == LOG_INODE_ALL) {
		struct extent_map *em, *n;

		write_lock(&em_tree->lock);
		/*
		 * We can't just remove every em if we're called for a ranged
		 * fsync - that is, one that doesn't cover the whole possible
		 * file range (0 to LLONG_MAX). This is because we can have
		 * em's that fall outside the range we're logging and therefore
		 * their ordered operations haven't completed yet
		 * (btrfs_finish_ordered_io() not invoked yet). This means we
		 * didn't get their respective file extent item in the fs/subvol
		 * tree yet, and need to let the next fast fsync (one which
		 * consults the list of modified extent maps) find the em so
		 * that it logs a matching file extent item and waits for the
		 * respective ordered operation to complete (if it's still
		 * running).
		 *
		 * Removing every em outside the range we're logging would make
		 * the next fast fsync not log their matching file extent items,
		 * therefore making us lose data after a log replay.
		 */
		list_for_each_entry_safe(em, n, &em_tree->modified_extents,
					 list) {
			const u64 mod_end = em->mod_start + em->mod_len - 1;

			if (em->mod_start >= start && mod_end <= end)
				list_del_init(&em->list);
		}
		write_unlock(&em_tree->lock);
	}

	if (inode_only == LOG_INODE_ALL && S_ISDIR(inode->vfs_inode.i_mode)) {
		ret = log_directory_changes(trans, root, inode, path, dst_path,
					    ctx);
		if (ret) {
			err = ret;
			goto out_unlock;
		}
	}

	spin_lock(&inode->lock);
	inode->logged_trans = trans->transid;
	inode->last_log_commit = inode->last_sub_trans;
	spin_unlock(&inode->lock);
out_unlock:
	if (unlikely(err))
		btrfs_put_logged_extents(&logged_list);
	else
		btrfs_submit_logged_extents(&logged_list, log);
	mutex_unlock(&inode->log_mutex);

	btrfs_free_path(path);
	btrfs_free_path(dst_path);
	return err;
}
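
/*
 * Illustrative sketch of the batching done above (added note, hypothetical
 * slot numbers): if an inode's items sit in contiguous leaf slots 3, 4 and 5,
 * the loop accumulates ins_start_slot = 3 and ins_nr = 3 and copies them to
 * the log tree with a single call, roughly:
 *
 *	ret = copy_items(trans, inode, dst_path, path, &last_extent,
 *			 3, 3, inode_only, logged_isize);
 *
 * rather than issuing one copy per item.
 */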
/*
 * Check if we must fallback to a transaction commit when logging an inode.
 * This must be called after logging the inode and is used only in the context
 * when fsyncing an inode requires logging some other inode - in which case we
 * can't lock the i_mutex of each other inode we need to log as that can lead
 * to deadlocks with concurrent fsync against other inodes (as we can log
 * inodes up or down in the hierarchy) or rename operations for example. So we
 * take the log_mutex of the inode after we have logged it and then check for
 * its last_unlink_trans value - this is safe because any task setting
 * last_unlink_trans must take the log_mutex and it must do this before it does
 * the actual unlink operation, so if we do this check before a concurrent task
 * sets last_unlink_trans it means we've logged a consistent version/state of
 * all the inode items, otherwise we are not sure and must do a transaction
 * commit (the concurrent task might have only updated last_unlink_trans before
 * we logged the inode or it might have also done the unlink).
 */
static bool btrfs_must_commit_transaction(struct btrfs_trans_handle *trans,
					  struct btrfs_inode *inode)
{
	struct btrfs_fs_info *fs_info = inode->root->fs_info;
	bool ret = false;

	mutex_lock(&inode->log_mutex);
	if (inode->last_unlink_trans > fs_info->last_trans_committed) {
		/*
		 * Make sure any commits to the log are forced to be full
		 * commits.
		 */
		btrfs_set_log_full_commit(fs_info, trans);
		ret = true;
	}
	mutex_unlock(&inode->log_mutex);

	return ret;
}
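
/*
 * Usage sketch for the helper above, matching the pattern used by
 * log_new_dir_dentries() further below:
 *
 *	ret = btrfs_log_inode(trans, root, BTRFS_I(di_inode),
 *			      log_mode, 0, LLONG_MAX, ctx);
 *	if (!ret &&
 *	    btrfs_must_commit_transaction(trans, BTRFS_I(di_inode)))
 *		ret = 1;	// signal the caller to commit the transaction
 */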
/*
 * follow the dentry parent pointers up the chain and see if any
 * of the directories in it require a full commit before they can
 * be logged. Returns zero if nothing special needs to be done or 1 if
 * a full commit is required.
 */
static noinline int check_parent_dirs_for_sync(struct btrfs_trans_handle *trans,
					       struct btrfs_inode *inode,
					       struct dentry *parent,
					       struct super_block *sb,
					       u64 last_committed)
{
	int ret = 0;
	struct dentry *old_parent = NULL;
	struct btrfs_inode *orig_inode = inode;

	/*
	 * for regular files, if its inode is already on disk, we don't
	 * have to worry about the parents at all. This is because
	 * we can use the last_unlink_trans field to record renames
	 * and other fun in this file.
	 */
	if (S_ISREG(inode->vfs_inode.i_mode) &&
	    inode->generation <= last_committed &&
	    inode->last_unlink_trans <= last_committed)
		goto out;

	if (!S_ISDIR(inode->vfs_inode.i_mode)) {
		if (!parent || d_really_is_negative(parent) || sb != parent->d_sb)
			goto out;
		inode = BTRFS_I(d_inode(parent));
	}

	while (1) {
		/*
		 * If we are logging a directory then we start with our inode,
		 * not our parent's inode, so we need to skip setting the
		 * logged_trans so that further down in the log code we don't
		 * think this inode has already been logged.
		 */
		if (inode != orig_inode)
			inode->logged_trans = trans->transid;
		smp_mb();

		if (btrfs_must_commit_transaction(trans, inode)) {
			ret = 1;
			break;
		}

		if (!parent || d_really_is_negative(parent) || sb != parent->d_sb)
			break;

		if (IS_ROOT(parent)) {
			inode = BTRFS_I(d_inode(parent));
			if (btrfs_must_commit_transaction(trans, inode))
				ret = 1;
			break;
		}

		parent = dget_parent(parent);
		dput(old_parent);
		old_parent = parent;
		inode = BTRFS_I(d_inode(parent));
	}
	dput(old_parent);
out:
	return ret;
}
struct btrfs_dir_list {
	u64 ino;
	struct list_head list;
};
/*
 * Log the inodes of the new dentries of a directory. See log_dir_items() for
 * details about why it is needed.
 * This is a recursive operation - if an existing dentry corresponds to a
 * directory, that directory's new entries are logged too (same behaviour as
 * ext3/4, xfs, f2fs, reiserfs, nilfs2). Note that when logging the inodes
 * the dentries point to we do not lock their i_mutex, otherwise lockdep
 * complains about the following circular lock dependency / possible deadlock:
 *
 *           CPU0                                        CPU1
 *           ----                                        ----
 * lock(&type->i_mutex_dir_key#3/2);
 *                                            lock(sb_internal#2);
 *                                            lock(&type->i_mutex_dir_key#3/2);
 * lock(&sb->s_type->i_mutex_key#14);
 *
 * Where sb_internal is the lock (a counter that works as a lock) acquired by
 * sb_start_intwrite() in btrfs_start_transaction().
 * Not locking i_mutex of the inodes is still safe because:
 *
 * 1) For regular files we log with a mode of LOG_INODE_EXISTS. It's possible
 *    that while logging the inode new references (names) are added or removed
 *    from the inode, leaving the logged inode item with a link count that does
 *    not match the number of logged inode reference items. This is fine because
 *    at log replay time we compute the real number of links and correct the
 *    link count in the inode item (see replay_one_buffer() and
 *    link_to_fixup_dir());
 *
 * 2) For directories we log with a mode of LOG_INODE_ALL. It's possible that
 *    while logging the inode's items new items with keys BTRFS_DIR_ITEM_KEY and
 *    BTRFS_DIR_INDEX_KEY are added to fs/subvol tree and the logged inode item
 *    has a size that doesn't match the sum of the lengths of all the logged
 *    names. This does not result in a problem because if a dir_item key is
 *    logged but its matching dir_index key is not logged, at log replay time we
 *    don't use it to replay the respective name (see replay_one_name()). On the
 *    other hand if only the dir_index key ends up being logged, the respective
 *    name is added to the fs/subvol tree with both the dir_item and dir_index
 *    keys created (see replay_one_name()).
 *    The directory's inode item with a wrong i_size is not a problem as well,
 *    since we don't use it at log replay time to set the i_size in the inode
 *    item of the fs/subvol tree (see overwrite_item()).
 */
static int log_new_dir_dentries(struct btrfs_trans_handle *trans,
				struct btrfs_root *root,
				struct btrfs_inode *start_inode,
				struct btrfs_log_ctx *ctx)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_root *log = root->log_root;
	struct btrfs_path *path;
	LIST_HEAD(dir_list);
	struct btrfs_dir_list *dir_elem;
	int ret = 0;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;
	dir_elem = kmalloc(sizeof(*dir_elem), GFP_NOFS);
	if (!dir_elem) {
		btrfs_free_path(path);
		return -ENOMEM;
	}
	dir_elem->ino = btrfs_ino(start_inode);
	list_add_tail(&dir_elem->list, &dir_list);

	while (!list_empty(&dir_list)) {
		struct extent_buffer *leaf;
		struct btrfs_key min_key;
		int nritems;
		int i;

		dir_elem = list_first_entry(&dir_list, struct btrfs_dir_list,
					    list);
		if (ret)
			goto next_dir_inode;

		min_key.objectid = dir_elem->ino;
		min_key.type = BTRFS_DIR_ITEM_KEY;
		min_key.offset = 0;
again:
		btrfs_release_path(path);
		ret = btrfs_search_forward(log, &min_key, path, trans->transid);
		if (ret < 0) {
			goto next_dir_inode;
		} else if (ret > 0) {
			ret = 0;
			goto next_dir_inode;
		}

process_leaf:
		leaf = path->nodes[0];
		nritems = btrfs_header_nritems(leaf);
		for (i = path->slots[0]; i < nritems; i++) {
			struct btrfs_dir_item *di;
			struct btrfs_key di_key;
			struct inode *di_inode;
			struct btrfs_dir_list *new_dir_elem;
			int log_mode = LOG_INODE_EXISTS;
			int type;

			btrfs_item_key_to_cpu(leaf, &min_key, i);
			if (min_key.objectid != dir_elem->ino ||
			    min_key.type != BTRFS_DIR_ITEM_KEY)
				goto next_dir_inode;

			di = btrfs_item_ptr(leaf, i, struct btrfs_dir_item);
			type = btrfs_dir_type(leaf, di);
			if (btrfs_dir_transid(leaf, di) < trans->transid &&
			    type != BTRFS_FT_DIR)
				continue;
			btrfs_dir_item_key_to_cpu(leaf, di, &di_key);
			if (di_key.type == BTRFS_ROOT_ITEM_KEY)
				continue;

			btrfs_release_path(path);
			di_inode = btrfs_iget(fs_info->sb, &di_key, root, NULL);
			if (IS_ERR(di_inode)) {
				ret = PTR_ERR(di_inode);
				goto next_dir_inode;
			}

			if (btrfs_inode_in_log(BTRFS_I(di_inode), trans->transid)) {
				iput(di_inode);
				break;
			}

			ctx->log_new_dentries = false;
			if (type == BTRFS_FT_DIR || type == BTRFS_FT_SYMLINK)
				log_mode = LOG_INODE_ALL;
			ret = btrfs_log_inode(trans, root, BTRFS_I(di_inode),
					      log_mode, 0, LLONG_MAX, ctx);
			if (!ret &&
			    btrfs_must_commit_transaction(trans, BTRFS_I(di_inode)))
				ret = 1;
			iput(di_inode);
			if (ret)
				goto next_dir_inode;
			if (ctx->log_new_dentries) {
				new_dir_elem = kmalloc(sizeof(*new_dir_elem),
						       GFP_NOFS);
				if (!new_dir_elem) {
					ret = -ENOMEM;
					goto next_dir_inode;
				}
				new_dir_elem->ino = di_key.objectid;
				list_add_tail(&new_dir_elem->list, &dir_list);
			}
			break;
		}
		if (i == nritems) {
			ret = btrfs_next_leaf(log, path);
			if (ret < 0) {
				goto next_dir_inode;
			} else if (ret > 0) {
				ret = 0;
				goto next_dir_inode;
			}
			goto process_leaf;
		}
		if (min_key.offset < (u64)-1) {
			min_key.offset++;
			goto again;
		}
next_dir_inode:
		list_del(&dir_elem->list);
		kfree(dir_elem);
	}

	btrfs_free_path(path);
	return ret;
}
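
/*
 * Illustrative walk of the function above (added note, hypothetical paths):
 * fsyncing a directory that gained new dentries /mnt/a/b and /mnt/a/b/c
 * drains dir_list as a FIFO queue:
 *
 *	dir_list = { ino(a) }	-> logs b, queues it (new directory dentry)
 *	dir_list = { ino(b) }	-> logs c, queues it
 *	dir_list = { ino(c) }	-> no new dentries, queue drains, done
 *
 * so the recursion over new directory entries is performed iteratively.
 */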
static int btrfs_log_all_parents(struct btrfs_trans_handle *trans,
				 struct btrfs_inode *inode,
				 struct btrfs_log_ctx *ctx)
{
	struct btrfs_fs_info *fs_info = btrfs_sb(inode->vfs_inode.i_sb);
	int ret;
	struct btrfs_path *path;
	struct btrfs_key key;
	struct btrfs_root *root = inode->root;
	const u64 ino = btrfs_ino(inode);

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;
	path->skip_locking = 1;
	path->search_commit_root = 1;

	key.objectid = ino;
	key.type = BTRFS_INODE_REF_KEY;
	key.offset = 0;
	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	if (ret < 0)
		goto out;

	while (true) {
		struct extent_buffer *leaf = path->nodes[0];
		int slot = path->slots[0];
		u32 cur_offset = 0;
		u32 item_size;
		unsigned long ptr;

		if (slot >= btrfs_header_nritems(leaf)) {
			ret = btrfs_next_leaf(root, path);
			if (ret < 0)
				goto out;
			else if (ret > 0)
				break;
			continue;
		}

		btrfs_item_key_to_cpu(leaf, &key, slot);
		/* BTRFS_INODE_EXTREF_KEY is BTRFS_INODE_REF_KEY + 1 */
		if (key.objectid != ino || key.type > BTRFS_INODE_EXTREF_KEY)
			break;

		item_size = btrfs_item_size_nr(leaf, slot);
		ptr = btrfs_item_ptr_offset(leaf, slot);
		while (cur_offset < item_size) {
			struct btrfs_key inode_key;
			struct inode *dir_inode;

			inode_key.type = BTRFS_INODE_ITEM_KEY;
			inode_key.offset = 0;

			if (key.type == BTRFS_INODE_EXTREF_KEY) {
				struct btrfs_inode_extref *extref;

				extref = (struct btrfs_inode_extref *)
					(ptr + cur_offset);
				inode_key.objectid = btrfs_inode_extref_parent(
					leaf, extref);
				cur_offset += sizeof(*extref);
				cur_offset += btrfs_inode_extref_name_len(leaf,
					extref);
			} else {
				inode_key.objectid = key.offset;
				cur_offset = item_size;
			}

			dir_inode = btrfs_iget(fs_info->sb, &inode_key,
					       root, NULL);
			/*
			 * If the parent inode was deleted, return an error to
			 * fallback to a transaction commit. This is to prevent
			 * getting an inode that was moved from one parent A to
			 * a parent B, got its former parent A deleted and then
			 * it got fsync'ed, from existing at both parents after
			 * a log replay (and the old parent still existing).
			 * For example:
			 *
			 * mkdir /mnt/A
			 * mkdir /mnt/B
			 * touch /mnt/B/bar
			 * sync
			 * mv /mnt/B/bar /mnt/A/bar
			 * mv -T /mnt/A /mnt/B
			 * fsync /mnt/B/bar
			 * <power fail>
			 *
			 * If we ignore the old parent B which got deleted,
			 * after a log replay we would have file bar linked
			 * at both parents and the old parent B would still
			 * exist.
			 */
			if (IS_ERR(dir_inode)) {
				ret = PTR_ERR(dir_inode);
				goto out;
			}

			if (ctx)
				ctx->log_new_dentries = false;
			ret = btrfs_log_inode(trans, root, BTRFS_I(dir_inode),
					      LOG_INODE_ALL, 0, LLONG_MAX, ctx);
			if (!ret &&
			    btrfs_must_commit_transaction(trans, BTRFS_I(dir_inode)))
				ret = 1;
			if (!ret && ctx && ctx->log_new_dentries)
				ret = log_new_dir_dentries(trans, root,
						   BTRFS_I(dir_inode), ctx);
			iput(dir_inode);
			if (ret)
				goto out;
		}
		path->slots[0]++;
	}
	ret = 0;
out:
	btrfs_free_path(path);
	return ret;
}
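
/*
 * A minimal userspace sketch of the "moved between parents" scenario from the
 * comment above (illustrative addition, error handling omitted, paths
 * hypothetical):
 *
 *	mkdir("/mnt/A", 0755);
 *	mkdir("/mnt/B", 0755);
 *	creat("/mnt/B/bar", 0644);
 *	sync();
 *	rename("/mnt/B/bar", "/mnt/A/bar");
 *	rename("/mnt/A", "/mnt/B");	// like mv -T, B is empty by now
 *	int fd = open("/mnt/B/bar", O_RDONLY);
 *	fsync(fd);
 *	// <power fail> - bar must end up linked only under the new parent
 */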
/*
 * helper function around btrfs_log_inode to make sure newly created
 * parent directories also end up in the log. A minimal inode and backref
 * only logging is done of any parent directories that are older than
 * the last committed transaction
 */
static int btrfs_log_inode_parent(struct btrfs_trans_handle *trans,
				  struct btrfs_root *root,
				  struct btrfs_inode *inode,
				  struct dentry *parent,
				  const loff_t start,
				  const loff_t end,
				  int exists_only,
				  struct btrfs_log_ctx *ctx)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	int inode_only = exists_only ? LOG_INODE_EXISTS : LOG_INODE_ALL;
	struct super_block *sb;
	struct dentry *old_parent = NULL;
	int ret = 0;
	u64 last_committed = fs_info->last_trans_committed;
	bool log_dentries = false;
	struct btrfs_inode *orig_inode = inode;

	sb = inode->vfs_inode.i_sb;

	if (btrfs_test_opt(fs_info, NOTREELOG)) {
		ret = 1;
		goto end_no_trans;
	}

	/*
	 * The prev transaction commit doesn't complete, we need do
	 * full commit by ourselves.
	 */
	if (fs_info->last_trans_log_full_commit >
	    fs_info->last_trans_committed) {
		ret = 1;
		goto end_no_trans;
	}

	if (root != inode->root || btrfs_root_refs(&root->root_item) == 0) {
		ret = 1;
		goto end_no_trans;
	}

	ret = check_parent_dirs_for_sync(trans, inode, parent, sb,
					 last_committed);
	if (ret)
		goto end_no_trans;

	/*
	 * Skip already logged inodes or inodes corresponding to tmpfiles
	 * (since logging them is pointless, a link count of 0 means they
	 * will never be accessible).
	 */
	if (btrfs_inode_in_log(inode, trans->transid) ||
	    inode->vfs_inode.i_nlink == 0) {
		ret = BTRFS_NO_LOG_SYNC;
		goto end_no_trans;
	}

	ret = start_log_trans(trans, root, ctx);
	if (ret)
		goto end_no_trans;

	ret = btrfs_log_inode(trans, root, inode, inode_only, start, end, ctx);
	if (ret)
		goto end_trans;

	/*
	 * for regular files, if its inode is already on disk, we don't
	 * have to worry about the parents at all. This is because
	 * we can use the last_unlink_trans field to record renames
	 * and other fun in this file.
	 */
	if (S_ISREG(inode->vfs_inode.i_mode) &&
	    inode->generation <= last_committed &&
	    inode->last_unlink_trans <= last_committed) {
		ret = 0;
		goto end_trans;
	}

	if (S_ISDIR(inode->vfs_inode.i_mode) && ctx && ctx->log_new_dentries)
		log_dentries = true;

	/*
	 * On unlink we must make sure all our current and old parent directory
	 * inodes are fully logged. This is to prevent leaving dangling
	 * directory index entries in directories that were our parents but are
	 * not anymore. Not doing this results in old parent directory being
	 * impossible to delete after log replay (rmdir will always fail with
	 * error -ENOTEMPTY).
	 *
	 * Example 1:
	 *
	 * mkdir testdir
	 * touch testdir/foo
	 * ln testdir/foo testdir/bar
	 * sync
	 * unlink testdir/bar
	 * xfs_io -c fsync testdir/foo
	 * <power failure>
	 * mount fs, triggers log replay
	 *
	 * If we don't log the parent directory (testdir), after log replay the
	 * directory still has an entry pointing to the file inode using the bar
	 * name, but a matching BTRFS_INODE_[REF|EXTREF]_KEY does not exist and
	 * the file inode has a link count of 1.
	 *
	 * Example 2:
	 *
	 * mkdir testdir
	 * touch foo
	 * ln foo testdir/foo2
	 * ln foo testdir/foo3
	 * sync
	 * unlink testdir/foo3
	 * xfs_io -c fsync foo
	 * <power failure>
	 * mount fs, triggers log replay
	 *
	 * Similar as the first example, after log replay the parent directory
	 * testdir still has an entry pointing to the inode file with name foo3
	 * but the file inode does not have a matching BTRFS_INODE_REF_KEY item
	 * and has a link count of 2.
	 */
	if (inode->last_unlink_trans > last_committed) {
		ret = btrfs_log_all_parents(trans, orig_inode, ctx);
		if (ret)
			goto end_trans;
	}

	/*
	 * If a new hard link was added to the inode in the current transaction
	 * and its link count is now greater than 1, we need to fallback to a
	 * transaction commit, otherwise we can end up not logging all its new
	 * parents for all the hard links. Here just from the dentry used to
	 * fsync, we can not visit the ancestor inodes for all the other hard
	 * links to figure out if any is new, so we fallback to a transaction
	 * commit (instead of adding a lot of complexity of scanning a btree,
	 * since this scenario is not a common use case).
	 */
	if (inode->vfs_inode.i_nlink > 1 &&
	    inode->last_link_trans > last_committed) {
		ret = -EMLINK;
		goto end_trans;
	}

	while (1) {
		if (!parent || d_really_is_negative(parent) || sb != parent->d_sb)
			break;

		inode = BTRFS_I(d_inode(parent));
		if (root != inode->root)
			break;

		if (inode->generation > last_committed) {
			ret = btrfs_log_inode(trans, root, inode,
					LOG_INODE_EXISTS, 0, LLONG_MAX, ctx);
			if (ret)
				goto end_trans;
		}
		if (IS_ROOT(parent))
			break;

		parent = dget_parent(parent);
		dput(old_parent);
		old_parent = parent;
	}
	if (log_dentries)
		ret = log_new_dir_dentries(trans, root, orig_inode, ctx);
	else
		ret = 0;
end_trans:
	dput(old_parent);
	if (ret < 0) {
		btrfs_set_log_full_commit(fs_info, trans);
		ret = 1;
	}

	if (ret)
		btrfs_remove_log_ctx(root, ctx);
	btrfs_end_log_trans(root);
end_no_trans:
	return ret;
}
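
/*
 * A minimal userspace sketch of "Example 1" from the unlink discussion in the
 * function above (illustrative addition, error handling omitted):
 *
 *	mkdir("testdir", 0755);
 *	int fd = creat("testdir/foo", 0644);
 *	link("testdir/foo", "testdir/bar");
 *	sync();
 *	unlink("testdir/bar");
 *	fsync(fd);	// like: xfs_io -c fsync testdir/foo
 *	// <power failure> - after replay, testdir must not keep a dangling
 *	// index entry for "bar", or rmdir(testdir) would fail with -ENOTEMPTY
 */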
/*
 * it is not safe to log dentry if the chunk root has added new
 * chunks. This returns 0 if the dentry was logged, and 1 otherwise.
 * If this returns 1, you must commit the transaction to safely get your
 * data on disk.
 */
int btrfs_log_dentry_safe(struct btrfs_trans_handle *trans,
			  struct btrfs_root *root, struct dentry *dentry,
			  const loff_t start,
			  const loff_t end,
			  struct btrfs_log_ctx *ctx)
{
	struct dentry *parent = dget_parent(dentry);
	int ret;

	ret = btrfs_log_inode_parent(trans, root, BTRFS_I(d_inode(dentry)),
				     parent, start, end, 0, ctx);
	dput(parent);

	return ret;
}
/*
 * should be called during mount to recover and replay any log trees
 * from the FS
 */
int btrfs_recover_log_trees(struct btrfs_root *log_root_tree)
{
	int ret;
	struct btrfs_path *path;
	struct btrfs_trans_handle *trans;
	struct btrfs_key key;
	struct btrfs_key found_key;
	struct btrfs_key tmp_key;
	struct btrfs_root *log;
	struct btrfs_fs_info *fs_info = log_root_tree->fs_info;
	struct walk_control wc = {
		.process_func = process_one_buffer,
		.stage = 0,
	};

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	set_bit(BTRFS_FS_LOG_RECOVERING, &fs_info->flags);

	trans = btrfs_start_transaction(fs_info->tree_root, 0);
	if (IS_ERR(trans)) {
		ret = PTR_ERR(trans);
		goto error;
	}

	wc.trans = trans;
	wc.pin = 1;

	ret = walk_log_tree(trans, log_root_tree, &wc);
	if (ret) {
		btrfs_handle_fs_error(fs_info, ret,
			"Failed to pin buffers while recovering log root tree.");
		goto error;
	}

again:
	key.objectid = BTRFS_TREE_LOG_OBJECTID;
	key.offset = (u64)-1;
	key.type = BTRFS_ROOT_ITEM_KEY;

	while (1) {
		ret = btrfs_search_slot(NULL, log_root_tree, &key, path, 0, 0);

		if (ret < 0) {
			btrfs_handle_fs_error(fs_info, ret,
				    "Couldn't find tree log root.");
			goto error;
		}
		if (ret > 0) {
			if (path->slots[0] == 0)
				break;
			path->slots[0]--;
		}
		btrfs_item_key_to_cpu(path->nodes[0], &found_key,
				      path->slots[0]);
		btrfs_release_path(path);
		if (found_key.objectid != BTRFS_TREE_LOG_OBJECTID)
			break;

		log = btrfs_read_fs_root(log_root_tree, &found_key);
		if (IS_ERR(log)) {
			ret = PTR_ERR(log);
			btrfs_handle_fs_error(fs_info, ret,
				    "Couldn't read tree log root.");
			goto error;
		}

		tmp_key.objectid = found_key.offset;
		tmp_key.type = BTRFS_ROOT_ITEM_KEY;
		tmp_key.offset = (u64)-1;

		wc.replay_dest = btrfs_read_fs_root_no_name(fs_info, &tmp_key);
		if (IS_ERR(wc.replay_dest)) {
			ret = PTR_ERR(wc.replay_dest);
			free_extent_buffer(log->node);
			free_extent_buffer(log->commit_root);
			kfree(log);
			btrfs_handle_fs_error(fs_info, ret,
				"Couldn't read target root for tree log recovery.");
			goto error;
		}

		wc.replay_dest->log_root = log;
		btrfs_record_root_in_trans(trans, wc.replay_dest);
		ret = walk_log_tree(trans, log, &wc);

		if (!ret && wc.stage == LOG_WALK_REPLAY_ALL) {
			ret = fixup_inode_link_counts(trans, wc.replay_dest,
						      path);
		}

		if (!ret && wc.stage == LOG_WALK_REPLAY_ALL) {
			struct btrfs_root *root = wc.replay_dest;

			btrfs_release_path(path);

			/*
			 * We have just replayed everything, and the highest
			 * objectid of fs roots probably has changed in case
			 * some inode_item's got replayed.
			 *
			 * root->objectid_mutex is not acquired as log replay
			 * could only happen during mount.
			 */
			ret = btrfs_find_highest_objectid(root,
						  &root->highest_objectid);
		}

		key.offset = found_key.offset - 1;
		wc.replay_dest->log_root = NULL;
		free_extent_buffer(log->node);
		free_extent_buffer(log->commit_root);
		kfree(log);

		if (ret)
			goto error;

		if (found_key.offset == 0)
			break;
	}
	btrfs_release_path(path);

	/* step one is to pin it all, step two is to replay just inodes */
	if (wc.pin) {
		wc.pin = 0;
		wc.process_func = replay_one_buffer;
		wc.stage = LOG_WALK_REPLAY_INODES;
		goto again;
	}
	/* step three is to replay everything */
	if (wc.stage < LOG_WALK_REPLAY_ALL) {
		wc.stage++;
		goto again;
	}

	btrfs_free_path(path);

	/* step 4: commit the transaction, which also unpins the blocks */
	ret = btrfs_commit_transaction(trans);
	if (ret)
		return ret;

	free_extent_buffer(log_root_tree->node);
	log_root_tree->log_root = NULL;
	clear_bit(BTRFS_FS_LOG_RECOVERING, &fs_info->flags);
	kfree(log_root_tree);

	return 0;
error:
	if (wc.trans)
		btrfs_end_transaction(wc.trans);
	btrfs_free_path(path);
	return ret;
}
/*
 * there are some corner cases where we want to force a full
 * commit instead of allowing a directory to be logged.
 *
 * They revolve around files that were unlinked from the directory, and
 * this function updates the parent directory so that a full commit is
 * properly done if it is fsync'd later after the unlinks are done.
 *
 * Must be called before the unlink operations (updates to the subvolume tree,
 * inodes, etc) are done.
 */
void btrfs_record_unlink_dir(struct btrfs_trans_handle *trans,
			     struct btrfs_inode *dir, struct btrfs_inode *inode,
			     int for_rename)
{
	/*
	 * when we're logging a file, if it hasn't been renamed
	 * or unlinked, and its inode is fully committed on disk,
	 * we don't have to worry about walking up the directory chain
	 * to log its parents.
	 *
	 * So, we use the last_unlink_trans field to put this transid
	 * into the file. When the file is logged we check it and
	 * don't log the parents if the file is fully on disk.
	 */
	mutex_lock(&inode->log_mutex);
	inode->last_unlink_trans = trans->transid;
	mutex_unlock(&inode->log_mutex);

	/*
	 * if this directory was already logged any new
	 * names for this file/dir will get recorded
	 */
	smp_mb();
	if (dir->logged_trans == trans->transid)
		return;

	/*
	 * if the inode we're about to unlink was logged,
	 * the log will be properly updated for any new names
	 */
	if (inode->logged_trans == trans->transid)
		return;

	/*
	 * when renaming files across directories, if the directory
	 * that we're unlinking from gets fsync'd later on, there's
	 * no way to find the destination directory later and fsync it
	 * properly. So, we have to be conservative and force commits
	 * so the new name gets discovered.
	 */
	if (for_rename)
		goto record;

	/* we can safely do the unlink without any special recording */
	return;

record:
	mutex_lock(&dir->log_mutex);
	dir->last_unlink_trans = trans->transid;
	mutex_unlock(&dir->log_mutex);
}
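
/*
 * Call-site sketch (an assumption about the unlink path, not part of this
 * file): the recording must happen before the directory entry and inode are
 * actually removed, e.g. roughly:
 *
 *	btrfs_record_unlink_dir(trans, BTRFS_I(dir), BTRFS_I(d_inode(dentry)),
 *				0);
 *	ret = btrfs_unlink_inode(trans, root, BTRFS_I(dir),
 *				 BTRFS_I(d_inode(dentry)),
 *				 dentry->d_name.name, dentry->d_name.len);
 */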
/*
 * Make sure that if someone attempts to fsync the parent directory of a deleted
 * snapshot, it ends up triggering a transaction commit. This is to guarantee
 * that after replaying the log tree of the parent directory's root we will not
 * see the snapshot anymore and at log replay time we will not see any log tree
 * corresponding to the deleted snapshot's root, which could lead to replaying
 * it after replaying the log tree of the parent directory (which would replay
 * the snapshot delete operation).
 *
 * Must be called before the actual snapshot destroy operation (updates to the
 * parent root and tree of tree roots trees, etc) are done.
 */
void btrfs_record_snapshot_destroy(struct btrfs_trans_handle *trans,
				   struct btrfs_inode *dir)
{
	mutex_lock(&dir->log_mutex);
	dir->last_unlink_trans = trans->transid;
	mutex_unlock(&dir->log_mutex);
}
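
/*
 * Call-site sketch (an assumption about the snapshot delete ioctl, not part
 * of this file): the destruction is recorded before the subvolume entry is
 * unlinked, e.g. roughly:
 *
 *	btrfs_record_snapshot_destroy(trans, BTRFS_I(dir));
 *	ret = btrfs_unlink_subvol(trans, root, dir,
 *				  dest->root_key.objectid,
 *				  dentry->d_name.name, dentry->d_name.len);
 */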
/*
 * Call this after adding a new name for a file and it will properly
 * update the log to reflect the new name.
 *
 * It will return zero if all goes well, and it will return 1 if a
 * full transaction commit is required.
 */
int btrfs_log_new_name(struct btrfs_trans_handle *trans,
		       struct btrfs_inode *inode, struct btrfs_inode *old_dir,
		       struct dentry *parent)
{
	struct btrfs_fs_info *fs_info = btrfs_sb(inode->vfs_inode.i_sb);
	struct btrfs_root *root = inode->root;

	/*
	 * this will force the logging code to walk the dentry chain
	 * up to the root
	 */
	if (!S_ISDIR(inode->vfs_inode.i_mode))
		inode->last_unlink_trans = trans->transid;

	/*
	 * if this inode hasn't been logged and directory we're renaming it
	 * from hasn't been logged, we don't need to log it
	 */
	if (inode->logged_trans <= fs_info->last_trans_committed &&
	    (!old_dir || old_dir->logged_trans <= fs_info->last_trans_committed))
		return 0;

	return btrfs_log_inode_parent(trans, root, inode, parent, 0,
				      LLONG_MAX, 1, NULL);
}
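
/*
 * Usage sketch (illustrative, caller names hypothetical): a rename that adds
 * a new name calls this after the new name is in place and must fall back to
 * a full transaction commit when 1 is returned:
 *
 *	ret = btrfs_log_new_name(trans, BTRFS_I(old_inode), BTRFS_I(old_dir),
 *				 new_dentry->d_parent);
 *	if (ret)
 *		commit_transaction = true;
 */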