/*
 * Copyright (C) 2008 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 */
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/blkdev.h>
#include <linux/list_sort.h>
#include "tree-log.h"
#include "disk-io.h"
#include "locking.h"
#include "print-tree.h"
#include "backref.h"
#include "hash.h"
#include "compression.h"
#include "qgroup.h"
/*
 * magic values for the inode_only field in btrfs_log_inode:
 *
 * LOG_INODE_ALL means to log everything
 * LOG_INODE_EXISTS means to log just enough to recreate the inode
 * during log replay
 */
#define LOG_INODE_ALL 0
#define LOG_INODE_EXISTS 1
#define LOG_OTHER_INODE 2
/*
 * directory trouble cases
 *
 * 1) on rename or unlink, if the inode being unlinked isn't in the fsync
 * log, we must force a full commit before doing an fsync of the directory
 * where the unlink was done.
 * ---> record transid of last unlink/rename per directory
 *
 * mkdir foo/some_dir
 * normal commit
 * rename foo/some_dir foo2/some_dir
 * mkdir foo/some_dir
 * fsync foo/some_dir/some_file
 *
 * The fsync above will unlink the original some_dir without recording
 * it in its new location (foo2).  After a crash, some_dir will be gone
 * unless the fsync of some_file forces a full commit
 *
 * 2) we must log any new names for any file or dir that is in the fsync
 * log. ---> check inode while renaming/linking.
 *
 * 2a) we must log any new names for any file or dir during rename
 * when the directory they are being removed from was logged.
 * ---> check inode and old parent dir during rename
 *
 * 2a is actually the more important variant.  Without the extra logging
 * a crash might unlink the old name without recreating the new one
 *
 * 3) after a crash, we must go through any directories with a link count
 * of zero and redo the rm -rf
 *
 * mkdir f1/foo
 * normal commit
 * rm -rf f1/foo
 * fsync(f1)
 *
 * The directory f1 was fully removed from the FS, but fsync was never
 * called on f1, only its parent dir.  After a crash the rm -rf must
 * be replayed.  This must be able to recurse down the entire
 * directory tree.  The inode link count fixup code takes care of the
 * ugly details.
 */
/*
 * stages for the tree walking.  The first
 * stage (0) is to only pin down the blocks we find,
 * the second stage (1) is to make sure that all the inodes
 * we find in the log are created in the subvolume.
 *
 * The last stage is to deal with directories and links and extents
 * and all the other fun semantics
 */
#define LOG_WALK_PIN_ONLY 0
#define LOG_WALK_REPLAY_INODES 1
#define LOG_WALK_REPLAY_DIR_INDEX 2
#define LOG_WALK_REPLAY_ALL 3
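
/*
 * A minimal sketch of how these stages are consumed (hedged: the real
 * driver loop lives in btrfs_recover_log_trees(), outside this excerpt,
 * so the exact code below is illustrative only):
 *
 *	wc.stage = LOG_WALK_PIN_ONLY;
 *	while (wc.stage <= LOG_WALK_REPLAY_ALL) {
 *		walk_log_tree(trans, log, &wc);
 *		wc.stage++;
 *	}
 */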
static int btrfs_log_inode(struct btrfs_trans_handle *trans,
			   struct btrfs_root *root, struct btrfs_inode *inode,
			   int inode_only,
			   const loff_t start,
			   const loff_t end,
			   struct btrfs_log_ctx *ctx);
static int link_to_fixup_dir(struct btrfs_trans_handle *trans,
			     struct btrfs_root *root,
			     struct btrfs_path *path, u64 objectid);
static noinline int replay_dir_deletes(struct btrfs_trans_handle *trans,
				       struct btrfs_root *root,
				       struct btrfs_root *log,
				       struct btrfs_path *path,
				       u64 dirid, int del_all);
/*
 * tree logging is a special write ahead log used to make sure that
 * fsyncs and O_SYNCs can happen without doing full tree commits.
 *
 * Full tree commits are expensive because they require commonly
 * modified blocks to be recowed, creating many dirty pages in the
 * extent tree and a 4x-6x higher write load than ext3.
 *
 * Instead of doing a tree commit on every fsync, we use the
 * key ranges and transaction ids to find items for a given file or directory
 * that have changed in this transaction.  Those items are copied into
 * a special tree (one per subvolume root), that tree is written to disk
 * and then the fsync is considered complete.
 *
 * After a crash, items are copied out of the log-tree back into the
 * subvolume tree.  Any file data extents found are recorded in the extent
 * allocation tree, and the log-tree is freed.
 *
 * The log tree is read three times: once to pin down all the extents it is
 * using in ram, once to create all the inodes logged in the tree,
 * and once to do all the other items.
 */
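
/*
 * Illustrative example (assumed key values, not code from this file): an
 * fsync of inode 257 copies only that inode's items into the per-subvolume
 * log tree, e.g.
 *
 *	(257 INODE_ITEM 0)
 *	(257 INODE_REF <parent dir>)
 *	(257 EXTENT_DATA 0), (257 EXTENT_DATA 4096), ...
 *
 * so the log stays small while the dirty subvolume and extent tree blocks
 * wait for the next real transaction commit.
 */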
/*
 * start a sub transaction and setup the log tree
 * this increments the log tree writer count to make the people
 * syncing the tree wait for us to finish
 */
static int start_log_trans(struct btrfs_trans_handle *trans,
			   struct btrfs_root *root,
			   struct btrfs_log_ctx *ctx)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	int ret = 0;

	mutex_lock(&root->log_mutex);

	if (root->log_root) {
		if (btrfs_need_log_full_commit(fs_info, trans)) {
			ret = -EAGAIN;
			goto out;
		}

		if (!root->log_start_pid) {
			clear_bit(BTRFS_ROOT_MULTI_LOG_TASKS, &root->state);
			root->log_start_pid = current->pid;
		} else if (root->log_start_pid != current->pid) {
			set_bit(BTRFS_ROOT_MULTI_LOG_TASKS, &root->state);
		}
	} else {
		mutex_lock(&fs_info->tree_log_mutex);
		if (!fs_info->log_root_tree)
			ret = btrfs_init_log_root_tree(trans, fs_info);
		mutex_unlock(&fs_info->tree_log_mutex);
		if (ret)
			goto out;

		ret = btrfs_add_log_tree(trans, root);
		if (ret)
			goto out;

		clear_bit(BTRFS_ROOT_MULTI_LOG_TASKS, &root->state);
		root->log_start_pid = current->pid;
	}

	atomic_inc(&root->log_batch);
	atomic_inc(&root->log_writers);
	if (ctx) {
		int index = root->log_transid % 2;
		list_add_tail(&ctx->list, &root->log_ctxs[index]);
		ctx->log_transid = root->log_transid;
	}

out:
	mutex_unlock(&root->log_mutex);
	return ret;
}
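
/*
 * Usage sketch (hedged: the real call sites, e.g. btrfs_log_inode_parent(),
 * are outside this excerpt): a writer brackets its log updates between
 * start_log_trans() and btrfs_end_log_trans(), roughly
 *
 *	ret = start_log_trans(trans, root, ctx);
 *	if (!ret) {
 *		ret = btrfs_log_inode(trans, root, inode, LOG_INODE_ALL,
 *				      0, LLONG_MAX, ctx);
 *		btrfs_end_log_trans(root);
 *	}
 */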
/*
 * returns 0 if there was a log transaction running and we were able
 * to join, or returns -ENOENT if there were no transactions
 * in progress
 */
static int join_running_log_trans(struct btrfs_root *root)
{
	int ret = -ENOENT;

	smp_mb();
	if (!root->log_root)
		return -ENOENT;

	mutex_lock(&root->log_mutex);
	if (root->log_root) {
		ret = 0;
		atomic_inc(&root->log_writers);
	}
	mutex_unlock(&root->log_mutex);
	return ret;
}
/*
 * This either makes the current running log transaction wait
 * until you call btrfs_end_log_trans() or it makes any future
 * log transactions wait until you call btrfs_end_log_trans()
 */
int btrfs_pin_log_trans(struct btrfs_root *root)
{
	int ret = 0;

	mutex_lock(&root->log_mutex);
	atomic_inc(&root->log_writers);
	mutex_unlock(&root->log_mutex);
	return ret;
}
/*
 * indicate we're done making changes to the log tree
 * and wake up anyone waiting to do a sync
 */
void btrfs_end_log_trans(struct btrfs_root *root)
{
	if (atomic_dec_and_test(&root->log_writers)) {
		/*
		 * Implicit memory barrier after atomic_dec_and_test
		 */
		if (waitqueue_active(&root->log_writer_wait))
			wake_up(&root->log_writer_wait);
	}
}
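
/*
 * btrfs_pin_log_trans() and btrfs_end_log_trans() are used as a pair: the
 * pin bumps log_writers so a concurrent log sync waits, and the end drops
 * the count and wakes log_writer_wait.  Illustrative sketch (the real
 * pin/end call sites, e.g. around rename, are outside this excerpt):
 *
 *	btrfs_pin_log_trans(root);
 *	// ...updates the log sync must not observe half-done...
 *	btrfs_end_log_trans(root);
 */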
/*
 * the walk control struct is used to pass state down the chain when
 * processing the log tree.  The stage field tells us which part
 * of the log tree processing we are currently doing.  The others
 * are state fields used for that specific part
 */
struct walk_control {
	/* should we free the extent on disk when done?  This is used
	 * at transaction commit time while freeing a log tree
	 */
	int free;

	/* should we write out the extent buffer?  This is used
	 * while flushing the log tree to disk during a sync
	 */
	int write;

	/* should we wait for the extent buffer io to finish?  Also used
	 * while flushing the log tree to disk for a sync
	 */
	int wait;

	/* pin only walk, we record which extents on disk belong to the
	 * log trees
	 */
	int pin;

	/* what stage of the replay code we're currently in */
	int stage;

	/* the root we are currently replaying */
	struct btrfs_root *replay_dest;

	/* the trans handle for the current replay */
	struct btrfs_trans_handle *trans;

	/* the function that gets used to process blocks we find in the
	 * tree.  Note the extent_buffer might not be up to date when it is
	 * passed in, and it must be checked or read if you need the data
	 * inside it
	 */
	int (*process_func)(struct btrfs_root *log, struct extent_buffer *eb,
			    struct walk_control *wc, u64 gen);
};
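
/*
 * Example initializer (a hedged sketch; the real ones live in the log
 * free/sync paths outside this excerpt): freeing a log tree at commit
 * time would fill the struct in roughly like
 *
 *	struct walk_control wc = {
 *		.free = 1,
 *		.process_func = process_one_buffer,
 *	};
 *	walk_log_tree(trans, log, &wc);
 */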
/*
 * process_func used to pin down extents, write them or wait on them
 */
static int process_one_buffer(struct btrfs_root *log,
			      struct extent_buffer *eb,
			      struct walk_control *wc, u64 gen)
{
	struct btrfs_fs_info *fs_info = log->fs_info;
	int ret = 0;

	/*
	 * If this fs is mixed then we need to be able to process the leaves to
	 * pin down any logged extents, so we have to read the block.
	 */
	if (btrfs_fs_incompat(fs_info, MIXED_GROUPS)) {
		ret = btrfs_read_buffer(eb, gen);
		if (ret)
			return ret;
	}

	if (wc->pin)
		ret = btrfs_pin_extent_for_log_replay(fs_info, eb->start,
						      eb->len);

	if (!ret && btrfs_buffer_uptodate(eb, gen, 0)) {
		if (wc->pin && btrfs_header_level(eb) == 0)
			ret = btrfs_exclude_logged_extents(fs_info, eb);
		if (wc->write)
			btrfs_write_tree_block(eb);
		if (wc->wait)
			btrfs_wait_tree_block_writeback(eb);
	}
	return ret;
}
/*
 * Item overwrite used by replay and tree logging.  eb, slot and key all refer
 * to the src data we are copying out.
 *
 * root is the tree we are copying into, and path is a scratch
 * path for use in this function (it should be released on entry and
 * will be released on exit).
 *
 * If the key is already in the destination tree the existing item is
 * overwritten.  If the existing item isn't big enough, it is extended.
 * If it is too large, it is truncated.
 *
 * If the key isn't in the destination yet, a new item is inserted.
 */
static noinline int overwrite_item(struct btrfs_trans_handle *trans,
				   struct btrfs_root *root,
				   struct btrfs_path *path,
				   struct extent_buffer *eb, int slot,
				   struct btrfs_key *key)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	int ret;
	u32 item_size;
	u64 saved_i_size = 0;
	int save_old_i_size = 0;
	unsigned long src_ptr;
	unsigned long dst_ptr;
	int overwrite_root = 0;
	bool inode_item = key->type == BTRFS_INODE_ITEM_KEY;

	if (root->root_key.objectid != BTRFS_TREE_LOG_OBJECTID)
		overwrite_root = 1;

	item_size = btrfs_item_size_nr(eb, slot);
	src_ptr = btrfs_item_ptr_offset(eb, slot);

	/* look for the key in the destination tree */
	ret = btrfs_search_slot(NULL, root, key, path, 0, 0);
	if (ret < 0)
		return ret;

	if (ret == 0) {
		char *src_copy;
		char *dst_copy;
		u32 dst_size = btrfs_item_size_nr(path->nodes[0],
						  path->slots[0]);
		if (dst_size != item_size)
			goto insert;

		if (item_size == 0) {
			btrfs_release_path(path);
			return 0;
		}
		dst_copy = kmalloc(item_size, GFP_NOFS);
		src_copy = kmalloc(item_size, GFP_NOFS);
		if (!dst_copy || !src_copy) {
			btrfs_release_path(path);
			kfree(dst_copy);
			kfree(src_copy);
			return -ENOMEM;
		}

		read_extent_buffer(eb, src_copy, src_ptr, item_size);

		dst_ptr = btrfs_item_ptr_offset(path->nodes[0], path->slots[0]);
		read_extent_buffer(path->nodes[0], dst_copy, dst_ptr,
				   item_size);
		ret = memcmp(dst_copy, src_copy, item_size);

		kfree(dst_copy);
		kfree(src_copy);
		/*
		 * they have the same contents, just return, this saves
		 * us from cowing blocks in the destination tree and doing
		 * extra writes that may not have been done by a previous
		 * sync
		 */
		if (ret == 0) {
			btrfs_release_path(path);
			return 0;
		}

		/*
		 * We need to load the old nbytes into the inode so when we
		 * replay the extents we've logged we get the right nbytes.
		 */
		if (inode_item) {
			struct btrfs_inode_item *item;
			u64 nbytes;
			u32 mode;

			item = btrfs_item_ptr(path->nodes[0], path->slots[0],
					      struct btrfs_inode_item);
			nbytes = btrfs_inode_nbytes(path->nodes[0], item);
			item = btrfs_item_ptr(eb, slot,
					      struct btrfs_inode_item);
			btrfs_set_inode_nbytes(eb, item, nbytes);

			/*
			 * If this is a directory we need to reset the i_size to
			 * 0 so that we can set it up properly when replaying
			 * the rest of the items in this log.
			 */
			mode = btrfs_inode_mode(eb, item);
			if (S_ISDIR(mode))
				btrfs_set_inode_size(eb, item, 0);
		}
	} else if (inode_item) {
		struct btrfs_inode_item *item;
		u32 mode;

		/*
		 * New inode, set nbytes to 0 so that the nbytes comes out
		 * properly when we replay the extents.
		 */
		item = btrfs_item_ptr(eb, slot, struct btrfs_inode_item);
		btrfs_set_inode_nbytes(eb, item, 0);

		/*
		 * If this is a directory we need to reset the i_size to 0 so
		 * that we can set it up properly when replaying the rest of
		 * the items in this log.
		 */
		mode = btrfs_inode_mode(eb, item);
		if (S_ISDIR(mode))
			btrfs_set_inode_size(eb, item, 0);
	}
insert:
	btrfs_release_path(path);
	/* try to insert the key into the destination tree */
	path->skip_release_on_error = 1;
	ret = btrfs_insert_empty_item(trans, root, path,
				      key, item_size);
	path->skip_release_on_error = 0;

	/* make sure any existing item is the correct size */
	if (ret == -EEXIST || ret == -EOVERFLOW) {
		u32 found_size;

		found_size = btrfs_item_size_nr(path->nodes[0],
						path->slots[0]);
		if (found_size > item_size)
			btrfs_truncate_item(fs_info, path, item_size, 1);
		else if (found_size < item_size)
			btrfs_extend_item(fs_info, path,
					  item_size - found_size);
	} else if (ret) {
		return ret;
	}
	dst_ptr = btrfs_item_ptr_offset(path->nodes[0],
					path->slots[0]);

	/* don't overwrite an existing inode if the generation number
	 * was logged as zero.  This is done when the tree logging code
	 * is just logging an inode to make sure it exists after recovery.
	 *
	 * Also, don't overwrite i_size on directories during replay.
	 * log replay inserts and removes directory items based on the
	 * state of the tree found in the subvolume, and i_size is modified
	 * as it goes
	 */
	if (key->type == BTRFS_INODE_ITEM_KEY && ret == -EEXIST) {
		struct btrfs_inode_item *src_item;
		struct btrfs_inode_item *dst_item;

		src_item = (struct btrfs_inode_item *)src_ptr;
		dst_item = (struct btrfs_inode_item *)dst_ptr;

		if (btrfs_inode_generation(eb, src_item) == 0) {
			struct extent_buffer *dst_eb = path->nodes[0];
			const u64 ino_size = btrfs_inode_size(eb, src_item);

			/*
			 * For regular files an ino_size == 0 is used only when
			 * logging that an inode exists, as part of a directory
			 * fsync, and the inode wasn't fsynced before. In this
			 * case don't set the size of the inode in the fs/subvol
			 * tree, otherwise we would be throwing valid data away.
			 */
			if (S_ISREG(btrfs_inode_mode(eb, src_item)) &&
			    S_ISREG(btrfs_inode_mode(dst_eb, dst_item)) &&
			    ino_size != 0) {
				struct btrfs_map_token token;

				btrfs_init_map_token(&token);
				btrfs_set_token_inode_size(dst_eb, dst_item,
							   ino_size, &token);
			}
			goto no_copy;
		}

		if (overwrite_root &&
		    S_ISDIR(btrfs_inode_mode(eb, src_item)) &&
		    S_ISDIR(btrfs_inode_mode(path->nodes[0], dst_item))) {
			save_old_i_size = 1;
			saved_i_size = btrfs_inode_size(path->nodes[0],
							dst_item);
		}
	}

	copy_extent_buffer(path->nodes[0], eb, dst_ptr,
			   src_ptr, item_size);

	if (save_old_i_size) {
		struct btrfs_inode_item *dst_item;

		dst_item = (struct btrfs_inode_item *)dst_ptr;
		btrfs_set_inode_size(path->nodes[0], dst_item, saved_i_size);
	}

	/* make sure the generation is filled in */
	if (key->type == BTRFS_INODE_ITEM_KEY) {
		struct btrfs_inode_item *dst_item;

		dst_item = (struct btrfs_inode_item *)dst_ptr;
		if (btrfs_inode_generation(path->nodes[0], dst_item) == 0) {
			btrfs_set_inode_generation(path->nodes[0], dst_item,
						   trans->transid);
		}
	}
no_copy:
	btrfs_mark_buffer_dirty(path->nodes[0]);
	btrfs_release_path(path);
	return 0;
}
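
/*
 * Concrete example of the resize logic above (illustrative sizes): if the
 * log holds a 160 byte item and the destination tree already has the key
 * with a 96 byte item, btrfs_insert_empty_item() returns -EEXIST and
 * btrfs_extend_item() grows the item by 64 bytes before the
 * copy_extent_buffer(); had the existing item been 200 bytes,
 * btrfs_truncate_item() would shrink it to 160 first.
 */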
/*
 * simple helper to read an inode off the disk from a given root
 * This can only be called for subvolume roots and not for the log
 */
static noinline struct inode *read_one_inode(struct btrfs_root *root,
					     u64 objectid)
{
	struct btrfs_key key;
	struct inode *inode;

	key.objectid = objectid;
	key.type = BTRFS_INODE_ITEM_KEY;
	key.offset = 0;
	inode = btrfs_iget(root->fs_info->sb, &key, root, NULL);
	if (IS_ERR(inode)) {
		inode = NULL;
	} else if (is_bad_inode(inode)) {
		iput(inode);
		inode = NULL;
	}
	return inode;
}
/* replays a single extent in 'eb' at 'slot' with 'key' into the
 * subvolume 'root'.  path is released on entry and should be released
 * on exit.
 *
 * extents in the log tree have not been allocated out of the extent
 * tree yet.  So, this completes the allocation, taking a reference
 * as required if the extent already exists or creating a new extent
 * if it isn't in the extent allocation tree yet.
 *
 * The extent is inserted into the file, dropping any existing extents
 * from the file that overlap the new one.
 */
static noinline int replay_one_extent(struct btrfs_trans_handle *trans,
				      struct btrfs_root *root,
				      struct btrfs_path *path,
				      struct extent_buffer *eb, int slot,
				      struct btrfs_key *key)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	int found_type;
	u64 extent_end;
	u64 start = key->offset;
	u64 nbytes = 0;
	struct btrfs_file_extent_item *item;
	struct inode *inode = NULL;
	unsigned long size;
	int ret = 0;

	item = btrfs_item_ptr(eb, slot, struct btrfs_file_extent_item);
	found_type = btrfs_file_extent_type(eb, item);

	if (found_type == BTRFS_FILE_EXTENT_REG ||
	    found_type == BTRFS_FILE_EXTENT_PREALLOC) {
		nbytes = btrfs_file_extent_num_bytes(eb, item);
		extent_end = start + nbytes;

		/*
		 * We don't add to the inodes nbytes if we are prealloc or a
		 * hole.
		 */
		if (btrfs_file_extent_disk_bytenr(eb, item) == 0)
			nbytes = 0;
	} else if (found_type == BTRFS_FILE_EXTENT_INLINE) {
		size = btrfs_file_extent_inline_len(eb, slot, item);
		nbytes = btrfs_file_extent_ram_bytes(eb, item);
		extent_end = ALIGN(start + size,
				   fs_info->sectorsize);
	} else {
		ret = 0;
		goto out;
	}

	inode = read_one_inode(root, key->objectid);
	if (!inode) {
		ret = -EIO;
		goto out;
	}

	/*
	 * first check to see if we already have this extent in the
	 * file.  This must be done before the btrfs_drop_extents run
	 * so we don't try to drop this extent.
	 */
	ret = btrfs_lookup_file_extent(trans, root, path,
			btrfs_ino(BTRFS_I(inode)), start, 0);

	if (ret == 0 &&
	    (found_type == BTRFS_FILE_EXTENT_REG ||
	     found_type == BTRFS_FILE_EXTENT_PREALLOC)) {
		struct btrfs_file_extent_item cmp1;
		struct btrfs_file_extent_item cmp2;
		struct btrfs_file_extent_item *existing;
		struct extent_buffer *leaf;

		leaf = path->nodes[0];
		existing = btrfs_item_ptr(leaf, path->slots[0],
					  struct btrfs_file_extent_item);

		read_extent_buffer(eb, &cmp1, (unsigned long)item,
				   sizeof(cmp1));
		read_extent_buffer(leaf, &cmp2, (unsigned long)existing,
				   sizeof(cmp2));

		/*
		 * we already have a pointer to this exact extent,
		 * we don't have to do anything
		 */
		if (memcmp(&cmp1, &cmp2, sizeof(cmp1)) == 0) {
			btrfs_release_path(path);
			goto out;
		}
	}
	btrfs_release_path(path);

	/* drop any overlapping extents */
	ret = btrfs_drop_extents(trans, root, inode, start, extent_end, 1);
	if (ret)
		goto out;

	if (found_type == BTRFS_FILE_EXTENT_REG ||
	    found_type == BTRFS_FILE_EXTENT_PREALLOC) {
		u64 offset;
		unsigned long dest_offset;
		struct btrfs_key ins;

		if (btrfs_file_extent_disk_bytenr(eb, item) == 0 &&
		    btrfs_fs_incompat(fs_info, NO_HOLES))
			goto update_inode;

		ret = btrfs_insert_empty_item(trans, root, path, key,
					      sizeof(*item));
		if (ret)
			goto out;
		dest_offset = btrfs_item_ptr_offset(path->nodes[0],
						    path->slots[0]);
		copy_extent_buffer(path->nodes[0], eb, dest_offset,
				   (unsigned long)item, sizeof(*item));

		ins.objectid = btrfs_file_extent_disk_bytenr(eb, item);
		ins.offset = btrfs_file_extent_disk_num_bytes(eb, item);
		ins.type = BTRFS_EXTENT_ITEM_KEY;
		offset = key->offset - btrfs_file_extent_offset(eb, item);

		/*
		 * Manually record dirty extent, as here we did a shallow
		 * file extent item copy and skip normal backref update,
		 * but modifying extent tree all by ourselves.
		 * So need to manually record dirty extent for qgroup,
		 * as the owner of the file extent changed from log tree
		 * (doesn't affect qgroup) to fs/file tree (affects qgroup)
		 */
		ret = btrfs_qgroup_trace_extent(trans, fs_info,
				btrfs_file_extent_disk_bytenr(eb, item),
				btrfs_file_extent_disk_num_bytes(eb, item),
				GFP_NOFS);
		if (ret < 0)
			goto out;

		if (ins.objectid > 0) {
			u64 csum_start;
			u64 csum_end;
			LIST_HEAD(ordered_sums);
			/*
			 * is this extent already allocated in the extent
			 * allocation tree?  If so, just add a reference
			 */
			ret = btrfs_lookup_data_extent(fs_info, ins.objectid,
						ins.offset);
			if (ret == 0) {
				ret = btrfs_inc_extent_ref(trans, root,
						ins.objectid, ins.offset,
						0, root->root_key.objectid,
						key->objectid, offset);
				if (ret)
					goto out;
			} else {
				/*
				 * insert the extent pointer in the extent
				 * allocation tree
				 */
				ret = btrfs_alloc_logged_file_extent(trans,
						fs_info,
						root->root_key.objectid,
						key->objectid, offset, &ins);
				if (ret)
					goto out;
			}
			btrfs_release_path(path);

			if (btrfs_file_extent_compression(eb, item)) {
				csum_start = ins.objectid;
				csum_end = csum_start + ins.offset;
			} else {
				csum_start = ins.objectid +
					btrfs_file_extent_offset(eb, item);
				csum_end = csum_start +
					btrfs_file_extent_num_bytes(eb, item);
			}

			ret = btrfs_lookup_csums_range(root->log_root,
						csum_start, csum_end - 1,
						&ordered_sums, 0);
			if (ret)
				goto out;
			/*
			 * Now delete all existing csums in the csum root that
			 * cover our range. We do this because we can have an
			 * extent that is completely referenced by one file
			 * extent item and partially referenced by another
			 * file extent item (like after using the clone or
			 * extent_same ioctls). In this case if we end up doing
			 * the replay of the one that partially references the
			 * extent first, and we do not do the csum deletion
			 * below, we can get 2 csum items in the csum tree that
			 * overlap each other. For example, imagine our log has
			 * the two following file extent items:
			 *
			 * key (257 EXTENT_DATA 409600)
			 *     extent data disk byte 12845056 nr 102400
			 *     extent data offset 20480 nr 20480 ram 102400
			 *
			 * key (257 EXTENT_DATA 819200)
			 *     extent data disk byte 12845056 nr 102400
			 *     extent data offset 0 nr 102400 ram 102400
			 *
			 * Where the second one fully references the 100K extent
			 * that starts at disk byte 12845056, and the log tree
			 * has a single csum item that covers the entire range
			 * of the extent:
			 *
			 * key (EXTENT_CSUM EXTENT_CSUM 12845056) itemsize 100
			 *
			 * After the first file extent item is replayed, the
			 * csum tree gets the following csum item:
			 *
			 * key (EXTENT_CSUM EXTENT_CSUM 12865536) itemsize 20
			 *
			 * Which covers the 20K sub-range starting at offset 20K
			 * of our extent. Now when we replay the second file
			 * extent item, if we do not delete existing csum items
			 * that cover any of its blocks, we end up getting two
			 * csum items in our csum tree that overlap each other:
			 *
			 * key (EXTENT_CSUM EXTENT_CSUM 12845056) itemsize 100
			 * key (EXTENT_CSUM EXTENT_CSUM 12865536) itemsize 20
			 *
			 * Which is a problem, because after this anyone trying
			 * to look up the checksum of any block of our extent
			 * starting at an offset of 40K or higher, will end up
			 * looking at the second csum item only, which does not
			 * contain the checksum for any block starting at
			 * offset 40K or higher of our extent.
			 */
			while (!list_empty(&ordered_sums)) {
				struct btrfs_ordered_sum *sums;

				sums = list_entry(ordered_sums.next,
						struct btrfs_ordered_sum,
						list);
				if (!ret)
					ret = btrfs_del_csums(trans, fs_info,
							      sums->bytenr,
							      sums->len);
				if (!ret)
					ret = btrfs_csum_file_blocks(trans,
						fs_info->csum_root, sums);
				list_del(&sums->list);
				kfree(sums);
			}
			if (ret)
				goto out;
		} else {
			btrfs_release_path(path);
		}
	} else if (found_type == BTRFS_FILE_EXTENT_INLINE) {
		/* inline extents are easy, we just overwrite them */
		ret = overwrite_item(trans, root, path, eb, slot, key);
		if (ret)
			goto out;
	}

update_inode:
	inode_add_bytes(inode, nbytes);
	ret = btrfs_update_inode(trans, root, inode);
out:
	if (inode)
		iput(inode);
	return ret;
}
/*
 * when cleaning up conflicts between the directory names in the
 * subvolume, directory names in the log and directory names in the
 * inode back references, we may have to unlink inodes from directories.
 *
 * This is a helper function to do the unlink of a specific directory
 * item
 */
static noinline int drop_one_dir_item(struct btrfs_trans_handle *trans,
				      struct btrfs_root *root,
				      struct btrfs_path *path,
				      struct btrfs_inode *dir,
				      struct btrfs_dir_item *di)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct inode *inode;
	char *name;
	int name_len;
	struct extent_buffer *leaf;
	struct btrfs_key location;
	int ret;

	leaf = path->nodes[0];

	btrfs_dir_item_key_to_cpu(leaf, di, &location);
	name_len = btrfs_dir_name_len(leaf, di);
	name = kmalloc(name_len, GFP_NOFS);
	if (!name)
		return -ENOMEM;

	read_extent_buffer(leaf, name, (unsigned long)(di + 1), name_len);
	btrfs_release_path(path);

	inode = read_one_inode(root, location.objectid);
	if (!inode) {
		ret = -EIO;
		goto out;
	}

	ret = link_to_fixup_dir(trans, root, path, location.objectid);
	if (ret)
		goto out;

	ret = btrfs_unlink_inode(trans, root, dir, BTRFS_I(inode), name,
				 name_len);
	if (ret)
		goto out;
	else
		ret = btrfs_run_delayed_items(trans, fs_info);
out:
	kfree(name);
	iput(inode);
	return ret;
}
/*
 * helper function to see if a given name and sequence number found
 * in an inode back reference are already in a directory and correctly
 * point to this inode
 */
static noinline int inode_in_dir(struct btrfs_root *root,
				 struct btrfs_path *path,
				 u64 dirid, u64 objectid, u64 index,
				 const char *name, int name_len)
{
	struct btrfs_dir_item *di;
	struct btrfs_key location;
	int match = 0;

	di = btrfs_lookup_dir_index_item(NULL, root, path, dirid,
					 index, name, name_len, 0);
	if (di && !IS_ERR(di)) {
		btrfs_dir_item_key_to_cpu(path->nodes[0], di, &location);
		if (location.objectid != objectid)
			goto out;
	} else {
		goto out;
	}
	btrfs_release_path(path);

	di = btrfs_lookup_dir_item(NULL, root, path, dirid, name, name_len, 0);
	if (di && !IS_ERR(di)) {
		btrfs_dir_item_key_to_cpu(path->nodes[0], di, &location);
		if (location.objectid != objectid)
			goto out;
	} else {
		goto out;
	}
	match = 1;
out:
	btrfs_release_path(path);
	return match;
}
/*
 * helper function to check a log tree for a named back reference in
 * an inode.  This is used to decide if a back reference that is
 * found in the subvolume conflicts with what we find in the log.
 *
 * inode backreferences may have multiple refs in a single item,
 * during replay we process one reference at a time, and we don't
 * want to delete valid links to a file from the subvolume if that
 * link is also in the log.
 */
static noinline int backref_in_log(struct btrfs_root *log,
				   struct btrfs_key *key,
				   u64 ref_objectid,
				   const char *name, int namelen)
{
	struct btrfs_path *path;
	struct btrfs_inode_ref *ref;
	unsigned long ptr;
	unsigned long ptr_end;
	unsigned long name_ptr;
	int found_name_len;
	int item_size;
	int ret;
	int match = 0;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	ret = btrfs_search_slot(NULL, log, key, path, 0, 0);
	if (ret != 0)
		goto out;

	ptr = btrfs_item_ptr_offset(path->nodes[0], path->slots[0]);

	if (key->type == BTRFS_INODE_EXTREF_KEY) {
		if (btrfs_find_name_in_ext_backref(path, ref_objectid,
						   name, namelen, NULL))
			match = 1;
		goto out;
	}

	item_size = btrfs_item_size_nr(path->nodes[0], path->slots[0]);
	ptr_end = ptr + item_size;
	while (ptr < ptr_end) {
		ref = (struct btrfs_inode_ref *)ptr;
		found_name_len = btrfs_inode_ref_name_len(path->nodes[0], ref);
		if (found_name_len == namelen) {
			name_ptr = (unsigned long)(ref + 1);
			ret = memcmp_extent_buffer(path->nodes[0], name,
						   name_ptr, namelen);
			if (ret == 0) {
				match = 1;
				goto out;
			}
		}
		ptr = (unsigned long)(ref + 1) + found_name_len;
	}
out:
	btrfs_free_path(path);
	return match;
}
static inline int __add_inode_ref(struct btrfs_trans_handle *trans,
				  struct btrfs_root *root,
				  struct btrfs_path *path,
				  struct btrfs_root *log_root,
				  struct btrfs_inode *dir,
				  struct btrfs_inode *inode,
				  u64 inode_objectid, u64 parent_objectid,
				  u64 ref_index, char *name, int namelen,
				  int *search_done)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	int ret;
	char *victim_name;
	int victim_name_len;
	struct extent_buffer *leaf;
	struct btrfs_dir_item *di;
	struct btrfs_key search_key;
	struct btrfs_inode_extref *extref;

again:
	/* Search old style refs */
	search_key.objectid = inode_objectid;
	search_key.type = BTRFS_INODE_REF_KEY;
	search_key.offset = parent_objectid;
	ret = btrfs_search_slot(NULL, root, &search_key, path, 0, 0);
	if (ret == 0) {
		struct btrfs_inode_ref *victim_ref;
		unsigned long ptr;
		unsigned long ptr_end;

		leaf = path->nodes[0];

		/* are we trying to overwrite a back ref for the root directory
		 * if so, just jump out, we're done
		 */
		if (search_key.objectid == search_key.offset)
			return 1;

		/* check all the names in this back reference to see
		 * if they are in the log.  if so, we allow them to stay
		 * otherwise they must be unlinked as a conflict
		 */
		ptr = btrfs_item_ptr_offset(leaf, path->slots[0]);
		ptr_end = ptr + btrfs_item_size_nr(leaf, path->slots[0]);
		while (ptr < ptr_end) {
			victim_ref = (struct btrfs_inode_ref *)ptr;
			victim_name_len = btrfs_inode_ref_name_len(leaf,
								   victim_ref);
			victim_name = kmalloc(victim_name_len, GFP_NOFS);
			if (!victim_name)
				return -ENOMEM;

			read_extent_buffer(leaf, victim_name,
					   (unsigned long)(victim_ref + 1),
					   victim_name_len);

			if (!backref_in_log(log_root, &search_key,
					    parent_objectid,
					    victim_name,
					    victim_name_len)) {
				inc_nlink(&inode->vfs_inode);
				btrfs_release_path(path);

				ret = btrfs_unlink_inode(trans, root, dir, inode,
						victim_name, victim_name_len);
				kfree(victim_name);
				if (ret)
					return ret;
				ret = btrfs_run_delayed_items(trans, fs_info);
				if (ret)
					return ret;
				*search_done = 1;
				goto again;
			}
			kfree(victim_name);

			ptr = (unsigned long)(victim_ref + 1) + victim_name_len;
		}

		/*
		 * NOTE: we have searched root tree and checked the
		 * corresponding ref, it does not need to check again.
		 */
		*search_done = 1;
	}
	btrfs_release_path(path);

	/* Same search but for extended refs */
	extref = btrfs_lookup_inode_extref(NULL, root, path, name, namelen,
					   inode_objectid, parent_objectid, 0,
					   0);
	if (!IS_ERR_OR_NULL(extref)) {
		u32 item_size;
		u32 cur_offset = 0;
		unsigned long base;
		struct inode *victim_parent;

		leaf = path->nodes[0];

		item_size = btrfs_item_size_nr(leaf, path->slots[0]);
		base = btrfs_item_ptr_offset(leaf, path->slots[0]);

		while (cur_offset < item_size) {
			extref = (struct btrfs_inode_extref *)(base + cur_offset);

			victim_name_len = btrfs_inode_extref_name_len(leaf, extref);

			if (btrfs_inode_extref_parent(leaf, extref) != parent_objectid)
				goto next;

			victim_name = kmalloc(victim_name_len, GFP_NOFS);
			if (!victim_name)
				return -ENOMEM;
			read_extent_buffer(leaf, victim_name, (unsigned long)&extref->name,
					   victim_name_len);

			search_key.objectid = inode_objectid;
			search_key.type = BTRFS_INODE_EXTREF_KEY;
			search_key.offset = btrfs_extref_hash(parent_objectid,
							      victim_name,
							      victim_name_len);
			ret = 0;
			if (!backref_in_log(log_root, &search_key,
					    parent_objectid, victim_name,
					    victim_name_len)) {
				ret = -ENOENT;
				victim_parent = read_one_inode(root,
						parent_objectid);
				if (victim_parent) {
					inc_nlink(&inode->vfs_inode);
					btrfs_release_path(path);

					ret = btrfs_unlink_inode(trans, root,
							BTRFS_I(victim_parent),
							inode,
							victim_name,
							victim_name_len);
					if (!ret)
						ret = btrfs_run_delayed_items(
								  trans,
								  fs_info);
				}
				iput(victim_parent);
				kfree(victim_name);
				if (ret)
					return ret;
				*search_done = 1;
				goto again;
			}
			kfree(victim_name);
next:
			cur_offset += victim_name_len + sizeof(*extref);
		}
		*search_done = 1;
	}
	btrfs_release_path(path);

	/* look for a conflicting sequence number */
	di = btrfs_lookup_dir_index_item(trans, root, path, btrfs_ino(dir),
					 ref_index, name, namelen, 0);
	if (di && !IS_ERR(di)) {
		ret = drop_one_dir_item(trans, root, path, dir, di);
		if (ret)
			return ret;
	}
	btrfs_release_path(path);

	/* look for a conflicting name */
	di = btrfs_lookup_dir_item(trans, root, path, btrfs_ino(dir),
				   name, namelen, 0);
	if (di && !IS_ERR(di)) {
		ret = drop_one_dir_item(trans, root, path, dir, di);
		if (ret)
			return ret;
	}
	btrfs_release_path(path);

	return 0;
}
static int extref_get_fields(struct extent_buffer *eb, int slot,
			     unsigned long ref_ptr, u32 *namelen, char **name,
			     u64 *index, u64 *parent_objectid)
{
	struct btrfs_inode_extref *extref;

	extref = (struct btrfs_inode_extref *)ref_ptr;

	*namelen = btrfs_inode_extref_name_len(eb, extref);
	if (!btrfs_is_name_len_valid(eb, slot, (unsigned long)&extref->name,
				     *namelen))
		return -EIO;

	*name = kmalloc(*namelen, GFP_NOFS);
	if (*name == NULL)
		return -ENOMEM;

	read_extent_buffer(eb, *name, (unsigned long)&extref->name,
			   *namelen);

	*index = btrfs_inode_extref_index(eb, extref);
	if (parent_objectid)
		*parent_objectid = btrfs_inode_extref_parent(eb, extref);

	return 0;
}

static int ref_get_fields(struct extent_buffer *eb, int slot,
			  unsigned long ref_ptr, u32 *namelen, char **name,
			  u64 *index)
{
	struct btrfs_inode_ref *ref;

	ref = (struct btrfs_inode_ref *)ref_ptr;

	*namelen = btrfs_inode_ref_name_len(eb, ref);
	if (!btrfs_is_name_len_valid(eb, slot, (unsigned long)(ref + 1),
				     *namelen))
		return -EIO;

	*name = kmalloc(*namelen, GFP_NOFS);
	if (*name == NULL)
		return -ENOMEM;

	read_extent_buffer(eb, *name, (unsigned long)(ref + 1), *namelen);

	*index = btrfs_inode_ref_index(eb, ref);

	return 0;
}
/*
 * replay one inode back reference item found in the log tree.
 * eb, slot and key refer to the buffer and key found in the log tree.
 * root is the destination we are replaying into, and path is for temp
 * use by this function.  (it should be released on return).
 */
static noinline int add_inode_ref(struct btrfs_trans_handle *trans,
				  struct btrfs_root *root,
				  struct btrfs_root *log,
				  struct btrfs_path *path,
				  struct extent_buffer *eb, int slot,
				  struct btrfs_key *key)
{
	struct inode *dir = NULL;
	struct inode *inode = NULL;
	unsigned long ref_ptr;
	unsigned long ref_end;
	char *name = NULL;
	u32 namelen;
	int ret;
	int search_done = 0;
	int log_ref_ver = 0;
	u64 parent_objectid;
	u64 inode_objectid;
	u64 ref_index = 0;
	int ref_struct_size;

	ref_ptr = btrfs_item_ptr_offset(eb, slot);
	ref_end = ref_ptr + btrfs_item_size_nr(eb, slot);

	if (key->type == BTRFS_INODE_EXTREF_KEY) {
		struct btrfs_inode_extref *r;

		ref_struct_size = sizeof(struct btrfs_inode_extref);
		log_ref_ver = 1;
		r = (struct btrfs_inode_extref *)ref_ptr;
		parent_objectid = btrfs_inode_extref_parent(eb, r);
	} else {
		ref_struct_size = sizeof(struct btrfs_inode_ref);
		parent_objectid = key->offset;
	}
	inode_objectid = key->objectid;

	/*
	 * it is possible that we didn't log all the parent directories
	 * for a given inode.  If we don't find the dir, just don't
	 * copy the back ref in.  The link count fixup code will take
	 * care of the rest
	 */
	dir = read_one_inode(root, parent_objectid);
	if (!dir) {
		ret = -ENOENT;
		goto out;
	}

	inode = read_one_inode(root, inode_objectid);
	if (!inode) {
		ret = -EIO;
		goto out;
	}

	while (ref_ptr < ref_end) {
		if (log_ref_ver) {
			ret = extref_get_fields(eb, slot, ref_ptr, &namelen,
					&name, &ref_index, &parent_objectid);
			/*
			 * parent object can change from one array
			 * item to another.
			 */
			if (!dir)
				dir = read_one_inode(root, parent_objectid);
			if (!dir) {
				ret = -ENOENT;
				goto out;
			}
		} else {
			ret = ref_get_fields(eb, slot, ref_ptr, &namelen,
					     &name, &ref_index);
		}
		if (ret)
			goto out;

		/* if we already have a perfect match, we're done */
		if (!inode_in_dir(root, path, btrfs_ino(BTRFS_I(dir)),
					btrfs_ino(BTRFS_I(inode)), ref_index,
					name, namelen)) {
			/*
			 * look for a conflicting back reference in the
			 * metadata. if we find one we have to unlink that name
			 * of the file before we add our new link.  Later on, we
			 * overwrite any existing back reference, and we don't
			 * want to create dangling pointers in the directory.
			 */
			if (!search_done) {
				ret = __add_inode_ref(trans, root, path, log,
						      BTRFS_I(dir),
						      BTRFS_I(inode),
						      inode_objectid,
						      parent_objectid,
						      ref_index, name, namelen,
						      &search_done);
				if (ret) {
					if (ret == 1)
						ret = 0;
					goto out;
				}
			}

			/* insert our name */
			ret = btrfs_add_link(trans, BTRFS_I(dir),
					BTRFS_I(inode),
					name, namelen, 0, ref_index);
			if (ret)
				goto out;

			btrfs_update_inode(trans, root, inode);
		}

		ref_ptr = (unsigned long)(ref_ptr + ref_struct_size) + namelen;
		kfree(name);
		name = NULL;
		if (log_ref_ver) {
			iput(dir);
			dir = NULL;
		}
	}

	/* finally write the back reference in the inode */
	ret = overwrite_item(trans, root, path, eb, slot, key);
out:
	btrfs_release_path(path);
	kfree(name);
	iput(dir);
	iput(inode);
	return ret;
}
static int insert_orphan_item(struct btrfs_trans_handle *trans,
			      struct btrfs_root *root, u64 ino)
{
	int ret;

	ret = btrfs_insert_orphan_item(trans, root, ino);
	if (ret == -EEXIST)
		ret = 0;

	return ret;
}
static int count_inode_extrefs(struct btrfs_root *root,
			       struct btrfs_inode *inode, struct btrfs_path *path)
{
	int ret = 0;
	int name_len;
	unsigned int nlink = 0;
	u32 item_size;
	u32 cur_offset = 0;
	u64 inode_objectid = btrfs_ino(inode);
	u64 offset = 0;
	unsigned long ptr;
	struct btrfs_inode_extref *extref;
	struct extent_buffer *leaf;

	while (1) {
		ret = btrfs_find_one_extref(root, inode_objectid, offset, path,
					    &extref, &offset);
		if (ret)
			break;

		leaf = path->nodes[0];
		item_size = btrfs_item_size_nr(leaf, path->slots[0]);
		ptr = btrfs_item_ptr_offset(leaf, path->slots[0]);
		cur_offset = 0;

		while (cur_offset < item_size) {
			extref = (struct btrfs_inode_extref *) (ptr + cur_offset);
			name_len = btrfs_inode_extref_name_len(leaf, extref);

			nlink++;

			cur_offset += name_len + sizeof(*extref);
		}

		offset++;
		btrfs_release_path(path);
	}
	btrfs_release_path(path);

	if (ret < 0 && ret != -ENOENT)
		return ret;
	return nlink;
}
static int count_inode_refs(struct btrfs_root *root,
			    struct btrfs_inode *inode, struct btrfs_path *path)
{
	int ret;
	struct btrfs_key key;
	unsigned int nlink = 0;
	unsigned long ptr;
	unsigned long ptr_end;
	int name_len;
	u64 ino = btrfs_ino(inode);

	key.objectid = ino;
	key.type = BTRFS_INODE_REF_KEY;
	key.offset = (u64)-1;

	while (1) {
		ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
		if (ret < 0)
			break;
		if (ret > 0) {
			if (path->slots[0] == 0)
				break;
			path->slots[0]--;
		}
process_slot:
		btrfs_item_key_to_cpu(path->nodes[0], &key,
				      path->slots[0]);
		if (key.objectid != ino ||
		    key.type != BTRFS_INODE_REF_KEY)
			break;
		ptr = btrfs_item_ptr_offset(path->nodes[0], path->slots[0]);
		ptr_end = ptr + btrfs_item_size_nr(path->nodes[0],
						   path->slots[0]);
		while (ptr < ptr_end) {
			struct btrfs_inode_ref *ref;

			ref = (struct btrfs_inode_ref *)ptr;
			name_len = btrfs_inode_ref_name_len(path->nodes[0],
							    ref);
			ptr = (unsigned long)(ref + 1) + name_len;
			nlink++;
		}

		if (key.offset == 0)
			break;
		if (path->slots[0] > 0) {
			path->slots[0]--;
			goto process_slot;
		}
		key.offset--;
		btrfs_release_path(path);
	}
	btrfs_release_path(path);

	return nlink;
}
/*
 * There are a few corners where the link count of the file can't
 * be properly maintained during replay.  So, instead of adding
 * lots of complexity to the log code, we just scan the backrefs
 * for any file that has been through replay.
 *
 * The scan will update the link count on the inode to reflect the
 * number of back refs found.  If it goes down to zero, the iput
 * will free the inode.
 */
static noinline int fixup_inode_link_count(struct btrfs_trans_handle *trans,
					   struct btrfs_root *root,
					   struct inode *inode)
{
	struct btrfs_path *path;
	int ret;
	u64 nlink = 0;
	u64 ino = btrfs_ino(BTRFS_I(inode));

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	ret = count_inode_refs(root, BTRFS_I(inode), path);
	if (ret < 0)
		goto out;

	nlink = ret;

	ret = count_inode_extrefs(root, BTRFS_I(inode), path);
	if (ret < 0)
		goto out;

	nlink += ret;

	ret = 0;

	if (nlink != inode->i_nlink) {
		set_nlink(inode, nlink);
		btrfs_update_inode(trans, root, inode);
	}
	BTRFS_I(inode)->index_cnt = (u64)-1;

	if (inode->i_nlink == 0) {
		if (S_ISDIR(inode->i_mode)) {
			ret = replay_dir_deletes(trans, root, NULL, path,
						 ino, 1);
			if (ret)
				goto out;
		}
		ret = insert_orphan_item(trans, root, ino);
	}

out:
	btrfs_free_path(path);
	return ret;
}
static noinline int fixup_inode_link_counts(struct btrfs_trans_handle *trans,
					    struct btrfs_root *root,
					    struct btrfs_path *path)
{
	int ret;
	struct btrfs_key key;
	struct inode *inode;

	key.objectid = BTRFS_TREE_LOG_FIXUP_OBJECTID;
	key.type = BTRFS_ORPHAN_ITEM_KEY;
	key.offset = (u64)-1;
	while (1) {
		ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
		if (ret < 0)
			break;

		if (ret == 1) {
			if (path->slots[0] == 0)
				break;
			path->slots[0]--;
		}

		btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);
		if (key.objectid != BTRFS_TREE_LOG_FIXUP_OBJECTID ||
		    key.type != BTRFS_ORPHAN_ITEM_KEY)
			break;

		ret = btrfs_del_item(trans, root, path);
		if (ret)
			goto out;

		btrfs_release_path(path);
		inode = read_one_inode(root, key.offset);
		if (!inode)
			return -EIO;

		ret = fixup_inode_link_count(trans, root, inode);
		iput(inode);
		if (ret)
			goto out;

		/*
		 * fixup on a directory may create new entries,
		 * make sure we always look for the highest possible
		 * offset
		 */
		key.offset = (u64)-1;
	}
	ret = 0;
out:
	btrfs_release_path(path);
	return ret;
}
/*
 * record a given inode in the fixup dir so we can check its link
 * count when replay is done.  The link count is incremented here
 * so the inode won't go away until we check it
 */
static noinline int link_to_fixup_dir(struct btrfs_trans_handle *trans,
				      struct btrfs_root *root,
				      struct btrfs_path *path,
				      u64 objectid)
{
	struct btrfs_key key;
	int ret = 0;
	struct inode *inode;

	inode = read_one_inode(root, objectid);
	if (!inode)
		return -EIO;

	key.objectid = BTRFS_TREE_LOG_FIXUP_OBJECTID;
	key.type = BTRFS_ORPHAN_ITEM_KEY;
	key.offset = objectid;

	ret = btrfs_insert_empty_item(trans, root, path, &key, 0);

	btrfs_release_path(path);
	if (ret == 0) {
		if (!inode->i_nlink)
			set_nlink(inode, 1);
		else
			inc_nlink(inode);
		ret = btrfs_update_inode(trans, root, inode);
	} else if (ret == -EEXIST) {
		ret = 0;
	} else {
		BUG(); /* Logic Error */
	}
	iput(inode);

	return ret;
}
/*
 * when replaying the log for a directory, we only insert names
 * for inodes that actually exist.  This means an fsync on a directory
 * does not implicitly fsync all the new files in it
 */
static noinline int insert_one_name(struct btrfs_trans_handle *trans,
				    struct btrfs_root *root,
				    u64 dirid, u64 index,
				    char *name, int name_len,
				    struct btrfs_key *location)
{
	struct inode *inode;
	struct inode *dir;
	int ret;

	inode = read_one_inode(root, location->objectid);
	if (!inode)
		return -ENOENT;

	dir = read_one_inode(root, dirid);
	if (!dir) {
		iput(inode);
		return -EIO;
	}

	ret = btrfs_add_link(trans, BTRFS_I(dir), BTRFS_I(inode), name,
			     name_len, 1, index);

	/* FIXME, put inode into FIXUP list */

	iput(inode);
	iput(dir);
	return ret;
}
/*
 * Return true if an inode reference exists in the log for the given name,
 * inode and parent inode.
 */
static bool name_in_log_ref(struct btrfs_root *log_root,
			    const char *name, const int name_len,
			    const u64 dirid, const u64 ino)
{
	struct btrfs_key search_key;

	search_key.objectid = ino;
	search_key.type = BTRFS_INODE_REF_KEY;
	search_key.offset = dirid;
	if (backref_in_log(log_root, &search_key, dirid, name, name_len))
		return true;

	search_key.type = BTRFS_INODE_EXTREF_KEY;
	search_key.offset = btrfs_extref_hash(dirid, name, name_len);
	if (backref_in_log(log_root, &search_key, dirid, name, name_len))
		return true;

	return false;
}
/*
 * take a single entry in a log directory item and replay it into
 * the subvolume.
 *
 * if a conflicting item exists in the subdirectory already,
 * the inode it points to is unlinked and put into the link count
 * fix up tree.
 *
 * If a name from the log points to a file or directory that does
 * not exist in the FS, it is skipped.  fsyncs on directories
 * do not force down inodes inside that directory, just changes to the
 * names or unlinks in a directory.
 *
 * Returns < 0 on error, 0 if the name wasn't replayed (dentry points to a
 * non-existing inode) and 1 if the name was replayed.
 */
static noinline int replay_one_name(struct btrfs_trans_handle *trans,
				    struct btrfs_root *root,
				    struct btrfs_path *path,
				    struct extent_buffer *eb,
				    struct btrfs_dir_item *di,
				    struct btrfs_key *key)
{
	char *name;
	int name_len;
	struct btrfs_dir_item *dst_di;
	struct btrfs_key found_key;
	struct btrfs_key log_key;
	struct inode *dir;
	u8 log_type;
	int exists;
	int ret = 0;
	bool update_size = (key->type == BTRFS_DIR_INDEX_KEY);
	bool name_added = false;

	dir = read_one_inode(root, key->objectid);
	if (!dir)
		return -EIO;

	name_len = btrfs_dir_name_len(eb, di);
	name = kmalloc(name_len, GFP_NOFS);
	if (!name) {
		ret = -ENOMEM;
		goto out;
	}

	log_type = btrfs_dir_type(eb, di);
	read_extent_buffer(eb, name, (unsigned long)(di + 1),
			   name_len);

	btrfs_dir_item_key_to_cpu(eb, di, &log_key);
	exists = btrfs_lookup_inode(trans, root, path, &log_key, 0);
	if (exists == 0)
		exists = 1;
	else
		exists = 0;
	btrfs_release_path(path);

	if (key->type == BTRFS_DIR_ITEM_KEY) {
		dst_di = btrfs_lookup_dir_item(trans, root, path, key->objectid,
					       name, name_len, 1);
	} else if (key->type == BTRFS_DIR_INDEX_KEY) {
		dst_di = btrfs_lookup_dir_index_item(trans, root, path,
						     key->objectid,
						     key->offset, name,
						     name_len, 1);
	} else {
		/* Corruption */
		ret = -EINVAL;
		goto out;
	}
	if (IS_ERR_OR_NULL(dst_di)) {
		/* we need a sequence number to insert, so we only
		 * do inserts for the BTRFS_DIR_INDEX_KEY types
		 */
		if (key->type != BTRFS_DIR_INDEX_KEY)
			goto out;
		goto insert;
	}

	btrfs_dir_item_key_to_cpu(path->nodes[0], dst_di, &found_key);
	/* the existing item matches the logged item */
	if (found_key.objectid == log_key.objectid &&
	    found_key.type == log_key.type &&
	    found_key.offset == log_key.offset &&
	    btrfs_dir_type(path->nodes[0], dst_di) == log_type) {
		update_size = false;
		goto out;
	}

	/*
	 * don't drop the conflicting directory entry if the inode
	 * for the new entry doesn't exist
	 */
	if (!exists)
		goto out;

	ret = drop_one_dir_item(trans, root, path, BTRFS_I(dir), dst_di);
	if (ret)
		goto out;

	if (key->type == BTRFS_DIR_INDEX_KEY)
		goto insert;
out:
	btrfs_release_path(path);
	if (!ret && update_size) {
		btrfs_i_size_write(BTRFS_I(dir), dir->i_size + name_len * 2);
		ret = btrfs_update_inode(trans, root, dir);
	}
	kfree(name);
	iput(dir);
	if (!ret && name_added)
		ret = 1;
	return ret;

insert:
	if (name_in_log_ref(root->log_root, name, name_len,
			    key->objectid, log_key.objectid)) {
		/* The dentry will be added later. */
		ret = 0;
		update_size = false;
		goto out;
	}
	btrfs_release_path(path);
	ret = insert_one_name(trans, root, key->objectid, key->offset,
			      name, name_len, &log_key);
	if (ret && ret != -ENOENT && ret != -EEXIST)
		goto out;
	if (!ret)
		name_added = true;
	update_size = false;
	ret = 0;
	goto out;
}
/*
 * find all the names in a directory item and reconcile them into
 * the subvolume.  Only BTRFS_DIR_ITEM_KEY types will have more than
 * one name in a directory item, but the same code gets used for
 * both directory index types
 */
static noinline int replay_one_dir_item(struct btrfs_trans_handle *trans,
					struct btrfs_root *root,
					struct btrfs_path *path,
					struct extent_buffer *eb, int slot,
					struct btrfs_key *key)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	int ret = 0;
	u32 item_size = btrfs_item_size_nr(eb, slot);
	struct btrfs_dir_item *di;
	int name_len;
	unsigned long ptr;
	unsigned long ptr_end;
	struct btrfs_path *fixup_path = NULL;

	ptr = btrfs_item_ptr_offset(eb, slot);
	ptr_end = ptr + item_size;
	while (ptr < ptr_end) {
		di = (struct btrfs_dir_item *)ptr;
		if (verify_dir_item(fs_info, eb, slot, di))
			return -EIO;
		name_len = btrfs_dir_name_len(eb, di);
		ret = replay_one_name(trans, root, path, eb, di, key);
		if (ret < 0)
			break;
		ptr = (unsigned long)(di + 1);
		ptr += name_len;

		/*
		 * If this entry refers to a non-directory (directories can not
		 * have a link count > 1) and it was added in the transaction
		 * that was not committed, make sure we fixup the link count of
		 * the inode the entry points to. Otherwise something like
		 * the following would result in a directory pointing to an
		 * inode with a wrong link count that does not account for this
		 * dir entry:
		 *
		 * mkdir testdir
		 * touch testdir/foo
		 * touch testdir/bar
		 * sync
		 *
		 * ln testdir/bar testdir/bar_link
		 * ln testdir/foo testdir/foo_link
		 * xfs_io -c "fsync" testdir/bar
		 *
		 * <power failure>
		 *
		 * mount fs, log replay happens
		 *
		 * File foo would remain with a link count of 1 when it has two
		 * entries pointing to it in the directory testdir. This would
		 * make it impossible to ever delete the parent directory, as
		 * it would result in stale dentries that can never be deleted.
		 */
		if (ret == 1 && btrfs_dir_type(eb, di) != BTRFS_FT_DIR) {
			struct btrfs_key di_key;

			if (!fixup_path) {
				fixup_path = btrfs_alloc_path();
				if (!fixup_path) {
					ret = -ENOMEM;
					break;
				}
			}

			btrfs_dir_item_key_to_cpu(eb, di, &di_key);
			ret = link_to_fixup_dir(trans, root, fixup_path,
						di_key.objectid);
			if (ret)
				break;
		}
		ret = 0;
	}
	btrfs_free_path(fixup_path);
	return ret;
}
/*
 * directory replay has two parts.  There are the standard directory
 * items in the log copied from the subvolume, and range items
 * created in the log while the subvolume was logged.
 *
 * The range items tell us which parts of the key space the log
 * is authoritative for.  During replay, if a key in the subvolume
 * directory is in a logged range item, but not actually in the log,
 * that means it was deleted from the directory before the fsync
 * and should be removed.
 */
static noinline int find_dir_range(struct btrfs_root *root,
				   struct btrfs_path *path,
				   u64 dirid, int key_type,
				   u64 *start_ret, u64 *end_ret)
{
	struct btrfs_key key;
	u64 found_end;
	struct btrfs_dir_log_item *item;
	int ret;
	int nritems;

	if (*start_ret == (u64)-1)
		return 1;

	key.objectid = dirid;
	key.type = key_type;
	key.offset = *start_ret;

	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	if (ret < 0)
		goto out;
	if (ret > 0) {
		if (path->slots[0] == 0)
			goto out;
		path->slots[0]--;
	}
	if (ret != 0)
		btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);

	if (key.type != key_type || key.objectid != dirid) {
		ret = 1;
		goto next;
	}
	item = btrfs_item_ptr(path->nodes[0], path->slots[0],
			      struct btrfs_dir_log_item);
	found_end = btrfs_dir_log_end(path->nodes[0], item);

	if (*start_ret >= key.offset && *start_ret <= found_end) {
		ret = 0;
		*start_ret = key.offset;
		*end_ret = found_end;
		goto out;
	}
	ret = 1;
next:
	/* check the next slot in the tree to see if it is a valid item */
	nritems = btrfs_header_nritems(path->nodes[0]);
	path->slots[0]++;
	if (path->slots[0] >= nritems) {
		ret = btrfs_next_leaf(root, path);
		if (ret)
			goto out;
	}

	btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);

	if (key.type != key_type || key.objectid != dirid) {
		ret = 1;
		goto out;
	}
	item = btrfs_item_ptr(path->nodes[0], path->slots[0],
			      struct btrfs_dir_log_item);
	found_end = btrfs_dir_log_end(path->nodes[0], item);
	*start_ret = key.offset;
	*end_ret = found_end;
	ret = 0;
out:
	btrfs_release_path(path);
	return ret;
}
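
/*
 * Example of the range item semantics (illustrative key values): a log
 * item
 *
 *	(dirid DIR_LOG_INDEX 3) with dir_log_end == 7
 *
 * says the log is authoritative for index keys 3..7 of that directory.
 * A subvolume entry at index 5 with no matching item in the log was
 * therefore deleted before the fsync and must be removed during replay.
 */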
/*
 * this looks for a given directory item in the log.  If the directory
 * item is not in the log, the item is removed and the inode it points
 * to is unlinked
 */
static noinline int check_item_in_log(struct btrfs_trans_handle *trans,
				      struct btrfs_root *root,
				      struct btrfs_root *log,
				      struct btrfs_path *path,
				      struct btrfs_path *log_path,
				      struct inode *dir,
				      struct btrfs_key *dir_key)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	int ret;
	struct extent_buffer *eb;
	int slot;
	u32 item_size;
	struct btrfs_dir_item *di;
	struct btrfs_dir_item *log_di;
	int name_len;
	unsigned long ptr;
	unsigned long ptr_end;
	char *name;
	struct inode *inode;
	struct btrfs_key location;

again:
	eb = path->nodes[0];
	slot = path->slots[0];
	item_size = btrfs_item_size_nr(eb, slot);
	ptr = btrfs_item_ptr_offset(eb, slot);
	ptr_end = ptr + item_size;
	while (ptr < ptr_end) {
		di = (struct btrfs_dir_item *)ptr;
		if (verify_dir_item(fs_info, eb, slot, di)) {
			ret = -EIO;
			goto out;
		}

		name_len = btrfs_dir_name_len(eb, di);
		name = kmalloc(name_len, GFP_NOFS);
		if (!name) {
			ret = -ENOMEM;
			goto out;
		}
		read_extent_buffer(eb, name, (unsigned long)(di + 1),
				   name_len);
		log_di = NULL;
		if (log && dir_key->type == BTRFS_DIR_ITEM_KEY) {
			log_di = btrfs_lookup_dir_item(trans, log, log_path,
						       dir_key->objectid,
						       name, name_len, 0);
		} else if (log && dir_key->type == BTRFS_DIR_INDEX_KEY) {
			log_di = btrfs_lookup_dir_index_item(trans, log,
							     log_path,
							     dir_key->objectid,
							     dir_key->offset,
							     name, name_len, 0);
		}
		if (!log_di || (IS_ERR(log_di) && PTR_ERR(log_di) == -ENOENT)) {
			btrfs_dir_item_key_to_cpu(eb, di, &location);
			btrfs_release_path(path);
			btrfs_release_path(log_path);
			inode = read_one_inode(root, location.objectid);
			if (!inode) {
				kfree(name);
				return -EIO;
			}

			ret = link_to_fixup_dir(trans, root,
						path, location.objectid);
			if (ret) {
				kfree(name);
				iput(inode);
				goto out;
			}

			inc_nlink(inode);
			ret = btrfs_unlink_inode(trans, root, BTRFS_I(dir),
					BTRFS_I(inode), name, name_len);
			if (!ret)
				ret = btrfs_run_delayed_items(trans, fs_info);
			kfree(name);
			iput(inode);
			if (ret)
				goto out;

			/* there might still be more names under this key
			 * check and repeat if required
			 */
			ret = btrfs_search_slot(NULL, root, dir_key, path,
						0, 0);
			if (ret == 0)
				goto again;
			ret = 0;
			goto out;
		} else if (IS_ERR(log_di)) {
			kfree(name);
			return PTR_ERR(log_di);
		}
		btrfs_release_path(log_path);
		kfree(name);

		ptr = (unsigned long)(di + 1);
		ptr += name_len;
	}
	ret = 0;
out:
	btrfs_release_path(path);
	btrfs_release_path(log_path);
	return ret;
}
static int replay_xattr_deletes(struct btrfs_trans_handle *trans,
				struct btrfs_root *root,
				struct btrfs_root *log,
				struct btrfs_path *path,
				const u64 ino)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_key search_key;
	struct btrfs_path *log_path;
	int i;
	int nritems;
	int ret;

	log_path = btrfs_alloc_path();
	if (!log_path)
		return -ENOMEM;

	search_key.objectid = ino;
	search_key.type = BTRFS_XATTR_ITEM_KEY;
	search_key.offset = 0;
again:
	ret = btrfs_search_slot(NULL, root, &search_key, path, 0, 0);
	if (ret < 0)
		goto out;
process_leaf:
	nritems = btrfs_header_nritems(path->nodes[0]);
	for (i = path->slots[0]; i < nritems; i++) {
		struct btrfs_key key;
		struct btrfs_dir_item *di;
		struct btrfs_dir_item *log_di;
		u32 total_size;
		u32 cur;

		btrfs_item_key_to_cpu(path->nodes[0], &key, i);
		if (key.objectid != ino || key.type != BTRFS_XATTR_ITEM_KEY) {
			ret = 0;
			goto out;
		}

		di = btrfs_item_ptr(path->nodes[0], i, struct btrfs_dir_item);
		total_size = btrfs_item_size_nr(path->nodes[0], i);
		cur = 0;
		while (cur < total_size) {
			u16 name_len = btrfs_dir_name_len(path->nodes[0], di);
			u16 data_len = btrfs_dir_data_len(path->nodes[0], di);
			u32 this_len = sizeof(*di) + name_len + data_len;
			char *name;

			ret = verify_dir_item(fs_info, path->nodes[0], i, di);
			if (ret) {
				ret = -EIO;
				goto out;
			}

			name = kmalloc(name_len, GFP_NOFS);
			if (!name) {
				ret = -ENOMEM;
				goto out;
			}
			read_extent_buffer(path->nodes[0], name,
					   (unsigned long)(di + 1), name_len);

			log_di = btrfs_lookup_xattr(NULL, log, log_path, ino,
						    name, name_len, 0);
			btrfs_release_path(log_path);
			if (!log_di) {
				/* Doesn't exist in log tree, so delete it. */
				btrfs_release_path(path);
				di = btrfs_lookup_xattr(trans, root, path, ino,
							name, name_len, -1);
				kfree(name);
				if (IS_ERR(di)) {
					ret = PTR_ERR(di);
					goto out;
				}
				ASSERT(di);
				ret = btrfs_delete_one_dir_name(trans, root,
								path, di);
				if (ret)
					goto out;
				btrfs_release_path(path);
				search_key = key;
				goto again;
			}
			kfree(name);
			if (IS_ERR(log_di)) {
				ret = PTR_ERR(log_di);
				goto out;
			}
			cur += this_len;
			di = (struct btrfs_dir_item *)((char *)di + this_len);
		}
	}
	ret = btrfs_next_leaf(root, path);
	if (ret > 0)
		ret = 0;
	else if (ret == 0)
		goto process_leaf;
out:
	btrfs_free_path(log_path);
	btrfs_release_path(path);
	return ret;
}
/*
 * deletion replay happens before we copy any new directory items
 * out of the log or out of backreferences from inodes.  It
 * scans the log to find ranges of keys that the log is authoritative for,
 * and then scans the directory to find items in those ranges that are
 * not present in the log.
 *
 * Anything we don't find in the log is unlinked and removed from the
 * directory.
 */
static noinline int replay_dir_deletes(struct btrfs_trans_handle *trans,
				       struct btrfs_root *root,
				       struct btrfs_root *log,
				       struct btrfs_path *path,
				       u64 dirid, int del_all)
{
	u64 range_start;
	u64 range_end;
	int key_type = BTRFS_DIR_LOG_ITEM_KEY;
	int ret = 0;
	struct btrfs_key dir_key;
	struct btrfs_key found_key;
	struct btrfs_path *log_path;
	struct inode *dir;

	dir_key.objectid = dirid;
	dir_key.type = BTRFS_DIR_ITEM_KEY;
	log_path = btrfs_alloc_path();
	if (!log_path)
		return -ENOMEM;

	dir = read_one_inode(root, dirid);
	/* it isn't an error if the inode isn't there, that can happen
	 * because we replay the deletes before we copy in the inode item
	 * from the log
	 */
	if (!dir) {
		btrfs_free_path(log_path);
		return 0;
	}
again:
	range_start = 0;
	range_end = 0;
	while (1) {
		if (del_all)
			range_end = (u64)-1;
		else {
			ret = find_dir_range(log, path, dirid, key_type,
					     &range_start, &range_end);
			if (ret != 0)
				break;
		}

		dir_key.offset = range_start;
		while (1) {
			int nritems;

			ret = btrfs_search_slot(NULL, root, &dir_key, path,
						0, 0);
			if (ret < 0)
				goto out;

			nritems = btrfs_header_nritems(path->nodes[0]);
			if (path->slots[0] >= nritems) {
				ret = btrfs_next_leaf(root, path);
				if (ret)
					break;
			}
			btrfs_item_key_to_cpu(path->nodes[0], &found_key,
					      path->slots[0]);
			if (found_key.objectid != dirid ||
			    found_key.type != dir_key.type)
				goto next_type;

			if (found_key.offset > range_end)
				break;

			ret = check_item_in_log(trans, root, log, path,
						log_path, dir,
						&found_key);
			if (ret)
				goto out;
			if (found_key.offset == (u64)-1)
				break;
			dir_key.offset = found_key.offset + 1;
		}
		btrfs_release_path(path);
		if (range_end == (u64)-1)
			break;
		range_start = range_end + 1;
	}

next_type:
	ret = 0;
	if (key_type == BTRFS_DIR_LOG_ITEM_KEY) {
		key_type = BTRFS_DIR_LOG_INDEX_KEY;
		dir_key.type = BTRFS_DIR_INDEX_KEY;
		btrfs_release_path(path);
		goto again;
	}
out:
	btrfs_release_path(path);
	btrfs_free_path(log_path);
	iput(dir);
	return ret;
}
/*
 * the process_func used to replay items from the log tree.  This
 * gets called in two different stages.  The first stage just looks
 * for inodes and makes sure they are all copied into the subvolume.
 *
 * The second stage copies all the other item types from the log into
 * the subvolume.  The two stage approach is slower, but gets rid of
 * lots of complexity around inodes referencing other inodes that exist
 * only in the log (references come from either directory items or inode
 * back refs).
 */
static int replay_one_buffer(struct btrfs_root *log, struct extent_buffer *eb,
			     struct walk_control *wc, u64 gen)
{
	int nritems;
	struct btrfs_path *path;
	struct btrfs_root *root = wc->replay_dest;
	struct btrfs_key key;
	int level;
	int i;
	int ret;

	ret = btrfs_read_buffer(eb, gen);
	if (ret)
		return ret;

	level = btrfs_header_level(eb);

	if (level != 0)
		return 0;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	nritems = btrfs_header_nritems(eb);
	for (i = 0; i < nritems; i++) {
		btrfs_item_key_to_cpu(eb, &key, i);

		/* inode keys are done during the first stage */
		if (key.type == BTRFS_INODE_ITEM_KEY &&
		    wc->stage == LOG_WALK_REPLAY_INODES) {
			struct btrfs_inode_item *inode_item;
			u32 mode;

			inode_item = btrfs_item_ptr(eb, i,
					    struct btrfs_inode_item);
			ret = replay_xattr_deletes(wc->trans, root, log,
						   path, key.objectid);
			if (ret)
				break;
			mode = btrfs_inode_mode(eb, inode_item);
			if (S_ISDIR(mode)) {
				ret = replay_dir_deletes(wc->trans,
					 root, log, path, key.objectid, 0);
				if (ret)
					break;
			}
			ret = overwrite_item(wc->trans, root, path,
					     eb, i, &key);
			if (ret)
				break;

			/* for regular files, make sure corresponding
			 * orphan item exist. extents past the new EOF
			 * will be truncated later by orphan cleanup.
			 */
			if (S_ISREG(mode)) {
				ret = insert_orphan_item(wc->trans, root,
							 key.objectid);
				if (ret)
					break;
			}

			ret = link_to_fixup_dir(wc->trans, root,
						path, key.objectid);
			if (ret)
				break;
		}

		if (key.type == BTRFS_DIR_INDEX_KEY &&
		    wc->stage == LOG_WALK_REPLAY_DIR_INDEX) {
			ret = replay_one_dir_item(wc->trans, root, path,
						  eb, i, &key);
			if (ret)
				break;
		}

		if (wc->stage < LOG_WALK_REPLAY_ALL)
			continue;

		/* these keys are simply copied */
		if (key.type == BTRFS_XATTR_ITEM_KEY) {
			ret = overwrite_item(wc->trans, root, path,
					     eb, i, &key);
			if (ret)
				break;
		} else if (key.type == BTRFS_INODE_REF_KEY ||
			   key.type == BTRFS_INODE_EXTREF_KEY) {
			ret = add_inode_ref(wc->trans, root, log, path,
					    eb, i, &key);
			if (ret && ret != -ENOENT)
				break;
			ret = 0;
		} else if (key.type == BTRFS_EXTENT_DATA_KEY) {
			ret = replay_one_extent(wc->trans, root, path,
						eb, i, &key);
			if (ret)
				break;
		} else if (key.type == BTRFS_DIR_ITEM_KEY) {
			ret = replay_one_dir_item(wc->trans, root, path,
						  eb, i, &key);
			if (ret)
				break;
		}
	}
	btrfs_free_path(path);
	return ret;
}
static noinline int walk_down_log_tree(struct btrfs_trans_handle *trans,
				       struct btrfs_root *root,
				       struct btrfs_path *path, int *level,
				       struct walk_control *wc)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	u64 root_owner;
	u64 bytenr;
	u64 ptr_gen;
	struct extent_buffer *next;
	struct extent_buffer *cur;
	struct extent_buffer *parent;
	u32 blocksize;
	int ret = 0;

	WARN_ON(*level < 0);
	WARN_ON(*level >= BTRFS_MAX_LEVEL);

	while (*level > 0) {
		WARN_ON(*level < 0);
		WARN_ON(*level >= BTRFS_MAX_LEVEL);
		cur = path->nodes[*level];

		WARN_ON(btrfs_header_level(cur) != *level);

		if (path->slots[*level] >=
		    btrfs_header_nritems(cur))
			break;

		bytenr = btrfs_node_blockptr(cur, path->slots[*level]);
		ptr_gen = btrfs_node_ptr_generation(cur, path->slots[*level]);
		blocksize = fs_info->nodesize;

		parent = path->nodes[*level];
		root_owner = btrfs_header_owner(parent);

		next = btrfs_find_create_tree_block(fs_info, bytenr);
		if (IS_ERR(next))
			return PTR_ERR(next);

		if (*level == 1) {
			ret = wc->process_func(root, next, wc, ptr_gen);
			if (ret) {
				free_extent_buffer(next);
				return ret;
			}

			path->slots[*level]++;
			if (wc->free) {
				ret = btrfs_read_buffer(next, ptr_gen);
				if (ret) {
					free_extent_buffer(next);
					return ret;
				}

				if (trans) {
					btrfs_tree_lock(next);
					btrfs_set_lock_blocking(next);
					clean_tree_block(fs_info, next);
					btrfs_wait_tree_block_writeback(next);
					btrfs_tree_unlock(next);
				}

				WARN_ON(root_owner !=
					BTRFS_TREE_LOG_OBJECTID);
				ret = btrfs_free_and_pin_reserved_extent(
							fs_info, bytenr,
							blocksize);
				if (ret) {
					free_extent_buffer(next);
					return ret;
				}
			}
			free_extent_buffer(next);
			continue;
		}
		ret = btrfs_read_buffer(next, ptr_gen);
		if (ret) {
			free_extent_buffer(next);
			return ret;
		}

		WARN_ON(*level <= 0);
		if (path->nodes[*level-1])
			free_extent_buffer(path->nodes[*level-1]);
		path->nodes[*level-1] = next;
		*level = btrfs_header_level(next);
		path->slots[*level] = 0;
		cond_resched();
	}
	WARN_ON(*level < 0);
	WARN_ON(*level >= BTRFS_MAX_LEVEL);

	path->slots[*level] = btrfs_header_nritems(path->nodes[*level]);

	cond_resched();
	return 0;
}
static noinline int walk_up_log_tree(struct btrfs_trans_handle *trans,
				 struct btrfs_root *root,
				 struct btrfs_path *path, int *level,
				 struct walk_control *wc)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	u64 root_owner;
	int i;
	int slot;
	int ret;

	for (i = *level; i < BTRFS_MAX_LEVEL - 1 && path->nodes[i]; i++) {
		slot = path->slots[i];
		if (slot + 1 < btrfs_header_nritems(path->nodes[i])) {
			path->slots[i]++;
			*level = i;
			WARN_ON(*level == 0);
			return 0;
		} else {
			struct extent_buffer *parent;
			if (path->nodes[*level] == root->node)
				parent = path->nodes[*level];
			else
				parent = path->nodes[*level + 1];

			root_owner = btrfs_header_owner(parent);
			ret = wc->process_func(root, path->nodes[*level], wc,
				 btrfs_header_generation(path->nodes[*level]));
			if (ret)
				return ret;

			if (wc->free) {
				struct extent_buffer *next;

				next = path->nodes[*level];

				if (trans) {
					btrfs_tree_lock(next);
					btrfs_set_lock_blocking(next);
					clean_tree_block(fs_info, next);
					btrfs_wait_tree_block_writeback(next);
					btrfs_tree_unlock(next);
				}

				WARN_ON(root_owner != BTRFS_TREE_LOG_OBJECTID);
				ret = btrfs_free_and_pin_reserved_extent(
						fs_info,
						path->nodes[*level]->start,
						path->nodes[*level]->len);
				if (ret)
					return ret;
			}
			free_extent_buffer(path->nodes[*level]);
			path->nodes[*level] = NULL;
			*level = i + 1;
		}
	}
	return 1;
}
/*
 * drop the reference count on the tree rooted at 'snap'.  This traverses
 * the tree freeing any blocks that have a ref count of zero after being
 * decremented.
 */
static int walk_log_tree(struct btrfs_trans_handle *trans,
			 struct btrfs_root *log, struct walk_control *wc)
{
	struct btrfs_fs_info *fs_info = log->fs_info;
	int ret = 0;
	int wret;
	int level;
	struct btrfs_path *path;
	int orig_level;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	level = btrfs_header_level(log->node);
	orig_level = level;
	path->nodes[level] = log->node;
	extent_buffer_get(log->node);
	path->slots[level] = 0;

	while (1) {
		wret = walk_down_log_tree(trans, log, path, &level, wc);
		if (wret > 0)
			break;
		if (wret < 0) {
			ret = wret;
			goto out;
		}

		wret = walk_up_log_tree(trans, log, path, &level, wc);
		if (wret > 0)
			break;
		if (wret < 0) {
			ret = wret;
			goto out;
		}
	}

	/* was the root node processed? if not, catch it here */
	if (path->nodes[orig_level]) {
		ret = wc->process_func(log, path->nodes[orig_level], wc,
			 btrfs_header_generation(path->nodes[orig_level]));
		if (ret)
			goto out;
		if (wc->free) {
			struct extent_buffer *next;

			next = path->nodes[orig_level];

			if (trans) {
				btrfs_tree_lock(next);
				btrfs_set_lock_blocking(next);
				clean_tree_block(fs_info, next);
				btrfs_wait_tree_block_writeback(next);
				btrfs_tree_unlock(next);
			}

			WARN_ON(log->root_key.objectid !=
				BTRFS_TREE_LOG_OBJECTID);
			ret = btrfs_free_and_pin_reserved_extent(fs_info,
							next->start, next->len);
			if (ret)
				goto out;
		}
	}

out:
	btrfs_free_path(path);
	return ret;
}
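/*
 * Illustrative sketch (not from the original source): walk_log_tree() is
 * driven entirely by the process_func callback in struct walk_control.
 * free_log_tree() below is one real caller; a hypothetical caller that only
 * wanted to pin log blocks during the first replay stage might look like:
 *
 *	struct walk_control wc = {
 *		.pin = 1,
 *		.process_func = process_one_buffer,
 *		.stage = LOG_WALK_PIN_ONLY,
 *	};
 *	int ret = walk_log_tree(trans, log, &wc);
 *
 * Field names other than process_func are assumptions based on how this
 * file uses walk_control elsewhere.
 */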
/*
 * helper function to update the item for a given subvolume's log root
 * in the tree of log roots
 */
static int update_log_root(struct btrfs_trans_handle *trans,
			   struct btrfs_root *log)
{
	struct btrfs_fs_info *fs_info = log->fs_info;
	int ret;

	if (log->log_transid == 1) {
		/* insert root item on the first sync */
		ret = btrfs_insert_root(trans, fs_info->log_root_tree,
				&log->root_key, &log->root_item);
	} else {
		ret = btrfs_update_root(trans, fs_info->log_root_tree,
				&log->root_key, &log->root_item);
	}
	return ret;
}
static void wait_log_commit(struct btrfs_root *root, int transid)
{
	DEFINE_WAIT(wait);
	int index = transid % 2;

	/*
	 * we only allow two pending log transactions at a time,
	 * so we know that if ours is more than 2 older than the
	 * current transaction, we're done
	 */
	for (;;) {
		prepare_to_wait(&root->log_commit_wait[index],
				&wait, TASK_UNINTERRUPTIBLE);

		if (!(root->log_transid_committed < transid &&
		      atomic_read(&root->log_commit[index])))
			break;

		mutex_unlock(&root->log_mutex);
		schedule();
		mutex_lock(&root->log_mutex);
	}
	finish_wait(&root->log_commit_wait[index], &wait);
}
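/*
 * Worked example (added for clarity, not in the original): because at most
 * two log transactions can be in flight, a transid maps onto one of two
 * commit slots via "transid % 2". If transid 7 is committing in slot 1 and
 * a waiter arrives for transid 9 (also slot 1), the loop above sees that
 * root->log_transid_committed has not reached 9 while log_commit[1] is set,
 * and sleeps; once transid 7 finishes, log_transid_committed advances and
 * the waiter re-evaluates. E.g.:
 *
 *	index = 9 % 2;		// slot 1, shared with transid 7
 *	// sleep until !(log_transid_committed < 9 && log_commit[1])
 */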
static void wait_for_writer(struct btrfs_root *root)
{
	DEFINE_WAIT(wait);

	for (;;) {
		prepare_to_wait(&root->log_writer_wait, &wait,
				TASK_UNINTERRUPTIBLE);
		if (!atomic_read(&root->log_writers))
			break;

		mutex_unlock(&root->log_mutex);
		schedule();
		mutex_lock(&root->log_mutex);
	}
	finish_wait(&root->log_writer_wait, &wait);
}
static inline void btrfs_remove_log_ctx(struct btrfs_root *root,
					struct btrfs_log_ctx *ctx)
{
	if (!ctx)
		return;

	mutex_lock(&root->log_mutex);
	list_del_init(&ctx->list);
	mutex_unlock(&root->log_mutex);
}
/*
 * Invoked in log mutex context, or the caller must otherwise make sure
 * no other task can access the list.
 */
static inline void btrfs_remove_all_log_ctxs(struct btrfs_root *root,
					     int index, int error)
{
	struct btrfs_log_ctx *ctx;
	struct btrfs_log_ctx *safe;

	list_for_each_entry_safe(ctx, safe, &root->log_ctxs[index], list) {
		list_del_init(&ctx->list);
		ctx->log_ret = error;
	}

	INIT_LIST_HEAD(&root->log_ctxs[index]);
}
/*
 * btrfs_sync_log sends a given tree log down to the disk and
 * updates the super blocks to record it.  When this call is done,
 * you know that any inodes previously logged are safely on disk only
 * if it returns 0.
 *
 * Any other return value means you need to call btrfs_commit_transaction.
 * Some of the edge cases for fsyncing directories that have had unlinks
 * or renames done in the past mean that sometimes the only safe
 * fsync is to commit the whole FS.  When btrfs_sync_log returns -EAGAIN,
 * that has happened.
 */
int btrfs_sync_log(struct btrfs_trans_handle *trans,
		   struct btrfs_root *root, struct btrfs_log_ctx *ctx)
{
	int index1;
	int index2;
	int mark;
	int ret;
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_root *log = root->log_root;
	struct btrfs_root *log_root_tree = fs_info->log_root_tree;
	int log_transid = 0;
	struct btrfs_log_ctx root_log_ctx;
	struct blk_plug plug;

	mutex_lock(&root->log_mutex);
	log_transid = ctx->log_transid;
	if (root->log_transid_committed >= log_transid) {
		mutex_unlock(&root->log_mutex);
		return ctx->log_ret;
	}

	index1 = log_transid % 2;
	if (atomic_read(&root->log_commit[index1])) {
		wait_log_commit(root, log_transid);
		mutex_unlock(&root->log_mutex);
		return ctx->log_ret;
	}
	ASSERT(log_transid == root->log_transid);
	atomic_set(&root->log_commit[index1], 1);

	/* wait for previous tree log sync to complete */
	if (atomic_read(&root->log_commit[(index1 + 1) % 2]))
		wait_log_commit(root, log_transid - 1);

	while (1) {
		int batch = atomic_read(&root->log_batch);
		/* when we're on an ssd, just kick the log commit out */
		if (!btrfs_test_opt(fs_info, SSD) &&
		    test_bit(BTRFS_ROOT_MULTI_LOG_TASKS, &root->state)) {
			mutex_unlock(&root->log_mutex);
			schedule_timeout_uninterruptible(1);
			mutex_lock(&root->log_mutex);
		}
		wait_for_writer(root);
		if (batch == atomic_read(&root->log_batch))
			break;
	}

	/* bail out if we need to do a full commit */
	if (btrfs_need_log_full_commit(fs_info, trans)) {
		ret = -EAGAIN;
		btrfs_free_logged_extents(log, log_transid);
		mutex_unlock(&root->log_mutex);
		goto out;
	}

	if (log_transid % 2 == 0)
		mark = EXTENT_DIRTY;
	else
		mark = EXTENT_NEW;

	/* we start IO on all the marked extents here, but we don't actually
	 * wait for them until later.
	 */
	blk_start_plug(&plug);
	ret = btrfs_write_marked_extents(fs_info, &log->dirty_log_pages, mark);
	if (ret) {
		blk_finish_plug(&plug);
		btrfs_abort_transaction(trans, ret);
		btrfs_free_logged_extents(log, log_transid);
		btrfs_set_log_full_commit(fs_info, trans);
		mutex_unlock(&root->log_mutex);
		goto out;
	}

	btrfs_set_root_node(&log->root_item, log->node);

	root->log_transid++;
	log->log_transid = root->log_transid;
	root->log_start_pid = 0;
	/*
	 * IO has been started, blocks of the log tree have WRITTEN flag set
	 * in their headers. new modifications of the log will be written to
	 * new positions. so it's safe to allow log writers to go in.
	 */
	mutex_unlock(&root->log_mutex);

	btrfs_init_log_ctx(&root_log_ctx, NULL);

	mutex_lock(&log_root_tree->log_mutex);
	atomic_inc(&log_root_tree->log_batch);
	atomic_inc(&log_root_tree->log_writers);

	index2 = log_root_tree->log_transid % 2;
	list_add_tail(&root_log_ctx.list, &log_root_tree->log_ctxs[index2]);
	root_log_ctx.log_transid = log_root_tree->log_transid;

	mutex_unlock(&log_root_tree->log_mutex);

	ret = update_log_root(trans, log);

	mutex_lock(&log_root_tree->log_mutex);
	if (atomic_dec_and_test(&log_root_tree->log_writers)) {
		/*
		 * Implicit memory barrier after atomic_dec_and_test
		 */
		if (waitqueue_active(&log_root_tree->log_writer_wait))
			wake_up(&log_root_tree->log_writer_wait);
	}

	if (ret) {
		if (!list_empty(&root_log_ctx.list))
			list_del_init(&root_log_ctx.list);

		blk_finish_plug(&plug);
		btrfs_set_log_full_commit(fs_info, trans);

		if (ret != -ENOSPC) {
			btrfs_abort_transaction(trans, ret);
			mutex_unlock(&log_root_tree->log_mutex);
			goto out;
		}
		btrfs_wait_tree_log_extents(log, mark);
		btrfs_free_logged_extents(log, log_transid);
		mutex_unlock(&log_root_tree->log_mutex);
		ret = -EAGAIN;
		goto out;
	}

	if (log_root_tree->log_transid_committed >= root_log_ctx.log_transid) {
		blk_finish_plug(&plug);
		list_del_init(&root_log_ctx.list);
		mutex_unlock(&log_root_tree->log_mutex);
		ret = root_log_ctx.log_ret;
		goto out;
	}

	index2 = root_log_ctx.log_transid % 2;
	if (atomic_read(&log_root_tree->log_commit[index2])) {
		blk_finish_plug(&plug);
		ret = btrfs_wait_tree_log_extents(log, mark);
		btrfs_wait_logged_extents(trans, log, log_transid);
		wait_log_commit(log_root_tree,
				root_log_ctx.log_transid);
		mutex_unlock(&log_root_tree->log_mutex);
		if (!ret)
			ret = root_log_ctx.log_ret;
		goto out;
	}
	ASSERT(root_log_ctx.log_transid == log_root_tree->log_transid);
	atomic_set(&log_root_tree->log_commit[index2], 1);

	if (atomic_read(&log_root_tree->log_commit[(index2 + 1) % 2])) {
		wait_log_commit(log_root_tree,
				root_log_ctx.log_transid - 1);
	}

	wait_for_writer(log_root_tree);

	/*
	 * now that we've moved on to the tree of log tree roots,
	 * check the full commit flag again
	 */
	if (btrfs_need_log_full_commit(fs_info, trans)) {
		blk_finish_plug(&plug);
		btrfs_wait_tree_log_extents(log, mark);
		btrfs_free_logged_extents(log, log_transid);
		mutex_unlock(&log_root_tree->log_mutex);
		ret = -EAGAIN;
		goto out_wake_log_root;
	}

	ret = btrfs_write_marked_extents(fs_info,
					 &log_root_tree->dirty_log_pages,
					 EXTENT_DIRTY | EXTENT_NEW);
	blk_finish_plug(&plug);
	if (ret) {
		btrfs_set_log_full_commit(fs_info, trans);
		btrfs_abort_transaction(trans, ret);
		btrfs_free_logged_extents(log, log_transid);
		mutex_unlock(&log_root_tree->log_mutex);
		goto out_wake_log_root;
	}
	ret = btrfs_wait_tree_log_extents(log, mark);
	if (!ret)
		ret = btrfs_wait_tree_log_extents(log_root_tree,
						  EXTENT_NEW | EXTENT_DIRTY);
	if (ret) {
		btrfs_set_log_full_commit(fs_info, trans);
		btrfs_free_logged_extents(log, log_transid);
		mutex_unlock(&log_root_tree->log_mutex);
		goto out_wake_log_root;
	}
	btrfs_wait_logged_extents(trans, log, log_transid);

	btrfs_set_super_log_root(fs_info->super_for_commit,
				 log_root_tree->node->start);
	btrfs_set_super_log_root_level(fs_info->super_for_commit,
				       btrfs_header_level(log_root_tree->node));

	log_root_tree->log_transid++;
	mutex_unlock(&log_root_tree->log_mutex);

	/*
	 * nobody else is going to jump in and write the ctree
	 * super here because the log_commit atomic below is protecting
	 * us.  We must be called with a transaction handle pinning
	 * the running transaction open, so a full commit can't hop
	 * in and cause problems either.
	 */
	ret = write_all_supers(fs_info, 1);
	if (ret) {
		btrfs_set_log_full_commit(fs_info, trans);
		btrfs_abort_transaction(trans, ret);
		goto out_wake_log_root;
	}

	mutex_lock(&root->log_mutex);
	if (root->last_log_commit < log_transid)
		root->last_log_commit = log_transid;
	mutex_unlock(&root->log_mutex);

out_wake_log_root:
	mutex_lock(&log_root_tree->log_mutex);
	btrfs_remove_all_log_ctxs(log_root_tree, index2, ret);

	log_root_tree->log_transid_committed++;
	atomic_set(&log_root_tree->log_commit[index2], 0);
	mutex_unlock(&log_root_tree->log_mutex);

	/*
	 * The barrier before waitqueue_active is implied by mutex_unlock
	 */
	if (waitqueue_active(&log_root_tree->log_commit_wait[index2]))
		wake_up(&log_root_tree->log_commit_wait[index2]);
out:
	mutex_lock(&root->log_mutex);
	btrfs_remove_all_log_ctxs(root, index1, ret);
	root->log_transid_committed++;
	atomic_set(&root->log_commit[index1], 0);
	mutex_unlock(&root->log_mutex);

	/*
	 * The barrier before waitqueue_active is implied by mutex_unlock
	 */
	if (waitqueue_active(&root->log_commit_wait[index1]))
		wake_up(&root->log_commit_wait[index1]);
	return ret;
}
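/*
 * Illustrative caller sketch (not part of the original file): per the
 * comment above btrfs_sync_log(), a return of -EAGAIN means the log could
 * not safely cover this fsync and the whole transaction must be committed.
 * A simplified fsync-side pattern, with error handling trimmed, might be:
 *
 *	ret = btrfs_sync_log(trans, root, &ctx);
 *	if (ret == 0)
 *		ret = btrfs_end_transaction(trans);	// log alone sufficed
 *	else
 *		ret = btrfs_commit_transaction(trans);	// full commit fallback
 *
 * The real logic lives in btrfs_sync_file() in file.c and handles several
 * more cases; the two calls shown are real APIs, but the control flow here
 * is a simplification.
 */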
static void free_log_tree(struct btrfs_trans_handle *trans,
			  struct btrfs_root *log)
{
	int ret;
	u64 start;
	u64 end;
	struct walk_control wc = {
		.free = 1,
		.process_func = process_one_buffer
	};

	ret = walk_log_tree(trans, log, &wc);
	/* I don't think this can happen but just in case */
	if (ret)
		btrfs_abort_transaction(trans, ret);

	while (1) {
		ret = find_first_extent_bit(&log->dirty_log_pages,
				0, &start, &end, EXTENT_DIRTY | EXTENT_NEW,
				NULL);
		if (ret)
			break;

		clear_extent_bits(&log->dirty_log_pages, start, end,
				  EXTENT_DIRTY | EXTENT_NEW);
	}

	/*
	 * We may have short-circuited the log tree with the full commit logic
	 * and left ordered extents on our list, so clear these out to keep us
	 * from leaking inodes and memory.
	 */
	btrfs_free_logged_extents(log, 0);
	btrfs_free_logged_extents(log, 1);

	free_extent_buffer(log->node);
	kfree(log);
}
/*
 * free all the extents used by the tree log.  This should be called
 * at commit time of the full transaction
 */
int btrfs_free_log(struct btrfs_trans_handle *trans, struct btrfs_root *root)
{
	if (root->log_root) {
		free_log_tree(trans, root->log_root);
		root->log_root = NULL;
	}
	return 0;
}
int btrfs_free_log_root_tree(struct btrfs_trans_handle *trans,
			     struct btrfs_fs_info *fs_info)
{
	if (fs_info->log_root_tree) {
		free_log_tree(trans, fs_info->log_root_tree);
		fs_info->log_root_tree = NULL;
	}
	return 0;
}
/*
 * If both a file and directory are logged, and unlinks or renames are
 * mixed in, we have a few interesting corners:
 *
 * create file X in dir Y
 * link file X to X.link in dir Y
 * fsync file X
 * unlink file X but leave X.link
 * fsync dir Y
 *
 * After a crash we would expect only X.link to exist.  But file X
 * didn't get fsync'd again so the log has back refs for X and X.link.
 *
 * We solve this by removing directory entries and inode backrefs from the
 * log when a file that was logged in the current transaction is
 * unlinked.  Any later fsync will include the updated log entries, and
 * we'll be able to reconstruct the proper directory items from backrefs.
 *
 * This optimization allows us to avoid relogging the entire inode
 * or the entire directory.
 */
int btrfs_del_dir_entries_in_log(struct btrfs_trans_handle *trans,
				 struct btrfs_root *root,
				 const char *name, int name_len,
				 struct btrfs_inode *dir, u64 index)
{
	struct btrfs_root *log;
	struct btrfs_dir_item *di;
	struct btrfs_path *path;
	int ret;
	int err = 0;
	int bytes_del = 0;
	u64 dir_ino = btrfs_ino(dir);

	if (dir->logged_trans < trans->transid)
		return 0;

	ret = join_running_log_trans(root);
	if (ret)
		return 0;

	mutex_lock(&dir->log_mutex);

	log = root->log_root;
	path = btrfs_alloc_path();
	if (!path) {
		err = -ENOMEM;
		goto out_unlock;
	}

	di = btrfs_lookup_dir_item(trans, log, path, dir_ino,
				   name, name_len, -1);
	if (IS_ERR(di)) {
		err = PTR_ERR(di);
		goto fail;
	}
	if (di) {
		ret = btrfs_delete_one_dir_name(trans, log, path, di);
		bytes_del += name_len;
		if (ret) {
			err = ret;
			goto fail;
		}
	}
	btrfs_release_path(path);
	di = btrfs_lookup_dir_index_item(trans, log, path, dir_ino,
					 index, name, name_len, -1);
	if (IS_ERR(di)) {
		err = PTR_ERR(di);
		goto fail;
	}
	if (di) {
		ret = btrfs_delete_one_dir_name(trans, log, path, di);
		bytes_del += name_len;
		if (ret) {
			err = ret;
			goto fail;
		}
	}

	/* update the directory size in the log to reflect the names
	 * we have removed
	 */
	if (bytes_del) {
		struct btrfs_key key;

		key.objectid = dir_ino;
		key.offset = 0;
		key.type = BTRFS_INODE_ITEM_KEY;
		btrfs_release_path(path);

		ret = btrfs_search_slot(trans, log, &key, path, 0, 1);
		if (ret < 0) {
			err = ret;
			goto fail;
		}
		if (ret == 0) {
			struct btrfs_inode_item *item;
			u64 i_size;

			item = btrfs_item_ptr(path->nodes[0], path->slots[0],
					      struct btrfs_inode_item);
			i_size = btrfs_inode_size(path->nodes[0], item);
			if (i_size > bytes_del)
				i_size -= bytes_del;
			else
				i_size = 0;
			btrfs_set_inode_size(path->nodes[0], item, i_size);
			btrfs_mark_buffer_dirty(path->nodes[0]);
		} else
			ret = 0;
		btrfs_release_path(path);
	}
fail:
	btrfs_free_path(path);
out_unlock:
	mutex_unlock(&dir->log_mutex);
	if (ret == -ENOSPC) {
		btrfs_set_log_full_commit(root->fs_info, trans);
		ret = 0;
	} else if (ret < 0)
		btrfs_abort_transaction(trans, ret);

	btrfs_end_log_trans(root);

	return err;
}
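/*
 * Worked example (added for clarity, not in the original): the bytes_del
 * accounting above mirrors how directory i_size is maintained in btrfs,
 * where each name contributes its length to the directory size. Deleting
 * the name "foobar" (name_len == 6) that was present as both a DIR_ITEM
 * and a DIR_INDEX in the log therefore subtracts 12 from the logged
 * directory inode:
 *
 *	bytes_del = 6 + 6;	// both log entries removed
 *	i_size = i_size > bytes_del ? i_size - bytes_del : 0;
 */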
/* see comments for btrfs_del_dir_entries_in_log */
int btrfs_del_inode_ref_in_log(struct btrfs_trans_handle *trans,
			       struct btrfs_root *root,
			       const char *name, int name_len,
			       struct btrfs_inode *inode, u64 dirid)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_root *log;
	u64 index;
	int ret;

	if (inode->logged_trans < trans->transid)
		return 0;

	ret = join_running_log_trans(root);
	if (ret)
		return 0;
	log = root->log_root;
	mutex_lock(&inode->log_mutex);

	ret = btrfs_del_inode_ref(trans, log, name, name_len, btrfs_ino(inode),
				  dirid, &index);
	mutex_unlock(&inode->log_mutex);
	if (ret == -ENOSPC) {
		btrfs_set_log_full_commit(fs_info, trans);
		ret = 0;
	} else if (ret < 0 && ret != -ENOENT)
		btrfs_abort_transaction(trans, ret);
	btrfs_end_log_trans(root);

	return ret;
}
/*
 * creates a range item in the log for 'dirid'.  first_offset and
 * last_offset tell us which parts of the key space the log should
 * be considered authoritative for.
 */
static noinline int insert_dir_log_key(struct btrfs_trans_handle *trans,
				       struct btrfs_root *log,
				       struct btrfs_path *path,
				       int key_type, u64 dirid,
				       u64 first_offset, u64 last_offset)
{
	int ret;
	struct btrfs_key key;
	struct btrfs_dir_log_item *item;

	key.objectid = dirid;
	key.offset = first_offset;
	if (key_type == BTRFS_DIR_ITEM_KEY)
		key.type = BTRFS_DIR_LOG_ITEM_KEY;
	else
		key.type = BTRFS_DIR_LOG_INDEX_KEY;
	ret = btrfs_insert_empty_item(trans, log, path, &key, sizeof(*item));
	if (ret)
		return ret;

	item = btrfs_item_ptr(path->nodes[0], path->slots[0],
			      struct btrfs_dir_log_item);
	btrfs_set_dir_log_end(path->nodes[0], item, last_offset);
	btrfs_mark_buffer_dirty(path->nodes[0]);
	btrfs_release_path(path);
	return 0;
}
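/*
 * Illustrative sketch (not part of the original file): a dir log range item
 * is a claim that the log is authoritative for every key offset in
 * [first_offset, last_offset] of the given dirid. During replay,
 * replay_dir_deletes() walks that range and deletes any subvolume entry
 * that has no match in the log. A hypothetical insertion covering index
 * offsets 2 through 57 of directory 256 would be:
 *
 *	ret = insert_dir_log_key(trans, log, path, BTRFS_DIR_INDEX_KEY,
 *				 256, 2, 57);
 *
 * The values are made up for illustration; real callers derive them from
 * the keys actually copied in log_dir_items() below.
 */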
/*
 * log all the items included in the current transaction for a given
 * directory.  This also creates the range items in the log tree required
 * to replay anything deleted before the fsync
 */
static noinline int log_dir_items(struct btrfs_trans_handle *trans,
			  struct btrfs_root *root, struct btrfs_inode *inode,
			  struct btrfs_path *path,
			  struct btrfs_path *dst_path, int key_type,
			  struct btrfs_log_ctx *ctx,
			  u64 min_offset, u64 *last_offset_ret)
{
	struct btrfs_key min_key;
	struct btrfs_root *log = root->log_root;
	struct extent_buffer *src;
	int err = 0;
	int ret;
	int i;
	int nritems;
	u64 first_offset = min_offset;
	u64 last_offset = (u64)-1;
	u64 ino = btrfs_ino(inode);

	log = root->log_root;

	min_key.objectid = ino;
	min_key.type = key_type;
	min_key.offset = min_offset;

	ret = btrfs_search_forward(root, &min_key, path, trans->transid);

	/*
	 * we didn't find anything from this transaction, see if there
	 * is anything at all
	 */
	if (ret != 0 || min_key.objectid != ino || min_key.type != key_type) {
		min_key.objectid = ino;
		min_key.type = key_type;
		min_key.offset = (u64)-1;
		btrfs_release_path(path);
		ret = btrfs_search_slot(NULL, root, &min_key, path, 0, 0);
		if (ret < 0) {
			btrfs_release_path(path);
			return ret;
		}
		ret = btrfs_previous_item(root, path, ino, key_type);

		/* if ret == 0 there are items for this type,
		 * create a range to tell us the last key of this type.
		 * otherwise, there are no items in this directory after
		 * *min_offset, and we create a range to indicate that.
		 */
		if (ret == 0) {
			struct btrfs_key tmp;
			btrfs_item_key_to_cpu(path->nodes[0], &tmp,
					      path->slots[0]);
			if (key_type == tmp.type)
				first_offset = max(min_offset, tmp.offset) + 1;
		}
		goto done;
	}

	/* go backward to find any previous key */
	ret = btrfs_previous_item(root, path, ino, key_type);
	if (ret == 0) {
		struct btrfs_key tmp;
		btrfs_item_key_to_cpu(path->nodes[0], &tmp, path->slots[0]);
		if (key_type == tmp.type) {
			first_offset = tmp.offset;
			ret = overwrite_item(trans, log, dst_path,
					     path->nodes[0], path->slots[0],
					     &tmp);
			if (ret) {
				err = ret;
				goto done;
			}
		}
	}
	btrfs_release_path(path);

	/* find the first key from this transaction again */
	ret = btrfs_search_slot(NULL, root, &min_key, path, 0, 0);
	if (WARN_ON(ret != 0))
		goto done;

	/*
	 * we have a block from this transaction, log every item in it
	 * from our directory
	 */
	while (1) {
		struct btrfs_key tmp;
		src = path->nodes[0];
		nritems = btrfs_header_nritems(src);
		for (i = path->slots[0]; i < nritems; i++) {
			struct btrfs_dir_item *di;

			btrfs_item_key_to_cpu(src, &min_key, i);

			if (min_key.objectid != ino || min_key.type != key_type)
				goto done;
			ret = overwrite_item(trans, log, dst_path, src, i,
					     &min_key);
			if (ret) {
				err = ret;
				goto done;
			}

			/*
			 * We must make sure that when we log a directory entry,
			 * the corresponding inode, after log replay, has a
			 * matching link count. For example:
			 *
			 * touch foo
			 * mkdir mydir
			 * sync
			 * ln foo mydir/bar
			 * xfs_io -c "fsync" mydir
			 * <crash>
			 * <mount fs and log replay>
			 *
			 * Would result in an fsync log that when replayed, our
			 * file inode would have a link count of 1, but we get
			 * two directory entries pointing to the same inode.
			 * After removing one of the names, it would not be
			 * possible to remove the other name, which resulted
			 * always in stale file handle errors, and would not
			 * be possible to rmdir the parent directory, since
			 * its i_size could never decrement to the value
			 * BTRFS_EMPTY_DIR_SIZE, resulting in -ENOTEMPTY errors.
			 */
			di = btrfs_item_ptr(src, i, struct btrfs_dir_item);
			btrfs_dir_item_key_to_cpu(src, di, &tmp);
			if (ctx &&
			    (btrfs_dir_transid(src, di) == trans->transid ||
			     btrfs_dir_type(src, di) == BTRFS_FT_DIR) &&
			    tmp.type != BTRFS_ROOT_ITEM_KEY)
				ctx->log_new_dentries = true;
		}
		path->slots[0] = nritems;

		/*
		 * look ahead to the next item and see if it is also
		 * from this directory and from this transaction
		 */
		ret = btrfs_next_leaf(root, path);
		if (ret == 1) {
			last_offset = (u64)-1;
			goto done;
		}
		btrfs_item_key_to_cpu(path->nodes[0], &tmp, path->slots[0]);
		if (tmp.objectid != ino || tmp.type != key_type) {
			last_offset = (u64)-1;
			goto done;
		}
		if (btrfs_header_generation(path->nodes[0]) != trans->transid) {
			ret = overwrite_item(trans, log, dst_path,
					     path->nodes[0], path->slots[0],
					     &tmp);
			if (ret)
				err = ret;
			else
				last_offset = tmp.offset;
			goto done;
		}
	}
done:
	btrfs_release_path(path);
	btrfs_release_path(dst_path);

	if (err == 0) {
		*last_offset_ret = last_offset;
		/*
		 * insert the log range keys to indicate where the log
		 * is valid
		 */
		ret = insert_dir_log_key(trans, log, path, key_type,
					 ino, first_offset, last_offset);
		if (ret)
			err = ret;
	}
	return err;
}
/*
 * logging directories is very similar to logging inodes.  We find all the
 * items from the current transaction and write them to the log.
 *
 * The recovery code scans the directory in the subvolume, and if it finds a
 * key in the range logged that is not present in the log tree, then it means
 * that dir entry was unlinked during the transaction.
 *
 * In order for that scan to work, we must include one key smaller than
 * the smallest logged by this transaction and one key larger than the largest
 * key logged by this transaction.
 */
static noinline int log_directory_changes(struct btrfs_trans_handle *trans,
			  struct btrfs_root *root, struct btrfs_inode *inode,
			  struct btrfs_path *path,
			  struct btrfs_path *dst_path,
			  struct btrfs_log_ctx *ctx)
{
	u64 min_key;
	u64 max_key;
	int ret;
	int key_type = BTRFS_DIR_ITEM_KEY;

again:
	min_key = 0;
	max_key = 0;
	while (1) {
		ret = log_dir_items(trans, root, inode, path, dst_path, key_type,
				ctx, min_key, &max_key);
		if (ret)
			return ret;
		if (max_key == (u64)-1)
			break;
		min_key = max_key + 1;
	}

	if (key_type == BTRFS_DIR_ITEM_KEY) {
		key_type = BTRFS_DIR_INDEX_KEY;
		goto again;
	}
	return 0;
}
/*
 * a helper function to drop items from the log before we relog an
 * inode.  max_key_type indicates the highest item type to remove.
 * This cannot be run for file data extents because it does not
 * free the extents they point to.
 */
static int drop_objectid_items(struct btrfs_trans_handle *trans,
				  struct btrfs_root *log,
				  struct btrfs_path *path,
				  u64 objectid, int max_key_type)
{
	int ret;
	struct btrfs_key key;
	struct btrfs_key found_key;
	int start_slot;

	key.objectid = objectid;
	key.type = max_key_type;
	key.offset = (u64)-1;

	while (1) {
		ret = btrfs_search_slot(trans, log, &key, path, -1, 1);
		BUG_ON(ret == 0); /* Logic error */
		if (ret < 0)
			break;

		if (path->slots[0] == 0)
			break;

		path->slots[0]--;
		btrfs_item_key_to_cpu(path->nodes[0], &found_key,
				      path->slots[0]);

		if (found_key.objectid != objectid)
			break;

		found_key.offset = 0;
		found_key.type = 0;
		ret = btrfs_bin_search(path->nodes[0], &found_key, 0,
				       &start_slot);

		ret = btrfs_del_items(trans, log, path, start_slot,
				      path->slots[0] - start_slot + 1);
		/*
		 * If start slot isn't 0 then we don't need to re-search, we've
		 * found the last guy with the objectid in this tree.
		 */
		if (ret || start_slot != 0)
			break;
		btrfs_release_path(path);
	}
	btrfs_release_path(path);
	if (ret > 0)
		ret = 0;
	return ret;
}
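/*
 * Worked example (added for clarity, not in the original): the search key
 * (objectid, max_key_type, (u64)-1) lands just past the last item of the
 * inode at or below max_key_type, so stepping one slot back gives the
 * highest item to delete. Dropping everything up to and including xattrs
 * for inode 257 before relogging it would be:
 *
 *	ret = drop_objectid_items(trans, log, path, 257,
 *				  BTRFS_XATTR_ITEM_KEY);
 *
 * Extent data items sort above BTRFS_XATTR_ITEM_KEY, so they survive, which
 * is exactly why the comment above forbids using this helper on file data.
 */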
static void fill_inode_item(struct btrfs_trans_handle *trans,
			    struct extent_buffer *leaf,
			    struct btrfs_inode_item *item,
			    struct inode *inode, int log_inode_only,
			    u64 logged_isize)
{
	struct btrfs_map_token token;

	btrfs_init_map_token(&token);

	if (log_inode_only) {
		/* set the generation to zero so the recover code
		 * can tell the difference between a logging
		 * just to say 'this inode exists' and a logging
		 * to say 'update this inode with these values'
		 */
		btrfs_set_token_inode_generation(leaf, item, 0, &token);
		btrfs_set_token_inode_size(leaf, item, logged_isize, &token);
	} else {
		btrfs_set_token_inode_generation(leaf, item,
						 BTRFS_I(inode)->generation,
						 &token);
		btrfs_set_token_inode_size(leaf, item, inode->i_size, &token);
	}

	btrfs_set_token_inode_uid(leaf, item, i_uid_read(inode), &token);
	btrfs_set_token_inode_gid(leaf, item, i_gid_read(inode), &token);
	btrfs_set_token_inode_mode(leaf, item, inode->i_mode, &token);
	btrfs_set_token_inode_nlink(leaf, item, inode->i_nlink, &token);

	btrfs_set_token_timespec_sec(leaf, &item->atime,
				     inode->i_atime.tv_sec, &token);
	btrfs_set_token_timespec_nsec(leaf, &item->atime,
				      inode->i_atime.tv_nsec, &token);

	btrfs_set_token_timespec_sec(leaf, &item->mtime,
				     inode->i_mtime.tv_sec, &token);
	btrfs_set_token_timespec_nsec(leaf, &item->mtime,
				      inode->i_mtime.tv_nsec, &token);

	btrfs_set_token_timespec_sec(leaf, &item->ctime,
				     inode->i_ctime.tv_sec, &token);
	btrfs_set_token_timespec_nsec(leaf, &item->ctime,
				      inode->i_ctime.tv_nsec, &token);

	btrfs_set_token_inode_nbytes(leaf, item, inode_get_bytes(inode),
				     &token);

	btrfs_set_token_inode_sequence(leaf, item, inode->i_version, &token);
	btrfs_set_token_inode_transid(leaf, item, trans->transid, &token);
	btrfs_set_token_inode_rdev(leaf, item, inode->i_rdev, &token);
	btrfs_set_token_inode_flags(leaf, item, BTRFS_I(inode)->flags, &token);
	btrfs_set_token_inode_block_group(leaf, item, 0, &token);
}
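/*
 * Illustrative note (not part of the original file): the generation == 0
 * convention set above is what the replay side keys off. A minimal sketch
 * of the check, as replay code might perform it:
 *
 *	u32 mode = btrfs_inode_mode(eb, inode_item);
 *	if (btrfs_inode_generation(eb, inode_item) == 0 && S_ISREG(mode)) {
 *		// "this inode exists" record: keep the existing inode's
 *		// size instead of the logged one
 *	}
 *
 * The actual handling lives in overwrite_item() earlier in this file; the
 * snippet is a simplification of that logic.
 */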
static int log_inode_item(struct btrfs_trans_handle *trans,
			  struct btrfs_root *log, struct btrfs_path *path,
			  struct btrfs_inode *inode)
{
	struct btrfs_inode_item *inode_item;
	int ret;

	ret = btrfs_insert_empty_item(trans, log, path,
				      &inode->location, sizeof(*inode_item));
	if (ret && ret != -EEXIST)
		return ret;
	inode_item = btrfs_item_ptr(path->nodes[0], path->slots[0],
				    struct btrfs_inode_item);
	fill_inode_item(trans, path->nodes[0], inode_item, &inode->vfs_inode,
			0, 0);
	btrfs_release_path(path);
	return 0;
}
static noinline int copy_items(struct btrfs_trans_handle *trans,
			       struct btrfs_inode *inode,
			       struct btrfs_path *dst_path,
			       struct btrfs_path *src_path, u64 *last_extent,
			       int start_slot, int nr, int inode_only,
			       u64 logged_isize)
{
	struct btrfs_fs_info *fs_info = btrfs_sb(inode->vfs_inode.i_sb);
	unsigned long src_offset;
	unsigned long dst_offset;
	struct btrfs_root *log = inode->root->log_root;
	struct btrfs_file_extent_item *extent;
	struct btrfs_inode_item *inode_item;
	struct extent_buffer *src = src_path->nodes[0];
	struct btrfs_key first_key, last_key, key;
	int ret;
	struct btrfs_key *ins_keys;
	u32 *ins_sizes;
	char *ins_data;
	int i;
	struct list_head ordered_sums;
	int skip_csum = inode->flags & BTRFS_INODE_NODATASUM;
	bool has_extents = false;
	bool need_find_last_extent = true;
	bool done = false;

	INIT_LIST_HEAD(&ordered_sums);

	ins_data = kmalloc(nr * sizeof(struct btrfs_key) +
			   nr * sizeof(u32), GFP_NOFS);
	if (!ins_data)
		return -ENOMEM;

	first_key.objectid = (u64)-1;

	ins_sizes = (u32 *)ins_data;
	ins_keys = (struct btrfs_key *)(ins_data + nr * sizeof(u32));

	for (i = 0; i < nr; i++) {
		ins_sizes[i] = btrfs_item_size_nr(src, i + start_slot);
		btrfs_item_key_to_cpu(src, ins_keys + i, i + start_slot);
	}
	ret = btrfs_insert_empty_items(trans, log, dst_path,
				       ins_keys, ins_sizes, nr);
	if (ret) {
		kfree(ins_data);
		return ret;
	}

	for (i = 0; i < nr; i++, dst_path->slots[0]++) {
		dst_offset = btrfs_item_ptr_offset(dst_path->nodes[0],
						   dst_path->slots[0]);

		src_offset = btrfs_item_ptr_offset(src, start_slot + i);

		if (i == nr - 1)
			last_key = ins_keys[i];

		if (ins_keys[i].type == BTRFS_INODE_ITEM_KEY) {
			inode_item = btrfs_item_ptr(dst_path->nodes[0],
						    dst_path->slots[0],
						    struct btrfs_inode_item);
			fill_inode_item(trans, dst_path->nodes[0], inode_item,
					&inode->vfs_inode,
					inode_only == LOG_INODE_EXISTS,
					logged_isize);
		} else {
			copy_extent_buffer(dst_path->nodes[0], src, dst_offset,
					   src_offset, ins_sizes[i]);
		}

		/*
		 * We set need_find_last_extent here in case we know we were
		 * processing other items and then walk into the first extent in
		 * the inode.  If we don't hit an extent then nothing changes,
		 * we'll do the last search the next time around.
		 */
		if (ins_keys[i].type == BTRFS_EXTENT_DATA_KEY) {
			has_extents = true;
			if (first_key.objectid == (u64)-1)
				first_key = ins_keys[i];
		} else {
			need_find_last_extent = false;
		}

		/* take a reference on file data extents so that truncates
		 * or deletes of this inode don't have to relog the inode
		 * again
		 */
		if (ins_keys[i].type == BTRFS_EXTENT_DATA_KEY &&
		    !skip_csum) {
			int found_type;
			extent = btrfs_item_ptr(src, start_slot + i,
						struct btrfs_file_extent_item);

			if (btrfs_file_extent_generation(src, extent) < trans->transid)
				continue;

			found_type = btrfs_file_extent_type(src, extent);
			if (found_type == BTRFS_FILE_EXTENT_REG) {
				u64 ds, dl, cs, cl;
				ds = btrfs_file_extent_disk_bytenr(src,
								extent);
				/* ds == 0 is a hole */
				if (ds == 0)
					continue;

				dl = btrfs_file_extent_disk_num_bytes(src,
								extent);
				cs = btrfs_file_extent_offset(src, extent);
				cl = btrfs_file_extent_num_bytes(src,
								extent);
				if (btrfs_file_extent_compression(src,
								  extent)) {
					cs = 0;
					cl = dl;
				}

				ret = btrfs_lookup_csums_range(
						fs_info->csum_root,
						ds + cs, ds + cs + cl - 1,
						&ordered_sums, 0);
				if (ret) {
					btrfs_release_path(dst_path);
					kfree(ins_data);
					return ret;
				}
			}
		}
	}

	btrfs_mark_buffer_dirty(dst_path->nodes[0]);
	btrfs_release_path(dst_path);
	kfree(ins_data);

	/*
	 * we have to do this after the loop above to avoid changing the
	 * log tree while trying to change the log tree.
	 */
	ret = 0;
	while (!list_empty(&ordered_sums)) {
		struct btrfs_ordered_sum *sums = list_entry(ordered_sums.next,
						   struct btrfs_ordered_sum,
						   list);
		if (!ret)
			ret = btrfs_csum_file_blocks(trans, log, sums);
		list_del(&sums->list);
		kfree(sums);
	}

	if (!has_extents)
		return ret;

	if (need_find_last_extent && *last_extent == first_key.offset) {
		/*
		 * We don't have any leafs between our current one and the one
		 * we processed before that can have file extent items for our
		 * inode (and have a generation number smaller than our current
		 * transaction id).
		 */
		need_find_last_extent = false;
	}

	/*
	 * Because we use btrfs_search_forward we could skip leaves that were
	 * not modified and then assume *last_extent is valid when it really
	 * isn't. So back up to the previous leaf and read the end of the last
	 * extent before we go and fill in holes.
	 */
	if (need_find_last_extent) {
		u64 len;

		ret = btrfs_prev_leaf(inode->root, src_path);
		if (ret < 0)
			return ret;
		if (ret)
			goto fill_holes;
		if (src_path->slots[0])
			src_path->slots[0]--;
		src = src_path->nodes[0];
		btrfs_item_key_to_cpu(src, &key, src_path->slots[0]);
		if (key.objectid != btrfs_ino(inode) ||
		    key.type != BTRFS_EXTENT_DATA_KEY)
			goto fill_holes;
		extent = btrfs_item_ptr(src, src_path->slots[0],
					struct btrfs_file_extent_item);
		if (btrfs_file_extent_type(src, extent) ==
		    BTRFS_FILE_EXTENT_INLINE) {
			len = btrfs_file_extent_inline_len(src,
							   src_path->slots[0],
							   extent);
			*last_extent = ALIGN(key.offset + len,
					     fs_info->sectorsize);
		} else {
			len = btrfs_file_extent_num_bytes(src, extent);
			*last_extent = key.offset + len;
		}
	}
fill_holes:
	/* So we did prev_leaf, now we need to move to the next leaf, but a few
	 * things could have happened
	 *
	 * 1) A merge could have happened, so we could currently be on a leaf
	 * that holds what we were copying in the first place.
	 * 2) A split could have happened, and now not all of the items we want
	 * are on the same leaf.
	 *
	 * So we need to adjust how we search for holes, we need to drop the
	 * path and re-search for the first extent key we found, and then walk
	 * forward until we hit the last one we copied.
	 */
	if (need_find_last_extent) {
		/* btrfs_prev_leaf could return 1 without releasing the path */
		btrfs_release_path(src_path);
		ret = btrfs_search_slot(NULL, inode->root, &first_key,
					src_path, 0, 0);
		if (ret < 0)
			return ret;
		ASSERT(ret == 0);
		src = src_path->nodes[0];
		i = src_path->slots[0];
	} else {
		i = start_slot;
	}

	/*
	 * Ok so here we need to go through and fill in any holes we may have
	 * to make sure that holes are punched for those areas in case they had
	 * extents previously.
	 */
	while (!done) {
		u64 offset, len;
		u64 extent_end;

		if (i >= btrfs_header_nritems(src_path->nodes[0])) {
			ret = btrfs_next_leaf(inode->root, src_path);
			if (ret < 0)
				return ret;
			ASSERT(ret == 0);
			src = src_path->nodes[0];
			i = 0;
			need_find_last_extent = false;
		}

		btrfs_item_key_to_cpu(src, &key, i);
		if (!btrfs_comp_cpu_keys(&key, &last_key))
			done = true;
		if (key.objectid != btrfs_ino(inode) ||
		    key.type != BTRFS_EXTENT_DATA_KEY) {
			i++;
			continue;
		}
		extent = btrfs_item_ptr(src, i, struct btrfs_file_extent_item);
		if (btrfs_file_extent_type(src, extent) ==
		    BTRFS_FILE_EXTENT_INLINE) {
			len = btrfs_file_extent_inline_len(src, i, extent);
			extent_end = ALIGN(key.offset + len,
					   fs_info->sectorsize);
		} else {
			len = btrfs_file_extent_num_bytes(src, extent);
			extent_end = key.offset + len;
		}
		i++;

		if (*last_extent == key.offset) {
			*last_extent = extent_end;
			continue;
		}
		offset = *last_extent;
		len = key.offset - *last_extent;
		ret = btrfs_insert_file_extent(trans, log, btrfs_ino(inode),
					       offset, 0, 0, len, 0, len, 0,
					       0, 0);
		if (ret)
			break;
		*last_extent = extent_end;
	}
	/*
	 * Need to let the callers know we dropped the path so they should
	 * re-search.
	 */
	if (!ret && need_find_last_extent)
		ret = 1;
	return ret;
}
static int extent_cmp(void *priv, struct list_head *a, struct list_head *b)
{
	struct extent_map *em1, *em2;

	em1 = list_entry(a, struct extent_map, list);
	em2 = list_entry(b, struct extent_map, list);

	if (em1->start < em2->start)
		return -1;
	else if (em1->start > em2->start)
		return 1;
	return 0;
}
static int wait_ordered_extents(struct btrfs_trans_handle *trans,
				struct inode *inode,
				struct btrfs_root *root,
				const struct extent_map *em,
				const struct list_head *logged_list,
				bool *ordered_io_error)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_ordered_extent *ordered;
	struct btrfs_root *log = root->log_root;
	u64 mod_start = em->mod_start;
	u64 mod_len = em->mod_len;
	const bool skip_csum = BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM;
	u64 csum_offset;
	u64 csum_len;
	LIST_HEAD(ordered_sums);
	int ret = 0;

	*ordered_io_error = false;

	if (test_bit(EXTENT_FLAG_PREALLOC, &em->flags) ||
	    em->block_start == EXTENT_MAP_HOLE)
		return 0;

	/*
	 * Wait for any ordered extent that covers our extent map. If it
	 * finishes without an error, first check and see if our csums are on
	 * our outstanding ordered extents.
	 */
	list_for_each_entry(ordered, logged_list, log_list) {
		struct btrfs_ordered_sum *sum;

		if (!mod_len)
			break;

		if (ordered->file_offset + ordered->len <= mod_start ||
		    mod_start + mod_len <= ordered->file_offset)
			continue;

		if (!test_bit(BTRFS_ORDERED_IO_DONE, &ordered->flags) &&
		    !test_bit(BTRFS_ORDERED_IOERR, &ordered->flags) &&
		    !test_bit(BTRFS_ORDERED_DIRECT, &ordered->flags)) {
			const u64 start = ordered->file_offset;
			const u64 end = ordered->file_offset + ordered->len - 1;

			WARN_ON(ordered->inode != inode);
			filemap_fdatawrite_range(inode->i_mapping, start, end);
		}

		wait_event(ordered->wait,
			   (test_bit(BTRFS_ORDERED_IO_DONE, &ordered->flags) ||
			    test_bit(BTRFS_ORDERED_IOERR, &ordered->flags)));

		if (test_bit(BTRFS_ORDERED_IOERR, &ordered->flags)) {
			/*
			 * Clear the AS_EIO/AS_ENOSPC flags from the inode's
			 * i_mapping flags, so that the next fsync won't get
			 * an outdated io error too.
			 */
			filemap_check_errors(inode->i_mapping);
			*ordered_io_error = true;
			break;
		}
		/*
		 * We are going to copy all the csums on this ordered extent, so
		 * go ahead and adjust mod_start and mod_len in case this
		 * ordered extent has already been logged.
		 */
		if (ordered->file_offset > mod_start) {
			if (ordered->file_offset + ordered->len >=
			    mod_start + mod_len)
				mod_len = ordered->file_offset - mod_start;
			/*
			 * If we have this case
			 *
			 * |--------- logged extent ---------|
			 *       |----- ordered extent ----|
			 *
			 * Just don't mess with mod_start and mod_len, we'll
			 * just end up logging more csums than we need and it
			 * will be ok.
			 */
		} else {
			if (ordered->file_offset + ordered->len <
			    mod_start + mod_len) {
				mod_len = (mod_start + mod_len) -
					(ordered->file_offset + ordered->len);
				mod_start = ordered->file_offset +
					ordered->len;
			} else {
				mod_len = 0;
			}
		}

		if (skip_csum)
			continue;

		/*
		 * To keep us from looping for the above case of an ordered
		 * extent that falls inside of the logged extent.
		 */
		if (test_and_set_bit(BTRFS_ORDERED_LOGGED_CSUM,
				     &ordered->flags))
			continue;

		list_for_each_entry(sum, &ordered->list, list) {
			ret = btrfs_csum_file_blocks(trans, log, sum);
			if (ret)
				break;
		}
	}

	if (*ordered_io_error || !mod_len || ret || skip_csum)
		return ret;

	if (em->compress_type) {
		csum_offset = 0;
		csum_len = max(em->block_len, em->orig_block_len);
	} else {
		csum_offset = mod_start - em->start;
		csum_len = mod_len;
	}

	/* block start is already adjusted for the file extent offset. */
	ret = btrfs_lookup_csums_range(fs_info->csum_root,
				       em->block_start + csum_offset,
				       em->block_start + csum_offset +
				       csum_len - 1, &ordered_sums, 0);
	if (ret)
		return ret;

	while (!list_empty(&ordered_sums)) {
		struct btrfs_ordered_sum *sums = list_entry(ordered_sums.next,
						struct btrfs_ordered_sum,
						list);
		if (!ret)
			ret = btrfs_csum_file_blocks(trans, log, sums);
		list_del(&sums->list);
		kfree(sums);
	}

	return ret;
}
static int log_one_extent(struct btrfs_trans_handle *trans,
			  struct btrfs_inode *inode, struct btrfs_root *root,
			  const struct extent_map *em,
			  struct btrfs_path *path,
			  const struct list_head *logged_list,
			  struct btrfs_log_ctx *ctx)
{
	struct btrfs_root *log = root->log_root;
	struct btrfs_file_extent_item *fi;
	struct extent_buffer *leaf;
	struct btrfs_map_token token;
	struct btrfs_key key;
	u64 extent_offset = em->start - em->orig_start;
	u64 block_len;
	int ret;
	int extent_inserted = 0;
	bool ordered_io_err = false;

	ret = wait_ordered_extents(trans, &inode->vfs_inode, root, em,
				   logged_list, &ordered_io_err);
	if (ret)
		return ret;

	if (ordered_io_err) {
		ctx->io_err = -EIO;
		return ctx->io_err;
	}

	btrfs_init_map_token(&token);

	ret = __btrfs_drop_extents(trans, log, &inode->vfs_inode, path, em->start,
				   em->start + em->len, NULL, 0, 1,
				   sizeof(*fi), &extent_inserted);
	if (ret)
		return ret;

	if (!extent_inserted) {
		key.objectid = btrfs_ino(inode);
		key.type = BTRFS_EXTENT_DATA_KEY;
		key.offset = em->start;

		ret = btrfs_insert_empty_item(trans, log, path, &key,
					      sizeof(*fi));
		if (ret)
			return ret;
	}
	leaf = path->nodes[0];
	fi = btrfs_item_ptr(leaf, path->slots[0],
			    struct btrfs_file_extent_item);

	btrfs_set_token_file_extent_generation(leaf, fi, trans->transid,
					       &token);
	if (test_bit(EXTENT_FLAG_PREALLOC, &em->flags))
		btrfs_set_token_file_extent_type(leaf, fi,
						 BTRFS_FILE_EXTENT_PREALLOC,
						 &token);
	else
		btrfs_set_token_file_extent_type(leaf, fi,
						 BTRFS_FILE_EXTENT_REG,
						 &token);

	block_len = max(em->block_len, em->orig_block_len);
	if (em->compress_type != BTRFS_COMPRESS_NONE) {
		btrfs_set_token_file_extent_disk_bytenr(leaf, fi,
							em->block_start,
							&token);
		btrfs_set_token_file_extent_disk_num_bytes(leaf, fi, block_len,
							   &token);
	} else if (em->block_start < EXTENT_MAP_LAST_BYTE) {
		btrfs_set_token_file_extent_disk_bytenr(leaf, fi,
							em->block_start -
							extent_offset, &token);
		btrfs_set_token_file_extent_disk_num_bytes(leaf, fi, block_len,
							   &token);
	} else {
		btrfs_set_token_file_extent_disk_bytenr(leaf, fi, 0, &token);
		btrfs_set_token_file_extent_disk_num_bytes(leaf, fi, 0,
							   &token);
	}

	btrfs_set_token_file_extent_offset(leaf, fi, extent_offset, &token);
	btrfs_set_token_file_extent_num_bytes(leaf, fi, em->len, &token);
	btrfs_set_token_file_extent_ram_bytes(leaf, fi, em->ram_bytes, &token);
	btrfs_set_token_file_extent_compression(leaf, fi, em->compress_type,
						&token);
	btrfs_set_token_file_extent_encryption(leaf, fi, 0, &token);
	btrfs_set_token_file_extent_other_encoding(leaf, fi, 0, &token);
	btrfs_mark_buffer_dirty(leaf);

	btrfs_release_path(path);

	return ret;
}
static int btrfs_log_changed_extents(struct btrfs_trans_handle *trans,
				     struct btrfs_root *root,
				     struct btrfs_inode *inode,
				     struct btrfs_path *path,
				     struct list_head *logged_list,
				     struct btrfs_log_ctx *ctx,
				     const u64 start,
				     const u64 end)
{
	struct extent_map *em, *n;
	struct list_head extents;
	struct extent_map_tree *tree = &inode->extent_tree;
	u64 logged_start, logged_end;
	u64 test_gen;
	int ret = 0;
	int num = 0;

	INIT_LIST_HEAD(&extents);

	down_write(&inode->dio_sem);
	write_lock(&tree->lock);
	test_gen = root->fs_info->last_trans_committed;
	logged_start = start;
	logged_end = end;

	list_for_each_entry_safe(em, n, &tree->modified_extents, list) {
		list_del_init(&em->list);
		/*
		 * Just an arbitrary number, this can be really CPU intensive
		 * once we start getting a lot of extents, and really once we
		 * have a bunch of extents we just want to commit since it will
		 * be faster.
		 */
		if (++num > 32768) {
			list_del_init(&tree->modified_extents);
			ret = -EFBIG;
			goto process;
		}

		if (em->generation <= test_gen)
			continue;

		if (em->start < logged_start)
			logged_start = em->start;
		if ((em->start + em->len - 1) > logged_end)
			logged_end = em->start + em->len - 1;

		/* Need a ref to keep it from getting evicted from cache */
		refcount_inc(&em->refs);
		set_bit(EXTENT_FLAG_LOGGING, &em->flags);
		list_add_tail(&em->list, &extents);
	}

	list_sort(NULL, &extents, extent_cmp);
	btrfs_get_logged_extents(inode, logged_list, logged_start, logged_end);
	/*
	 * Some ordered extents started by fsync might have completed
	 * before we could collect them into the list logged_list, which
	 * means they're gone, not in our logged_list nor in the inode's
	 * ordered tree. We want the application/user space to know an
	 * error happened while attempting to persist file data so that
	 * it can take proper action. If such error happened, we leave
	 * without writing to the log tree and the fsync must report the
	 * file data write error and not commit the current transaction.
	 */
	ret = filemap_check_errors(inode->vfs_inode.i_mapping);
	if (ret)
		ctx->io_err = ret;
process:
	while (!list_empty(&extents)) {
		em = list_entry(extents.next, struct extent_map, list);

		list_del_init(&em->list);

		/*
		 * If we had an error we just need to delete everybody from our
		 * private list.
		 */
		if (ret) {
			clear_em_logging(tree, em);
			free_extent_map(em);
			continue;
		}

		write_unlock(&tree->lock);

		ret = log_one_extent(trans, inode, root, em, path, logged_list,
				     ctx);
		write_lock(&tree->lock);
		clear_em_logging(tree, em);
		free_extent_map(em);
	}
	WARN_ON(!list_empty(&extents));
	write_unlock(&tree->lock);
	up_write(&inode->dio_sem);

	btrfs_release_path(path);
	return ret;
}
static int logged_inode_size(struct btrfs_root *log, struct btrfs_inode *inode,
			     struct btrfs_path *path, u64 *size_ret)
{
	struct btrfs_key key;
	int ret;

	key.objectid = btrfs_ino(inode);
	key.type = BTRFS_INODE_ITEM_KEY;
	key.offset = 0;

	ret = btrfs_search_slot(NULL, log, &key, path, 0, 0);
	if (ret < 0) {
		return ret;
	} else if (ret > 0) {
		*size_ret = 0;
	} else {
		struct btrfs_inode_item *item;

		item = btrfs_item_ptr(path->nodes[0], path->slots[0],
				      struct btrfs_inode_item);
		*size_ret = btrfs_inode_size(path->nodes[0], item);
	}

	btrfs_release_path(path);
	return 0;
}
/*
 * At the moment we always log all xattrs. This is to figure out at log replay
 * time which xattrs must have their deletion replayed. If an xattr is missing
 * in the log tree and exists in the fs/subvol tree, we delete it. This is
 * because if an xattr is deleted, the inode is fsynced and a power failure
 * happens, causing the log to be replayed the next time the fs is mounted,
 * we want the xattr to not exist anymore (same behaviour as other filesystems
 * with a journal, ext3/4, xfs, f2fs, etc).
 */
static int btrfs_log_all_xattrs(struct btrfs_trans_handle *trans,
				struct btrfs_root *root,
				struct btrfs_inode *inode,
				struct btrfs_path *path,
				struct btrfs_path *dst_path)
{
	int ret;
	struct btrfs_key key;
	const u64 ino = btrfs_ino(inode);
	int ins_nr = 0;
	int start_slot = 0;

	key.objectid = ino;
	key.type = BTRFS_XATTR_ITEM_KEY;
	key.offset = 0;

	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	if (ret < 0)
		return ret;

	while (true) {
		int slot = path->slots[0];
		struct extent_buffer *leaf = path->nodes[0];
		int nritems = btrfs_header_nritems(leaf);

		if (slot >= nritems) {
			if (ins_nr > 0) {
				u64 last_extent = 0;

				ret = copy_items(trans, inode, dst_path, path,
						 &last_extent, start_slot,
						 ins_nr, 1, 0);
				/* can't be 1, extent items aren't processed */
				ASSERT(ret <= 0);
				if (ret < 0)
					return ret;
				ins_nr = 0;
			}
			ret = btrfs_next_leaf(root, path);
			if (ret < 0)
				return ret;
			else if (ret > 0)
				break;
			continue;
		}

		btrfs_item_key_to_cpu(leaf, &key, slot);
		if (key.objectid != ino || key.type != BTRFS_XATTR_ITEM_KEY)
			break;

		if (ins_nr == 0)
			start_slot = slot;
		ins_nr++;
		path->slots[0]++;
		cond_resched();
	}
	if (ins_nr > 0) {
		u64 last_extent = 0;

		ret = copy_items(trans, inode, dst_path, path,
				 &last_extent, start_slot,
				 ins_nr, 1, 0);
		/* can't be 1, extent items aren't processed */
		ASSERT(ret <= 0);
		if (ret < 0)
			return ret;
	}

	return 0;
}
/*
 * If the no holes feature is enabled we need to make sure any hole between the
 * last extent and the i_size of our inode is explicitly marked in the log. This
 * is to make sure that doing something like:
 *
 *      1) create file with 128Kb of data
 *      2) truncate file to 64Kb
 *      3) truncate file to 256Kb
 *      4) fsync the file
 *      5) <crash/power failure>
 *      6) mount fs and trigger log replay
 *
 * Will give us a file with a size of 256Kb, the first 64Kb of data match what
 * the file had in its first 64Kb of data at step 1 and the last 192Kb of the
 * file correspond to a hole. The presence of explicit holes in a log tree is
 * what guarantees that log replay will remove/adjust file extent items in the
 * fs/subvol tree.
 *
 * Here we do not need to care about holes between extents, that is already done
 * by copy_items(). We also only need to do this in the full sync path, where we
 * lookup for extents from the fs/subvol tree only. In the fast path case, we
 * lookup the list of modified extent maps and if any represents a hole, we
 * insert a corresponding extent representing a hole in the log tree.
 */
static int btrfs_log_trailing_hole(struct btrfs_trans_handle *trans,
				   struct btrfs_root *root,
				   struct btrfs_inode *inode,
				   struct btrfs_path *path)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	int ret;
	struct btrfs_key key;
	u64 hole_start;
	u64 hole_size;
	struct extent_buffer *leaf;
	struct btrfs_root *log = root->log_root;
	const u64 ino = btrfs_ino(inode);
	const u64 i_size = i_size_read(&inode->vfs_inode);

	if (!btrfs_fs_incompat(fs_info, NO_HOLES))
		return 0;

	key.objectid = ino;
	key.type = BTRFS_EXTENT_DATA_KEY;
	key.offset = (u64)-1;

	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	if (ret < 0)
		return ret;
	ASSERT(ret > 0);

	ASSERT(path->slots[0] > 0);
	path->slots[0]--;
	leaf = path->nodes[0];
	btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);

	if (key.objectid != ino || key.type != BTRFS_EXTENT_DATA_KEY) {
		/* inode does not have any extents */
		hole_start = 0;
		hole_size = i_size;
	} else {
		struct btrfs_file_extent_item *extent;
		u64 len;

		/*
		 * If there's an extent beyond i_size, an explicit hole was
		 * already inserted by copy_items().
		 */
		if (key.offset >= i_size)
			return 0;

		extent = btrfs_item_ptr(leaf, path->slots[0],
					struct btrfs_file_extent_item);

		if (btrfs_file_extent_type(leaf, extent) ==
		    BTRFS_FILE_EXTENT_INLINE) {
			len = btrfs_file_extent_inline_len(leaf,
							   path->slots[0],
							   extent);
			ASSERT(len == i_size ||
			       (len == fs_info->sectorsize &&
				btrfs_file_extent_compression(leaf, extent) !=
				BTRFS_COMPRESS_NONE));
			return 0;
		}

		len = btrfs_file_extent_num_bytes(leaf, extent);
		/* Last extent goes beyond i_size, no need to log a hole. */
		if (key.offset + len > i_size)
			return 0;
		hole_start = key.offset + len;
		hole_size = i_size - hole_start;
	}
	btrfs_release_path(path);

	/* Last extent ends at i_size. */
	if (hole_size == 0)
		return 0;

	hole_size = ALIGN(hole_size, fs_info->sectorsize);
	ret = btrfs_insert_file_extent(trans, log, ino, hole_start, 0, 0,
				       hole_size, 0, hole_size, 0, 0, 0);
	return ret;
}
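/*
 * Worked example (added for clarity, not in the original): for the
 * create/truncate sequence in the comment above this function, assume a
 * 4096 byte sectorsize. After truncating a 128K file down to 64K and back
 * up to 256K, the last extent ends at 64K while i_size is 256K, so:
 *
 *	hole_start = 64 * 1024;			// end of last extent
 *	hole_size  = 256 * 1024 - hole_start;	// 192K, already aligned
 *
 * and the inserted file extent item describes a 192K hole, which replay
 * uses to zero that range instead of resurrecting stale data.
 */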
/*
 * When we are logging a new inode X, check if it doesn't have a reference that
 * matches the reference from some other inode Y created in a past transaction
 * and that was renamed in the current transaction. If we don't do this, then at
 * log replay time we can lose inode Y (and all its files if it's a directory):
 *
 * mkdir /mnt/x
 * echo "hello world" > /mnt/x/foobar
 * sync
 * mv /mnt/x /mnt/y
 * mkdir /mnt/x                 # or touch /mnt/x
 * xfs_io -c fsync /mnt/x
 * <power fail>
 * mount fs, trigger log replay
 *
 * After the log replay procedure, we would lose the first directory and all its
 * files (file foobar).
 * For the case where inode Y is not a directory we simply end up losing it:
 *
 * echo "123" > /mnt/foo
 * sync
 * mv /mnt/foo /mnt/bar
 * echo "abc" > /mnt/foo
 * xfs_io -c fsync /mnt/foo
 * <power fail>
 *
 * We also need this for cases where a snapshot entry is replaced by some other
 * entry (file or directory) otherwise we end up with an unreplayable log due to
 * attempts to delete the snapshot entry (entry of type BTRFS_ROOT_ITEM_KEY) as
 * if it were a regular entry:
 *
 * mkdir /mnt/x
 * btrfs subvolume snapshot /mnt /mnt/x/snap
 * btrfs subvolume delete /mnt/x/snap
 * rmdir /mnt/x
 * mkdir /mnt/x
 * fsync /mnt/x or fsync some new file inside it
 * <power fail>
 *
 * The snapshot delete, rmdir of x, mkdir of a new x and the fsync all happen in
 * the same transaction.
 */
static int btrfs_check_ref_name_override(struct extent_buffer *eb,
					 const int slot,
					 const struct btrfs_key *key,
					 struct btrfs_inode *inode,
					 u64 *other_ino)
{
	int ret;
	struct btrfs_path *search_path;
	char *name = NULL;
	u32 name_len = 0;
	u32 item_size = btrfs_item_size_nr(eb, slot);
	u32 cur_offset = 0;
	unsigned long ptr = btrfs_item_ptr_offset(eb, slot);

	search_path = btrfs_alloc_path();
	if (!search_path)
		return -ENOMEM;
	search_path->search_commit_root = 1;
	search_path->skip_locking = 1;

	while (cur_offset < item_size) {
		u64 parent;
		u32 this_name_len;
		u32 this_len;
		unsigned long name_ptr;
		struct btrfs_dir_item *di;

		if (key->type == BTRFS_INODE_REF_KEY) {
			struct btrfs_inode_ref *iref;

			iref = (struct btrfs_inode_ref *)(ptr + cur_offset);
			parent = key->offset;
			this_name_len = btrfs_inode_ref_name_len(eb, iref);
			name_ptr = (unsigned long)(iref + 1);
			this_len = sizeof(*iref) + this_name_len;
		} else {
			struct btrfs_inode_extref *extref;

			extref = (struct btrfs_inode_extref *)(ptr +
							       cur_offset);
			parent = btrfs_inode_extref_parent(eb, extref);
			this_name_len = btrfs_inode_extref_name_len(eb, extref);
			name_ptr = (unsigned long)&extref->name;
			this_len = sizeof(*extref) + this_name_len;
		}

		ret = btrfs_is_name_len_valid(eb, slot, name_ptr,
					      this_name_len);
		if (!ret) {
			ret = -EIO;
			goto out;
		}
		if (this_name_len > name_len) {
			char *new_name;

			new_name = krealloc(name, this_name_len, GFP_NOFS);
			if (!new_name) {
				ret = -ENOMEM;
				goto out;
			}
			name_len = this_name_len;
			name = new_name;
		}

		read_extent_buffer(eb, name, name_ptr, this_name_len);
		di = btrfs_lookup_dir_item(NULL, inode->root, search_path,
					   parent, name, this_name_len, 0);
		if (di && !IS_ERR(di)) {
			struct btrfs_key di_key;

			btrfs_dir_item_key_to_cpu(search_path->nodes[0],
						  di, &di_key);
			if (di_key.type == BTRFS_INODE_ITEM_KEY) {
				ret = 1;
				*other_ino = di_key.objectid;
			} else {
				ret = -EAGAIN;
			}
			goto out;
		} else if (IS_ERR(di)) {
			ret = PTR_ERR(di);
			goto out;
		}
		btrfs_release_path(search_path);

		cur_offset += this_len;
	}
	ret = 0;
out:
	btrfs_free_path(search_path);
	kfree(name);
	return ret;
}
4623 /* log a single inode in the tree log.
4624 * At least one parent directory for this inode must exist in the tree
4625 * or be logged already.
4627 * Any items from this inode changed by the current transaction are copied
4628 * to the log tree. An extra reference is taken on any extents in this
4629 * file, allowing us to avoid a whole pile of corner cases around logging
4630 * blocks that have been removed from the tree.
4632 * See LOG_INODE_ALL and related defines for a description of what inode_only
4635 * This handles both files and directories.
4637 static int btrfs_log_inode(struct btrfs_trans_handle
*trans
,
4638 struct btrfs_root
*root
, struct btrfs_inode
*inode
,
4642 struct btrfs_log_ctx
*ctx
)
4644 struct btrfs_fs_info
*fs_info
= root
->fs_info
;
4645 struct btrfs_path
*path
;
4646 struct btrfs_path
*dst_path
;
4647 struct btrfs_key min_key
;
4648 struct btrfs_key max_key
;
4649 struct btrfs_root
*log
= root
->log_root
;
4650 LIST_HEAD(logged_list
);
4651 u64 last_extent
= 0;
4655 int ins_start_slot
= 0;
4657 bool fast_search
= false;
4658 u64 ino
= btrfs_ino(inode
);
4659 struct extent_map_tree
*em_tree
= &inode
->extent_tree
;
4660 u64 logged_isize
= 0;
4661 bool need_log_inode_item
= true;
4663 path
= btrfs_alloc_path();
4666 dst_path
= btrfs_alloc_path();
4668 btrfs_free_path(path
);
4672 min_key
.objectid
= ino
;
4673 min_key
.type
= BTRFS_INODE_ITEM_KEY
;
4676 max_key
.objectid
= ino
;
4679 /* today the code can only do partial logging of directories */
4680 if (S_ISDIR(inode
->vfs_inode
.i_mode
) ||
4681 (!test_bit(BTRFS_INODE_NEEDS_FULL_SYNC
,
4682 &inode
->runtime_flags
) &&
4683 inode_only
>= LOG_INODE_EXISTS
))
4684 max_key
.type
= BTRFS_XATTR_ITEM_KEY
;
4686 max_key
.type
= (u8
)-1;
4687 max_key
.offset
= (u64
)-1;
	/*
	 * Only run delayed items if we are a dir or a new file.
	 * Otherwise commit the delayed inode only, which is needed in
	 * order for the log replay code to mark inodes for link count
	 * fixup (create temporary BTRFS_TREE_LOG_FIXUP_OBJECTID items).
	 */
	if (S_ISDIR(inode->vfs_inode.i_mode) ||
	    inode->generation > fs_info->last_trans_committed)
		ret = btrfs_commit_inode_delayed_items(trans, inode);
	else
		ret = btrfs_commit_inode_delayed_inode(inode);

	if (ret) {
		btrfs_free_path(path);
		btrfs_free_path(dst_path);
		return ret;
	}

	if (inode_only == LOG_OTHER_INODE) {
		inode_only = LOG_INODE_EXISTS;
		mutex_lock_nested(&inode->log_mutex, SINGLE_DEPTH_NESTING);
	} else {
		mutex_lock(&inode->log_mutex);
	}

	/*
	 * a brute force approach to making sure we get the most uptodate
	 * copies of everything.
	 */
	if (S_ISDIR(inode->vfs_inode.i_mode)) {
		int max_key_type = BTRFS_DIR_LOG_INDEX_KEY;

		if (inode_only == LOG_INODE_EXISTS)
			max_key_type = BTRFS_XATTR_ITEM_KEY;
		ret = drop_objectid_items(trans, log, path, ino, max_key_type);
	} else {
		if (inode_only == LOG_INODE_EXISTS) {
			/*
			 * Make sure the new inode item we write to the log has
			 * the same isize as the current one (if it exists).
			 * This is necessary to prevent data loss after log
			 * replay, and also to prevent doing a wrong expanding
			 * truncate - for e.g. create file, write 4K into offset
			 * 0, fsync, write 4K into offset 4096, add hard link,
			 * fsync some other file (to sync log), power fail - if
			 * we use the inode's current i_size, after log replay
			 * we get a 8Kb file, with the last 4Kb extent as a hole
			 * (zeroes), as if an expanding truncate happened,
			 * instead of getting a file of 4Kb only.
			 */
			err = logged_inode_size(log, inode, path,
						&logged_isize);
			if (err)
				goto out_unlock;
		}
		if (test_bit(BTRFS_INODE_NEEDS_FULL_SYNC,
			     &inode->runtime_flags)) {
			if (inode_only == LOG_INODE_EXISTS) {
				max_key.type = BTRFS_XATTR_ITEM_KEY;
				ret = drop_objectid_items(trans, log, path, ino,
							  max_key.type);
			} else {
				clear_bit(BTRFS_INODE_NEEDS_FULL_SYNC,
					  &inode->runtime_flags);
				clear_bit(BTRFS_INODE_COPY_EVERYTHING,
					  &inode->runtime_flags);
				while (1) {
					ret = btrfs_truncate_inode_items(trans,
						log, &inode->vfs_inode, 0, 0);
					if (ret != -EAGAIN)
						break;
				}
			}
		} else if (test_and_clear_bit(BTRFS_INODE_COPY_EVERYTHING,
					      &inode->runtime_flags) ||
			   inode_only == LOG_INODE_EXISTS) {
			if (inode_only == LOG_INODE_ALL)
				fast_search = true;
			max_key.type = BTRFS_XATTR_ITEM_KEY;
			ret = drop_objectid_items(trans, log, path, ino,
						  max_key.type);
		} else {
			if (inode_only == LOG_INODE_ALL)
				fast_search = true;
			goto log_extents;
		}
	}
	if (ret) {
		err = ret;
		goto out_unlock;
	}

	while (1) {
		ins_nr = 0;
		ret = btrfs_search_forward(root, &min_key,
					   path, trans->transid);
		if (ret < 0) {
			err = ret;
			goto out_unlock;
		}
		if (ret != 0)
			break;
again:
		/* note, ins_nr might be > 0 here, cleanup outside the loop */
		if (min_key.objectid != ino)
			break;
		if (min_key.type > max_key.type)
			break;

		if (min_key.type == BTRFS_INODE_ITEM_KEY)
			need_log_inode_item = false;

		if ((min_key.type == BTRFS_INODE_REF_KEY ||
		     min_key.type == BTRFS_INODE_EXTREF_KEY) &&
		    inode->generation == trans->transid) {
			u64 other_ino = 0;

			ret = btrfs_check_ref_name_override(path->nodes[0],
					path->slots[0], &min_key, inode,
					&other_ino);
			if (ret < 0) {
				err = ret;
				goto out_unlock;
			} else if (ret > 0 && ctx &&
				   other_ino != btrfs_ino(BTRFS_I(ctx->inode))) {
				struct btrfs_key inode_key;
				struct inode *other_inode;

				if (ins_nr > 0) {
					ins_nr++;
				} else {
					ins_nr = 1;
					ins_start_slot = path->slots[0];
				}
				ret = copy_items(trans, inode, dst_path, path,
						 &last_extent, ins_start_slot,
						 ins_nr, inode_only,
						 logged_isize);
				if (ret < 0) {
					err = ret;
					goto out_unlock;
				}
				ins_nr = 0;
				btrfs_release_path(path);
				inode_key.objectid = other_ino;
				inode_key.type = BTRFS_INODE_ITEM_KEY;
				inode_key.offset = 0;
				other_inode = btrfs_iget(fs_info->sb,
							 &inode_key, root,
							 NULL);
				/*
				 * If the other inode that had a conflicting dir
				 * entry was deleted in the current transaction,
				 * we don't need to do more work nor fallback to
				 * a transaction commit.
				 */
				if (IS_ERR(other_inode) &&
				    PTR_ERR(other_inode) == -ENOENT) {
					goto next_key;
				} else if (IS_ERR(other_inode)) {
					err = PTR_ERR(other_inode);
					goto out_unlock;
				}
				/*
				 * We are safe logging the other inode without
				 * acquiring its i_mutex as long as we log with
				 * the LOG_INODE_EXISTS mode. We're safe against
				 * concurrent renames of the other inode as well
				 * because during a rename we pin the log and
				 * update the log with the new name before we
				 * unpin it.
				 */
				err = btrfs_log_inode(trans, root,
						BTRFS_I(other_inode),
						LOG_OTHER_INODE, 0, LLONG_MAX,
						ctx);
				iput(other_inode);
				if (err)
					goto out_unlock;
				else
					goto next_key;
			}
		}

		/* Skip xattrs, we log them later with btrfs_log_all_xattrs() */
		if (min_key.type == BTRFS_XATTR_ITEM_KEY) {
			if (ins_nr == 0)
				goto next_slot;
			ret = copy_items(trans, inode, dst_path, path,
					 &last_extent, ins_start_slot,
					 ins_nr, inode_only, logged_isize);
			if (ret < 0) {
				err = ret;
				goto out_unlock;
			}
			ins_nr = 0;
			if (ret) {
				btrfs_release_path(path);
				continue;
			}
			goto next_slot;
		}

		if (ins_nr && ins_start_slot + ins_nr == path->slots[0]) {
			ins_nr++;
			goto next_slot;
		} else if (!ins_nr) {
			ins_start_slot = path->slots[0];
			ins_nr = 1;
			goto next_slot;
		}

		ret = copy_items(trans, inode, dst_path, path, &last_extent,
				 ins_start_slot, ins_nr, inode_only,
				 logged_isize);
		if (ret < 0) {
			err = ret;
			goto out_unlock;
		}
		if (ret) {
			ins_nr = 0;
			btrfs_release_path(path);
			continue;
		}
		ins_nr = 1;
		ins_start_slot = path->slots[0];
next_slot:

		nritems = btrfs_header_nritems(path->nodes[0]);
		path->slots[0]++;
		if (path->slots[0] < nritems) {
			btrfs_item_key_to_cpu(path->nodes[0], &min_key,
					      path->slots[0]);
			goto again;
		}
		if (ins_nr) {
			ret = copy_items(trans, inode, dst_path, path,
					 &last_extent, ins_start_slot,
					 ins_nr, inode_only, logged_isize);
			if (ret < 0) {
				err = ret;
				goto out_unlock;
			}
			ret = 0;
			ins_nr = 0;
		}
		btrfs_release_path(path);
next_key:
		if (min_key.offset < (u64)-1) {
			min_key.offset++;
		} else if (min_key.type < max_key.type) {
			min_key.type++;
			min_key.offset = 0;
		} else {
			break;
		}
	}
	if (ins_nr) {
		ret = copy_items(trans, inode, dst_path, path, &last_extent,
				 ins_start_slot, ins_nr, inode_only,
				 logged_isize);
		if (ret < 0) {
			err = ret;
			goto out_unlock;
		}
		ret = 0;
		ins_nr = 0;
	}

	btrfs_release_path(path);
	btrfs_release_path(dst_path);
	err = btrfs_log_all_xattrs(trans, root, inode, path, dst_path);
	if (err)
		goto out_unlock;
	if (max_key.type >= BTRFS_EXTENT_DATA_KEY && !fast_search) {
		btrfs_release_path(path);
		btrfs_release_path(dst_path);
		err = btrfs_log_trailing_hole(trans, root, inode, path);
		if (err)
			goto out_unlock;
	}
log_extents:
	btrfs_release_path(path);
	btrfs_release_path(dst_path);
	if (need_log_inode_item) {
		err = log_inode_item(trans, log, dst_path, inode);
		if (err)
			goto out_unlock;
	}
	if (fast_search) {
		ret = btrfs_log_changed_extents(trans, root, inode, dst_path,
						&logged_list, ctx, start, end);
		if (ret) {
			err = ret;
			goto out_unlock;
		}
	} else if (inode_only == LOG_INODE_ALL) {
		struct extent_map *em, *n;

		write_lock(&em_tree->lock);
		/*
		 * We can't just remove every em if we're called for a ranged
		 * fsync - that is, one that doesn't cover the whole possible
		 * file range (0 to LLONG_MAX). This is because we can have
		 * em's that fall outside the range we're logging and therefore
		 * their ordered operations haven't completed yet
		 * (btrfs_finish_ordered_io() not invoked yet). This means we
		 * didn't get their respective file extent item in the fs/subvol
		 * tree yet, and need to let the next fast fsync (one which
		 * consults the list of modified extent maps) find the em so
		 * that it logs a matching file extent item and waits for the
		 * respective ordered operation to complete (if it's still
		 * running).
		 *
		 * Removing every em outside the range we're logging would make
		 * the next fast fsync not log their matching file extent items,
		 * therefore making us lose data after a log replay.
		 */
		list_for_each_entry_safe(em, n, &em_tree->modified_extents,
					 list) {
			const u64 mod_end = em->mod_start + em->mod_len - 1;

			if (em->mod_start >= start && mod_end <= end)
				list_del_init(&em->list);
		}
		write_unlock(&em_tree->lock);
	}

	if (inode_only == LOG_INODE_ALL && S_ISDIR(inode->vfs_inode.i_mode)) {
		ret = log_directory_changes(trans, root, inode, path, dst_path,
					    ctx);
		if (ret) {
			err = ret;
			goto out_unlock;
		}
	}

	spin_lock(&inode->lock);
	inode->logged_trans = trans->transid;
	inode->last_log_commit = inode->last_sub_trans;
	spin_unlock(&inode->lock);
out_unlock:
	if (unlikely(err))
		btrfs_put_logged_extents(&logged_list);
	else
		btrfs_submit_logged_extents(&logged_list, log);
	mutex_unlock(&inode->log_mutex);

	btrfs_free_path(path);
	btrfs_free_path(dst_path);
	return err;
}

/*
 * Check if we must fallback to a transaction commit when logging an inode.
 * This must be called after logging the inode and is used only in the context
 * when fsyncing an inode requires logging some other inode - in which case we
 * can't lock the i_mutex of each other inode we need to log, as that can lead
 * to deadlocks with concurrent fsync against other inodes (as we can log
 * inodes up or down in the hierarchy) or rename operations for example. So we
 * take the log_mutex of the inode after we have logged it and then check for
 * its last_unlink_trans value - this is safe because any task setting
 * last_unlink_trans must take the log_mutex and it must do this before it does
 * the actual unlink operation, so if we do this check before a concurrent task
 * sets last_unlink_trans it means we've logged a consistent version/state of
 * all the inode items, otherwise we are not sure and must do a transaction
 * commit (the concurrent task might have only updated last_unlink_trans before
 * we logged the inode or it might have also done the unlink).
 */
static bool btrfs_must_commit_transaction(struct btrfs_trans_handle *trans,
					  struct btrfs_inode *inode)
{
	struct btrfs_fs_info *fs_info = inode->root->fs_info;
	bool ret = false;

	mutex_lock(&inode->log_mutex);
	if (inode->last_unlink_trans > fs_info->last_trans_committed) {
		/*
		 * Make sure any commits to the log are forced to be full
		 * commits.
		 */
		btrfs_set_log_full_commit(fs_info, trans);
		ret = true;
	}
	mutex_unlock(&inode->log_mutex);

	return ret;
}

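/*
 * Timeline sketch of the race described above (our own illustration, not
 * from the original sources):
 *
 *        fsync task                        unlink/rename task
 *        ----------                        ------------------
 *   btrfs_log_inode(other inode)
 *                                          mutex_lock(log_mutex)
 *                                          last_unlink_trans = transid
 *                                          mutex_unlock(log_mutex)
 *                                          do the actual unlink
 *   btrfs_must_commit_transaction()
 *     takes log_mutex, sees the new
 *     last_unlink_trans -> full commit
 *
 * Had the check run before the concurrent task took the log_mutex, the
 * inode was logged in a consistent state and no full commit is needed.
 */
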
/*
 * follow the dentry parent pointers up the chain and see if any
 * of the directories in it require a full commit before they can
 * be logged. Returns zero if nothing special needs to be done or 1 if
 * a full commit is required.
 */
static noinline int check_parent_dirs_for_sync(struct btrfs_trans_handle *trans,
					       struct btrfs_inode *inode,
					       struct dentry *parent,
					       struct super_block *sb,
					       u64 last_committed)
{
	int ret = 0;
	struct dentry *old_parent = NULL;
	struct btrfs_inode *orig_inode = inode;

	/*
	 * for regular files, if its inode is already on disk, we don't
	 * have to worry about the parents at all. This is because
	 * we can use the last_unlink_trans field to record renames
	 * and other fun in this file.
	 */
	if (S_ISREG(inode->vfs_inode.i_mode) &&
	    inode->generation <= last_committed &&
	    inode->last_unlink_trans <= last_committed)
		goto out;

	if (!S_ISDIR(inode->vfs_inode.i_mode)) {
		if (!parent || d_really_is_negative(parent) || sb != parent->d_sb)
			goto out;
		inode = BTRFS_I(d_inode(parent));
	}

	while (1) {
		/*
		 * If we are logging a directory then we start with our inode,
		 * not our parent's inode, so we need to skip setting the
		 * logged_trans so that further down in the log code we don't
		 * think this inode has already been logged.
		 */
		if (inode != orig_inode)
			inode->logged_trans = trans->transid;
		smp_mb();

		if (btrfs_must_commit_transaction(trans, inode)) {
			ret = 1;
			break;
		}

		if (!parent || d_really_is_negative(parent) || sb != parent->d_sb)
			break;

		if (IS_ROOT(parent)) {
			inode = BTRFS_I(d_inode(parent));
			if (btrfs_must_commit_transaction(trans, inode))
				ret = 1;
			break;
		}

		parent = dget_parent(parent);
		dput(old_parent);
		old_parent = parent;
		inode = BTRFS_I(d_inode(parent));
	}
	dput(old_parent);
out:
	return ret;
}

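/*
 * Example walk performed by the helper above (illustrative only): for an
 * fsync of /mnt/a/b/c/file, the chain file -> c -> b -> a is visited, and
 * if any inode on it recorded an unlink or rename in a transaction that is
 * not yet committed (last_unlink_trans too recent), the fsync falls back
 * to a full transaction commit.
 */
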
struct btrfs_dir_list {
	u64 ino;
	struct list_head list;
};

/*
 * Log the inodes of the new dentries of a directory. See log_dir_items() for
 * details about why it is needed.
 * This is a recursive operation - if an existing dentry corresponds to a
 * directory, that directory's new entries are logged too (same behaviour as
 * ext3/4, xfs, f2fs, reiserfs, nilfs2). Note that when logging the inodes
 * the dentries point to we do not lock their i_mutex, otherwise lockdep
 * complains about the following circular lock dependency / possible deadlock:
 *
 *           CPU0                                        CPU1
 *           ----                                        ----
 * lock(&type->i_mutex_dir_key#3/2);
 *                                            lock(sb_internal#2);
 *                                            lock(&type->i_mutex_dir_key#3/2);
 * lock(&sb->s_type->i_mutex_key#14);
 *
 * Where sb_internal is the lock (a counter that works as a lock) acquired by
 * sb_start_intwrite() in btrfs_start_transaction().
 * Not locking i_mutex of the inodes is still safe because:
 *
 * 1) For regular files we log with a mode of LOG_INODE_EXISTS. It's possible
 *    that while logging the inode new references (names) are added or removed
 *    from the inode, leaving the logged inode item with a link count that does
 *    not match the number of logged inode reference items. This is fine because
 *    at log replay time we compute the real number of links and correct the
 *    link count in the inode item (see replay_one_buffer() and
 *    link_to_fixup_dir());
 *
 * 2) For directories we log with a mode of LOG_INODE_ALL. It's possible that
 *    while logging the inode's items new items with keys BTRFS_DIR_ITEM_KEY and
 *    BTRFS_DIR_INDEX_KEY are added to the fs/subvol tree and the logged inode
 *    item has a size that doesn't match the sum of the lengths of all the
 *    logged names. This does not result in a problem because if a dir_item key
 *    is logged but its matching dir_index key is not logged, at log replay time
 *    we don't use it to replay the respective name (see replay_one_name()). On
 *    the other hand if only the dir_index key ends up being logged, the
 *    respective name is added to the fs/subvol tree with both the dir_item and
 *    dir_index keys created (see replay_one_name()).
 *    The directory's inode item with a wrong i_size is not a problem as well,
 *    since we don't use it at log replay time to set the i_size in the inode
 *    item of the fs/subvol tree (see overwrite_item()).
 */
static int log_new_dir_dentries(struct btrfs_trans_handle *trans,
				struct btrfs_root *root,
				struct btrfs_inode *start_inode,
				struct btrfs_log_ctx *ctx)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_root *log = root->log_root;
	struct btrfs_path *path;
	LIST_HEAD(dir_list);
	struct btrfs_dir_list *dir_elem;
	int ret = 0;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	dir_elem = kmalloc(sizeof(*dir_elem), GFP_NOFS);
	if (!dir_elem) {
		btrfs_free_path(path);
		return -ENOMEM;
	}
	dir_elem->ino = btrfs_ino(start_inode);
	list_add_tail(&dir_elem->list, &dir_list);

	while (!list_empty(&dir_list)) {
		struct extent_buffer *leaf;
		struct btrfs_key min_key;
		int nritems;
		int i;

		dir_elem = list_first_entry(&dir_list, struct btrfs_dir_list,
					    list);
		if (ret)
			goto next_dir_inode;

		min_key.objectid = dir_elem->ino;
		min_key.type = BTRFS_DIR_ITEM_KEY;
		min_key.offset = 0;
again:
		btrfs_release_path(path);
		ret = btrfs_search_forward(log, &min_key, path, trans->transid);
		if (ret < 0) {
			goto next_dir_inode;
		} else if (ret > 0) {
			ret = 0;
			goto next_dir_inode;
		}

process_leaf:
		leaf = path->nodes[0];
		nritems = btrfs_header_nritems(leaf);
		for (i = path->slots[0]; i < nritems; i++) {
			struct btrfs_dir_item *di;
			struct btrfs_key di_key;
			struct inode *di_inode;
			struct btrfs_dir_list *new_dir_elem;
			int log_mode = LOG_INODE_EXISTS;
			int type;

			btrfs_item_key_to_cpu(leaf, &min_key, i);
			if (min_key.objectid != dir_elem->ino ||
			    min_key.type != BTRFS_DIR_ITEM_KEY)
				goto next_dir_inode;

			di = btrfs_item_ptr(leaf, i, struct btrfs_dir_item);
			type = btrfs_dir_type(leaf, di);
			if (btrfs_dir_transid(leaf, di) < trans->transid &&
			    type != BTRFS_FT_DIR)
				continue;
			btrfs_dir_item_key_to_cpu(leaf, di, &di_key);
			if (di_key.type == BTRFS_ROOT_ITEM_KEY)
				continue;

			btrfs_release_path(path);
			di_inode = btrfs_iget(fs_info->sb, &di_key, root, NULL);
			if (IS_ERR(di_inode)) {
				ret = PTR_ERR(di_inode);
				goto next_dir_inode;
			}

			if (btrfs_inode_in_log(BTRFS_I(di_inode), trans->transid)) {
				iput(di_inode);
				break;
			}

			ctx->log_new_dentries = false;
			if (type == BTRFS_FT_DIR || type == BTRFS_FT_SYMLINK)
				log_mode = LOG_INODE_ALL;
			ret = btrfs_log_inode(trans, root, BTRFS_I(di_inode),
					      log_mode, 0, LLONG_MAX, ctx);
			if (!ret &&
			    btrfs_must_commit_transaction(trans, BTRFS_I(di_inode)))
				ret = 1;
			iput(di_inode);
			if (ret)
				goto next_dir_inode;
			if (ctx->log_new_dentries) {
				new_dir_elem = kmalloc(sizeof(*new_dir_elem),
						       GFP_NOFS);
				if (!new_dir_elem) {
					ret = -ENOMEM;
					goto next_dir_inode;
				}
				new_dir_elem->ino = di_key.objectid;
				list_add_tail(&new_dir_elem->list, &dir_list);
			}
			break;
		}
		if (i == nritems) {
			ret = btrfs_next_leaf(log, path);
			if (ret < 0) {
				goto next_dir_inode;
			} else if (ret > 0) {
				ret = 0;
				goto next_dir_inode;
			}
			goto process_leaf;
		}
		if (min_key.offset < (u64)-1) {
			min_key.offset++;
			goto again;
		}
next_dir_inode:
		list_del(&dir_elem->list);
		kfree(dir_elem);
	}

	btrfs_free_path(path);
	return ret;
}

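/*
 * Illustrative sequence for the recursion above (not from the original
 * sources):
 *
 *   $ mkdir /mnt/dir
 *   $ mkdir /mnt/dir/subdir
 *   $ touch /mnt/dir/subdir/file
 *   $ xfs_io -c fsync /mnt/dir
 *
 * Logging "dir" finds the new dentry "subdir"; being a directory it is
 * logged with LOG_INODE_ALL and queued on dir_list, so its own new dentry
 * "file" gets logged as well - all without taking any i_mutex, for the
 * reasons given in the comment above.
 */
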
static int btrfs_log_all_parents(struct btrfs_trans_handle *trans,
				 struct btrfs_inode *inode,
				 struct btrfs_log_ctx *ctx)
{
	struct btrfs_fs_info *fs_info = btrfs_sb(inode->vfs_inode.i_sb);
	int ret;
	struct btrfs_path *path;
	struct btrfs_key key;
	struct btrfs_root *root = inode->root;
	const u64 ino = btrfs_ino(inode);

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;
	path->skip_locking = 1;
	path->search_commit_root = 1;

	key.objectid = ino;
	key.type = BTRFS_INODE_REF_KEY;
	key.offset = 0;
	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	if (ret < 0)
		goto out;

	while (true) {
		struct extent_buffer *leaf = path->nodes[0];
		int slot = path->slots[0];
		u32 cur_offset = 0;
		u32 item_size;
		unsigned long ptr;

		if (slot >= btrfs_header_nritems(leaf)) {
			ret = btrfs_next_leaf(root, path);
			if (ret < 0)
				goto out;
			else if (ret > 0)
				break;
			continue;
		}

		btrfs_item_key_to_cpu(leaf, &key, slot);
		/* BTRFS_INODE_EXTREF_KEY is BTRFS_INODE_REF_KEY + 1 */
		if (key.objectid != ino || key.type > BTRFS_INODE_EXTREF_KEY)
			break;

		item_size = btrfs_item_size_nr(leaf, slot);
		ptr = btrfs_item_ptr_offset(leaf, slot);
		while (cur_offset < item_size) {
			struct btrfs_key inode_key;
			struct inode *dir_inode;

			inode_key.type = BTRFS_INODE_ITEM_KEY;
			inode_key.offset = 0;

			if (key.type == BTRFS_INODE_EXTREF_KEY) {
				struct btrfs_inode_extref *extref;

				extref = (struct btrfs_inode_extref *)
					(ptr + cur_offset);
				inode_key.objectid = btrfs_inode_extref_parent(
					leaf, extref);
				cur_offset += sizeof(*extref);
				cur_offset += btrfs_inode_extref_name_len(leaf,
					extref);
			} else {
				inode_key.objectid = key.offset;
				cur_offset = item_size;
			}

			dir_inode = btrfs_iget(fs_info->sb, &inode_key,
					       root, NULL);
			/* If parent inode was deleted, skip it. */
			if (IS_ERR(dir_inode))
				continue;

			if (ctx)
				ctx->log_new_dentries = false;
			ret = btrfs_log_inode(trans, root, BTRFS_I(dir_inode),
					      LOG_INODE_ALL, 0, LLONG_MAX, ctx);
			if (!ret &&
			    btrfs_must_commit_transaction(trans, BTRFS_I(dir_inode)))
				ret = 1;
			if (!ret && ctx && ctx->log_new_dentries)
				ret = log_new_dir_dentries(trans, root,
						   BTRFS_I(dir_inode), ctx);
			iput(dir_inode);
			if (ret)
				goto out;
		}
		path->slots[0]++;
	}
	ret = 0;
out:
	btrfs_free_path(path);
	return ret;
}

/*
 * helper function around btrfs_log_inode to make sure newly created
 * parent directories also end up in the log. Only a minimal amount of
 * inode and backref logging is done for any parent directories that are
 * older than the last committed transaction.
 */
static int btrfs_log_inode_parent(struct btrfs_trans_handle *trans,
				  struct btrfs_root *root,
				  struct btrfs_inode *inode,
				  struct dentry *parent,
				  const loff_t start,
				  const loff_t end,
				  int exists_only,
				  struct btrfs_log_ctx *ctx)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	int inode_only = exists_only ? LOG_INODE_EXISTS : LOG_INODE_ALL;
	struct super_block *sb;
	struct dentry *old_parent = NULL;
	int ret = 0;
	u64 last_committed = fs_info->last_trans_committed;
	bool log_dentries = false;
	struct btrfs_inode *orig_inode = inode;

	sb = inode->vfs_inode.i_sb;

	if (btrfs_test_opt(fs_info, NOTREELOG)) {
		ret = 1;
		goto end_no_trans;
	}

	/*
	 * The previous transaction commit did not complete, so we must do a
	 * full commit ourselves.
	 */
	if (fs_info->last_trans_log_full_commit >
	    fs_info->last_trans_committed) {
		ret = 1;
		goto end_no_trans;
	}

	if (root != inode->root || btrfs_root_refs(&root->root_item) == 0) {
		ret = 1;
		goto end_no_trans;
	}

	ret = check_parent_dirs_for_sync(trans, inode, parent, sb,
					 last_committed);
	if (ret)
		goto end_no_trans;

	if (btrfs_inode_in_log(inode, trans->transid)) {
		ret = BTRFS_NO_LOG_SYNC;
		goto end_no_trans;
	}

	ret = start_log_trans(trans, root, ctx);
	if (ret)
		goto end_no_trans;

	ret = btrfs_log_inode(trans, root, inode, inode_only, start, end, ctx);
	if (ret)
		goto end_trans;

	/*
	 * for regular files, if its inode is already on disk, we don't
	 * have to worry about the parents at all. This is because
	 * we can use the last_unlink_trans field to record renames
	 * and other fun in this file.
	 */
	if (S_ISREG(inode->vfs_inode.i_mode) &&
	    inode->generation <= last_committed &&
	    inode->last_unlink_trans <= last_committed) {
		ret = 0;
		goto end_trans;
	}

	if (S_ISDIR(inode->vfs_inode.i_mode) && ctx && ctx->log_new_dentries)
		log_dentries = true;

	/*
	 * On unlink we must make sure all our current and old parent directory
	 * inodes are fully logged. This is to prevent leaving dangling
	 * directory index entries in directories that were our parents but are
	 * not anymore. Not doing this results in old parent directory being
	 * impossible to delete after log replay (rmdir will always fail with
	 * error -ENOTEMPTY).
	 *
	 * Example 1:
	 *
	 * mkdir testdir
	 * touch testdir/foo
	 * ln testdir/foo testdir/bar
	 * sync
	 * unlink testdir/bar
	 * xfs_io -c fsync testdir/foo
	 * <power failure>
	 * mount fs, triggers log replay
	 *
	 * If we don't log the parent directory (testdir), after log replay the
	 * directory still has an entry pointing to the file inode using the bar
	 * name, but a matching BTRFS_INODE_[REF|EXTREF]_KEY does not exist and
	 * the file inode has a link count of 1.
	 *
	 * Example 2:
	 *
	 * mkdir testdir
	 * touch foo
	 * ln foo testdir/foo2
	 * ln foo testdir/foo3
	 * sync
	 * unlink testdir/foo3
	 * xfs_io -c fsync foo
	 * <power failure>
	 * mount fs, triggers log replay
	 *
	 * Similar to the first example, after log replay the parent directory
	 * testdir still has an entry pointing to the file inode using the foo3
	 * name, but the file inode does not have a matching BTRFS_INODE_REF_KEY
	 * item and has a link count of 2.
	 */
	if (inode->last_unlink_trans > last_committed) {
		ret = btrfs_log_all_parents(trans, orig_inode, ctx);
		if (ret)
			goto end_trans;
	}

	while (1) {
		if (!parent || d_really_is_negative(parent) || sb != parent->d_sb)
			break;

		inode = BTRFS_I(d_inode(parent));
		if (root != inode->root)
			break;

		if (inode->generation > last_committed) {
			ret = btrfs_log_inode(trans, root, inode,
					LOG_INODE_EXISTS, 0, LLONG_MAX, ctx);
			if (ret)
				goto end_trans;
		}
		if (IS_ROOT(parent))
			break;

		parent = dget_parent(parent);
		dput(old_parent);
		old_parent = parent;
	}
	if (log_dentries)
		ret = log_new_dir_dentries(trans, root, orig_inode, ctx);
	else
		ret = 0;
end_trans:
	dput(old_parent);
	if (ret < 0) {
		btrfs_set_log_full_commit(fs_info, trans);
		ret = 1;
	}

	if (ret)
		btrfs_remove_log_ctx(root, ctx);
	btrfs_end_log_trans(root);
end_no_trans:
	return ret;
}

/*
 * it is not safe to log dentry if the chunk root has added new
 * chunks. This returns 0 if the dentry was logged, and 1 otherwise.
 * If this returns 1, you must commit the transaction to safely get your
 * data on disk.
 */
int btrfs_log_dentry_safe(struct btrfs_trans_handle *trans,
			  struct btrfs_root *root, struct dentry *dentry,
			  const loff_t start,
			  const loff_t end,
			  struct btrfs_log_ctx *ctx)
{
	struct dentry *parent = dget_parent(dentry);
	int ret;

	ret = btrfs_log_inode_parent(trans, root, BTRFS_I(d_inode(dentry)),
				     parent, start, end, 0, ctx);
	dput(parent);

	return ret;
}

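/*
 * Usage sketch (hypothetical caller, modeled on the fsync path; the exact
 * error handling is an assumption):
 *
 *	ret = btrfs_log_dentry_safe(trans, root, dentry, start, end, &ctx);
 *	if (ret == 0)
 *		ret = btrfs_sync_log(trans, root, &ctx);  // write out log tree
 *	else if (ret != BTRFS_NO_LOG_SYNC)
 *		ret = btrfs_commit_transaction(trans);    // fall back to commit
 */
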
/*
 * should be called during mount to recover and replay any log trees
 * from the FS
 */
int btrfs_recover_log_trees(struct btrfs_root *log_root_tree)
{
	int ret;
	struct btrfs_path *path;
	struct btrfs_trans_handle *trans;
	struct btrfs_key key;
	struct btrfs_key found_key;
	struct btrfs_key tmp_key;
	struct btrfs_root *log;
	struct btrfs_fs_info *fs_info = log_root_tree->fs_info;
	struct walk_control wc = {
		.process_func = process_one_buffer,
		.stage = 0,
	};

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	set_bit(BTRFS_FS_LOG_RECOVERING, &fs_info->flags);

	trans = btrfs_start_transaction(fs_info->tree_root, 0);
	if (IS_ERR(trans)) {
		ret = PTR_ERR(trans);
		goto error;
	}

	wc.trans = trans;
	wc.pin = 1;

	ret = walk_log_tree(trans, log_root_tree, &wc);
	if (ret) {
		btrfs_handle_fs_error(fs_info, ret,
			"Failed to pin buffers while recovering log root tree.");
		goto error;
	}

again:
	key.objectid = BTRFS_TREE_LOG_OBJECTID;
	key.offset = (u64)-1;
	key.type = BTRFS_ROOT_ITEM_KEY;

	while (1) {
		ret = btrfs_search_slot(NULL, log_root_tree, &key, path, 0, 0);

		if (ret < 0) {
			btrfs_handle_fs_error(fs_info, ret,
				    "Couldn't find tree log root.");
			goto error;
		}
		if (ret > 0) {
			if (path->slots[0] == 0)
				break;
			path->slots[0]--;
		}
		btrfs_item_key_to_cpu(path->nodes[0], &found_key,
				      path->slots[0]);
		btrfs_release_path(path);
		if (found_key.objectid != BTRFS_TREE_LOG_OBJECTID)
			break;

		log = btrfs_read_fs_root(log_root_tree, &found_key);
		if (IS_ERR(log)) {
			ret = PTR_ERR(log);
			btrfs_handle_fs_error(fs_info, ret,
				    "Couldn't read tree log root.");
			goto error;
		}

		tmp_key.objectid = found_key.offset;
		tmp_key.type = BTRFS_ROOT_ITEM_KEY;
		tmp_key.offset = (u64)-1;

		wc.replay_dest = btrfs_read_fs_root_no_name(fs_info, &tmp_key);
		if (IS_ERR(wc.replay_dest)) {
			ret = PTR_ERR(wc.replay_dest);
			free_extent_buffer(log->node);
			free_extent_buffer(log->commit_root);
			kfree(log);
			btrfs_handle_fs_error(fs_info, ret,
				"Couldn't read target root for tree log recovery.");
			goto error;
		}

		wc.replay_dest->log_root = log;
		btrfs_record_root_in_trans(trans, wc.replay_dest);
		ret = walk_log_tree(trans, log, &wc);

		if (!ret && wc.stage == LOG_WALK_REPLAY_ALL) {
			ret = fixup_inode_link_counts(trans, wc.replay_dest,
						      path);
		}

		key.offset = found_key.offset - 1;
		wc.replay_dest->log_root = NULL;
		free_extent_buffer(log->node);
		free_extent_buffer(log->commit_root);
		kfree(log);

		if (ret)
			goto error;

		if (found_key.offset == 0)
			break;
	}
	btrfs_release_path(path);

	/* step one is to pin it all, step two is to replay just inodes */
	if (wc.pin) {
		wc.pin = 0;
		wc.process_func = replay_one_buffer;
		wc.stage = LOG_WALK_REPLAY_INODES;
		goto again;
	}
	/* step three is to replay everything */
	if (wc.stage < LOG_WALK_REPLAY_ALL) {
		wc.stage++;
		goto again;
	}

	btrfs_free_path(path);

	/* step 4: commit the transaction, which also unpins the blocks */
	ret = btrfs_commit_transaction(trans);
	if (ret)
		return ret;

	free_extent_buffer(log_root_tree->node);
	log_root_tree->log_root = NULL;
	clear_bit(BTRFS_FS_LOG_RECOVERING, &fs_info->flags);
	kfree(log_root_tree);

	return 0;
error:
	if (wc.trans)
		btrfs_end_transaction(wc.trans);
	btrfs_free_path(path);
	return ret;
}

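/*
 * Usage note (our summary; the call chain is an assumption based on how
 * this is wired up elsewhere): this is normally reached during mount when
 * the superblock still points at a log root (btrfs_super_log_root() != 0),
 * e.g. via btrfs_replay_log() in disk-io.c. It repeatedly walks each log
 * tree with walk_log_tree(), advancing wc.stage as the steps noted above
 * complete, and finishes with a transaction commit that unpins the blocks.
 */
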
/*
 * there are some corner cases where we want to force a full
 * commit instead of allowing a directory to be logged.
 *
 * They revolve around files that were unlinked from the directory, and
 * this function updates the parent directory so that a full commit is
 * properly done if it is fsync'd later after the unlinks are done.
 *
 * Must be called before the unlink operations (updates to the subvolume tree,
 * inodes, etc) are done.
 */
void btrfs_record_unlink_dir(struct btrfs_trans_handle *trans,
			     struct btrfs_inode *dir, struct btrfs_inode *inode,
			     int for_rename)
{
	/*
	 * when we're logging a file, if it hasn't been renamed
	 * or unlinked, and its inode is fully committed on disk,
	 * we don't have to worry about walking up the directory chain
	 * to log its parents.
	 *
	 * So, we use the last_unlink_trans field to put this transid
	 * into the file. When the file is logged we check it and
	 * don't log the parents if the file is fully on disk.
	 */
	mutex_lock(&inode->log_mutex);
	inode->last_unlink_trans = trans->transid;
	mutex_unlock(&inode->log_mutex);

	/*
	 * if this directory was already logged any new
	 * names for this file/dir will get recorded
	 */
	smp_mb();
	if (dir->logged_trans == trans->transid)
		return;

	/*
	 * if the inode we're about to unlink was logged,
	 * the log will be properly updated for any new names
	 */
	if (inode->logged_trans == trans->transid)
		return;

	/*
	 * when renaming files across directories, if the directory
	 * we're unlinking from gets fsync'd later on, there's
	 * no way to find the destination directory later and fsync it
	 * properly. So, we have to be conservative and force commits
	 * so the new name gets discovered.
	 */
	if (for_rename)
		goto record;

	/* we can safely do the unlink without any special recording */
	return;

record:
	mutex_lock(&dir->log_mutex);
	dir->last_unlink_trans = trans->transid;
	mutex_unlock(&dir->log_mutex);
}

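/*
 * Illustrative sequence (not from the original sources):
 *
 *   $ touch dir1/foo
 *   $ sync
 *   $ mv dir1/foo dir2/foo       # unlink side runs with for_rename == 1
 *   $ xfs_io -c fsync dir1
 *
 * The rename records the transid in dir1->last_unlink_trans, so the later
 * fsync involving dir1 forces a full transaction commit; otherwise replay
 * could not discover the file's new location under dir2.
 */
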
/*
 * Make sure that if someone attempts to fsync the parent directory of a deleted
 * snapshot, it ends up triggering a transaction commit. This is to guarantee
 * that after replaying the log tree of the parent directory's root we will not
 * see the snapshot anymore and at log replay time we will not see any log tree
 * corresponding to the deleted snapshot's root, which could lead to replaying
 * it after replaying the log tree of the parent directory (which would replay
 * the snapshot delete operation).
 *
 * Must be called before the actual snapshot destroy operation (updates to the
 * parent root and tree of tree roots trees, etc) are done.
 */
void btrfs_record_snapshot_destroy(struct btrfs_trans_handle *trans,
				   struct btrfs_inode *dir)
{
	mutex_lock(&dir->log_mutex);
	dir->last_unlink_trans = trans->transid;
	mutex_unlock(&dir->log_mutex);
}

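/*
 * Illustrative sequence (our own sketch; the ioctl call site is an
 * assumption):
 *
 *   $ btrfs subvolume snapshot /mnt /mnt/snap
 *   $ sync
 *   $ btrfs subvolume delete /mnt/snap    # calls this on /mnt's inode
 *   $ xfs_io -c fsync /mnt
 *
 * Bumping last_unlink_trans on the parent directory makes the fsync fall
 * back to a full transaction commit, so replay can never resurrect the
 * deleted snapshot's directory entry.
 */
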
/*
 * Call this after adding a new name for a file and it will properly
 * update the log to reflect the new name.
 *
 * It will return zero if all goes well, and it will return 1 if a
 * full transaction commit is required.
 */
int btrfs_log_new_name(struct btrfs_trans_handle *trans,
			struct btrfs_inode *inode, struct btrfs_inode *old_dir,
			struct dentry *parent)
{
	struct btrfs_fs_info *fs_info = btrfs_sb(inode->vfs_inode.i_sb);
	struct btrfs_root *root = inode->root;

	/*
	 * this will force the logging code to walk the dentry chain
	 * up to the root
	 */
	if (S_ISREG(inode->vfs_inode.i_mode))
		inode->last_unlink_trans = trans->transid;

	/*
	 * if this inode hasn't been logged and the directory we're renaming
	 * it from hasn't been logged, we don't need to log it
	 */
	if (inode->logged_trans <= fs_info->last_trans_committed &&
	    (!old_dir || old_dir->logged_trans <= fs_info->last_trans_committed))
		return 0;

	return btrfs_log_inode_parent(trans, root, inode, parent, 0,
				      LLONG_MAX, 1, NULL);
}
