/*
 * Copyright (C) 2007 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 021110-1307, USA.
 */
#include <linux/fs.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/writeback.h>
#include <linux/pagemap.h>
#include <linux/blkdev.h>
#include <linux/uuid.h>
#include "ctree.h"
#include "disk-io.h"
#include "transaction.h"
#include "locking.h"
#include "tree-log.h"
#include "inode-map.h"
#include "volumes.h"
#define BTRFS_ROOT_TRANS_TAG 0
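/*
 * A btrfs_transaction is reference counted: join_transaction() creates it
 * with use_count == 2 (one reference for the joining handle, one that
 * lives until the commit finishes) and every later joiner takes one more.
 * put_transaction() drops a reference and frees the struct when the count
 * reaches zero.
 */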
void put_transaction(struct btrfs_transaction *transaction)
{
	WARN_ON(atomic_read(&transaction->use_count) == 0);
	if (atomic_dec_and_test(&transaction->use_count)) {
		BUG_ON(!list_empty(&transaction->list));
		WARN_ON(transaction->delayed_refs.root.rb_node);
		memset(transaction, 0, sizeof(*transaction));
		kmem_cache_free(btrfs_transaction_cachep, transaction);
	}
}
static noinline void switch_commit_root(struct btrfs_root *root)
{
	free_extent_buffer(root->commit_root);
	root->commit_root = btrfs_root_node(root);
}
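/*
 * switch_commit_root() frees the old commit root and caches the current
 * root node as the new one.  It is only called at commit time, when the
 * root's node is stable; for the extent root this happens under
 * extent_commit_sem in commit_cowonly_roots().
 */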
/*
 * either allocate a new transaction or hop into the existing one
 */
static noinline int join_transaction(struct btrfs_root *root, int type)
{
	struct btrfs_transaction *cur_trans;
	struct btrfs_fs_info *fs_info = root->fs_info;

	spin_lock(&fs_info->trans_lock);
loop:
	/* The file system has been taken offline. No new transactions. */
	if (fs_info->fs_state & BTRFS_SUPER_FLAG_ERROR) {
		spin_unlock(&fs_info->trans_lock);
		return -EROFS;
	}

	if (fs_info->trans_no_join) {
		/*
		 * If we are JOIN_NOLOCK we're already committing a current
		 * transaction, we just need a handle to deal with something
		 * when committing the transaction, such as inode cache and
		 * space cache. It is a special case.
		 */
		if (type != TRANS_JOIN_NOLOCK) {
			spin_unlock(&fs_info->trans_lock);
			return -EBUSY;
		}
	}

	cur_trans = fs_info->running_transaction;
	if (cur_trans) {
		if (cur_trans->aborted) {
			spin_unlock(&fs_info->trans_lock);
			return cur_trans->aborted;
		}
		atomic_inc(&cur_trans->use_count);
		atomic_inc(&cur_trans->num_writers);
		cur_trans->num_joined++;
		spin_unlock(&fs_info->trans_lock);
		return 0;
	}
	spin_unlock(&fs_info->trans_lock);

	/*
	 * If we are ATTACH, we just want to catch the current transaction,
	 * and commit it. If there is no transaction, just return ENOENT.
	 */
	if (type == TRANS_ATTACH)
		return -ENOENT;

	cur_trans = kmem_cache_alloc(btrfs_transaction_cachep, GFP_NOFS);
	if (!cur_trans)
		return -ENOMEM;

	spin_lock(&fs_info->trans_lock);
	if (fs_info->running_transaction) {
		/*
		 * someone started a transaction after we unlocked.  Make sure
		 * to redo the trans_no_join checks above
		 */
		kmem_cache_free(btrfs_transaction_cachep, cur_trans);
		cur_trans = fs_info->running_transaction;
		goto loop;
	} else if (fs_info->fs_state & BTRFS_SUPER_FLAG_ERROR) {
		spin_unlock(&fs_info->trans_lock);
		kmem_cache_free(btrfs_transaction_cachep, cur_trans);
		return -EROFS;
	}

	atomic_set(&cur_trans->num_writers, 1);
	cur_trans->num_joined = 0;
	init_waitqueue_head(&cur_trans->writer_wait);
	init_waitqueue_head(&cur_trans->commit_wait);
	cur_trans->in_commit = 0;
	cur_trans->blocked = 0;
	/*
	 * One for this trans handle, one so it will live on until we
	 * commit the transaction.
	 */
	atomic_set(&cur_trans->use_count, 2);
	cur_trans->commit_done = 0;
	cur_trans->start_time = get_seconds();

	cur_trans->delayed_refs.root = RB_ROOT;
	cur_trans->delayed_refs.num_entries = 0;
	cur_trans->delayed_refs.num_heads_ready = 0;
	cur_trans->delayed_refs.num_heads = 0;
	cur_trans->delayed_refs.flushing = 0;
	cur_trans->delayed_refs.run_delayed_start = 0;

	/*
	 * although the tree mod log is per file system and not per transaction,
	 * the log must never go across transaction boundaries.
	 */
	smp_mb();
	if (!list_empty(&fs_info->tree_mod_seq_list))
		WARN(1, KERN_ERR "btrfs: tree_mod_seq_list not empty when "
			"creating a fresh transaction\n");
	if (!RB_EMPTY_ROOT(&fs_info->tree_mod_log))
		WARN(1, KERN_ERR "btrfs: tree_mod_log rb tree not empty when "
			"creating a fresh transaction\n");
	atomic_set(&fs_info->tree_mod_seq, 0);

	spin_lock_init(&cur_trans->commit_lock);
	spin_lock_init(&cur_trans->delayed_refs.lock);

	INIT_LIST_HEAD(&cur_trans->pending_snapshots);
	list_add_tail(&cur_trans->list, &fs_info->trans_list);
	extent_io_tree_init(&cur_trans->dirty_pages,
			     fs_info->btree_inode->i_mapping);
	fs_info->generation++;
	cur_trans->transid = fs_info->generation;
	fs_info->running_transaction = cur_trans;
	cur_trans->aborted = 0;
	spin_unlock(&fs_info->trans_lock);

	return 0;
}
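/*
 * join_transaction() returns -EBUSY while trans_no_join blocks new
 * joiners, so callers retry, waiting for the blocked transaction to move
 * on.  This is the loop start_transaction() below uses:
 *
 *	do {
 *		ret = join_transaction(root, type);
 *		if (ret == -EBUSY)
 *			wait_current_trans(root);
 *	} while (ret == -EBUSY);
 */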
/*
 * this does all the record keeping required to make sure that a reference
 * counted root is properly recorded in a given transaction.  This is required
 * to make sure the old root from before we joined the transaction is deleted
 * when the transaction commits
 */
static int record_root_in_trans(struct btrfs_trans_handle *trans,
			       struct btrfs_root *root)
{
	if (root->ref_cows && root->last_trans < trans->transid) {
		WARN_ON(root == root->fs_info->extent_root);
		WARN_ON(root->commit_root != root->node);

		/*
		 * see below for in_trans_setup usage rules
		 * we have the reloc mutex held now, so there
		 * is only one writer in this function
		 */
		root->in_trans_setup = 1;

		/* make sure readers find in_trans_setup before
		 * they find our root->last_trans update
		 */
		smp_wmb();

		spin_lock(&root->fs_info->fs_roots_radix_lock);
		if (root->last_trans == trans->transid) {
			spin_unlock(&root->fs_info->fs_roots_radix_lock);
			return 0;
		}
		radix_tree_tag_set(&root->fs_info->fs_roots_radix,
			   (unsigned long)root->root_key.objectid,
			   BTRFS_ROOT_TRANS_TAG);
		spin_unlock(&root->fs_info->fs_roots_radix_lock);
		root->last_trans = trans->transid;

		/* this is pretty tricky.  We don't want to
		 * take the relocation lock in btrfs_record_root_in_trans
		 * unless we're really doing the first setup for this root in
		 * this transaction.
		 *
		 * Normally we'd use root->last_trans as a flag to decide
		 * if we want to take the expensive mutex.
		 *
		 * But, we have to set root->last_trans before we
		 * init the relocation root, otherwise, we trip over warnings
		 * in ctree.c.  The solution used here is to flag ourselves
		 * with root->in_trans_setup.  When this is 1, we're still
		 * fixing up the reloc trees and everyone must wait.
		 *
		 * When this is zero, they can trust root->last_trans and fly
		 * through btrfs_record_root_in_trans without having to take the
		 * lock.  smp_wmb() makes sure that all the writes above are
		 * done before we pop in the zero below
		 */
		btrfs_init_reloc_root(trans, root);
		smp_wmb();
		root->in_trans_setup = 0;
	}
	return 0;
}
int btrfs_record_root_in_trans(struct btrfs_trans_handle *trans,
			       struct btrfs_root *root)
{
	if (!root->ref_cows)
		return 0;

	/*
	 * see record_root_in_trans for comments about in_trans_setup usage
	 * and barriers
	 */
	smp_rmb();
	if (root->last_trans == trans->transid &&
	    !root->in_trans_setup)
		return 0;

	mutex_lock(&root->fs_info->reloc_mutex);
	record_root_in_trans(trans, root);
	mutex_unlock(&root->fs_info->reloc_mutex);

	return 0;
}
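/*
 * The smp_rmb() in btrfs_record_root_in_trans() pairs with the smp_wmb()
 * calls in record_root_in_trans(): a reader that observes the updated
 * root->last_trans is guaranteed to also observe in_trans_setup == 1
 * until the reloc root setup is complete.
 */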
/* wait for commit against the current transaction to become unblocked
 * when this is done, it is safe to start a new transaction, but the current
 * transaction might not be fully on disk.
 */
static void wait_current_trans(struct btrfs_root *root)
{
	struct btrfs_transaction *cur_trans;

	spin_lock(&root->fs_info->trans_lock);
	cur_trans = root->fs_info->running_transaction;
	if (cur_trans && cur_trans->blocked) {
		atomic_inc(&cur_trans->use_count);
		spin_unlock(&root->fs_info->trans_lock);

		wait_event(root->fs_info->transaction_wait,
			   !cur_trans->blocked);
		put_transaction(cur_trans);
	} else {
		spin_unlock(&root->fs_info->trans_lock);
	}
}
static int may_wait_transaction(struct btrfs_root *root, int type)
{
	if (root->fs_info->log_root_recovering)
		return 0;

	if (type == TRANS_USERSPACE)
		return 1;

	if (type == TRANS_START &&
	    !atomic_read(&root->fs_info->open_ioctl_trans))
		return 1;

	return 0;
}
static struct btrfs_trans_handle *
start_transaction(struct btrfs_root *root, u64 num_items, int type,
		  enum btrfs_reserve_flush_enum flush)
{
	struct btrfs_trans_handle *h;
	struct btrfs_transaction *cur_trans;
	u64 num_bytes = 0;
	int ret;
	u64 qgroup_reserved = 0;

	if (root->fs_info->fs_state & BTRFS_SUPER_FLAG_ERROR)
		return ERR_PTR(-EROFS);

	if (current->journal_info) {
		WARN_ON(type != TRANS_JOIN && type != TRANS_JOIN_NOLOCK);
		h = current->journal_info;
		h->use_count++;
		WARN_ON(h->use_count > 2);
		h->orig_rsv = h->block_rsv;
		h->block_rsv = NULL;
		goto got_it;
	}

	/*
	 * Do the reservation before we join the transaction so we can do all
	 * the appropriate flushing if need be.
	 */
	if (num_items > 0 && root != root->fs_info->chunk_root) {
		if (root->fs_info->quota_enabled &&
		    is_fstree(root->root_key.objectid)) {
			qgroup_reserved = num_items * root->leafsize;
			ret = btrfs_qgroup_reserve(root, qgroup_reserved);
			if (ret)
				return ERR_PTR(ret);
		}

		num_bytes = btrfs_calc_trans_metadata_size(root, num_items);
		ret = btrfs_block_rsv_add(root,
					  &root->fs_info->trans_block_rsv,
					  num_bytes, flush);
		if (ret)
			return ERR_PTR(ret);
	}
again:
	h = kmem_cache_alloc(btrfs_trans_handle_cachep, GFP_NOFS);
	if (!h)
		return ERR_PTR(-ENOMEM);

	/*
	 * If we are JOIN_NOLOCK we're already committing a transaction and
	 * waiting on this guy, so we don't need to do the sb_start_intwrite
	 * because we're already holding a ref.  We need this because we could
	 * have raced in and did an fsync() on a file which can kick a commit
	 * and then we deadlock with somebody doing a freeze.
	 *
	 * If we are ATTACH, it means we just want to catch the current
	 * transaction and commit it, so we needn't do sb_start_intwrite().
	 */
	if (type < TRANS_JOIN_NOLOCK)
		sb_start_intwrite(root->fs_info->sb);

	if (may_wait_transaction(root, type))
		wait_current_trans(root);

	do {
		ret = join_transaction(root, type);
		if (ret == -EBUSY)
			wait_current_trans(root);
	} while (ret == -EBUSY);

	if (ret < 0) {
		/* We must get the transaction if we are JOIN_NOLOCK. */
		BUG_ON(type == TRANS_JOIN_NOLOCK);

		if (type < TRANS_JOIN_NOLOCK)
			sb_end_intwrite(root->fs_info->sb);
		kmem_cache_free(btrfs_trans_handle_cachep, h);
		return ERR_PTR(ret);
	}

	cur_trans = root->fs_info->running_transaction;

	h->transid = cur_trans->transid;
	h->transaction = cur_trans;
	h->blocks_used = 0;
	h->bytes_reserved = 0;
	h->root = root;
	h->delayed_ref_updates = 0;
	h->use_count = 1;
	h->adding_csums = 0;
	h->block_rsv = NULL;
	h->orig_rsv = NULL;
	h->aborted = 0;
	h->qgroup_reserved = qgroup_reserved;
	h->delayed_ref_elem.seq = 0;
	h->type = type;
	INIT_LIST_HEAD(&h->qgroup_ref_list);
	INIT_LIST_HEAD(&h->new_bgs);

	smp_mb();
	if (cur_trans->blocked && may_wait_transaction(root, type)) {
		btrfs_commit_transaction(h, root);
		goto again;
	}

	if (num_bytes) {
		trace_btrfs_space_reservation(root->fs_info, "transaction",
					      h->transid, num_bytes, 1);
		h->block_rsv = &root->fs_info->trans_block_rsv;
		h->bytes_reserved = num_bytes;
	}

got_it:
	btrfs_record_root_in_trans(h, root);

	if (!current->journal_info && type != TRANS_USERSPACE)
		current->journal_info = h;
	return h;
}
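/*
 * The wrappers below pick one of the transaction types handled by
 * start_transaction(): TRANS_START and TRANS_USERSPACE may wait for or
 * create a new transaction, TRANS_JOIN and TRANS_JOIN_NOLOCK hop into the
 * running one, and TRANS_ATTACH only catches a transaction that already
 * exists (join_transaction() returns -ENOENT otherwise).
 */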
struct btrfs_trans_handle *btrfs_start_transaction(struct btrfs_root *root,
						   int num_items)
{
	return start_transaction(root, num_items, TRANS_START,
				 BTRFS_RESERVE_FLUSH_ALL);
}

struct btrfs_trans_handle *btrfs_start_transaction_lflush(
					struct btrfs_root *root, int num_items)
{
	return start_transaction(root, num_items, TRANS_START,
				 BTRFS_RESERVE_FLUSH_LIMIT);
}

struct btrfs_trans_handle *btrfs_join_transaction(struct btrfs_root *root)
{
	return start_transaction(root, 0, TRANS_JOIN, 0);
}

struct btrfs_trans_handle *btrfs_join_transaction_nolock(struct btrfs_root *root)
{
	return start_transaction(root, 0, TRANS_JOIN_NOLOCK, 0);
}

struct btrfs_trans_handle *btrfs_start_ioctl_transaction(struct btrfs_root *root)
{
	return start_transaction(root, 0, TRANS_USERSPACE, 0);
}

struct btrfs_trans_handle *btrfs_attach_transaction(struct btrfs_root *root)
{
	return start_transaction(root, 0, TRANS_ATTACH, 0);
}
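/*
 * A minimal sketch of the usual caller pattern for these helpers (error
 * handling trimmed):
 *
 *	struct btrfs_trans_handle *trans;
 *
 *	trans = btrfs_start_transaction(root, num_items);
 *	if (IS_ERR(trans))
 *		return PTR_ERR(trans);
 *	... modify the trees ...
 *	btrfs_end_transaction(trans, root);
 */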
/* wait for a transaction commit to be fully complete */
static noinline void wait_for_commit(struct btrfs_root *root,
				    struct btrfs_transaction *commit)
{
	wait_event(commit->commit_wait, commit->commit_done);
}
int btrfs_wait_for_commit(struct btrfs_root *root, u64 transid)
{
	struct btrfs_transaction *cur_trans = NULL, *t;
	int ret;

	ret = 0;
	if (transid) {
		if (transid <= root->fs_info->last_trans_committed)
			goto out;

		/* find specified transaction */
		spin_lock(&root->fs_info->trans_lock);
		list_for_each_entry(t, &root->fs_info->trans_list, list) {
			if (t->transid == transid) {
				cur_trans = t;
				atomic_inc(&cur_trans->use_count);
				break;
			}
			if (t->transid > transid)
				break;
		}
		spin_unlock(&root->fs_info->trans_lock);
		ret = -EINVAL;
		if (!cur_trans)
			goto out;  /* bad transid */
	} else {
		/* find newest transaction that is committing | committed */
		spin_lock(&root->fs_info->trans_lock);
		list_for_each_entry_reverse(t, &root->fs_info->trans_list,
					    list) {
			if (t->in_commit) {
				if (t->commit_done)
					break;
				cur_trans = t;
				atomic_inc(&cur_trans->use_count);
				break;
			}
		}
		spin_unlock(&root->fs_info->trans_lock);
		if (!cur_trans)
			goto out;  /* nothing committing|committed */
	}

	wait_for_commit(root, cur_trans);

	put_transaction(cur_trans);
	ret = 0;
out:
	return ret;
}
void btrfs_throttle(struct btrfs_root *root)
{
	if (!atomic_read(&root->fs_info->open_ioctl_trans))
		wait_current_trans(root);
}
static int should_end_transaction(struct btrfs_trans_handle *trans,
				  struct btrfs_root *root)
{
	int ret;

	ret = btrfs_block_rsv_check(root, &root->fs_info->global_block_rsv, 5);
	return ret ? 1 : 0;
}
int btrfs_should_end_transaction(struct btrfs_trans_handle *trans,
				 struct btrfs_root *root)
{
	struct btrfs_transaction *cur_trans = trans->transaction;
	int updates;
	int err;

	smp_mb();
	if (cur_trans->blocked || cur_trans->delayed_refs.flushing)
		return 1;

	updates = trans->delayed_ref_updates;
	trans->delayed_ref_updates = 0;
	if (updates) {
		err = btrfs_run_delayed_refs(trans, root, updates);
		if (err) /* Error code will also eval true */
			return err;
	}

	return should_end_transaction(trans, root);
}
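/*
 * should_end_transaction() is a size heuristic: it asks
 * btrfs_block_rsv_check() whether the global block reserve has dropped
 * below a factor-5 threshold (assuming div_factor() semantics, half of
 * the reserve's target size).  btrfs_should_end_transaction() first
 * flushes this handle's accumulated delayed ref updates so the check
 * reflects the metadata the handle has queued.
 */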
static int __btrfs_end_transaction(struct btrfs_trans_handle *trans,
			  struct btrfs_root *root, int throttle)
{
	struct btrfs_transaction *cur_trans = trans->transaction;
	struct btrfs_fs_info *info = root->fs_info;
	int count = 0;
	int lock = (trans->type != TRANS_JOIN_NOLOCK);
	int err = 0;

	if (--trans->use_count) {
		trans->block_rsv = trans->orig_rsv;
		return 0;
	}

	/*
	 * do the qgroup accounting as early as possible
	 */
	err = btrfs_delayed_refs_qgroup_accounting(trans, info);

	btrfs_trans_release_metadata(trans, root);
	trans->block_rsv = NULL;
	/*
	 * the same root has to be passed to start_transaction and
	 * end_transaction. Subvolume quota depends on this.
	 */
	WARN_ON(trans->root != root);

	if (trans->qgroup_reserved) {
		btrfs_qgroup_free(root, trans->qgroup_reserved);
		trans->qgroup_reserved = 0;
	}

	if (!list_empty(&trans->new_bgs))
		btrfs_create_pending_block_groups(trans, root);

	while (count < 2) {
		unsigned long cur = trans->delayed_ref_updates;
		trans->delayed_ref_updates = 0;
		if (cur &&
		    trans->transaction->delayed_refs.num_heads_ready > 64) {
			trans->delayed_ref_updates = 0;
			btrfs_run_delayed_refs(trans, root, cur);
		} else {
			break;
		}
		count++;
	}
	btrfs_trans_release_metadata(trans, root);
	trans->block_rsv = NULL;

	if (!list_empty(&trans->new_bgs))
		btrfs_create_pending_block_groups(trans, root);

	if (lock && !atomic_read(&root->fs_info->open_ioctl_trans) &&
	    should_end_transaction(trans, root)) {
		trans->transaction->blocked = 1;
		smp_wmb();
	}

	if (lock && cur_trans->blocked && !cur_trans->in_commit) {
		if (throttle) {
			/*
			 * We may race with somebody else here so end up having
			 * to call end_transaction on ourselves again, so inc
			 * our use_count.
			 */
			trans->use_count++;
			return btrfs_commit_transaction(trans, root);
		} else {
			wake_up_process(info->transaction_kthread);
		}
	}

	if (trans->type < TRANS_JOIN_NOLOCK)
		sb_end_intwrite(root->fs_info->sb);

	WARN_ON(cur_trans != info->running_transaction);
	WARN_ON(atomic_read(&cur_trans->num_writers) < 1);
	atomic_dec(&cur_trans->num_writers);

	smp_mb();
	if (waitqueue_active(&cur_trans->writer_wait))
		wake_up(&cur_trans->writer_wait);
	put_transaction(cur_trans);

	if (current->journal_info == trans)
		current->journal_info = NULL;

	if (throttle)
		btrfs_run_delayed_iputs(root);

	if (trans->aborted ||
	    root->fs_info->fs_state & BTRFS_SUPER_FLAG_ERROR) {
		err = -EIO;
	}
	assert_qgroups_uptodate(trans);

	memset(trans, 0, sizeof(*trans));
	kmem_cache_free(btrfs_trans_handle_cachep, trans);
	return err;
}
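/*
 * Both put_transaction() and __btrfs_end_transaction() memset() the
 * object to zero immediately before returning it to its slab cache,
 * which helps catch use-after-free: a stale pointer trips over NULL
 * fields instead of silently reading recycled memory.
 */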
int btrfs_end_transaction(struct btrfs_trans_handle *trans,
			  struct btrfs_root *root)
{
	int ret;

	ret = __btrfs_end_transaction(trans, root, 0);
	if (ret)
		return ret;
	return 0;
}

int btrfs_end_transaction_throttle(struct btrfs_trans_handle *trans,
				   struct btrfs_root *root)
{
	int ret;

	ret = __btrfs_end_transaction(trans, root, 1);
	if (ret)
		return ret;
	return 0;
}

int btrfs_end_transaction_dmeta(struct btrfs_trans_handle *trans,
				struct btrfs_root *root)
{
	return __btrfs_end_transaction(trans, root, 1);
}
/*
 * when btree blocks are allocated, they have some corresponding bits set for
 * them in one of two extent_io trees.  This is used to make sure all of
 * those extents are sent to disk but does not wait on them
 */
int btrfs_write_marked_extents(struct btrfs_root *root,
			       struct extent_io_tree *dirty_pages, int mark)
{
	int err = 0;
	int werr = 0;
	struct address_space *mapping = root->fs_info->btree_inode->i_mapping;
	struct extent_state *cached_state = NULL;
	u64 start = 0;
	u64 end;

	while (!find_first_extent_bit(dirty_pages, start, &start, &end,
				      mark, &cached_state)) {
		convert_extent_bit(dirty_pages, start, end, EXTENT_NEED_WAIT,
				   mark, &cached_state, GFP_NOFS);
		cached_state = NULL;
		err = filemap_fdatawrite_range(mapping, start, end);
		if (err)
			werr = err;
		cond_resched();
		start = end + 1;
	}
	if (err)
		werr = err;
	return werr;
}
/*
 * when btree blocks are allocated, they have some corresponding bits set for
 * them in one of two extent_io trees.  This is used to make sure all of
 * those extents are on disk for transaction or log commit.  We wait
 * on all the pages and clear them from the dirty pages state tree
 */
int btrfs_wait_marked_extents(struct btrfs_root *root,
			      struct extent_io_tree *dirty_pages, int mark)
{
	int err = 0;
	int werr = 0;
	struct address_space *mapping = root->fs_info->btree_inode->i_mapping;
	struct extent_state *cached_state = NULL;
	u64 start = 0;
	u64 end;

	while (!find_first_extent_bit(dirty_pages, start, &start, &end,
				      EXTENT_NEED_WAIT, &cached_state)) {
		clear_extent_bit(dirty_pages, start, end, EXTENT_NEED_WAIT,
				 0, 0, &cached_state, GFP_NOFS);
		err = filemap_fdatawait_range(mapping, start, end);
		if (err)
			werr = err;
		cond_resched();
		cached_state = NULL;
		start = end + 1;
	}
	if (err)
		werr = err;
	return werr;
}
/*
 * when btree blocks are allocated, they have some corresponding bits set for
 * them in one of two extent_io trees.  This is used to make sure all of
 * those extents are on disk for transaction or log commit
 */
int btrfs_write_and_wait_marked_extents(struct btrfs_root *root,
				struct extent_io_tree *dirty_pages, int mark)
{
	int ret;
	int ret2;

	ret = btrfs_write_marked_extents(root, dirty_pages, mark);
	ret2 = btrfs_wait_marked_extents(root, dirty_pages, mark);

	if (ret)
		return ret;
	if (ret2)
		return ret2;
	return 0;
}
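/*
 * The write/wait pair forms a two-pass scheme over the dirty_pages tree:
 * the write pass converts the given mark to EXTENT_NEED_WAIT while
 * starting writeback, and the wait pass finds EXTENT_NEED_WAIT ranges,
 * clears them and waits for the I/O, so new extents can keep being
 * dirtied under the original mark while an older batch is in flight.
 */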
int btrfs_write_and_wait_transaction(struct btrfs_trans_handle *trans,
				     struct btrfs_root *root)
{
	if (!trans || !trans->transaction) {
		struct inode *btree_inode;
		btree_inode = root->fs_info->btree_inode;
		return filemap_write_and_wait(btree_inode->i_mapping);
	}
	return btrfs_write_and_wait_marked_extents(root,
					   &trans->transaction->dirty_pages,
					   EXTENT_DIRTY);
}
/*
 * this is used to update the root pointer in the tree of tree roots.
 *
 * But, in the case of the extent allocation tree, updating the root
 * pointer may allocate blocks which may change the root of the extent
 * allocation tree.
 *
 * So, this loops and repeats and makes sure the cowonly root didn't
 * change while the root pointer was being updated in the metadata.
 */
static int update_cowonly_root(struct btrfs_trans_handle *trans,
			       struct btrfs_root *root)
{
	int ret;
	u64 old_root_bytenr;
	u64 old_root_used;
	struct btrfs_root *tree_root = root->fs_info->tree_root;

	old_root_used = btrfs_root_used(&root->root_item);
	btrfs_write_dirty_block_groups(trans, root);

	while (1) {
		old_root_bytenr = btrfs_root_bytenr(&root->root_item);
		if (old_root_bytenr == root->node->start &&
		    old_root_used == btrfs_root_used(&root->root_item))
			break;

		btrfs_set_root_node(&root->root_item, root->node);
		ret = btrfs_update_root(trans, tree_root,
					&root->root_key,
					&root->root_item);
		if (ret)
			return ret;

		old_root_used = btrfs_root_used(&root->root_item);
		ret = btrfs_write_dirty_block_groups(trans, root);
		if (ret)
			return ret;
	}

	if (root != root->fs_info->extent_root)
		switch_commit_root(root);

	return 0;
}
/*
 * update all the cowonly tree roots on disk
 *
 * The error handling in this function may not be obvious. Any of the
 * failures will cause the file system to go offline. We still need
 * to clean up the delayed refs.
 */
static noinline int commit_cowonly_roots(struct btrfs_trans_handle *trans,
					 struct btrfs_root *root)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct list_head *next;
	struct extent_buffer *eb;
	int ret;

	ret = btrfs_run_delayed_refs(trans, root, (unsigned long)-1);
	if (ret)
		return ret;

	eb = btrfs_lock_root_node(fs_info->tree_root);
	ret = btrfs_cow_block(trans, fs_info->tree_root, eb, NULL,
			      0, &eb);
	btrfs_tree_unlock(eb);
	free_extent_buffer(eb);

	if (ret)
		return ret;

	ret = btrfs_run_delayed_refs(trans, root, (unsigned long)-1);
	if (ret)
		return ret;

	ret = btrfs_run_dev_stats(trans, root->fs_info);
	if (ret)
		return ret;

	ret = btrfs_run_qgroups(trans, root->fs_info);
	if (ret)
		return ret;

	/* run_qgroups might have added some more refs */
	ret = btrfs_run_delayed_refs(trans, root, (unsigned long)-1);
	if (ret)
		return ret;

	while (!list_empty(&fs_info->dirty_cowonly_roots)) {
		next = fs_info->dirty_cowonly_roots.next;
		list_del_init(next);
		root = list_entry(next, struct btrfs_root, dirty_list);

		ret = update_cowonly_root(trans, root);
		if (ret)
			return ret;
	}

	down_write(&fs_info->extent_commit_sem);
	switch_commit_root(fs_info->extent_root);
	up_write(&fs_info->extent_commit_sem);

	return 0;
}
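/*
 * The extent root is the one cowonly root update_cowonly_root() does not
 * switch: writing the other roots can still allocate from it and move its
 * node, so its commit root is only switched here, after the dirty list
 * has drained, under extent_commit_sem.
 */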
/*
 * dead roots are old snapshots that need to be deleted.  This allocates
 * a dirty root struct and adds it into the list of dead roots that need to
 * be deleted
 */
int btrfs_add_dead_root(struct btrfs_root *root)
{
	spin_lock(&root->fs_info->trans_lock);
	list_add(&root->root_list, &root->fs_info->dead_roots);
	spin_unlock(&root->fs_info->trans_lock);
	return 0;
}
/*
 * update all the cowonly tree roots on disk
 */
static noinline int commit_fs_roots(struct btrfs_trans_handle *trans,
				    struct btrfs_root *root)
{
	struct btrfs_root *gang[8];
	struct btrfs_fs_info *fs_info = root->fs_info;
	int i;
	int ret;
	int err = 0;

	spin_lock(&fs_info->fs_roots_radix_lock);
	while (1) {
		ret = radix_tree_gang_lookup_tag(&fs_info->fs_roots_radix,
						 (void **)gang, 0,
						 ARRAY_SIZE(gang),
						 BTRFS_ROOT_TRANS_TAG);
		if (ret == 0)
			break;
		for (i = 0; i < ret; i++) {
			root = gang[i];
			radix_tree_tag_clear(&fs_info->fs_roots_radix,
					(unsigned long)root->root_key.objectid,
					BTRFS_ROOT_TRANS_TAG);
			spin_unlock(&fs_info->fs_roots_radix_lock);

			btrfs_free_log(trans, root);
			btrfs_update_reloc_root(trans, root);
			btrfs_orphan_commit_root(trans, root);

			btrfs_save_ino_cache(root, trans);

			/* see comments in should_cow_block() */
			root->force_cow = 0;
			smp_wmb();

			if (root->commit_root != root->node) {
				mutex_lock(&root->fs_commit_mutex);
				switch_commit_root(root);
				btrfs_unpin_free_ino(root);
				mutex_unlock(&root->fs_commit_mutex);

				btrfs_set_root_node(&root->root_item,
						    root->node);
			}

			err = btrfs_update_root(trans, fs_info->tree_root,
						&root->root_key,
						&root->root_item);
			spin_lock(&fs_info->fs_roots_radix_lock);
			if (err)
				break;
		}
	}
	spin_unlock(&fs_info->fs_roots_radix_lock);
	return err;
}
/*
 * defrag a given btree.  If cacheonly == 1, this won't read from the disk,
 * otherwise every leaf in the btree is read and defragged.
 */
int btrfs_defrag_root(struct btrfs_root *root, int cacheonly)
{
	struct btrfs_fs_info *info = root->fs_info;
	struct btrfs_trans_handle *trans;
	int ret;
	unsigned long nr;

	if (xchg(&root->defrag_running, 1))
		return 0;

	while (1) {
		trans = btrfs_start_transaction(root, 0);
		if (IS_ERR(trans))
			return PTR_ERR(trans);

		ret = btrfs_defrag_leaves(trans, root, cacheonly);

		nr = trans->blocks_used;
		btrfs_end_transaction(trans, root);
		btrfs_btree_balance_dirty(info->tree_root, nr);
		cond_resched();

		if (btrfs_fs_closing(root->fs_info) || ret != -EAGAIN)
			break;
	}
	root->defrag_running = 0;
	return ret;
}
/*
 * new snapshots need to be created at a very specific time in the
 * transaction commit.  This does the actual creation
 */
static noinline int create_pending_snapshot(struct btrfs_trans_handle *trans,
				   struct btrfs_fs_info *fs_info,
				   struct btrfs_pending_snapshot *pending)
{
	struct btrfs_key key;
	struct btrfs_root_item *new_root_item;
	struct btrfs_root *tree_root = fs_info->tree_root;
	struct btrfs_root *root = pending->root;
	struct btrfs_root *parent_root;
	struct btrfs_block_rsv *rsv;
	struct inode *parent_inode;
	struct btrfs_path *path;
	struct btrfs_dir_item *dir_item;
	struct dentry *parent;
	struct dentry *dentry;
	struct extent_buffer *tmp;
	struct extent_buffer *old;
	struct timespec cur_time = CURRENT_TIME;
	int ret;
	u64 to_reserve = 0;
	u64 index = 0;
	u64 objectid;
	u64 root_flags;
	uuid_le new_uuid;

	path = btrfs_alloc_path();
	if (!path) {
		ret = pending->error = -ENOMEM;
		goto path_alloc_fail;
	}

	new_root_item = kmalloc(sizeof(*new_root_item), GFP_NOFS);
	if (!new_root_item) {
		ret = pending->error = -ENOMEM;
		goto root_item_alloc_fail;
	}

	ret = btrfs_find_free_objectid(tree_root, &objectid);
	if (ret) {
		pending->error = ret;
		goto no_free_objectid;
	}

	btrfs_reloc_pre_snapshot(trans, pending, &to_reserve);

	if (to_reserve > 0) {
		ret = btrfs_block_rsv_add(root, &pending->block_rsv,
					  to_reserve,
					  BTRFS_RESERVE_NO_FLUSH);
		if (ret) {
			pending->error = ret;
			goto no_free_objectid;
		}
	}

	ret = btrfs_qgroup_inherit(trans, fs_info, root->root_key.objectid,
				   objectid, pending->inherit);
	if (ret) {
		pending->error = ret;
		goto no_free_objectid;
	}

	key.objectid = objectid;
	key.offset = (u64)-1;
	key.type = BTRFS_ROOT_ITEM_KEY;

	rsv = trans->block_rsv;
	trans->block_rsv = &pending->block_rsv;

	dentry = pending->dentry;
	parent = dget_parent(dentry);
	parent_inode = parent->d_inode;
	parent_root = BTRFS_I(parent_inode)->root;
	record_root_in_trans(trans, parent_root);

	/*
	 * insert the directory item
	 */
	ret = btrfs_set_inode_index(parent_inode, &index);
	BUG_ON(ret); /* -ENOMEM */

	/* check if there is a file/dir which has the same name. */
	dir_item = btrfs_lookup_dir_item(NULL, parent_root, path,
					 btrfs_ino(parent_inode),
					 dentry->d_name.name,
					 dentry->d_name.len, 0);
	if (dir_item != NULL && !IS_ERR(dir_item)) {
		pending->error = -EEXIST;
		goto fail;
	} else if (IS_ERR(dir_item)) {
		ret = PTR_ERR(dir_item);
		btrfs_abort_transaction(trans, root, ret);
		goto fail;
	}
	btrfs_release_path(path);

	/*
	 * pull in the delayed directory update
	 * and the delayed inode item
	 * otherwise we corrupt the FS during
	 * snapshot
	 */
	ret = btrfs_run_delayed_items(trans, root);
	if (ret) {	/* Transaction aborted */
		btrfs_abort_transaction(trans, root, ret);
		goto fail;
	}

	record_root_in_trans(trans, root);
	btrfs_set_root_last_snapshot(&root->root_item, trans->transid);
	memcpy(new_root_item, &root->root_item, sizeof(*new_root_item));
	btrfs_check_and_init_root_item(new_root_item);

	root_flags = btrfs_root_flags(new_root_item);
	if (pending->readonly)
		root_flags |= BTRFS_ROOT_SUBVOL_RDONLY;
	else
		root_flags &= ~BTRFS_ROOT_SUBVOL_RDONLY;
	btrfs_set_root_flags(new_root_item, root_flags);

	btrfs_set_root_generation_v2(new_root_item,
			trans->transid);
	uuid_le_gen(&new_uuid);
	memcpy(new_root_item->uuid, new_uuid.b, BTRFS_UUID_SIZE);
	memcpy(new_root_item->parent_uuid, root->root_item.uuid,
			BTRFS_UUID_SIZE);
	new_root_item->otime.sec = cpu_to_le64(cur_time.tv_sec);
	new_root_item->otime.nsec = cpu_to_le32(cur_time.tv_nsec);
	btrfs_set_root_otransid(new_root_item, trans->transid);
	memset(&new_root_item->stime, 0, sizeof(new_root_item->stime));
	memset(&new_root_item->rtime, 0, sizeof(new_root_item->rtime));
	btrfs_set_root_stransid(new_root_item, 0);
	btrfs_set_root_rtransid(new_root_item, 0);

	old = btrfs_lock_root_node(root);
	ret = btrfs_cow_block(trans, root, old, NULL, 0, &old);
	if (ret) {
		btrfs_tree_unlock(old);
		free_extent_buffer(old);
		btrfs_abort_transaction(trans, root, ret);
		goto fail;
	}

	btrfs_set_lock_blocking(old);

	ret = btrfs_copy_root(trans, root, old, &tmp, objectid);
	/* clean up in any case */
	btrfs_tree_unlock(old);
	free_extent_buffer(old);
	if (ret) {
		btrfs_abort_transaction(trans, root, ret);
		goto fail;
	}

	/* see comments in should_cow_block() */
	root->force_cow = 1;
	smp_wmb();

	btrfs_set_root_node(new_root_item, tmp);
	/* record when the snapshot was created in key.offset */
	key.offset = trans->transid;
	ret = btrfs_insert_root(trans, tree_root, &key, new_root_item);
	btrfs_tree_unlock(tmp);
	free_extent_buffer(tmp);
	if (ret) {
		btrfs_abort_transaction(trans, root, ret);
		goto fail;
	}

	/*
	 * insert root back/forward references
	 */
	ret = btrfs_add_root_ref(trans, tree_root, objectid,
				 parent_root->root_key.objectid,
				 btrfs_ino(parent_inode), index,
				 dentry->d_name.name, dentry->d_name.len);
	if (ret) {
		btrfs_abort_transaction(trans, root, ret);
		goto fail;
	}

	key.offset = (u64)-1;
	pending->snap = btrfs_read_fs_root_no_name(root->fs_info, &key);
	if (IS_ERR(pending->snap)) {
		ret = PTR_ERR(pending->snap);
		btrfs_abort_transaction(trans, root, ret);
		goto fail;
	}

	ret = btrfs_reloc_post_snapshot(trans, pending);
	if (ret) {
		btrfs_abort_transaction(trans, root, ret);
		goto fail;
	}

	ret = btrfs_run_delayed_refs(trans, root, (unsigned long)-1);
	if (ret) {
		btrfs_abort_transaction(trans, root, ret);
		goto fail;
	}

	ret = btrfs_insert_dir_item(trans, parent_root,
				    dentry->d_name.name, dentry->d_name.len,
				    parent_inode, &key,
				    BTRFS_FT_DIR, index);
	/* We have checked the name at the beginning, so it is impossible. */
	BUG_ON(ret == -EEXIST);
	if (ret) {
		btrfs_abort_transaction(trans, root, ret);
		goto fail;
	}

	btrfs_i_size_write(parent_inode, parent_inode->i_size +
					 dentry->d_name.len * 2);
	parent_inode->i_mtime = parent_inode->i_ctime = CURRENT_TIME;
	ret = btrfs_update_inode_fallback(trans, parent_root, parent_inode);
	if (ret)
		btrfs_abort_transaction(trans, root, ret);
fail:
	dput(parent);
	trans->block_rsv = rsv;
no_free_objectid:
	kfree(new_root_item);
root_item_alloc_fail:
	btrfs_free_path(path);
path_alloc_fail:
	btrfs_block_rsv_release(root, &pending->block_rsv, (u64)-1);
	return ret;
}
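/*
 * Error handling in create_pending_snapshot() has two flavors: errors the
 * caller may tolerate (an existing name just sets pending->error and
 * bails out) and errors hit after metadata has already been touched,
 * which also call btrfs_abort_transaction() so the whole commit is
 * abandoned.
 */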
/*
 * create all the snapshots we've scheduled for creation
 */
static noinline int create_pending_snapshots(struct btrfs_trans_handle *trans,
					     struct btrfs_fs_info *fs_info)
{
	struct btrfs_pending_snapshot *pending;
	struct list_head *head = &trans->transaction->pending_snapshots;

	list_for_each_entry(pending, head, list)
		create_pending_snapshot(trans, fs_info, pending);
	return 0;
}
static void update_super_roots(struct btrfs_root *root)
{
	struct btrfs_root_item *root_item;
	struct btrfs_super_block *super;

	super = root->fs_info->super_copy;

	root_item = &root->fs_info->chunk_root->root_item;
	super->chunk_root = root_item->bytenr;
	super->chunk_root_generation = root_item->generation;
	super->chunk_root_level = root_item->level;

	root_item = &root->fs_info->tree_root->root_item;
	super->root = root_item->bytenr;
	super->generation = root_item->generation;
	super->root_level = root_item->level;
	if (btrfs_test_opt(root, SPACE_CACHE))
		super->cache_generation = root_item->generation;
}
int btrfs_transaction_in_commit(struct btrfs_fs_info *info)
{
	int ret = 0;
	spin_lock(&info->trans_lock);
	if (info->running_transaction)
		ret = info->running_transaction->in_commit;
	spin_unlock(&info->trans_lock);
	return ret;
}
int btrfs_transaction_blocked(struct btrfs_fs_info *info)
{
	int ret = 0;
	spin_lock(&info->trans_lock);
	if (info->running_transaction)
		ret = info->running_transaction->blocked;
	spin_unlock(&info->trans_lock);
	return ret;
}
/*
 * wait for the current transaction commit to start and block subsequent
 * transaction joins
 */
static void wait_current_trans_commit_start(struct btrfs_root *root,
					    struct btrfs_transaction *trans)
{
	wait_event(root->fs_info->transaction_blocked_wait, trans->in_commit);
}
/*
 * wait for the current transaction to start and then become unblocked.
 * caller holds ref.
 */
static void wait_current_trans_commit_start_and_unblock(struct btrfs_root *root,
					 struct btrfs_transaction *trans)
{
	wait_event(root->fs_info->transaction_wait,
		   trans->commit_done || (trans->in_commit && !trans->blocked));
}
/*
 * commit transactions asynchronously. once btrfs_commit_transaction_async
 * returns, any subsequent transaction will not be allowed to join.
 */
struct btrfs_async_commit {
	struct btrfs_trans_handle *newtrans;
	struct btrfs_root *root;
	struct delayed_work work;
};
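/*
 * The flow: btrfs_commit_transaction_async() joins the running
 * transaction, packs the new handle into a btrfs_async_commit, schedules
 * do_async_commit() on a worker and then waits only for the commit to
 * start (or become unblocked), never for it to finish.
 */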
static void do_async_commit(struct work_struct *work)
{
	struct btrfs_async_commit *ac =
		container_of(work, struct btrfs_async_commit, work.work);

	/*
	 * We've got freeze protection passed with the transaction.
	 * Tell lockdep about it.
	 */
	rwsem_acquire_read(
		&ac->root->fs_info->sb->s_writers.lock_map[SB_FREEZE_FS-1],
		0, 1, _THIS_IP_);

	current->journal_info = ac->newtrans;

	btrfs_commit_transaction(ac->newtrans, ac->root);
	kfree(ac);
}
int btrfs_commit_transaction_async(struct btrfs_trans_handle *trans,
				   struct btrfs_root *root,
				   int wait_for_unblock)
{
	struct btrfs_async_commit *ac;
	struct btrfs_transaction *cur_trans;

	ac = kmalloc(sizeof(*ac), GFP_NOFS);
	if (!ac)
		return -ENOMEM;

	INIT_DELAYED_WORK(&ac->work, do_async_commit);
	ac->root = root;
	ac->newtrans = btrfs_join_transaction(root);
	if (IS_ERR(ac->newtrans)) {
		int err = PTR_ERR(ac->newtrans);
		kfree(ac);
		return err;
	}

	/* take transaction reference */
	cur_trans = trans->transaction;
	atomic_inc(&cur_trans->use_count);

	btrfs_end_transaction(trans, root);

	/*
	 * Tell lockdep we've released the freeze rwsem, since the
	 * async commit thread will be the one to unlock it.
	 */
	rwsem_release(&root->fs_info->sb->s_writers.lock_map[SB_FREEZE_FS-1],
		      1, _THIS_IP_);

	schedule_delayed_work(&ac->work, 0);

	/* wait for transaction to start and unblock */
	if (wait_for_unblock)
		wait_current_trans_commit_start_and_unblock(root, cur_trans);
	else
		wait_current_trans_commit_start(root, cur_trans);

	if (current->journal_info == trans)
		current->journal_info = NULL;

	put_transaction(cur_trans);
	return 0;
}
static void cleanup_transaction(struct btrfs_trans_handle *trans,
				struct btrfs_root *root, int err)
{
	struct btrfs_transaction *cur_trans = trans->transaction;

	WARN_ON(trans->use_count > 1);

	btrfs_abort_transaction(trans, root, err);

	spin_lock(&root->fs_info->trans_lock);
	list_del_init(&cur_trans->list);
	if (cur_trans == root->fs_info->running_transaction) {
		root->fs_info->running_transaction = NULL;
		root->fs_info->trans_no_join = 0;
	}
	spin_unlock(&root->fs_info->trans_lock);

	btrfs_cleanup_one_transaction(trans->transaction, root);

	put_transaction(cur_trans);
	put_transaction(cur_trans);

	trace_btrfs_transaction_commit(root);

	btrfs_scrub_continue(root);

	if (current->journal_info == trans)
		current->journal_info = NULL;

	kmem_cache_free(btrfs_trans_handle_cachep, trans);
}
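/*
 * The two put_transaction() calls above are deliberate: they drop the two
 * references taken in join_transaction() (one for the handle, one that
 * would normally be held until a successful commit finished).
 */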
static int btrfs_flush_all_pending_stuffs(struct btrfs_trans_handle *trans,
					  struct btrfs_root *root)
{
	int flush_on_commit = btrfs_test_opt(root, FLUSHONCOMMIT);
	int snap_pending = 0;
	int ret;

	if (!flush_on_commit) {
		spin_lock(&root->fs_info->trans_lock);
		if (!list_empty(&trans->transaction->pending_snapshots))
			snap_pending = 1;
		spin_unlock(&root->fs_info->trans_lock);
	}

	if (flush_on_commit || snap_pending) {
		btrfs_start_delalloc_inodes(root, 1);
		btrfs_wait_ordered_extents(root, 1);
	}

	ret = btrfs_run_delayed_items(trans, root);
	if (ret)
		return ret;

	/*
	 * running the delayed items may have added new refs. account
	 * them now so that they hinder processing of more delayed refs
	 * as little as possible.
	 */
	btrfs_delayed_refs_qgroup_accounting(trans, root->fs_info);

	/*
	 * rename don't use btrfs_join_transaction, so, once we
	 * set the transaction to blocked above, we aren't going
	 * to get any new ordered operations.  We can safely run
	 * it here and know for sure that nothing new will be added
	 * to the list
	 */
	btrfs_run_ordered_operations(root, 1);

	return 0;
}
/*
 * btrfs_transaction state sequence:
 * in_commit = 0, blocked = 0  (initial)
 * in_commit = 1, blocked = 1
 * blocked = 0
 * commit_done = 1
 */
int btrfs_commit_transaction(struct btrfs_trans_handle *trans,
			     struct btrfs_root *root)
{
	unsigned long joined = 0;
	struct btrfs_transaction *cur_trans = trans->transaction;
	struct btrfs_transaction *prev_trans = NULL;
	DEFINE_WAIT(wait);
	int ret;
	int should_grow = 0;
	unsigned long now = get_seconds();

	ret = btrfs_run_ordered_operations(root, 0);
	if (ret) {
		btrfs_abort_transaction(trans, root, ret);
		goto cleanup_transaction;
	}

	if (cur_trans->aborted) {
		ret = cur_trans->aborted;
		goto cleanup_transaction;
	}

	/* make a pass through all the delayed refs we have so far
	 * any runnings procs may add more while we are here
	 */
	ret = btrfs_run_delayed_refs(trans, root, 0);
	if (ret)
		goto cleanup_transaction;

	btrfs_trans_release_metadata(trans, root);
	trans->block_rsv = NULL;

	cur_trans = trans->transaction;

	/*
	 * set the flushing flag so procs in this transaction have to
	 * start sending their work down.
	 */
	cur_trans->delayed_refs.flushing = 1;

	if (!list_empty(&trans->new_bgs))
		btrfs_create_pending_block_groups(trans, root);

	ret = btrfs_run_delayed_refs(trans, root, 0);
	if (ret)
		goto cleanup_transaction;

	spin_lock(&cur_trans->commit_lock);
	if (cur_trans->in_commit) {
		spin_unlock(&cur_trans->commit_lock);
		atomic_inc(&cur_trans->use_count);
		ret = btrfs_end_transaction(trans, root);

		wait_for_commit(root, cur_trans);

		put_transaction(cur_trans);

		return ret;
	}

	trans->transaction->in_commit = 1;
	trans->transaction->blocked = 1;
	spin_unlock(&cur_trans->commit_lock);
	wake_up(&root->fs_info->transaction_blocked_wait);

	spin_lock(&root->fs_info->trans_lock);
	if (cur_trans->list.prev != &root->fs_info->trans_list) {
		prev_trans = list_entry(cur_trans->list.prev,
					struct btrfs_transaction, list);
		if (!prev_trans->commit_done) {
			atomic_inc(&prev_trans->use_count);
			spin_unlock(&root->fs_info->trans_lock);

			wait_for_commit(root, prev_trans);

			put_transaction(prev_trans);
		} else {
			spin_unlock(&root->fs_info->trans_lock);
		}
	} else {
		spin_unlock(&root->fs_info->trans_lock);
	}

	if (!btrfs_test_opt(root, SSD) &&
	    (now < cur_trans->start_time || now - cur_trans->start_time < 1))
		should_grow = 1;

	do {
		joined = cur_trans->num_joined;

		WARN_ON(cur_trans != trans->transaction);

		ret = btrfs_flush_all_pending_stuffs(trans, root);
		if (ret)
			goto cleanup_transaction;

		prepare_to_wait(&cur_trans->writer_wait, &wait,
				TASK_UNINTERRUPTIBLE);

		if (atomic_read(&cur_trans->num_writers) > 1)
			schedule_timeout(MAX_SCHEDULE_TIMEOUT);
		else if (should_grow)
			schedule_timeout(1);

		finish_wait(&cur_trans->writer_wait, &wait);
	} while (atomic_read(&cur_trans->num_writers) > 1 ||
		 (should_grow && cur_trans->num_joined != joined));

	ret = btrfs_flush_all_pending_stuffs(trans, root);
	if (ret)
		goto cleanup_transaction;

	/*
	 * Ok now we need to make sure to block out any other joins while we
	 * commit the transaction.  We could have started a join before setting
	 * no_join so make sure to wait for num_writers to == 1 again.
	 */
	spin_lock(&root->fs_info->trans_lock);
	root->fs_info->trans_no_join = 1;
	spin_unlock(&root->fs_info->trans_lock);
	wait_event(cur_trans->writer_wait,
		   atomic_read(&cur_trans->num_writers) == 1);

	/*
	 * the reloc mutex makes sure that we stop
	 * the balancing code from coming in and moving
	 * extents around in the middle of the commit
	 */
	mutex_lock(&root->fs_info->reloc_mutex);

	/*
	 * We needn't worry about the delayed items because we will
	 * deal with them in create_pending_snapshot(), which is the
	 * core function of the snapshot creation.
	 */
	ret = create_pending_snapshots(trans, root->fs_info);
	if (ret) {
		mutex_unlock(&root->fs_info->reloc_mutex);
		goto cleanup_transaction;
	}

	/*
	 * We insert the dir indexes of the snapshots and update the inode
	 * of the snapshots' parents after the snapshot creation, so there
	 * are some delayed items which are not dealt with. Now deal with
	 * them.
	 *
	 * We needn't worry that this operation will corrupt the snapshots,
	 * because all the tree which are snapshoted will be forced to COW
	 * the nodes and leaves.
	 */
	ret = btrfs_run_delayed_items(trans, root);
	if (ret) {
		mutex_unlock(&root->fs_info->reloc_mutex);
		goto cleanup_transaction;
	}

	ret = btrfs_run_delayed_refs(trans, root, (unsigned long)-1);
	if (ret) {
		mutex_unlock(&root->fs_info->reloc_mutex);
		goto cleanup_transaction;
	}

	/*
	 * make sure none of the code above managed to slip in a
	 * delayed item
	 */
	btrfs_assert_delayed_root_empty(root);

	WARN_ON(cur_trans != trans->transaction);

	btrfs_scrub_pause(root);
	/* btrfs_commit_tree_roots is responsible for getting the
	 * various roots consistent with each other.  Every pointer
	 * in the tree of tree roots has to point to the most up to date
	 * root for every subvolume and other tree.  So, we have to keep
	 * the tree logging code from jumping in and changing any
	 * of the trees.
	 *
	 * At this point in the commit, there can't be any tree-log
	 * writers, but a little lower down we drop the trans mutex
	 * and let new people in.  By holding the tree_log_mutex
	 * from now until after the super is written, we avoid races
	 * with the tree-log code.
	 */
	mutex_lock(&root->fs_info->tree_log_mutex);

	ret = commit_fs_roots(trans, root);
	if (ret) {
		mutex_unlock(&root->fs_info->tree_log_mutex);
		mutex_unlock(&root->fs_info->reloc_mutex);
		goto cleanup_transaction;
	}

	/* commit_fs_roots gets rid of all the tree log roots, it is now
	 * safe to free the root of tree log roots
	 */
	btrfs_free_log_root_tree(trans, root->fs_info);

	ret = commit_cowonly_roots(trans, root);
	if (ret) {
		mutex_unlock(&root->fs_info->tree_log_mutex);
		mutex_unlock(&root->fs_info->reloc_mutex);
		goto cleanup_transaction;
	}

	btrfs_prepare_extent_commit(trans, root);

	cur_trans = root->fs_info->running_transaction;

	btrfs_set_root_node(&root->fs_info->tree_root->root_item,
			    root->fs_info->tree_root->node);
	switch_commit_root(root->fs_info->tree_root);

	btrfs_set_root_node(&root->fs_info->chunk_root->root_item,
			    root->fs_info->chunk_root->node);
	switch_commit_root(root->fs_info->chunk_root);

	assert_qgroups_uptodate(trans);
	update_super_roots(root);

	if (!root->fs_info->log_root_recovering) {
		btrfs_set_super_log_root(root->fs_info->super_copy, 0);
		btrfs_set_super_log_root_level(root->fs_info->super_copy, 0);
	}

	memcpy(root->fs_info->super_for_commit, root->fs_info->super_copy,
	       sizeof(*root->fs_info->super_copy));

	trans->transaction->blocked = 0;
	spin_lock(&root->fs_info->trans_lock);
	root->fs_info->running_transaction = NULL;
	root->fs_info->trans_no_join = 0;
	spin_unlock(&root->fs_info->trans_lock);
	mutex_unlock(&root->fs_info->reloc_mutex);

	wake_up(&root->fs_info->transaction_wait);

	ret = btrfs_write_and_wait_transaction(trans, root);
	if (ret) {
		btrfs_error(root->fs_info, ret,
			    "Error while writing out transaction.");
		mutex_unlock(&root->fs_info->tree_log_mutex);
		goto cleanup_transaction;
	}

	ret = write_ctree_super(trans, root, 0);
	if (ret) {
		mutex_unlock(&root->fs_info->tree_log_mutex);
		goto cleanup_transaction;
	}

	/*
	 * the super is written, we can safely allow the tree-loggers
	 * to go about their business
	 */
	mutex_unlock(&root->fs_info->tree_log_mutex);

	btrfs_finish_extent_commit(trans, root);

	cur_trans->commit_done = 1;

	root->fs_info->last_trans_committed = cur_trans->transid;

	wake_up(&cur_trans->commit_wait);

	spin_lock(&root->fs_info->trans_lock);
	list_del_init(&cur_trans->list);
	spin_unlock(&root->fs_info->trans_lock);

	put_transaction(cur_trans);
	put_transaction(cur_trans);

	if (trans->type < TRANS_JOIN_NOLOCK)
		sb_end_intwrite(root->fs_info->sb);

	trace_btrfs_transaction_commit(root);

	btrfs_scrub_continue(root);

	if (current->journal_info == trans)
		current->journal_info = NULL;

	kmem_cache_free(btrfs_trans_handle_cachep, trans);

	if (current != root->fs_info->transaction_kthread)
		btrfs_run_delayed_iputs(root);

	return ret;

cleanup_transaction:
	btrfs_trans_release_metadata(trans, root);
	trans->block_rsv = NULL;
	btrfs_printk(root->fs_info, "Skipping commit of aborted transaction.\n");
	if (current->journal_info == trans)
		current->journal_info = NULL;
	cleanup_transaction(trans, root, ret);

	return ret;
}
/*
 * interface function to delete all the snapshots we have scheduled for deletion
 */
int btrfs_clean_old_snapshots(struct btrfs_root *root)
{
	LIST_HEAD(list);
	struct btrfs_fs_info *fs_info = root->fs_info;

	spin_lock(&fs_info->trans_lock);
	list_splice_init(&fs_info->dead_roots, &list);
	spin_unlock(&fs_info->trans_lock);

	while (!list_empty(&list)) {
		int ret;

		root = list_entry(list.next, struct btrfs_root, root_list);
		list_del(&root->root_list);

		btrfs_kill_all_delayed_nodes(root);

		if (btrfs_header_backref_rev(root->node) <
		    BTRFS_MIXED_BACKREF_REV)
			ret = btrfs_drop_snapshot(root, NULL, 0, 0);
		else
			ret = btrfs_drop_snapshot(root, NULL, 1, 0);