// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2007 Oracle.  All rights reserved.
 */

#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/writeback.h>
#include <linux/pagemap.h>
#include <linux/blkdev.h>
#include <linux/uuid.h>
#include "transaction.h"
#include "dev-replace.h"
#include "block-group.h"
#include "space-info.h"

#define BTRFS_ROOT_TRANS_TAG 0
/*
 * Transaction states and transitions
 *
 * No running transaction (fs tree blocks are not modified)
 * |
 * | To next stage:
 * |  Call start_transaction() variants. Except btrfs_join_transaction_nostart().
 * V
 * Transaction N [[TRANS_STATE_RUNNING]]
 * |
 * | New trans handles can be attached to transaction N by calling all
 * | start_transaction() variants.
 * |
 * | To next stage:
 * |  Call btrfs_commit_transaction() on any trans handle attached to
 * |  transaction N
 * V
 * Transaction N [[TRANS_STATE_COMMIT_START]]
 * |
 * | Will wait for previous running transaction to completely finish if there
 * | is one.
 * |
 * | Then one of the following happens:
 * | - Wait for all other trans handle holders to release.
 * |   The btrfs_commit_transaction() caller will do the commit work.
 * | - Wait for current transaction to be committed by others.
 * |   Other btrfs_commit_transaction() caller will do the commit work.
 * |
 * | At this stage, only btrfs_join_transaction*() variants can attach
 * | to this running transaction.
 * | All other variants will wait for current one to finish and attach to
 * | transaction N+1.
 * |
 * | To next stage:
 * |  Caller is chosen to commit transaction N, and all other trans handles
 * |  have been released.
 * V
 * Transaction N [[TRANS_STATE_COMMIT_DOING]]
 * |
 * | The heavy lifting transaction work is started.
 * | From running delayed refs (modifying extent tree) to creating pending
 * | snapshots, running qgroups.
 * | In short, modify supporting trees to reflect modifications of subvolume
 * | trees.
 * |
 * | At this stage, all start_transaction() calls will wait for this
 * | transaction to finish and attach to transaction N+1.
 * |
 * | To next stage:
 * |  Until all supporting trees are updated.
 * V
 * Transaction N [[TRANS_STATE_UNBLOCKED]]
 * |                                                Transaction N+1
 * | All needed trees are modified, thus we only    [[TRANS_STATE_RUNNING]]
 * | need to write them back to disk and update     |
 * | super blocks.                                  |
 * |                                                |
 * | At this stage, new transaction is allowed to   |
 * | start.                                         |
 * | All new start_transaction() calls will be      |
 * | attached to transid N+1.                       |
 * |                                                |
 * | To next stage:                                 |
 * |  Until all tree blocks and super blocks are    |
 * |  written to block devices.                     |
 * V                                                |
 * Transaction N [[TRANS_STATE_COMPLETED]]          V
 * All tree blocks and super blocks are written.    Transaction N+1
 * This transaction is finished and all its         [[TRANS_STATE_COMMIT_START]]
 * data structures will be cleaned up.              | Life goes on
 */
static const unsigned int btrfs_blocked_trans_types[TRANS_STATE_MAX] = {
	[TRANS_STATE_RUNNING]		= 0U,
	[TRANS_STATE_COMMIT_START]	= (__TRANS_START | __TRANS_ATTACH),
	[TRANS_STATE_COMMIT_DOING]	= (__TRANS_START |
					   __TRANS_ATTACH |
					   __TRANS_JOIN |
					   __TRANS_JOIN_NOSTART),
	[TRANS_STATE_UNBLOCKED]		= (__TRANS_START |
					   __TRANS_ATTACH |
					   __TRANS_JOIN |
					   __TRANS_JOIN_NOLOCK |
					   __TRANS_JOIN_NOSTART),
	[TRANS_STATE_SUPER_COMMITTED]	= (__TRANS_START |
					   __TRANS_ATTACH |
					   __TRANS_JOIN |
					   __TRANS_JOIN_NOLOCK |
					   __TRANS_JOIN_NOSTART),
	[TRANS_STATE_COMPLETED]		= (__TRANS_START |
					   __TRANS_ATTACH |
					   __TRANS_JOIN |
					   __TRANS_JOIN_NOLOCK |
					   __TRANS_JOIN_NOSTART),
};
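
/*
 * join_transaction() below checks btrfs_blocked_trans_types[cur_trans->state]
 * against the requested join type: if the type's bit is set for the current
 * transaction state, the join is refused with -EBUSY, and start_transaction()
 * either waits for the transaction to finish or retries.
 */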
void btrfs_put_transaction(struct btrfs_transaction *transaction)
{
	WARN_ON(refcount_read(&transaction->use_count) == 0);
	if (refcount_dec_and_test(&transaction->use_count)) {
		BUG_ON(!list_empty(&transaction->list));
		WARN_ON(!RB_EMPTY_ROOT(
				&transaction->delayed_refs.href_root.rb_root));
		WARN_ON(!RB_EMPTY_ROOT(
				&transaction->delayed_refs.dirty_extent_root));
		if (transaction->delayed_refs.pending_csums)
			btrfs_err(transaction->fs_info,
				  "pending csums is %llu",
				  transaction->delayed_refs.pending_csums);
		/*
		 * If any block groups are found in ->deleted_bgs then it's
		 * because the transaction was aborted and a commit did not
		 * happen (things failed before writing the new superblock
		 * and calling btrfs_finish_extent_commit()), so we can not
		 * discard the physical locations of the block groups.
		 */
		while (!list_empty(&transaction->deleted_bgs)) {
			struct btrfs_block_group *cache;

			cache = list_first_entry(&transaction->deleted_bgs,
						 struct btrfs_block_group,
						 bg_list);
			list_del_init(&cache->bg_list);
			btrfs_unfreeze_block_group(cache);
			btrfs_put_block_group(cache);
		}
		WARN_ON(!list_empty(&transaction->dev_update_list));
		kfree(transaction);
	}
}
static noinline void switch_commit_roots(struct btrfs_trans_handle *trans)
{
	struct btrfs_transaction *cur_trans = trans->transaction;
	struct btrfs_fs_info *fs_info = trans->fs_info;
	struct btrfs_root *root, *tmp;
	struct btrfs_caching_control *caching_ctl, *next;

	down_write(&fs_info->commit_root_sem);
	list_for_each_entry_safe(root, tmp, &cur_trans->switch_commits,
				 dirty_list) {
		list_del_init(&root->dirty_list);
		free_extent_buffer(root->commit_root);
		root->commit_root = btrfs_root_node(root);
		extent_io_tree_release(&root->dirty_log_pages);
		btrfs_qgroup_clean_swapped_blocks(root);
	}

	/* We can free old roots now. */
	spin_lock(&cur_trans->dropped_roots_lock);
	while (!list_empty(&cur_trans->dropped_roots)) {
		root = list_first_entry(&cur_trans->dropped_roots,
					struct btrfs_root, root_list);
		list_del_init(&root->root_list);
		spin_unlock(&cur_trans->dropped_roots_lock);
		btrfs_free_log(trans, root);
		btrfs_drop_and_free_fs_root(fs_info, root);
		spin_lock(&cur_trans->dropped_roots_lock);
	}
	spin_unlock(&cur_trans->dropped_roots_lock);

	/*
	 * We have to update the last_byte_to_unpin under the commit_root_sem,
	 * at the same time we swap out the commit roots.
	 *
	 * This is because we must have a real view of the last spot the caching
	 * kthreads were while caching.  Consider the following views of the
	 * extent tree for a block group
	 *
	 * commit root
	 * +----+----+----+----+----+----+----+
	 * |\\\\|    |\\\\|\\\\|    |\\\\|\\\\|
	 * +----+----+----+----+----+----+----+
	 * 0    1    2    3    4    5    6    7
	 *
	 * new commit root
	 * +----+----+----+----+----+----+----+
	 * |    |    |    |\\\\|    |    |\\\\|
	 * +----+----+----+----+----+----+----+
	 * 0    1    2    3    4    5    6    7
	 *
	 * If the cache_ctl->progress was at 3, then we are only allowed to
	 * unpin [0,1) and [2,3], because the caching thread has already
	 * processed those extents.  We are not allowed to unpin [5,6), because
	 * the caching thread will re-start its search from 3, and thus find
	 * the hole from [4,6) to add to the free space cache.
	 */
	spin_lock(&fs_info->block_group_cache_lock);
	list_for_each_entry_safe(caching_ctl, next,
				 &fs_info->caching_block_groups, list) {
		struct btrfs_block_group *cache = caching_ctl->block_group;

		if (btrfs_block_group_done(cache)) {
			cache->last_byte_to_unpin = (u64)-1;
			list_del_init(&caching_ctl->list);
			btrfs_put_caching_control(caching_ctl);
		} else {
			cache->last_byte_to_unpin = caching_ctl->progress;
		}
	}
	spin_unlock(&fs_info->block_group_cache_lock);
	up_write(&fs_info->commit_root_sem);
}
static inline void extwriter_counter_inc(struct btrfs_transaction *trans,
					 unsigned int type)
{
	if (type & TRANS_EXTWRITERS)
		atomic_inc(&trans->num_extwriters);
}

static inline void extwriter_counter_dec(struct btrfs_transaction *trans,
					 unsigned int type)
{
	if (type & TRANS_EXTWRITERS)
		atomic_dec(&trans->num_extwriters);
}

static inline void extwriter_counter_init(struct btrfs_transaction *trans,
					  unsigned int type)
{
	atomic_set(&trans->num_extwriters, ((type & TRANS_EXTWRITERS) ? 1 : 0));
}

static inline int extwriter_counter_read(struct btrfs_transaction *trans)
{
	return atomic_read(&trans->num_extwriters);
}
/*
 * To be called after doing the chunk btree updates right after allocating a new
 * chunk (after btrfs_chunk_alloc_add_chunk_item() is called), when removing a
 * chunk after all chunk btree updates and after finishing the second phase of
 * chunk allocation (btrfs_create_pending_block_groups()) in case some block
 * group had its chunk item insertion delayed to the second phase.
 */
void btrfs_trans_release_chunk_metadata(struct btrfs_trans_handle *trans)
{
	struct btrfs_fs_info *fs_info = trans->fs_info;

	if (!trans->chunk_bytes_reserved)
		return;

	btrfs_block_rsv_release(fs_info, &fs_info->chunk_block_rsv,
				trans->chunk_bytes_reserved, NULL);
	trans->chunk_bytes_reserved = 0;
}
/*
 * either allocate a new transaction or hop into the existing one
 */
static noinline int join_transaction(struct btrfs_fs_info *fs_info,
				     unsigned int type)
{
	struct btrfs_transaction *cur_trans;

	spin_lock(&fs_info->trans_lock);
loop:
	/* The file system has been taken offline. No new transactions. */
	if (test_bit(BTRFS_FS_STATE_ERROR, &fs_info->fs_state)) {
		spin_unlock(&fs_info->trans_lock);
		return -EROFS;
	}

	cur_trans = fs_info->running_transaction;
	if (cur_trans) {
		if (TRANS_ABORTED(cur_trans)) {
			spin_unlock(&fs_info->trans_lock);
			return cur_trans->aborted;
		}
		if (btrfs_blocked_trans_types[cur_trans->state] & type) {
			spin_unlock(&fs_info->trans_lock);
			return -EBUSY;
		}
		refcount_inc(&cur_trans->use_count);
		atomic_inc(&cur_trans->num_writers);
		extwriter_counter_inc(cur_trans, type);
		spin_unlock(&fs_info->trans_lock);
		return 0;
	}
	spin_unlock(&fs_info->trans_lock);

	/*
	 * If we are ATTACH, we just want to catch the current transaction,
	 * and commit it. If there is no transaction, just return ENOENT.
	 */
	if (type == TRANS_ATTACH)
		return -ENOENT;

	/*
	 * JOIN_NOLOCK only happens during the transaction commit, so
	 * it is impossible that ->running_transaction is NULL
	 */
	BUG_ON(type == TRANS_JOIN_NOLOCK);

	cur_trans = kmalloc(sizeof(*cur_trans), GFP_NOFS);
	if (!cur_trans)
		return -ENOMEM;

	spin_lock(&fs_info->trans_lock);
	if (fs_info->running_transaction) {
		/*
		 * someone started a transaction after we unlocked.  Make sure
		 * to redo the checks above
		 */
		kfree(cur_trans);
		goto loop;
	} else if (test_bit(BTRFS_FS_STATE_ERROR, &fs_info->fs_state)) {
		spin_unlock(&fs_info->trans_lock);
		kfree(cur_trans);
		return -EROFS;
	}

	cur_trans->fs_info = fs_info;
	atomic_set(&cur_trans->pending_ordered, 0);
	init_waitqueue_head(&cur_trans->pending_wait);
	atomic_set(&cur_trans->num_writers, 1);
	extwriter_counter_init(cur_trans, type);
	init_waitqueue_head(&cur_trans->writer_wait);
	init_waitqueue_head(&cur_trans->commit_wait);
	cur_trans->state = TRANS_STATE_RUNNING;
	/*
	 * One for this trans handle, one so it will live on until we
	 * commit the transaction.
	 */
	refcount_set(&cur_trans->use_count, 2);
	cur_trans->flags = 0;
	cur_trans->start_time = ktime_get_seconds();

	memset(&cur_trans->delayed_refs, 0, sizeof(cur_trans->delayed_refs));

	cur_trans->delayed_refs.href_root = RB_ROOT_CACHED;
	cur_trans->delayed_refs.dirty_extent_root = RB_ROOT;
	atomic_set(&cur_trans->delayed_refs.num_entries, 0);

	/*
	 * although the tree mod log is per file system and not per transaction,
	 * the log must never go across transaction boundaries.
	 */
	smp_mb();
	if (!list_empty(&fs_info->tree_mod_seq_list))
		WARN(1, KERN_ERR "BTRFS: tree_mod_seq_list not empty when creating a fresh transaction\n");
	if (!RB_EMPTY_ROOT(&fs_info->tree_mod_log))
		WARN(1, KERN_ERR "BTRFS: tree_mod_log rb tree not empty when creating a fresh transaction\n");
	atomic64_set(&fs_info->tree_mod_seq, 0);

	spin_lock_init(&cur_trans->delayed_refs.lock);

	INIT_LIST_HEAD(&cur_trans->pending_snapshots);
	INIT_LIST_HEAD(&cur_trans->dev_update_list);
	INIT_LIST_HEAD(&cur_trans->switch_commits);
	INIT_LIST_HEAD(&cur_trans->dirty_bgs);
	INIT_LIST_HEAD(&cur_trans->io_bgs);
	INIT_LIST_HEAD(&cur_trans->dropped_roots);
	mutex_init(&cur_trans->cache_write_mutex);
	spin_lock_init(&cur_trans->dirty_bgs_lock);
	INIT_LIST_HEAD(&cur_trans->deleted_bgs);
	spin_lock_init(&cur_trans->dropped_roots_lock);
	INIT_LIST_HEAD(&cur_trans->releasing_ebs);
	spin_lock_init(&cur_trans->releasing_ebs_lock);
	list_add_tail(&cur_trans->list, &fs_info->trans_list);
	extent_io_tree_init(fs_info, &cur_trans->dirty_pages,
			    IO_TREE_TRANS_DIRTY_PAGES, fs_info->btree_inode);
	extent_io_tree_init(fs_info, &cur_trans->pinned_extents,
			    IO_TREE_FS_PINNED_EXTENTS, NULL);
	fs_info->generation++;
	cur_trans->transid = fs_info->generation;
	fs_info->running_transaction = cur_trans;
	cur_trans->aborted = 0;
	spin_unlock(&fs_info->trans_lock);

	return 0;
}
/*
 * This does all the record keeping required to make sure that a shareable root
 * is properly recorded in a given transaction.  This is required to make sure
 * the old root from before we joined the transaction is deleted when the
 * transaction commits.
 */
static int record_root_in_trans(struct btrfs_trans_handle *trans,
				struct btrfs_root *root,
				int force)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	int ret = 0;

	if ((test_bit(BTRFS_ROOT_SHAREABLE, &root->state) &&
	    root->last_trans < trans->transid) || force) {
		WARN_ON(root == fs_info->extent_root);
		WARN_ON(!force && root->commit_root != root->node);

		/*
		 * see below for IN_TRANS_SETUP usage rules
		 * we have the reloc mutex held now, so there
		 * is only one writer in this function
		 */
		set_bit(BTRFS_ROOT_IN_TRANS_SETUP, &root->state);

		/* make sure readers find IN_TRANS_SETUP before
		 * they find our root->last_trans update
		 */
		smp_wmb();

		spin_lock(&fs_info->fs_roots_radix_lock);
		if (root->last_trans == trans->transid && !force) {
			spin_unlock(&fs_info->fs_roots_radix_lock);
			return 0;
		}
		radix_tree_tag_set(&fs_info->fs_roots_radix,
				   (unsigned long)root->root_key.objectid,
				   BTRFS_ROOT_TRANS_TAG);
		spin_unlock(&fs_info->fs_roots_radix_lock);
		root->last_trans = trans->transid;

		/* this is pretty tricky.  We don't want to
		 * take the relocation lock in btrfs_record_root_in_trans
		 * unless we're really doing the first setup for this root in
		 * this transaction.
		 *
		 * Normally we'd use root->last_trans as a flag to decide
		 * if we want to take the expensive mutex.
		 *
		 * But, we have to set root->last_trans before we
		 * init the relocation root, otherwise, we trip over warnings
		 * in ctree.c.  The solution used here is to flag ourselves
		 * with root IN_TRANS_SETUP.  When this is 1, we're still
		 * fixing up the reloc trees and everyone must wait.
		 *
		 * When this is zero, they can trust root->last_trans and fly
		 * through btrfs_record_root_in_trans without having to take the
		 * lock.  smp_wmb() makes sure that all the writes above are
		 * done before we pop in the zero below
		 */
		ret = btrfs_init_reloc_root(trans, root);
		smp_mb__before_atomic();
		clear_bit(BTRFS_ROOT_IN_TRANS_SETUP, &root->state);
	}
	return ret;
}
void btrfs_add_dropped_root(struct btrfs_trans_handle *trans,
			    struct btrfs_root *root)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_transaction *cur_trans = trans->transaction;

	/* Add ourselves to the transaction dropped list */
	spin_lock(&cur_trans->dropped_roots_lock);
	list_add_tail(&root->root_list, &cur_trans->dropped_roots);
	spin_unlock(&cur_trans->dropped_roots_lock);

	/* Make sure we don't try to update the root at commit time */
	spin_lock(&fs_info->fs_roots_radix_lock);
	radix_tree_tag_clear(&fs_info->fs_roots_radix,
			     (unsigned long)root->root_key.objectid,
			     BTRFS_ROOT_TRANS_TAG);
	spin_unlock(&fs_info->fs_roots_radix_lock);
}
int btrfs_record_root_in_trans(struct btrfs_trans_handle *trans,
			       struct btrfs_root *root)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	int ret;

	if (!test_bit(BTRFS_ROOT_SHAREABLE, &root->state))
		return 0;

	/*
	 * see record_root_in_trans for comments about IN_TRANS_SETUP usage
	 * and barriers
	 */
	smp_rmb();
	if (root->last_trans == trans->transid &&
	    !test_bit(BTRFS_ROOT_IN_TRANS_SETUP, &root->state))
		return 0;

	mutex_lock(&fs_info->reloc_mutex);
	ret = record_root_in_trans(trans, root, 0);
	mutex_unlock(&fs_info->reloc_mutex);

	return ret;
}
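
/*
 * A transaction is considered blocked when it has entered commit but has not
 * yet reached TRANS_STATE_UNBLOCKED, i.e. while the supporting trees are
 * still being updated and new writers must not modify fs tree blocks.
 */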
static inline int is_transaction_blocked(struct btrfs_transaction *trans)
{
	return (trans->state >= TRANS_STATE_COMMIT_START &&
		trans->state < TRANS_STATE_UNBLOCKED &&
		!TRANS_ABORTED(trans));
}
/* wait for commit against the current transaction to become unblocked
 * when this is done, it is safe to start a new transaction, but the current
 * transaction might not be fully on disk.
 */
static void wait_current_trans(struct btrfs_fs_info *fs_info)
{
	struct btrfs_transaction *cur_trans;

	spin_lock(&fs_info->trans_lock);
	cur_trans = fs_info->running_transaction;
	if (cur_trans && is_transaction_blocked(cur_trans)) {
		refcount_inc(&cur_trans->use_count);
		spin_unlock(&fs_info->trans_lock);

		wait_event(fs_info->transaction_wait,
			   cur_trans->state >= TRANS_STATE_UNBLOCKED ||
			   TRANS_ABORTED(cur_trans));
		btrfs_put_transaction(cur_trans);
	} else {
		spin_unlock(&fs_info->trans_lock);
	}
}
static int may_wait_transaction(struct btrfs_fs_info *fs_info, int type)
{
	if (test_bit(BTRFS_FS_LOG_RECOVERING, &fs_info->flags))
		return 0;

	if (type == TRANS_START)
		return 1;

	return 0;
}
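
/*
 * Decide whether start_transaction() has to reserve one extra tree node worth
 * of metadata for creating this root's relocation root.  Not needed when
 * relocation is not running (no reloc_ctl), the root is not shareable, the
 * root is itself a reloc tree, or a reloc root already exists.
 */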
static inline bool need_reserve_reloc_root(struct btrfs_root *root)
{
	struct btrfs_fs_info *fs_info = root->fs_info;

	if (!fs_info->reloc_ctl ||
	    !test_bit(BTRFS_ROOT_SHAREABLE, &root->state) ||
	    root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID ||
	    root->reloc_root)
		return false;

	return true;
}
static struct btrfs_trans_handle *
start_transaction(struct btrfs_root *root, unsigned int num_items,
		  unsigned int type, enum btrfs_reserve_flush_enum flush,
		  bool enforce_qgroups)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_block_rsv *delayed_refs_rsv = &fs_info->delayed_refs_rsv;
	struct btrfs_trans_handle *h;
	struct btrfs_transaction *cur_trans;
	u64 num_bytes = 0;
	u64 qgroup_reserved = 0;
	bool reloc_reserved = false;
	bool do_chunk_alloc = false;
	int ret;

	if (test_bit(BTRFS_FS_STATE_ERROR, &fs_info->fs_state))
		return ERR_PTR(-EROFS);

	if (current->journal_info) {
		WARN_ON(type & TRANS_EXTWRITERS);
		h = current->journal_info;
		refcount_inc(&h->use_count);
		WARN_ON(refcount_read(&h->use_count) > 2);
		h->orig_rsv = h->block_rsv;
		h->block_rsv = NULL;
		goto got_it;
	}

	/*
	 * Do the reservation before we join the transaction so we can do all
	 * the appropriate flushing if need be.
	 */
	if (num_items && root != fs_info->chunk_root) {
		struct btrfs_block_rsv *rsv = &fs_info->trans_block_rsv;
		u64 delayed_refs_bytes = 0;

		qgroup_reserved = num_items * fs_info->nodesize;
		ret = btrfs_qgroup_reserve_meta_pertrans(root, qgroup_reserved,
							 enforce_qgroups);
		if (ret)
			return ERR_PTR(ret);

		/*
		 * We want to reserve all the bytes we may need all at once, so
		 * we only do 1 enospc flushing cycle per transaction start.  We
		 * accomplish this by simply assuming we'll do 2 x num_items
		 * worth of delayed refs updates in this trans handle, and
		 * refill that amount for whatever is missing in the reserve.
		 */
		num_bytes = btrfs_calc_insert_metadata_size(fs_info, num_items);
		if (flush == BTRFS_RESERVE_FLUSH_ALL &&
		    delayed_refs_rsv->full == 0) {
			delayed_refs_bytes = num_bytes;
			num_bytes <<= 1;
		}

		/*
		 * Do the reservation for the relocation root creation
		 */
		if (need_reserve_reloc_root(root)) {
			num_bytes += fs_info->nodesize;
			reloc_reserved = true;
		}

		ret = btrfs_block_rsv_add(root, rsv, num_bytes, flush);
		if (ret)
			goto reserve_fail;
		if (delayed_refs_bytes) {
			btrfs_migrate_to_delayed_refs_rsv(fs_info, rsv,
							  delayed_refs_bytes);
			num_bytes -= delayed_refs_bytes;
		}

		if (rsv->space_info->force_alloc)
			do_chunk_alloc = true;
	} else if (num_items == 0 && flush == BTRFS_RESERVE_FLUSH_ALL &&
		   !delayed_refs_rsv->full) {
		/*
		 * Some people call with btrfs_start_transaction(root, 0)
		 * because they can be throttled, but have some other mechanism
		 * for reserving space.  We still want these guys to refill the
		 * delayed block_rsv so just add 1 items worth of reservation
		 * here.
		 */
		ret = btrfs_delayed_refs_rsv_refill(fs_info, flush);
		if (ret)
			goto reserve_fail;
	}
again:
	h = kmem_cache_zalloc(btrfs_trans_handle_cachep, GFP_NOFS);
	if (!h) {
		ret = -ENOMEM;
		goto alloc_fail;
	}

	/*
	 * If we are JOIN_NOLOCK we're already committing a transaction and
	 * waiting on this guy, so we don't need to do the sb_start_intwrite
	 * because we're already holding a ref.  We need this because we could
	 * have raced in and did an fsync() on a file which can kick a commit
	 * and then we deadlock with somebody doing a freeze.
	 *
	 * If we are ATTACH, it means we just want to catch the current
	 * transaction and commit it, so we needn't do sb_start_intwrite().
	 */
	if (type & __TRANS_FREEZABLE)
		sb_start_intwrite(fs_info->sb);

	if (may_wait_transaction(fs_info, type))
		wait_current_trans(fs_info);

	do {
		ret = join_transaction(fs_info, type);
		if (ret == -EBUSY) {
			wait_current_trans(fs_info);
			if (unlikely(type == TRANS_ATTACH ||
				     type == TRANS_JOIN_NOSTART))
				ret = -ENOENT;
		}
	} while (ret == -EBUSY);

	if (ret < 0)
		goto join_fail;

	cur_trans = fs_info->running_transaction;

	h->transid = cur_trans->transid;
	h->transaction = cur_trans;
	h->root = root;
	refcount_set(&h->use_count, 1);
	h->fs_info = root->fs_info;

	h->type = type;
	INIT_LIST_HEAD(&h->new_bgs);

	smp_mb();
	if (cur_trans->state >= TRANS_STATE_COMMIT_START &&
	    may_wait_transaction(fs_info, type)) {
		current->journal_info = h;
		btrfs_commit_transaction(h);
		goto again;
	}

	if (num_bytes) {
		trace_btrfs_space_reservation(fs_info, "transaction",
					      h->transid, num_bytes, 1);
		h->block_rsv = &fs_info->trans_block_rsv;
		h->bytes_reserved = num_bytes;
		h->reloc_reserved = reloc_reserved;
	}

got_it:
	if (!current->journal_info)
		current->journal_info = h;

	/*
	 * If the space_info is marked ALLOC_FORCE then we'll get upgraded to
	 * ALLOC_FORCE the first run through, and then we won't allocate for
	 * anybody else who races in later.  We don't care about the return
	 * value here.
	 */
	if (do_chunk_alloc && num_bytes) {
		u64 flags = h->block_rsv->space_info->flags;

		btrfs_chunk_alloc(h, btrfs_get_alloc_profile(fs_info, flags),
				  CHUNK_ALLOC_NO_FORCE);
	}

	/*
	 * btrfs_record_root_in_trans() needs to alloc new extents, and may
	 * call btrfs_join_transaction() while we're also starting a
	 * transaction.
	 *
	 * Thus it needs to be called after current->journal_info is
	 * initialized, or we can deadlock.
	 */
	ret = btrfs_record_root_in_trans(h, root);
	if (ret) {
		/*
		 * The transaction handle is fully initialized and linked with
		 * other structures so it needs to be ended in case of errors,
		 * not just freed.
		 */
		btrfs_end_transaction(h);
		return ERR_PTR(ret);
	}

	return h;

join_fail:
	if (type & __TRANS_FREEZABLE)
		sb_end_intwrite(fs_info->sb);
	kmem_cache_free(btrfs_trans_handle_cachep, h);
alloc_fail:
	if (num_bytes)
		btrfs_block_rsv_release(fs_info, &fs_info->trans_block_rsv,
					num_bytes, NULL);
reserve_fail:
	btrfs_qgroup_free_meta_pertrans(root, qgroup_reserved);
	return ERR_PTR(ret);
}
struct btrfs_trans_handle *btrfs_start_transaction(struct btrfs_root *root,
						   unsigned int num_items)
{
	return start_transaction(root, num_items, TRANS_START,
				 BTRFS_RESERVE_FLUSH_ALL, true);
}

struct btrfs_trans_handle *btrfs_start_transaction_fallback_global_rsv(
					struct btrfs_root *root,
					unsigned int num_items)
{
	return start_transaction(root, num_items, TRANS_START,
				 BTRFS_RESERVE_FLUSH_ALL_STEAL, false);
}

struct btrfs_trans_handle *btrfs_join_transaction(struct btrfs_root *root)
{
	return start_transaction(root, 0, TRANS_JOIN, BTRFS_RESERVE_NO_FLUSH,
				 true);
}

struct btrfs_trans_handle *btrfs_join_transaction_spacecache(struct btrfs_root *root)
{
	return start_transaction(root, 0, TRANS_JOIN_NOLOCK,
				 BTRFS_RESERVE_NO_FLUSH, true);
}

/*
 * Similar to regular join but it never starts a transaction when none is
 * running or after waiting for the current one to finish.
 */
struct btrfs_trans_handle *btrfs_join_transaction_nostart(struct btrfs_root *root)
{
	return start_transaction(root, 0, TRANS_JOIN_NOSTART,
				 BTRFS_RESERVE_NO_FLUSH, true);
}
/*
 * btrfs_attach_transaction() - catch the running transaction
 *
 * It is used when we want to commit the current transaction, but
 * don't want to start a new one.
 *
 * Note: If this function returns -ENOENT, it just means there is no
 * running transaction. But it is possible that the inactive transaction
 * is still in memory, not fully on disk. If you hope there is no
 * inactive transaction in the fs when -ENOENT is returned, you should
 * use
 * btrfs_attach_transaction_barrier()
 */
struct btrfs_trans_handle *btrfs_attach_transaction(struct btrfs_root *root)
{
	return start_transaction(root, 0, TRANS_ATTACH,
				 BTRFS_RESERVE_NO_FLUSH, true);
}
/*
 * btrfs_attach_transaction_barrier() - catch the running transaction
 *
 * It is similar to the above function, the difference is this one
 * will wait for all the inactive transactions until they fully
 * complete.
 */
struct btrfs_trans_handle *
btrfs_attach_transaction_barrier(struct btrfs_root *root)
{
	struct btrfs_trans_handle *trans;

	trans = start_transaction(root, 0, TRANS_ATTACH,
				  BTRFS_RESERVE_NO_FLUSH, true);
	if (trans == ERR_PTR(-ENOENT))
		btrfs_wait_for_commit(root->fs_info, 0);

	return trans;
}
/* Wait for a transaction commit to reach at least the given state. */
static noinline void wait_for_commit(struct btrfs_transaction *commit,
				     const enum btrfs_trans_state min_state)
{
	struct btrfs_fs_info *fs_info = commit->fs_info;
	u64 transid = commit->transid;
	bool put = false;

	while (1) {
		wait_event(commit->commit_wait, commit->state >= min_state);
		if (put)
			btrfs_put_transaction(commit);

		if (min_state < TRANS_STATE_COMPLETED)
			break;

		/*
		 * A transaction isn't really completed until all of the
		 * previous transactions are completed, but with fsync we can
		 * end up with SUPER_COMMITTED transactions before a COMPLETED
		 * transaction. Wait for those.
		 */

		spin_lock(&fs_info->trans_lock);
		commit = list_first_entry_or_null(&fs_info->trans_list,
						  struct btrfs_transaction,
						  list);
		if (!commit || commit->transid > transid) {
			spin_unlock(&fs_info->trans_lock);
			break;
		}
		refcount_inc(&commit->use_count);
		put = true;
		spin_unlock(&fs_info->trans_lock);
	}
}
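
/*
 * Wait for the transaction with the given transid to fully complete, or,
 * when transid is 0, for the newest committing transaction (if any) to
 * complete.  Returns -EINVAL when a specific transid was requested that has
 * neither been committed nor started yet.
 */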
int btrfs_wait_for_commit(struct btrfs_fs_info *fs_info, u64 transid)
{
	struct btrfs_transaction *cur_trans = NULL, *t;
	int ret = 0;

	if (transid) {
		if (transid <= fs_info->last_trans_committed)
			goto out;

		/* find specified transaction */
		spin_lock(&fs_info->trans_lock);
		list_for_each_entry(t, &fs_info->trans_list, list) {
			if (t->transid == transid) {
				cur_trans = t;
				refcount_inc(&cur_trans->use_count);
				ret = 0;
				break;
			}
			if (t->transid > transid) {
				ret = 0;
				break;
			}
		}
		spin_unlock(&fs_info->trans_lock);

		/*
		 * The specified transaction doesn't exist, or we
		 * raced with btrfs_commit_transaction
		 */
		if (!cur_trans) {
			if (transid > fs_info->last_trans_committed)
				ret = -EINVAL;
			goto out;
		}
	} else {
		/* find newest transaction that is committing | committed */
		spin_lock(&fs_info->trans_lock);
		list_for_each_entry_reverse(t, &fs_info->trans_list,
					    list) {
			if (t->state >= TRANS_STATE_COMMIT_START) {
				if (t->state == TRANS_STATE_COMPLETED)
					break;
				cur_trans = t;
				refcount_inc(&cur_trans->use_count);
				break;
			}
		}
		spin_unlock(&fs_info->trans_lock);
		if (!cur_trans)
			goto out;  /* nothing committing|committed */
	}

	wait_for_commit(cur_trans, TRANS_STATE_COMPLETED);
	btrfs_put_transaction(cur_trans);
out:
	return ret;
}
void btrfs_throttle(struct btrfs_fs_info *fs_info)
{
	wait_current_trans(fs_info);
}
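
/*
 * Heuristic for callers that batch work in one handle: end the transaction
 * early when the delayed ref backlog or the global block reserve indicates
 * we are running low on metadata space.
 */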
static bool should_end_transaction(struct btrfs_trans_handle *trans)
{
	struct btrfs_fs_info *fs_info = trans->fs_info;

	if (btrfs_check_space_for_delayed_refs(fs_info))
		return true;

	return !!btrfs_block_rsv_check(&fs_info->global_block_rsv, 5);
}

bool btrfs_should_end_transaction(struct btrfs_trans_handle *trans)
{
	struct btrfs_transaction *cur_trans = trans->transaction;

	if (cur_trans->state >= TRANS_STATE_COMMIT_START ||
	    test_bit(BTRFS_DELAYED_REFS_FLUSHING, &cur_trans->delayed_refs.flags))
		return true;

	return should_end_transaction(trans);
}
static void btrfs_trans_release_metadata(struct btrfs_trans_handle *trans)
{
	struct btrfs_fs_info *fs_info = trans->fs_info;

	if (!trans->block_rsv) {
		ASSERT(!trans->bytes_reserved);
		return;
	}

	if (!trans->bytes_reserved)
		return;

	ASSERT(trans->block_rsv == &fs_info->trans_block_rsv);
	trace_btrfs_space_reservation(fs_info, "transaction",
				      trans->transid, trans->bytes_reserved, 0);
	btrfs_block_rsv_release(fs_info, trans->block_rsv,
				trans->bytes_reserved, NULL);
	trans->bytes_reserved = 0;
}
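
/*
 * Drop one reference on the handle.  Only the last reference actually
 * releases the metadata reservations, detaches from the transaction and
 * frees the handle; nested users just restore the original block reserve.
 */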
static int __btrfs_end_transaction(struct btrfs_trans_handle *trans,
				   int throttle)
{
	struct btrfs_fs_info *info = trans->fs_info;
	struct btrfs_transaction *cur_trans = trans->transaction;
	int err = 0;

	if (refcount_read(&trans->use_count) > 1) {
		refcount_dec(&trans->use_count);
		trans->block_rsv = trans->orig_rsv;
		return 0;
	}

	btrfs_trans_release_metadata(trans);
	trans->block_rsv = NULL;

	btrfs_create_pending_block_groups(trans);

	btrfs_trans_release_chunk_metadata(trans);

	if (trans->type & __TRANS_FREEZABLE)
		sb_end_intwrite(info->sb);

	WARN_ON(cur_trans != info->running_transaction);
	WARN_ON(atomic_read(&cur_trans->num_writers) < 1);
	atomic_dec(&cur_trans->num_writers);
	extwriter_counter_dec(cur_trans, trans->type);

	cond_wake_up(&cur_trans->writer_wait);
	btrfs_put_transaction(cur_trans);

	if (current->journal_info == trans)
		current->journal_info = NULL;

	if (throttle)
		btrfs_run_delayed_iputs(info);

	if (TRANS_ABORTED(trans) ||
	    test_bit(BTRFS_FS_STATE_ERROR, &info->fs_state)) {
		wake_up_process(info->transaction_kthread);
		if (TRANS_ABORTED(trans))
			err = trans->aborted;
		else
			err = -EROFS;
	}

	kmem_cache_free(btrfs_trans_handle_cachep, trans);
	return err;
}

int btrfs_end_transaction(struct btrfs_trans_handle *trans)
{
	return __btrfs_end_transaction(trans, 0);
}

int btrfs_end_transaction_throttle(struct btrfs_trans_handle *trans)
{
	return __btrfs_end_transaction(trans, 1);
}
/*
 * when btree blocks are allocated, they have some corresponding bits set for
 * them in one of two extent_io trees.  This is used to make sure all of
 * those extents are sent to disk but does not wait on them
 */
int btrfs_write_marked_extents(struct btrfs_fs_info *fs_info,
			       struct extent_io_tree *dirty_pages, int mark)
{
	int err = 0;
	int werr = 0;
	struct address_space *mapping = fs_info->btree_inode->i_mapping;
	struct extent_state *cached_state = NULL;
	u64 start = 0;
	u64 end;

	atomic_inc(&BTRFS_I(fs_info->btree_inode)->sync_writers);
	while (!find_first_extent_bit(dirty_pages, start, &start, &end,
				      mark, &cached_state)) {
		bool wait_writeback = false;

		err = convert_extent_bit(dirty_pages, start, end,
					 EXTENT_NEED_WAIT,
					 mark, &cached_state);
		/*
		 * convert_extent_bit can return -ENOMEM, which is most of the
		 * time a temporary error. So when it happens, ignore the error
		 * and wait for writeback of this range to finish - because we
		 * failed to set the bit EXTENT_NEED_WAIT for the range, a call
		 * to __btrfs_wait_marked_extents() would not know that
		 * writeback for this range started and therefore wouldn't
		 * wait for it to finish - we don't want to commit a
		 * superblock that points to btree nodes/leafs for which
		 * writeback hasn't finished yet (and without errors).
		 * We cleanup any entries left in the io tree when committing
		 * the transaction (through extent_io_tree_release()).
		 */
		if (err == -ENOMEM) {
			err = 0;
			wait_writeback = true;
		}
		if (!err)
			err = filemap_fdatawrite_range(mapping, start, end);
		if (err)
			werr = err;
		else if (wait_writeback)
			werr = filemap_fdatawait_range(mapping, start, end);
		free_extent_state(cached_state);
		cached_state = NULL;
		cond_resched();
		start = end + 1;
	}
	atomic_dec(&BTRFS_I(fs_info->btree_inode)->sync_writers);
	return werr;
}
/*
 * when btree blocks are allocated, they have some corresponding bits set for
 * them in one of two extent_io trees.  This is used to make sure all of
 * those extents are on disk for transaction or log commit.  We wait
 * on all the pages and clear them from the dirty pages state tree
 */
static int __btrfs_wait_marked_extents(struct btrfs_fs_info *fs_info,
				       struct extent_io_tree *dirty_pages)
{
	int err = 0;
	int werr = 0;
	struct address_space *mapping = fs_info->btree_inode->i_mapping;
	struct extent_state *cached_state = NULL;
	u64 start = 0;
	u64 end;

	while (!find_first_extent_bit(dirty_pages, start, &start, &end,
				      EXTENT_NEED_WAIT, &cached_state)) {
		/*
		 * Ignore -ENOMEM errors returned by clear_extent_bit().
		 * When committing the transaction, we'll remove any entries
		 * left in the io tree. For a log commit, we don't remove them
		 * after committing the log because the tree can be accessed
		 * concurrently - we do it only at transaction commit time when
		 * it's safe to do it (through extent_io_tree_release()).
		 */
		err = clear_extent_bit(dirty_pages, start, end,
				       EXTENT_NEED_WAIT, 0, 0, &cached_state);
		if (err == -ENOMEM)
			err = 0;
		if (!err)
			err = filemap_fdatawait_range(mapping, start, end);
		if (err)
			werr = err;
		free_extent_state(cached_state);
		cached_state = NULL;
		cond_resched();
		start = end + 1;
	}
	if (err)
		werr = err;
	return werr;
}
static int btrfs_wait_extents(struct btrfs_fs_info *fs_info,
			      struct extent_io_tree *dirty_pages)
{
	bool errors = false;
	int err;

	err = __btrfs_wait_marked_extents(fs_info, dirty_pages);
	if (test_and_clear_bit(BTRFS_FS_BTREE_ERR, &fs_info->flags))
		errors = true;

	if (errors && !err)
		err = -EIO;
	return err;
}
int btrfs_wait_tree_log_extents(struct btrfs_root *log_root, int mark)
{
	struct btrfs_fs_info *fs_info = log_root->fs_info;
	struct extent_io_tree *dirty_pages = &log_root->dirty_log_pages;
	bool errors = false;
	int err;

	ASSERT(log_root->root_key.objectid == BTRFS_TREE_LOG_OBJECTID);

	err = __btrfs_wait_marked_extents(fs_info, dirty_pages);
	if ((mark & EXTENT_DIRTY) &&
	    test_and_clear_bit(BTRFS_FS_LOG1_ERR, &fs_info->flags))
		errors = true;

	if ((mark & EXTENT_NEW) &&
	    test_and_clear_bit(BTRFS_FS_LOG2_ERR, &fs_info->flags))
		errors = true;

	if (errors && !err)
		err = -EIO;
	return err;
}
/*
 * When btree blocks are allocated the corresponding extents are marked dirty.
 * This function ensures such extents are persisted on disk for transaction or
 * log commit.
 *
 * @trans: transaction whose dirty pages we'd like to write
 */
static int btrfs_write_and_wait_transaction(struct btrfs_trans_handle *trans)
{
	int ret;
	int ret2;
	struct extent_io_tree *dirty_pages = &trans->transaction->dirty_pages;
	struct btrfs_fs_info *fs_info = trans->fs_info;
	struct blk_plug plug;

	blk_start_plug(&plug);
	ret = btrfs_write_marked_extents(fs_info, dirty_pages, EXTENT_DIRTY);
	blk_finish_plug(&plug);
	ret2 = btrfs_wait_extents(fs_info, dirty_pages);

	extent_io_tree_release(&trans->transaction->dirty_pages);

	if (ret)
		return ret;
	else if (ret2)
		return ret2;
	else
		return 0;
}
/*
 * this is used to update the root pointer in the tree of tree roots.
 *
 * But, in the case of the extent allocation tree, updating the root
 * pointer may allocate blocks which may change the root of the extent
 * allocation tree.
 *
 * So, this loops and repeats and makes sure the cowonly root didn't
 * change while the root pointer was being updated in the metadata.
 */
static int update_cowonly_root(struct btrfs_trans_handle *trans,
			       struct btrfs_root *root)
{
	int ret;
	u64 old_root_bytenr;
	u64 old_root_used;
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_root *tree_root = fs_info->tree_root;

	old_root_used = btrfs_root_used(&root->root_item);

	while (1) {
		old_root_bytenr = btrfs_root_bytenr(&root->root_item);
		if (old_root_bytenr == root->node->start &&
		    old_root_used == btrfs_root_used(&root->root_item))
			break;

		btrfs_set_root_node(&root->root_item, root->node);
		ret = btrfs_update_root(trans, tree_root,
					&root->root_key,
					&root->root_item);
		if (ret)
			return ret;

		old_root_used = btrfs_root_used(&root->root_item);
	}

	return 0;
}
/*
 * update all the cowonly tree roots on disk
 *
 * The error handling in this function may not be obvious. Any of the
 * failures will cause the file system to go offline. We still need
 * to clean up the delayed refs.
 */
static noinline int commit_cowonly_roots(struct btrfs_trans_handle *trans)
{
	struct btrfs_fs_info *fs_info = trans->fs_info;
	struct list_head *dirty_bgs = &trans->transaction->dirty_bgs;
	struct list_head *io_bgs = &trans->transaction->io_bgs;
	struct list_head *next;
	struct extent_buffer *eb;
	int ret;

	eb = btrfs_lock_root_node(fs_info->tree_root);
	ret = btrfs_cow_block(trans, fs_info->tree_root, eb, NULL,
			      0, &eb, BTRFS_NESTING_COW);
	btrfs_tree_unlock(eb);
	free_extent_buffer(eb);

	if (ret)
		return ret;

	ret = btrfs_run_dev_stats(trans);
	if (ret)
		return ret;
	ret = btrfs_run_dev_replace(trans);
	if (ret)
		return ret;
	ret = btrfs_run_qgroups(trans);
	if (ret)
		return ret;

	ret = btrfs_setup_space_cache(trans);
	if (ret)
		return ret;

again:
	while (!list_empty(&fs_info->dirty_cowonly_roots)) {
		struct btrfs_root *root;

		next = fs_info->dirty_cowonly_roots.next;
		list_del_init(next);
		root = list_entry(next, struct btrfs_root, dirty_list);
		clear_bit(BTRFS_ROOT_DIRTY, &root->state);

		if (root != fs_info->extent_root)
			list_add_tail(&root->dirty_list,
				      &trans->transaction->switch_commits);
		ret = update_cowonly_root(trans, root);
		if (ret)
			return ret;
	}

	/* Now flush any delayed refs generated by updating all of the roots */
	ret = btrfs_run_delayed_refs(trans, (unsigned long)-1);
	if (ret)
		return ret;

	while (!list_empty(dirty_bgs) || !list_empty(io_bgs)) {
		ret = btrfs_write_dirty_block_groups(trans);
		if (ret)
			return ret;

		/*
		 * We're writing the dirty block groups, which could generate
		 * delayed refs, which could generate more dirty block groups,
		 * so we want to keep this flushing in this loop to make sure
		 * everything gets run.
		 */
		ret = btrfs_run_delayed_refs(trans, (unsigned long)-1);
		if (ret)
			return ret;
	}

	if (!list_empty(&fs_info->dirty_cowonly_roots))
		goto again;

	list_add_tail(&fs_info->extent_root->dirty_list,
		      &trans->transaction->switch_commits);

	/* Update dev-replace pointer once everything is committed */
	fs_info->dev_replace.committed_cursor_left =
		fs_info->dev_replace.cursor_left_last_write_of_item;

	return 0;
}
/*
 * If we had a pending drop we need to see if there are any others left in our
 * dead roots list, and if not clear our bit and wake any waiters.
 */
void btrfs_maybe_wake_unfinished_drop(struct btrfs_fs_info *fs_info)
{
	/*
	 * We put the drop in progress roots at the front of the list, so if the
	 * first entry doesn't have UNFINISHED_DROP set we can wake everybody
	 * up.
	 */
	spin_lock(&fs_info->trans_lock);
	if (!list_empty(&fs_info->dead_roots)) {
		struct btrfs_root *root = list_first_entry(&fs_info->dead_roots,
							   struct btrfs_root,
							   root_list);
		if (test_bit(BTRFS_ROOT_UNFINISHED_DROP, &root->state)) {
			spin_unlock(&fs_info->trans_lock);
			return;
		}
	}
	spin_unlock(&fs_info->trans_lock);

	btrfs_wake_unfinished_drop(fs_info);
}
/*
 * dead roots are old snapshots that need to be deleted.  This allocates
 * a dirty root struct and adds it into the list of dead roots that need to
 * be deleted
 */
void btrfs_add_dead_root(struct btrfs_root *root)
{
	struct btrfs_fs_info *fs_info = root->fs_info;

	spin_lock(&fs_info->trans_lock);
	if (list_empty(&root->root_list)) {
		btrfs_grab_root(root);

		/* We want to process the partially complete drops first. */
		if (test_bit(BTRFS_ROOT_UNFINISHED_DROP, &root->state))
			list_add(&root->root_list, &fs_info->dead_roots);
		else
			list_add_tail(&root->root_list, &fs_info->dead_roots);
	}
	spin_unlock(&fs_info->trans_lock);
}
/*
 * Commit all the dirty fs-tree roots on disk.
 */
static noinline int commit_fs_roots(struct btrfs_trans_handle *trans)
{
	struct btrfs_fs_info *fs_info = trans->fs_info;
	struct btrfs_root *gang[8];
	int i;
	int ret;

	spin_lock(&fs_info->fs_roots_radix_lock);
	while (1) {
		ret = radix_tree_gang_lookup_tag(&fs_info->fs_roots_radix,
						 (void **)gang, 0,
						 ARRAY_SIZE(gang),
						 BTRFS_ROOT_TRANS_TAG);
		if (ret == 0)
			break;
		for (i = 0; i < ret; i++) {
			struct btrfs_root *root = gang[i];
			int ret2;

			radix_tree_tag_clear(&fs_info->fs_roots_radix,
					(unsigned long)root->root_key.objectid,
					BTRFS_ROOT_TRANS_TAG);
			spin_unlock(&fs_info->fs_roots_radix_lock);

			btrfs_free_log(trans, root);
			ret2 = btrfs_update_reloc_root(trans, root);
			if (ret2)
				return ret2;

			/* see comments in should_cow_block() */
			clear_bit(BTRFS_ROOT_FORCE_COW, &root->state);
			smp_mb__after_atomic();

			if (root->commit_root != root->node) {
				list_add_tail(&root->dirty_list,
					&trans->transaction->switch_commits);
				btrfs_set_root_node(&root->root_item,
						    root->node);
			}

			ret2 = btrfs_update_root(trans, fs_info->tree_root,
						 &root->root_key,
						 &root->root_item);
			if (ret2)
				return ret2;
			spin_lock(&fs_info->fs_roots_radix_lock);
			btrfs_qgroup_free_meta_all_pertrans(root);
		}
	}
	spin_unlock(&fs_info->fs_roots_radix_lock);
	return 0;
}
/*
 * defrag a given btree.
 * Every leaf in the btree is read and defragged.
 */
int btrfs_defrag_root(struct btrfs_root *root)
{
	struct btrfs_fs_info *info = root->fs_info;
	struct btrfs_trans_handle *trans;
	int ret;

	if (test_and_set_bit(BTRFS_ROOT_DEFRAG_RUNNING, &root->state))
		return 0;

	while (1) {
		trans = btrfs_start_transaction(root, 0);
		if (IS_ERR(trans)) {
			ret = PTR_ERR(trans);
			break;
		}

		ret = btrfs_defrag_leaves(trans, root);

		btrfs_end_transaction(trans);
		btrfs_btree_balance_dirty(info);
		cond_resched();

		if (btrfs_fs_closing(info) || ret != -EAGAIN)
			break;

		if (btrfs_defrag_cancelled(info)) {
			btrfs_debug(info, "defrag_root cancelled");
			ret = -EAGAIN;
			break;
		}
	}
	clear_bit(BTRFS_ROOT_DEFRAG_RUNNING, &root->state);
	return ret;
}
/*
 * Do all special snapshot related qgroup dirty hack.
 *
 * Will do all needed qgroup inherit and dirty hack like switch commit
 * roots inside one transaction and write all btree into disk, to make
 * qgroup accounting work.
 */
static int qgroup_account_snapshot(struct btrfs_trans_handle *trans,
				   struct btrfs_root *src,
				   struct btrfs_root *parent,
				   struct btrfs_qgroup_inherit *inherit,
				   u64 dst_objectid)
{
	struct btrfs_fs_info *fs_info = src->fs_info;
	int ret;

	/*
	 * Save some performance in the case that qgroups are not
	 * enabled. If this check races with the ioctl, rescan will
	 * kick in anyway.
	 */
	if (!test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags))
		return 0;

	/*
	 * Ensure dirty @src will be committed.  Otherwise, after the coming
	 * commit_fs_roots() and switch_commit_roots(), any dirty but not
	 * recorded root will never be updated again, causing an outdated root
	 * item.
	 */
	ret = record_root_in_trans(trans, src, 1);
	if (ret)
		return ret;

	/*
	 * btrfs_qgroup_inherit relies on a consistent view of the usage for the
	 * src root, so we must run the delayed refs here.
	 *
	 * However this isn't particularly fool proof, because there's no
	 * synchronization keeping us from changing the tree after this point
	 * before we do the qgroup_inherit, or even from making changes while
	 * we're doing the qgroup_inherit.  But that's a problem for the future,
	 * for now flush the delayed refs to narrow the race window where the
	 * qgroup counters could end up wrong.
	 */
	ret = btrfs_run_delayed_refs(trans, (unsigned long)-1);
	if (ret) {
		btrfs_abort_transaction(trans, ret);
		return ret;
	}

	/*
	 * We are going to commit transaction, see btrfs_commit_transaction()
	 * comment for reason locking tree_log_mutex
	 */
	mutex_lock(&fs_info->tree_log_mutex);

	ret = commit_fs_roots(trans);
	if (ret)
		goto out;
	ret = btrfs_qgroup_account_extents(trans);
	if (ret < 0)
		goto out;

	/* Now qgroup are all updated, we can inherit it to new qgroups */
	ret = btrfs_qgroup_inherit(trans, src->root_key.objectid, dst_objectid,
				   inherit);
	if (ret < 0)
		goto out;

	/*
	 * Now we do a simplified commit transaction, which will:
	 * 1) commit all subvolume and extent tree
	 *    To ensure all subvolume and extent tree have a valid
	 *    commit_root to accounting later insert_dir_item()
	 * 2) write all btree blocks onto disk
	 *    This is to make sure later btree modification will be cowed
	 *    Or commit_root can be populated and cause wrong qgroup numbers
	 * In this simplified commit, we don't really care about other trees
	 * like chunk and root tree, as they won't affect qgroup.
	 * And we don't write super to avoid half committed status.
	 */
	ret = commit_cowonly_roots(trans);
	if (ret)
		goto out;
	switch_commit_roots(trans);
	ret = btrfs_write_and_wait_transaction(trans);
	if (ret)
		btrfs_handle_fs_error(fs_info, ret,
			"Error while writing out transaction for qgroup");

out:
	mutex_unlock(&fs_info->tree_log_mutex);

	/*
	 * Force parent root to be updated, as we recorded it before so its
	 * last_trans == cur_transid.
	 * Or it won't be committed again onto disk after later
	 * insert_dir_item()
	 */
	if (!ret)
		ret = record_root_in_trans(trans, parent, 1);
	return ret;
}
/*
 * new snapshots need to be created at a very specific time in the
 * transaction commit.  This does the actual creation.
 *
 * Note:
 * If an error occurs that may affect the commit of the current transaction,
 * we should return the error number. If the error only affects the creation
 * of the pending snapshots, just return 0.
 */
static noinline int create_pending_snapshot(struct btrfs_trans_handle *trans,
				   struct btrfs_pending_snapshot *pending)
{
	struct btrfs_fs_info *fs_info = trans->fs_info;
	struct btrfs_key key;
	struct btrfs_root_item *new_root_item;
	struct btrfs_root *tree_root = fs_info->tree_root;
	struct btrfs_root *root = pending->root;
	struct btrfs_root *parent_root;
	struct btrfs_block_rsv *rsv;
	struct inode *parent_inode;
	struct btrfs_path *path;
	struct btrfs_dir_item *dir_item;
	struct dentry *dentry;
	struct extent_buffer *tmp;
	struct extent_buffer *old;
	struct timespec64 cur_time;
	int ret = 0;
	u64 to_reserve = 0;
	u64 index = 0;
	u64 objectid;
	u64 root_flags;

	ASSERT(pending->path);
	path = pending->path;

	ASSERT(pending->root_item);
	new_root_item = pending->root_item;

	pending->error = btrfs_get_free_objectid(tree_root, &objectid);
	if (pending->error)
		goto no_free_objectid;

	/*
	 * Make qgroup to skip current new snapshot's qgroupid, as it is
	 * accounted by later btrfs_qgroup_inherit().
	 */
	btrfs_set_skip_qgroup(trans, objectid);

	btrfs_reloc_pre_snapshot(pending, &to_reserve);

	if (to_reserve > 0) {
		pending->error = btrfs_block_rsv_add(root,
						     &pending->block_rsv,
						     to_reserve,
						     BTRFS_RESERVE_NO_FLUSH);
		if (pending->error)
			goto clear_skip_qgroup;
	}

	key.objectid = objectid;
	key.offset = (u64)-1;
	key.type = BTRFS_ROOT_ITEM_KEY;

	rsv = trans->block_rsv;
	trans->block_rsv = &pending->block_rsv;
	trans->bytes_reserved = trans->block_rsv->reserved;
	trace_btrfs_space_reservation(fs_info, "transaction",
				      trans->transid,
				      trans->bytes_reserved, 1);
	dentry = pending->dentry;
	parent_inode = pending->dir;
	parent_root = BTRFS_I(parent_inode)->root;
	ret = record_root_in_trans(trans, parent_root, 0);
	if (ret)
		goto fail;
	cur_time = current_time(parent_inode);

	/*
	 * insert the directory item
	 */
	ret = btrfs_set_inode_index(BTRFS_I(parent_inode), &index);
	BUG_ON(ret); /* -ENOMEM */

	/* check if there is a file/dir which has the same name. */
	dir_item = btrfs_lookup_dir_item(NULL, parent_root, path,
					 btrfs_ino(BTRFS_I(parent_inode)),
					 dentry->d_name.name,
					 dentry->d_name.len, 0);
	if (dir_item != NULL && !IS_ERR(dir_item)) {
		pending->error = -EEXIST;
		goto dir_item_existed;
	} else if (IS_ERR(dir_item)) {
		ret = PTR_ERR(dir_item);
		btrfs_abort_transaction(trans, ret);
		goto fail;
	}
	btrfs_release_path(path);

	/*
	 * pull in the delayed directory update
	 * and the delayed inode item
	 * otherwise we corrupt the FS during
	 * snapshot
	 */
	ret = btrfs_run_delayed_items(trans);
	if (ret) {	/* Transaction aborted */
		btrfs_abort_transaction(trans, ret);
		goto fail;
	}

	ret = record_root_in_trans(trans, root, 0);
	if (ret) {
		btrfs_abort_transaction(trans, ret);
		goto fail;
	}
	btrfs_set_root_last_snapshot(&root->root_item, trans->transid);
	memcpy(new_root_item, &root->root_item, sizeof(*new_root_item));
	btrfs_check_and_init_root_item(new_root_item);

	root_flags = btrfs_root_flags(new_root_item);
	if (pending->readonly)
		root_flags |= BTRFS_ROOT_SUBVOL_RDONLY;
	else
		root_flags &= ~BTRFS_ROOT_SUBVOL_RDONLY;
	btrfs_set_root_flags(new_root_item, root_flags);

	btrfs_set_root_generation_v2(new_root_item,
				     trans->transid);
	generate_random_guid(new_root_item->uuid);
	memcpy(new_root_item->parent_uuid, root->root_item.uuid,
	       BTRFS_UUID_SIZE);
	if (!(root_flags & BTRFS_ROOT_SUBVOL_RDONLY)) {
		memset(new_root_item->received_uuid, 0,
		       sizeof(new_root_item->received_uuid));
		memset(&new_root_item->stime, 0, sizeof(new_root_item->stime));
		memset(&new_root_item->rtime, 0, sizeof(new_root_item->rtime));
		btrfs_set_root_stransid(new_root_item, 0);
		btrfs_set_root_rtransid(new_root_item, 0);
	}
	btrfs_set_stack_timespec_sec(&new_root_item->otime, cur_time.tv_sec);
	btrfs_set_stack_timespec_nsec(&new_root_item->otime, cur_time.tv_nsec);
	btrfs_set_root_otransid(new_root_item, trans->transid);

	old = btrfs_lock_root_node(root);
	ret = btrfs_cow_block(trans, root, old, NULL, 0, &old,
			      BTRFS_NESTING_COW);
	if (ret) {
		btrfs_tree_unlock(old);
		free_extent_buffer(old);
		btrfs_abort_transaction(trans, ret);
		goto fail;
	}

	ret = btrfs_copy_root(trans, root, old, &tmp, objectid);
	/* clean up in any case */
	btrfs_tree_unlock(old);
	free_extent_buffer(old);
	if (ret) {
		btrfs_abort_transaction(trans, ret);
		goto fail;
	}
	/* see comments in should_cow_block() */
	set_bit(BTRFS_ROOT_FORCE_COW, &root->state);
	smp_wmb();

	btrfs_set_root_node(new_root_item, tmp);
	/* record when the snapshot was created in key.offset */
	key.offset = trans->transid;
	ret = btrfs_insert_root(trans, tree_root, &key, new_root_item);
	btrfs_tree_unlock(tmp);
	free_extent_buffer(tmp);
	if (ret) {
		btrfs_abort_transaction(trans, ret);
		goto fail;
	}

	/*
	 * insert root back/forward references
	 */
	ret = btrfs_add_root_ref(trans, objectid,
				 parent_root->root_key.objectid,
				 btrfs_ino(BTRFS_I(parent_inode)), index,
				 dentry->d_name.name, dentry->d_name.len);
	if (ret) {
		btrfs_abort_transaction(trans, ret);
		goto fail;
	}

	key.offset = (u64)-1;
	pending->snap = btrfs_get_new_fs_root(fs_info, objectid, pending->anon_dev);
	if (IS_ERR(pending->snap)) {
		ret = PTR_ERR(pending->snap);
		pending->snap = NULL;
		btrfs_abort_transaction(trans, ret);
		goto fail;
	}

	ret = btrfs_reloc_post_snapshot(trans, pending);
	if (ret) {
		btrfs_abort_transaction(trans, ret);
		goto fail;
	}

	/*
	 * Do special qgroup accounting for snapshot, as we do some qgroup
	 * snapshot hack to do fast snapshot.
	 * To co-operate with that hack, we do hack again.
	 * Or snapshot will be greatly slowed down by a subtree qgroup rescan
	 */
	ret = qgroup_account_snapshot(trans, root, parent_root,
				      pending->inherit, objectid);
	if (ret < 0)
		goto fail;

	ret = btrfs_insert_dir_item(trans, dentry->d_name.name,
				    dentry->d_name.len, BTRFS_I(parent_inode),
				    &key, BTRFS_FT_DIR, index);
	/* We have checked the name at the beginning, so it is impossible. */
	BUG_ON(ret == -EEXIST || ret == -EOVERFLOW);
	if (ret) {
		btrfs_abort_transaction(trans, ret);
		goto fail;
	}

	btrfs_i_size_write(BTRFS_I(parent_inode), parent_inode->i_size +
			   dentry->d_name.len * 2);
	parent_inode->i_mtime = parent_inode->i_ctime =
		current_time(parent_inode);
	ret = btrfs_update_inode_fallback(trans, parent_root, BTRFS_I(parent_inode));
	if (ret) {
		btrfs_abort_transaction(trans, ret);
		goto fail;
	}
	ret = btrfs_uuid_tree_add(trans, new_root_item->uuid,
				  BTRFS_UUID_KEY_SUBVOL,
				  objectid);
	if (ret) {
		btrfs_abort_transaction(trans, ret);
		goto fail;
	}
	if (!btrfs_is_empty_uuid(new_root_item->received_uuid)) {
		ret = btrfs_uuid_tree_add(trans, new_root_item->received_uuid,
					  BTRFS_UUID_KEY_RECEIVED_SUBVOL,
					  objectid);
		if (ret && ret != -EEXIST) {
			btrfs_abort_transaction(trans, ret);
			goto fail;
		}
	}

fail:
	pending->error = ret;
dir_item_existed:
	trans->block_rsv = rsv;
	trans->bytes_reserved = 0;
clear_skip_qgroup:
	btrfs_clear_skip_qgroup(trans);
no_free_objectid:
	kfree(new_root_item);
	pending->root_item = NULL;
	btrfs_free_path(path);
	pending->path = NULL;

	return ret;
}
/*
 * create all the snapshots we've scheduled for creation
 */
static noinline int create_pending_snapshots(struct btrfs_trans_handle *trans)
{
	struct btrfs_pending_snapshot *pending, *next;
	struct list_head *head = &trans->transaction->pending_snapshots;
	int ret = 0;

	list_for_each_entry_safe(pending, next, head, list) {
		list_del(&pending->list);
		ret = create_pending_snapshot(trans, pending);
		if (ret)
			break;
	}
	return ret;
}
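
/*
 * Copy the root pointers of the chunk and tree roots into the in-memory
 * superblock copy, which is what the commit later writes to the super blocks
 * on all devices.
 */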
static void update_super_roots(struct btrfs_fs_info *fs_info)
{
	struct btrfs_root_item *root_item;
	struct btrfs_super_block *super;

	super = fs_info->super_copy;

	root_item = &fs_info->chunk_root->root_item;
	super->chunk_root = root_item->bytenr;
	super->chunk_root_generation = root_item->generation;
	super->chunk_root_level = root_item->level;

	root_item = &fs_info->tree_root->root_item;
	super->root = root_item->bytenr;
	super->generation = root_item->generation;
	super->root_level = root_item->level;
	if (btrfs_test_opt(fs_info, SPACE_CACHE))
		super->cache_generation = root_item->generation;
	else if (test_bit(BTRFS_FS_CLEANUP_SPACE_CACHE_V1, &fs_info->flags))
		super->cache_generation = 0;
	if (test_bit(BTRFS_FS_UPDATE_UUID_TREE_GEN, &fs_info->flags))
		super->uuid_tree_generation = root_item->generation;
}
int btrfs_transaction_in_commit(struct btrfs_fs_info *info)
{
	struct btrfs_transaction *trans;
	int ret = 0;

	spin_lock(&info->trans_lock);
	trans = info->running_transaction;
	if (trans)
		ret = (trans->state >= TRANS_STATE_COMMIT_START);
	spin_unlock(&info->trans_lock);
	return ret;
}

int btrfs_transaction_blocked(struct btrfs_fs_info *info)
{
	struct btrfs_transaction *trans;
	int ret = 0;

	spin_lock(&info->trans_lock);
	trans = info->running_transaction;
	if (trans)
		ret = is_transaction_blocked(trans);
	spin_unlock(&info->trans_lock);
	return ret;
}
/*
 * commit transactions asynchronously. once btrfs_commit_transaction_async
 * returns, any subsequent transaction will not be allowed to join.
 */
struct btrfs_async_commit {
	struct btrfs_trans_handle *newtrans;
	struct work_struct work;
};

static void do_async_commit(struct work_struct *work)
{
	struct btrfs_async_commit *ac =
		container_of(work, struct btrfs_async_commit, work);

	/*
	 * We've got freeze protection passed with the transaction.
	 * Tell lockdep about it.
	 */
	if (ac->newtrans->type & __TRANS_FREEZABLE)
		__sb_writers_acquired(ac->newtrans->fs_info->sb, SB_FREEZE_FS);

	current->journal_info = ac->newtrans;

	btrfs_commit_transaction(ac->newtrans);
	kfree(ac);
}
int btrfs_commit_transaction_async(struct btrfs_trans_handle *trans)
{
	struct btrfs_fs_info *fs_info = trans->fs_info;
	struct btrfs_async_commit *ac;
	struct btrfs_transaction *cur_trans;

	ac = kmalloc(sizeof(*ac), GFP_NOFS);
	if (!ac)
		return -ENOMEM;

	INIT_WORK(&ac->work, do_async_commit);
	ac->newtrans = btrfs_join_transaction(trans->root);
	if (IS_ERR(ac->newtrans)) {
		int err = PTR_ERR(ac->newtrans);

		kfree(ac);
		return err;
	}

	/* take transaction reference */
	cur_trans = trans->transaction;
	refcount_inc(&cur_trans->use_count);

	btrfs_end_transaction(trans);

	/*
	 * Tell lockdep we've released the freeze rwsem, since the
	 * async commit thread will be the one to unlock it.
	 */
	if (ac->newtrans->type & __TRANS_FREEZABLE)
		__sb_writers_release(fs_info->sb, SB_FREEZE_FS);

	schedule_work(&ac->work);
	/*
	 * Wait for the current transaction commit to start and block
	 * subsequent transaction joins
	 */
	wait_event(fs_info->transaction_blocked_wait,
		   cur_trans->state >= TRANS_STATE_COMMIT_START ||
		   TRANS_ABORTED(cur_trans));
	if (current->journal_info == trans)
		current->journal_info = NULL;

	btrfs_put_transaction(cur_trans);
	return 0;
}
2000 static void cleanup_transaction(struct btrfs_trans_handle
*trans
, int err
)
2002 struct btrfs_fs_info
*fs_info
= trans
->fs_info
;
2003 struct btrfs_transaction
*cur_trans
= trans
->transaction
;
2005 WARN_ON(refcount_read(&trans
->use_count
) > 1);
2007 btrfs_abort_transaction(trans
, err
);
2009 spin_lock(&fs_info
->trans_lock
);
2012 * If the transaction is removed from the list, it means this
2013 * transaction has been committed successfully, so it is impossible
2014 * to call the cleanup function.
2016 BUG_ON(list_empty(&cur_trans
->list
));
2018 if (cur_trans
== fs_info
->running_transaction
) {
2019 cur_trans
->state
= TRANS_STATE_COMMIT_DOING
;
2020 spin_unlock(&fs_info
->trans_lock
);
2021 wait_event(cur_trans
->writer_wait
,
2022 atomic_read(&cur_trans
->num_writers
) == 1);
		spin_lock(&fs_info->trans_lock);
	}

	/*
	 * Now that we know no one else is still using the transaction we can
	 * remove the transaction from the list of transactions. This avoids
	 * the transaction kthread from cleaning up the transaction while some
	 * other task is still using it, which could result in a use-after-free
	 * on things like log trees, as it forces the transaction kthread to
	 * wait for this transaction to be cleaned up by us.
	 */
	list_del_init(&cur_trans->list);

	spin_unlock(&fs_info->trans_lock);

	btrfs_cleanup_one_transaction(trans->transaction, fs_info);

	spin_lock(&fs_info->trans_lock);
	if (cur_trans == fs_info->running_transaction)
		fs_info->running_transaction = NULL;
	spin_unlock(&fs_info->trans_lock);

	if (trans->type & __TRANS_FREEZABLE)
		sb_end_intwrite(fs_info->sb);
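	/*
	 * Two puts: one drops the reference held by this trans handle, the
	 * other drops the reference the transaction held while it was on
	 * fs_info->trans_list (use_count starts at 2 when the transaction
	 * is created).
	 */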
	btrfs_put_transaction(cur_trans);
	btrfs_put_transaction(cur_trans);

	trace_btrfs_transaction_commit(trans->root);

	if (current->journal_info == trans)
		current->journal_info = NULL;
	btrfs_scrub_cancel(fs_info);

	kmem_cache_free(btrfs_trans_handle_cachep, trans);
}
/*
 * Release reserved delayed ref space of all pending block groups of the
 * transaction and remove them from the list.
 */
static void btrfs_cleanup_pending_block_groups(struct btrfs_trans_handle *trans)
{
	struct btrfs_fs_info *fs_info = trans->fs_info;
	struct btrfs_block_group *block_group, *tmp;

	list_for_each_entry_safe(block_group, tmp, &trans->new_bgs, bg_list) {
		btrfs_delayed_refs_rsv_release(fs_info, 1);
		list_del_init(&block_group->bg_list);
	}
}
static inline int btrfs_start_delalloc_flush(struct btrfs_fs_info *fs_info)
{
	/*
	 * We use try_to_writeback_inodes_sb() here because if we used
	 * btrfs_start_delalloc_roots() we would deadlock with fs freeze.
	 * We are currently holding the fs freeze lock: if we did an async
	 * flush we'd call btrfs_join_transaction() and deadlock because we
	 * need to wait for the fs freeze lock. With direct flushing we
	 * benefit from already being in a transaction, and our
	 * join_transaction doesn't have to re-take the fs freeze lock.
	 *
	 * Note that try_to_writeback_inodes_sb() will only trigger writeback
	 * if it can read lock sb->s_umount. It will always be able to lock it,
	 * except when the filesystem is being unmounted or being frozen, but
	 * in those cases sync_filesystem() is called, which results in calling
	 * writeback_inodes_sb() while holding a write lock on sb->s_umount.
	 * Note that we don't call writeback_inodes_sb() directly, because it
	 * will emit a warning if sb->s_umount is not locked.
	 */
	if (btrfs_test_opt(fs_info, FLUSHONCOMMIT))
		try_to_writeback_inodes_sb(fs_info->sb, WB_REASON_SYNC);
	return 0;
}
static inline void btrfs_wait_delalloc_flush(struct btrfs_fs_info *fs_info)
{
	if (btrfs_test_opt(fs_info, FLUSHONCOMMIT))
		btrfs_wait_ordered_roots(fs_info, U64_MAX, 0, (u64)-1);
}
/*
 * Add a pending snapshot associated with the given transaction handle to the
 * respective handle. This must be called after the transaction commit started
 * and while holding fs_info->trans_lock.
 * This serves to guarantee a caller of btrfs_commit_transaction() that it can
 * safely free the pending snapshot pointer in case btrfs_commit_transaction()
 * returns an error.
 */
static void add_pending_snapshot(struct btrfs_trans_handle *trans)
{
	struct btrfs_transaction *cur_trans = trans->transaction;

	if (!trans->pending_snapshot)
		return;

	lockdep_assert_held(&trans->fs_info->trans_lock);
	ASSERT(cur_trans->state >= TRANS_STATE_COMMIT_START);

	list_add(&trans->pending_snapshot->list, &cur_trans->pending_snapshots);
}
int btrfs_commit_transaction(struct btrfs_trans_handle *trans)
{
	struct btrfs_fs_info *fs_info = trans->fs_info;
	struct btrfs_transaction *cur_trans = trans->transaction;
	struct btrfs_transaction *prev_trans = NULL;
	int ret;

	ASSERT(refcount_read(&trans->use_count) == 1);

	/* Stop the commit early if ->aborted is set */
	if (TRANS_ABORTED(cur_trans)) {
		ret = cur_trans->aborted;
		btrfs_end_transaction(trans);
		return ret;
	}

	btrfs_trans_release_metadata(trans);
	trans->block_rsv = NULL;

	/*
	 * We only want one transaction commit doing the flushing so we do not
	 * waste a bunch of time on lock contention on the extent root node.
	 */
	if (!test_and_set_bit(BTRFS_DELAYED_REFS_FLUSHING,
			      &cur_trans->delayed_refs.flags)) {
		/*
		 * Make a pass through all the delayed refs we have so far.
		 * Any running threads may add more while we are here.
		 */
		ret = btrfs_run_delayed_refs(trans, 0);
		if (ret) {
			btrfs_end_transaction(trans);
			return ret;
		}
	}
	btrfs_create_pending_block_groups(trans);

	if (!test_bit(BTRFS_TRANS_DIRTY_BG_RUN, &cur_trans->flags)) {
		int run_it = 0;

		/*
		 * This mutex is also taken before trying to set block groups
		 * read-only. We need to make sure that nobody has set a block
		 * group read-only after extents from that block group have
		 * been allocated for cache files. btrfs_set_block_group_ro()
		 * will wait for the transaction to commit if it finds
		 * BTRFS_TRANS_DIRTY_BG_RUN set.
		 *
		 * The BTRFS_TRANS_DIRTY_BG_RUN flag is also used to make sure
		 * only one process starts all the block group IO. It wouldn't
		 * hurt to have more than one go through, but there's no real
		 * advantage to it either.
		 */
		mutex_lock(&fs_info->ro_block_group_mutex);
		if (!test_and_set_bit(BTRFS_TRANS_DIRTY_BG_RUN,
				      &cur_trans->flags))
			run_it = 1;
		mutex_unlock(&fs_info->ro_block_group_mutex);

		if (run_it) {
			ret = btrfs_start_dirty_block_groups(trans);
			if (ret) {
				btrfs_end_transaction(trans);
				return ret;
			}
		}
	}
	spin_lock(&fs_info->trans_lock);
	if (cur_trans->state >= TRANS_STATE_COMMIT_START) {
		enum btrfs_trans_state want_state = TRANS_STATE_COMPLETED;

		add_pending_snapshot(trans);

		spin_unlock(&fs_info->trans_lock);
		refcount_inc(&cur_trans->use_count);

		if (trans->in_fsync)
			want_state = TRANS_STATE_SUPER_COMMITTED;
		ret = btrfs_end_transaction(trans);
		wait_for_commit(cur_trans, want_state);

		if (TRANS_ABORTED(cur_trans))
			ret = cur_trans->aborted;

		btrfs_put_transaction(cur_trans);

		return ret;
	}
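	/*
	 * Nobody else has started the commit, so this task becomes the
	 * committer. Setting COMMIT_START below stops new TRANS_START and
	 * TRANS_ATTACH handles from joining this transaction, and the
	 * wake_up() lets async commit callers know the commit is underway.
	 */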
	cur_trans->state = TRANS_STATE_COMMIT_START;
	wake_up(&fs_info->transaction_blocked_wait);

	if (cur_trans->list.prev != &fs_info->trans_list) {
		enum btrfs_trans_state want_state = TRANS_STATE_COMPLETED;

		if (trans->in_fsync)
			want_state = TRANS_STATE_SUPER_COMMITTED;

		prev_trans = list_entry(cur_trans->list.prev,
					struct btrfs_transaction, list);
		if (prev_trans->state < want_state) {
			refcount_inc(&prev_trans->use_count);
			spin_unlock(&fs_info->trans_lock);

			wait_for_commit(prev_trans, want_state);

			ret = READ_ONCE(prev_trans->aborted);

			btrfs_put_transaction(prev_trans);
			if (ret)
				goto cleanup_transaction;
		} else {
			spin_unlock(&fs_info->trans_lock);
		}
	} else {
		spin_unlock(&fs_info->trans_lock);
		/*
		 * The previous transaction was aborted and was already removed
		 * from the list of transactions at fs_info->trans_list. So we
		 * abort to prevent writing a new superblock that reflects a
		 * corrupt state (pointing to trees with unwritten nodes/leafs).
		 */
		if (test_bit(BTRFS_FS_STATE_TRANS_ABORTED, &fs_info->fs_state)) {
			ret = -EROFS;
			goto cleanup_transaction;
		}
	}
	extwriter_counter_dec(cur_trans, trans->type);

	ret = btrfs_start_delalloc_flush(fs_info);
	if (ret)
		goto cleanup_transaction;

	ret = btrfs_run_delayed_items(trans);
	if (ret)
		goto cleanup_transaction;

	wait_event(cur_trans->writer_wait,
		   extwriter_counter_read(cur_trans) == 0);
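	/*
	 * The wait above lets every external writer (a handle opened with the
	 * TRANS_START or TRANS_ATTACH type) detach before we make the final
	 * pass over delayed items, so no new file data can sneak in.
	 */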
	/* Some pending items might have been added after the previous flush. */
	ret = btrfs_run_delayed_items(trans);
	if (ret)
		goto cleanup_transaction;

	btrfs_wait_delalloc_flush(fs_info);

	/*
	 * Wait for all ordered extents started by a fast fsync that joined this
	 * transaction. Otherwise if this transaction commits before the ordered
	 * extents complete we lose logged data after a power failure.
	 */
	wait_event(cur_trans->pending_wait,
		   atomic_read(&cur_trans->pending_ordered) == 0);
	btrfs_scrub_pause(fs_info);
	/*
	 * Ok, now we need to make sure to block out any other joins while we
	 * commit the transaction. We could have started a join before setting
	 * COMMIT_DOING so make sure to wait for num_writers to == 1 again.
	 */
	spin_lock(&fs_info->trans_lock);
	add_pending_snapshot(trans);
	cur_trans->state = TRANS_STATE_COMMIT_DOING;
	spin_unlock(&fs_info->trans_lock);
	wait_event(cur_trans->writer_wait,
		   atomic_read(&cur_trans->num_writers) == 1);

	if (TRANS_ABORTED(cur_trans)) {
		ret = cur_trans->aborted;
		goto scrub_continue;
	}

	/*
	 * The reloc mutex makes sure that we stop the balancing code from
	 * coming in and moving extents around in the middle of the commit.
	 */
	mutex_lock(&fs_info->reloc_mutex);
	/*
	 * We needn't worry about the delayed items because we will
	 * deal with them in create_pending_snapshot(), which is the
	 * core function of the snapshot creation.
	 */
	ret = create_pending_snapshots(trans);
	if (ret)
		goto unlock_reloc;

	/*
	 * We insert the dir indexes of the snapshots and update the inodes
	 * of the snapshots' parents after the snapshot creation, so there
	 * are some delayed items which are not dealt with. Now deal with
	 * them.
	 *
	 * We needn't worry that this operation will corrupt the snapshots,
	 * because all the trees which are snapshotted will be forced to COW
	 * the nodes and leaves.
	 */
	ret = btrfs_run_delayed_items(trans);
	if (ret)
		goto unlock_reloc;

	ret = btrfs_run_delayed_refs(trans, (unsigned long)-1);
	if (ret)
		goto unlock_reloc;
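	/*
	 * Passing (unsigned long)-1 asks btrfs_run_delayed_refs() to keep
	 * going until the delayed ref queue is fully drained, unlike the
	 * earlier single pass with a count of 0.
	 */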
	/*
	 * Make sure none of the code above managed to slip in a
	 * delayed item.
	 */
	btrfs_assert_delayed_root_empty(fs_info);

	WARN_ON(cur_trans != trans->transaction);

	/*
	 * btrfs_commit_tree_roots is responsible for getting the various
	 * roots consistent with each other. Every pointer in the tree of
	 * tree roots has to point to the most up to date root for every
	 * subvolume and other tree. So, we have to keep the tree logging
	 * code from jumping in and changing any of the trees.
	 *
	 * At this point in the commit, there can't be any tree-log writers,
	 * but a little lower down we drop the trans mutex and let new people
	 * in. By holding the tree_log_mutex from now until after the super
	 * is written, we avoid races with the tree-log code.
	 */
	mutex_lock(&fs_info->tree_log_mutex);

	ret = commit_fs_roots(trans);
	if (ret)
		goto unlock_tree_log;
	/*
	 * Since the transaction is done, we can apply the pending changes
	 * before the next transaction.
	 */
	btrfs_apply_pending_changes(fs_info);

	/*
	 * commit_fs_roots() gets rid of all the tree log roots, it is now
	 * safe to free the root of tree log roots.
	 */
	btrfs_free_log_root_tree(trans, fs_info);

	/*
	 * Since fs roots are all committed, we can get a quite accurate
	 * new_roots. So let's do quota accounting.
	 */
	ret = btrfs_qgroup_account_extents(trans);
	if (ret < 0)
		goto unlock_tree_log;

	ret = commit_cowonly_roots(trans);
	if (ret)
		goto unlock_tree_log;

	/*
	 * The tasks which save the space cache and inode cache may also
	 * update ->aborted, check it.
	 */
	if (TRANS_ABORTED(cur_trans)) {
		ret = cur_trans->aborted;
		goto unlock_tree_log;
	}
	cur_trans = fs_info->running_transaction;

	btrfs_set_root_node(&fs_info->tree_root->root_item,
			    fs_info->tree_root->node);
	list_add_tail(&fs_info->tree_root->dirty_list,
		      &cur_trans->switch_commits);

	btrfs_set_root_node(&fs_info->chunk_root->root_item,
			    fs_info->chunk_root->node);
	list_add_tail(&fs_info->chunk_root->dirty_list,
		      &cur_trans->switch_commits);

	switch_commit_roots(trans);
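	/*
	 * switch_commit_roots() makes the current root node of everything
	 * queued on ->switch_commits the new commit root, i.e. the stable
	 * view that readers and the next transaction will work against.
	 */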
	ASSERT(list_empty(&cur_trans->dirty_bgs));
	ASSERT(list_empty(&cur_trans->io_bgs));
	update_super_roots(fs_info);

	btrfs_set_super_log_root(fs_info->super_copy, 0);
	btrfs_set_super_log_root_level(fs_info->super_copy, 0);
	memcpy(fs_info->super_for_commit, fs_info->super_copy,
	       sizeof(*fs_info->super_copy));
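	/*
	 * The super block is copied into super_for_commit so the version we
	 * write to disk below stays stable even if super_copy is modified
	 * again by the next transaction in the meantime. Zeroing the log
	 * root above ensures the committed super references no tree-log.
	 */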
	btrfs_commit_device_sizes(cur_trans);

	clear_bit(BTRFS_FS_LOG1_ERR, &fs_info->flags);
	clear_bit(BTRFS_FS_LOG2_ERR, &fs_info->flags);

	btrfs_trans_release_chunk_metadata(trans);

	spin_lock(&fs_info->trans_lock);
	cur_trans->state = TRANS_STATE_UNBLOCKED;
	fs_info->running_transaction = NULL;
	spin_unlock(&fs_info->trans_lock);
	mutex_unlock(&fs_info->reloc_mutex);

	wake_up(&fs_info->transaction_wait);
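	/*
	 * running_transaction is now NULL and we are in UNBLOCKED state, so
	 * new trans handles may create and attach to the next transaction
	 * while we write this transaction's tree blocks and supers to disk.
	 */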
	ret = btrfs_write_and_wait_transaction(trans);
	if (ret) {
		btrfs_handle_fs_error(fs_info, ret,
				      "Error while writing out transaction");
		/*
		 * reloc_mutex has been unlocked, tree_log_mutex is still held,
		 * but we can't jump to unlock_tree_log since it falls through
		 * to unlock_reloc and we would unlock reloc_mutex twice.
		 */
		mutex_unlock(&fs_info->tree_log_mutex);
		goto scrub_continue;
	}

	/*
	 * At this point, we should have written all the tree blocks allocated
	 * in this transaction. So it's now safe to free the redirtied extent
	 * buffers.
	 */
	btrfs_free_redirty_list(cur_trans);

	ret = write_all_supers(fs_info, 0);
	/*
	 * The super is written, we can safely allow the tree-loggers
	 * to go about their business.
	 */
	mutex_unlock(&fs_info->tree_log_mutex);
	if (ret)
		goto scrub_continue;
	/*
	 * We needn't acquire the lock here because there is no other task
	 * which can change it.
	 */
	cur_trans->state = TRANS_STATE_SUPER_COMMITTED;
	wake_up(&cur_trans->commit_wait);
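	/*
	 * Waiters that asked for TRANS_STATE_SUPER_COMMITTED (the in_fsync
	 * paths above) are released here, before the more expensive
	 * post-commit work such as btrfs_finish_extent_commit() runs.
	 */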
	btrfs_finish_extent_commit(trans);

	if (test_bit(BTRFS_TRANS_HAVE_FREE_BGS, &cur_trans->flags))
		btrfs_clear_space_info_full(fs_info);

	fs_info->last_trans_committed = cur_trans->transid;

	/*
	 * We needn't acquire the lock here because there is no other task
	 * which can change it.
	 */
	cur_trans->state = TRANS_STATE_COMPLETED;
	wake_up(&cur_trans->commit_wait);
	spin_lock(&fs_info->trans_lock);
	list_del_init(&cur_trans->list);
	spin_unlock(&fs_info->trans_lock);

	btrfs_put_transaction(cur_trans);
	btrfs_put_transaction(cur_trans);

	if (trans->type & __TRANS_FREEZABLE)
		sb_end_intwrite(fs_info->sb);

	trace_btrfs_transaction_commit(trans->root);

	btrfs_scrub_continue(fs_info);

	if (current->journal_info == trans)
		current->journal_info = NULL;

	kmem_cache_free(btrfs_trans_handle_cachep, trans);

	return ret;
unlock_tree_log:
	mutex_unlock(&fs_info->tree_log_mutex);
unlock_reloc:
	mutex_unlock(&fs_info->reloc_mutex);
scrub_continue:
	btrfs_scrub_continue(fs_info);
cleanup_transaction:
	btrfs_trans_release_metadata(trans);
	btrfs_cleanup_pending_block_groups(trans);
	btrfs_trans_release_chunk_metadata(trans);
	trans->block_rsv = NULL;
	btrfs_warn(fs_info, "Skipping commit of aborted transaction.");
	if (current->journal_info == trans)
		current->journal_info = NULL;
	cleanup_transaction(trans, ret);

	return ret;
}
/*
 * Return < 0 on error,
 * 0 if there are no more dead_roots at the time of call,
 * 1 if there are more to be processed, call me again.
 *
 * The return value indicates there are certainly more snapshots to delete, but
 * if there comes a new one during processing, it may return 0. We don't mind,
 * because btrfs_commit_super will poke cleaner thread and it will process it a
 * few seconds later.
 */
int btrfs_clean_one_deleted_snapshot(struct btrfs_root *root)
{
	int ret;
	struct btrfs_fs_info *fs_info = root->fs_info;

	spin_lock(&fs_info->trans_lock);
	if (list_empty(&fs_info->dead_roots)) {
		spin_unlock(&fs_info->trans_lock);
		return 0;
	}
	root = list_first_entry(&fs_info->dead_roots,
				struct btrfs_root, root_list);
	list_del_init(&root->root_list);
	spin_unlock(&fs_info->trans_lock);

	btrfs_debug(fs_info, "cleaner removing %llu", root->root_key.objectid);

	btrfs_kill_all_delayed_nodes(root);

	if (btrfs_header_backref_rev(root->node) <
	    BTRFS_MIXED_BACKREF_REV)
		ret = btrfs_drop_snapshot(root, 0, 0);
	else
		ret = btrfs_drop_snapshot(root, 1, 0);

	btrfs_put_root(root);
	return (ret < 0) ? 0 : 1;
}
void btrfs_apply_pending_changes(struct btrfs_fs_info *fs_info)
{
	unsigned long prev;
	unsigned long bit;

	prev = xchg(&fs_info->pending_changes, 0);
	if (!prev)
		return;
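	/*
	 * xchg() atomically claims every pending change bit, so two
	 * concurrent callers can never apply the same change twice.
	 */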
	bit = 1 << BTRFS_PENDING_COMMIT;
	if (prev & bit)
		btrfs_debug(fs_info, "pending commit done");
	prev &= ~bit;

	if (prev)
		btrfs_warn(fs_info,
			"unknown pending changes left 0x%lx, ignoring", prev);
}