/*
 * Copyright (C) 2007 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 021110-1307, USA.
 */

#include <linux/fs.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/writeback.h>
#include <linux/pagemap.h>
#include <linux/blkdev.h>
#include <linux/uuid.h>
#include "ctree.h"
#include "disk-io.h"
#include "transaction.h"
#include "locking.h"
#include "tree-log.h"
#include "inode-map.h"
#include "volumes.h"
#include "dev-replace.h"
#include "qgroup.h"

#define BTRFS_ROOT_TRANS_TAG 0

static const unsigned int btrfs_blocked_trans_types[TRANS_STATE_MAX] = {
	[TRANS_STATE_RUNNING]		= 0U,
	[TRANS_STATE_BLOCKED]		= (__TRANS_USERSPACE |
					   __TRANS_START),
	[TRANS_STATE_COMMIT_START]	= (__TRANS_USERSPACE |
					   __TRANS_START |
					   __TRANS_ATTACH),
	[TRANS_STATE_COMMIT_DOING]	= (__TRANS_USERSPACE |
					   __TRANS_START |
					   __TRANS_ATTACH |
					   __TRANS_JOIN),
	[TRANS_STATE_UNBLOCKED]		= (__TRANS_USERSPACE |
					   __TRANS_START |
					   __TRANS_ATTACH |
					   __TRANS_JOIN |
					   __TRANS_JOIN_NOLOCK),
	[TRANS_STATE_COMPLETED]		= (__TRANS_USERSPACE |
					   __TRANS_START |
					   __TRANS_ATTACH |
					   __TRANS_JOIN |
					   __TRANS_JOIN_NOLOCK),
};

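/*
 * drop a reference on a transaction.  When the last reference goes away,
 * any extent maps still hanging off the pending_chunks list are freed and
 * the transaction structure itself is released.
 */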
void btrfs_put_transaction(struct btrfs_transaction *transaction)
{
	WARN_ON(atomic_read(&transaction->use_count) == 0);
	if (atomic_dec_and_test(&transaction->use_count)) {
		BUG_ON(!list_empty(&transaction->list));
		WARN_ON(!RB_EMPTY_ROOT(&transaction->delayed_refs.href_root));
		if (transaction->delayed_refs.pending_csums)
			printk(KERN_ERR "pending csums is %llu\n",
			       transaction->delayed_refs.pending_csums);
		while (!list_empty(&transaction->pending_chunks)) {
			struct extent_map *em;

			em = list_first_entry(&transaction->pending_chunks,
					      struct extent_map, list);
			list_del_init(&em->list);
			free_extent_map(em);
		}
		kmem_cache_free(btrfs_transaction_cachep, transaction);
	}
}

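/*
 * empty out a btree inode io tree, freeing every extent_state left in it.
 * Callers guarantee that nobody is waiting on these states any more.
 */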
static void clear_btree_io_tree(struct extent_io_tree *tree)
{
	spin_lock(&tree->lock);
	/*
	 * Do a single barrier for the waitqueue_active check here, the state
	 * of the waitqueue should not change once clear_btree_io_tree is
	 * called.
	 */
	smp_mb();
	while (!RB_EMPTY_ROOT(&tree->state)) {
		struct rb_node *node;
		struct extent_state *state;

		node = rb_first(&tree->state);
		state = rb_entry(node, struct extent_state, rb_node);
		rb_erase(&state->rb_node, &tree->state);
		RB_CLEAR_NODE(&state->rb_node);
		/*
		 * btree io trees aren't supposed to have tasks waiting for
		 * changes in the flags of extent states ever.
		 */
		ASSERT(!waitqueue_active(&state->wq));
		free_extent_state(state);

		cond_resched_lock(&tree->lock);
	}
	spin_unlock(&tree->lock);
}

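/*
 * make the new commit roots visible: every root modified in this
 * transaction gets its commit_root pointed at the current root node, and
 * roots that were dropped during the transaction are finally freed.
 */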
static noinline void switch_commit_roots(struct btrfs_transaction *trans,
					 struct btrfs_fs_info *fs_info)
{
	struct btrfs_root *root, *tmp;

	down_write(&fs_info->commit_root_sem);
	list_for_each_entry_safe(root, tmp, &trans->switch_commits,
				 dirty_list) {
		list_del_init(&root->dirty_list);
		free_extent_buffer(root->commit_root);
		root->commit_root = btrfs_root_node(root);
		if (is_fstree(root->objectid))
			btrfs_unpin_free_ino(root);
		clear_btree_io_tree(&root->dirty_log_pages);
	}

	/* We can free old roots now. */
	spin_lock(&trans->dropped_roots_lock);
	while (!list_empty(&trans->dropped_roots)) {
		root = list_first_entry(&trans->dropped_roots,
					struct btrfs_root, root_list);
		list_del_init(&root->root_list);
		spin_unlock(&trans->dropped_roots_lock);
		btrfs_drop_and_free_fs_root(fs_info, root);
		spin_lock(&trans->dropped_roots_lock);
	}
	spin_unlock(&trans->dropped_roots_lock);
	up_write(&fs_info->commit_root_sem);
}

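/*
 * helpers to track how many transaction handles of a TRANS_EXTWRITERS type
 * are currently attached to a transaction.
 */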
static inline void extwriter_counter_inc(struct btrfs_transaction *trans,
					 unsigned int type)
{
	if (type & TRANS_EXTWRITERS)
		atomic_inc(&trans->num_extwriters);
}

static inline void extwriter_counter_dec(struct btrfs_transaction *trans,
					 unsigned int type)
{
	if (type & TRANS_EXTWRITERS)
		atomic_dec(&trans->num_extwriters);
}

static inline void extwriter_counter_init(struct btrfs_transaction *trans,
					  unsigned int type)
{
	atomic_set(&trans->num_extwriters, ((type & TRANS_EXTWRITERS) ? 1 : 0));
}

static inline int extwriter_counter_read(struct btrfs_transaction *trans)
{
	return atomic_read(&trans->num_extwriters);
}

/*
 * either allocate a new transaction or hop into the existing one
 */
static noinline int join_transaction(struct btrfs_root *root, unsigned int type)
{
	struct btrfs_transaction *cur_trans;
	struct btrfs_fs_info *fs_info = root->fs_info;

	spin_lock(&fs_info->trans_lock);
loop:
	/* The file system has been taken offline. No new transactions. */
	if (test_bit(BTRFS_FS_STATE_ERROR, &fs_info->fs_state)) {
		spin_unlock(&fs_info->trans_lock);
		return -EROFS;
	}

	cur_trans = fs_info->running_transaction;
	if (cur_trans) {
		if (cur_trans->aborted) {
			spin_unlock(&fs_info->trans_lock);
			return cur_trans->aborted;
		}
		if (btrfs_blocked_trans_types[cur_trans->state] & type) {
			spin_unlock(&fs_info->trans_lock);
			return -EBUSY;
		}
		atomic_inc(&cur_trans->use_count);
		atomic_inc(&cur_trans->num_writers);
		extwriter_counter_inc(cur_trans, type);
		spin_unlock(&fs_info->trans_lock);
		return 0;
	}
	spin_unlock(&fs_info->trans_lock);

	/*
	 * If we are ATTACH, we just want to catch the current transaction,
	 * and commit it. If there is no transaction, just return ENOENT.
	 */
	if (type == TRANS_ATTACH)
		return -ENOENT;

	/*
	 * JOIN_NOLOCK only happens during the transaction commit, so
	 * it is impossible that ->running_transaction is NULL
	 */
	BUG_ON(type == TRANS_JOIN_NOLOCK);

	cur_trans = kmem_cache_alloc(btrfs_transaction_cachep, GFP_NOFS);
	if (!cur_trans)
		return -ENOMEM;

	spin_lock(&fs_info->trans_lock);
	if (fs_info->running_transaction) {
		/*
		 * someone started a transaction after we unlocked.  Make sure
		 * to redo the checks above
		 */
		kmem_cache_free(btrfs_transaction_cachep, cur_trans);
		goto loop;
	} else if (test_bit(BTRFS_FS_STATE_ERROR, &fs_info->fs_state)) {
		spin_unlock(&fs_info->trans_lock);
		kmem_cache_free(btrfs_transaction_cachep, cur_trans);
		return -EROFS;
	}

	atomic_set(&cur_trans->num_writers, 1);
	extwriter_counter_init(cur_trans, type);
	init_waitqueue_head(&cur_trans->writer_wait);
	init_waitqueue_head(&cur_trans->commit_wait);
	cur_trans->state = TRANS_STATE_RUNNING;
	/*
	 * One for this trans handle, one so it will live on until we
	 * commit the transaction.
	 */
	atomic_set(&cur_trans->use_count, 2);
	cur_trans->have_free_bgs = 0;
	cur_trans->start_time = get_seconds();
	cur_trans->dirty_bg_run = 0;

	memset(&cur_trans->delayed_refs, 0, sizeof(cur_trans->delayed_refs));

	cur_trans->delayed_refs.href_root = RB_ROOT;
	cur_trans->delayed_refs.dirty_extent_root = RB_ROOT;
	atomic_set(&cur_trans->delayed_refs.num_entries, 0);

	/*
	 * although the tree mod log is per file system and not per transaction,
	 * the log must never go across transaction boundaries.
	 */
	smp_mb();
	if (!list_empty(&fs_info->tree_mod_seq_list))
		WARN(1, KERN_ERR "BTRFS: tree_mod_seq_list not empty when "
			"creating a fresh transaction\n");
	if (!RB_EMPTY_ROOT(&fs_info->tree_mod_log))
		WARN(1, KERN_ERR "BTRFS: tree_mod_log rb tree not empty when "
			"creating a fresh transaction\n");
	atomic64_set(&fs_info->tree_mod_seq, 0);

	spin_lock_init(&cur_trans->delayed_refs.lock);

	INIT_LIST_HEAD(&cur_trans->pending_snapshots);
	INIT_LIST_HEAD(&cur_trans->pending_chunks);
	INIT_LIST_HEAD(&cur_trans->switch_commits);
	INIT_LIST_HEAD(&cur_trans->pending_ordered);
	INIT_LIST_HEAD(&cur_trans->dirty_bgs);
	INIT_LIST_HEAD(&cur_trans->io_bgs);
	INIT_LIST_HEAD(&cur_trans->dropped_roots);
	mutex_init(&cur_trans->cache_write_mutex);
	cur_trans->num_dirty_bgs = 0;
	spin_lock_init(&cur_trans->dirty_bgs_lock);
	INIT_LIST_HEAD(&cur_trans->deleted_bgs);
	spin_lock_init(&cur_trans->deleted_bgs_lock);
	spin_lock_init(&cur_trans->dropped_roots_lock);
	list_add_tail(&cur_trans->list, &fs_info->trans_list);
	extent_io_tree_init(&cur_trans->dirty_pages,
			    fs_info->btree_inode->i_mapping);
	fs_info->generation++;
	cur_trans->transid = fs_info->generation;
	fs_info->running_transaction = cur_trans;
	cur_trans->aborted = 0;
	spin_unlock(&fs_info->trans_lock);

	return 0;
}

/*
 * this does all the record keeping required to make sure that a reference
 * counted root is properly recorded in a given transaction.  This is required
 * to make sure the old root from before we joined the transaction is deleted
 * when the transaction commits
 */
static int record_root_in_trans(struct btrfs_trans_handle *trans,
				struct btrfs_root *root)
{
	if (test_bit(BTRFS_ROOT_REF_COWS, &root->state) &&
	    root->last_trans < trans->transid) {
		WARN_ON(root == root->fs_info->extent_root);
		WARN_ON(root->commit_root != root->node);

		/*
		 * see below for IN_TRANS_SETUP usage rules
		 * we have the reloc mutex held now, so there
		 * is only one writer in this function
		 */
		set_bit(BTRFS_ROOT_IN_TRANS_SETUP, &root->state);

		/* make sure readers find IN_TRANS_SETUP before
		 * they find our root->last_trans update
		 */
		smp_wmb();

		spin_lock(&root->fs_info->fs_roots_radix_lock);
		if (root->last_trans == trans->transid) {
			spin_unlock(&root->fs_info->fs_roots_radix_lock);
			return 0;
		}
		radix_tree_tag_set(&root->fs_info->fs_roots_radix,
				   (unsigned long)root->root_key.objectid,
				   BTRFS_ROOT_TRANS_TAG);
		spin_unlock(&root->fs_info->fs_roots_radix_lock);
		root->last_trans = trans->transid;

		/* this is pretty tricky.  We don't want to
		 * take the relocation lock in btrfs_record_root_in_trans
		 * unless we're really doing the first setup for this root in
		 * this transaction.
		 *
		 * Normally we'd use root->last_trans as a flag to decide
		 * if we want to take the expensive mutex.
		 *
		 * But, we have to set root->last_trans before we
		 * init the relocation root, otherwise, we trip over warnings
		 * in ctree.c.  The solution used here is to flag ourselves
		 * with root IN_TRANS_SETUP.  When this is 1, we're still
		 * fixing up the reloc trees and everyone must wait.
		 *
		 * When this is zero, they can trust root->last_trans and fly
		 * through btrfs_record_root_in_trans without having to take the
		 * lock.  smp_wmb() makes sure that all the writes above are
		 * done before we pop in the zero below
		 */
		btrfs_init_reloc_root(trans, root);
		smp_mb__before_atomic();
		clear_bit(BTRFS_ROOT_IN_TRANS_SETUP, &root->state);
	}
	return 0;
}


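/*
 * record a dropped root in the current transaction so that the commit code
 * can free it once the commit roots are switched, and clear its dirty tag
 * so commit_fs_roots() does not try to update it again.
 */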
void btrfs_add_dropped_root(struct btrfs_trans_handle *trans,
			    struct btrfs_root *root)
{
	struct btrfs_transaction *cur_trans = trans->transaction;

	/* Add ourselves to the transaction dropped list */
	spin_lock(&cur_trans->dropped_roots_lock);
	list_add_tail(&root->root_list, &cur_trans->dropped_roots);
	spin_unlock(&cur_trans->dropped_roots_lock);

	/* Make sure we don't try to update the root at commit time */
	spin_lock(&root->fs_info->fs_roots_radix_lock);
	radix_tree_tag_clear(&root->fs_info->fs_roots_radix,
			     (unsigned long)root->root_key.objectid,
			     BTRFS_ROOT_TRANS_TAG);
	spin_unlock(&root->fs_info->fs_roots_radix_lock);
}

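/*
 * lockless fast path around record_root_in_trans(): only take the reloc
 * mutex when this root has not been recorded in the current transaction yet.
 */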
int btrfs_record_root_in_trans(struct btrfs_trans_handle *trans,
			       struct btrfs_root *root)
{
	if (!test_bit(BTRFS_ROOT_REF_COWS, &root->state))
		return 0;

	/*
	 * see record_root_in_trans for comments about IN_TRANS_SETUP usage
	 * and barriers
	 */
	smp_rmb();
	if (root->last_trans == trans->transid &&
	    !test_bit(BTRFS_ROOT_IN_TRANS_SETUP, &root->state))
		return 0;

	mutex_lock(&root->fs_info->reloc_mutex);
	record_root_in_trans(trans, root);
	mutex_unlock(&root->fs_info->reloc_mutex);

	return 0;
}

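/*
 * a transaction counts as blocked from the moment it starts refusing new
 * joiners until it is unblocked again, unless it has already aborted.
 */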
static inline int is_transaction_blocked(struct btrfs_transaction *trans)
{
	return (trans->state >= TRANS_STATE_BLOCKED &&
		trans->state < TRANS_STATE_UNBLOCKED &&
		!trans->aborted);
}

/* wait for commit against the current transaction to become unblocked
 * when this is done, it is safe to start a new transaction, but the current
 * transaction might not be fully on disk.
 */
static void wait_current_trans(struct btrfs_root *root)
{
	struct btrfs_transaction *cur_trans;

	spin_lock(&root->fs_info->trans_lock);
	cur_trans = root->fs_info->running_transaction;
	if (cur_trans && is_transaction_blocked(cur_trans)) {
		atomic_inc(&cur_trans->use_count);
		spin_unlock(&root->fs_info->trans_lock);

		wait_event(root->fs_info->transaction_wait,
			   cur_trans->state >= TRANS_STATE_UNBLOCKED ||
			   cur_trans->aborted);
		btrfs_put_transaction(cur_trans);
	} else {
		spin_unlock(&root->fs_info->trans_lock);
	}
}

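/*
 * decide whether a handle of the given type has to wait for a blocked
 * transaction to clear before joining.
 */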
static int may_wait_transaction(struct btrfs_root *root, int type)
{
	if (root->fs_info->log_root_recovering)
		return 0;

	if (type == TRANS_USERSPACE)
		return 1;

	if (type == TRANS_START &&
	    !atomic_read(&root->fs_info->open_ioctl_trans))
		return 1;

	return 0;
}

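/*
 * returns true when starting this transaction may also require creating a
 * relocation root, in which case the caller reserves one extra tree block.
 */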
static inline bool need_reserve_reloc_root(struct btrfs_root *root)
{
	if (!root->fs_info->reloc_ctl ||
	    !test_bit(BTRFS_ROOT_REF_COWS, &root->state) ||
	    root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID ||
	    root->reloc_root)
		return false;

	return true;
}

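/*
 * common code behind all the transaction start variants: reserve metadata
 * space for num_items items if requested, join or create the running
 * transaction and fill in the handle.
 */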
static struct btrfs_trans_handle *
start_transaction(struct btrfs_root *root, unsigned int num_items,
		  unsigned int type, enum btrfs_reserve_flush_enum flush)
{
	struct btrfs_trans_handle *h;
	struct btrfs_transaction *cur_trans;
	u64 num_bytes = 0;
	u64 qgroup_reserved = 0;
	bool reloc_reserved = false;
	int ret;

	/* Send isn't supposed to start transactions. */
	ASSERT(current->journal_info != BTRFS_SEND_TRANS_STUB);

	if (test_bit(BTRFS_FS_STATE_ERROR, &root->fs_info->fs_state))
		return ERR_PTR(-EROFS);

	if (current->journal_info) {
		WARN_ON(type & TRANS_EXTWRITERS);
		h = current->journal_info;
		h->use_count++;
		WARN_ON(h->use_count > 2);
		h->orig_rsv = h->block_rsv;
		h->block_rsv = NULL;
		goto got_it;
	}

	/*
	 * Do the reservation before we join the transaction so we can do all
	 * the appropriate flushing if need be.
	 */
	if (num_items > 0 && root != root->fs_info->chunk_root) {
		if (root->fs_info->quota_enabled &&
		    is_fstree(root->root_key.objectid)) {
			qgroup_reserved = num_items * root->nodesize;
			ret = btrfs_qgroup_reserve(root, qgroup_reserved);
			if (ret)
				return ERR_PTR(ret);
		}

		num_bytes = btrfs_calc_trans_metadata_size(root, num_items);
		/*
		 * Do the reservation for the relocation root creation
		 */
		if (need_reserve_reloc_root(root)) {
			num_bytes += root->nodesize;
			reloc_reserved = true;
		}

		ret = btrfs_block_rsv_add(root,
					  &root->fs_info->trans_block_rsv,
					  num_bytes, flush);
		if (ret)
			goto reserve_fail;
	}
again:
	h = kmem_cache_zalloc(btrfs_trans_handle_cachep, GFP_NOFS);
	if (!h) {
		ret = -ENOMEM;
		goto alloc_fail;
	}

	/*
	 * If we are JOIN_NOLOCK we're already committing a transaction and
	 * waiting on this guy, so we don't need to do the sb_start_intwrite
	 * because we're already holding a ref.  We need this because we could
	 * have raced in and did an fsync() on a file which can kick a commit
	 * and then we deadlock with somebody doing a freeze.
	 *
	 * If we are ATTACH, it means we just want to catch the current
	 * transaction and commit it, so we needn't do sb_start_intwrite().
	 */
	if (type & __TRANS_FREEZABLE)
		sb_start_intwrite(root->fs_info->sb);

	if (may_wait_transaction(root, type))
		wait_current_trans(root);

	do {
		ret = join_transaction(root, type);
		if (ret == -EBUSY) {
			wait_current_trans(root);
			if (unlikely(type == TRANS_ATTACH))
				ret = -ENOENT;
		}
	} while (ret == -EBUSY);

	if (ret < 0) {
		/* We must get the transaction if we are JOIN_NOLOCK. */
		BUG_ON(type == TRANS_JOIN_NOLOCK);
		goto join_fail;
	}

	cur_trans = root->fs_info->running_transaction;

	h->transid = cur_trans->transid;
	h->transaction = cur_trans;
	h->root = root;
	h->use_count = 1;
	h->type = type;
	h->can_flush_pending_bgs = true;
	INIT_LIST_HEAD(&h->qgroup_ref_list);
	INIT_LIST_HEAD(&h->new_bgs);
	INIT_LIST_HEAD(&h->ordered);

	smp_mb();
	if (cur_trans->state >= TRANS_STATE_BLOCKED &&
	    may_wait_transaction(root, type)) {
		current->journal_info = h;
		btrfs_commit_transaction(h, root);
		goto again;
	}

	if (num_bytes) {
		trace_btrfs_space_reservation(root->fs_info, "transaction",
					      h->transid, num_bytes, 1);
		h->block_rsv = &root->fs_info->trans_block_rsv;
		h->bytes_reserved = num_bytes;
		h->reloc_reserved = reloc_reserved;
	}
	h->qgroup_reserved = qgroup_reserved;

got_it:
	btrfs_record_root_in_trans(h, root);

	if (!current->journal_info && type != TRANS_USERSPACE)
		current->journal_info = h;
	return h;

join_fail:
	if (type & __TRANS_FREEZABLE)
		sb_end_intwrite(root->fs_info->sb);
	kmem_cache_free(btrfs_trans_handle_cachep, h);
alloc_fail:
	if (num_bytes)
		btrfs_block_rsv_release(root, &root->fs_info->trans_block_rsv,
					num_bytes);
reserve_fail:
	if (qgroup_reserved)
		btrfs_qgroup_free(root, qgroup_reserved);
	return ERR_PTR(ret);
}

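/*
 * the exported start/join/attach variants below are thin wrappers around
 * start_transaction() that only differ in the handle type and the flush
 * behaviour used for the metadata reservation.
 */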
struct btrfs_trans_handle *btrfs_start_transaction(struct btrfs_root *root,
						   unsigned int num_items)
{
	return start_transaction(root, num_items, TRANS_START,
				 BTRFS_RESERVE_FLUSH_ALL);
}

struct btrfs_trans_handle *btrfs_start_transaction_lflush(
					struct btrfs_root *root,
					unsigned int num_items)
{
	return start_transaction(root, num_items, TRANS_START,
				 BTRFS_RESERVE_FLUSH_LIMIT);
}

struct btrfs_trans_handle *btrfs_join_transaction(struct btrfs_root *root)
{
	return start_transaction(root, 0, TRANS_JOIN, 0);
}

struct btrfs_trans_handle *btrfs_join_transaction_nolock(struct btrfs_root *root)
{
	return start_transaction(root, 0, TRANS_JOIN_NOLOCK, 0);
}

struct btrfs_trans_handle *btrfs_start_ioctl_transaction(struct btrfs_root *root)
{
	return start_transaction(root, 0, TRANS_USERSPACE, 0);
}

/*
 * btrfs_attach_transaction() - catch the running transaction
 *
 * It is used when we want to commit the current transaction, but
 * don't want to start a new one.
 *
 * Note: If this function returns -ENOENT, it just means there is no
 * running transaction. But it is possible that the inactive transaction
 * is still in memory, not fully on disk. If you hope there is no
 * inactive transaction in the fs when -ENOENT is returned, you should
 * invoke
 *     btrfs_attach_transaction_barrier()
 */
struct btrfs_trans_handle *btrfs_attach_transaction(struct btrfs_root *root)
{
	return start_transaction(root, 0, TRANS_ATTACH, 0);
}

/*
 * btrfs_attach_transaction_barrier() - catch the running transaction
 *
 * It is similar to the above function, the difference is that this one
 * will wait for all the inactive transactions until they fully
 * complete.
 */
struct btrfs_trans_handle *
btrfs_attach_transaction_barrier(struct btrfs_root *root)
{
	struct btrfs_trans_handle *trans;

	trans = start_transaction(root, 0, TRANS_ATTACH, 0);
	if (IS_ERR(trans) && PTR_ERR(trans) == -ENOENT)
		btrfs_wait_for_commit(root, 0);

	return trans;
}

/* wait for a transaction commit to be fully complete */
static noinline void wait_for_commit(struct btrfs_root *root,
				     struct btrfs_transaction *commit)
{
	wait_event(commit->commit_wait, commit->state == TRANS_STATE_COMPLETED);
}

667
46204592
SW
668int btrfs_wait_for_commit(struct btrfs_root *root, u64 transid)
669{
670 struct btrfs_transaction *cur_trans = NULL, *t;
8cd2807f 671 int ret = 0;
46204592 672
46204592
SW
673 if (transid) {
674 if (transid <= root->fs_info->last_trans_committed)
a4abeea4 675 goto out;
46204592
SW
676
677 /* find specified transaction */
a4abeea4 678 spin_lock(&root->fs_info->trans_lock);
46204592
SW
679 list_for_each_entry(t, &root->fs_info->trans_list, list) {
680 if (t->transid == transid) {
681 cur_trans = t;
a4abeea4 682 atomic_inc(&cur_trans->use_count);
8cd2807f 683 ret = 0;
46204592
SW
684 break;
685 }
8cd2807f
MX
686 if (t->transid > transid) {
687 ret = 0;
46204592 688 break;
8cd2807f 689 }
46204592 690 }
a4abeea4 691 spin_unlock(&root->fs_info->trans_lock);
42383020
SW
692
693 /*
694 * The specified transaction doesn't exist, or we
695 * raced with btrfs_commit_transaction
696 */
697 if (!cur_trans) {
698 if (transid > root->fs_info->last_trans_committed)
699 ret = -EINVAL;
8cd2807f 700 goto out;
42383020 701 }
46204592
SW
702 } else {
703 /* find newest transaction that is committing | committed */
a4abeea4 704 spin_lock(&root->fs_info->trans_lock);
46204592
SW
705 list_for_each_entry_reverse(t, &root->fs_info->trans_list,
706 list) {
4a9d8bde
MX
707 if (t->state >= TRANS_STATE_COMMIT_START) {
708 if (t->state == TRANS_STATE_COMPLETED)
3473f3c0 709 break;
46204592 710 cur_trans = t;
a4abeea4 711 atomic_inc(&cur_trans->use_count);
46204592
SW
712 break;
713 }
714 }
a4abeea4 715 spin_unlock(&root->fs_info->trans_lock);
46204592 716 if (!cur_trans)
a4abeea4 717 goto out; /* nothing committing|committed */
46204592
SW
718 }
719
46204592 720 wait_for_commit(root, cur_trans);
724e2315 721 btrfs_put_transaction(cur_trans);
a4abeea4 722out:
46204592
SW
723 return ret;
724}
725
37d1aeee
CM
726void btrfs_throttle(struct btrfs_root *root)
727{
a4abeea4 728 if (!atomic_read(&root->fs_info->open_ioctl_trans))
9ca9ee09 729 wait_current_trans(root);
37d1aeee
CM
730}
731
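/*
 * decide whether the transaction should be ended soon, based on how much of
 * the global block reservation is in use and how much delayed ref work is
 * still outstanding.
 */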
static int should_end_transaction(struct btrfs_trans_handle *trans,
				  struct btrfs_root *root)
{
	if (root->fs_info->global_block_rsv.space_info->full &&
	    btrfs_check_space_for_delayed_refs(trans, root))
		return 1;

	return !!btrfs_block_rsv_check(root, &root->fs_info->global_block_rsv, 5);
}

int btrfs_should_end_transaction(struct btrfs_trans_handle *trans,
				 struct btrfs_root *root)
{
	struct btrfs_transaction *cur_trans = trans->transaction;
	int updates;
	int err;

	smp_mb();
	if (cur_trans->state >= TRANS_STATE_BLOCKED ||
	    cur_trans->delayed_refs.flushing)
		return 1;

	updates = trans->delayed_ref_updates;
	trans->delayed_ref_updates = 0;
	if (updates) {
		err = btrfs_run_delayed_refs(trans, root, updates * 2);
		if (err) /* Error code will also eval true */
			return err;
	}

	return should_end_transaction(trans, root);
}

764
89ce8a63 765static int __btrfs_end_transaction(struct btrfs_trans_handle *trans,
a698d075 766 struct btrfs_root *root, int throttle)
79154b1b 767{
8929ecfa 768 struct btrfs_transaction *cur_trans = trans->transaction;
ab78c84d 769 struct btrfs_fs_info *info = root->fs_info;
1be41b78 770 unsigned long cur = trans->delayed_ref_updates;
a698d075 771 int lock = (trans->type != TRANS_JOIN_NOLOCK);
4edc2ca3 772 int err = 0;
a79b7d4b 773 int must_run_delayed_refs = 0;
c3e69d58 774
3bbb24b2
JB
775 if (trans->use_count > 1) {
776 trans->use_count--;
2a1eb461
JB
777 trans->block_rsv = trans->orig_rsv;
778 return 0;
779 }
780
b24e03db 781 btrfs_trans_release_metadata(trans, root);
4c13d758 782 trans->block_rsv = NULL;
c5567237 783
ea658bad
JB
784 if (!list_empty(&trans->new_bgs))
785 btrfs_create_pending_block_groups(trans, root);
786
50d9aa99
JB
787 if (!list_empty(&trans->ordered)) {
788 spin_lock(&info->trans_lock);
d3efe084 789 list_splice_init(&trans->ordered, &cur_trans->pending_ordered);
50d9aa99
JB
790 spin_unlock(&info->trans_lock);
791 }
792
1be41b78 793 trans->delayed_ref_updates = 0;
a79b7d4b
CM
794 if (!trans->sync) {
795 must_run_delayed_refs =
796 btrfs_should_throttle_delayed_refs(trans, root);
0a2b2a84 797 cur = max_t(unsigned long, cur, 32);
a79b7d4b
CM
798
799 /*
800 * don't make the caller wait if they are from a NOLOCK
801 * or ATTACH transaction, it will deadlock with commit
802 */
803 if (must_run_delayed_refs == 1 &&
804 (trans->type & (__TRANS_JOIN_NOLOCK | __TRANS_ATTACH)))
805 must_run_delayed_refs = 2;
56bec294 806 }
bb721703 807
fcebe456
JB
808 if (trans->qgroup_reserved) {
809 /*
810 * the same root has to be passed here between start_transaction
811 * and end_transaction. Subvolume quota depends on this.
812 */
813 btrfs_qgroup_free(trans->root, trans->qgroup_reserved);
814 trans->qgroup_reserved = 0;
815 }
816
0e721106
JB
817 btrfs_trans_release_metadata(trans, root);
818 trans->block_rsv = NULL;
56bec294 819
ea658bad
JB
820 if (!list_empty(&trans->new_bgs))
821 btrfs_create_pending_block_groups(trans, root);
822
4fbcdf66
FM
823 btrfs_trans_release_chunk_metadata(trans);
824
a4abeea4 825 if (lock && !atomic_read(&root->fs_info->open_ioctl_trans) &&
4a9d8bde
MX
826 should_end_transaction(trans, root) &&
827 ACCESS_ONCE(cur_trans->state) == TRANS_STATE_RUNNING) {
828 spin_lock(&info->trans_lock);
829 if (cur_trans->state == TRANS_STATE_RUNNING)
830 cur_trans->state = TRANS_STATE_BLOCKED;
831 spin_unlock(&info->trans_lock);
a4abeea4 832 }
8929ecfa 833
4a9d8bde 834 if (lock && ACCESS_ONCE(cur_trans->state) == TRANS_STATE_BLOCKED) {
3bbb24b2 835 if (throttle)
8929ecfa 836 return btrfs_commit_transaction(trans, root);
3bbb24b2 837 else
8929ecfa
YZ
838 wake_up_process(info->transaction_kthread);
839 }
840
0860adfd 841 if (trans->type & __TRANS_FREEZABLE)
98114659 842 sb_end_intwrite(root->fs_info->sb);
6df7881a 843
8929ecfa 844 WARN_ON(cur_trans != info->running_transaction);
13c5a93e
JB
845 WARN_ON(atomic_read(&cur_trans->num_writers) < 1);
846 atomic_dec(&cur_trans->num_writers);
0860adfd 847 extwriter_counter_dec(cur_trans, trans->type);
89ce8a63 848
a83342aa
DS
849 /*
850 * Make sure counter is updated before we wake up waiters.
851 */
99d16cbc 852 smp_mb();
79154b1b
CM
853 if (waitqueue_active(&cur_trans->writer_wait))
854 wake_up(&cur_trans->writer_wait);
724e2315 855 btrfs_put_transaction(cur_trans);
9ed74f2d
JB
856
857 if (current->journal_info == trans)
858 current->journal_info = NULL;
ab78c84d 859
24bbcf04
YZ
860 if (throttle)
861 btrfs_run_delayed_iputs(root);
862
49b25e05 863 if (trans->aborted ||
4e121c06
JB
864 test_bit(BTRFS_FS_STATE_ERROR, &root->fs_info->fs_state)) {
865 wake_up_process(info->transaction_kthread);
4edc2ca3 866 err = -EIO;
4e121c06 867 }
edf39272 868 assert_qgroups_uptodate(trans);
49b25e05 869
4edc2ca3 870 kmem_cache_free(btrfs_trans_handle_cachep, trans);
a79b7d4b
CM
871 if (must_run_delayed_refs) {
872 btrfs_async_run_delayed_refs(root, cur,
873 must_run_delayed_refs == 1);
874 }
4edc2ca3 875 return err;
79154b1b
CM
876}
877
int btrfs_end_transaction(struct btrfs_trans_handle *trans,
			  struct btrfs_root *root)
{
	return __btrfs_end_transaction(trans, root, 0);
}

int btrfs_end_transaction_throttle(struct btrfs_trans_handle *trans,
				   struct btrfs_root *root)
{
	return __btrfs_end_transaction(trans, root, 1);
}

/*
 * when btree blocks are allocated, they have some corresponding bits set for
 * them in one of two extent_io trees.  This is used to make sure all of
 * those extents are sent to disk but does not wait on them
 */
int btrfs_write_marked_extents(struct btrfs_root *root,
			       struct extent_io_tree *dirty_pages, int mark)
{
	int err = 0;
	int werr = 0;
	struct address_space *mapping = root->fs_info->btree_inode->i_mapping;
	struct extent_state *cached_state = NULL;
	u64 start = 0;
	u64 end;

	while (!find_first_extent_bit(dirty_pages, start, &start, &end,
				      mark, &cached_state)) {
		bool wait_writeback = false;

		err = convert_extent_bit(dirty_pages, start, end,
					 EXTENT_NEED_WAIT,
					 mark, &cached_state, GFP_NOFS);
		/*
		 * convert_extent_bit can return -ENOMEM, which is most of the
		 * time a temporary error. So when it happens, ignore the error
		 * and wait for writeback of this range to finish - because we
		 * failed to set the bit EXTENT_NEED_WAIT for the range, a call
		 * to btrfs_wait_marked_extents() would not know that writeback
		 * for this range started and therefore wouldn't wait for it to
		 * finish - we don't want to commit a superblock that points to
		 * btree nodes/leafs for which writeback hasn't finished yet
		 * (and without errors).
		 * We cleanup any entries left in the io tree when committing
		 * the transaction (through clear_btree_io_tree()).
		 */
		if (err == -ENOMEM) {
			err = 0;
			wait_writeback = true;
		}
		if (!err)
			err = filemap_fdatawrite_range(mapping, start, end);
		if (err)
			werr = err;
		else if (wait_writeback)
			werr = filemap_fdatawait_range(mapping, start, end);
		free_extent_state(cached_state);
		cached_state = NULL;
		cond_resched();
		start = end + 1;
	}
	return werr;
}

/*
 * when btree blocks are allocated, they have some corresponding bits set for
 * them in one of two extent_io trees.  This is used to make sure all of
 * those extents are on disk for transaction or log commit.  We wait
 * on all the pages and clear them from the dirty pages state tree
 */
int btrfs_wait_marked_extents(struct btrfs_root *root,
			      struct extent_io_tree *dirty_pages, int mark)
{
	int err = 0;
	int werr = 0;
	struct address_space *mapping = root->fs_info->btree_inode->i_mapping;
	struct extent_state *cached_state = NULL;
	u64 start = 0;
	u64 end;
	struct btrfs_inode *btree_ino = BTRFS_I(root->fs_info->btree_inode);
	bool errors = false;

	while (!find_first_extent_bit(dirty_pages, start, &start, &end,
				      EXTENT_NEED_WAIT, &cached_state)) {
		/*
		 * Ignore -ENOMEM errors returned by clear_extent_bit().
		 * When committing the transaction, we'll remove any entries
		 * left in the io tree. For a log commit, we don't remove them
		 * after committing the log because the tree can be accessed
		 * concurrently - we do it only at transaction commit time when
		 * it's safe to do it (through clear_btree_io_tree()).
		 */
		err = clear_extent_bit(dirty_pages, start, end,
				       EXTENT_NEED_WAIT,
				       0, 0, &cached_state, GFP_NOFS);
		if (err == -ENOMEM)
			err = 0;
		if (!err)
			err = filemap_fdatawait_range(mapping, start, end);
		if (err)
			werr = err;
		free_extent_state(cached_state);
		cached_state = NULL;
		cond_resched();
		start = end + 1;
	}
	if (err)
		werr = err;

	if (root->root_key.objectid == BTRFS_TREE_LOG_OBJECTID) {
		if ((mark & EXTENT_DIRTY) &&
		    test_and_clear_bit(BTRFS_INODE_BTREE_LOG1_ERR,
				       &btree_ino->runtime_flags))
			errors = true;

		if ((mark & EXTENT_NEW) &&
		    test_and_clear_bit(BTRFS_INODE_BTREE_LOG2_ERR,
				       &btree_ino->runtime_flags))
			errors = true;
	} else {
		if (test_and_clear_bit(BTRFS_INODE_BTREE_ERR,
				       &btree_ino->runtime_flags))
			errors = true;
	}

	if (errors && !werr)
		werr = -EIO;

	return werr;
}

/*
 * when btree blocks are allocated, they have some corresponding bits set for
 * them in one of two extent_io trees.  This is used to make sure all of
 * those extents are on disk for transaction or log commit
 */
static int btrfs_write_and_wait_marked_extents(struct btrfs_root *root,
				struct extent_io_tree *dirty_pages, int mark)
{
	int ret;
	int ret2;
	struct blk_plug plug;

	blk_start_plug(&plug);
	ret = btrfs_write_marked_extents(root, dirty_pages, mark);
	blk_finish_plug(&plug);
	ret2 = btrfs_wait_marked_extents(root, dirty_pages, mark);

	if (ret)
		return ret;
	if (ret2)
		return ret2;
	return 0;
}

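/*
 * write out and wait on all dirty btree blocks of the committing
 * transaction, then empty its dirty_pages io tree.
 */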
static int btrfs_write_and_wait_transaction(struct btrfs_trans_handle *trans,
					    struct btrfs_root *root)
{
	int ret;

	ret = btrfs_write_and_wait_marked_extents(root,
					   &trans->transaction->dirty_pages,
					   EXTENT_DIRTY);
	clear_btree_io_tree(&trans->transaction->dirty_pages);

	return ret;
}

/*
 * this is used to update the root pointer in the tree of tree roots.
 *
 * But, in the case of the extent allocation tree, updating the root
 * pointer may allocate blocks which may change the root of the extent
 * allocation tree.
 *
 * So, this loops and repeats and makes sure the cowonly root didn't
 * change while the root pointer was being updated in the metadata.
 */
static int update_cowonly_root(struct btrfs_trans_handle *trans,
			       struct btrfs_root *root)
{
	int ret;
	u64 old_root_bytenr;
	u64 old_root_used;
	struct btrfs_root *tree_root = root->fs_info->tree_root;

	old_root_used = btrfs_root_used(&root->root_item);

	while (1) {
		old_root_bytenr = btrfs_root_bytenr(&root->root_item);
		if (old_root_bytenr == root->node->start &&
		    old_root_used == btrfs_root_used(&root->root_item))
			break;

		btrfs_set_root_node(&root->root_item, root->node);
		ret = btrfs_update_root(trans, tree_root,
					&root->root_key,
					&root->root_item);
		if (ret)
			return ret;

		old_root_used = btrfs_root_used(&root->root_item);
	}

	return 0;
}

/*
 * update all the cowonly tree roots on disk
 *
 * The error handling in this function may not be obvious.  Any of the
 * failures will cause the file system to go offline.  We still need
 * to clean up the delayed refs.
 */
static noinline int commit_cowonly_roots(struct btrfs_trans_handle *trans,
					 struct btrfs_root *root)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct list_head *dirty_bgs = &trans->transaction->dirty_bgs;
	struct list_head *io_bgs = &trans->transaction->io_bgs;
	struct list_head *next;
	struct extent_buffer *eb;
	int ret;

	eb = btrfs_lock_root_node(fs_info->tree_root);
	ret = btrfs_cow_block(trans, fs_info->tree_root, eb, NULL,
			      0, &eb);
	btrfs_tree_unlock(eb);
	free_extent_buffer(eb);

	if (ret)
		return ret;

	ret = btrfs_run_delayed_refs(trans, root, (unsigned long)-1);
	if (ret)
		return ret;

	ret = btrfs_run_dev_stats(trans, root->fs_info);
	if (ret)
		return ret;
	ret = btrfs_run_dev_replace(trans, root->fs_info);
	if (ret)
		return ret;
	ret = btrfs_run_qgroups(trans, root->fs_info);
	if (ret)
		return ret;

	ret = btrfs_setup_space_cache(trans, root);
	if (ret)
		return ret;

	/* run_qgroups might have added some more refs */
	ret = btrfs_run_delayed_refs(trans, root, (unsigned long)-1);
	if (ret)
		return ret;
again:
	while (!list_empty(&fs_info->dirty_cowonly_roots)) {
		next = fs_info->dirty_cowonly_roots.next;
		list_del_init(next);
		root = list_entry(next, struct btrfs_root, dirty_list);
		clear_bit(BTRFS_ROOT_DIRTY, &root->state);

		if (root != fs_info->extent_root)
			list_add_tail(&root->dirty_list,
				      &trans->transaction->switch_commits);
		ret = update_cowonly_root(trans, root);
		if (ret)
			return ret;
		ret = btrfs_run_delayed_refs(trans, root, (unsigned long)-1);
		if (ret)
			return ret;
	}

	while (!list_empty(dirty_bgs) || !list_empty(io_bgs)) {
		ret = btrfs_write_dirty_block_groups(trans, root);
		if (ret)
			return ret;
		ret = btrfs_run_delayed_refs(trans, root, (unsigned long)-1);
		if (ret)
			return ret;
	}

	if (!list_empty(&fs_info->dirty_cowonly_roots))
		goto again;

	list_add_tail(&fs_info->extent_root->dirty_list,
		      &trans->transaction->switch_commits);
	btrfs_after_dev_replace_commit(fs_info);

	return 0;
}

/*
 * dead roots are old snapshots that need to be deleted.  This allocates
 * a dirty root struct and adds it into the list of dead roots that need to
 * be deleted
 */
void btrfs_add_dead_root(struct btrfs_root *root)
{
	spin_lock(&root->fs_info->trans_lock);
	if (list_empty(&root->root_list))
		list_add_tail(&root->root_list, &root->fs_info->dead_roots);
	spin_unlock(&root->fs_info->trans_lock);
}

/*
 * update all the fs tree roots modified in this transaction on disk
 */
static noinline int commit_fs_roots(struct btrfs_trans_handle *trans,
				    struct btrfs_root *root)
{
	struct btrfs_root *gang[8];
	struct btrfs_fs_info *fs_info = root->fs_info;
	int i;
	int ret;
	int err = 0;

	spin_lock(&fs_info->fs_roots_radix_lock);
	while (1) {
		ret = radix_tree_gang_lookup_tag(&fs_info->fs_roots_radix,
						 (void **)gang, 0,
						 ARRAY_SIZE(gang),
						 BTRFS_ROOT_TRANS_TAG);
		if (ret == 0)
			break;
		for (i = 0; i < ret; i++) {
			root = gang[i];
			radix_tree_tag_clear(&fs_info->fs_roots_radix,
					(unsigned long)root->root_key.objectid,
					BTRFS_ROOT_TRANS_TAG);
			spin_unlock(&fs_info->fs_roots_radix_lock);

			btrfs_free_log(trans, root);
			btrfs_update_reloc_root(trans, root);
			btrfs_orphan_commit_root(trans, root);

			btrfs_save_ino_cache(root, trans);

			/* see comments in should_cow_block() */
			clear_bit(BTRFS_ROOT_FORCE_COW, &root->state);
			smp_mb__after_atomic();

			if (root->commit_root != root->node) {
				list_add_tail(&root->dirty_list,
					&trans->transaction->switch_commits);
				btrfs_set_root_node(&root->root_item,
						    root->node);
			}

			err = btrfs_update_root(trans, fs_info->tree_root,
						&root->root_key,
						&root->root_item);
			spin_lock(&fs_info->fs_roots_radix_lock);
			if (err)
				break;
		}
	}
	spin_unlock(&fs_info->fs_roots_radix_lock);
	return err;
}

/*
 * defrag a given btree.
 * Every leaf in the btree is read and defragged.
 */
int btrfs_defrag_root(struct btrfs_root *root)
{
	struct btrfs_fs_info *info = root->fs_info;
	struct btrfs_trans_handle *trans;
	int ret;

	if (test_and_set_bit(BTRFS_ROOT_DEFRAG_RUNNING, &root->state))
		return 0;

	while (1) {
		trans = btrfs_start_transaction(root, 0);
		if (IS_ERR(trans))
			return PTR_ERR(trans);

		ret = btrfs_defrag_leaves(trans, root);

		btrfs_end_transaction(trans, root);
		btrfs_btree_balance_dirty(info->tree_root);
		cond_resched();

		if (btrfs_fs_closing(root->fs_info) || ret != -EAGAIN)
			break;

		if (btrfs_defrag_cancelled(root->fs_info)) {
			pr_debug("BTRFS: defrag_root cancelled\n");
			ret = -EAGAIN;
			break;
		}
	}
	clear_bit(BTRFS_ROOT_DEFRAG_RUNNING, &root->state);
	return ret;
}

/*
 * new snapshots need to be created at a very specific time in the
 * transaction commit.  This does the actual creation.
 *
 * Note:
 * If an error which may affect the commitment of the current transaction
 * happens, we should return the error number.  If the error only affects
 * the creation of this pending snapshot, just return 0.
 */
static noinline int create_pending_snapshot(struct btrfs_trans_handle *trans,
				   struct btrfs_fs_info *fs_info,
				   struct btrfs_pending_snapshot *pending)
{
	struct btrfs_key key;
	struct btrfs_root_item *new_root_item;
	struct btrfs_root *tree_root = fs_info->tree_root;
	struct btrfs_root *root = pending->root;
	struct btrfs_root *parent_root;
	struct btrfs_block_rsv *rsv;
	struct inode *parent_inode;
	struct btrfs_path *path;
	struct btrfs_dir_item *dir_item;
	struct dentry *dentry;
	struct extent_buffer *tmp;
	struct extent_buffer *old;
	struct timespec cur_time = CURRENT_TIME;
	int ret = 0;
	u64 to_reserve = 0;
	u64 index = 0;
	u64 objectid;
	u64 root_flags;
	uuid_le new_uuid;

	path = btrfs_alloc_path();
	if (!path) {
		pending->error = -ENOMEM;
		return 0;
	}

	new_root_item = kmalloc(sizeof(*new_root_item), GFP_NOFS);
	if (!new_root_item) {
		pending->error = -ENOMEM;
		goto root_item_alloc_fail;
	}

	pending->error = btrfs_find_free_objectid(tree_root, &objectid);
	if (pending->error)
		goto no_free_objectid;

	/*
	 * Make qgroup to skip current new snapshot's qgroupid, as it is
	 * accounted by later btrfs_qgroup_inherit().
	 */
	btrfs_set_skip_qgroup(trans, objectid);

	btrfs_reloc_pre_snapshot(pending, &to_reserve);

	if (to_reserve > 0) {
		pending->error = btrfs_block_rsv_add(root,
						     &pending->block_rsv,
						     to_reserve,
						     BTRFS_RESERVE_NO_FLUSH);
		if (pending->error)
			goto clear_skip_qgroup;
	}

	key.objectid = objectid;
	key.offset = (u64)-1;
	key.type = BTRFS_ROOT_ITEM_KEY;

	rsv = trans->block_rsv;
	trans->block_rsv = &pending->block_rsv;
	trans->bytes_reserved = trans->block_rsv->reserved;

	dentry = pending->dentry;
	parent_inode = pending->dir;
	parent_root = BTRFS_I(parent_inode)->root;
	record_root_in_trans(trans, parent_root);

	/*
	 * insert the directory item
	 */
	ret = btrfs_set_inode_index(parent_inode, &index);
	BUG_ON(ret); /* -ENOMEM */

	/* check if there is a file/dir which has the same name. */
	dir_item = btrfs_lookup_dir_item(NULL, parent_root, path,
					 btrfs_ino(parent_inode),
					 dentry->d_name.name,
					 dentry->d_name.len, 0);
	if (dir_item != NULL && !IS_ERR(dir_item)) {
		pending->error = -EEXIST;
		goto dir_item_existed;
	} else if (IS_ERR(dir_item)) {
		ret = PTR_ERR(dir_item);
		btrfs_abort_transaction(trans, root, ret);
		goto fail;
	}
	btrfs_release_path(path);

	/*
	 * pull in the delayed directory update
	 * and the delayed inode item
	 * otherwise we corrupt the FS during
	 * snapshot
	 */
	ret = btrfs_run_delayed_items(trans, root);
	if (ret) {	/* Transaction aborted */
		btrfs_abort_transaction(trans, root, ret);
		goto fail;
	}

	record_root_in_trans(trans, root);
	btrfs_set_root_last_snapshot(&root->root_item, trans->transid);
	memcpy(new_root_item, &root->root_item, sizeof(*new_root_item));
	btrfs_check_and_init_root_item(new_root_item);

	root_flags = btrfs_root_flags(new_root_item);
	if (pending->readonly)
		root_flags |= BTRFS_ROOT_SUBVOL_RDONLY;
	else
		root_flags &= ~BTRFS_ROOT_SUBVOL_RDONLY;
	btrfs_set_root_flags(new_root_item, root_flags);

	btrfs_set_root_generation_v2(new_root_item,
			trans->transid);
	uuid_le_gen(&new_uuid);
	memcpy(new_root_item->uuid, new_uuid.b, BTRFS_UUID_SIZE);
	memcpy(new_root_item->parent_uuid, root->root_item.uuid,
			BTRFS_UUID_SIZE);
	if (!(root_flags & BTRFS_ROOT_SUBVOL_RDONLY)) {
		memset(new_root_item->received_uuid, 0,
		       sizeof(new_root_item->received_uuid));
		memset(&new_root_item->stime, 0, sizeof(new_root_item->stime));
		memset(&new_root_item->rtime, 0, sizeof(new_root_item->rtime));
		btrfs_set_root_stransid(new_root_item, 0);
		btrfs_set_root_rtransid(new_root_item, 0);
	}
	btrfs_set_stack_timespec_sec(&new_root_item->otime, cur_time.tv_sec);
	btrfs_set_stack_timespec_nsec(&new_root_item->otime, cur_time.tv_nsec);
	btrfs_set_root_otransid(new_root_item, trans->transid);

	old = btrfs_lock_root_node(root);
	ret = btrfs_cow_block(trans, root, old, NULL, 0, &old);
	if (ret) {
		btrfs_tree_unlock(old);
		free_extent_buffer(old);
		btrfs_abort_transaction(trans, root, ret);
		goto fail;
	}

	btrfs_set_lock_blocking(old);

	ret = btrfs_copy_root(trans, root, old, &tmp, objectid);
	/* clean up in any case */
	btrfs_tree_unlock(old);
	free_extent_buffer(old);
	if (ret) {
		btrfs_abort_transaction(trans, root, ret);
		goto fail;
	}
	/* see comments in should_cow_block() */
	set_bit(BTRFS_ROOT_FORCE_COW, &root->state);
	smp_wmb();

	btrfs_set_root_node(new_root_item, tmp);
	/* record when the snapshot was created in key.offset */
	key.offset = trans->transid;
	ret = btrfs_insert_root(trans, tree_root, &key, new_root_item);
	btrfs_tree_unlock(tmp);
	free_extent_buffer(tmp);
	if (ret) {
		btrfs_abort_transaction(trans, root, ret);
		goto fail;
	}

	/*
	 * insert root back/forward references
	 */
	ret = btrfs_add_root_ref(trans, tree_root, objectid,
				 parent_root->root_key.objectid,
				 btrfs_ino(parent_inode), index,
				 dentry->d_name.name, dentry->d_name.len);
	if (ret) {
		btrfs_abort_transaction(trans, root, ret);
		goto fail;
	}

	key.offset = (u64)-1;
	pending->snap = btrfs_read_fs_root_no_name(root->fs_info, &key);
	if (IS_ERR(pending->snap)) {
		ret = PTR_ERR(pending->snap);
		btrfs_abort_transaction(trans, root, ret);
		goto fail;
	}

	ret = btrfs_reloc_post_snapshot(trans, pending);
	if (ret) {
		btrfs_abort_transaction(trans, root, ret);
		goto fail;
	}

	ret = btrfs_run_delayed_refs(trans, root, (unsigned long)-1);
	if (ret) {
		btrfs_abort_transaction(trans, root, ret);
		goto fail;
	}

	ret = btrfs_insert_dir_item(trans, parent_root,
				    dentry->d_name.name, dentry->d_name.len,
				    parent_inode, &key,
				    BTRFS_FT_DIR, index);
	/* We already checked the name at the beginning, so -EEXIST is impossible. */
	BUG_ON(ret == -EEXIST || ret == -EOVERFLOW);
	if (ret) {
		btrfs_abort_transaction(trans, root, ret);
		goto fail;
	}

	btrfs_i_size_write(parent_inode, parent_inode->i_size +
					 dentry->d_name.len * 2);
	parent_inode->i_mtime = parent_inode->i_ctime = CURRENT_TIME;
	ret = btrfs_update_inode_fallback(trans, parent_root, parent_inode);
	if (ret) {
		btrfs_abort_transaction(trans, root, ret);
		goto fail;
	}
	ret = btrfs_uuid_tree_add(trans, fs_info->uuid_root, new_uuid.b,
				  BTRFS_UUID_KEY_SUBVOL, objectid);
	if (ret) {
		btrfs_abort_transaction(trans, root, ret);
		goto fail;
	}
	if (!btrfs_is_empty_uuid(new_root_item->received_uuid)) {
		ret = btrfs_uuid_tree_add(trans, fs_info->uuid_root,
					  new_root_item->received_uuid,
					  BTRFS_UUID_KEY_RECEIVED_SUBVOL,
					  objectid);
		if (ret && ret != -EEXIST) {
			btrfs_abort_transaction(trans, root, ret);
			goto fail;
		}
	}

	ret = btrfs_run_delayed_refs(trans, root, (unsigned long)-1);
	if (ret) {
		btrfs_abort_transaction(trans, root, ret);
		goto fail;
	}

	/*
	 * account qgroup counters before qgroup_inherit()
	 */
	ret = btrfs_qgroup_prepare_account_extents(trans, fs_info);
	if (ret)
		goto fail;
	ret = btrfs_qgroup_account_extents(trans, fs_info);
	if (ret)
		goto fail;
	ret = btrfs_qgroup_inherit(trans, fs_info,
				   root->root_key.objectid,
				   objectid, pending->inherit);
	if (ret) {
		btrfs_abort_transaction(trans, root, ret);
		goto fail;
	}

fail:
	pending->error = ret;
dir_item_existed:
	trans->block_rsv = rsv;
	trans->bytes_reserved = 0;
clear_skip_qgroup:
	btrfs_clear_skip_qgroup(trans);
no_free_objectid:
	kfree(new_root_item);
root_item_alloc_fail:
	btrfs_free_path(path);
	return ret;
}

/*
 * create all the snapshots we've scheduled for creation
 */
static noinline int create_pending_snapshots(struct btrfs_trans_handle *trans,
					     struct btrfs_fs_info *fs_info)
{
	struct btrfs_pending_snapshot *pending, *next;
	struct list_head *head = &trans->transaction->pending_snapshots;
	int ret = 0;

	list_for_each_entry_safe(pending, next, head, list) {
		list_del(&pending->list);
		ret = create_pending_snapshot(trans, fs_info, pending);
		if (ret)
			break;
	}
	return ret;
}

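/*
 * copy the freshly committed chunk root and tree root pointers into the
 * in-memory copy of the super block.
 */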
static void update_super_roots(struct btrfs_root *root)
{
	struct btrfs_root_item *root_item;
	struct btrfs_super_block *super;

	super = root->fs_info->super_copy;

	root_item = &root->fs_info->chunk_root->root_item;
	super->chunk_root = root_item->bytenr;
	super->chunk_root_generation = root_item->generation;
	super->chunk_root_level = root_item->level;

	root_item = &root->fs_info->tree_root->root_item;
	super->root = root_item->bytenr;
	super->generation = root_item->generation;
	super->root_level = root_item->level;
	if (btrfs_test_opt(root, SPACE_CACHE))
		super->cache_generation = root_item->generation;
	if (root->fs_info->update_uuid_tree_gen)
		super->uuid_tree_generation = root_item->generation;
}

f36f3042
CM
1599int btrfs_transaction_in_commit(struct btrfs_fs_info *info)
1600{
4a9d8bde 1601 struct btrfs_transaction *trans;
f36f3042 1602 int ret = 0;
4a9d8bde 1603
a4abeea4 1604 spin_lock(&info->trans_lock);
4a9d8bde
MX
1605 trans = info->running_transaction;
1606 if (trans)
1607 ret = (trans->state >= TRANS_STATE_COMMIT_START);
a4abeea4 1608 spin_unlock(&info->trans_lock);
f36f3042
CM
1609 return ret;
1610}
1611
8929ecfa
YZ
1612int btrfs_transaction_blocked(struct btrfs_fs_info *info)
1613{
4a9d8bde 1614 struct btrfs_transaction *trans;
8929ecfa 1615 int ret = 0;
4a9d8bde 1616
a4abeea4 1617 spin_lock(&info->trans_lock);
4a9d8bde
MX
1618 trans = info->running_transaction;
1619 if (trans)
1620 ret = is_transaction_blocked(trans);
a4abeea4 1621 spin_unlock(&info->trans_lock);
8929ecfa
YZ
1622 return ret;
1623}
1624
bb9c12c9
SW
1625/*
1626 * wait for the current transaction commit to start and block subsequent
1627 * transaction joins
1628 */
1629static void wait_current_trans_commit_start(struct btrfs_root *root,
1630 struct btrfs_transaction *trans)
1631{
4a9d8bde 1632 wait_event(root->fs_info->transaction_blocked_wait,
501407aa
JB
1633 trans->state >= TRANS_STATE_COMMIT_START ||
1634 trans->aborted);
bb9c12c9
SW
1635}
1636
1637/*
1638 * wait for the current transaction to start and then become unblocked.
1639 * caller holds ref.
1640 */
1641static void wait_current_trans_commit_start_and_unblock(struct btrfs_root *root,
1642 struct btrfs_transaction *trans)
1643{
72d63ed6 1644 wait_event(root->fs_info->transaction_wait,
501407aa
JB
1645 trans->state >= TRANS_STATE_UNBLOCKED ||
1646 trans->aborted);
bb9c12c9
SW
1647}
1648
1649/*
1650 * commit transactions asynchronously. once btrfs_commit_transaction_async
1651 * returns, any subsequent transaction will not be allowed to join.
1652 */
1653struct btrfs_async_commit {
1654 struct btrfs_trans_handle *newtrans;
1655 struct btrfs_root *root;
7892b5af 1656 struct work_struct work;
bb9c12c9
SW
1657};
1658
1659static void do_async_commit(struct work_struct *work)
1660{
1661 struct btrfs_async_commit *ac =
7892b5af 1662 container_of(work, struct btrfs_async_commit, work);
bb9c12c9 1663
6fc4e354
SW
1664 /*
1665 * We've got freeze protection passed with the transaction.
1666 * Tell lockdep about it.
1667 */
b1a06a4b 1668 if (ac->newtrans->type & __TRANS_FREEZABLE)
bee9182d 1669 __sb_writers_acquired(ac->root->fs_info->sb, SB_FREEZE_FS);
6fc4e354 1670
e209db7a
SW
1671 current->journal_info = ac->newtrans;
1672
bb9c12c9
SW
1673 btrfs_commit_transaction(ac->newtrans, ac->root);
1674 kfree(ac);
1675}
1676
1677int btrfs_commit_transaction_async(struct btrfs_trans_handle *trans,
1678 struct btrfs_root *root,
1679 int wait_for_unblock)
1680{
1681 struct btrfs_async_commit *ac;
1682 struct btrfs_transaction *cur_trans;
1683
1684 ac = kmalloc(sizeof(*ac), GFP_NOFS);
db5b493a
TI
1685 if (!ac)
1686 return -ENOMEM;
bb9c12c9 1687
7892b5af 1688 INIT_WORK(&ac->work, do_async_commit);
bb9c12c9 1689 ac->root = root;
7a7eaa40 1690 ac->newtrans = btrfs_join_transaction(root);
3612b495
TI
1691 if (IS_ERR(ac->newtrans)) {
1692 int err = PTR_ERR(ac->newtrans);
1693 kfree(ac);
1694 return err;
1695 }
bb9c12c9
SW
1696
1697 /* take transaction reference */
bb9c12c9 1698 cur_trans = trans->transaction;
13c5a93e 1699 atomic_inc(&cur_trans->use_count);
bb9c12c9
SW
1700
1701 btrfs_end_transaction(trans, root);
6fc4e354
SW
1702
1703 /*
1704 * Tell lockdep we've released the freeze rwsem, since the
1705 * async commit thread will be the one to unlock it.
1706 */
b1a06a4b 1707 if (ac->newtrans->type & __TRANS_FREEZABLE)
bee9182d 1708 __sb_writers_release(root->fs_info->sb, SB_FREEZE_FS);
6fc4e354 1709
7892b5af 1710 schedule_work(&ac->work);
bb9c12c9
SW
1711
1712 /* wait for transaction to start and unblock */
bb9c12c9
SW
1713 if (wait_for_unblock)
1714 wait_current_trans_commit_start_and_unblock(root, cur_trans);
1715 else
1716 wait_current_trans_commit_start(root, cur_trans);
bb9c12c9 1717
38e88054
SW
1718 if (current->journal_info == trans)
1719 current->journal_info = NULL;
1720
724e2315 1721 btrfs_put_transaction(cur_trans);
bb9c12c9
SW
1722 return 0;
1723}
1724
49b25e05
JM
1725
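/*
 * Error path for a commit that has already started: abort the handle,
 * unhook the transaction from fs_info, wait for the remaining writers
 * and tear the transaction down, dropping both references.
 */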
1726static void cleanup_transaction(struct btrfs_trans_handle *trans,
7b8b92af 1727 struct btrfs_root *root, int err)
49b25e05
JM
1728{
1729 struct btrfs_transaction *cur_trans = trans->transaction;
f094ac32 1730 DEFINE_WAIT(wait);
49b25e05
JM
1731
1732 WARN_ON(trans->use_count > 1);
1733
7b8b92af
JB
1734 btrfs_abort_transaction(trans, root, err);
1735
49b25e05 1736 spin_lock(&root->fs_info->trans_lock);
66b6135b 1737
25d8c284
MX
1738 /*
1739 * If the transaction is removed from the list, it means this
1740 * transaction has been committed successfully, so it is impossible
1741 * to call the cleanup function.
1742 */
1743 BUG_ON(list_empty(&cur_trans->list));
66b6135b 1744
49b25e05 1745 list_del_init(&cur_trans->list);
d7096fc3 1746 if (cur_trans == root->fs_info->running_transaction) {
4a9d8bde 1747 cur_trans->state = TRANS_STATE_COMMIT_DOING;
f094ac32
LB
1748 spin_unlock(&root->fs_info->trans_lock);
1749 wait_event(cur_trans->writer_wait,
1750 atomic_read(&cur_trans->num_writers) == 1);
1751
1752 spin_lock(&root->fs_info->trans_lock);
d7096fc3 1753 }
49b25e05
JM
1754 spin_unlock(&root->fs_info->trans_lock);
1755
1756 btrfs_cleanup_one_transaction(trans->transaction, root);
1757
4a9d8bde
MX
1758 spin_lock(&root->fs_info->trans_lock);
1759 if (cur_trans == root->fs_info->running_transaction)
1760 root->fs_info->running_transaction = NULL;
1761 spin_unlock(&root->fs_info->trans_lock);
1762
e0228285
JB
1763 if (trans->type & __TRANS_FREEZABLE)
1764 sb_end_intwrite(root->fs_info->sb);
724e2315
JB
1765 btrfs_put_transaction(cur_trans);
1766 btrfs_put_transaction(cur_trans);
49b25e05
JM
1767
1768 trace_btrfs_transaction_commit(root);
1769
49b25e05
JM
1770 if (current->journal_info == trans)
1771 current->journal_info = NULL;
c0af8f0b 1772 btrfs_scrub_cancel(root->fs_info);
49b25e05
JM
1773
1774 kmem_cache_free(btrfs_trans_handle_cachep, trans);
1775}
1776
82436617
MX
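/*
 * The two helpers below only do work when the filesystem is mounted with
 * -o flushoncommit: start delalloc writeback on all roots and, later,
 * wait for the resulting ordered extents to finish.
 */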
1777static inline int btrfs_start_delalloc_flush(struct btrfs_fs_info *fs_info)
1778{
1779 if (btrfs_test_opt(fs_info->tree_root, FLUSHONCOMMIT))
6c255e67 1780 return btrfs_start_delalloc_roots(fs_info, 1, -1);
82436617
MX
1781 return 0;
1782}
1783
1784static inline void btrfs_wait_delalloc_flush(struct btrfs_fs_info *fs_info)
1785{
1786 if (btrfs_test_opt(fs_info->tree_root, FLUSHONCOMMIT))
b0244199 1787 btrfs_wait_ordered_roots(fs_info, -1);
82436617
MX
1788}
1789
50d9aa99
JB
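/*
 * Wait for every ordered extent attached to this transaction to reach
 * BTRFS_ORDERED_COMPLETE; trans_lock is dropped while sleeping on each
 * extent and retaken before the list is examined again.
 */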
1790static inline void
1791btrfs_wait_pending_ordered(struct btrfs_transaction *cur_trans,
1792 struct btrfs_fs_info *fs_info)
1793{
1794 struct btrfs_ordered_extent *ordered;
1795
1796 spin_lock(&fs_info->trans_lock);
1797 while (!list_empty(&cur_trans->pending_ordered)) {
1798 ordered = list_first_entry(&cur_trans->pending_ordered,
1799 struct btrfs_ordered_extent,
1800 trans_list);
1801 list_del_init(&ordered->trans_list);
1802 spin_unlock(&fs_info->trans_lock);
1803
1804 wait_event(ordered->wait, test_bit(BTRFS_ORDERED_COMPLETE,
1805 &ordered->flags));
1806 btrfs_put_ordered_extent(ordered);
1807 spin_lock(&fs_info->trans_lock);
1808 }
1809 spin_unlock(&fs_info->trans_lock);
1810}
1811
79154b1b
CM
1812int btrfs_commit_transaction(struct btrfs_trans_handle *trans,
1813 struct btrfs_root *root)
1814{
49b25e05 1815 struct btrfs_transaction *cur_trans = trans->transaction;
8fd17795 1816 struct btrfs_transaction *prev_trans = NULL;
656f30db 1817 struct btrfs_inode *btree_ino = BTRFS_I(root->fs_info->btree_inode);
25287e0a 1818 int ret;
79154b1b 1819
8d25a086
MX
1820 /* Stop the commit early if ->aborted is set */
1821 if (unlikely(ACCESS_ONCE(cur_trans->aborted))) {
25287e0a 1822 ret = cur_trans->aborted;
e4a2bcac
JB
1823 btrfs_end_transaction(trans, root);
1824 return ret;
25287e0a 1825 }
49b25e05 1826
56bec294
CM
1827 /* make a pass through all the delayed refs we have so far
1828	 * any running procs may add more while we are here
1829 */
1830 ret = btrfs_run_delayed_refs(trans, root, 0);
e4a2bcac
JB
1831 if (ret) {
1832 btrfs_end_transaction(trans, root);
1833 return ret;
1834 }
56bec294 1835
0e721106
JB
1836 btrfs_trans_release_metadata(trans, root);
1837 trans->block_rsv = NULL;
272d26d0
MX
1838 if (trans->qgroup_reserved) {
1839 btrfs_qgroup_free(root, trans->qgroup_reserved);
1840 trans->qgroup_reserved = 0;
1841 }
0e721106 1842
b7ec40d7 1843 cur_trans = trans->transaction;
49b25e05 1844
56bec294
CM
1845 /*
1846 * set the flushing flag so procs in this transaction have to
1847 * start sending their work down.
1848 */
b7ec40d7 1849 cur_trans->delayed_refs.flushing = 1;
1be41b78 1850 smp_wmb();
56bec294 1851
ea658bad
JB
1852 if (!list_empty(&trans->new_bgs))
1853 btrfs_create_pending_block_groups(trans, root);
1854
c3e69d58 1855 ret = btrfs_run_delayed_refs(trans, root, 0);
e4a2bcac
JB
1856 if (ret) {
1857 btrfs_end_transaction(trans, root);
1858 return ret;
1859 }
56bec294 1860
1bbc621e
CM
1861 if (!cur_trans->dirty_bg_run) {
1862 int run_it = 0;
1863
1864 /* this mutex is also taken before trying to set
1865 * block groups readonly. We need to make sure
1866 * that nobody has set a block group readonly
1867	 * after extents from that block group have been
1868 * allocated for cache files. btrfs_set_block_group_ro
1869 * will wait for the transaction to commit if it
1870 * finds dirty_bg_run = 1
1871 *
1872 * The dirty_bg_run flag is also used to make sure only
1873 * one process starts all the block group IO. It wouldn't
1874 * hurt to have more than one go through, but there's no
1875 * real advantage to it either.
1876 */
1877 mutex_lock(&root->fs_info->ro_block_group_mutex);
1878 if (!cur_trans->dirty_bg_run) {
1879 run_it = 1;
1880 cur_trans->dirty_bg_run = 1;
1881 }
1882 mutex_unlock(&root->fs_info->ro_block_group_mutex);
1883
1884 if (run_it)
1885 ret = btrfs_start_dirty_block_groups(trans, root);
1886 }
1887 if (ret) {
1888 btrfs_end_transaction(trans, root);
1889 return ret;
1890 }
1891
4a9d8bde 1892 spin_lock(&root->fs_info->trans_lock);
d3efe084 1893 list_splice_init(&trans->ordered, &cur_trans->pending_ordered);
4a9d8bde
MX
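	/*
	 * If another task already moved this transaction past COMMIT_START,
	 * just end our handle, wait for that commit to finish and return
	 * its status.
	 */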
1894 if (cur_trans->state >= TRANS_STATE_COMMIT_START) {
1895 spin_unlock(&root->fs_info->trans_lock);
13c5a93e 1896 atomic_inc(&cur_trans->use_count);
49b25e05 1897 ret = btrfs_end_transaction(trans, root);
ccd467d6 1898
b9c8300c 1899 wait_for_commit(root, cur_trans);
15ee9bc7 1900
b4924a0f
LB
1901 if (unlikely(cur_trans->aborted))
1902 ret = cur_trans->aborted;
1903
724e2315 1904 btrfs_put_transaction(cur_trans);
15ee9bc7 1905
49b25e05 1906 return ret;
79154b1b 1907 }
4313b399 1908
4a9d8bde 1909 cur_trans->state = TRANS_STATE_COMMIT_START;
bb9c12c9
SW
1910 wake_up(&root->fs_info->transaction_blocked_wait);
1911
ccd467d6
CM
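	/*
	 * If an earlier transaction is still on the list it has not finished
	 * committing yet; wait for it (and inherit any abort status) so
	 * commits reach disk in transid order.
	 */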
1912 if (cur_trans->list.prev != &root->fs_info->trans_list) {
1913 prev_trans = list_entry(cur_trans->list.prev,
1914 struct btrfs_transaction, list);
4a9d8bde 1915 if (prev_trans->state != TRANS_STATE_COMPLETED) {
13c5a93e 1916 atomic_inc(&prev_trans->use_count);
a4abeea4 1917 spin_unlock(&root->fs_info->trans_lock);
ccd467d6
CM
1918
1919 wait_for_commit(root, prev_trans);
1f9b8c8f 1920 ret = prev_trans->aborted;
ccd467d6 1921
724e2315 1922 btrfs_put_transaction(prev_trans);
1f9b8c8f
FM
1923 if (ret)
1924 goto cleanup_transaction;
a4abeea4
JB
1925 } else {
1926 spin_unlock(&root->fs_info->trans_lock);
ccd467d6 1927 }
a4abeea4
JB
1928 } else {
1929 spin_unlock(&root->fs_info->trans_lock);
ccd467d6 1930 }
15ee9bc7 1931
0860adfd
MX
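	/*
	 * Drop our external writer count, flush delalloc and delayed items,
	 * then wait for the remaining external writers to finish so the
	 * second flush below picks up anything they queued.
	 */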
1932 extwriter_counter_dec(cur_trans, trans->type);
1933
82436617
MX
1934 ret = btrfs_start_delalloc_flush(root->fs_info);
1935 if (ret)
1936 goto cleanup_transaction;
1937
8d875f95 1938 ret = btrfs_run_delayed_items(trans, root);
581227d0
MX
1939 if (ret)
1940 goto cleanup_transaction;
15ee9bc7 1941
581227d0
MX
1942 wait_event(cur_trans->writer_wait,
1943 extwriter_counter_read(cur_trans) == 0);
15ee9bc7 1944
581227d0 1945	/* some pending stuff might be added after the previous flush. */
8d875f95 1946 ret = btrfs_run_delayed_items(trans, root);
ca469637
MX
1947 if (ret)
1948 goto cleanup_transaction;
1949
82436617 1950 btrfs_wait_delalloc_flush(root->fs_info);
cb7ab021 1951
50d9aa99
JB
1952 btrfs_wait_pending_ordered(cur_trans, root->fs_info);
1953
cb7ab021 1954 btrfs_scrub_pause(root);
ed0ca140
JB
1955 /*
1956 * Ok now we need to make sure to block out any other joins while we
1957 * commit the transaction. We could have started a join before setting
4a9d8bde 1958	 * COMMIT_DOING, so make sure to wait for num_writers to drop to 1 again.
ed0ca140
JB
1959 */
1960 spin_lock(&root->fs_info->trans_lock);
4a9d8bde 1961 cur_trans->state = TRANS_STATE_COMMIT_DOING;
ed0ca140
JB
1962 spin_unlock(&root->fs_info->trans_lock);
1963 wait_event(cur_trans->writer_wait,
1964 atomic_read(&cur_trans->num_writers) == 1);
1965
2cba30f1
MX
1966 /* ->aborted might be set after the previous check, so check it */
1967 if (unlikely(ACCESS_ONCE(cur_trans->aborted))) {
1968 ret = cur_trans->aborted;
6cf7f77e 1969 goto scrub_continue;
2cba30f1 1970 }
7585717f
CM
1971 /*
1972 * the reloc mutex makes sure that we stop
1973 * the balancing code from coming in and moving
1974 * extents around in the middle of the commit
1975 */
1976 mutex_lock(&root->fs_info->reloc_mutex);
1977
42874b3d
MX
1978 /*
1979 * We needn't worry about the delayed items because we will
1980 * deal with them in create_pending_snapshot(), which is the
1981 * core function of the snapshot creation.
1982 */
1983 ret = create_pending_snapshots(trans, root->fs_info);
49b25e05
JM
1984 if (ret) {
1985 mutex_unlock(&root->fs_info->reloc_mutex);
6cf7f77e 1986 goto scrub_continue;
49b25e05 1987 }
3063d29f 1988
42874b3d
MX
1989 /*
1990 * We insert the dir indexes of the snapshots and update the inode
1991 * of the snapshots' parents after the snapshot creation, so there
1992 * are some delayed items which are not dealt with. Now deal with
1993 * them.
1994 *
1995 * We needn't worry that this operation will corrupt the snapshots,
1997	 * because all the trees which are snapshotted will be forced to COW
1997 * the nodes and leaves.
1998 */
1999 ret = btrfs_run_delayed_items(trans, root);
49b25e05
JM
2000 if (ret) {
2001 mutex_unlock(&root->fs_info->reloc_mutex);
6cf7f77e 2002 goto scrub_continue;
49b25e05 2003 }
16cdcec7 2004
56bec294 2005 ret = btrfs_run_delayed_refs(trans, root, (unsigned long)-1);
49b25e05
JM
2006 if (ret) {
2007 mutex_unlock(&root->fs_info->reloc_mutex);
6cf7f77e 2008 goto scrub_continue;
49b25e05 2009 }
56bec294 2010
0ed4792a
QW
2011	/* Record old roots for later qgroup accounting */
2012 ret = btrfs_qgroup_prepare_account_extents(trans, root->fs_info);
2013 if (ret) {
2014 mutex_unlock(&root->fs_info->reloc_mutex);
2015 goto scrub_continue;
2016 }
2017
e999376f
CM
2018 /*
2019 * make sure none of the code above managed to slip in a
2020 * delayed item
2021 */
2022 btrfs_assert_delayed_root_empty(root);
2023
2c90e5d6 2024 WARN_ON(cur_trans != trans->transaction);
dc17ff8f 2025
e02119d5
CM
2026	/* commit_fs_roots() and commit_cowonly_roots() below are responsible
2027	 * for getting the various roots consistent with each other. Every pointer
2028 * in the tree of tree roots has to point to the most up to date
2029 * root for every subvolume and other tree. So, we have to keep
2030 * the tree logging code from jumping in and changing any
2031 * of the trees.
2032 *
2033 * At this point in the commit, there can't be any tree-log
2034 * writers, but a little lower down we drop the trans mutex
2035 * and let new people in. By holding the tree_log_mutex
2036 * from now until after the super is written, we avoid races
2037 * with the tree-log code.
2038 */
2039 mutex_lock(&root->fs_info->tree_log_mutex);
2040
5d4f98a2 2041 ret = commit_fs_roots(trans, root);
49b25e05
JM
2042 if (ret) {
2043 mutex_unlock(&root->fs_info->tree_log_mutex);
871383be 2044 mutex_unlock(&root->fs_info->reloc_mutex);
6cf7f77e 2045 goto scrub_continue;
49b25e05 2046 }
54aa1f4d 2047
3818aea2 2048 /*
7e1876ac
DS
2049 * Since the transaction is done, we can apply the pending changes
2050 * before the next transaction.
3818aea2 2051 */
572d9ab7 2052 btrfs_apply_pending_changes(root->fs_info);
3818aea2 2053
5d4f98a2 2054	/* commit_fs_roots gets rid of all the tree log roots; it is now
e02119d5
CM
2055 * safe to free the root of tree log roots
2056 */
2057 btrfs_free_log_root_tree(trans, root->fs_info);
2058
0ed4792a
QW
2059 /*
2060	 * Since the fs roots are all committed, we now have accurate
2061	 * new_roots, so do the qgroup accounting.
2062 */
2063 ret = btrfs_qgroup_account_extents(trans, root->fs_info);
2064 if (ret < 0) {
2065 mutex_unlock(&root->fs_info->tree_log_mutex);
2066 mutex_unlock(&root->fs_info->reloc_mutex);
2067 goto scrub_continue;
2068 }
2069
5d4f98a2 2070 ret = commit_cowonly_roots(trans, root);
49b25e05
JM
2071 if (ret) {
2072 mutex_unlock(&root->fs_info->tree_log_mutex);
871383be 2073 mutex_unlock(&root->fs_info->reloc_mutex);
6cf7f77e 2074 goto scrub_continue;
49b25e05 2075 }
54aa1f4d 2076
2cba30f1
MX
2077 /*
2078 * The tasks which save the space cache and inode cache may also
2079 * update ->aborted, check it.
2080 */
2081 if (unlikely(ACCESS_ONCE(cur_trans->aborted))) {
2082 ret = cur_trans->aborted;
2083 mutex_unlock(&root->fs_info->tree_log_mutex);
2084 mutex_unlock(&root->fs_info->reloc_mutex);
6cf7f77e 2085 goto scrub_continue;
2cba30f1
MX
2086 }
2087
11833d66
YZ
2088 btrfs_prepare_extent_commit(trans, root);
2089
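	/*
	 * Record the final tree root and chunk root nodes in their root
	 * items, queue both on the switch_commits list and then switch
	 * every queued root over to its new commit root.
	 */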
78fae27e 2090 cur_trans = root->fs_info->running_transaction;
5d4f98a2
YZ
2091
2092 btrfs_set_root_node(&root->fs_info->tree_root->root_item,
2093 root->fs_info->tree_root->node);
9e351cc8
JB
2094 list_add_tail(&root->fs_info->tree_root->dirty_list,
2095 &cur_trans->switch_commits);
5d4f98a2
YZ
2096
2097 btrfs_set_root_node(&root->fs_info->chunk_root->root_item,
2098 root->fs_info->chunk_root->node);
9e351cc8
JB
2099 list_add_tail(&root->fs_info->chunk_root->dirty_list,
2100 &cur_trans->switch_commits);
2101
2102 switch_commit_roots(cur_trans, root->fs_info);
5d4f98a2 2103
edf39272 2104 assert_qgroups_uptodate(trans);
ce93ec54 2105 ASSERT(list_empty(&cur_trans->dirty_bgs));
1bbc621e 2106 ASSERT(list_empty(&cur_trans->io_bgs));
5d4f98a2 2107 update_super_roots(root);
e02119d5 2108
60e7cd3a
JB
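	/*
	 * Clear the log root pointers (the log trees were freed above) and
	 * snapshot super_copy into super_for_commit, the copy that is
	 * actually written to the devices.
	 */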
2109 btrfs_set_super_log_root(root->fs_info->super_copy, 0);
2110 btrfs_set_super_log_root_level(root->fs_info->super_copy, 0);
6c41761f
DS
2111 memcpy(root->fs_info->super_for_commit, root->fs_info->super_copy,
2112 sizeof(*root->fs_info->super_copy));
ccd467d6 2113
935e5cc9 2114 btrfs_update_commit_device_size(root->fs_info);
ce7213c7 2115 btrfs_update_commit_device_bytes_used(root, cur_trans);
935e5cc9 2116
656f30db
FM
2117 clear_bit(BTRFS_INODE_BTREE_LOG1_ERR, &btree_ino->runtime_flags);
2118 clear_bit(BTRFS_INODE_BTREE_LOG2_ERR, &btree_ino->runtime_flags);
2119
4fbcdf66
FM
2120 btrfs_trans_release_chunk_metadata(trans);
2121
a4abeea4 2122 spin_lock(&root->fs_info->trans_lock);
4a9d8bde 2123 cur_trans->state = TRANS_STATE_UNBLOCKED;
a4abeea4 2124 root->fs_info->running_transaction = NULL;
a4abeea4 2125 spin_unlock(&root->fs_info->trans_lock);
7585717f 2126 mutex_unlock(&root->fs_info->reloc_mutex);
b7ec40d7 2127
f9295749 2128 wake_up(&root->fs_info->transaction_wait);
e6dcd2dc 2129
79154b1b 2130 ret = btrfs_write_and_wait_transaction(trans, root);
49b25e05 2131 if (ret) {
a4553fef 2132 btrfs_std_error(root->fs_info, ret,
08748810 2133 "Error while writing out transaction");
49b25e05 2134 mutex_unlock(&root->fs_info->tree_log_mutex);
6cf7f77e 2135 goto scrub_continue;
49b25e05
JM
2136 }
2137
2138 ret = write_ctree_super(trans, root, 0);
2139 if (ret) {
2140 mutex_unlock(&root->fs_info->tree_log_mutex);
6cf7f77e 2141 goto scrub_continue;
49b25e05 2142 }
4313b399 2143
e02119d5
CM
2144 /*
2145	 * the super is written; we can safely allow the tree-loggers
2146 * to go about their business
2147 */
2148 mutex_unlock(&root->fs_info->tree_log_mutex);
2149
11833d66 2150 btrfs_finish_extent_commit(trans, root);
4313b399 2151
13212b54
ZL
2152 if (cur_trans->have_free_bgs)
2153 btrfs_clear_space_info_full(root->fs_info);
2154
15ee9bc7 2155 root->fs_info->last_trans_committed = cur_trans->transid;
4a9d8bde
MX
2156 /*
2157 * We needn't acquire the lock here because there is no other task
2158 * which can change it.
2159 */
2160 cur_trans->state = TRANS_STATE_COMPLETED;
2c90e5d6 2161 wake_up(&cur_trans->commit_wait);
3de4586c 2162
a4abeea4 2163 spin_lock(&root->fs_info->trans_lock);
13c5a93e 2164 list_del_init(&cur_trans->list);
a4abeea4
JB
2165 spin_unlock(&root->fs_info->trans_lock);
2166
724e2315
JB
2167 btrfs_put_transaction(cur_trans);
2168 btrfs_put_transaction(cur_trans);
58176a96 2169
0860adfd 2170 if (trans->type & __TRANS_FREEZABLE)
354aa0fb 2171 sb_end_intwrite(root->fs_info->sb);
b2b5ef5c 2172
1abe9b8a 2173 trace_btrfs_transaction_commit(root);
2174
a2de733c
AJ
2175 btrfs_scrub_continue(root);
2176
9ed74f2d
JB
2177 if (current->journal_info == trans)
2178 current->journal_info = NULL;
2179
2c90e5d6 2180 kmem_cache_free(btrfs_trans_handle_cachep, trans);
24bbcf04 2181
8a733013
ZL
2182 if (current != root->fs_info->transaction_kthread &&
2183 current != root->fs_info->cleaner_kthread)
24bbcf04
YZ
2184 btrfs_run_delayed_iputs(root);
2185
79154b1b 2186 return ret;
49b25e05 2187
6cf7f77e
WS
2188scrub_continue:
2189 btrfs_scrub_continue(root);
49b25e05 2190cleanup_transaction:
0e721106 2191 btrfs_trans_release_metadata(trans, root);
4fbcdf66 2192 btrfs_trans_release_chunk_metadata(trans);
0e721106 2193 trans->block_rsv = NULL;
272d26d0
MX
2194 if (trans->qgroup_reserved) {
2195 btrfs_qgroup_free(root, trans->qgroup_reserved);
2196 trans->qgroup_reserved = 0;
2197 }
c2cf52eb 2198 btrfs_warn(root->fs_info, "Skipping commit of aborted transaction.");
49b25e05
JM
2199 if (current->journal_info == trans)
2200 current->journal_info = NULL;
7b8b92af 2201 cleanup_transaction(trans, root, ret);
49b25e05
JM
2202
2203 return ret;
79154b1b
CM
2204}
2205
d352ac68 2206/*
9d1a2a3a
DS
2207 * return < 0 if error
2208 * 0 if there are no more dead_roots at the time of call
2209 * 1 if there are more to be processed, call me again
2210 *
2211 * The return value indicates there are certainly more snapshots to delete, but
2212 * if a new one appears during processing, it may return 0. We don't mind,
2213 * because btrfs_commit_super will poke the cleaner thread and it will process it a
2214 * few seconds later.
d352ac68 2215 */
9d1a2a3a 2216int btrfs_clean_one_deleted_snapshot(struct btrfs_root *root)
e9d0b13b 2217{
9d1a2a3a 2218 int ret;
5d4f98a2
YZ
2219 struct btrfs_fs_info *fs_info = root->fs_info;
2220
a4abeea4 2221 spin_lock(&fs_info->trans_lock);
9d1a2a3a
DS
2222 if (list_empty(&fs_info->dead_roots)) {
2223 spin_unlock(&fs_info->trans_lock);
2224 return 0;
2225 }
2226 root = list_first_entry(&fs_info->dead_roots,
2227 struct btrfs_root, root_list);
cfad392b 2228 list_del_init(&root->root_list);
a4abeea4 2229 spin_unlock(&fs_info->trans_lock);
e9d0b13b 2230
efe120a0 2231 pr_debug("BTRFS: cleaner removing %llu\n", root->objectid);
76dda93c 2232
9d1a2a3a 2233 btrfs_kill_all_delayed_nodes(root);
16cdcec7 2234
9d1a2a3a
DS
2235 if (btrfs_header_backref_rev(root->node) <
2236 BTRFS_MIXED_BACKREF_REV)
2237 ret = btrfs_drop_snapshot(root, NULL, 0, 0);
2238 else
2239 ret = btrfs_drop_snapshot(root, NULL, 1, 0);
32471dc2 2240
6596a928 2241 return (ret < 0) ? 0 : 1;
e9d0b13b 2242}
572d9ab7
DS
2243
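/*
 * Move the mount-option changes queued in fs_info->pending_changes while
 * a transaction was running into the live mount options; unknown bits
 * are warned about and dropped.
 */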
2244void btrfs_apply_pending_changes(struct btrfs_fs_info *fs_info)
2245{
2246 unsigned long prev;
2247 unsigned long bit;
2248
6c9fe14f 2249 prev = xchg(&fs_info->pending_changes, 0);
572d9ab7
DS
2250 if (!prev)
2251 return;
2252
7e1876ac
DS
2253 bit = 1 << BTRFS_PENDING_SET_INODE_MAP_CACHE;
2254 if (prev & bit)
2255 btrfs_set_opt(fs_info->mount_opt, INODE_MAP_CACHE);
2256 prev &= ~bit;
2257
2258 bit = 1 << BTRFS_PENDING_CLEAR_INODE_MAP_CACHE;
2259 if (prev & bit)
2260 btrfs_clear_opt(fs_info->mount_opt, INODE_MAP_CACHE);
2261 prev &= ~bit;
2262
d51033d0
DS
2263 bit = 1 << BTRFS_PENDING_COMMIT;
2264 if (prev & bit)
2265 btrfs_debug(fs_info, "pending commit done");
2266 prev &= ~bit;
2267
572d9ab7
DS
2268 if (prev)
2269 btrfs_warn(fs_info,
2270 "unknown pending changes left 0x%lx, ignoring", prev);
2271}