/*
 * Copyright (C) 2007 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 */

#include <linux/fs.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/writeback.h>
#include <linux/pagemap.h>
#include <linux/blkdev.h>
#include <linux/uuid.h>
#include "ctree.h"
#include "disk-io.h"
#include "transaction.h"
#include "locking.h"
#include "tree-log.h"
#include "inode-map.h"
#include "volumes.h"

#define BTRFS_ROOT_TRANS_TAG 0

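/*
 * drop a reference on a transaction struct and free it once the last
 * reference is gone.  By then the transaction must be off the global
 * trans_list and must have no delayed refs left.
 */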
void put_transaction(struct btrfs_transaction *transaction)
{
	WARN_ON(atomic_read(&transaction->use_count) == 0);
	if (atomic_dec_and_test(&transaction->use_count)) {
		BUG_ON(!list_empty(&transaction->list));
		WARN_ON(transaction->delayed_refs.root.rb_node);
		memset(transaction, 0, sizeof(*transaction));
		kmem_cache_free(btrfs_transaction_cachep, transaction);
	}
}

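/*
 * swap a tree's commit root to its current root node, dropping the
 * reference on the old commit root
 */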
static noinline void switch_commit_root(struct btrfs_root *root)
{
	free_extent_buffer(root->commit_root);
	root->commit_root = btrfs_root_node(root);
}

/*
 * either allocate a new transaction or hop into the existing one
 */
static noinline int join_transaction(struct btrfs_root *root, int type)
{
	struct btrfs_transaction *cur_trans;
	struct btrfs_fs_info *fs_info = root->fs_info;

	spin_lock(&fs_info->trans_lock);
loop:
	/* The file system has been taken offline. No new transactions. */
	if (fs_info->fs_state & BTRFS_SUPER_FLAG_ERROR) {
		spin_unlock(&fs_info->trans_lock);
		return -EROFS;
	}

	if (fs_info->trans_no_join) {
		/*
		 * If we are JOIN_NOLOCK we're already committing a current
		 * transaction, we just need a handle to deal with something
		 * when committing the transaction, such as inode cache and
		 * space cache. It is a special case.
		 */
		if (type != TRANS_JOIN_NOLOCK) {
			spin_unlock(&fs_info->trans_lock);
			return -EBUSY;
		}
	}

	cur_trans = fs_info->running_transaction;
	if (cur_trans) {
		if (cur_trans->aborted) {
			spin_unlock(&fs_info->trans_lock);
			return cur_trans->aborted;
		}
		atomic_inc(&cur_trans->use_count);
		atomic_inc(&cur_trans->num_writers);
		cur_trans->num_joined++;
		spin_unlock(&fs_info->trans_lock);
		return 0;
	}
	spin_unlock(&fs_info->trans_lock);

	/*
	 * If we are ATTACH, we just want to catch the current transaction
	 * and commit it. If there is no transaction, just return ENOENT.
	 */
	if (type == TRANS_ATTACH)
		return -ENOENT;

	cur_trans = kmem_cache_alloc(btrfs_transaction_cachep, GFP_NOFS);
	if (!cur_trans)
		return -ENOMEM;

	spin_lock(&fs_info->trans_lock);
	if (fs_info->running_transaction) {
		/*
		 * someone started a transaction after we unlocked.  Make sure
		 * to redo the trans_no_join checks above
		 */
		kmem_cache_free(btrfs_transaction_cachep, cur_trans);
		cur_trans = fs_info->running_transaction;
		goto loop;
	} else if (fs_info->fs_state & BTRFS_SUPER_FLAG_ERROR) {
		spin_unlock(&fs_info->trans_lock);
		kmem_cache_free(btrfs_transaction_cachep, cur_trans);
		return -EROFS;
	}

	atomic_set(&cur_trans->num_writers, 1);
	cur_trans->num_joined = 0;
	init_waitqueue_head(&cur_trans->writer_wait);
	init_waitqueue_head(&cur_trans->commit_wait);
	cur_trans->in_commit = 0;
	cur_trans->blocked = 0;
	/*
	 * One for this trans handle, one so it will live on until we
	 * commit the transaction.
	 */
	atomic_set(&cur_trans->use_count, 2);
	cur_trans->commit_done = 0;
	cur_trans->start_time = get_seconds();

	cur_trans->delayed_refs.root = RB_ROOT;
	cur_trans->delayed_refs.num_entries = 0;
	cur_trans->delayed_refs.num_heads_ready = 0;
	cur_trans->delayed_refs.num_heads = 0;
	cur_trans->delayed_refs.flushing = 0;
	cur_trans->delayed_refs.run_delayed_start = 0;

	/*
	 * although the tree mod log is per file system and not per transaction,
	 * the log must never go across transaction boundaries.
	 */
	smp_mb();
	if (!list_empty(&fs_info->tree_mod_seq_list)) {
		printk(KERN_ERR "btrfs: tree_mod_seq_list not empty when "
			"creating a fresh transaction\n");
		WARN_ON(1);
	}
	if (!RB_EMPTY_ROOT(&fs_info->tree_mod_log)) {
		printk(KERN_ERR "btrfs: tree_mod_log rb tree not empty when "
			"creating a fresh transaction\n");
		WARN_ON(1);
	}
	atomic_set(&fs_info->tree_mod_seq, 0);

	spin_lock_init(&cur_trans->commit_lock);
	spin_lock_init(&cur_trans->delayed_refs.lock);

	INIT_LIST_HEAD(&cur_trans->pending_snapshots);
	list_add_tail(&cur_trans->list, &fs_info->trans_list);
	extent_io_tree_init(&cur_trans->dirty_pages,
			    fs_info->btree_inode->i_mapping);
	fs_info->generation++;
	cur_trans->transid = fs_info->generation;
	fs_info->running_transaction = cur_trans;
	cur_trans->aborted = 0;
	spin_unlock(&fs_info->trans_lock);

	return 0;
}

/*
 * this does all the record keeping required to make sure that a reference
 * counted root is properly recorded in a given transaction.  This is required
 * to make sure the old root from before we joined the transaction is deleted
 * when the transaction commits
 */
static int record_root_in_trans(struct btrfs_trans_handle *trans,
				struct btrfs_root *root)
{
	if (root->ref_cows && root->last_trans < trans->transid) {
		WARN_ON(root == root->fs_info->extent_root);
		WARN_ON(root->commit_root != root->node);

		/*
		 * see below for in_trans_setup usage rules
		 * we have the reloc mutex held now, so there
		 * is only one writer in this function
		 */
		root->in_trans_setup = 1;

		/* make sure readers find in_trans_setup before
		 * they find our root->last_trans update
		 */
		smp_wmb();

		spin_lock(&root->fs_info->fs_roots_radix_lock);
		if (root->last_trans == trans->transid) {
			spin_unlock(&root->fs_info->fs_roots_radix_lock);
			return 0;
		}
		radix_tree_tag_set(&root->fs_info->fs_roots_radix,
				   (unsigned long)root->root_key.objectid,
				   BTRFS_ROOT_TRANS_TAG);
		spin_unlock(&root->fs_info->fs_roots_radix_lock);
		root->last_trans = trans->transid;

		/* this is pretty tricky.  We don't want to
		 * take the relocation lock in btrfs_record_root_in_trans
		 * unless we're really doing the first setup for this root in
		 * this transaction.
		 *
		 * Normally we'd use root->last_trans as a flag to decide
		 * if we want to take the expensive mutex.
		 *
		 * But, we have to set root->last_trans before we
		 * init the relocation root, otherwise, we trip over warnings
		 * in ctree.c.  The solution used here is to flag ourselves
		 * with root->in_trans_setup.  When this is 1, we're still
		 * fixing up the reloc trees and everyone must wait.
		 *
		 * When this is zero, they can trust root->last_trans and fly
		 * through btrfs_record_root_in_trans without having to take the
		 * lock.  smp_wmb() makes sure that all the writes above are
		 * done before we pop in the zero below
		 */
		btrfs_init_reloc_root(trans, root);
		smp_wmb();
		root->in_trans_setup = 0;
	}
	return 0;
}


int btrfs_record_root_in_trans(struct btrfs_trans_handle *trans,
			       struct btrfs_root *root)
{
	if (!root->ref_cows)
		return 0;

	/*
	 * see record_root_in_trans for comments about in_trans_setup usage
	 * and barriers
	 */
	smp_rmb();
	if (root->last_trans == trans->transid &&
	    !root->in_trans_setup)
		return 0;

	mutex_lock(&root->fs_info->reloc_mutex);
	record_root_in_trans(trans, root);
	mutex_unlock(&root->fs_info->reloc_mutex);

	return 0;
}

/* wait for commit against the current transaction to become unblocked
 * when this is done, it is safe to start a new transaction, but the current
 * transaction might not be fully on disk.
 */
static void wait_current_trans(struct btrfs_root *root)
{
	struct btrfs_transaction *cur_trans;

	spin_lock(&root->fs_info->trans_lock);
	cur_trans = root->fs_info->running_transaction;
	if (cur_trans && cur_trans->blocked) {
		atomic_inc(&cur_trans->use_count);
		spin_unlock(&root->fs_info->trans_lock);

		wait_event(root->fs_info->transaction_wait,
			   !cur_trans->blocked);
		put_transaction(cur_trans);
	} else {
		spin_unlock(&root->fs_info->trans_lock);
	}
}

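/*
 * decide whether a transaction start should wait on a blocked commit:
 * never during log replay, always for userspace (ioctl-started)
 * transactions, and for TRANS_START only when no ioctl transaction
 * is currently open
 */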
static int may_wait_transaction(struct btrfs_root *root, int type)
{
	if (root->fs_info->log_root_recovering)
		return 0;

	if (type == TRANS_USERSPACE)
		return 1;

	if (type == TRANS_START &&
	    !atomic_read(&root->fs_info->open_ioctl_trans))
		return 1;

	return 0;
}

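/*
 * common helper behind all of the btrfs_*_transaction() wrappers.
 * It reserves metadata (and qgroup) space for num_items items when
 * asked to, joins or creates the running transaction, and hands back
 * a handle, stashing it in current->journal_info so nested starts
 * can reuse it (except for userspace transactions).
 */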
static struct btrfs_trans_handle *
start_transaction(struct btrfs_root *root, u64 num_items, int type,
		  enum btrfs_reserve_flush_enum flush)
{
	struct btrfs_trans_handle *h;
	struct btrfs_transaction *cur_trans;
	u64 num_bytes = 0;
	int ret;
	u64 qgroup_reserved = 0;

	if (root->fs_info->fs_state & BTRFS_SUPER_FLAG_ERROR)
		return ERR_PTR(-EROFS);

	if (current->journal_info) {
		WARN_ON(type != TRANS_JOIN && type != TRANS_JOIN_NOLOCK);
		h = current->journal_info;
		h->use_count++;
		h->orig_rsv = h->block_rsv;
		h->block_rsv = NULL;
		goto got_it;
	}

	/*
	 * Do the reservation before we join the transaction so we can do all
	 * the appropriate flushing if need be.
	 */
	if (num_items > 0 && root != root->fs_info->chunk_root) {
		if (root->fs_info->quota_enabled &&
		    is_fstree(root->root_key.objectid)) {
			qgroup_reserved = num_items * root->leafsize;
			ret = btrfs_qgroup_reserve(root, qgroup_reserved);
			if (ret)
				return ERR_PTR(ret);
		}

		num_bytes = btrfs_calc_trans_metadata_size(root, num_items);
		ret = btrfs_block_rsv_add(root,
					  &root->fs_info->trans_block_rsv,
					  num_bytes, flush);
		if (ret)
			return ERR_PTR(ret);
	}
again:
	h = kmem_cache_alloc(btrfs_trans_handle_cachep, GFP_NOFS);
	if (!h)
		return ERR_PTR(-ENOMEM);

	/*
	 * If we are JOIN_NOLOCK we're already committing a transaction and
	 * waiting on this guy, so we don't need to do the sb_start_intwrite
	 * because we're already holding a ref.  We need this because we could
	 * have raced in and done an fsync() on a file which can kick a commit
	 * and then we deadlock with somebody doing a freeze.
	 *
	 * If we are ATTACH, it means we just want to catch the current
	 * transaction and commit it, so we needn't do sb_start_intwrite().
	 */
	if (type < TRANS_JOIN_NOLOCK)
		sb_start_intwrite(root->fs_info->sb);

	if (may_wait_transaction(root, type))
		wait_current_trans(root);

	do {
		ret = join_transaction(root, type);
		if (ret == -EBUSY)
			wait_current_trans(root);
	} while (ret == -EBUSY);

	if (ret < 0) {
		/* We must get the transaction if we are JOIN_NOLOCK. */
		BUG_ON(type == TRANS_JOIN_NOLOCK);

		if (type < TRANS_JOIN_NOLOCK)
			sb_end_intwrite(root->fs_info->sb);
		kmem_cache_free(btrfs_trans_handle_cachep, h);
		return ERR_PTR(ret);
	}

	cur_trans = root->fs_info->running_transaction;

	h->transid = cur_trans->transid;
	h->transaction = cur_trans;
	h->blocks_used = 0;
	h->bytes_reserved = 0;
	h->root = root;
	h->delayed_ref_updates = 0;
	h->use_count = 1;
	h->adding_csums = 0;
	h->block_rsv = NULL;
	h->orig_rsv = NULL;
	h->aborted = 0;
	h->qgroup_reserved = qgroup_reserved;
	h->delayed_ref_elem.seq = 0;
	h->type = type;
	INIT_LIST_HEAD(&h->qgroup_ref_list);
	INIT_LIST_HEAD(&h->new_bgs);

	smp_mb();
	if (cur_trans->blocked && may_wait_transaction(root, type)) {
		btrfs_commit_transaction(h, root);
		goto again;
	}

	if (num_bytes) {
		trace_btrfs_space_reservation(root->fs_info, "transaction",
					      h->transid, num_bytes, 1);
		h->block_rsv = &root->fs_info->trans_block_rsv;
		h->bytes_reserved = num_bytes;
	}

got_it:
	btrfs_record_root_in_trans(h, root);

	if (!current->journal_info && type != TRANS_USERSPACE)
		current->journal_info = h;
	return h;
}

struct btrfs_trans_handle *btrfs_start_transaction(struct btrfs_root *root,
						   int num_items)
{
	return start_transaction(root, num_items, TRANS_START,
				 BTRFS_RESERVE_FLUSH_ALL);
}

struct btrfs_trans_handle *btrfs_start_transaction_lflush(
					struct btrfs_root *root, int num_items)
{
	return start_transaction(root, num_items, TRANS_START,
				 BTRFS_RESERVE_FLUSH_LIMIT);
}

struct btrfs_trans_handle *btrfs_join_transaction(struct btrfs_root *root)
{
	return start_transaction(root, 0, TRANS_JOIN, 0);
}

struct btrfs_trans_handle *btrfs_join_transaction_nolock(struct btrfs_root *root)
{
	return start_transaction(root, 0, TRANS_JOIN_NOLOCK, 0);
}

struct btrfs_trans_handle *btrfs_start_ioctl_transaction(struct btrfs_root *root)
{
	return start_transaction(root, 0, TRANS_USERSPACE, 0);
}

struct btrfs_trans_handle *btrfs_attach_transaction(struct btrfs_root *root)
{
	return start_transaction(root, 0, TRANS_ATTACH, 0);
}

/* wait for a transaction commit to be fully complete */
static noinline void wait_for_commit(struct btrfs_root *root,
				     struct btrfs_transaction *commit)
{
	wait_event(commit->commit_wait, commit->commit_done);
}

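/*
 * wait for the transaction with the given transid to commit.  With a
 * transid of 0, wait for the newest committing transaction, if any.
 */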
int btrfs_wait_for_commit(struct btrfs_root *root, u64 transid)
{
	struct btrfs_transaction *cur_trans = NULL, *t;
	int ret;

	ret = 0;
	if (transid) {
		if (transid <= root->fs_info->last_trans_committed)
			goto out;

		/* find specified transaction */
		spin_lock(&root->fs_info->trans_lock);
		list_for_each_entry(t, &root->fs_info->trans_list, list) {
			if (t->transid == transid) {
				cur_trans = t;
				atomic_inc(&cur_trans->use_count);
				break;
			}
			if (t->transid > transid)
				break;
		}
		spin_unlock(&root->fs_info->trans_lock);
		ret = -EINVAL;
		if (!cur_trans)
			goto out;  /* bad transid */
	} else {
		/* find newest transaction that is committing | committed */
		spin_lock(&root->fs_info->trans_lock);
		list_for_each_entry_reverse(t, &root->fs_info->trans_list,
					    list) {
			if (t->in_commit) {
				if (t->commit_done)
					break;
				cur_trans = t;
				atomic_inc(&cur_trans->use_count);
				break;
			}
		}
		spin_unlock(&root->fs_info->trans_lock);
		if (!cur_trans)
			goto out;  /* nothing committing|committed */
	}

	wait_for_commit(root, cur_trans);

	put_transaction(cur_trans);
	ret = 0;
out:
	return ret;
}

void btrfs_throttle(struct btrfs_root *root)
{
	if (!atomic_read(&root->fs_info->open_ioctl_trans))
		wait_current_trans(root);
}

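/*
 * returns 1 when the global block reserve is running low enough that
 * the transaction should be wrapped up and committed
 */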
static int should_end_transaction(struct btrfs_trans_handle *trans,
				  struct btrfs_root *root)
{
	int ret;

	ret = btrfs_block_rsv_check(root, &root->fs_info->global_block_rsv, 5);
	return ret ? 1 : 0;
}

int btrfs_should_end_transaction(struct btrfs_trans_handle *trans,
				 struct btrfs_root *root)
{
	struct btrfs_transaction *cur_trans = trans->transaction;
	int updates;
	int err;

	smp_mb();
	if (cur_trans->blocked || cur_trans->delayed_refs.flushing)
		return 1;

	updates = trans->delayed_ref_updates;
	trans->delayed_ref_updates = 0;
	if (updates) {
		err = btrfs_run_delayed_refs(trans, root, updates);
		if (err) /* Error code will also eval true */
			return err;
	}

	return should_end_transaction(trans, root);
}

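/*
 * drop a transaction handle: run a batch of the delayed refs it
 * generated, release its metadata reservation, and either wake the
 * transaction kthread or commit outright once the transaction has
 * been marked blocked and we are the throttling caller
 */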
static int __btrfs_end_transaction(struct btrfs_trans_handle *trans,
				   struct btrfs_root *root, int throttle)
{
	struct btrfs_transaction *cur_trans = trans->transaction;
	struct btrfs_fs_info *info = root->fs_info;
	int count = 0;
	int lock = (trans->type != TRANS_JOIN_NOLOCK);
	int err = 0;

	if (--trans->use_count) {
		trans->block_rsv = trans->orig_rsv;
		return 0;
	}

	/*
	 * do the qgroup accounting as early as possible
	 */
	err = btrfs_delayed_refs_qgroup_accounting(trans, info);

	btrfs_trans_release_metadata(trans, root);
	trans->block_rsv = NULL;
	/*
	 * the same root has to be passed to start_transaction and
	 * end_transaction. Subvolume quota depends on this.
	 */
	WARN_ON(trans->root != root);

	if (trans->qgroup_reserved) {
		btrfs_qgroup_free(root, trans->qgroup_reserved);
		trans->qgroup_reserved = 0;
	}

	if (!list_empty(&trans->new_bgs))
		btrfs_create_pending_block_groups(trans, root);

	while (count < 2) {
		unsigned long cur = trans->delayed_ref_updates;
		trans->delayed_ref_updates = 0;
		if (cur &&
		    trans->transaction->delayed_refs.num_heads_ready > 64) {
			trans->delayed_ref_updates = 0;
			btrfs_run_delayed_refs(trans, root, cur);
		} else {
			break;
		}
		count++;
	}
	btrfs_trans_release_metadata(trans, root);
	trans->block_rsv = NULL;

	if (!list_empty(&trans->new_bgs))
		btrfs_create_pending_block_groups(trans, root);

	if (lock && !atomic_read(&root->fs_info->open_ioctl_trans) &&
	    should_end_transaction(trans, root)) {
		trans->transaction->blocked = 1;
		smp_wmb();
	}

	if (lock && cur_trans->blocked && !cur_trans->in_commit) {
		if (throttle) {
			/*
			 * We may race with somebody else here so end up having
			 * to call end_transaction on ourselves again, so inc
			 * our use_count.
			 */
			trans->use_count++;
			return btrfs_commit_transaction(trans, root);
		} else {
			wake_up_process(info->transaction_kthread);
		}
	}

	if (trans->type < TRANS_JOIN_NOLOCK)
		sb_end_intwrite(root->fs_info->sb);

	WARN_ON(cur_trans != info->running_transaction);
	WARN_ON(atomic_read(&cur_trans->num_writers) < 1);
	atomic_dec(&cur_trans->num_writers);

	smp_mb();
	if (waitqueue_active(&cur_trans->writer_wait))
		wake_up(&cur_trans->writer_wait);
	put_transaction(cur_trans);

	if (current->journal_info == trans)
		current->journal_info = NULL;

	if (throttle)
		btrfs_run_delayed_iputs(root);

	if (trans->aborted ||
	    root->fs_info->fs_state & BTRFS_SUPER_FLAG_ERROR) {
		err = -EIO;
	}
	assert_qgroups_uptodate(trans);

	memset(trans, 0, sizeof(*trans));
	kmem_cache_free(btrfs_trans_handle_cachep, trans);
	return err;
}

int btrfs_end_transaction(struct btrfs_trans_handle *trans,
			  struct btrfs_root *root)
{
	return __btrfs_end_transaction(trans, root, 0);
}

int btrfs_end_transaction_throttle(struct btrfs_trans_handle *trans,
				   struct btrfs_root *root)
{
	return __btrfs_end_transaction(trans, root, 1);
}

int btrfs_end_transaction_dmeta(struct btrfs_trans_handle *trans,
				struct btrfs_root *root)
{
	return __btrfs_end_transaction(trans, root, 1);
}

/*
 * when btree blocks are allocated, they have some corresponding bits set for
 * them in one of two extent_io trees.  This is used to make sure all of
 * those extents are sent to disk but does not wait on them
 */
int btrfs_write_marked_extents(struct btrfs_root *root,
			       struct extent_io_tree *dirty_pages, int mark)
{
	int err = 0;
	int werr = 0;
	struct address_space *mapping = root->fs_info->btree_inode->i_mapping;
	struct extent_state *cached_state = NULL;
	u64 start = 0;
	u64 end;

	while (!find_first_extent_bit(dirty_pages, start, &start, &end,
				      mark, &cached_state)) {
		convert_extent_bit(dirty_pages, start, end, EXTENT_NEED_WAIT,
				   mark, &cached_state, GFP_NOFS);
		cached_state = NULL;
		err = filemap_fdatawrite_range(mapping, start, end);
		if (err)
			werr = err;
		cond_resched();
		start = end + 1;
	}
	if (err)
		werr = err;
	return werr;
}

/*
 * when btree blocks are allocated, they have some corresponding bits set for
 * them in one of two extent_io trees.  This is used to make sure all of
 * those extents are on disk for transaction or log commit.  We wait
 * on all the pages and clear them from the dirty pages state tree
 */
int btrfs_wait_marked_extents(struct btrfs_root *root,
			      struct extent_io_tree *dirty_pages, int mark)
{
	int err = 0;
	int werr = 0;
	struct address_space *mapping = root->fs_info->btree_inode->i_mapping;
	struct extent_state *cached_state = NULL;
	u64 start = 0;
	u64 end;

	while (!find_first_extent_bit(dirty_pages, start, &start, &end,
				      EXTENT_NEED_WAIT, &cached_state)) {
		clear_extent_bit(dirty_pages, start, end, EXTENT_NEED_WAIT,
				 0, 0, &cached_state, GFP_NOFS);
		err = filemap_fdatawait_range(mapping, start, end);
		if (err)
			werr = err;
		cond_resched();
		start = end + 1;
	}
	if (err)
		werr = err;
	return werr;
}

/*
 * when btree blocks are allocated, they have some corresponding bits set for
 * them in one of two extent_io trees.  This is used to make sure all of
 * those extents are on disk for transaction or log commit
 */
int btrfs_write_and_wait_marked_extents(struct btrfs_root *root,
				struct extent_io_tree *dirty_pages, int mark)
{
	int ret;
	int ret2;

	ret = btrfs_write_marked_extents(root, dirty_pages, mark);
	ret2 = btrfs_wait_marked_extents(root, dirty_pages, mark);

	if (ret)
		return ret;
	if (ret2)
		return ret2;
	return 0;
}

int btrfs_write_and_wait_transaction(struct btrfs_trans_handle *trans,
				     struct btrfs_root *root)
{
	if (!trans || !trans->transaction) {
		struct inode *btree_inode;
		btree_inode = root->fs_info->btree_inode;
		return filemap_write_and_wait(btree_inode->i_mapping);
	}
	return btrfs_write_and_wait_marked_extents(root,
					   &trans->transaction->dirty_pages,
					   EXTENT_DIRTY);
}

/*
 * this is used to update the root pointer in the tree of tree roots.
 *
 * But, in the case of the extent allocation tree, updating the root
 * pointer may allocate blocks which may change the root of the extent
 * allocation tree.
 *
 * So, this loops and repeats and makes sure the cowonly root didn't
 * change while the root pointer was being updated in the metadata.
 */
static int update_cowonly_root(struct btrfs_trans_handle *trans,
			       struct btrfs_root *root)
{
	int ret;
	u64 old_root_bytenr;
	u64 old_root_used;
	struct btrfs_root *tree_root = root->fs_info->tree_root;

	old_root_used = btrfs_root_used(&root->root_item);
	btrfs_write_dirty_block_groups(trans, root);

	while (1) {
		old_root_bytenr = btrfs_root_bytenr(&root->root_item);
		if (old_root_bytenr == root->node->start &&
		    old_root_used == btrfs_root_used(&root->root_item))
			break;

		btrfs_set_root_node(&root->root_item, root->node);
		ret = btrfs_update_root(trans, tree_root,
					&root->root_key,
					&root->root_item);
		if (ret)
			return ret;

		old_root_used = btrfs_root_used(&root->root_item);
		ret = btrfs_write_dirty_block_groups(trans, root);
		if (ret)
			return ret;
	}

	if (root != root->fs_info->extent_root)
		switch_commit_root(root);

	return 0;
}

/*
 * update all the cowonly tree roots on disk
 *
 * The error handling in this function may not be obvious.  Any of the
 * failures will cause the file system to go offline.  We still need
 * to clean up the delayed refs.
 */
static noinline int commit_cowonly_roots(struct btrfs_trans_handle *trans,
					 struct btrfs_root *root)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct list_head *next;
	struct extent_buffer *eb;
	int ret;

	ret = btrfs_run_delayed_refs(trans, root, (unsigned long)-1);
	if (ret)
		return ret;

	eb = btrfs_lock_root_node(fs_info->tree_root);
	ret = btrfs_cow_block(trans, fs_info->tree_root, eb, NULL,
			      0, &eb);
	btrfs_tree_unlock(eb);
	free_extent_buffer(eb);

	if (ret)
		return ret;

	ret = btrfs_run_delayed_refs(trans, root, (unsigned long)-1);
	if (ret)
		return ret;

	ret = btrfs_run_dev_stats(trans, root->fs_info);
	BUG_ON(ret);

	ret = btrfs_run_qgroups(trans, root->fs_info);
	BUG_ON(ret);

	/* run_qgroups might have added some more refs */
	ret = btrfs_run_delayed_refs(trans, root, (unsigned long)-1);
	BUG_ON(ret);

	while (!list_empty(&fs_info->dirty_cowonly_roots)) {
		next = fs_info->dirty_cowonly_roots.next;
		list_del_init(next);
		root = list_entry(next, struct btrfs_root, dirty_list);

		ret = update_cowonly_root(trans, root);
		if (ret)
			return ret;
	}

	down_write(&fs_info->extent_commit_sem);
	switch_commit_root(fs_info->extent_root);
	up_write(&fs_info->extent_commit_sem);

	return 0;
}

/*
 * dead roots are old snapshots that need to be deleted.  This allocates
 * a dirty root struct and adds it into the list of dead roots that need to
 * be deleted
 */
int btrfs_add_dead_root(struct btrfs_root *root)
{
	spin_lock(&root->fs_info->trans_lock);
	list_add(&root->root_list, &root->fs_info->dead_roots);
	spin_unlock(&root->fs_info->trans_lock);
	return 0;
}

/*
 * write out all the dirty fs-tree (subvolume) roots touched in this
 * transaction and update them in the tree of tree roots
 */
static noinline int commit_fs_roots(struct btrfs_trans_handle *trans,
				    struct btrfs_root *root)
{
	struct btrfs_root *gang[8];
	struct btrfs_fs_info *fs_info = root->fs_info;
	int i;
	int ret;
	int err = 0;

	spin_lock(&fs_info->fs_roots_radix_lock);
	while (1) {
		ret = radix_tree_gang_lookup_tag(&fs_info->fs_roots_radix,
						 (void **)gang, 0,
						 ARRAY_SIZE(gang),
						 BTRFS_ROOT_TRANS_TAG);
		if (ret == 0)
			break;
		for (i = 0; i < ret; i++) {
			root = gang[i];
			radix_tree_tag_clear(&fs_info->fs_roots_radix,
					(unsigned long)root->root_key.objectid,
					BTRFS_ROOT_TRANS_TAG);
			spin_unlock(&fs_info->fs_roots_radix_lock);

			btrfs_free_log(trans, root);
			btrfs_update_reloc_root(trans, root);
			btrfs_orphan_commit_root(trans, root);

			btrfs_save_ino_cache(root, trans);

			/* see comments in should_cow_block() */
			root->force_cow = 0;
			smp_wmb();

			if (root->commit_root != root->node) {
				mutex_lock(&root->fs_commit_mutex);
				switch_commit_root(root);
				btrfs_unpin_free_ino(root);
				mutex_unlock(&root->fs_commit_mutex);

				btrfs_set_root_node(&root->root_item,
						    root->node);
			}

			err = btrfs_update_root(trans, fs_info->tree_root,
						&root->root_key,
						&root->root_item);
			spin_lock(&fs_info->fs_roots_radix_lock);
			if (err)
				break;
		}
	}
	spin_unlock(&fs_info->fs_roots_radix_lock);
	return err;
}

/*
 * defrag a given btree.  If cacheonly == 1, this won't read from the disk,
 * otherwise every leaf in the btree is read and defragged.
 */
int btrfs_defrag_root(struct btrfs_root *root, int cacheonly)
{
	struct btrfs_fs_info *info = root->fs_info;
	struct btrfs_trans_handle *trans;
	int ret;
	unsigned long nr;

	if (xchg(&root->defrag_running, 1))
		return 0;

	while (1) {
		trans = btrfs_start_transaction(root, 0);
		if (IS_ERR(trans))
			return PTR_ERR(trans);

		ret = btrfs_defrag_leaves(trans, root, cacheonly);

		nr = trans->blocks_used;
		btrfs_end_transaction(trans, root);
		btrfs_btree_balance_dirty(info->tree_root, nr);
		cond_resched();

		if (btrfs_fs_closing(root->fs_info) || ret != -EAGAIN)
			break;
	}
	root->defrag_running = 0;
	return ret;
}

/*
 * new snapshots need to be created at a very specific time in the
 * transaction commit.  This does the actual creation
 */
static noinline int create_pending_snapshot(struct btrfs_trans_handle *trans,
				   struct btrfs_fs_info *fs_info,
				   struct btrfs_pending_snapshot *pending)
{
	struct btrfs_key key;
	struct btrfs_root_item *new_root_item;
	struct btrfs_root *tree_root = fs_info->tree_root;
	struct btrfs_root *root = pending->root;
	struct btrfs_root *parent_root;
	struct btrfs_block_rsv *rsv;
	struct inode *parent_inode;
	struct btrfs_path *path;
	struct btrfs_dir_item *dir_item;
	struct dentry *parent;
	struct dentry *dentry;
	struct extent_buffer *tmp;
	struct extent_buffer *old;
	struct timespec cur_time = CURRENT_TIME;
	int ret;
	u64 to_reserve = 0;
	u64 index = 0;
	u64 objectid;
	u64 root_flags;
	uuid_le new_uuid;

	path = btrfs_alloc_path();
	if (!path) {
		ret = pending->error = -ENOMEM;
		goto path_alloc_fail;
	}

	new_root_item = kmalloc(sizeof(*new_root_item), GFP_NOFS);
	if (!new_root_item) {
		ret = pending->error = -ENOMEM;
		goto root_item_alloc_fail;
	}

	ret = btrfs_find_free_objectid(tree_root, &objectid);
	if (ret) {
		pending->error = ret;
		goto no_free_objectid;
	}

	btrfs_reloc_pre_snapshot(trans, pending, &to_reserve);

	if (to_reserve > 0) {
		ret = btrfs_block_rsv_add(root, &pending->block_rsv,
					  to_reserve,
					  BTRFS_RESERVE_NO_FLUSH);
		if (ret) {
			pending->error = ret;
			goto no_free_objectid;
		}
	}

	ret = btrfs_qgroup_inherit(trans, fs_info, root->root_key.objectid,
				   objectid, pending->inherit);
	if (ret) {
		pending->error = ret;
		goto no_free_objectid;
	}

	key.objectid = objectid;
	key.offset = (u64)-1;
	key.type = BTRFS_ROOT_ITEM_KEY;

	rsv = trans->block_rsv;
	trans->block_rsv = &pending->block_rsv;

	dentry = pending->dentry;
	parent = dget_parent(dentry);
	parent_inode = parent->d_inode;
	parent_root = BTRFS_I(parent_inode)->root;
	record_root_in_trans(trans, parent_root);

	/*
	 * insert the directory item
	 */
	ret = btrfs_set_inode_index(parent_inode, &index);
	BUG_ON(ret); /* -ENOMEM */

	/* check if there is a file/dir which has the same name. */
	dir_item = btrfs_lookup_dir_item(NULL, parent_root, path,
					 btrfs_ino(parent_inode),
					 dentry->d_name.name,
					 dentry->d_name.len, 0);
	if (dir_item != NULL && !IS_ERR(dir_item)) {
		pending->error = -EEXIST;
		goto fail;
	} else if (IS_ERR(dir_item)) {
		ret = PTR_ERR(dir_item);
		btrfs_abort_transaction(trans, root, ret);
		goto fail;
	}
	btrfs_release_path(path);

	/*
	 * pull in the delayed directory update
	 * and the delayed inode item
	 * otherwise we corrupt the FS during
	 * snapshot
	 */
	ret = btrfs_run_delayed_items(trans, root);
	if (ret) {	/* Transaction aborted */
		btrfs_abort_transaction(trans, root, ret);
		goto fail;
	}

	record_root_in_trans(trans, root);
	btrfs_set_root_last_snapshot(&root->root_item, trans->transid);
	memcpy(new_root_item, &root->root_item, sizeof(*new_root_item));
	btrfs_check_and_init_root_item(new_root_item);

	root_flags = btrfs_root_flags(new_root_item);
	if (pending->readonly)
		root_flags |= BTRFS_ROOT_SUBVOL_RDONLY;
	else
		root_flags &= ~BTRFS_ROOT_SUBVOL_RDONLY;
	btrfs_set_root_flags(new_root_item, root_flags);

	btrfs_set_root_generation_v2(new_root_item, trans->transid);
	uuid_le_gen(&new_uuid);
	memcpy(new_root_item->uuid, new_uuid.b, BTRFS_UUID_SIZE);
	memcpy(new_root_item->parent_uuid, root->root_item.uuid,
	       BTRFS_UUID_SIZE);
	new_root_item->otime.sec = cpu_to_le64(cur_time.tv_sec);
	new_root_item->otime.nsec = cpu_to_le32(cur_time.tv_nsec);
	btrfs_set_root_otransid(new_root_item, trans->transid);
	memset(&new_root_item->stime, 0, sizeof(new_root_item->stime));
	memset(&new_root_item->rtime, 0, sizeof(new_root_item->rtime));
	btrfs_set_root_stransid(new_root_item, 0);
	btrfs_set_root_rtransid(new_root_item, 0);

	old = btrfs_lock_root_node(root);
	ret = btrfs_cow_block(trans, root, old, NULL, 0, &old);
	if (ret) {
		btrfs_tree_unlock(old);
		free_extent_buffer(old);
		btrfs_abort_transaction(trans, root, ret);
		goto fail;
	}

	btrfs_set_lock_blocking(old);

	ret = btrfs_copy_root(trans, root, old, &tmp, objectid);
	/* clean up in any case */
	btrfs_tree_unlock(old);
	free_extent_buffer(old);
	if (ret) {
		btrfs_abort_transaction(trans, root, ret);
		goto fail;
	}

	/* see comments in should_cow_block() */
	root->force_cow = 1;
	smp_wmb();

	btrfs_set_root_node(new_root_item, tmp);
	/* record when the snapshot was created in key.offset */
	key.offset = trans->transid;
	ret = btrfs_insert_root(trans, tree_root, &key, new_root_item);
	btrfs_tree_unlock(tmp);
	free_extent_buffer(tmp);
	if (ret) {
		btrfs_abort_transaction(trans, root, ret);
		goto fail;
	}

	/*
	 * insert root back/forward references
	 */
	ret = btrfs_add_root_ref(trans, tree_root, objectid,
				 parent_root->root_key.objectid,
				 btrfs_ino(parent_inode), index,
				 dentry->d_name.name, dentry->d_name.len);
	if (ret) {
		btrfs_abort_transaction(trans, root, ret);
		goto fail;
	}

	key.offset = (u64)-1;
	pending->snap = btrfs_read_fs_root_no_name(root->fs_info, &key);
	if (IS_ERR(pending->snap)) {
		ret = PTR_ERR(pending->snap);
		btrfs_abort_transaction(trans, root, ret);
		goto fail;
	}

	ret = btrfs_reloc_post_snapshot(trans, pending);
	if (ret) {
		btrfs_abort_transaction(trans, root, ret);
		goto fail;
	}

	ret = btrfs_run_delayed_refs(trans, root, (unsigned long)-1);
	if (ret) {
		btrfs_abort_transaction(trans, root, ret);
		goto fail;
	}

	ret = btrfs_insert_dir_item(trans, parent_root,
				    dentry->d_name.name, dentry->d_name.len,
				    parent_inode, &key,
				    BTRFS_FT_DIR, index);
	/* We checked the name at the beginning, so -EEXIST is impossible. */
	BUG_ON(ret == -EEXIST);
	if (ret) {
		btrfs_abort_transaction(trans, root, ret);
		goto fail;
	}

	btrfs_i_size_write(parent_inode, parent_inode->i_size +
					 dentry->d_name.len * 2);
	parent_inode->i_mtime = parent_inode->i_ctime = CURRENT_TIME;
	ret = btrfs_update_inode_fallback(trans, parent_root, parent_inode);
	if (ret)
		btrfs_abort_transaction(trans, root, ret);
fail:
	dput(parent);
	trans->block_rsv = rsv;
no_free_objectid:
	kfree(new_root_item);
root_item_alloc_fail:
	btrfs_free_path(path);
path_alloc_fail:
	btrfs_block_rsv_release(root, &pending->block_rsv, (u64)-1);
	return ret;
}

/*
 * create all the snapshots we've scheduled for creation
 */
static noinline int create_pending_snapshots(struct btrfs_trans_handle *trans,
					     struct btrfs_fs_info *fs_info)
{
	struct btrfs_pending_snapshot *pending;
	struct list_head *head = &trans->transaction->pending_snapshots;

	list_for_each_entry(pending, head, list)
		create_pending_snapshot(trans, fs_info, pending);
	return 0;
}

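/*
 * copy the current chunk root and tree root pointers into the
 * in-memory super block that will be written out at commit time
 */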
static void update_super_roots(struct btrfs_root *root)
{
	struct btrfs_root_item *root_item;
	struct btrfs_super_block *super;

	super = root->fs_info->super_copy;

	root_item = &root->fs_info->chunk_root->root_item;
	super->chunk_root = root_item->bytenr;
	super->chunk_root_generation = root_item->generation;
	super->chunk_root_level = root_item->level;

	root_item = &root->fs_info->tree_root->root_item;
	super->root = root_item->bytenr;
	super->generation = root_item->generation;
	super->root_level = root_item->level;
	if (btrfs_test_opt(root, SPACE_CACHE))
		super->cache_generation = root_item->generation;
}

int btrfs_transaction_in_commit(struct btrfs_fs_info *info)
{
	int ret = 0;
	spin_lock(&info->trans_lock);
	if (info->running_transaction)
		ret = info->running_transaction->in_commit;
	spin_unlock(&info->trans_lock);
	return ret;
}

int btrfs_transaction_blocked(struct btrfs_fs_info *info)
{
	int ret = 0;
	spin_lock(&info->trans_lock);
	if (info->running_transaction)
		ret = info->running_transaction->blocked;
	spin_unlock(&info->trans_lock);
	return ret;
}

/*
 * wait for the current transaction commit to start and block subsequent
 * transaction joins
 */
static void wait_current_trans_commit_start(struct btrfs_root *root,
					    struct btrfs_transaction *trans)
{
	wait_event(root->fs_info->transaction_blocked_wait, trans->in_commit);
}

/*
 * wait for the current transaction to start and then become unblocked.
 * caller holds ref.
 */
static void wait_current_trans_commit_start_and_unblock(struct btrfs_root *root,
					 struct btrfs_transaction *trans)
{
	wait_event(root->fs_info->transaction_wait,
		   trans->commit_done || (trans->in_commit && !trans->blocked));
}

/*
 * commit transactions asynchronously.  once btrfs_commit_transaction_async
 * returns, any subsequent transaction will not be allowed to join.
 */
struct btrfs_async_commit {
	struct btrfs_trans_handle *newtrans;
	struct btrfs_root *root;
	struct delayed_work work;
};

static void do_async_commit(struct work_struct *work)
{
	struct btrfs_async_commit *ac =
		container_of(work, struct btrfs_async_commit, work.work);

	/*
	 * We've got freeze protection passed with the transaction.
	 * Tell lockdep about it.
	 */
	rwsem_acquire_read(
		&ac->root->fs_info->sb->s_writers.lock_map[SB_FREEZE_FS-1],
		0, 1, _THIS_IP_);

	current->journal_info = ac->newtrans;

	btrfs_commit_transaction(ac->newtrans, ac->root);
	kfree(ac);
}

int btrfs_commit_transaction_async(struct btrfs_trans_handle *trans,
				   struct btrfs_root *root,
				   int wait_for_unblock)
{
	struct btrfs_async_commit *ac;
	struct btrfs_transaction *cur_trans;

	ac = kmalloc(sizeof(*ac), GFP_NOFS);
	if (!ac)
		return -ENOMEM;

	INIT_DELAYED_WORK(&ac->work, do_async_commit);
	ac->root = root;
	ac->newtrans = btrfs_join_transaction(root);
	if (IS_ERR(ac->newtrans)) {
		int err = PTR_ERR(ac->newtrans);
		kfree(ac);
		return err;
	}

	/* take transaction reference */
	cur_trans = trans->transaction;
	atomic_inc(&cur_trans->use_count);

	btrfs_end_transaction(trans, root);

	/*
	 * Tell lockdep we've released the freeze rwsem, since the
	 * async commit thread will be the one to unlock it.
	 */
	rwsem_release(&root->fs_info->sb->s_writers.lock_map[SB_FREEZE_FS-1],
		      1, _THIS_IP_);

	schedule_delayed_work(&ac->work, 0);

	/* wait for transaction to start and unblock */
	if (wait_for_unblock)
		wait_current_trans_commit_start_and_unblock(root, cur_trans);
	else
		wait_current_trans_commit_start(root, cur_trans);

	if (current->journal_info == trans)
		current->journal_info = NULL;

	put_transaction(cur_trans);
	return 0;
}


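/*
 * error path for btrfs_commit_transaction: abort the transaction,
 * unhook it from the fs_info lists and free everything attached to it
 */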
static void cleanup_transaction(struct btrfs_trans_handle *trans,
				struct btrfs_root *root, int err)
{
	struct btrfs_transaction *cur_trans = trans->transaction;

	WARN_ON(trans->use_count > 1);

	btrfs_abort_transaction(trans, root, err);

	spin_lock(&root->fs_info->trans_lock);
	list_del_init(&cur_trans->list);
	if (cur_trans == root->fs_info->running_transaction) {
		root->fs_info->running_transaction = NULL;
		root->fs_info->trans_no_join = 0;
	}
	spin_unlock(&root->fs_info->trans_lock);

	btrfs_cleanup_one_transaction(trans->transaction, root);

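	/* drop both refs: one for this handle, one taken at creation */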
	put_transaction(cur_trans);
	put_transaction(cur_trans);

	trace_btrfs_transaction_commit(root);

	btrfs_scrub_continue(root);

	if (current->journal_info == trans)
		current->journal_info = NULL;

	kmem_cache_free(btrfs_trans_handle_cachep, trans);
}

/*
 * btrfs_transaction state sequence:
 * in_commit = 0, blocked = 0  (initial)
 * in_commit = 1, blocked = 1
 * blocked = 0
 * commit_done = 1
 */
int btrfs_commit_transaction(struct btrfs_trans_handle *trans,
			     struct btrfs_root *root)
{
	unsigned long joined = 0;
	struct btrfs_transaction *cur_trans = trans->transaction;
	struct btrfs_transaction *prev_trans = NULL;
	DEFINE_WAIT(wait);
	int ret = -EIO;
	int should_grow = 0;
	unsigned long now = get_seconds();
	int flush_on_commit = btrfs_test_opt(root, FLUSHONCOMMIT);

	btrfs_run_ordered_operations(root, 0);

	if (cur_trans->aborted)
		goto cleanup_transaction;

	/* make a pass through all the delayed refs we have so far
	 * any running procs may add more while we are here
	 */
	ret = btrfs_run_delayed_refs(trans, root, 0);
	if (ret)
		goto cleanup_transaction;

	btrfs_trans_release_metadata(trans, root);
	trans->block_rsv = NULL;

	cur_trans = trans->transaction;

	/*
	 * set the flushing flag so procs in this transaction have to
	 * start sending their work down.
	 */
	cur_trans->delayed_refs.flushing = 1;

	if (!list_empty(&trans->new_bgs))
		btrfs_create_pending_block_groups(trans, root);

	ret = btrfs_run_delayed_refs(trans, root, 0);
	if (ret)
		goto cleanup_transaction;

	spin_lock(&cur_trans->commit_lock);
	if (cur_trans->in_commit) {
		spin_unlock(&cur_trans->commit_lock);
		atomic_inc(&cur_trans->use_count);
		ret = btrfs_end_transaction(trans, root);

		wait_for_commit(root, cur_trans);

		put_transaction(cur_trans);

		return ret;
	}

	trans->transaction->in_commit = 1;
	trans->transaction->blocked = 1;
	spin_unlock(&cur_trans->commit_lock);
	wake_up(&root->fs_info->transaction_blocked_wait);

	spin_lock(&root->fs_info->trans_lock);
	if (cur_trans->list.prev != &root->fs_info->trans_list) {
		prev_trans = list_entry(cur_trans->list.prev,
					struct btrfs_transaction, list);
		if (!prev_trans->commit_done) {
			atomic_inc(&prev_trans->use_count);
			spin_unlock(&root->fs_info->trans_lock);

			wait_for_commit(root, prev_trans);

			put_transaction(prev_trans);
		} else {
			spin_unlock(&root->fs_info->trans_lock);
		}
	} else {
		spin_unlock(&root->fs_info->trans_lock);
	}

	if (!btrfs_test_opt(root, SSD) &&
	    (now < cur_trans->start_time || now - cur_trans->start_time < 1))
		should_grow = 1;

	do {
		int snap_pending = 0;

		joined = cur_trans->num_joined;
		if (!list_empty(&trans->transaction->pending_snapshots))
			snap_pending = 1;

		WARN_ON(cur_trans != trans->transaction);

		if (flush_on_commit || snap_pending) {
			ret = btrfs_start_delalloc_inodes(root, 1);
			if (ret) {
				btrfs_abort_transaction(trans, root, ret);
				goto cleanup_transaction;
			}
			btrfs_wait_ordered_extents(root, 1);
		}

		ret = btrfs_run_delayed_items(trans, root);
		if (ret)
			goto cleanup_transaction;

		/*
		 * running the delayed items may have added new refs. account
		 * them now so that they hinder processing of more delayed refs
		 * as little as possible.
		 */
		btrfs_delayed_refs_qgroup_accounting(trans, root->fs_info);

		/*
		 * rename doesn't use btrfs_join_transaction, so once we
		 * set the transaction to blocked above, we aren't going
		 * to get any new ordered operations.  We can safely run
		 * it here and know for sure that nothing new will be
		 * added to the list.
		 */
		btrfs_run_ordered_operations(root, 1);

		prepare_to_wait(&cur_trans->writer_wait, &wait,
				TASK_UNINTERRUPTIBLE);

		if (atomic_read(&cur_trans->num_writers) > 1)
			schedule_timeout(MAX_SCHEDULE_TIMEOUT);
		else if (should_grow)
			schedule_timeout(1);

		finish_wait(&cur_trans->writer_wait, &wait);
	} while (atomic_read(&cur_trans->num_writers) > 1 ||
		 (should_grow && cur_trans->num_joined != joined));

	/*
	 * Ok now we need to make sure to block out any other joins while we
	 * commit the transaction.  We could have started a join before setting
	 * no_join so make sure to wait for num_writers to == 1 again.
	 */
	spin_lock(&root->fs_info->trans_lock);
	root->fs_info->trans_no_join = 1;
	spin_unlock(&root->fs_info->trans_lock);
	wait_event(cur_trans->writer_wait,
		   atomic_read(&cur_trans->num_writers) == 1);

	/*
	 * the reloc mutex makes sure that we stop
	 * the balancing code from coming in and moving
	 * extents around in the middle of the commit
	 */
	mutex_lock(&root->fs_info->reloc_mutex);

	/*
	 * We needn't worry about the delayed items because we will
	 * deal with them in create_pending_snapshot(), which is the
	 * core function of the snapshot creation.
	 */
	ret = create_pending_snapshots(trans, root->fs_info);
	if (ret) {
		mutex_unlock(&root->fs_info->reloc_mutex);
		goto cleanup_transaction;
	}

	/*
	 * We insert the dir indexes of the snapshots and update the inode
	 * of the snapshots' parents after the snapshot creation, so there
	 * are some delayed items which are not dealt with.  Now deal with
	 * them.
	 *
	 * We needn't worry that this operation will corrupt the snapshots,
	 * because all the trees which are snapshotted will be forced to COW
	 * the nodes and leaves.
	 */
	ret = btrfs_run_delayed_items(trans, root);
	if (ret) {
		mutex_unlock(&root->fs_info->reloc_mutex);
		goto cleanup_transaction;
	}

	ret = btrfs_run_delayed_refs(trans, root, (unsigned long)-1);
	if (ret) {
		mutex_unlock(&root->fs_info->reloc_mutex);
		goto cleanup_transaction;
	}

	/*
	 * make sure none of the code above managed to slip in a
	 * delayed item
	 */
	btrfs_assert_delayed_root_empty(root);

	WARN_ON(cur_trans != trans->transaction);

	btrfs_scrub_pause(root);
	/* commit_cowonly_roots is responsible for getting the
	 * various roots consistent with each other.  Every pointer
	 * in the tree of tree roots has to point to the most up to date
	 * root for every subvolume and other tree.  So, we have to keep
	 * the tree logging code from jumping in and changing any
	 * of the trees.
	 *
	 * At this point in the commit, there can't be any tree-log
	 * writers, but a little lower down we drop the trans mutex
	 * and let new people in.  By holding the tree_log_mutex
	 * from now until after the super is written, we avoid races
	 * with the tree-log code.
	 */
	mutex_lock(&root->fs_info->tree_log_mutex);

	ret = commit_fs_roots(trans, root);
	if (ret) {
		mutex_unlock(&root->fs_info->tree_log_mutex);
		mutex_unlock(&root->fs_info->reloc_mutex);
		goto cleanup_transaction;
	}

	/* commit_fs_roots gets rid of all the tree log roots, it is now
	 * safe to free the root of tree log roots
	 */
	btrfs_free_log_root_tree(trans, root->fs_info);

	ret = commit_cowonly_roots(trans, root);
	if (ret) {
		mutex_unlock(&root->fs_info->tree_log_mutex);
		mutex_unlock(&root->fs_info->reloc_mutex);
		goto cleanup_transaction;
	}

	btrfs_prepare_extent_commit(trans, root);

	cur_trans = root->fs_info->running_transaction;

	btrfs_set_root_node(&root->fs_info->tree_root->root_item,
			    root->fs_info->tree_root->node);
	switch_commit_root(root->fs_info->tree_root);

	btrfs_set_root_node(&root->fs_info->chunk_root->root_item,
			    root->fs_info->chunk_root->node);
	switch_commit_root(root->fs_info->chunk_root);

	assert_qgroups_uptodate(trans);
	update_super_roots(root);

	if (!root->fs_info->log_root_recovering) {
		btrfs_set_super_log_root(root->fs_info->super_copy, 0);
		btrfs_set_super_log_root_level(root->fs_info->super_copy, 0);
	}

	memcpy(root->fs_info->super_for_commit, root->fs_info->super_copy,
	       sizeof(*root->fs_info->super_copy));

	trans->transaction->blocked = 0;
	spin_lock(&root->fs_info->trans_lock);
	root->fs_info->running_transaction = NULL;
	root->fs_info->trans_no_join = 0;
	spin_unlock(&root->fs_info->trans_lock);
	mutex_unlock(&root->fs_info->reloc_mutex);

	wake_up(&root->fs_info->transaction_wait);

	ret = btrfs_write_and_wait_transaction(trans, root);
	if (ret) {
		btrfs_error(root->fs_info, ret,
			    "Error while writing out transaction.");
		mutex_unlock(&root->fs_info->tree_log_mutex);
		goto cleanup_transaction;
	}

	ret = write_ctree_super(trans, root, 0);
	if (ret) {
		mutex_unlock(&root->fs_info->tree_log_mutex);
		goto cleanup_transaction;
	}

	/*
	 * the super is written, we can safely allow the tree-loggers
	 * to go about their business
	 */
	mutex_unlock(&root->fs_info->tree_log_mutex);

	btrfs_finish_extent_commit(trans, root);

	cur_trans->commit_done = 1;

	root->fs_info->last_trans_committed = cur_trans->transid;

	wake_up(&cur_trans->commit_wait);

	spin_lock(&root->fs_info->trans_lock);
	list_del_init(&cur_trans->list);
	spin_unlock(&root->fs_info->trans_lock);

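	/* drop both refs: one for this handle, one taken at creation */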
	put_transaction(cur_trans);
	put_transaction(cur_trans);

	if (trans->type < TRANS_JOIN_NOLOCK)
		sb_end_intwrite(root->fs_info->sb);

	trace_btrfs_transaction_commit(root);

	btrfs_scrub_continue(root);

	if (current->journal_info == trans)
		current->journal_info = NULL;

	kmem_cache_free(btrfs_trans_handle_cachep, trans);

	if (current != root->fs_info->transaction_kthread)
		btrfs_run_delayed_iputs(root);

	return ret;

cleanup_transaction:
	btrfs_trans_release_metadata(trans, root);
	trans->block_rsv = NULL;
	btrfs_printk(root->fs_info, "Skipping commit of aborted transaction.\n");
	// WARN_ON(1);
	if (current->journal_info == trans)
		current->journal_info = NULL;
	cleanup_transaction(trans, root, ret);

	return ret;
}

/*
 * interface function to delete all the snapshots we have scheduled for deletion
 */
int btrfs_clean_old_snapshots(struct btrfs_root *root)
{
	LIST_HEAD(list);
	struct btrfs_fs_info *fs_info = root->fs_info;

	spin_lock(&fs_info->trans_lock);
	list_splice_init(&fs_info->dead_roots, &list);
	spin_unlock(&fs_info->trans_lock);

	while (!list_empty(&list)) {
		int ret;

		root = list_entry(list.next, struct btrfs_root, root_list);
		list_del(&root->root_list);

		btrfs_kill_all_delayed_nodes(root);

		if (btrfs_header_backref_rev(root->node) <
		    BTRFS_MIXED_BACKREF_REV)
			ret = btrfs_drop_snapshot(root, NULL, 0, 0);
		else
			ret = btrfs_drop_snapshot(root, NULL, 1, 0);
		BUG_ON(ret < 0);
	}
	return 0;
}