/*
 * Copyright (C) 2007 Oracle. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 */

#include <linux/fs.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/writeback.h>
#include <linux/pagemap.h>
#include <linux/blkdev.h>
#include "ctree.h"
#include "disk-io.h"
#include "transaction.h"
#include "locking.h"
#include "tree-log.h"

#define BTRFS_ROOT_TRANS_TAG 0

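/*
 * Drop one reference on @transaction and free it once the last reference
 * is gone. Every caller in this file holds fs_info->trans_mutex, which is
 * what keeps use_count consistent.
 */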
static noinline void put_transaction(struct btrfs_transaction *transaction)
{
	WARN_ON(transaction->use_count == 0);
	transaction->use_count--;
	if (transaction->use_count == 0) {
		list_del_init(&transaction->list);
		memset(transaction, 0, sizeof(*transaction));
		kmem_cache_free(btrfs_transaction_cachep, transaction);
	}
}

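/*
 * Drop the old commit root and make the current root node the new commit
 * root. Callers are expected to hold the locks that protect root->node
 * for the tree in question.
 */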
static noinline void switch_commit_root(struct btrfs_root *root)
{
	free_extent_buffer(root->commit_root);
	root->commit_root = btrfs_root_node(root);
}

/*
 * either allocate a new transaction or hop into the existing one
 */
static noinline int join_transaction(struct btrfs_root *root)
{
	struct btrfs_transaction *cur_trans;
	cur_trans = root->fs_info->running_transaction;
	if (!cur_trans) {
		cur_trans = kmem_cache_alloc(btrfs_transaction_cachep,
					     GFP_NOFS);
		BUG_ON(!cur_trans);
		root->fs_info->generation++;
		cur_trans->num_writers = 1;
		cur_trans->num_joined = 0;
		cur_trans->transid = root->fs_info->generation;
		init_waitqueue_head(&cur_trans->writer_wait);
		init_waitqueue_head(&cur_trans->commit_wait);
		cur_trans->in_commit = 0;
		cur_trans->blocked = 0;
		cur_trans->use_count = 1;
		cur_trans->commit_done = 0;
		cur_trans->start_time = get_seconds();

		cur_trans->delayed_refs.root = RB_ROOT;
		cur_trans->delayed_refs.num_entries = 0;
		cur_trans->delayed_refs.num_heads_ready = 0;
		cur_trans->delayed_refs.num_heads = 0;
		cur_trans->delayed_refs.flushing = 0;
		cur_trans->delayed_refs.run_delayed_start = 0;
		spin_lock_init(&cur_trans->delayed_refs.lock);

		INIT_LIST_HEAD(&cur_trans->pending_snapshots);
		list_add_tail(&cur_trans->list, &root->fs_info->trans_list);
		extent_io_tree_init(&cur_trans->dirty_pages,
				    root->fs_info->btree_inode->i_mapping,
				    GFP_NOFS);
		spin_lock(&root->fs_info->new_trans_lock);
		root->fs_info->running_transaction = cur_trans;
		spin_unlock(&root->fs_info->new_trans_lock);
	} else {
		cur_trans->num_writers++;
		cur_trans->num_joined++;
	}

	return 0;
}

/*
 * this does all the record keeping required to make sure that a reference
 * counted root is properly recorded in a given transaction. This is required
 * to make sure the old root from before we joined the transaction is deleted
 * when the transaction commits
 */
static noinline int record_root_in_trans(struct btrfs_trans_handle *trans,
					 struct btrfs_root *root)
{
	if (root->ref_cows && root->last_trans < trans->transid) {
		WARN_ON(root == root->fs_info->extent_root);
		WARN_ON(root->commit_root != root->node);

		radix_tree_tag_set(&root->fs_info->fs_roots_radix,
				   (unsigned long)root->root_key.objectid,
				   BTRFS_ROOT_TRANS_TAG);
		root->last_trans = trans->transid;
		btrfs_init_reloc_root(trans, root);
	}
	return 0;
}

int btrfs_record_root_in_trans(struct btrfs_trans_handle *trans,
			       struct btrfs_root *root)
{
	if (!root->ref_cows)
		return 0;

	mutex_lock(&root->fs_info->trans_mutex);
	if (root->last_trans == trans->transid) {
		mutex_unlock(&root->fs_info->trans_mutex);
		return 0;
	}

	record_root_in_trans(trans, root);
	mutex_unlock(&root->fs_info->trans_mutex);
	return 0;
}
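/*
 * Illustrative sketch (no call site in this file): code that is about to
 * dirty a reference counted (ref_cows) root records it first so the commit
 * code knows the root changed in this transaction:
 *
 *	btrfs_record_root_in_trans(trans, root);
 *	(COW and modify blocks in root here)
 *
 * The radix tree tag set by record_root_in_trans() is what
 * commit_fs_roots() later walks.
 */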

/* wait for the commit against the current transaction to become unblocked.
 * When this is done, it is safe to start a new transaction, but the current
 * transaction might not be fully on disk.
 */
static void wait_current_trans(struct btrfs_root *root)
{
	struct btrfs_transaction *cur_trans;

	cur_trans = root->fs_info->running_transaction;
	if (cur_trans && cur_trans->blocked) {
		DEFINE_WAIT(wait);
		cur_trans->use_count++;
		while (1) {
			prepare_to_wait(&root->fs_info->transaction_wait, &wait,
					TASK_UNINTERRUPTIBLE);
			if (!cur_trans->blocked)
				break;
			mutex_unlock(&root->fs_info->trans_mutex);
			schedule();
			mutex_lock(&root->fs_info->trans_mutex);
		}
		finish_wait(&root->fs_info->transaction_wait, &wait);
		put_transaction(cur_trans);
	}
}

enum btrfs_trans_type {
	TRANS_START,
	TRANS_JOIN,
	TRANS_USERSPACE,
};

static struct btrfs_trans_handle *start_transaction(struct btrfs_root *root,
						    int num_blocks, int type)
{
	struct btrfs_trans_handle *h =
		kmem_cache_alloc(btrfs_trans_handle_cachep, GFP_NOFS);
	int ret;

	mutex_lock(&root->fs_info->trans_mutex);
	if (!root->fs_info->log_root_recovering &&
	    ((type == TRANS_START && !root->fs_info->open_ioctl_trans) ||
	     type == TRANS_USERSPACE))
		wait_current_trans(root);
	ret = join_transaction(root);
	BUG_ON(ret);

	h->transid = root->fs_info->running_transaction->transid;
	h->transaction = root->fs_info->running_transaction;
	h->blocks_reserved = num_blocks;
	h->blocks_used = 0;
	h->block_group = 0;
	h->alloc_exclude_nr = 0;
	h->alloc_exclude_start = 0;
	h->delayed_ref_updates = 0;

	if (!current->journal_info && type != TRANS_USERSPACE)
		current->journal_info = h;

	root->fs_info->running_transaction->use_count++;
	record_root_in_trans(h, root);
	mutex_unlock(&root->fs_info->trans_mutex);
	return h;
}

struct btrfs_trans_handle *btrfs_start_transaction(struct btrfs_root *root,
						   int num_blocks)
{
	return start_transaction(root, num_blocks, TRANS_START);
}

struct btrfs_trans_handle *btrfs_join_transaction(struct btrfs_root *root,
						  int num_blocks)
{
	return start_transaction(root, num_blocks, TRANS_JOIN);
}

struct btrfs_trans_handle *btrfs_start_ioctl_transaction(struct btrfs_root *r,
							  int num_blocks)
{
	return start_transaction(r, num_blocks, TRANS_USERSPACE);
}
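/*
 * Illustrative usage sketch (not taken from this file): callers pair one
 * of the start variants above with btrfs_end_transaction(),
 *
 *	trans = btrfs_start_transaction(root, 1);
 *	ret = do_some_tree_mod(trans, root);	(do_some_tree_mod is hypothetical)
 *	btrfs_end_transaction(trans, root);
 *
 * btrfs_join_transaction() hops into the running transaction even while it
 * is blocked for commit; btrfs_start_transaction() first waits for a
 * blocked commit via wait_current_trans() above.
 */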

/* wait for a transaction commit to be fully complete */
static noinline int wait_for_commit(struct btrfs_root *root,
				    struct btrfs_transaction *commit)
{
	DEFINE_WAIT(wait);
	mutex_lock(&root->fs_info->trans_mutex);
	while (!commit->commit_done) {
		prepare_to_wait(&commit->commit_wait, &wait,
				TASK_UNINTERRUPTIBLE);
		if (commit->commit_done)
			break;
		mutex_unlock(&root->fs_info->trans_mutex);
		schedule();
		mutex_lock(&root->fs_info->trans_mutex);
	}
	mutex_unlock(&root->fs_info->trans_mutex);
	finish_wait(&commit->commit_wait, &wait);
	return 0;
}

#if 0
/*
 * rate limit against the drop_snapshot code. This helps to slow down new
 * operations if the drop_snapshot code isn't able to keep up.
 */
static void throttle_on_drops(struct btrfs_root *root)
{
	struct btrfs_fs_info *info = root->fs_info;
	int harder_count = 0;

harder:
	if (atomic_read(&info->throttles)) {
		DEFINE_WAIT(wait);
		int thr;
		thr = atomic_read(&info->throttle_gen);

		do {
			prepare_to_wait(&info->transaction_throttle,
					&wait, TASK_UNINTERRUPTIBLE);
			if (!atomic_read(&info->throttles)) {
				finish_wait(&info->transaction_throttle, &wait);
				break;
			}
			schedule();
			finish_wait(&info->transaction_throttle, &wait);
		} while (thr == atomic_read(&info->throttle_gen));
		harder_count++;

		if (root->fs_info->total_ref_cache_size > 1 * 1024 * 1024 &&
		    harder_count < 2)
			goto harder;

		if (root->fs_info->total_ref_cache_size > 5 * 1024 * 1024 &&
		    harder_count < 10)
			goto harder;

		if (root->fs_info->total_ref_cache_size > 10 * 1024 * 1024 &&
		    harder_count < 20)
			goto harder;
	}
}
#endif

void btrfs_throttle(struct btrfs_root *root)
{
	mutex_lock(&root->fs_info->trans_mutex);
	if (!root->fs_info->open_ioctl_trans)
		wait_current_trans(root);
	mutex_unlock(&root->fs_info->trans_mutex);
}

static int __btrfs_end_transaction(struct btrfs_trans_handle *trans,
				   struct btrfs_root *root, int throttle)
{
	struct btrfs_transaction *cur_trans;
	struct btrfs_fs_info *info = root->fs_info;
	int count = 0;

	while (count < 4) {
		unsigned long cur = trans->delayed_ref_updates;
		trans->delayed_ref_updates = 0;
		if (cur &&
		    trans->transaction->delayed_refs.num_heads_ready > 64) {
			trans->delayed_ref_updates = 0;

			/*
			 * do a full flush if the transaction is trying
			 * to close
			 */
			if (trans->transaction->delayed_refs.flushing)
				cur = 0;
			btrfs_run_delayed_refs(trans, root, cur);
		} else {
			break;
		}
		count++;
	}

	mutex_lock(&info->trans_mutex);
	cur_trans = info->running_transaction;
	WARN_ON(cur_trans != trans->transaction);
	WARN_ON(cur_trans->num_writers < 1);
	cur_trans->num_writers--;

	if (waitqueue_active(&cur_trans->writer_wait))
		wake_up(&cur_trans->writer_wait);
	put_transaction(cur_trans);
	mutex_unlock(&info->trans_mutex);

	if (current->journal_info == trans)
		current->journal_info = NULL;
	memset(trans, 0, sizeof(*trans));
	kmem_cache_free(btrfs_trans_handle_cachep, trans);

	if (throttle)
		btrfs_run_delayed_iputs(root);

	return 0;
}

int btrfs_end_transaction(struct btrfs_trans_handle *trans,
			  struct btrfs_root *root)
{
	return __btrfs_end_transaction(trans, root, 0);
}

int btrfs_end_transaction_throttle(struct btrfs_trans_handle *trans,
				   struct btrfs_root *root)
{
	return __btrfs_end_transaction(trans, root, 1);
}

/*
 * when btree blocks are allocated, they have some corresponding bits set for
 * them in one of two extent_io trees. This is used to make sure all of
 * those extents are sent to disk but does not wait on them
 */
int btrfs_write_marked_extents(struct btrfs_root *root,
			       struct extent_io_tree *dirty_pages, int mark)
{
	int ret;
	int err = 0;
	int werr = 0;
	struct page *page;
	struct inode *btree_inode = root->fs_info->btree_inode;
	u64 start = 0;
	u64 end;
	unsigned long index;

	while (1) {
		ret = find_first_extent_bit(dirty_pages, start, &start, &end,
					    mark);
		if (ret)
			break;
		while (start <= end) {
			cond_resched();

			index = start >> PAGE_CACHE_SHIFT;
			start = (u64)(index + 1) << PAGE_CACHE_SHIFT;
			page = find_get_page(btree_inode->i_mapping, index);
			if (!page)
				continue;

			btree_lock_page_hook(page);
			if (!page->mapping) {
				unlock_page(page);
				page_cache_release(page);
				continue;
			}

			if (PageWriteback(page)) {
				if (PageDirty(page))
					wait_on_page_writeback(page);
				else {
					unlock_page(page);
					page_cache_release(page);
					continue;
				}
			}
			err = write_one_page(page, 0);
			if (err)
				werr = err;
			page_cache_release(page);
		}
	}
	if (err)
		werr = err;
	return werr;
}

/*
 * when btree blocks are allocated, they have some corresponding bits set for
 * them in one of two extent_io trees. This is used to make sure all of
 * those extents are on disk for transaction or log commit. We wait
 * on all the pages and clear them from the dirty pages state tree
 */
int btrfs_wait_marked_extents(struct btrfs_root *root,
			      struct extent_io_tree *dirty_pages, int mark)
{
	int ret;
	int err = 0;
	int werr = 0;
	struct page *page;
	struct inode *btree_inode = root->fs_info->btree_inode;
	u64 start = 0;
	u64 end;
	unsigned long index;

	while (1) {
		ret = find_first_extent_bit(dirty_pages, start, &start, &end,
					    mark);
		if (ret)
			break;

		clear_extent_bits(dirty_pages, start, end, mark, GFP_NOFS);
		while (start <= end) {
			index = start >> PAGE_CACHE_SHIFT;
			start = (u64)(index + 1) << PAGE_CACHE_SHIFT;
			page = find_get_page(btree_inode->i_mapping, index);
			if (!page)
				continue;
			if (PageDirty(page)) {
				btree_lock_page_hook(page);
				wait_on_page_writeback(page);
				err = write_one_page(page, 0);
				if (err)
					werr = err;
			}
			wait_on_page_writeback(page);
			page_cache_release(page);
			cond_resched();
		}
	}
	if (err)
		werr = err;
	return werr;
}

/*
 * when btree blocks are allocated, they have some corresponding bits set for
 * them in one of two extent_io trees. This is used to make sure all of
 * those extents are on disk for transaction or log commit
 */
int btrfs_write_and_wait_marked_extents(struct btrfs_root *root,
					struct extent_io_tree *dirty_pages,
					int mark)
{
	int ret;
	int ret2;

	ret = btrfs_write_marked_extents(root, dirty_pages, mark);
	ret2 = btrfs_wait_marked_extents(root, dirty_pages, mark);
	return ret || ret2;
}

int btrfs_write_and_wait_transaction(struct btrfs_trans_handle *trans,
				     struct btrfs_root *root)
{
	if (!trans || !trans->transaction) {
		struct inode *btree_inode;
		btree_inode = root->fs_info->btree_inode;
		return filemap_write_and_wait(btree_inode->i_mapping);
	}
	return btrfs_write_and_wait_marked_extents(root,
					&trans->transaction->dirty_pages,
					EXTENT_DIRTY);
}

/*
 * this is used to update the root pointer in the tree of tree roots.
 *
 * But, in the case of the extent allocation tree, updating the root
 * pointer may allocate blocks which may change the root of the extent
 * allocation tree.
 *
 * So, this loops and repeats and makes sure the cowonly root didn't
 * change while the root pointer was being updated in the metadata.
 */
static int update_cowonly_root(struct btrfs_trans_handle *trans,
			       struct btrfs_root *root)
{
	int ret;
	u64 old_root_bytenr;
	u64 old_root_used;
	struct btrfs_root *tree_root = root->fs_info->tree_root;

	old_root_used = btrfs_root_used(&root->root_item);
	btrfs_write_dirty_block_groups(trans, root);

	while (1) {
		old_root_bytenr = btrfs_root_bytenr(&root->root_item);
		if (old_root_bytenr == root->node->start &&
		    old_root_used == btrfs_root_used(&root->root_item))
			break;

		btrfs_set_root_node(&root->root_item, root->node);
		ret = btrfs_update_root(trans, tree_root,
					&root->root_key,
					&root->root_item);
		BUG_ON(ret);

		old_root_used = btrfs_root_used(&root->root_item);
		ret = btrfs_write_dirty_block_groups(trans, root);
		BUG_ON(ret);
	}

	if (root != root->fs_info->extent_root)
		switch_commit_root(root);

	return 0;
}

/*
 * update all the cowonly tree roots on disk
 */
static noinline int commit_cowonly_roots(struct btrfs_trans_handle *trans,
					 struct btrfs_root *root)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct list_head *next;
	struct extent_buffer *eb;
	int ret;

	ret = btrfs_run_delayed_refs(trans, root, (unsigned long)-1);
	BUG_ON(ret);

	eb = btrfs_lock_root_node(fs_info->tree_root);
	btrfs_cow_block(trans, fs_info->tree_root, eb, NULL, 0, &eb);
	btrfs_tree_unlock(eb);
	free_extent_buffer(eb);

	ret = btrfs_run_delayed_refs(trans, root, (unsigned long)-1);
	BUG_ON(ret);

	while (!list_empty(&fs_info->dirty_cowonly_roots)) {
		next = fs_info->dirty_cowonly_roots.next;
		list_del_init(next);
		root = list_entry(next, struct btrfs_root, dirty_list);

		update_cowonly_root(trans, root);
	}

	down_write(&fs_info->extent_commit_sem);
	switch_commit_root(fs_info->extent_root);
	up_write(&fs_info->extent_commit_sem);

	return 0;
}

/*
 * dead roots are old snapshots that need to be deleted. This adds the
 * given root to the list of dead roots that need to be deleted
 */
int btrfs_add_dead_root(struct btrfs_root *root)
{
	mutex_lock(&root->fs_info->trans_mutex);
	list_add(&root->root_list, &root->fs_info->dead_roots);
	mutex_unlock(&root->fs_info->trans_mutex);
	return 0;
}

/*
 * update the root items of all the fs tree roots that changed in this
 * transaction
 */
static noinline int commit_fs_roots(struct btrfs_trans_handle *trans,
				    struct btrfs_root *root)
{
	struct btrfs_root *gang[8];
	struct btrfs_fs_info *fs_info = root->fs_info;
	int i;
	int ret;
	int err = 0;

	while (1) {
		ret = radix_tree_gang_lookup_tag(&fs_info->fs_roots_radix,
						 (void **)gang, 0,
						 ARRAY_SIZE(gang),
						 BTRFS_ROOT_TRANS_TAG);
		if (ret == 0)
			break;
		for (i = 0; i < ret; i++) {
			root = gang[i];
			radix_tree_tag_clear(&fs_info->fs_roots_radix,
					(unsigned long)root->root_key.objectid,
					BTRFS_ROOT_TRANS_TAG);

			btrfs_free_log(trans, root);
			btrfs_update_reloc_root(trans, root);

			if (root->commit_root != root->node) {
				switch_commit_root(root);
				btrfs_set_root_node(&root->root_item,
						    root->node);
			}

			err = btrfs_update_root(trans, fs_info->tree_root,
						&root->root_key,
						&root->root_item);
			if (err)
				break;
		}
	}
	return err;
}

/*
 * defrag a given btree. If cacheonly == 1, this won't read from the disk,
 * otherwise every leaf in the btree is read and defragged.
 */
int btrfs_defrag_root(struct btrfs_root *root, int cacheonly)
{
	struct btrfs_fs_info *info = root->fs_info;
	int ret;
	struct btrfs_trans_handle *trans;
	unsigned long nr;

	smp_mb();
	if (root->defrag_running)
		return 0;
	trans = btrfs_start_transaction(root, 1);
	while (1) {
		root->defrag_running = 1;
		ret = btrfs_defrag_leaves(trans, root, cacheonly);
		nr = trans->blocks_used;
		btrfs_end_transaction(trans, root);
		btrfs_btree_balance_dirty(info->tree_root, nr);
		cond_resched();

		trans = btrfs_start_transaction(root, 1);
		if (root->fs_info->closing || ret != -EAGAIN)
			break;
	}
	root->defrag_running = 0;
	smp_mb();
	btrfs_end_transaction(trans, root);
	return 0;
}
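/*
 * Illustrative sketch: a full defrag pass reads every leaf from disk,
 *
 *	ret = btrfs_defrag_root(root, 0);
 *
 * while a cacheonly pass (cacheonly == 1) only touches leaves that are
 * already in the page cache.
 */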

#if 0
/*
 * when dropping snapshots, we generate a ton of delayed refs, and it makes
 * sense not to join the transaction while it is trying to flush the current
 * queue of delayed refs out.
 *
 * This is used by the drop snapshot code only
 */
static noinline int wait_transaction_pre_flush(struct btrfs_fs_info *info)
{
	DEFINE_WAIT(wait);

	mutex_lock(&info->trans_mutex);
	while (info->running_transaction &&
	       info->running_transaction->delayed_refs.flushing) {
		prepare_to_wait(&info->transaction_wait, &wait,
				TASK_UNINTERRUPTIBLE);
		mutex_unlock(&info->trans_mutex);

		schedule();

		mutex_lock(&info->trans_mutex);
		finish_wait(&info->transaction_wait, &wait);
	}
	mutex_unlock(&info->trans_mutex);
	return 0;
}

/*
 * Given a list of roots that need to be deleted, call btrfs_drop_snapshot on
 * all of them
 */
int btrfs_drop_dead_root(struct btrfs_root *root)
{
	struct btrfs_trans_handle *trans;
	struct btrfs_root *tree_root = root->fs_info->tree_root;
	unsigned long nr;
	int ret;

	while (1) {
		/*
		 * we don't want to jump in and create a bunch of
		 * delayed refs if the transaction is starting to close
		 */
		wait_transaction_pre_flush(tree_root->fs_info);
		trans = btrfs_start_transaction(tree_root, 1);

		/*
		 * we've joined a transaction, make sure it isn't
		 * closing right now
		 */
		if (trans->transaction->delayed_refs.flushing) {
			btrfs_end_transaction(trans, tree_root);
			continue;
		}

		ret = btrfs_drop_snapshot(trans, root);
		if (ret != -EAGAIN)
			break;

		ret = btrfs_update_root(trans, tree_root,
					&root->root_key,
					&root->root_item);
		if (ret)
			break;

		nr = trans->blocks_used;
		ret = btrfs_end_transaction(trans, tree_root);
		BUG_ON(ret);

		btrfs_btree_balance_dirty(tree_root, nr);
		cond_resched();
	}
	BUG_ON(ret);

	ret = btrfs_del_root(trans, tree_root, &root->root_key);
	BUG_ON(ret);

	nr = trans->blocks_used;
	ret = btrfs_end_transaction(trans, tree_root);
	BUG_ON(ret);

	free_extent_buffer(root->node);
	free_extent_buffer(root->commit_root);
	kfree(root);

	btrfs_btree_balance_dirty(tree_root, nr);
	return ret;
}
#endif

/*
 * new snapshots need to be created at a very specific time in the
 * transaction commit. This does the actual creation
 */
static noinline int create_pending_snapshot(struct btrfs_trans_handle *trans,
					    struct btrfs_fs_info *fs_info,
					    struct btrfs_pending_snapshot *pending)
{
	struct btrfs_key key;
	struct btrfs_root_item *new_root_item;
	struct btrfs_root *tree_root = fs_info->tree_root;
	struct btrfs_root *root = pending->root;
	struct btrfs_root *parent_root;
	struct inode *parent_inode;
	struct extent_buffer *tmp;
	struct extent_buffer *old;
	int ret;
	u64 objectid;
	int namelen;
	u64 index = 0;

	parent_inode = pending->dentry->d_parent->d_inode;
	parent_root = BTRFS_I(parent_inode)->root;

	new_root_item = kmalloc(sizeof(*new_root_item), GFP_NOFS);
	if (!new_root_item) {
		ret = -ENOMEM;
		goto fail;
	}
	ret = btrfs_find_free_objectid(trans, tree_root, 0, &objectid);
	if (ret)
		goto fail;

	key.objectid = objectid;
	/* record when the snapshot was created in key.offset */
	key.offset = trans->transid;
	btrfs_set_key_type(&key, BTRFS_ROOT_ITEM_KEY);

	memcpy(&pending->root_key, &key, sizeof(key));
	pending->root_key.offset = (u64)-1;

	record_root_in_trans(trans, parent_root);
	/*
	 * insert the directory item
	 */
	namelen = strlen(pending->name);
	ret = btrfs_set_inode_index(parent_inode, &index);
	BUG_ON(ret);
	ret = btrfs_insert_dir_item(trans, parent_root,
				    pending->name, namelen,
				    parent_inode->i_ino,
				    &pending->root_key, BTRFS_FT_DIR, index);
	BUG_ON(ret);

	btrfs_i_size_write(parent_inode, parent_inode->i_size + namelen * 2);
	ret = btrfs_update_inode(trans, parent_root, parent_inode);
	BUG_ON(ret);

	record_root_in_trans(trans, root);
	btrfs_set_root_last_snapshot(&root->root_item, trans->transid);
	memcpy(new_root_item, &root->root_item, sizeof(*new_root_item));

	old = btrfs_lock_root_node(root);
	btrfs_cow_block(trans, root, old, NULL, 0, &old);
	btrfs_set_lock_blocking(old);

	btrfs_copy_root(trans, root, old, &tmp, objectid);
	btrfs_tree_unlock(old);
	free_extent_buffer(old);

	btrfs_set_root_node(new_root_item, tmp);
	ret = btrfs_insert_root(trans, root->fs_info->tree_root, &key,
				new_root_item);
	BUG_ON(ret);
	btrfs_tree_unlock(tmp);
	free_extent_buffer(tmp);

	ret = btrfs_add_root_ref(trans, parent_root->fs_info->tree_root,
				 pending->root_key.objectid,
				 parent_root->root_key.objectid,
				 parent_inode->i_ino, index, pending->name,
				 namelen);
	BUG_ON(ret);

fail:
	kfree(new_root_item);
	return ret;
}

/*
 * create all the snapshots we've scheduled for creation
 */
static noinline int create_pending_snapshots(struct btrfs_trans_handle *trans,
					     struct btrfs_fs_info *fs_info)
{
	struct btrfs_pending_snapshot *pending;
	struct list_head *head = &trans->transaction->pending_snapshots;
	int ret;

	list_for_each_entry(pending, head, list) {
		ret = create_pending_snapshot(trans, fs_info, pending);
		BUG_ON(ret);
	}
	return 0;
}

static void update_super_roots(struct btrfs_root *root)
{
	struct btrfs_root_item *root_item;
	struct btrfs_super_block *super;

	super = &root->fs_info->super_copy;

	root_item = &root->fs_info->chunk_root->root_item;
	super->chunk_root = root_item->bytenr;
	super->chunk_root_generation = root_item->generation;
	super->chunk_root_level = root_item->level;

	root_item = &root->fs_info->tree_root->root_item;
	super->root = root_item->bytenr;
	super->generation = root_item->generation;
	super->root_level = root_item->level;
}

int btrfs_transaction_in_commit(struct btrfs_fs_info *info)
{
	int ret = 0;
	spin_lock(&info->new_trans_lock);
	if (info->running_transaction)
		ret = info->running_transaction->in_commit;
	spin_unlock(&info->new_trans_lock);
	return ret;
}

int btrfs_commit_transaction(struct btrfs_trans_handle *trans,
			     struct btrfs_root *root)
{
	unsigned long joined = 0;
	unsigned long timeout = 1;
	struct btrfs_transaction *cur_trans;
	struct btrfs_transaction *prev_trans = NULL;
	DEFINE_WAIT(wait);
	int ret;
	int should_grow = 0;
	unsigned long now = get_seconds();
	int flush_on_commit = btrfs_test_opt(root, FLUSHONCOMMIT);

	btrfs_run_ordered_operations(root, 0);

	/* make a pass through all the delayed refs we have so far
	 * any running procs may add more while we are here
	 */
	ret = btrfs_run_delayed_refs(trans, root, 0);
	BUG_ON(ret);

	cur_trans = trans->transaction;
	/*
	 * set the flushing flag so procs in this transaction have to
	 * start sending their work down.
	 */
	cur_trans->delayed_refs.flushing = 1;

	ret = btrfs_run_delayed_refs(trans, root, 0);
	BUG_ON(ret);

	mutex_lock(&root->fs_info->trans_mutex);
	if (cur_trans->in_commit) {
		cur_trans->use_count++;
		mutex_unlock(&root->fs_info->trans_mutex);
		btrfs_end_transaction(trans, root);

		ret = wait_for_commit(root, cur_trans);
		BUG_ON(ret);

		mutex_lock(&root->fs_info->trans_mutex);
		put_transaction(cur_trans);
		mutex_unlock(&root->fs_info->trans_mutex);

		return 0;
	}

	trans->transaction->in_commit = 1;
	trans->transaction->blocked = 1;
	if (cur_trans->list.prev != &root->fs_info->trans_list) {
		prev_trans = list_entry(cur_trans->list.prev,
					struct btrfs_transaction, list);
		if (!prev_trans->commit_done) {
			prev_trans->use_count++;
			mutex_unlock(&root->fs_info->trans_mutex);

			wait_for_commit(root, prev_trans);

			mutex_lock(&root->fs_info->trans_mutex);
			put_transaction(prev_trans);
		}
	}

	if (now < cur_trans->start_time || now - cur_trans->start_time < 1)
		should_grow = 1;

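	/*
	 * Wait for other writers to detach: stay in this loop while more
	 * than one writer is attached, or, while the transaction is young
	 * (should_grow), while new writers keep joining between passes.
	 * Each pass may also flush delalloc and run ordered operations.
	 */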
	do {
		int snap_pending = 0;
		joined = cur_trans->num_joined;
		if (!list_empty(&trans->transaction->pending_snapshots))
			snap_pending = 1;

		WARN_ON(cur_trans != trans->transaction);
		prepare_to_wait(&cur_trans->writer_wait, &wait,
				TASK_UNINTERRUPTIBLE);

		if (cur_trans->num_writers > 1)
			timeout = MAX_SCHEDULE_TIMEOUT;
		else if (should_grow)
			timeout = 1;

		mutex_unlock(&root->fs_info->trans_mutex);

		if (flush_on_commit || snap_pending) {
			btrfs_start_delalloc_inodes(root, 1);
			ret = btrfs_wait_ordered_extents(root, 0, 1);
			BUG_ON(ret);
		}

		/*
		 * rename doesn't use btrfs_join_transaction, so once we
		 * set the transaction to blocked above, we aren't going
		 * to get any new ordered operations. We can safely run
		 * it here and know for sure that nothing new will be
		 * added to the list
		 */
		btrfs_run_ordered_operations(root, 1);

		smp_mb();
		if (cur_trans->num_writers > 1 || should_grow)
			schedule_timeout(timeout);

		mutex_lock(&root->fs_info->trans_mutex);
		finish_wait(&cur_trans->writer_wait, &wait);
	} while (cur_trans->num_writers > 1 ||
		 (should_grow && cur_trans->num_joined != joined));

	ret = create_pending_snapshots(trans, root->fs_info);
	BUG_ON(ret);

	ret = btrfs_run_delayed_refs(trans, root, (unsigned long)-1);
	BUG_ON(ret);

	WARN_ON(cur_trans != trans->transaction);

	/* commit_fs_roots() and commit_cowonly_roots() below are
	 * responsible for getting the various roots consistent with each
	 * other. Every pointer in the tree of tree roots has to point to
	 * the most up to date root for every subvolume and other tree.
	 * So, we have to keep the tree logging code from jumping in and
	 * changing any of the trees.
	 *
	 * At this point in the commit, there can't be any tree-log
	 * writers, but a little lower down we drop the trans mutex
	 * and let new people in. By holding the tree_log_mutex
	 * from now until after the super is written, we avoid races
	 * with the tree-log code.
	 */
	mutex_lock(&root->fs_info->tree_log_mutex);

	ret = commit_fs_roots(trans, root);
	BUG_ON(ret);

	/* commit_fs_roots gets rid of all the tree log roots; it is now
	 * safe to free the root of tree log roots
	 */
	btrfs_free_log_root_tree(trans, root->fs_info);

	ret = commit_cowonly_roots(trans, root);
	BUG_ON(ret);

	btrfs_prepare_extent_commit(trans, root);

	cur_trans = root->fs_info->running_transaction;
	spin_lock(&root->fs_info->new_trans_lock);
	root->fs_info->running_transaction = NULL;
	spin_unlock(&root->fs_info->new_trans_lock);

	btrfs_set_root_node(&root->fs_info->tree_root->root_item,
			    root->fs_info->tree_root->node);
	switch_commit_root(root->fs_info->tree_root);

	btrfs_set_root_node(&root->fs_info->chunk_root->root_item,
			    root->fs_info->chunk_root->node);
	switch_commit_root(root->fs_info->chunk_root);

	update_super_roots(root);

	if (!root->fs_info->log_root_recovering) {
		btrfs_set_super_log_root(&root->fs_info->super_copy, 0);
		btrfs_set_super_log_root_level(&root->fs_info->super_copy, 0);
	}

	memcpy(&root->fs_info->super_for_commit, &root->fs_info->super_copy,
	       sizeof(root->fs_info->super_copy));

	trans->transaction->blocked = 0;

	wake_up(&root->fs_info->transaction_wait);

	mutex_unlock(&root->fs_info->trans_mutex);
	ret = btrfs_write_and_wait_transaction(trans, root);
	BUG_ON(ret);
	write_ctree_super(trans, root, 0);

	/*
	 * the super is written, we can safely allow the tree-loggers
	 * to go about their business
	 */
	mutex_unlock(&root->fs_info->tree_log_mutex);

	btrfs_finish_extent_commit(trans, root);

	mutex_lock(&root->fs_info->trans_mutex);

	cur_trans->commit_done = 1;

	root->fs_info->last_trans_committed = cur_trans->transid;

	wake_up(&cur_trans->commit_wait);

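	/*
	 * two puts: one for the reference this handle took when it joined
	 * the transaction, and one for the base reference held since
	 * join_transaction() created it
	 */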
	put_transaction(cur_trans);
	put_transaction(cur_trans);

	mutex_unlock(&root->fs_info->trans_mutex);

	if (current->journal_info == trans)
		current->journal_info = NULL;

	kmem_cache_free(btrfs_trans_handle_cachep, trans);

	if (current != root->fs_info->transaction_kthread)
		btrfs_run_delayed_iputs(root);

	return ret;
}
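/*
 * Illustrative sketch (no call site in this file): a sync-style commit
 * pairs a start with a commit,
 *
 *	trans = btrfs_start_transaction(root, 1);
 *	ret = btrfs_commit_transaction(trans, root);
 *
 * btrfs_commit_transaction() consumes the handle, so callers must not
 * touch or end @trans after it returns.
 */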

/*
 * interface function to delete all the snapshots we have scheduled for deletion
 */
int btrfs_clean_old_snapshots(struct btrfs_root *root)
{
	LIST_HEAD(list);
	struct btrfs_fs_info *fs_info = root->fs_info;

	mutex_lock(&fs_info->trans_mutex);
	list_splice_init(&fs_info->dead_roots, &list);
	mutex_unlock(&fs_info->trans_mutex);

	while (!list_empty(&list)) {
		root = list_entry(list.next, struct btrfs_root, root_list);
		list_del(&root->root_list);

		if (btrfs_header_backref_rev(root->node) <
		    BTRFS_MIXED_BACKREF_REV)
			btrfs_drop_snapshot(root, 0);
		else
			btrfs_drop_snapshot(root, 1);
	}
	return 0;
}