]> git.proxmox.com Git - mirror_ubuntu-artful-kernel.git/blob - fs/btrfs/transaction.c
Btrfs: Split the extent_map code into two parts
[mirror_ubuntu-artful-kernel.git] / fs / btrfs / transaction.c
1 /*
2 * Copyright (C) 2007 Oracle. All rights reserved.
3 *
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public
6 * License v2 as published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
11 * General Public License for more details.
12 *
13 * You should have received a copy of the GNU General Public
14 * License along with this program; if not, write to the
15 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
16 * Boston, MA 021110-1307, USA.
17 */
18
19 #include <linux/fs.h>
20 #include <linux/sched.h>
21 #include <linux/writeback.h>
22 #include <linux/pagemap.h>
23 #include "ctree.h"
24 #include "disk-io.h"
25 #include "transaction.h"
26
27 static int total_trans = 0;
28 extern struct kmem_cache *btrfs_trans_handle_cachep;
29 extern struct kmem_cache *btrfs_transaction_cachep;
30
31 static struct workqueue_struct *trans_wq;
32
33 #define BTRFS_ROOT_TRANS_TAG 0
34 #define BTRFS_ROOT_DEFRAG_TAG 1
35
36 static void put_transaction(struct btrfs_transaction *transaction)
37 {
38 WARN_ON(transaction->use_count == 0);
39 transaction->use_count--;
40 if (transaction->use_count == 0) {
41 WARN_ON(total_trans == 0);
42 total_trans--;
43 list_del_init(&transaction->list);
44 memset(transaction, 0, sizeof(*transaction));
45 kmem_cache_free(btrfs_transaction_cachep, transaction);
46 }
47 }
48
49 static int join_transaction(struct btrfs_root *root)
50 {
51 struct btrfs_transaction *cur_trans;
52 cur_trans = root->fs_info->running_transaction;
53 if (!cur_trans) {
54 cur_trans = kmem_cache_alloc(btrfs_transaction_cachep,
55 GFP_NOFS);
56 total_trans++;
57 BUG_ON(!cur_trans);
58 root->fs_info->generation++;
59 root->fs_info->running_transaction = cur_trans;
60 root->fs_info->last_alloc = 0;
61 cur_trans->num_writers = 1;
62 cur_trans->num_joined = 0;
63 cur_trans->transid = root->fs_info->generation;
64 init_waitqueue_head(&cur_trans->writer_wait);
65 init_waitqueue_head(&cur_trans->commit_wait);
66 cur_trans->in_commit = 0;
67 cur_trans->use_count = 1;
68 cur_trans->commit_done = 0;
69 cur_trans->start_time = get_seconds();
70 INIT_LIST_HEAD(&cur_trans->pending_snapshots);
71 list_add_tail(&cur_trans->list, &root->fs_info->trans_list);
72 btrfs_ordered_inode_tree_init(&cur_trans->ordered_inode_tree);
73 extent_io_tree_init(&cur_trans->dirty_pages,
74 root->fs_info->btree_inode->i_mapping,
75 GFP_NOFS);
76 } else {
77 cur_trans->num_writers++;
78 cur_trans->num_joined++;
79 }
80
81 return 0;
82 }
83
84 static int record_root_in_trans(struct btrfs_root *root)
85 {
86 u64 running_trans_id = root->fs_info->running_transaction->transid;
87 if (root->ref_cows && root->last_trans < running_trans_id) {
88 WARN_ON(root == root->fs_info->extent_root);
89 if (root->root_item.refs != 0) {
90 radix_tree_tag_set(&root->fs_info->fs_roots_radix,
91 (unsigned long)root->root_key.objectid,
92 BTRFS_ROOT_TRANS_TAG);
93 radix_tree_tag_set(&root->fs_info->fs_roots_radix,
94 (unsigned long)root->root_key.objectid,
95 BTRFS_ROOT_DEFRAG_TAG);
96 root->commit_root = root->node;
97 extent_buffer_get(root->node);
98 } else {
99 WARN_ON(1);
100 }
101 root->last_trans = running_trans_id;
102 }
103 return 0;
104 }
105
106 struct btrfs_trans_handle *btrfs_start_transaction(struct btrfs_root *root,
107 int num_blocks)
108 {
109 struct btrfs_trans_handle *h =
110 kmem_cache_alloc(btrfs_trans_handle_cachep, GFP_NOFS);
111 int ret;
112
113 mutex_lock(&root->fs_info->trans_mutex);
114 ret = join_transaction(root);
115 BUG_ON(ret);
116
117 record_root_in_trans(root);
118 h->transid = root->fs_info->running_transaction->transid;
119 h->transaction = root->fs_info->running_transaction;
120 h->blocks_reserved = num_blocks;
121 h->blocks_used = 0;
122 h->block_group = NULL;
123 h->alloc_exclude_nr = 0;
124 h->alloc_exclude_start = 0;
125 root->fs_info->running_transaction->use_count++;
126 mutex_unlock(&root->fs_info->trans_mutex);
127 return h;
128 }
129
130 int btrfs_end_transaction(struct btrfs_trans_handle *trans,
131 struct btrfs_root *root)
132 {
133 struct btrfs_transaction *cur_trans;
134
135 mutex_lock(&root->fs_info->trans_mutex);
136 cur_trans = root->fs_info->running_transaction;
137 WARN_ON(cur_trans != trans->transaction);
138 WARN_ON(cur_trans->num_writers < 1);
139 cur_trans->num_writers--;
140 if (waitqueue_active(&cur_trans->writer_wait))
141 wake_up(&cur_trans->writer_wait);
142 put_transaction(cur_trans);
143 mutex_unlock(&root->fs_info->trans_mutex);
144 memset(trans, 0, sizeof(*trans));
145 kmem_cache_free(btrfs_trans_handle_cachep, trans);
146 return 0;
147 }
148
149
/*
 * Write out and wait on every btree page dirtied by this transaction.
 *
 * Without a transaction context we simply flush the whole btree inode
 * mapping.  Otherwise the transaction's dirty_pages extent tree tells
 * us which byte ranges to push out, one EXTENT_DIRTY range at a time.
 *
 * Returns the last write error seen, or 0 on success.
 */
int btrfs_write_and_wait_transaction(struct btrfs_trans_handle *trans,
				     struct btrfs_root *root)
{
	int ret;
	int err;
	int werr = 0;
	struct extent_io_tree *dirty_pages;
	struct page *page;
	struct inode *btree_inode = root->fs_info->btree_inode;
	u64 start;
	u64 end;
	unsigned long index;

	if (!trans || !trans->transaction) {
		return filemap_write_and_wait(btree_inode->i_mapping);
	}
	dirty_pages = &trans->transaction->dirty_pages;
	while(1) {
		ret = find_first_extent_bit(dirty_pages, 0, &start, &end,
					    EXTENT_DIRTY);
		if (ret)
			break;
		/* drop the range from the dirty tree before writing it */
		clear_extent_dirty(dirty_pages, start, end, GFP_NOFS);
		while(start <= end) {
			index = start >> PAGE_CACHE_SHIFT;
			/* advance start now so every continue below is safe */
			start = (u64)(index + 1) << PAGE_CACHE_SHIFT;
			page = find_lock_page(btree_inode->i_mapping, index);
			if (!page)
				continue;
			if (PageWriteback(page)) {
				if (PageDirty(page))
					/* redirtied during writeback: wait,
					 * then write it again below */
					wait_on_page_writeback(page);
				else {
					/* clean and already in flight: skip */
					unlock_page(page);
					page_cache_release(page);
					continue;
				}
			}
			err = write_one_page(page, 0);
			if (err)
				werr = err;
			page_cache_release(page);
		}
	}
	/* wait for all outstanding writeback, keeping the last error */
	err = filemap_fdatawait(btree_inode->i_mapping);
	if (err)
		werr = err;
	return werr;
}
199
200 int btrfs_commit_tree_roots(struct btrfs_trans_handle *trans,
201 struct btrfs_root *root)
202 {
203 int ret;
204 u64 old_extent_block;
205 struct btrfs_fs_info *fs_info = root->fs_info;
206 struct btrfs_root *tree_root = fs_info->tree_root;
207 struct btrfs_root *extent_root = fs_info->extent_root;
208
209 btrfs_write_dirty_block_groups(trans, extent_root);
210 while(1) {
211 old_extent_block = btrfs_root_bytenr(&extent_root->root_item);
212 if (old_extent_block == extent_root->node->start)
213 break;
214 btrfs_set_root_bytenr(&extent_root->root_item,
215 extent_root->node->start);
216 btrfs_set_root_level(&extent_root->root_item,
217 btrfs_header_level(extent_root->node));
218 ret = btrfs_update_root(trans, tree_root,
219 &extent_root->root_key,
220 &extent_root->root_item);
221 BUG_ON(ret);
222 btrfs_write_dirty_block_groups(trans, extent_root);
223 }
224 return 0;
225 }
226
227 static int wait_for_commit(struct btrfs_root *root,
228 struct btrfs_transaction *commit)
229 {
230 DEFINE_WAIT(wait);
231 mutex_lock(&root->fs_info->trans_mutex);
232 while(!commit->commit_done) {
233 prepare_to_wait(&commit->commit_wait, &wait,
234 TASK_UNINTERRUPTIBLE);
235 if (commit->commit_done)
236 break;
237 mutex_unlock(&root->fs_info->trans_mutex);
238 schedule();
239 mutex_lock(&root->fs_info->trans_mutex);
240 }
241 mutex_unlock(&root->fs_info->trans_mutex);
242 finish_wait(&commit->commit_wait, &wait);
243 return 0;
244 }
245
/*
 * A snapshot root queued for deletion.  @root is a private copy whose
 * node pins the old commit root; @latest_root is the live root the
 * freed space is credited back to in drop_dirty_roots().
 */
struct dirty_root {
	struct list_head list;
	struct btrfs_root *root;
	struct btrfs_root *latest_root;
};
251
252 int btrfs_add_dead_root(struct btrfs_root *root,
253 struct btrfs_root *latest,
254 struct list_head *dead_list)
255 {
256 struct dirty_root *dirty;
257
258 dirty = kmalloc(sizeof(*dirty), GFP_NOFS);
259 if (!dirty)
260 return -ENOMEM;
261 dirty->root = root;
262 dirty->latest_root = latest;
263 list_add(&dirty->list, dead_list);
264 return 0;
265 }
266
267 static int add_dirty_roots(struct btrfs_trans_handle *trans,
268 struct radix_tree_root *radix,
269 struct list_head *list)
270 {
271 struct dirty_root *dirty;
272 struct btrfs_root *gang[8];
273 struct btrfs_root *root;
274 int i;
275 int ret;
276 int err = 0;
277 u32 refs;
278
279 while(1) {
280 ret = radix_tree_gang_lookup_tag(radix, (void **)gang, 0,
281 ARRAY_SIZE(gang),
282 BTRFS_ROOT_TRANS_TAG);
283 if (ret == 0)
284 break;
285 for (i = 0; i < ret; i++) {
286 root = gang[i];
287 radix_tree_tag_clear(radix,
288 (unsigned long)root->root_key.objectid,
289 BTRFS_ROOT_TRANS_TAG);
290 if (root->commit_root == root->node) {
291 WARN_ON(root->node->start !=
292 btrfs_root_bytenr(&root->root_item));
293 free_extent_buffer(root->commit_root);
294 root->commit_root = NULL;
295
296 /* make sure to update the root on disk
297 * so we get any updates to the block used
298 * counts
299 */
300 err = btrfs_update_root(trans,
301 root->fs_info->tree_root,
302 &root->root_key,
303 &root->root_item);
304 continue;
305 }
306 dirty = kmalloc(sizeof(*dirty), GFP_NOFS);
307 BUG_ON(!dirty);
308 dirty->root = kmalloc(sizeof(*dirty->root), GFP_NOFS);
309 BUG_ON(!dirty->root);
310
311 memset(&root->root_item.drop_progress, 0,
312 sizeof(struct btrfs_disk_key));
313 root->root_item.drop_level = 0;
314
315 memcpy(dirty->root, root, sizeof(*root));
316 dirty->root->node = root->commit_root;
317 dirty->latest_root = root;
318 root->commit_root = NULL;
319
320 root->root_key.offset = root->fs_info->generation;
321 btrfs_set_root_bytenr(&root->root_item,
322 root->node->start);
323 btrfs_set_root_level(&root->root_item,
324 btrfs_header_level(root->node));
325 err = btrfs_insert_root(trans, root->fs_info->tree_root,
326 &root->root_key,
327 &root->root_item);
328 if (err)
329 break;
330
331 refs = btrfs_root_refs(&dirty->root->root_item);
332 btrfs_set_root_refs(&dirty->root->root_item, refs - 1);
333 err = btrfs_update_root(trans, root->fs_info->tree_root,
334 &dirty->root->root_key,
335 &dirty->root->root_item);
336
337 BUG_ON(err);
338 if (refs == 1) {
339 list_add(&dirty->list, list);
340 } else {
341 WARN_ON(1);
342 kfree(dirty->root);
343 kfree(dirty);
344 }
345 }
346 }
347 return err;
348 }
349
350 int btrfs_defrag_root(struct btrfs_root *root, int cacheonly)
351 {
352 struct btrfs_fs_info *info = root->fs_info;
353 int ret;
354 struct btrfs_trans_handle *trans;
355 unsigned long nr;
356
357 if (root->defrag_running)
358 return 0;
359 trans = btrfs_start_transaction(root, 1);
360 while (1) {
361 root->defrag_running = 1;
362 ret = btrfs_defrag_leaves(trans, root, cacheonly);
363 nr = trans->blocks_used;
364 btrfs_end_transaction(trans, root);
365 mutex_unlock(&info->fs_mutex);
366 btrfs_btree_balance_dirty(info->tree_root, nr);
367 cond_resched();
368
369 mutex_lock(&info->fs_mutex);
370 trans = btrfs_start_transaction(root, 1);
371 if (ret != -EAGAIN)
372 break;
373 }
374 root->defrag_running = 0;
375 radix_tree_tag_clear(&info->fs_roots_radix,
376 (unsigned long)root->root_key.objectid,
377 BTRFS_ROOT_DEFRAG_TAG);
378 btrfs_end_transaction(trans, root);
379 return 0;
380 }
381
382 int btrfs_defrag_dirty_roots(struct btrfs_fs_info *info)
383 {
384 struct btrfs_root *gang[1];
385 struct btrfs_root *root;
386 int i;
387 int ret;
388 int err = 0;
389 u64 last = 0;
390
391 while(1) {
392 ret = radix_tree_gang_lookup_tag(&info->fs_roots_radix,
393 (void **)gang, last,
394 ARRAY_SIZE(gang),
395 BTRFS_ROOT_DEFRAG_TAG);
396 if (ret == 0)
397 break;
398 for (i = 0; i < ret; i++) {
399 root = gang[i];
400 last = root->root_key.objectid + 1;
401 btrfs_defrag_root(root, 1);
402 }
403 }
404 btrfs_defrag_root(info->extent_root, 1);
405 return err;
406 }
407
/*
 * Actually delete the snapshot roots queued on @list: walk each
 * dirty_root, drop all the blocks its old commit root references,
 * credit the freed space back to the live root, and remove the root
 * item from the tree root.
 *
 * btrfs_drop_snapshot() works in bounded chunks and returns -EAGAIN
 * while there is more to do; between chunks the transaction is ended
 * and fs_mutex dropped so dirty btree pages can be throttled.
 */
static int drop_dirty_roots(struct btrfs_root *tree_root,
			    struct list_head *list)
{
	struct dirty_root *dirty;
	struct btrfs_trans_handle *trans;
	unsigned long nr;
	u64 num_bytes;
	u64 bytes_used;
	int ret = 0;
	int err;

	while(!list_empty(list)) {
		struct btrfs_root *root;

		mutex_lock(&tree_root->fs_info->fs_mutex);
		dirty = list_entry(list->next, struct dirty_root, list);
		list_del_init(&dirty->list);

		/* space used before the drop; compared against after */
		num_bytes = btrfs_root_used(&dirty->root->root_item);
		root = dirty->latest_root;
		root->fs_info->throttles++;

		while(1) {
			trans = btrfs_start_transaction(tree_root, 1);
			ret = btrfs_drop_snapshot(trans, dirty->root);
			if (ret != -EAGAIN) {
				break;
			}

			/* write back the root item (drop_progress was
			 * zeroed in add_dirty_roots; presumably the drop
			 * records its position there -- so a restart can
			 * resume.  TODO confirm against
			 * btrfs_drop_snapshot) */
			err = btrfs_update_root(trans,
					tree_root,
					&dirty->root->root_key,
					&dirty->root->root_item);
			if (err)
				ret = err;
			nr = trans->blocks_used;
			ret = btrfs_end_transaction(trans, tree_root);
			BUG_ON(ret);
			/* drop fs_mutex while throttling dirty pages */
			mutex_unlock(&tree_root->fs_info->fs_mutex);
			btrfs_btree_balance_dirty(tree_root, nr);
			cond_resched();
			mutex_lock(&tree_root->fs_info->fs_mutex);
		}
		BUG_ON(ret);
		root->fs_info->throttles--;

		/* credit the space the drop freed back to the live root */
		num_bytes -= btrfs_root_used(&dirty->root->root_item);
		bytes_used = btrfs_root_used(&root->root_item);
		if (num_bytes) {
			record_root_in_trans(root);
			btrfs_set_root_used(&root->root_item,
					    bytes_used - num_bytes);
		}
		ret = btrfs_del_root(trans, tree_root, &dirty->root->root_key);
		if (ret) {
			BUG();
			break;
		}
		nr = trans->blocks_used;
		ret = btrfs_end_transaction(trans, tree_root);
		BUG_ON(ret);

		/* release the commit-root buffer ref held by the copy */
		free_extent_buffer(dirty->root->node);
		kfree(dirty->root);
		kfree(dirty);
		mutex_unlock(&tree_root->fs_info->fs_mutex);

		btrfs_btree_balance_dirty(tree_root, nr);
		cond_resched();
	}
	return ret;
}
480
/*
 * Flush data for every inode in the transaction's ordered inode tree.
 *
 * Pass one starts asynchronous writeback on each tracked inode
 * without removing it from the tree.  Pass two removes each entry and
 * does a synchronous write-and-wait.  trans_mutex and fs_mutex are
 * held on entry and dropped around the actual page I/O (which can
 * block and may need those locks elsewhere).  Always returns 0.
 */
int btrfs_write_ordered_inodes(struct btrfs_trans_handle *trans,
			       struct btrfs_root *root)
{
	struct btrfs_transaction *cur_trans = trans->transaction;
	struct inode *inode;
	u64 root_objectid = 0;
	u64 objectid = 0;
	int ret;

	/* throttles is consulted elsewhere to slow new writers */
	root->fs_info->throttles++;
	while(1) {
		ret = btrfs_find_first_ordered_inode(
					&cur_trans->ordered_inode_tree,
					&root_objectid, &objectid, &inode);
		if (!ret)
			break;

		mutex_unlock(&root->fs_info->trans_mutex);
		mutex_unlock(&root->fs_info->fs_mutex);

		if (S_ISREG(inode->i_mode))
			filemap_fdatawrite(inode->i_mapping);
		iput(inode);

		mutex_lock(&root->fs_info->fs_mutex);
		mutex_lock(&root->fs_info->trans_mutex);
	}
	while(1) {
		/* restart the search from the start on every iteration */
		root_objectid = 0;
		objectid = 0;
		ret = btrfs_find_del_first_ordered_inode(
					&cur_trans->ordered_inode_tree,
					&root_objectid, &objectid, &inode);
		if (!ret)
			break;
		mutex_unlock(&root->fs_info->trans_mutex);
		mutex_unlock(&root->fs_info->fs_mutex);

		if (S_ISREG(inode->i_mode))
			filemap_write_and_wait(inode->i_mapping);
		/* NOTE(review): the bare atomic_dec on i_count before
		 * iput looks like it drops a reference taken when the
		 * inode entered the ordered tree -- confirm against the
		 * ordered_inode_tree insert path */
		atomic_dec(&inode->i_count);
		iput(inode);

		mutex_lock(&root->fs_info->fs_mutex);
		mutex_lock(&root->fs_info->trans_mutex);
	}
	root->fs_info->throttles--;
	return 0;
}
530
/*
 * Materialize one pending snapshot: pick a free objectid, copy the
 * source root's top node for the snapshot, insert the new root item,
 * then add the directory entry and inode ref that make the snapshot
 * visible under the filesystem's root directory.
 *
 * Returns 0 on success or the first error encountered.
 */
static int create_pending_snapshot(struct btrfs_trans_handle *trans,
				   struct btrfs_fs_info *fs_info,
				   struct btrfs_pending_snapshot *pending)
{
	struct btrfs_key key;
	struct btrfs_root_item new_root_item;
	struct btrfs_root *tree_root = fs_info->tree_root;
	struct btrfs_root *root = pending->root;
	struct extent_buffer *tmp;
	int ret;
	u64 objectid;

	ret = btrfs_find_free_objectid(trans, tree_root, 0, &objectid);
	if (ret)
		goto fail;

	/* the snapshot starts as a byte copy of the source root item */
	memcpy(&new_root_item, &root->root_item, sizeof(new_root_item));

	key.objectid = objectid;
	key.offset = 1;
	btrfs_set_key_type(&key, BTRFS_ROOT_ITEM_KEY);

	/* COW the source's top node in place first; the extra ref
	 * presumably keeps the pre-COW node alive across the call, and
	 * the buffer returned in tmp is not needed -- TODO confirm
	 * against btrfs_cow_block */
	extent_buffer_get(root->node);
	btrfs_cow_block(trans, root, root->node, NULL, 0, &tmp);
	free_extent_buffer(tmp);

	/* tmp now receives the snapshot's own copy of the root node */
	btrfs_copy_root(trans, root, root->node, &tmp, objectid);

	btrfs_set_root_bytenr(&new_root_item, tmp->start);
	btrfs_set_root_level(&new_root_item, btrfs_header_level(tmp));
	ret = btrfs_insert_root(trans, root->fs_info->tree_root, &key,
				&new_root_item);
	free_extent_buffer(tmp);
	if (ret)
		goto fail;

	/*
	 * insert the directory item
	 */
	key.offset = (u64)-1;
	ret = btrfs_insert_dir_item(trans, root->fs_info->tree_root,
				    pending->name, strlen(pending->name),
				    root->fs_info->sb->s_root->d_inode->i_ino,
				    &key, BTRFS_FT_DIR);

	if (ret)
		goto fail;

	ret = btrfs_insert_inode_ref(trans, root->fs_info->tree_root,
			     pending->name, strlen(pending->name), objectid,
			     root->fs_info->sb->s_root->d_inode->i_ino);
fail:
	return ret;
}
585
586 static int create_pending_snapshots(struct btrfs_trans_handle *trans,
587 struct btrfs_fs_info *fs_info)
588 {
589 struct btrfs_pending_snapshot *pending;
590 struct list_head *head = &trans->transaction->pending_snapshots;
591 int ret;
592
593 while(!list_empty(head)) {
594 pending = list_entry(head->next,
595 struct btrfs_pending_snapshot, list);
596 ret = create_pending_snapshot(trans, fs_info, pending);
597 BUG_ON(ret);
598 list_del(&pending->list);
599 kfree(pending->name);
600 kfree(pending);
601 }
602 return 0;
603 }
604
/*
 * Commit the current transaction to disk.
 *
 * If a commit of this transaction is already running we just wait
 * for it.  Otherwise we mark the transaction in_commit, wait for the
 * previous transaction's commit (writes must hit disk in order),
 * drain concurrent writers, create pending snapshots, relocate dirty
 * fs roots, commit the tree roots, write the new super and finally
 * hand any now-dead roots over for deletion.
 *
 * Called with fs_mutex held; both fs_mutex and trans_mutex are
 * dropped and retaken at several points below.
 */
int btrfs_commit_transaction(struct btrfs_trans_handle *trans,
			     struct btrfs_root *root)
{
	unsigned long joined = 0;
	unsigned long timeout = 1;
	struct btrfs_transaction *cur_trans;
	struct btrfs_transaction *prev_trans = NULL;
	struct list_head dirty_fs_roots;
	struct extent_io_tree *pinned_copy;
	DEFINE_WAIT(wait);
	int ret;

	INIT_LIST_HEAD(&dirty_fs_roots);

	mutex_lock(&root->fs_info->trans_mutex);
	if (trans->transaction->in_commit) {
		/* someone else is committing: drop our handle and wait
		 * for their commit to finish */
		cur_trans = trans->transaction;
		trans->transaction->use_count++;
		mutex_unlock(&root->fs_info->trans_mutex);
		btrfs_end_transaction(trans, root);

		mutex_unlock(&root->fs_info->fs_mutex);
		ret = wait_for_commit(root, cur_trans);
		BUG_ON(ret);

		mutex_lock(&root->fs_info->trans_mutex);
		put_transaction(cur_trans);
		mutex_unlock(&root->fs_info->trans_mutex);

		mutex_lock(&root->fs_info->fs_mutex);
		return 0;
	}

	/* snapshot of the pinned extents, consumed by
	 * btrfs_finish_extent_commit() after the super is on disk */
	pinned_copy = kmalloc(sizeof(*pinned_copy), GFP_NOFS);
	if (!pinned_copy)
		return -ENOMEM;

	extent_io_tree_init(pinned_copy,
			     root->fs_info->btree_inode->i_mapping, GFP_NOFS);

	trans->transaction->in_commit = 1;
	cur_trans = trans->transaction;
	/* commits must reach disk in order: wait out an unfinished
	 * previous transaction first */
	if (cur_trans->list.prev != &root->fs_info->trans_list) {
		prev_trans = list_entry(cur_trans->list.prev,
					struct btrfs_transaction, list);
		if (!prev_trans->commit_done) {
			prev_trans->use_count++;
			mutex_unlock(&root->fs_info->fs_mutex);
			mutex_unlock(&root->fs_info->trans_mutex);

			wait_for_commit(root, prev_trans);

			mutex_lock(&root->fs_info->fs_mutex);
			mutex_lock(&root->fs_info->trans_mutex);
			put_transaction(prev_trans);
		}
	}

	/* drain other writers; loop until no new writers joined while
	 * we slept and we are the only writer left */
	do {
		joined = cur_trans->num_joined;
		WARN_ON(cur_trans != trans->transaction);
		prepare_to_wait(&cur_trans->writer_wait, &wait,
				TASK_UNINTERRUPTIBLE);

		if (cur_trans->num_writers > 1)
			timeout = MAX_SCHEDULE_TIMEOUT;
		else
			timeout = 1;

		mutex_unlock(&root->fs_info->fs_mutex);
		mutex_unlock(&root->fs_info->trans_mutex);

		schedule_timeout(timeout);

		mutex_lock(&root->fs_info->fs_mutex);
		mutex_lock(&root->fs_info->trans_mutex);
		finish_wait(&cur_trans->writer_wait, &wait);
		ret = btrfs_write_ordered_inodes(trans, root);

	} while (cur_trans->num_writers > 1 ||
		 (cur_trans->num_joined != joined));

	ret = create_pending_snapshots(trans, root->fs_info);
	BUG_ON(ret);

	WARN_ON(cur_trans != trans->transaction);

	ret = add_dirty_roots(trans, &root->fs_info->fs_roots_radix,
			      &dirty_fs_roots);
	BUG_ON(ret);

	ret = btrfs_commit_tree_roots(trans, root);
	BUG_ON(ret);

	/* from here on, new work goes into a new transaction */
	cur_trans = root->fs_info->running_transaction;
	spin_lock(&root->fs_info->new_trans_lock);
	root->fs_info->running_transaction = NULL;
	spin_unlock(&root->fs_info->new_trans_lock);
	btrfs_set_super_generation(&root->fs_info->super_copy,
				   cur_trans->transid);
	btrfs_set_super_root(&root->fs_info->super_copy,
			     root->fs_info->tree_root->node->start);
	btrfs_set_super_root_level(&root->fs_info->super_copy,
			   btrfs_header_level(root->fs_info->tree_root->node));

	write_extent_buffer(root->fs_info->sb_buffer,
			    &root->fs_info->super_copy, 0,
			    sizeof(root->fs_info->super_copy));

	btrfs_copy_pinned(root, pinned_copy);

	mutex_unlock(&root->fs_info->trans_mutex);
	mutex_unlock(&root->fs_info->fs_mutex);
	/* flush all dirty btree pages, then commit the super */
	ret = btrfs_write_and_wait_transaction(trans, root);
	BUG_ON(ret);
	write_ctree_super(trans, root);

	mutex_lock(&root->fs_info->fs_mutex);
	btrfs_finish_extent_commit(trans, root, pinned_copy);
	mutex_lock(&root->fs_info->trans_mutex);

	kfree(pinned_copy);

	cur_trans->commit_done = 1;
	root->fs_info->last_trans_committed = cur_trans->transid;
	wake_up(&cur_trans->commit_wait);
	/* two puts: the handle's reference from btrfs_start_transaction
	 * (the handle is freed directly below, not via
	 * btrfs_end_transaction) and the transaction's base reference */
	put_transaction(cur_trans);
	put_transaction(cur_trans);

	if (root->fs_info->closing)
		list_splice_init(&root->fs_info->dead_roots, &dirty_fs_roots);
	else
		list_splice_init(&dirty_fs_roots, &root->fs_info->dead_roots);

	mutex_unlock(&root->fs_info->trans_mutex);
	kmem_cache_free(btrfs_trans_handle_cachep, trans);

	/* on unmount, drop the dead roots now instead of deferring */
	if (root->fs_info->closing) {
		mutex_unlock(&root->fs_info->fs_mutex);
		drop_dirty_roots(root->fs_info->tree_root, &dirty_fs_roots);
		mutex_lock(&root->fs_info->fs_mutex);
	}
	return ret;
}
749
750 int btrfs_clean_old_snapshots(struct btrfs_root *root)
751 {
752 struct list_head dirty_roots;
753 INIT_LIST_HEAD(&dirty_roots);
754
755 mutex_lock(&root->fs_info->trans_mutex);
756 list_splice_init(&root->fs_info->dead_roots, &dirty_roots);
757 mutex_unlock(&root->fs_info->trans_mutex);
758
759 if (!list_empty(&dirty_roots)) {
760 drop_dirty_roots(root, &dirty_roots);
761 }
762 return 0;
763 }
/*
 * Periodic work item: commit the running transaction once it is at
 * least 30 seconds old, defragging dirty roots and cleaning dead
 * snapshots along the way, then requeue itself.
 *
 * The signature differs across kernel versions: pre-2.6.20 workqueues
 * pass a void * context, newer ones a work_struct to container_of.
 */
#if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,18)
void btrfs_transaction_cleaner(void *p)
#else
void btrfs_transaction_cleaner(struct work_struct *work)
#endif
{
#if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,18)
	struct btrfs_fs_info *fs_info = p;
#else
	struct btrfs_fs_info *fs_info = container_of(work,
						     struct btrfs_fs_info,
						     trans_work.work);

#endif
	struct btrfs_root *root = fs_info->tree_root;
	struct btrfs_transaction *cur;
	struct btrfs_trans_handle *trans;
	unsigned long now;
	unsigned long delay = HZ * 30;
	int ret;

	mutex_lock(&root->fs_info->fs_mutex);
	mutex_lock(&root->fs_info->trans_mutex);
	cur = root->fs_info->running_transaction;
	if (!cur) {
		/* nothing running; check again in 30s */
		mutex_unlock(&root->fs_info->trans_mutex);
		goto out;
	}
	now = get_seconds();
	/* the now < start_time test guards against clock steps */
	if (now < cur->start_time || now - cur->start_time < 30) {
		mutex_unlock(&root->fs_info->trans_mutex);
		/* transaction is still young; poll sooner */
		delay = HZ * 5;
		goto out;
	}
	mutex_unlock(&root->fs_info->trans_mutex);
	btrfs_defrag_dirty_roots(root->fs_info);
	trans = btrfs_start_transaction(root, 1);
	ret = btrfs_commit_transaction(trans, root);
out:
	mutex_unlock(&root->fs_info->fs_mutex);
	btrfs_clean_old_snapshots(root);
	btrfs_transaction_queue_work(root, delay);
}
807
/* Schedule the transaction cleaner to run after @delay jiffies. */
void btrfs_transaction_queue_work(struct btrfs_root *root, int delay)
{
	queue_delayed_work(trans_wq, &root->fs_info->trans_work, delay);
}
812
/*
 * Stop the self-requeueing transaction cleaner and wait for any
 * instance still running on the workqueue to finish.
 */
void btrfs_transaction_flush_work(struct btrfs_root *root)
{
	cancel_rearming_delayed_workqueue(trans_wq, &root->fs_info->trans_work);
	flush_workqueue(trans_wq);
}
818
/* Create the module-wide workqueue used by the transaction cleaner. */
void __init btrfs_init_transaction_sys(void)
{
	trans_wq = create_workqueue("btrfs");
}
823
/* Tear down the transaction-cleaner workqueue at module exit. */
void btrfs_exit_transaction_sys(void)
{
	destroy_workqueue(trans_wq);
}
828