Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/mason/linux...
author Linus Torvalds <torvalds@linux-foundation.org>
Mon, 7 Nov 2011 04:03:41 +0000 (20:03 -0800)
committer Linus Torvalds <torvalds@linux-foundation.org>
Mon, 7 Nov 2011 04:03:41 +0000 (20:03 -0800)
* 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/mason/linux-btrfs: (114 commits)
  Btrfs: check for a null fs root when writing to the backup root log
  Btrfs: fix race during transaction joins
  Btrfs: fix a potential btrfs_bio leak on scrub fixups
  Btrfs: rename btrfs_bio multi -> bbio for consistency
  Btrfs: stop leaking btrfs_bios on readahead
  Btrfs: stop the readahead threads on failed mount
  Btrfs: fix extent_buffer leak in the metadata IO error handling
  Btrfs: fix the new inspection ioctls for 32 bit compat
  Btrfs: fix delayed insertion reservation
  Btrfs: ClearPageError during writepage and clean_tree_block
  Btrfs: be smarter about committing the transaction in reserve_metadata_bytes
  Btrfs: make a delayed_block_rsv for the delayed item insertion
  Btrfs: add a log of past tree roots
  btrfs: separate superblock items out of fs_info
  Btrfs: use the global reserve when truncating the free space cache inode
  Btrfs: release metadata from global reserve if we have to fallback for unlink
  Btrfs: make sure to flush queued bios if write_cache_pages waits
  Btrfs: fix extent pinning bugs in the tree log
  Btrfs: make sure btrfs_remove_free_space doesn't leak EAGAIN
  Btrfs: don't wait as long for more batches during SSD log commit
  ...

fs/btrfs/delayed-inode.c
fs/btrfs/disk-io.c
fs/btrfs/extent-tree.c
fs/btrfs/file.c
fs/btrfs/inode.c
fs/btrfs/tree-log.c
fs/btrfs/xattr.c

diff --combined fs/btrfs/delayed-inode.c
index ae4d9cd10961d488dae63cc196187f95f6611ee9,bbe8496d5339f24a5939ceb32778338f06869a49..3a1b939c9ae2a7d0617946340dafa534baa402e6
@@@ -591,7 -591,7 +591,7 @@@ static int btrfs_delayed_item_reserve_m
                return 0;
  
        src_rsv = trans->block_rsv;
-       dst_rsv = &root->fs_info->global_block_rsv;
+       dst_rsv = &root->fs_info->delayed_block_rsv;
  
        num_bytes = btrfs_calc_trans_metadata_size(root, 1);
        ret = btrfs_block_rsv_migrate(src_rsv, dst_rsv, num_bytes);
@@@ -609,7 -609,7 +609,7 @@@ static void btrfs_delayed_item_release_
        if (!item->bytes_reserved)
                return;
  
-       rsv = &root->fs_info->global_block_rsv;
+       rsv = &root->fs_info->delayed_block_rsv;
        btrfs_block_rsv_release(root, rsv,
                                item->bytes_reserved);
  }
@@@ -624,13 -624,36 +624,36 @@@ static int btrfs_delayed_inode_reserve_
        u64 num_bytes;
        int ret;
  
-       if (!trans->bytes_reserved)
-               return 0;
        src_rsv = trans->block_rsv;
-       dst_rsv = &root->fs_info->global_block_rsv;
+       dst_rsv = &root->fs_info->delayed_block_rsv;
  
        num_bytes = btrfs_calc_trans_metadata_size(root, 1);
+       /*
+        * btrfs_dirty_inode will update the inode under btrfs_join_transaction,
+        * which for speed does not reserve space.  This is a problem since we
+        * still need to reserve space for this update, so try to reserve the
+        * space.
+        *
+        * Now if src_rsv == delalloc_block_rsv we'll let it just steal since
+        * we're accounted for.
+        */
+       if (!trans->bytes_reserved &&
+           src_rsv != &root->fs_info->delalloc_block_rsv) {
+               ret = btrfs_block_rsv_add_noflush(root, dst_rsv, num_bytes);
+               /*
+                * Since we're under a transaction reserve_metadata_bytes could
+                * try to commit the transaction which will make it return
+                * EAGAIN to make us stop the transaction we have, so return
+                * ENOSPC instead so that btrfs_dirty_inode knows what to do.
+                */
+               if (ret == -EAGAIN)
+                       ret = -ENOSPC;
+               if (!ret)
+                       node->bytes_reserved = num_bytes;
+               return ret;
+       }
        ret = btrfs_block_rsv_migrate(src_rsv, dst_rsv, num_bytes);
        if (!ret)
                node->bytes_reserved = num_bytes;
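
The hunk above is the core of the delayed-inode reservation change: when the joined transaction reserved nothing and the source is not the delalloc reserve, it attempts a no-flush reservation and reports EAGAIN as ENOSPC so the caller can fall back to a plain inode update. A minimal userspace model of that decision, with invented helpers standing in for btrfs_block_rsv_add_noflush() and btrfs_block_rsv_migrate():

    /* build: cc -o rsv rsv.c && ./rsv */
    #include <errno.h>
    #include <stdio.h>

    /* stand-in for btrfs_block_rsv_add_noflush(): pretend the reservation
     * wanted to commit the transaction and bailed with EAGAIN */
    static int reserve_noflush(long *dst, long bytes)
    {
            (void)dst; (void)bytes;
            return -EAGAIN;
    }

    /* stand-in for btrfs_block_rsv_migrate() */
    static int migrate(long *src, long *dst, long bytes)
    {
            if (*src < bytes)
                    return -ENOSPC;
            *src -= bytes;
            *dst += bytes;
            return 0;
    }

    static int reserve_delayed_inode(long *src, long *dst, long bytes,
                                     int trans_reserved, int src_is_delalloc)
    {
            int ret;

            if (!trans_reserved && !src_is_delalloc) {
                    ret = reserve_noflush(dst, bytes);
                    /* EAGAIN would tear down the joined transaction, so
                     * report ENOSPC and let the caller fall back */
                    if (ret == -EAGAIN)
                            ret = -ENOSPC;
                    return ret;
            }
            return migrate(src, dst, bytes);
    }

    int main(void)
    {
            long src = 4096, dst = 0;

            printf("%d\n", reserve_delayed_inode(&src, &dst, 4096, 0, 0)); /* -28 */
            printf("%d\n", reserve_delayed_inode(&src, &dst, 4096, 1, 0)); /* 0 */
            return 0;
    }
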
@@@ -646,7 -669,7 +669,7 @@@ static void btrfs_delayed_inode_release
        if (!node->bytes_reserved)
                return;
  
-       rsv = &root->fs_info->global_block_rsv;
+       rsv = &root->fs_info->delayed_block_rsv;
        btrfs_block_rsv_release(root, rsv,
                                node->bytes_reserved);
        node->bytes_reserved = 0;
@@@ -1026,7 -1049,7 +1049,7 @@@ int btrfs_run_delayed_items(struct btrf
        path->leave_spinning = 1;
  
        block_rsv = trans->block_rsv;
-       trans->block_rsv = &root->fs_info->global_block_rsv;
+       trans->block_rsv = &root->fs_info->delayed_block_rsv;
  
        delayed_root = btrfs_get_delayed_root(root);
  
@@@ -1069,7 -1092,7 +1092,7 @@@ static int __btrfs_commit_inode_delayed
        path->leave_spinning = 1;
  
        block_rsv = trans->block_rsv;
-       trans->block_rsv = &node->root->fs_info->global_block_rsv;
+       trans->block_rsv = &node->root->fs_info->delayed_block_rsv;
  
        ret = btrfs_insert_delayed_items(trans, path, node->root, node);
        if (!ret)
@@@ -1149,7 -1172,7 +1172,7 @@@ static void btrfs_async_run_delayed_nod
                goto free_path;
  
        block_rsv = trans->block_rsv;
-       trans->block_rsv = &root->fs_info->global_block_rsv;
+       trans->block_rsv = &root->fs_info->delayed_block_rsv;
  
        ret = btrfs_insert_delayed_items(trans, path, root, delayed_node);
        if (!ret)
@@@ -1641,7 -1664,7 +1664,7 @@@ int btrfs_fill_inode(struct inode *inod
        inode->i_gid = btrfs_stack_inode_gid(inode_item);
        btrfs_i_size_write(inode, btrfs_stack_inode_size(inode_item));
        inode->i_mode = btrfs_stack_inode_mode(inode_item);
 -      inode->i_nlink = btrfs_stack_inode_nlink(inode_item);
 +      set_nlink(inode, btrfs_stack_inode_nlink(inode_item));
        inode_set_bytes(inode, btrfs_stack_inode_nbytes(inode_item));
        BTRFS_I(inode)->generation = btrfs_stack_inode_generation(inode_item);
        BTRFS_I(inode)->sequence = btrfs_stack_inode_sequence(inode_item);
@@@ -1686,11 -1709,8 +1709,8 @@@ int btrfs_delayed_update_inode(struct b
        }
  
        ret = btrfs_delayed_inode_reserve_metadata(trans, root, delayed_node);
-       /*
-        * we must reserve enough space when we start a new transaction,
-        * so reserving metadata failure is impossible
-        */
-       BUG_ON(ret);
+       if (ret)
+               goto release_node;
  
        fill_stack_inode_item(trans, &delayed_node->inode_item, inode);
        delayed_node->inode_dirty = 1;
diff --combined fs/btrfs/disk-io.c
index 07ea91879a91a35d99f2ca6e1cbffd52bdbcae82,e53a5bb85670a302de4aa6bb4d859c3b7e3a50ea..102c176fc29c0b2bfed6259d259203347a9f6a07
@@@ -256,8 -256,7 +256,7 @@@ void btrfs_csum_final(u32 crc, char *re
  static int csum_tree_block(struct btrfs_root *root, struct extent_buffer *buf,
                           int verify)
  {
-       u16 csum_size =
-               btrfs_super_csum_size(&root->fs_info->super_copy);
+       u16 csum_size = btrfs_super_csum_size(root->fs_info->super_copy);
        char *result = NULL;
        unsigned long len;
        unsigned long cur_len;
@@@ -367,7 -366,8 +366,8 @@@ static int btree_read_extent_buffer_pag
        clear_bit(EXTENT_BUFFER_CORRUPT, &eb->bflags);
        io_tree = &BTRFS_I(root->fs_info->btree_inode)->io_tree;
        while (1) {
-               ret = read_extent_buffer_pages(io_tree, eb, start, 1,
+               ret = read_extent_buffer_pages(io_tree, eb, start,
+                                              WAIT_COMPLETE,
                                               btree_get_extent, mirror_num);
                if (!ret &&
                    !verify_parent_transid(io_tree, eb, parent_transid))
@@@ -608,11 -608,48 +608,48 @@@ static int btree_readpage_end_io_hook(s
        end = min_t(u64, eb->len, PAGE_CACHE_SIZE);
        end = eb->start + end - 1;
  err:
+       if (test_bit(EXTENT_BUFFER_READAHEAD, &eb->bflags)) {
+               clear_bit(EXTENT_BUFFER_READAHEAD, &eb->bflags);
+               btree_readahead_hook(root, eb, eb->start, ret);
+       }
        free_extent_buffer(eb);
  out:
        return ret;
  }
  
+ static int btree_io_failed_hook(struct bio *failed_bio,
+                        struct page *page, u64 start, u64 end,
+                        u64 mirror_num, struct extent_state *state)
+ {
+       struct extent_io_tree *tree;
+       unsigned long len;
+       struct extent_buffer *eb;
+       struct btrfs_root *root = BTRFS_I(page->mapping->host)->root;
+       tree = &BTRFS_I(page->mapping->host)->io_tree;
+       if (page->private == EXTENT_PAGE_PRIVATE)
+               goto out;
+       if (!page->private)
+               goto out;
+       len = page->private >> 2;
+       WARN_ON(len == 0);
+       eb = alloc_extent_buffer(tree, start, len, page);
+       if (eb == NULL)
+               goto out;
+       if (test_bit(EXTENT_BUFFER_READAHEAD, &eb->bflags)) {
+               clear_bit(EXTENT_BUFFER_READAHEAD, &eb->bflags);
+               btree_readahead_hook(root, eb, eb->start, -EIO);
+       }
+       free_extent_buffer(eb);
+ out:
+       return -EIO;    /* we fixed nothing */
+ }
  static void end_workqueue_bio(struct bio *bio, int err)
  {
        struct end_io_wq *end_io_wq = bio->bi_private;
@@@ -908,7 -945,7 +945,7 @@@ static int btree_readpage(struct file *
  {
        struct extent_io_tree *tree;
        tree = &BTRFS_I(page->mapping->host)->io_tree;
-       return extent_read_full_page(tree, page, btree_get_extent);
+       return extent_read_full_page(tree, page, btree_get_extent, 0);
  }
  
  static int btree_releasepage(struct page *page, gfp_t gfp_flags)
@@@ -974,11 -1011,43 +1011,43 @@@ int readahead_tree_block(struct btrfs_r
        if (!buf)
                return 0;
        read_extent_buffer_pages(&BTRFS_I(btree_inode)->io_tree,
-                                buf, 0, 0, btree_get_extent, 0);
+                                buf, 0, WAIT_NONE, btree_get_extent, 0);
        free_extent_buffer(buf);
        return ret;
  }
  
+ int reada_tree_block_flagged(struct btrfs_root *root, u64 bytenr, u32 blocksize,
+                        int mirror_num, struct extent_buffer **eb)
+ {
+       struct extent_buffer *buf = NULL;
+       struct inode *btree_inode = root->fs_info->btree_inode;
+       struct extent_io_tree *io_tree = &BTRFS_I(btree_inode)->io_tree;
+       int ret;
+       buf = btrfs_find_create_tree_block(root, bytenr, blocksize);
+       if (!buf)
+               return 0;
+       set_bit(EXTENT_BUFFER_READAHEAD, &buf->bflags);
+       ret = read_extent_buffer_pages(io_tree, buf, 0, WAIT_PAGE_LOCK,
+                                      btree_get_extent, mirror_num);
+       if (ret) {
+               free_extent_buffer(buf);
+               return ret;
+       }
+       if (test_bit(EXTENT_BUFFER_CORRUPT, &buf->bflags)) {
+               free_extent_buffer(buf);
+               return -EIO;
+       } else if (extent_buffer_uptodate(io_tree, buf, NULL)) {
+               *eb = buf;
+       } else {
+               free_extent_buffer(buf);
+       }
+       return 0;
+ }
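
The read paths above stop passing bare 0/1 wait flags to read_extent_buffer_pages() and use named modes instead; the real definitions live in fs/btrfs/extent_io.h and are not part of this diff. Reading the call sites, the intent appears to be WAIT_COMPLETE for the synchronous btree read, WAIT_NONE for plain readahead, and WAIT_PAGE_LOCK for reada_tree_block_flagged(), which only needs the pages locked before it inspects the buffer. A hedged sketch of that contract, with values and exact semantics assumed rather than copied from extent_io.h:

    #include <stdio.h>

    /* illustrative stand-ins for the WAIT_* modes used above */
    enum read_wait {
            WAIT_NONE,       /* submit reads, return without blocking */
            WAIT_COMPLETE,   /* block until IO and verification finish */
            WAIT_PAGE_LOCK,  /* block only until the pages are locked */
    };

    static const char *describe(enum read_wait w)
    {
            switch (w) {
            case WAIT_NONE:      return "fire-and-forget readahead";
            case WAIT_COMPLETE:  return "synchronous read";
            case WAIT_PAGE_LOCK: return "wait for page locks, not IO";
            }
            return "?";
    }

    int main(void)
    {
            printf("%s\n", describe(WAIT_PAGE_LOCK));
            return 0;
    }
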
  struct extent_buffer *btrfs_find_tree_block(struct btrfs_root *root,
                                            u64 bytenr, u32 blocksize)
  {
@@@ -1135,10 -1204,12 +1204,12 @@@ static int find_and_setup_root(struct b
  
        generation = btrfs_root_generation(&root->root_item);
        blocksize = btrfs_level_size(root, btrfs_root_level(&root->root_item));
+       root->commit_root = NULL;
        root->node = read_tree_block(root, btrfs_root_bytenr(&root->root_item),
                                     blocksize, generation);
        if (!root->node || !btrfs_buffer_uptodate(root->node, generation)) {
                free_extent_buffer(root->node);
+               root->node = NULL;
                return -EIO;
        }
        root->commit_root = btrfs_root_node(root);
@@@ -1577,6 -1648,235 +1648,235 @@@ sleep
        return 0;
  }
  
+ /*
+  * this will find the highest generation in the array of
+  * root backups.  The index of the newest entry is returned,
+  * or -1 if we can't find anything.
+  *
+  * We check to make sure the array is valid by comparing the
+  * generation of the latest root in the array with the generation
+  * in the super block.  If they don't match we pitch it.
+  */
+ static int find_newest_super_backup(struct btrfs_fs_info *info, u64 newest_gen)
+ {
+       u64 cur;
+       int newest_index = -1;
+       struct btrfs_root_backup *root_backup;
+       int i;
+       for (i = 0; i < BTRFS_NUM_BACKUP_ROOTS; i++) {
+               root_backup = info->super_copy->super_roots + i;
+               cur = btrfs_backup_tree_root_gen(root_backup);
+               if (cur == newest_gen)
+                       newest_index = i;
+       }
+       /* check to see if we actually wrapped around */
+       if (newest_index == BTRFS_NUM_BACKUP_ROOTS - 1) {
+               root_backup = info->super_copy->super_roots;
+               cur = btrfs_backup_tree_root_gen(root_backup);
+               if (cur == newest_gen)
+                       newest_index = 0;
+       }
+       return newest_index;
+ }
+ /*
+  * find the oldest backup so we know where to store new entries
+  * in the backup array.  This will set the backup_root_index
+  * field in the fs_info struct
+  */
+ static void find_oldest_super_backup(struct btrfs_fs_info *info,
+                                    u64 newest_gen)
+ {
+       int newest_index = -1;
+       newest_index = find_newest_super_backup(info, newest_gen);
+       /* if there was garbage in there, just move along */
+       if (newest_index == -1) {
+               info->backup_root_index = 0;
+       } else {
+               info->backup_root_index = (newest_index + 1) % BTRFS_NUM_BACKUP_ROOTS;
+       }
+ }
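
find_newest_super_backup() and find_oldest_super_backup() treat the backup array as a ring: the newest slot is the one whose generation matches the super block, and new entries are written one slot past it, modulo the array size. A standalone model of that index arithmetic (array size and generations invented; the wrap-around double-check is omitted):

    #include <stdio.h>

    #define NUM_BACKUP_ROOTS 4   /* illustrative; see BTRFS_NUM_BACKUP_ROOTS */

    static int find_newest(const unsigned long long *gens,
                           unsigned long long newest_gen)
    {
            int i, newest = -1;

            for (i = 0; i < NUM_BACKUP_ROOTS; i++)
                    if (gens[i] == newest_gen)
                            newest = i;
            return newest;
    }

    int main(void)
    {
            /* generations written in ring order; slot 3 is the newest */
            unsigned long long gens[NUM_BACKUP_ROOTS] = { 97, 98, 99, 100 };
            int newest = find_newest(gens, 100);
            int next = (newest + 1) % NUM_BACKUP_ROOTS;

            printf("newest slot %d, next write goes to slot %d\n",
                   newest, next);   /* newest slot 3, next write slot 0 */
            return 0;
    }
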
+ /*
+  * copy all the root pointers into the super backup array.
+  * this will bump the backup pointer by one when it is
+  * done
+  */
+ static void backup_super_roots(struct btrfs_fs_info *info)
+ {
+       int next_backup;
+       struct btrfs_root_backup *root_backup;
+       int last_backup;
+       next_backup = info->backup_root_index;
+       last_backup = (next_backup + BTRFS_NUM_BACKUP_ROOTS - 1) %
+               BTRFS_NUM_BACKUP_ROOTS;
+       /*
+        * just overwrite the last backup if we're at the same generation
+        * this happens only at umount
+        */
+       root_backup = info->super_for_commit->super_roots + last_backup;
+       if (btrfs_backup_tree_root_gen(root_backup) ==
+           btrfs_header_generation(info->tree_root->node))
+               next_backup = last_backup;
+       root_backup = info->super_for_commit->super_roots + next_backup;
+       /*
+        * make sure all of our padding and empty slots get zero filled
+        * regardless of which ones we use today
+        */
+       memset(root_backup, 0, sizeof(*root_backup));
+       info->backup_root_index = (next_backup + 1) % BTRFS_NUM_BACKUP_ROOTS;
+       btrfs_set_backup_tree_root(root_backup, info->tree_root->node->start);
+       btrfs_set_backup_tree_root_gen(root_backup,
+                              btrfs_header_generation(info->tree_root->node));
+       btrfs_set_backup_tree_root_level(root_backup,
+                              btrfs_header_level(info->tree_root->node));
+       btrfs_set_backup_chunk_root(root_backup, info->chunk_root->node->start);
+       btrfs_set_backup_chunk_root_gen(root_backup,
+                              btrfs_header_generation(info->chunk_root->node));
+       btrfs_set_backup_chunk_root_level(root_backup,
+                              btrfs_header_level(info->chunk_root->node));
+       btrfs_set_backup_extent_root(root_backup, info->extent_root->node->start);
+       btrfs_set_backup_extent_root_gen(root_backup,
+                              btrfs_header_generation(info->extent_root->node));
+       btrfs_set_backup_extent_root_level(root_backup,
+                              btrfs_header_level(info->extent_root->node));
+       /*
+        * we might commit during log recovery, which happens before we set
+        * the fs_root.  Make sure it is valid before we fill it in.
+        */
+       if (info->fs_root && info->fs_root->node) {
+               btrfs_set_backup_fs_root(root_backup,
+                                        info->fs_root->node->start);
+               btrfs_set_backup_fs_root_gen(root_backup,
+                              btrfs_header_generation(info->fs_root->node));
+               btrfs_set_backup_fs_root_level(root_backup,
+                              btrfs_header_level(info->fs_root->node));
+       }
+       btrfs_set_backup_dev_root(root_backup, info->dev_root->node->start);
+       btrfs_set_backup_dev_root_gen(root_backup,
+                              btrfs_header_generation(info->dev_root->node));
+       btrfs_set_backup_dev_root_level(root_backup,
+                                      btrfs_header_level(info->dev_root->node));
+       btrfs_set_backup_csum_root(root_backup, info->csum_root->node->start);
+       btrfs_set_backup_csum_root_gen(root_backup,
+                              btrfs_header_generation(info->csum_root->node));
+       btrfs_set_backup_csum_root_level(root_backup,
+                              btrfs_header_level(info->csum_root->node));
+       btrfs_set_backup_total_bytes(root_backup,
+                            btrfs_super_total_bytes(info->super_copy));
+       btrfs_set_backup_bytes_used(root_backup,
+                            btrfs_super_bytes_used(info->super_copy));
+       btrfs_set_backup_num_devices(root_backup,
+                            btrfs_super_num_devices(info->super_copy));
+       /*
+        * if we don't copy this out to the super_copy, it won't get remembered
+        * for the next commit
+        */
+       memcpy(&info->super_copy->super_roots,
+              &info->super_for_commit->super_roots,
+              sizeof(*root_backup) * BTRFS_NUM_BACKUP_ROOTS);
+ }
+ /*
+  * this copies info out of the root backup array and back into
+  * the in-memory super block.  It is meant to help iterate through
+  * the array, so you send it the number of backups you've already
+  * tried and the last backup index you used.
+  *
+  * this returns -1 when it has tried all the backups
+  */
+ static noinline int next_root_backup(struct btrfs_fs_info *info,
+                                    struct btrfs_super_block *super,
+                                    int *num_backups_tried, int *backup_index)
+ {
+       struct btrfs_root_backup *root_backup;
+       int newest = *backup_index;
+       if (*num_backups_tried == 0) {
+               u64 gen = btrfs_super_generation(super);
+               newest = find_newest_super_backup(info, gen);
+               if (newest == -1)
+                       return -1;
+               *backup_index = newest;
+               *num_backups_tried = 1;
+       } else if (*num_backups_tried == BTRFS_NUM_BACKUP_ROOTS) {
+               /* we've tried all the backups, all done */
+               return -1;
+       } else {
+               /* jump to the next oldest backup */
+               newest = (*backup_index + BTRFS_NUM_BACKUP_ROOTS - 1) %
+                       BTRFS_NUM_BACKUP_ROOTS;
+               *backup_index = newest;
+               *num_backups_tried += 1;
+       }
+       root_backup = super->super_roots + newest;
+       btrfs_set_super_generation(super,
+                                  btrfs_backup_tree_root_gen(root_backup));
+       btrfs_set_super_root(super, btrfs_backup_tree_root(root_backup));
+       btrfs_set_super_root_level(super,
+                                  btrfs_backup_tree_root_level(root_backup));
+       btrfs_set_super_bytes_used(super, btrfs_backup_bytes_used(root_backup));
+       /*
+        * fixme: the total bytes and num_devices need to match or we
+        * need a fsck
+        */
+       btrfs_set_super_total_bytes(super, btrfs_backup_total_bytes(root_backup));
+       btrfs_set_super_num_devices(super, btrfs_backup_num_devices(root_backup));
+       return 0;
+ }
+ /* helper to cleanup tree roots */
+ static void free_root_pointers(struct btrfs_fs_info *info, int chunk_root)
+ {
+       free_extent_buffer(info->tree_root->node);
+       free_extent_buffer(info->tree_root->commit_root);
+       free_extent_buffer(info->dev_root->node);
+       free_extent_buffer(info->dev_root->commit_root);
+       free_extent_buffer(info->extent_root->node);
+       free_extent_buffer(info->extent_root->commit_root);
+       free_extent_buffer(info->csum_root->node);
+       free_extent_buffer(info->csum_root->commit_root);
+       info->tree_root->node = NULL;
+       info->tree_root->commit_root = NULL;
+       info->dev_root->node = NULL;
+       info->dev_root->commit_root = NULL;
+       info->extent_root->node = NULL;
+       info->extent_root->commit_root = NULL;
+       info->csum_root->node = NULL;
+       info->csum_root->commit_root = NULL;
+       if (chunk_root) {
+               free_extent_buffer(info->chunk_root->node);
+               free_extent_buffer(info->chunk_root->commit_root);
+               info->chunk_root->node = NULL;
+               info->chunk_root->commit_root = NULL;
+       }
+ }
  struct btrfs_root *open_ctree(struct super_block *sb,
                              struct btrfs_fs_devices *fs_devices,
                              char *options)
  
        int ret;
        int err = -EINVAL;
+       int num_backups_tried = 0;
+       int backup_index = 0;
  
        struct btrfs_super_block *disk_super;
  
        spin_lock_init(&fs_info->fs_roots_radix_lock);
        spin_lock_init(&fs_info->delayed_iput_lock);
        spin_lock_init(&fs_info->defrag_inodes_lock);
+       spin_lock_init(&fs_info->free_chunk_lock);
        mutex_init(&fs_info->reloc_mutex);
  
        init_completion(&fs_info->kobj_unregister);
        btrfs_init_block_rsv(&fs_info->trans_block_rsv);
        btrfs_init_block_rsv(&fs_info->chunk_block_rsv);
        btrfs_init_block_rsv(&fs_info->empty_block_rsv);
-       INIT_LIST_HEAD(&fs_info->durable_block_rsv_list);
-       mutex_init(&fs_info->durable_block_rsv_mutex);
+       btrfs_init_block_rsv(&fs_info->delayed_block_rsv);
        atomic_set(&fs_info->nr_async_submits, 0);
        atomic_set(&fs_info->async_delalloc_pages, 0);
        atomic_set(&fs_info->async_submit_draining, 0);
        fs_info->metadata_ratio = 0;
        fs_info->defrag_inodes = RB_ROOT;
        fs_info->trans_no_join = 0;
+       fs_info->free_chunk_space = 0;
+       /* readahead state */
+       INIT_RADIX_TREE(&fs_info->reada_tree, GFP_NOFS & ~__GFP_WAIT);
+       spin_lock_init(&fs_info->reada_lock);
  
        fs_info->thread_pool_size = min_t(unsigned long,
                                          num_online_cpus() + 2, 8);
        sb->s_bdi = &fs_info->bdi;
  
        fs_info->btree_inode->i_ino = BTRFS_BTREE_INODE_OBJECTID;
 -      fs_info->btree_inode->i_nlink = 1;
 +      set_nlink(fs_info->btree_inode, 1);
        /*
         * we set the i_size on the btree inode to the max possible int.
         * the real end of the address space is determined by all of
                goto fail_alloc;
        }
  
-       memcpy(&fs_info->super_copy, bh->b_data, sizeof(fs_info->super_copy));
-       memcpy(&fs_info->super_for_commit, &fs_info->super_copy,
-              sizeof(fs_info->super_for_commit));
+       memcpy(fs_info->super_copy, bh->b_data, sizeof(*fs_info->super_copy));
+       memcpy(fs_info->super_for_commit, fs_info->super_copy,
+              sizeof(*fs_info->super_for_commit));
        brelse(bh);
  
-       memcpy(fs_info->fsid, fs_info->super_copy.fsid, BTRFS_FSID_SIZE);
+       memcpy(fs_info->fsid, fs_info->super_copy->fsid, BTRFS_FSID_SIZE);
  
-       disk_super = &fs_info->super_copy;
+       disk_super = fs_info->super_copy;
        if (!btrfs_super_root(disk_super))
                goto fail_alloc;
  
  
        btrfs_check_super_valid(fs_info, sb->s_flags & MS_RDONLY);
  
+       /*
+        * run through our array of backup supers and setup
+        * our ring pointer to the oldest one
+        */
+       generation = btrfs_super_generation(disk_super);
+       find_oldest_super_backup(fs_info, generation);
        /*
         * In the long term, we'll store the compression type in the super
         * block, and it'll be used for per file compression control.
        btrfs_init_workers(&fs_info->delayed_workers, "delayed-meta",
                           fs_info->thread_pool_size,
                           &fs_info->generic_worker);
+       btrfs_init_workers(&fs_info->readahead_workers, "readahead",
+                          fs_info->thread_pool_size,
+                          &fs_info->generic_worker);
  
        /*
         * endios are largely parallel and should have a very
  
        fs_info->endio_write_workers.idle_thresh = 2;
        fs_info->endio_meta_write_workers.idle_thresh = 2;
+       fs_info->readahead_workers.idle_thresh = 2;
  
        btrfs_start_workers(&fs_info->workers, 1);
        btrfs_start_workers(&fs_info->generic_worker, 1);
        btrfs_start_workers(&fs_info->endio_freespace_worker, 1);
        btrfs_start_workers(&fs_info->delayed_workers, 1);
        btrfs_start_workers(&fs_info->caching_workers, 1);
+       btrfs_start_workers(&fs_info->readahead_workers, 1);
  
        fs_info->bdi.ra_pages *= btrfs_super_num_devices(disk_super);
        fs_info->bdi.ra_pages = max(fs_info->bdi.ra_pages,
        if (!test_bit(EXTENT_BUFFER_UPTODATE, &chunk_root->node->bflags)) {
                printk(KERN_WARNING "btrfs: failed to read chunk root on %s\n",
                       sb->s_id);
-               goto fail_chunk_root;
+               goto fail_tree_roots;
        }
        btrfs_set_root_node(&chunk_root->root_item, chunk_root->node);
        chunk_root->commit_root = btrfs_root_node(chunk_root);
        if (ret) {
                printk(KERN_WARNING "btrfs: failed to read chunk tree on %s\n",
                       sb->s_id);
-               goto fail_chunk_root;
+               goto fail_tree_roots;
        }
  
        btrfs_close_extra_devices(fs_devices);
  
+ retry_root_backup:
        blocksize = btrfs_level_size(tree_root,
                                     btrfs_super_root_level(disk_super));
        generation = btrfs_super_generation(disk_super);
        tree_root->node = read_tree_block(tree_root,
                                          btrfs_super_root(disk_super),
                                          blocksize, generation);
-       if (!tree_root->node)
-               goto fail_chunk_root;
-       if (!test_bit(EXTENT_BUFFER_UPTODATE, &tree_root->node->bflags)) {
+       if (!tree_root->node ||
+           !test_bit(EXTENT_BUFFER_UPTODATE, &tree_root->node->bflags)) {
                printk(KERN_WARNING "btrfs: failed to read tree root on %s\n",
                       sb->s_id);
-               goto fail_tree_root;
+               goto recovery_tree_root;
        }
        btrfs_set_root_node(&tree_root->root_item, tree_root->node);
        tree_root->commit_root = btrfs_root_node(tree_root);
  
        ret = find_and_setup_root(tree_root, fs_info,
                                  BTRFS_EXTENT_TREE_OBJECTID, extent_root);
        if (ret)
-               goto fail_tree_root;
+               goto recovery_tree_root;
        extent_root->track_dirty = 1;
  
        ret = find_and_setup_root(tree_root, fs_info,
                                  BTRFS_DEV_TREE_OBJECTID, dev_root);
        if (ret)
-               goto fail_extent_root;
+               goto recovery_tree_root;
        dev_root->track_dirty = 1;
  
        ret = find_and_setup_root(tree_root, fs_info,
                                  BTRFS_CSUM_TREE_OBJECTID, csum_root);
        if (ret)
-               goto fail_dev_root;
+               goto recovery_tree_root;
  
        csum_root->track_dirty = 1;
  
@@@ -2124,22 -2445,13 +2445,13 @@@ fail_cleaner
  
  fail_block_groups:
        btrfs_free_block_groups(fs_info);
-       free_extent_buffer(csum_root->node);
-       free_extent_buffer(csum_root->commit_root);
- fail_dev_root:
-       free_extent_buffer(dev_root->node);
-       free_extent_buffer(dev_root->commit_root);
- fail_extent_root:
-       free_extent_buffer(extent_root->node);
-       free_extent_buffer(extent_root->commit_root);
- fail_tree_root:
-       free_extent_buffer(tree_root->node);
-       free_extent_buffer(tree_root->commit_root);
- fail_chunk_root:
-       free_extent_buffer(chunk_root->node);
-       free_extent_buffer(chunk_root->commit_root);
+ fail_tree_roots:
+       free_root_pointers(fs_info, 1);
  fail_sb_buffer:
        btrfs_stop_workers(&fs_info->generic_worker);
+       btrfs_stop_workers(&fs_info->readahead_workers);
        btrfs_stop_workers(&fs_info->fixup_workers);
        btrfs_stop_workers(&fs_info->delalloc_workers);
        btrfs_stop_workers(&fs_info->workers);
        btrfs_stop_workers(&fs_info->delayed_workers);
        btrfs_stop_workers(&fs_info->caching_workers);
  fail_alloc:
-       kfree(fs_info->delayed_root);
  fail_iput:
        invalidate_inode_pages2(fs_info->btree_inode->i_mapping);
        iput(fs_info->btree_inode);
@@@ -2164,13 -2475,27 +2475,27 @@@ fail_bdi
  fail_srcu:
        cleanup_srcu_struct(&fs_info->subvol_srcu);
  fail:
-       kfree(extent_root);
-       kfree(tree_root);
-       kfree(fs_info);
-       kfree(chunk_root);
-       kfree(dev_root);
-       kfree(csum_root);
+       free_fs_info(fs_info);
        return ERR_PTR(err);
+ recovery_tree_root:
+       if (!btrfs_test_opt(tree_root, RECOVERY))
+               goto fail_tree_roots;
+       free_root_pointers(fs_info, 0);
+       /* don't use the log in recovery mode, it won't be valid */
+       btrfs_set_super_log_root(disk_super, 0);
+       /* we can't trust the free space cache either */
+       btrfs_set_opt(fs_info->mount_opt, CLEAR_CACHE);
+       ret = next_root_backup(fs_info, fs_info->super_copy,
+                              &num_backups_tried, &backup_index);
+       if (ret == -1)
+               goto fail_block_groups;
+       goto retry_root_backup;
  }
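
Tying the pieces together: when RECOVERY is set, the recovery_tree_root path above frees the half-set-up roots, disables the log and the free space cache, loads the next-oldest backup via next_root_backup(), and jumps back to retry_root_backup. A userspace model of that loop, where try_read_root() is a made-up stand-in for the read_tree_block()/find_and_setup_root() sequence:

    #include <stdio.h>

    #define NUM_BACKUP_ROOTS 4

    static int try_read_root(int slot)
    {
            return slot == 1 ? 0 : -5;   /* pretend only slot 1 is intact */
    }

    int main(void)
    {
            int backup_index = 3;        /* start from the newest backup */
            int num_backups_tried = 0;
            int ret = -5;

            while (num_backups_tried < NUM_BACKUP_ROOTS) {
                    ret = try_read_root(backup_index);
                    num_backups_tried++;
                    if (!ret)
                            break;
                    /* step to the next oldest backup */
                    backup_index = (backup_index + NUM_BACKUP_ROOTS - 1) %
                                   NUM_BACKUP_ROOTS;
            }
            if (ret)
                    printf("all backups tried, mount fails\n");
            else
                    printf("recovered from slot %d\n", backup_index);
            return 0;
    }
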
  
  static void btrfs_end_buffer_write_sync(struct buffer_head *bh, int uptodate)
@@@ -2338,10 -2663,11 +2663,11 @@@ int write_all_supers(struct btrfs_root 
        int total_errors = 0;
        u64 flags;
  
-       max_errors = btrfs_super_num_devices(&root->fs_info->super_copy) - 1;
+       max_errors = btrfs_super_num_devices(root->fs_info->super_copy) - 1;
        do_barriers = !btrfs_test_opt(root, NOBARRIER);
+       backup_super_roots(root->fs_info);
  
-       sb = &root->fs_info->super_for_commit;
+       sb = root->fs_info->super_for_commit;
        dev_item = &sb->dev_item;
  
        mutex_lock(&root->fs_info->fs_devices->device_list_mutex);
@@@ -2545,8 -2871,6 +2871,6 @@@ int close_ctree(struct btrfs_root *root
        /* clear out the rbtree of defraggable inodes */
        btrfs_run_defrag_inodes(root->fs_info);
  
-       btrfs_put_block_group_cache(fs_info);
        /*
         * Here come 2 situations when btrfs is broken to flip readonly:
         *
                        printk(KERN_ERR "btrfs: commit super ret %d\n", ret);
        }
  
+       btrfs_put_block_group_cache(fs_info);
        kthread_stop(root->fs_info->transaction_kthread);
        kthread_stop(root->fs_info->cleaner_kthread);
  
        del_fs_roots(fs_info);
  
        iput(fs_info->btree_inode);
-       kfree(fs_info->delayed_root);
  
        btrfs_stop_workers(&fs_info->generic_worker);
        btrfs_stop_workers(&fs_info->fixup_workers);
        btrfs_stop_workers(&fs_info->submit_workers);
        btrfs_stop_workers(&fs_info->delayed_workers);
        btrfs_stop_workers(&fs_info->caching_workers);
+       btrfs_stop_workers(&fs_info->readahead_workers);
  
        btrfs_close_devices(fs_info->fs_devices);
        btrfs_mapping_tree_free(&fs_info->mapping_tree);
        bdi_destroy(&fs_info->bdi);
        cleanup_srcu_struct(&fs_info->subvol_srcu);
  
-       kfree(fs_info->extent_root);
-       kfree(fs_info->tree_root);
-       kfree(fs_info->chunk_root);
-       kfree(fs_info->dev_root);
-       kfree(fs_info->csum_root);
-       kfree(fs_info);
+       free_fs_info(fs_info);
  
        return 0;
  }
@@@ -2735,7 -3056,8 +3056,8 @@@ int btrfs_read_buffer(struct extent_buf
        return ret;
  }
  
- int btree_lock_page_hook(struct page *page)
+ static int btree_lock_page_hook(struct page *page, void *data,
+                               void (*flush_fn)(void *))
  {
        struct inode *inode = page->mapping->host;
        struct btrfs_root *root = BTRFS_I(inode)->root;
        if (!eb)
                goto out;
  
-       btrfs_tree_lock(eb);
+       if (!btrfs_try_tree_write_lock(eb)) {
+               flush_fn(data);
+               btrfs_tree_lock(eb);
+       }
        btrfs_set_header_flag(eb, BTRFS_HEADER_FLAG_WRITTEN);
  
        if (test_and_clear_bit(EXTENT_BUFFER_DIRTY, &eb->bflags)) {
        btrfs_tree_unlock(eb);
        free_extent_buffer(eb);
  out:
-       lock_page(page);
+       if (!trylock_page(page)) {
+               flush_fn(data);
+               lock_page(page);
+       }
        return 0;
  }
  
@@@ -3123,6 -3451,7 +3451,7 @@@ static int btrfs_cleanup_transaction(st
  static struct extent_io_ops btree_extent_io_ops = {
        .write_cache_pages_lock_hook = btree_lock_page_hook,
        .readpage_end_io_hook = btree_readpage_end_io_hook,
+       .readpage_io_failed_hook = btree_io_failed_hook,
        .submit_bio_hook = btree_submit_bio_hook,
        /* note we're sharing with inode.c for the merge bio hook */
        .merge_bio_hook = btrfs_merge_bio_hook,
diff --combined fs/btrfs/extent-tree.c
index c9ee0e18bbdcc6942fd72470a75cd71a7729fe77,18ea90c8943b77faebf68dfa3499193a6a02ab25..9879bd474632eb59ab2cb73bdbc62dc8ce4db927
@@@ -23,6 -23,7 +23,7 @@@
  #include <linux/rcupdate.h>
  #include <linux/kthread.h>
  #include <linux/slab.h>
+ #include <linux/ratelimit.h>
  #include "compat.h"
  #include "hash.h"
  #include "ctree.h"
@@@ -52,6 -53,21 +53,21 @@@ enum 
        CHUNK_ALLOC_LIMITED = 2,
  };
  
+ /*
+  * Control how reservations are dealt with.
+  *
+  * RESERVE_FREE - freeing a reservation.
+  * RESERVE_ALLOC - allocating space and we need to update bytes_may_use for
+  *   ENOSPC accounting
+  * RESERVE_ALLOC_NO_ACCOUNT - allocating space and we should not update
+  *   bytes_may_use as the ENOSPC accounting is done elsewhere
+  */
+ enum {
+       RESERVE_FREE = 0,
+       RESERVE_ALLOC = 1,
+       RESERVE_ALLOC_NO_ACCOUNT = 2,
+ };
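
The comment block above is the contract for the new btrfs_update_reserved_bytes() helper (its body is outside this diff). As a rough userspace model of the accounting it describes, only RESERVE_ALLOC moves bytes out of bytes_may_use, since RESERVE_ALLOC_NO_ACCOUNT callers already did their ENOSPC accounting elsewhere:

    #include <stdio.h>

    enum { RESERVE_FREE = 0, RESERVE_ALLOC = 1, RESERVE_ALLOC_NO_ACCOUNT = 2 };

    struct space { long reserved, may_use; };

    /* hypothetical sketch, not the real btrfs_update_reserved_bytes() */
    static void update_reserved(struct space *s, long bytes, int reserve)
    {
            if (reserve > 0) {
                    s->reserved += bytes;
                    if (reserve == RESERVE_ALLOC)
                            s->may_use -= bytes;  /* reservation made real */
            } else {
                    s->reserved -= bytes;         /* RESERVE_FREE */
            }
    }

    int main(void)
    {
            struct space s = { 0, 4096 };

            update_reserved(&s, 4096, RESERVE_ALLOC);
            printf("reserved=%ld may_use=%ld\n", s.reserved, s.may_use);
            update_reserved(&s, 4096, RESERVE_FREE);
            printf("reserved=%ld may_use=%ld\n", s.reserved, s.may_use);
            return 0;
    }
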
  static int update_block_group(struct btrfs_trans_handle *trans,
                              struct btrfs_root *root,
                              u64 bytenr, u64 num_bytes, int alloc);
@@@ -81,6 -97,8 +97,8 @@@ static int find_next_key(struct btrfs_p
                         struct btrfs_key *key);
  static void dump_space_info(struct btrfs_space_info *info, u64 bytes,
                            int dump_block_groups);
+ static int btrfs_update_reserved_bytes(struct btrfs_block_group_cache *cache,
+                                      u64 num_bytes, int reserve);
  
  static noinline int
  block_group_cache_done(struct btrfs_block_group_cache *cache)
@@@ -104,7 -122,6 +122,6 @@@ void btrfs_put_block_group(struct btrfs
        if (atomic_dec_and_test(&cache->count)) {
                WARN_ON(cache->pinned > 0);
                WARN_ON(cache->reserved > 0);
-               WARN_ON(cache->reserved_pinned > 0);
                kfree(cache->free_space_ctl);
                kfree(cache);
        }
@@@ -465,7 -482,8 +482,8 @@@ static int cache_block_group(struct btr
         * we likely hold important locks.
         */
        if (trans && (!trans->transaction->in_commit) &&
-           (root && root != root->fs_info->tree_root)) {
+           (root && root != root->fs_info->tree_root) &&
+           btrfs_test_opt(root, SPACE_CACHE)) {
                spin_lock(&cache->lock);
                if (cache->cached != BTRFS_CACHE_NO) {
                        spin_unlock(&cache->lock);
@@@ -1770,18 -1788,18 +1788,18 @@@ static int btrfs_discard_extent(struct 
  {
        int ret;
        u64 discarded_bytes = 0;
-       struct btrfs_multi_bio *multi = NULL;
+       struct btrfs_bio *bbio = NULL;
  
  
        /* Tell the block device(s) that the sectors can be discarded */
        ret = btrfs_map_block(&root->fs_info->mapping_tree, REQ_DISCARD,
-                             bytenr, &num_bytes, &multi, 0);
+                             bytenr, &num_bytes, &bbio, 0);
        if (!ret) {
-               struct btrfs_bio_stripe *stripe = multi->stripes;
+               struct btrfs_bio_stripe *stripe = bbio->stripes;
                int i;
  
  
-               for (i = 0; i < multi->num_stripes; i++, stripe++) {
+               for (i = 0; i < bbio->num_stripes; i++, stripe++) {
                        if (!stripe->dev->can_discard)
                                continue;
  
                         */
                        ret = 0;
                }
-               kfree(multi);
+               kfree(bbio);
        }
  
        if (actual_bytes)
@@@ -2700,6 -2718,13 +2718,13 @@@ again
                goto again;
        }
  
+       /* We've already set up this transaction, go ahead and exit */
+       if (block_group->cache_generation == trans->transid &&
+           i_size_read(inode)) {
+               dcs = BTRFS_DC_SETUP;
+               goto out_put;
+       }
        /*
         * We want to set the generation to 0, that way if anything goes wrong
         * from here on out we know not to trust this cache when we load up next
        if (!ret)
                dcs = BTRFS_DC_SETUP;
        btrfs_free_reserved_data_space(inode, num_pages);
  out_put:
        iput(inode);
  out_free:
        btrfs_release_path(path);
  out:
        spin_lock(&block_group->lock);
+       if (!ret)
+               block_group->cache_generation = trans->transid;
        block_group->disk_cache_state = dcs;
        spin_unlock(&block_group->lock);
  
@@@ -3122,16 -3150,13 +3150,13 @@@ commit_trans
                return -ENOSPC;
        }
        data_sinfo->bytes_may_use += bytes;
-       BTRFS_I(inode)->reserved_bytes += bytes;
        spin_unlock(&data_sinfo->lock);
  
        return 0;
  }
  
  /*
-  * called when we are clearing an delalloc extent from the
-  * inode's io_tree or there was an error for whatever reason
-  * after calling btrfs_check_data_free_space
+  * Called if we need to clear a data reservation for this inode.
   */
  void btrfs_free_reserved_data_space(struct inode *inode, u64 bytes)
  {
        data_sinfo = BTRFS_I(inode)->space_info;
        spin_lock(&data_sinfo->lock);
        data_sinfo->bytes_may_use -= bytes;
-       BTRFS_I(inode)->reserved_bytes -= bytes;
        spin_unlock(&data_sinfo->lock);
  }
  
@@@ -3165,6 -3189,7 +3189,7 @@@ static int should_alloc_chunk(struct bt
                              struct btrfs_space_info *sinfo, u64 alloc_bytes,
                              int force)
  {
+       struct btrfs_block_rsv *global_rsv = &root->fs_info->global_block_rsv;
        u64 num_bytes = sinfo->total_bytes - sinfo->bytes_readonly;
        u64 num_allocated = sinfo->bytes_used + sinfo->bytes_reserved;
        u64 thresh;
        if (force == CHUNK_ALLOC_FORCE)
                return 1;
  
+       /*
+        * We need to take into account the global rsv because for all intents
+        * and purposes it's used space.  Don't worry about locking the
+        * global_rsv, it doesn't change except when the transaction commits.
+        */
+       num_allocated += global_rsv->size;
        /*
         * in limited mode, we want to have some free space up to
         * about 1% of the FS size.
         */
        if (force == CHUNK_ALLOC_LIMITED) {
-               thresh = btrfs_super_total_bytes(&root->fs_info->super_copy);
+               thresh = btrfs_super_total_bytes(root->fs_info->super_copy);
                thresh = max_t(u64, 64 * 1024 * 1024,
                               div_factor_fine(thresh, 1));
  
        if (num_allocated + alloc_bytes < div_factor(num_bytes, 8))
                return 0;
  
-       thresh = btrfs_super_total_bytes(&root->fs_info->super_copy);
+       thresh = btrfs_super_total_bytes(root->fs_info->super_copy);
  
        /* 256MB or 5% of the FS */
        thresh = max_t(u64, 256 * 1024 * 1024, div_factor_fine(thresh, 5));
  /*
   * shrink metadata reservation for delalloc
   */
- static int shrink_delalloc(struct btrfs_trans_handle *trans,
-                          struct btrfs_root *root, u64 to_reclaim, int sync)
+ static int shrink_delalloc(struct btrfs_root *root, u64 to_reclaim,
+                          bool wait_ordered)
  {
        struct btrfs_block_rsv *block_rsv;
        struct btrfs_space_info *space_info;
+       struct btrfs_trans_handle *trans;
        u64 reserved;
        u64 max_reclaim;
        u64 reclaimed = 0;
        long time_left;
-       int nr_pages = (2 * 1024 * 1024) >> PAGE_CACHE_SHIFT;
+       unsigned long nr_pages = (2 * 1024 * 1024) >> PAGE_CACHE_SHIFT;
        int loops = 0;
        unsigned long progress;
  
+       trans = (struct btrfs_trans_handle *)current->journal_info;
        block_rsv = &root->fs_info->delalloc_block_rsv;
        space_info = block_rsv->space_info;
  
        smp_mb();
-       reserved = space_info->bytes_reserved;
+       reserved = space_info->bytes_may_use;
        progress = space_info->reservation_progress;
  
        if (reserved == 0)
        }
  
        max_reclaim = min(reserved, to_reclaim);
+       nr_pages = max_t(unsigned long, nr_pages,
+                        max_reclaim >> PAGE_CACHE_SHIFT);
        while (loops < 1024) {
                /* have the flusher threads jump in and do some IO */
                smp_mb();
                nr_pages = min_t(unsigned long, nr_pages,
                       root->fs_info->delalloc_bytes >> PAGE_CACHE_SHIFT);
 -              writeback_inodes_sb_nr_if_idle(root->fs_info->sb, nr_pages);
 +              writeback_inodes_sb_nr_if_idle(root->fs_info->sb, nr_pages,
 +                                              WB_REASON_FS_FREE_SPACE);
  
                spin_lock(&space_info->lock);
-               if (reserved > space_info->bytes_reserved)
-                       reclaimed += reserved - space_info->bytes_reserved;
-               reserved = space_info->bytes_reserved;
+               if (reserved > space_info->bytes_may_use)
+                       reclaimed += reserved - space_info->bytes_may_use;
+               reserved = space_info->bytes_may_use;
                spin_unlock(&space_info->lock);
  
                loops++;
                if (trans && trans->transaction->blocked)
                        return -EAGAIN;
  
-               time_left = schedule_timeout_interruptible(1);
+               if (wait_ordered && !trans) {
+                       btrfs_wait_ordered_extents(root, 0, 0);
+               } else {
+                       time_left = schedule_timeout_interruptible(1);
  
-               /* We were interrupted, exit */
-               if (time_left)
-                       break;
+                       /* We were interrupted, exit */
+                       if (time_left)
+                               break;
+               }
  
                /* we've kicked the IO a few times, if anything has been freed,
                 * exit.  There is no sense in looping here for a long time
                }
  
        }
-       if (reclaimed >= to_reclaim && !trans)
-               btrfs_wait_ordered_extents(root, 0, 0);
        return reclaimed >= to_reclaim;
  }
  
- /*
-  * Retries tells us how many times we've called reserve_metadata_bytes.  The
-  * idea is if this is the first call (retries == 0) then we will add to our
-  * reserved count if we can't make the allocation in order to hold our place
-  * while we go and try and free up space.  That way for retries > 1 we don't try
-  * and add space, we just check to see if the amount of unused space is >= the
-  * total space, meaning that our reservation is valid.
+ /**
+  * may_commit_transaction - possibly commit the transaction if it's ok to
+  * @root - the root we're allocating for
+  * @bytes - the number of bytes we want to reserve
+  * @force - force the commit
   *
-  * However if we don't intend to retry this reservation, pass -1 as retries so
-  * that it short circuits this logic.
+  * This will check to make sure that committing the transaction will actually
+  * get us somewhere and then commit the transaction if it does.  Otherwise it
+  * will return -ENOSPC.
   */
- static int reserve_metadata_bytes(struct btrfs_trans_handle *trans,
-                                 struct btrfs_root *root,
+ static int may_commit_transaction(struct btrfs_root *root,
+                                 struct btrfs_space_info *space_info,
+                                 u64 bytes, int force)
+ {
+       struct btrfs_block_rsv *delayed_rsv = &root->fs_info->delayed_block_rsv;
+       struct btrfs_trans_handle *trans;
+       trans = (struct btrfs_trans_handle *)current->journal_info;
+       if (trans)
+               return -EAGAIN;
+       if (force)
+               goto commit;
+       /* See if there is enough pinned space to make this reservation */
+       spin_lock(&space_info->lock);
+       if (space_info->bytes_pinned >= bytes) {
+               spin_unlock(&space_info->lock);
+               goto commit;
+       }
+       spin_unlock(&space_info->lock);
+       /*
+        * See if there is some space in the delayed insertion reservation for
+        * this reservation.
+        */
+       if (space_info != delayed_rsv->space_info)
+               return -ENOSPC;
+       spin_lock(&delayed_rsv->lock);
+       if (delayed_rsv->size < bytes) {
+               spin_unlock(&delayed_rsv->lock);
+               return -ENOSPC;
+       }
+       spin_unlock(&delayed_rsv->lock);
+ commit:
+       trans = btrfs_join_transaction(root);
+       if (IS_ERR(trans))
+               return -ENOSPC;
+       return btrfs_commit_transaction(trans, root);
+ }
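
may_commit_transaction() only pays for a commit when it can plausibly help: never from inside an open transaction, and otherwise only if enough space is pinned or the delayed-item reserve it would drain is large enough. A compact userspace model of that decision (numbers invented; the real function returns the result of btrfs_commit_transaction() on the commit path):

    #include <errno.h>
    #include <stdio.h>

    static int may_commit(long bytes_pinned, long delayed_rsv_size,
                          long bytes, int force)
    {
            if (force)
                    return 0;                /* commit unconditionally */
            if (bytes_pinned >= bytes)
                    return 0;                /* pinned space covers us */
            if (delayed_rsv_size >= bytes)
                    return 0;                /* delayed reserve covers us */
            return -ENOSPC;                  /* a commit would not help */
    }

    int main(void)
    {
            printf("%d\n", may_commit(1 << 20, 0, 4 << 20, 0)); /* -28 */
            printf("%d\n", may_commit(0, 8 << 20, 4 << 20, 0)); /* 0 */
            return 0;
    }
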
+ /**
+  * reserve_metadata_bytes - try to reserve bytes from the block_rsv's space
+  * @root - the root we're allocating for
+  * @block_rsv - the block_rsv we're allocating for
+  * @orig_bytes - the number of bytes we want
+  * @flush - whether or not we can flush to make our reservation
+  *
+  * This will reserve orig_bytes number of bytes from the space info associated
+  * with the block_rsv.  If there is not enough space it will make an attempt to
+  * flush out space to make room.  It will do this by flushing delalloc if
+  * possible or committing the transaction.  If flush is 0 then no attempts to
+  * regain reservations will be made and this will fail if there is not enough
+  * space already.
+  */
+ static int reserve_metadata_bytes(struct btrfs_root *root,
                                  struct btrfs_block_rsv *block_rsv,
                                  u64 orig_bytes, int flush)
  {
        struct btrfs_space_info *space_info = block_rsv->space_info;
-       u64 unused;
+       u64 used;
        u64 num_bytes = orig_bytes;
        int retries = 0;
        int ret = 0;
        bool committed = false;
        bool flushing = false;
+       bool wait_ordered = false;
  
  again:
        ret = 0;
                 * deadlock since we are waiting for the flusher to finish, but
                 * hold the current transaction open.
                 */
-               if (trans)
+               if (current->journal_info)
                        return -EAGAIN;
                ret = wait_event_interruptible(space_info->wait,
                                               !space_info->flush);
        }
  
        ret = -ENOSPC;
-       unused = space_info->bytes_used + space_info->bytes_reserved +
-                space_info->bytes_pinned + space_info->bytes_readonly +
-                space_info->bytes_may_use;
+       used = space_info->bytes_used + space_info->bytes_reserved +
+               space_info->bytes_pinned + space_info->bytes_readonly +
+               space_info->bytes_may_use;
  
        /*
         * The idea here is that we've not already over-reserved the block group
         * lets start flushing stuff first and then come back and try to make
         * our reservation.
         */
-       if (unused <= space_info->total_bytes) {
-               unused = space_info->total_bytes - unused;
-               if (unused >= num_bytes) {
-                       space_info->bytes_reserved += orig_bytes;
+       if (used <= space_info->total_bytes) {
+               if (used + orig_bytes <= space_info->total_bytes) {
+                       space_info->bytes_may_use += orig_bytes;
                        ret = 0;
                } else {
                        /*
                 * amount plus the amount of bytes that we need for this
                 * reservation.
                 */
-               num_bytes = unused - space_info->total_bytes +
+               wait_ordered = true;
+               num_bytes = used - space_info->total_bytes +
                        (orig_bytes * (retries + 1));
        }
  
+       if (ret) {
+               u64 profile = btrfs_get_alloc_profile(root, 0);
+               u64 avail;
+               /*
+                * If we have a lot of space that's pinned, don't bother doing
+                * the overcommit dance yet and just commit the transaction.
+                */
+               avail = (space_info->total_bytes - space_info->bytes_used) * 8;
+               do_div(avail, 10);
+               if (space_info->bytes_pinned >= avail && flush && !committed) {
+                       space_info->flush = 1;
+                       flushing = true;
+                       spin_unlock(&space_info->lock);
+                       ret = may_commit_transaction(root, space_info,
+                                                    orig_bytes, 1);
+                       if (ret)
+                               goto out;
+                       committed = true;
+                       goto again;
+               }
+               spin_lock(&root->fs_info->free_chunk_lock);
+               avail = root->fs_info->free_chunk_space;
+               /*
+                * If we have dup, raid1 or raid10 then only half of the free
+                * space is actually useable.
+                */
+               if (profile & (BTRFS_BLOCK_GROUP_DUP |
+                              BTRFS_BLOCK_GROUP_RAID1 |
+                              BTRFS_BLOCK_GROUP_RAID10))
+                       avail >>= 1;
+               /*
+                * If we aren't flushing don't let us overcommit too much, say
+                * 1/8th of the space.  If we can flush, let it overcommit up to
+                * 1/2 of the space.
+                */
+               if (flush)
+                       avail >>= 3;
+               else
+                       avail >>= 1;
+       spin_unlock(&root->fs_info->free_chunk_lock);
+               if (used + num_bytes < space_info->total_bytes + avail) {
+                       space_info->bytes_may_use += orig_bytes;
+                       ret = 0;
+               } else {
+                       wait_ordered = true;
+               }
+       }
        /*
         * Couldn't make our reservation, save our place so while we're trying
         * to reclaim space we can actually use it instead of somebody else
         * We do synchronous shrinking since we don't actually unreserve
         * metadata until after the IO is completed.
         */
-       ret = shrink_delalloc(trans, root, num_bytes, 1);
+       ret = shrink_delalloc(root, num_bytes, wait_ordered);
        if (ret < 0)
                goto out;
  
         * so go back around and try again.
         */
        if (retries < 2) {
+               wait_ordered = true;
                retries++;
                goto again;
        }
  
-       /*
-        * Not enough space to be reclaimed, don't bother committing the
-        * transaction.
-        */
-       spin_lock(&space_info->lock);
-       if (space_info->bytes_pinned < orig_bytes)
-               ret = -ENOSPC;
-       spin_unlock(&space_info->lock);
-       if (ret)
-               goto out;
-       ret = -EAGAIN;
-       if (trans)
-               goto out;
        ret = -ENOSPC;
        if (committed)
                goto out;
  
-       trans = btrfs_join_transaction(root);
-       if (IS_ERR(trans))
-               goto out;
-       ret = btrfs_commit_transaction(trans, root);
+       ret = may_commit_transaction(root, space_info, orig_bytes, 0);
        if (!ret) {
-               trans = NULL;
                committed = true;
                goto again;
        }
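
The overcommit branch added to reserve_metadata_bytes() sizes its allowance from the unallocated chunk space: duplicated profiles (dup/raid1/raid10) halve it, then a flushing reservation may overcommit by 1/8 of what remains and a non-flushing one by 1/2. Worked through with invented numbers:

    #include <stdio.h>

    int main(void)
    {
            unsigned long long free_chunk_space = 10ULL << 30;  /* 10 GiB */
            int duplicated_profile = 1;  /* dup/raid1/raid10 halve it */
            int flush = 1;
            unsigned long long avail = free_chunk_space;

            if (duplicated_profile)
                    avail >>= 1;                 /* 5 GiB usable */
            avail >>= flush ? 3 : 1;             /* 1/8 vs 1/2 of that */

            printf("may overcommit up to %llu MiB\n", avail >> 20); /* 640 */
            return 0;
    }
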
  static struct btrfs_block_rsv *get_block_rsv(struct btrfs_trans_handle *trans,
                                             struct btrfs_root *root)
  {
-       struct btrfs_block_rsv *block_rsv;
-       if (root->ref_cows)
+       struct btrfs_block_rsv *block_rsv = NULL;
+       if (root->ref_cows || root == root->fs_info->csum_root)
                block_rsv = trans->block_rsv;
-       else
+       if (!block_rsv)
                block_rsv = root->block_rsv;
  
        if (!block_rsv)
@@@ -3617,7 -3748,7 +3749,7 @@@ static void block_rsv_release_bytes(str
                }
                if (num_bytes) {
                        spin_lock(&space_info->lock);
-                       space_info->bytes_reserved -= num_bytes;
+                       space_info->bytes_may_use -= num_bytes;
                        space_info->reservation_progress++;
                        spin_unlock(&space_info->lock);
                }
@@@ -3641,9 -3772,6 +3773,6 @@@ void btrfs_init_block_rsv(struct btrfs_
  {
        memset(rsv, 0, sizeof(*rsv));
        spin_lock_init(&rsv->lock);
-       atomic_set(&rsv->usage, 1);
-       rsv->priority = 6;
-       INIT_LIST_HEAD(&rsv->list);
  }
  
  struct btrfs_block_rsv *btrfs_alloc_block_rsv(struct btrfs_root *root)
  void btrfs_free_block_rsv(struct btrfs_root *root,
                          struct btrfs_block_rsv *rsv)
  {
-       if (rsv && atomic_dec_and_test(&rsv->usage)) {
-               btrfs_block_rsv_release(root, rsv, (u64)-1);
-               if (!rsv->durable)
-                       kfree(rsv);
-       }
+       btrfs_block_rsv_release(root, rsv, (u64)-1);
+       kfree(rsv);
  }
  
- /*
-  * make the block_rsv struct be able to capture freed space.
-  * the captured space will re-add to the the block_rsv struct
-  * after transaction commit
-  */
- void btrfs_add_durable_block_rsv(struct btrfs_fs_info *fs_info,
-                                struct btrfs_block_rsv *block_rsv)
+ int btrfs_block_rsv_add(struct btrfs_root *root,
+                       struct btrfs_block_rsv *block_rsv,
+                       u64 num_bytes)
  {
-       block_rsv->durable = 1;
-       mutex_lock(&fs_info->durable_block_rsv_mutex);
-       list_add_tail(&block_rsv->list, &fs_info->durable_block_rsv_list);
-       mutex_unlock(&fs_info->durable_block_rsv_mutex);
+       int ret;
+       if (num_bytes == 0)
+               return 0;
+       ret = reserve_metadata_bytes(root, block_rsv, num_bytes, 1);
+       if (!ret) {
+               block_rsv_add_bytes(block_rsv, num_bytes, 1);
+               return 0;
+       }
+       return ret;
  }
  
- int btrfs_block_rsv_add(struct btrfs_trans_handle *trans,
-                       struct btrfs_root *root,
-                       struct btrfs_block_rsv *block_rsv,
-                       u64 num_bytes)
+ int btrfs_block_rsv_add_noflush(struct btrfs_root *root,
+                               struct btrfs_block_rsv *block_rsv,
+                               u64 num_bytes)
  {
        int ret;
  
        if (num_bytes == 0)
                return 0;
  
-       ret = reserve_metadata_bytes(trans, root, block_rsv, num_bytes, 1);
+       ret = reserve_metadata_bytes(root, block_rsv, num_bytes, 0);
        if (!ret) {
                block_rsv_add_bytes(block_rsv, num_bytes, 1);
                return 0;
        return ret;
  }
  
- int btrfs_block_rsv_check(struct btrfs_trans_handle *trans,
-                         struct btrfs_root *root,
-                         struct btrfs_block_rsv *block_rsv,
-                         u64 min_reserved, int min_factor)
+ int btrfs_block_rsv_check(struct btrfs_root *root,
+                         struct btrfs_block_rsv *block_rsv, int min_factor)
  {
        u64 num_bytes = 0;
-       int commit_trans = 0;
        int ret = -ENOSPC;
  
        if (!block_rsv)
                return 0;
  
        spin_lock(&block_rsv->lock);
-       if (min_factor > 0)
-               num_bytes = div_factor(block_rsv->size, min_factor);
-       if (min_reserved > num_bytes)
-               num_bytes = min_reserved;
+       num_bytes = div_factor(block_rsv->size, min_factor);
+       if (block_rsv->reserved >= num_bytes)
+               ret = 0;
+       spin_unlock(&block_rsv->lock);
  
-       if (block_rsv->reserved >= num_bytes) {
+       return ret;
+ }
+
+ int btrfs_block_rsv_refill(struct btrfs_root *root,
+                         struct btrfs_block_rsv *block_rsv,
+                         u64 min_reserved)
+ {
+       u64 num_bytes = 0;
+       int ret = -ENOSPC;
+
+       if (!block_rsv)
+               return 0;
+
+       spin_lock(&block_rsv->lock);
+       num_bytes = min_reserved;
+       if (block_rsv->reserved >= num_bytes)
                ret = 0;
-       } else {
+       else
                num_bytes -= block_rsv->reserved;
-               if (block_rsv->durable &&
-                   block_rsv->freed[0] + block_rsv->freed[1] >= num_bytes)
-                       commit_trans = 1;
-       }
        spin_unlock(&block_rsv->lock);
        if (!ret)
                return 0;
  
-       if (block_rsv->refill_used) {
-               ret = reserve_metadata_bytes(trans, root, block_rsv,
-                                            num_bytes, 0);
-               if (!ret) {
-                       block_rsv_add_bytes(block_rsv, num_bytes, 0);
-                       return 0;
-               }
-       }
-       if (commit_trans) {
-               if (trans)
-                       return -EAGAIN;
-               trans = btrfs_join_transaction(root);
-               BUG_ON(IS_ERR(trans));
-               ret = btrfs_commit_transaction(trans, root);
+       ret = reserve_metadata_bytes(root, block_rsv, num_bytes, 1);
+       if (!ret) {
+               block_rsv_add_bytes(block_rsv, num_bytes, 0);
                return 0;
        }
  
-       return -ENOSPC;
+       return ret;
  }
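
After this split, btrfs_block_rsv_check() is a pure "is it still full enough?" test against a tenths factor, while btrfs_block_rsv_refill() actually tops the reserve back up to a byte floor. A hedged sketch of the intended division of labor (the wrapper name is an assumption):

	static int top_up_if_low(struct btrfs_root *root,
				 struct btrfs_block_rsv *rsv, u64 min_size)
	{
		/* factor is in tenths: 5 asks "at least 50% full?" */
		if (!btrfs_block_rsv_check(root, rsv, 5))
			return 0;

		/* otherwise reserve up to min_size, flushing if needed */
		return btrfs_block_rsv_refill(root, rsv, min_size);
	}
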
  
  int btrfs_block_rsv_migrate(struct btrfs_block_rsv *src_rsv,
@@@ -3784,7 -3909,7 +3910,7 @@@ static u64 calc_global_metadata_size(st
        u64 num_bytes;
        u64 meta_used;
        u64 data_used;
-       int csum_size = btrfs_super_csum_size(&fs_info->super_copy);
+       int csum_size = btrfs_super_csum_size(fs_info->super_copy);
  
        sinfo = __find_space_info(fs_info, BTRFS_BLOCK_GROUP_DATA);
        spin_lock(&sinfo->lock);
@@@ -3828,12 -3953,12 +3954,12 @@@ static void update_global_block_rsv(str
        if (sinfo->total_bytes > num_bytes) {
                num_bytes = sinfo->total_bytes - num_bytes;
                block_rsv->reserved += num_bytes;
-               sinfo->bytes_reserved += num_bytes;
+               sinfo->bytes_may_use += num_bytes;
        }
  
        if (block_rsv->reserved >= block_rsv->size) {
                num_bytes = block_rsv->reserved - block_rsv->size;
-               sinfo->bytes_reserved -= num_bytes;
+               sinfo->bytes_may_use -= num_bytes;
                sinfo->reservation_progress++;
                block_rsv->reserved = block_rsv->size;
                block_rsv->full = 1;
@@@ -3849,16 -3974,13 +3975,13 @@@ static void init_global_block_rsv(struc
  
        space_info = __find_space_info(fs_info, BTRFS_BLOCK_GROUP_SYSTEM);
        fs_info->chunk_block_rsv.space_info = space_info;
-       fs_info->chunk_block_rsv.priority = 10;
  
        space_info = __find_space_info(fs_info, BTRFS_BLOCK_GROUP_METADATA);
        fs_info->global_block_rsv.space_info = space_info;
-       fs_info->global_block_rsv.priority = 10;
-       fs_info->global_block_rsv.refill_used = 1;
        fs_info->delalloc_block_rsv.space_info = space_info;
        fs_info->trans_block_rsv.space_info = space_info;
        fs_info->empty_block_rsv.space_info = space_info;
-       fs_info->empty_block_rsv.priority = 10;
+       fs_info->delayed_block_rsv.space_info = space_info;
  
        fs_info->extent_root->block_rsv = &fs_info->global_block_rsv;
        fs_info->csum_root->block_rsv = &fs_info->global_block_rsv;
        fs_info->tree_root->block_rsv = &fs_info->global_block_rsv;
        fs_info->chunk_root->block_rsv = &fs_info->chunk_block_rsv;
  
-       btrfs_add_durable_block_rsv(fs_info, &fs_info->global_block_rsv);
-       btrfs_add_durable_block_rsv(fs_info, &fs_info->delalloc_block_rsv);
        update_global_block_rsv(fs_info);
  }
  
@@@ -3882,37 -4000,8 +4001,8 @@@ static void release_global_block_rsv(st
        WARN_ON(fs_info->trans_block_rsv.reserved > 0);
        WARN_ON(fs_info->chunk_block_rsv.size > 0);
        WARN_ON(fs_info->chunk_block_rsv.reserved > 0);
- }
- int btrfs_truncate_reserve_metadata(struct btrfs_trans_handle *trans,
-                                   struct btrfs_root *root,
-                                   struct btrfs_block_rsv *rsv)
- {
-       struct btrfs_block_rsv *trans_rsv = &root->fs_info->trans_block_rsv;
-       u64 num_bytes;
-       int ret;
-       /*
-        * Truncate should be freeing data, but give us 2 items just in case it
-        * needs to use some space.  We may want to be smarter about this in the
-        * future.
-        */
-       num_bytes = btrfs_calc_trans_metadata_size(root, 2);
-       /* We already have enough bytes, just return */
-       if (rsv->reserved >= num_bytes)
-               return 0;
-       num_bytes -= rsv->reserved;
-       /*
-        * You should have reserved enough space before hand to do this, so this
-        * should not fail.
-        */
-       ret = block_rsv_migrate_bytes(trans_rsv, rsv, num_bytes);
-       BUG_ON(ret);
-       return 0;
+       WARN_ON(fs_info->delayed_block_rsv.size > 0);
+       WARN_ON(fs_info->delayed_block_rsv.reserved > 0);
  }
  
  void btrfs_trans_release_metadata(struct btrfs_trans_handle *trans,
        if (!trans->bytes_reserved)
                return;
  
-       BUG_ON(trans->block_rsv != &root->fs_info->trans_block_rsv);
-       btrfs_block_rsv_release(root, trans->block_rsv,
-                               trans->bytes_reserved);
+       btrfs_block_rsv_release(root, trans->block_rsv, trans->bytes_reserved);
        trans->bytes_reserved = 0;
  }
  
@@@ -3965,11 -4052,19 +4053,19 @@@ int btrfs_snap_reserve_metadata(struct 
        return block_rsv_migrate_bytes(src_rsv, dst_rsv, num_bytes);
  }
  
+ /**
+  * drop_outstanding_extent - drop an outstanding extent
+  * @inode: the inode we're dropping the extent for
+  *
+  * This is called when we are freeing up an outstanding extent, either after
+  * an error or after an extent is written.  This will return the number of
+  * reserved extents that need to be freed.  This must be called with
+  * BTRFS_I(inode)->lock held.
+  */
  static unsigned drop_outstanding_extent(struct inode *inode)
  {
        unsigned dropped_extents = 0;
  
-       spin_lock(&BTRFS_I(inode)->lock);
        BUG_ON(!BTRFS_I(inode)->outstanding_extents);
        BTRFS_I(inode)->outstanding_extents--;
  
         */
        if (BTRFS_I(inode)->outstanding_extents >=
            BTRFS_I(inode)->reserved_extents)
-               goto out;
+               return 0;
  
        dropped_extents = BTRFS_I(inode)->reserved_extents -
                BTRFS_I(inode)->outstanding_extents;
        BTRFS_I(inode)->reserved_extents -= dropped_extents;
- out:
-       spin_unlock(&BTRFS_I(inode)->lock);
        return dropped_extents;
  }
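
With the spin_lock moved out of this helper, every caller now brackets the drop and the csum recalculation (which shares the same lock) itself. The shape, as it appears in the release path further down:

	spin_lock(&BTRFS_I(inode)->lock);
	dropped = drop_outstanding_extent(inode);
	to_free = calc_csum_metadata_size(inode, num_bytes, 0);
	spin_unlock(&BTRFS_I(inode)->lock);

	if (dropped > 0)
		to_free += btrfs_calc_trans_metadata_size(root, dropped);
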
  
- static u64 calc_csum_metadata_size(struct inode *inode, u64 num_bytes)
+ /**
+  * calc_csum_metadata_size - return the amount of metadata space that must be
+  *    reserved/freed for the given bytes.
+  * @inode: the inode we're manipulating
+  * @num_bytes: the number of bytes in question
+  * @reserve: 1 if we are reserving space, 0 if we are freeing space
+  *
+  * This adjusts the number of csum_bytes in the inode and then returns the
+  * correct amount of metadata that must either be reserved or freed.  We
+  * calculate how many checksums we can fit into one leaf and then divide the
+  * number of bytes that will need to be checksummed by this value to figure out
+  * how many checksums will be required.  If we are adding bytes then the number
+  * may go up and we will return the number of additional bytes that must be
+  * reserved.  If it is going down we will return the number of bytes that must
+  * be freed.
+  *
+  * This must be called with BTRFS_I(inode)->lock held.
+  */
+ static u64 calc_csum_metadata_size(struct inode *inode, u64 num_bytes,
+                                  int reserve)
  {
-       return num_bytes >>= 3;
+       struct btrfs_root *root = BTRFS_I(inode)->root;
+       u64 csum_size;
+       int num_csums_per_leaf;
+       int num_csums;
+       int old_csums;
+
+       if (BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM &&
+           BTRFS_I(inode)->csum_bytes == 0)
+               return 0;
+
+       old_csums = (int)div64_u64(BTRFS_I(inode)->csum_bytes, root->sectorsize);
+       if (reserve)
+               BTRFS_I(inode)->csum_bytes += num_bytes;
+       else
+               BTRFS_I(inode)->csum_bytes -= num_bytes;
+       csum_size = BTRFS_LEAF_DATA_SIZE(root) - sizeof(struct btrfs_item);
+       num_csums_per_leaf = (int)div64_u64(csum_size,
+                                           sizeof(struct btrfs_csum_item) +
+                                           sizeof(struct btrfs_disk_key));
+       num_csums = (int)div64_u64(BTRFS_I(inode)->csum_bytes, root->sectorsize);
+       num_csums = num_csums + num_csums_per_leaf - 1;
+       num_csums = num_csums / num_csums_per_leaf;
+       old_csums = old_csums + num_csums_per_leaf - 1;
+       old_csums = old_csums / num_csums_per_leaf;
+
+       /* No change, no need to reserve more */
+       if (old_csums == num_csums)
+               return 0;
+
+       if (reserve)
+               return btrfs_calc_trans_metadata_size(root,
+                                                     num_csums - old_csums);
+       return btrfs_calc_trans_metadata_size(root, old_csums - num_csums);
  }
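
To make the round-up-to-leaves arithmetic concrete, here is a standalone userspace sketch of the same calculation; the sector size and csums-per-leaf figures are made-up stand-ins, not values read from a real filesystem:

	#include <stdio.h>
	#include <stdint.h>

	int main(void)
	{
		uint64_t sectorsize = 4096;		/* assumed */
		uint64_t per_leaf   = 200;		/* assumed csums per leaf */
		uint64_t csum_bytes = 8 * 1024 * 1024;	/* bytes already tracked */
		uint64_t add_bytes  = 1 * 1024 * 1024;	/* bytes being reserved */

		uint64_t old_csums  = csum_bytes / sectorsize;
		uint64_t new_csums  = (csum_bytes + add_bytes) / sectorsize;
		/* same rounding step as calc_csum_metadata_size() above */
		uint64_t old_leaves = (old_csums + per_leaf - 1) / per_leaf;
		uint64_t new_leaves = (new_csums + per_leaf - 1) / per_leaf;

		/* 2048 csums need 11 leaves, 2304 need 12: reserve the
		 * metadata cost of exactly one more leaf's worth */
		printf("leaves: %llu -> %llu\n",
		       (unsigned long long)old_leaves,
		       (unsigned long long)new_leaves);
		return 0;
	}
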
  
  int btrfs_delalloc_reserve_metadata(struct inode *inode, u64 num_bytes)
        struct btrfs_block_rsv *block_rsv = &root->fs_info->delalloc_block_rsv;
        u64 to_reserve = 0;
        unsigned nr_extents = 0;
+       int flush = 1;
        int ret;
  
-       if (btrfs_transaction_in_commit(root->fs_info))
+       if (btrfs_is_free_space_inode(root, inode))
+               flush = 0;
+
+       if (flush && btrfs_transaction_in_commit(root->fs_info))
                schedule_timeout(1);
  
        num_bytes = ALIGN(num_bytes, root->sectorsize);
  
                to_reserve = btrfs_calc_trans_metadata_size(root, nr_extents);
        }
+       to_reserve += calc_csum_metadata_size(inode, num_bytes, 1);
        spin_unlock(&BTRFS_I(inode)->lock);
  
-       to_reserve += calc_csum_metadata_size(inode, num_bytes);
-       ret = reserve_metadata_bytes(NULL, root, block_rsv, to_reserve, 1);
+       ret = reserve_metadata_bytes(root, block_rsv, to_reserve, flush);
        if (ret) {
+               u64 to_free = 0;
                unsigned dropped;
+
+               spin_lock(&BTRFS_I(inode)->lock);
+               dropped = drop_outstanding_extent(inode);
+               to_free = calc_csum_metadata_size(inode, num_bytes, 0);
+               spin_unlock(&BTRFS_I(inode)->lock);
+               to_free += btrfs_calc_trans_metadata_size(root, dropped);
                /*
-                * We don't need the return value since our reservation failed,
-                * we just need to clean up our counter.
+                * Somebody could have come in and twiddled with the
+                * reservation, so if we have to free more than we would have
+                * reserved from this reservation, go ahead and release those
+                * bytes.
                 */
-               dropped = drop_outstanding_extent(inode);
-               WARN_ON(dropped > 1);
+               to_free -= to_reserve;
+               if (to_free)
+                       btrfs_block_rsv_release(root, block_rsv, to_free);
                return ret;
        }
  
        return 0;
  }
  
+ /**
+  * btrfs_delalloc_release_metadata - release a metadata reservation for an inode
+  * @inode: the inode to release the reservation for
+  * @num_bytes: the number of bytes we're releasing
+  *
+  * This will release the metadata reservation for an inode.  This can be called
+  * once we complete IO for a given set of bytes to release their metadata
+  * reservations.
+  */
  void btrfs_delalloc_release_metadata(struct inode *inode, u64 num_bytes)
  {
        struct btrfs_root *root = BTRFS_I(inode)->root;
        unsigned dropped;
  
        num_bytes = ALIGN(num_bytes, root->sectorsize);
+       spin_lock(&BTRFS_I(inode)->lock);
        dropped = drop_outstanding_extent(inode);
  
-       to_free = calc_csum_metadata_size(inode, num_bytes);
+       to_free = calc_csum_metadata_size(inode, num_bytes, 0);
+       spin_unlock(&BTRFS_I(inode)->lock);
        if (dropped > 0)
                to_free += btrfs_calc_trans_metadata_size(root, dropped);
  
                                to_free);
  }
  
+ /**
+  * btrfs_delalloc_reserve_space - reserve data and metadata space for delalloc
+  * @inode: inode we're writing to
+  * @num_bytes: the number of bytes we want to allocate
+  *
+  * This will do the following things
+  *
+  * o reserve space in the data space info for num_bytes
+  * o reserve space in the metadata space info based on the number of
+  *   outstanding extents and how many csums will be needed
+  * o add to the inode's ->delalloc_bytes
+  * o add it to the fs_info's delalloc inodes list.
+  *
+  * This will return 0 for success and -ENOSPC if there is no space left.
+  */
  int btrfs_delalloc_reserve_space(struct inode *inode, u64 num_bytes)
  {
        int ret;
        return 0;
  }
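
A hedged sketch of the intended reserve/release pairing in a write path; dirty_some_pages() is a placeholder for whatever actually dirties the range, not a real btrfs helper:

	static int reserve_and_dirty(struct inode *inode, loff_t pos,
				     u64 num_bytes)
	{
		int ret;

		ret = btrfs_delalloc_reserve_space(inode, num_bytes);
		if (ret)
			return ret;	/* -ENOSPC, nothing was reserved */

		ret = dirty_some_pages(inode, pos, num_bytes);	/* placeholder */
		if (ret)
			/* nothing will reach disk: undo both the data and
			 * metadata halves of the reservation */
			btrfs_delalloc_release_space(inode, num_bytes);
		return ret;
	}
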
  
+ /**
+  * btrfs_delalloc_release_space - release data and metadata space for delalloc
+  * @inode: inode we're releasing space for
+  * @num_bytes: the number of bytes we want to free up
+  *
+  * This must be matched with a call to btrfs_delalloc_reserve_space.  This is
+  * called in the case that we don't need the metadata AND data reservations
+  * anymore, for example if there is an error or we insert an inline extent.
+  *
+  * This function will release the metadata space that was not used and will
+  * decrement ->delalloc_bytes and remove it from the fs_info delalloc_inodes
+  * list if there are no delalloc bytes left.
+  */
  void btrfs_delalloc_release_space(struct inode *inode, u64 num_bytes)
  {
        btrfs_delalloc_release_metadata(inode, num_bytes);
@@@ -4091,12 -4291,12 +4292,12 @@@ static int update_block_group(struct bt
  
        /* block accounting for super block */
        spin_lock(&info->delalloc_lock);
-       old_val = btrfs_super_bytes_used(&info->super_copy);
+       old_val = btrfs_super_bytes_used(info->super_copy);
        if (alloc)
                old_val += num_bytes;
        else
                old_val -= num_bytes;
-       btrfs_set_super_bytes_used(&info->super_copy, old_val);
+       btrfs_set_super_bytes_used(info->super_copy, old_val);
        spin_unlock(&info->delalloc_lock);
  
        while (total) {
                spin_lock(&cache->space_info->lock);
                spin_lock(&cache->lock);
  
-               if (btrfs_super_cache_generation(&info->super_copy) != 0 &&
+               if (btrfs_test_opt(root, SPACE_CACHE) &&
                    cache->disk_cache_state < BTRFS_DC_CLEAR)
                        cache->disk_cache_state = BTRFS_DC_CLEAR;
  
                        btrfs_set_block_group_used(&cache->item, old_val);
                        cache->reserved -= num_bytes;
                        cache->space_info->bytes_reserved -= num_bytes;
-                       cache->space_info->reservation_progress++;
                        cache->space_info->bytes_used += num_bytes;
                        cache->space_info->disk_used += num_bytes * factor;
                        spin_unlock(&cache->lock);
@@@ -4188,7 -4387,6 +4388,6 @@@ static int pin_down_extent(struct btrfs
        if (reserved) {
                cache->reserved -= num_bytes;
                cache->space_info->bytes_reserved -= num_bytes;
-               cache->space_info->reservation_progress++;
        }
        spin_unlock(&cache->lock);
        spin_unlock(&cache->space_info->lock);
@@@ -4216,45 -4414,82 +4415,82 @@@ int btrfs_pin_extent(struct btrfs_root 
  }
  
  /*
-  * update size of reserved extents. this function may return -EAGAIN
-  * if 'reserve' is true or 'sinfo' is false.
+  * this function must be called within a transaction
+  */
+ int btrfs_pin_extent_for_log_replay(struct btrfs_trans_handle *trans,
+                                   struct btrfs_root *root,
+                                   u64 bytenr, u64 num_bytes)
+ {
+       struct btrfs_block_group_cache *cache;
+
+       cache = btrfs_lookup_block_group(root->fs_info, bytenr);
+       BUG_ON(!cache);
+
+       /*
+        * pull in the free space cache (if any) so that our pin
+        * removes the free space from the cache.  We set load_only to one
+        * because the slow code that reads in the free extents does check
+        * the pinned extents.
+        */
+       cache_block_group(cache, trans, root, 1);
+       pin_down_extent(root, cache, bytenr, num_bytes, 0);
+       /* remove us from the free space cache (if we're there at all) */
+       btrfs_remove_free_space(cache, bytenr, num_bytes);
+       btrfs_put_block_group(cache);
+       return 0;
+ }
+
+ /**
+  * btrfs_update_reserved_bytes - update the block_group and space info counters
+  * @cache:    The cache we are manipulating
+  * @num_bytes:        The number of bytes in question
+  * @reserve:  One of the reservation enums
+  *
+  * This is called by the allocator when it reserves space, or by somebody who is
+  * freeing space that was never actually used on disk.  For example if you
+  * reserve some space for a new leaf in transaction A and before transaction A
+  * commits you free that leaf, you call this with reserve set to 0 in order to
+  * clear the reservation.
+  *
+  * Metadata reservations should be made with RESERVE_ALLOC so we do the proper
+  * ENOSPC accounting.  For data we handle the reservation through clearing the
+  * delalloc bits in the io_tree.  We have to do this since we could end up
+  * allocating less disk space for the amount of data we have reserved in the
+  * case of compression.
+  *
+  * If this is a reservation and the block group has become read only we cannot
+  * make the reservation and return -EAGAIN, otherwise this function always
+  * succeeds.
   */
- int btrfs_update_reserved_bytes(struct btrfs_block_group_cache *cache,
-                               u64 num_bytes, int reserve, int sinfo)
+ static int btrfs_update_reserved_bytes(struct btrfs_block_group_cache *cache,
+                                      u64 num_bytes, int reserve)
  {
+       struct btrfs_space_info *space_info = cache->space_info;
        int ret = 0;
-       if (sinfo) {
-               struct btrfs_space_info *space_info = cache->space_info;
-               spin_lock(&space_info->lock);
-               spin_lock(&cache->lock);
-               if (reserve) {
-                       if (cache->ro) {
-                               ret = -EAGAIN;
-                       } else {
-                               cache->reserved += num_bytes;
-                               space_info->bytes_reserved += num_bytes;
-                       }
-               } else {
-                       if (cache->ro)
-                               space_info->bytes_readonly += num_bytes;
-                       cache->reserved -= num_bytes;
-                       space_info->bytes_reserved -= num_bytes;
-                       space_info->reservation_progress++;
-               }
-               spin_unlock(&cache->lock);
-               spin_unlock(&space_info->lock);
-       } else {
-               spin_lock(&cache->lock);
+
+       spin_lock(&space_info->lock);
+       spin_lock(&cache->lock);
+       if (reserve != RESERVE_FREE) {
                if (cache->ro) {
                        ret = -EAGAIN;
                } else {
-                       if (reserve)
-                               cache->reserved += num_bytes;
-                       else
-                               cache->reserved -= num_bytes;
+                       cache->reserved += num_bytes;
+                       space_info->bytes_reserved += num_bytes;
+                       if (reserve == RESERVE_ALLOC) {
+                               BUG_ON(space_info->bytes_may_use < num_bytes);
+                               space_info->bytes_may_use -= num_bytes;
+                       }
                }
-               spin_unlock(&cache->lock);
+       } else {
+               if (cache->ro)
+                       space_info->bytes_readonly += num_bytes;
+               cache->reserved -= num_bytes;
+               space_info->bytes_reserved -= num_bytes;
+               space_info->reservation_progress++;
        }
+       spin_unlock(&cache->lock);
+       spin_unlock(&space_info->lock);
        return ret;
  }
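
The reserve enums named in the kernel-doc are introduced elsewhere in extent-tree.c by this series; reconstructed here for reference (values inferred from the RESERVE_FREE test above, so treat this as an approximation):

	enum {
		RESERVE_FREE = 0,		/* releasing a reservation */
		RESERVE_ALLOC = 1,		/* allocating; move the bytes from
						 * bytes_may_use to bytes_reserved */
		RESERVE_ALLOC_NO_ACCOUNT = 2,	/* allocating, but skip the
						 * bytes_may_use accounting */
	};
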
  
@@@ -4320,13 -4555,8 +4556,8 @@@ static int unpin_extent_range(struct bt
                spin_lock(&cache->lock);
                cache->pinned -= len;
                cache->space_info->bytes_pinned -= len;
-               if (cache->ro) {
+               if (cache->ro)
                        cache->space_info->bytes_readonly += len;
-               } else if (cache->reserved_pinned > 0) {
-                       len = min(len, cache->reserved_pinned);
-                       cache->reserved_pinned -= len;
-                       cache->space_info->bytes_reserved += len;
-               }
                spin_unlock(&cache->lock);
                spin_unlock(&cache->space_info->lock);
        }
@@@ -4341,11 -4571,8 +4572,8 @@@ int btrfs_finish_extent_commit(struct b
  {
        struct btrfs_fs_info *fs_info = root->fs_info;
        struct extent_io_tree *unpin;
-       struct btrfs_block_rsv *block_rsv;
-       struct btrfs_block_rsv *next_rsv;
        u64 start;
        u64 end;
-       int idx;
        int ret;
  
        if (fs_info->pinned_extents == &fs_info->freed_extents[0])
                cond_resched();
        }
  
-       mutex_lock(&fs_info->durable_block_rsv_mutex);
-       list_for_each_entry_safe(block_rsv, next_rsv,
-                                &fs_info->durable_block_rsv_list, list) {
-               idx = trans->transid & 0x1;
-               if (block_rsv->freed[idx] > 0) {
-                       block_rsv_add_bytes(block_rsv,
-                                           block_rsv->freed[idx], 0);
-                       block_rsv->freed[idx] = 0;
-               }
-               if (atomic_read(&block_rsv->usage) == 0) {
-                       btrfs_block_rsv_release(root, block_rsv, (u64)-1);
-                       if (block_rsv->freed[0] == 0 &&
-                           block_rsv->freed[1] == 0) {
-                               list_del_init(&block_rsv->list);
-                               kfree(block_rsv);
-                       }
-               } else {
-                       btrfs_block_rsv_release(root, block_rsv, 0);
-               }
-       }
-       mutex_unlock(&fs_info->durable_block_rsv_mutex);
        return 0;
  }
  
@@@ -4669,7 -4872,6 +4873,6 @@@ void btrfs_free_tree_block(struct btrfs
                           struct extent_buffer *buf,
                           u64 parent, int last_ref)
  {
-       struct btrfs_block_rsv *block_rsv;
        struct btrfs_block_group_cache *cache = NULL;
        int ret;
  
        if (!last_ref)
                return;
  
-       block_rsv = get_block_rsv(trans, root);
        cache = btrfs_lookup_block_group(root->fs_info, buf->start);
-       if (block_rsv->space_info != cache->space_info)
-               goto out;
  
        if (btrfs_header_generation(buf) == trans->transid) {
                if (root->root_key.objectid != BTRFS_TREE_LOG_OBJECTID) {
                        ret = check_ref_cleanup(trans, root, buf->start);
                        if (!ret)
-                               goto pin;
+                               goto out;
                }
  
                if (btrfs_header_flag(buf, BTRFS_HEADER_FLAG_WRITTEN)) {
                        pin_down_extent(root, cache, buf->start, buf->len, 1);
-                       goto pin;
+                       goto out;
                }
  
                WARN_ON(test_bit(EXTENT_BUFFER_DIRTY, &buf->bflags));
  
                btrfs_add_free_space(cache, buf->start, buf->len);
-               ret = btrfs_update_reserved_bytes(cache, buf->len, 0, 0);
-               if (ret == -EAGAIN) {
-                       /* block group became read-only */
-                       btrfs_update_reserved_bytes(cache, buf->len, 0, 1);
-                       goto out;
-               }
-               ret = 1;
-               spin_lock(&block_rsv->lock);
-               if (block_rsv->reserved < block_rsv->size) {
-                       block_rsv->reserved += buf->len;
-                       ret = 0;
-               }
-               spin_unlock(&block_rsv->lock);
-               if (ret) {
-                       spin_lock(&cache->space_info->lock);
-                       cache->space_info->bytes_reserved -= buf->len;
-                       cache->space_info->reservation_progress++;
-                       spin_unlock(&cache->space_info->lock);
-               }
-               goto out;
-       }
- pin:
-       if (block_rsv->durable && !cache->ro) {
-               ret = 0;
-               spin_lock(&cache->lock);
-               if (!cache->ro) {
-                       cache->reserved_pinned += buf->len;
-                       ret = 1;
-               }
-               spin_unlock(&cache->lock);
-               if (ret) {
-                       spin_lock(&block_rsv->lock);
-                       block_rsv->freed[trans->transid & 0x1] += buf->len;
-                       spin_unlock(&block_rsv->lock);
-               }
+               btrfs_update_reserved_bytes(cache, buf->len, RESERVE_FREE);
        }
  out:
        /*
@@@ -4884,10 -5046,13 +5047,13 @@@ static noinline int find_free_extent(st
        int last_ptr_loop = 0;
        int loop = 0;
        int index = 0;
+       int alloc_type = (data & BTRFS_BLOCK_GROUP_DATA) ?
+               RESERVE_ALLOC_NO_ACCOUNT : RESERVE_ALLOC;
        bool found_uncached_bg = false;
        bool failed_cluster_refill = false;
        bool failed_alloc = false;
        bool use_cluster = true;
+       bool have_caching_bg = false;
        u64 ideal_cache_percent = 0;
        u64 ideal_cache_offset = 0;
  
@@@ -4970,6 -5135,7 +5136,7 @@@ ideal_cache
                }
        }
  search:
+       have_caching_bg = false;
        down_read(&space_info->groups_sem);
        list_for_each_entry(block_group, &space_info->block_groups[index],
                            list) {
@@@ -5178,6 -5344,8 +5345,8 @@@ refill_cluster
                        failed_alloc = true;
                        goto have_block_group;
                } else if (!offset) {
+                       if (!cached)
+                               have_caching_bg = true;
                        goto loop;
                }
  checks:
                                             search_start - offset);
                BUG_ON(offset > search_start);
  
-               ret = btrfs_update_reserved_bytes(block_group, num_bytes, 1,
-                                           (data & BTRFS_BLOCK_GROUP_DATA));
+               ret = btrfs_update_reserved_bytes(block_group, num_bytes,
+                                                 alloc_type);
                if (ret == -EAGAIN) {
                        btrfs_add_free_space(block_group, offset, num_bytes);
                        goto loop;
@@@ -5228,6 -5396,9 +5397,9 @@@ loop
        }
        up_read(&space_info->groups_sem);
  
+       if (!ins->objectid && loop >= LOOP_CACHING_WAIT && have_caching_bg)
+               goto search;
        if (!ins->objectid && ++index < BTRFS_NR_RAID_TYPES)
                goto search;
  
@@@ -5326,7 -5497,8 +5498,8 @@@ static void dump_space_info(struct btrf
        int index = 0;
  
        spin_lock(&info->lock);
-       printk(KERN_INFO "space_info has %llu free, is %sfull\n",
+       printk(KERN_INFO "space_info %llu has %llu free, is %sfull\n",
+              (unsigned long long)info->flags,
               (unsigned long long)(info->total_bytes - info->bytes_used -
                                    info->bytes_pinned - info->bytes_reserved -
                                    info->bytes_readonly),
@@@ -5412,7 -5584,8 +5585,8 @@@ again
        return ret;
  }
  
- int btrfs_free_reserved_extent(struct btrfs_root *root, u64 start, u64 len)
+ static int __btrfs_free_reserved_extent(struct btrfs_root *root,
+                                       u64 start, u64 len, int pin)
  {
        struct btrfs_block_group_cache *cache;
        int ret = 0;
        if (btrfs_test_opt(root, DISCARD))
                ret = btrfs_discard_extent(root, start, len, NULL);
  
-       btrfs_add_free_space(cache, start, len);
-       btrfs_update_reserved_bytes(cache, len, 0, 1);
+       if (pin)
+               pin_down_extent(root, cache, start, len, 1);
+       else {
+               btrfs_add_free_space(cache, start, len);
+               btrfs_update_reserved_bytes(cache, len, RESERVE_FREE);
+       }
        btrfs_put_block_group(cache);
  
        trace_btrfs_reserved_extent_free(root, start, len);
        return ret;
  }
  
+ int btrfs_free_reserved_extent(struct btrfs_root *root,
+                                       u64 start, u64 len)
+ {
+       return __btrfs_free_reserved_extent(root, start, len, 0);
+ }
+
+ int btrfs_free_and_pin_reserved_extent(struct btrfs_root *root,
+                                      u64 start, u64 len)
+ {
+       return __btrfs_free_reserved_extent(root, start, len, 1);
+ }
+
  static int alloc_reserved_file_extent(struct btrfs_trans_handle *trans,
                                      struct btrfs_root *root,
                                      u64 parent, u64 root_objectid,
@@@ -5631,7 -5820,8 +5821,8 @@@ int btrfs_alloc_logged_file_extent(stru
                put_caching_control(caching_ctl);
        }
  
-       ret = btrfs_update_reserved_bytes(block_group, ins->offset, 1, 1);
+       ret = btrfs_update_reserved_bytes(block_group, ins->offset,
+                                         RESERVE_ALLOC_NO_ACCOUNT);
        BUG_ON(ret);
        btrfs_put_block_group(block_group);
        ret = alloc_reserved_file_extent(trans, root, 0, root_objectid,
@@@ -5688,8 -5878,7 +5879,7 @@@ use_block_rsv(struct btrfs_trans_handl
        block_rsv = get_block_rsv(trans, root);
  
        if (block_rsv->size == 0) {
-               ret = reserve_metadata_bytes(trans, root, block_rsv,
-                                            blocksize, 0);
+               ret = reserve_metadata_bytes(root, block_rsv, blocksize, 0);
                /*
                 * If we couldn't reserve metadata bytes try and use some from
                 * the global reserve.
        if (!ret)
                return block_rsv;
        if (ret) {
-               WARN_ON(1);
-               ret = reserve_metadata_bytes(trans, root, block_rsv, blocksize,
-                                            0);
+               static DEFINE_RATELIMIT_STATE(_rs,
+                               DEFAULT_RATELIMIT_INTERVAL,
+                               /*DEFAULT_RATELIMIT_BURST*/ 2);
+               if (__ratelimit(&_rs)) {
+                       printk(KERN_DEBUG "btrfs: block rsv returned %d\n", ret);
+                       WARN_ON(1);
+               }
+               ret = reserve_metadata_bytes(root, block_rsv, blocksize, 0);
                if (!ret) {
-                       spin_lock(&block_rsv->lock);
-                       block_rsv->size += blocksize;
-                       spin_unlock(&block_rsv->lock);
                        return block_rsv;
                } else if (ret && block_rsv != global_rsv) {
                        ret = block_rsv_use_bytes(global_rsv, blocksize);
@@@ -6593,12 -6784,9 +6785,9 @@@ static int set_block_group_ro(struct bt
                    cache->bytes_super - btrfs_block_group_used(&cache->item);
  
        if (sinfo->bytes_used + sinfo->bytes_reserved + sinfo->bytes_pinned +
-           sinfo->bytes_may_use + sinfo->bytes_readonly +
-           cache->reserved_pinned + num_bytes + min_allocable_bytes <=
-           sinfo->total_bytes) {
+           sinfo->bytes_may_use + sinfo->bytes_readonly + num_bytes +
+           min_allocable_bytes <= sinfo->total_bytes) {
                sinfo->bytes_readonly += num_bytes;
-               sinfo->bytes_reserved += cache->reserved_pinned;
-               cache->reserved_pinned = 0;
                cache->ro = 1;
                ret = 0;
        }
@@@ -6965,7 -7153,8 +7154,8 @@@ int btrfs_free_block_groups(struct btrf
                                        struct btrfs_space_info,
                                        list);
                if (space_info->bytes_pinned > 0 ||
-                   space_info->bytes_reserved > 0) {
+                   space_info->bytes_reserved > 0 ||
+                   space_info->bytes_may_use > 0) {
                        WARN_ON(1);
                        dump_space_info(space_info, 0, 0);
                }
@@@ -7007,14 -7196,12 +7197,12 @@@ int btrfs_read_block_groups(struct btrf
                return -ENOMEM;
        path->reada = 1;
  
-       cache_gen = btrfs_super_cache_generation(&root->fs_info->super_copy);
-       if (cache_gen != 0 &&
-           btrfs_super_generation(&root->fs_info->super_copy) != cache_gen)
+       cache_gen = btrfs_super_cache_generation(root->fs_info->super_copy);
+       if (btrfs_test_opt(root, SPACE_CACHE) &&
+           btrfs_super_generation(root->fs_info->super_copy) != cache_gen)
                need_clear = 1;
        if (btrfs_test_opt(root, CLEAR_CACHE))
                need_clear = 1;
-       if (!btrfs_test_opt(root, SPACE_CACHE) && cache_gen)
-               printk(KERN_INFO "btrfs: disk space caching is enabled\n");
  
        while (1) {
                ret = find_first_block_group(root, path, &key);
@@@ -7253,7 -7440,7 +7441,7 @@@ int btrfs_remove_block_group(struct btr
                goto out;
        }
  
-       inode = lookup_free_space_inode(root, block_group, path);
+       inode = lookup_free_space_inode(tree_root, block_group, path);
        if (!IS_ERR(inode)) {
                ret = btrfs_orphan_add(trans, inode);
                BUG_ON(ret);
                        spin_unlock(&block_group->lock);
                }
                /* One for our lookup ref */
-               iput(inode);
+               btrfs_add_delayed_iput(inode);
        }
  
        key.objectid = BTRFS_FREE_SPACE_OBJECTID;
@@@ -7340,7 -7527,7 +7528,7 @@@ int btrfs_init_space_info(struct btrfs_
        int mixed = 0;
        int ret;
  
-       disk_super = &fs_info->super_copy;
+       disk_super = fs_info->super_copy;
        if (!btrfs_super_root(disk_super))
                return 1;
  
diff --combined fs/btrfs/file.c
index 1266f6e9cdb22a309d7da8893963fc3765824b65,f2e928289600dbefd736a5df07401bf5415de511..dafdfa059bf66a489bd3d858990b9025fd50a72f
@@@ -1069,6 -1069,7 +1069,7 @@@ static noinline int prepare_pages(struc
        int i;
        unsigned long index = pos >> PAGE_CACHE_SHIFT;
        struct inode *inode = fdentry(file)->d_inode;
+       gfp_t mask = btrfs_alloc_write_mask(inode->i_mapping);
        int err = 0;
        int faili = 0;
        u64 start_pos;
  again:
        for (i = 0; i < num_pages; i++) {
                pages[i] = find_or_create_page(inode->i_mapping, index + i,
-                                              GFP_NOFS);
+                                              mask);
                if (!pages[i]) {
                        faili = i - 1;
                        err = -ENOMEM;
@@@ -1615,10 -1616,6 +1616,6 @@@ static long btrfs_fallocate(struct fil
                        goto out;
        }
  
-       ret = btrfs_check_data_free_space(inode, alloc_end - alloc_start);
-       if (ret)
-               goto out;
        locked_end = alloc_end - 1;
        while (1) {
                struct btrfs_ordered_extent *ordered;
                if (em->block_start == EXTENT_MAP_HOLE ||
                    (cur_offset >= inode->i_size &&
                     !test_bit(EXTENT_FLAG_PREALLOC, &em->flags))) {
+                       /*
+                        * Make sure we have enough space before we do the
+                        * allocation.
+                        */
+                       ret = btrfs_check_data_free_space(inode, last_byte -
+                                                         cur_offset);
+                       if (ret) {
+                               free_extent_map(em);
+                               break;
+                       }
                        ret = btrfs_prealloc_file_range(inode, mode, cur_offset,
                                                        last_byte - cur_offset,
                                                        1 << inode->i_blkbits,
                                                        offset + len,
                                                        &alloc_hint);
+                       /* Let go of our reservation. */
+                       btrfs_free_reserved_data_space(inode, last_byte -
+                                                      cur_offset);
                        if (ret < 0) {
                                free_extent_map(em);
                                break;
        }
        unlock_extent_cached(&BTRFS_I(inode)->io_tree, alloc_start, locked_end,
                             &cached_state, GFP_NOFS);
-       btrfs_free_reserved_data_space(inode, alloc_end - alloc_start);
  out:
        mutex_unlock(&inode->i_mutex);
        return ret;
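
The fallocate loop above now brackets each hole it fills with its own data reservation instead of reserving the whole range up front. Reduced to its skeleton (control flow simplified, names taken from the hunk):

	/* per hole: reserve -> preallocate -> drop the reservation, so a
	 * failure midway leaves no stale space accounted to this file */
	ret = btrfs_check_data_free_space(inode, last_byte - cur_offset);
	if (!ret) {
		ret = btrfs_prealloc_file_range(inode, mode, cur_offset,
						last_byte - cur_offset,
						1 << inode->i_blkbits,
						offset + len, &alloc_hint);
		btrfs_free_reserved_data_space(inode,
					       last_byte - cur_offset);
	}
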
@@@ -1821,7 -1832,7 +1832,7 @@@ static loff_t btrfs_file_llseek(struct 
        switch (origin) {
        case SEEK_END:
        case SEEK_CUR:
 -              offset = generic_file_llseek_unlocked(file, offset, origin);
 +              offset = generic_file_llseek(file, offset, origin);
                goto out;
        case SEEK_DATA:
        case SEEK_HOLE:
diff --combined fs/btrfs/inode.c
index 75686a61bd4550c5adad9a9f56091ed42217de6d,9d0eaa57d4ee357f0eb14495dbdc55b76fc6c8d8..966ddcc4c63d71b73be121ebd11d304d30fc723e
  #include "btrfs_inode.h"
  #include "ioctl.h"
  #include "print-tree.h"
- #include "volumes.h"
  #include "ordered-data.h"
  #include "xattr.h"
  #include "tree-log.h"
+ #include "volumes.h"
  #include "compression.h"
  #include "locking.h"
  #include "free-space-cache.h"
@@@ -393,7 -393,10 +393,10 @@@ again
             (BTRFS_I(inode)->flags & BTRFS_INODE_COMPRESS))) {
                WARN_ON(pages);
                pages = kzalloc(sizeof(struct page *) * nr_pages, GFP_NOFS);
-               BUG_ON(!pages);
+               if (!pages) {
+                       /* just bail out to the uncompressed code */
+                       goto cont;
+               }
  
                if (BTRFS_I(inode)->force_compress)
                        compress_type = BTRFS_I(inode)->force_compress;
                        will_compress = 1;
                }
        }
+ cont:
        if (start == 0) {
                trans = btrfs_join_transaction(root);
                BUG_ON(IS_ERR(trans));
@@@ -820,7 -824,7 +824,7 @@@ static noinline int cow_file_range(stru
        }
  
        BUG_ON(disk_num_bytes >
-              btrfs_super_total_bytes(&root->fs_info->super_copy));
+              btrfs_super_total_bytes(root->fs_info->super_copy));
  
        alloc_hint = get_extent_allocation_hint(inode, start, num_bytes);
        btrfs_drop_extent_cache(inode, start, start + num_bytes - 1, 0);
@@@ -1792,12 -1796,12 +1796,12 @@@ static int btrfs_finish_ordered_io(stru
        }
        ret = 0;
  out:
-       if (nolock) {
-               if (trans)
-                       btrfs_end_transaction_nolock(trans, root);
-       } else {
+       if (root != root->fs_info->tree_root)
                btrfs_delalloc_release_metadata(inode, ordered_extent->len);
-               if (trans)
+       if (trans) {
+               if (nolock)
+                       btrfs_end_transaction_nolock(trans, root);
+               else
                        btrfs_end_transaction(trans, root);
        }
  
@@@ -1818,154 -1822,10 +1822,10 @@@ static int btrfs_writepage_end_io_hook(
        return btrfs_finish_ordered_io(page->mapping->host, start, end);
  }
  
- /*
-  * When IO fails, either with EIO or csum verification fails, we
-  * try other mirrors that might have a good copy of the data.  This
-  * io_failure_record is used to record state as we go through all the
-  * mirrors.  If another mirror has good data, the page is set up to date
-  * and things continue.  If a good mirror can't be found, the original
-  * bio end_io callback is called to indicate things have failed.
-  */
- struct io_failure_record {
-       struct page *page;
-       u64 start;
-       u64 len;
-       u64 logical;
-       unsigned long bio_flags;
-       int last_mirror;
- };
-
- static int btrfs_io_failed_hook(struct bio *failed_bio,
-                        struct page *page, u64 start, u64 end,
-                        struct extent_state *state)
- {
-       struct io_failure_record *failrec = NULL;
-       u64 private;
-       struct extent_map *em;
-       struct inode *inode = page->mapping->host;
-       struct extent_io_tree *failure_tree = &BTRFS_I(inode)->io_failure_tree;
-       struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
-       struct bio *bio;
-       int num_copies;
-       int ret;
-       int rw;
-       u64 logical;
-       ret = get_state_private(failure_tree, start, &private);
-       if (ret) {
-               failrec = kmalloc(sizeof(*failrec), GFP_NOFS);
-               if (!failrec)
-                       return -ENOMEM;
-               failrec->start = start;
-               failrec->len = end - start + 1;
-               failrec->last_mirror = 0;
-               failrec->bio_flags = 0;
-               read_lock(&em_tree->lock);
-               em = lookup_extent_mapping(em_tree, start, failrec->len);
-               if (em->start > start || em->start + em->len < start) {
-                       free_extent_map(em);
-                       em = NULL;
-               }
-               read_unlock(&em_tree->lock);
-               if (IS_ERR_OR_NULL(em)) {
-                       kfree(failrec);
-                       return -EIO;
-               }
-               logical = start - em->start;
-               logical = em->block_start + logical;
-               if (test_bit(EXTENT_FLAG_COMPRESSED, &em->flags)) {
-                       logical = em->block_start;
-                       failrec->bio_flags = EXTENT_BIO_COMPRESSED;
-                       extent_set_compress_type(&failrec->bio_flags,
-                                                em->compress_type);
-               }
-               failrec->logical = logical;
-               free_extent_map(em);
-               set_extent_bits(failure_tree, start, end, EXTENT_LOCKED |
-                               EXTENT_DIRTY, GFP_NOFS);
-               set_state_private(failure_tree, start,
-                                (u64)(unsigned long)failrec);
-       } else {
-               failrec = (struct io_failure_record *)(unsigned long)private;
-       }
-       num_copies = btrfs_num_copies(
-                             &BTRFS_I(inode)->root->fs_info->mapping_tree,
-                             failrec->logical, failrec->len);
-       failrec->last_mirror++;
-       if (!state) {
-               spin_lock(&BTRFS_I(inode)->io_tree.lock);
-               state = find_first_extent_bit_state(&BTRFS_I(inode)->io_tree,
-                                                   failrec->start,
-                                                   EXTENT_LOCKED);
-               if (state && state->start != failrec->start)
-                       state = NULL;
-               spin_unlock(&BTRFS_I(inode)->io_tree.lock);
-       }
-       if (!state || failrec->last_mirror > num_copies) {
-               set_state_private(failure_tree, failrec->start, 0);
-               clear_extent_bits(failure_tree, failrec->start,
-                                 failrec->start + failrec->len - 1,
-                                 EXTENT_LOCKED | EXTENT_DIRTY, GFP_NOFS);
-               kfree(failrec);
-               return -EIO;
-       }
-       bio = bio_alloc(GFP_NOFS, 1);
-       bio->bi_private = state;
-       bio->bi_end_io = failed_bio->bi_end_io;
-       bio->bi_sector = failrec->logical >> 9;
-       bio->bi_bdev = failed_bio->bi_bdev;
-       bio->bi_size = 0;
-       bio_add_page(bio, page, failrec->len, start - page_offset(page));
-       if (failed_bio->bi_rw & REQ_WRITE)
-               rw = WRITE;
-       else
-               rw = READ;
-       ret = BTRFS_I(inode)->io_tree.ops->submit_bio_hook(inode, rw, bio,
-                                                     failrec->last_mirror,
-                                                     failrec->bio_flags, 0);
-       return ret;
- }
-
- /*
-  * each time an IO finishes, we do a fast check in the IO failure tree
-  * to see if we need to process or clean up an io_failure_record
-  */
- static int btrfs_clean_io_failures(struct inode *inode, u64 start)
- {
-       u64 private;
-       u64 private_failure;
-       struct io_failure_record *failure;
-       int ret;
-       private = 0;
-       if (count_range_bits(&BTRFS_I(inode)->io_failure_tree, &private,
-                            (u64)-1, 1, EXTENT_DIRTY, 0)) {
-               ret = get_state_private(&BTRFS_I(inode)->io_failure_tree,
-                                       start, &private_failure);
-               if (ret == 0) {
-                       failure = (struct io_failure_record *)(unsigned long)
-                                  private_failure;
-                       set_state_private(&BTRFS_I(inode)->io_failure_tree,
-                                         failure->start, 0);
-                       clear_extent_bits(&BTRFS_I(inode)->io_failure_tree,
-                                         failure->start,
-                                         failure->start + failure->len - 1,
-                                         EXTENT_DIRTY | EXTENT_LOCKED,
-                                         GFP_NOFS);
-                       kfree(failure);
-               }
-       }
-       return 0;
- }
  /*
   * when reads are done, we need to check csums to verify the data is correct
-  * if there's a match, we allow the bio to finish.  If not, we go through
-  * the io_failure_record routines to find good copies
+  * if there's a match, we allow the bio to finish.  If not, the code in
+  * extent_io.c will try to find good copies for us.
   */
  static int btrfs_readpage_end_io_hook(struct page *page, u64 start, u64 end,
                               struct extent_state *state)
  
        kunmap_atomic(kaddr, KM_USER0);
  good:
-       /* if the io failure tree for this inode is non-empty,
-        * check to see if we've recovered from a failed IO
-        */
-       btrfs_clean_io_failures(inode, start);
        return 0;
  
  zeroit:
@@@ -2079,89 -1935,6 +1935,6 @@@ void btrfs_run_delayed_iputs(struct btr
        up_read(&root->fs_info->cleanup_work_sem);
  }
  
- /*
-  * calculate extra metadata reservation when snapshotting a subvolume
-  * contains orphan files.
-  */
- void btrfs_orphan_pre_snapshot(struct btrfs_trans_handle *trans,
-                               struct btrfs_pending_snapshot *pending,
-                               u64 *bytes_to_reserve)
- {
-       struct btrfs_root *root;
-       struct btrfs_block_rsv *block_rsv;
-       u64 num_bytes;
-       int index;
-       root = pending->root;
-       if (!root->orphan_block_rsv || list_empty(&root->orphan_list))
-               return;
-       block_rsv = root->orphan_block_rsv;
-       /* orphan block reservation for the snapshot */
-       num_bytes = block_rsv->size;
-       /*
-        * after the snapshot is created, COWing tree blocks may use more
-        * space than it frees. So we should make sure there is enough
-        * reserved space.
-        */
-       index = trans->transid & 0x1;
-       if (block_rsv->reserved + block_rsv->freed[index] < block_rsv->size) {
-               num_bytes += block_rsv->size -
-                            (block_rsv->reserved + block_rsv->freed[index]);
-       }
-       *bytes_to_reserve += num_bytes;
- }
-
- void btrfs_orphan_post_snapshot(struct btrfs_trans_handle *trans,
-                               struct btrfs_pending_snapshot *pending)
- {
-       struct btrfs_root *root = pending->root;
-       struct btrfs_root *snap = pending->snap;
-       struct btrfs_block_rsv *block_rsv;
-       u64 num_bytes;
-       int index;
-       int ret;
-       if (!root->orphan_block_rsv || list_empty(&root->orphan_list))
-               return;
-       /* refill source subvolume's orphan block reservation */
-       block_rsv = root->orphan_block_rsv;
-       index = trans->transid & 0x1;
-       if (block_rsv->reserved + block_rsv->freed[index] < block_rsv->size) {
-               num_bytes = block_rsv->size -
-                           (block_rsv->reserved + block_rsv->freed[index]);
-               ret = btrfs_block_rsv_migrate(&pending->block_rsv,
-                                             root->orphan_block_rsv,
-                                             num_bytes);
-               BUG_ON(ret);
-       }
-       /* setup orphan block reservation for the snapshot */
-       block_rsv = btrfs_alloc_block_rsv(snap);
-       BUG_ON(!block_rsv);
-       btrfs_add_durable_block_rsv(root->fs_info, block_rsv);
-       snap->orphan_block_rsv = block_rsv;
-       num_bytes = root->orphan_block_rsv->size;
-       ret = btrfs_block_rsv_migrate(&pending->block_rsv,
-                                     block_rsv, num_bytes);
-       BUG_ON(ret);
- #if 0
-       /* insert orphan item for the snapshot */
-       WARN_ON(!root->orphan_item_inserted);
-       ret = btrfs_insert_orphan_item(trans, root->fs_info->tree_root,
-                                      snap->root_key.objectid);
-       BUG_ON(ret);
-       snap->orphan_item_inserted = 1;
- #endif
- }
  enum btrfs_orphan_cleanup_state {
        ORPHAN_CLEANUP_STARTED  = 1,
        ORPHAN_CLEANUP_DONE     = 2,
@@@ -2247,9 -2020,6 +2020,6 @@@ int btrfs_orphan_add(struct btrfs_trans
        }
        spin_unlock(&root->orphan_lock);
  
-       if (block_rsv)
-               btrfs_add_durable_block_rsv(root->fs_info, block_rsv);
        /* grab metadata reservation from transaction handle */
        if (reserve) {
                ret = btrfs_orphan_reserve_metadata(trans, inode);
@@@ -2316,6 -2086,7 +2086,7 @@@ int btrfs_orphan_cleanup(struct btrfs_r
        struct btrfs_key key, found_key;
        struct btrfs_trans_handle *trans;
        struct inode *inode;
+       u64 last_objectid = 0;
        int ret = 0, nr_unlink = 0, nr_truncate = 0;
  
        if (cmpxchg(&root->orphan_cleanup_state, 0, ORPHAN_CLEANUP_STARTED))
                 * crossing root thing.  we store the inode number in the
                 * offset of the orphan item.
                 */
+               if (found_key.offset == last_objectid) {
+                       printk(KERN_ERR "btrfs: Error removing orphan entry, "
+                              "stopping orphan cleanup\n");
+                       ret = -EINVAL;
+                       goto out;
+               }
+               last_objectid = found_key.offset;
                found_key.objectid = found_key.offset;
                found_key.type = BTRFS_INODE_ITEM_KEY;
                found_key.offset = 0;
                inode = btrfs_iget(root->fs_info->sb, &found_key, root, NULL);
-               if (IS_ERR(inode)) {
-                       ret = PTR_ERR(inode);
+               ret = PTR_RET(inode);
+               if (ret && ret != -ESTALE)
                        goto out;
-               }
  
                /*
-                * add this inode to the orphan list so btrfs_orphan_del does
-                * the proper thing when we hit it
+                * Inode is already gone but the orphan item is still there,
+                * kill the orphan item.
                 */
-               spin_lock(&root->orphan_lock);
-               list_add(&BTRFS_I(inode)->i_orphan, &root->orphan_list);
-               spin_unlock(&root->orphan_lock);
-               /*
-                * if this is a bad inode, means we actually succeeded in
-                * removing the inode, but not the orphan record, which means
-                * we need to manually delete the orphan since iput will just
-                * do a destroy_inode
-                */
-               if (is_bad_inode(inode)) {
-                       trans = btrfs_start_transaction(root, 0);
+               if (ret == -ESTALE) {
+                       trans = btrfs_start_transaction(root, 1);
                        if (IS_ERR(trans)) {
                                ret = PTR_ERR(trans);
                                goto out;
                        }
-                       btrfs_orphan_del(trans, inode);
+                       ret = btrfs_del_orphan_item(trans, root,
+                                                   found_key.objectid);
+                       BUG_ON(ret);
                        btrfs_end_transaction(trans, root);
-                       iput(inode);
                        continue;
                }
  
+               /*
+                * add this inode to the orphan list so btrfs_orphan_del does
+                * the proper thing when we hit it
+                */
+               spin_lock(&root->orphan_lock);
+               list_add(&BTRFS_I(inode)->i_orphan, &root->orphan_list);
+               spin_unlock(&root->orphan_lock);
                /* if we have links, this was a truncate, lets do that */
                if (inode->i_nlink) {
                        if (!S_ISREG(inode->i_mode)) {
@@@ -2534,7 -2313,7 +2313,7 @@@ static void btrfs_read_locked_inode(str
        inode_item = btrfs_item_ptr(leaf, path->slots[0],
                                    struct btrfs_inode_item);
        inode->i_mode = btrfs_inode_mode(leaf, inode_item);
 -      inode->i_nlink = btrfs_inode_nlink(leaf, inode_item);
 +      set_nlink(inode, btrfs_inode_nlink(leaf, inode_item));
        inode->i_uid = btrfs_inode_uid(leaf, inode_item);
        inode->i_gid = btrfs_inode_gid(leaf, inode_item);
        btrfs_i_size_write(inode, btrfs_inode_size(leaf, inode_item));
@@@ -2835,7 -2614,16 +2614,16 @@@ static struct btrfs_trans_handle *__unl
        u64 ino = btrfs_ino(inode);
        u64 dir_ino = btrfs_ino(dir);
  
-       trans = btrfs_start_transaction(root, 10);
+       /*
+        * 1 for the possible orphan item
+        * 1 for the dir item
+        * 1 for the dir index
+        * 1 for the inode ref
+        * 1 for the inode ref in the tree log
+        * 2 for the dir entries in the log
+        * 1 for the inode
+        */
+       trans = btrfs_start_transaction(root, 8);
        if (!IS_ERR(trans) || PTR_ERR(trans) != -ENOSPC)
                return trans;
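
Each of the eight items above is charged a worst-case tree-path cost via btrfs_calc_trans_metadata_size(). For a rough feel of the magnitude, a userspace sketch with an assumed cost model (the constants are illustrative stand-ins, not the kernel's exact formula):

	#include <stdio.h>

	int main(void)
	{
		/* assumed: 4K nodes, an 8-level worst-case path, tripled
		 * for possible splits/COW; stand-in numbers only */
		unsigned long long per_item = 4096ULL * 8 * 3;
		unsigned long long items = 8;	/* items listed above */

		printf("worst-case unlink reservation ~ %llu KiB\n",
		       per_item * items / 1024);
		return 0;
	}
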
  
                return ERR_PTR(-ENOMEM);
        }
  
-       trans = btrfs_start_transaction(root, 0);
+       /* 1 for the orphan item */
+       trans = btrfs_start_transaction(root, 1);
        if (IS_ERR(trans)) {
                btrfs_free_path(path);
                root->fs_info->enospc_unlink = 0;
        err = 0;
  out:
        btrfs_free_path(path);
+       /* Migrate the orphan reservation over */
+       if (!err)
+               err = btrfs_block_rsv_migrate(trans->block_rsv,
+                               &root->fs_info->global_block_rsv,
+                               trans->bytes_reserved);
        if (err) {
                btrfs_end_transaction(trans, root);
                root->fs_info->enospc_unlink = 0;
@@@ -2977,6 -2772,9 +2772,9 @@@ static void __unlink_end_trans(struct b
                               struct btrfs_root *root)
  {
        if (trans->block_rsv == &root->fs_info->global_block_rsv) {
+               btrfs_block_rsv_release(root, trans->block_rsv,
+                                       trans->bytes_reserved);
+               trans->block_rsv = &root->fs_info->trans_block_rsv;
                BUG_ON(!root->fs_info->enospc_unlink);
                root->fs_info->enospc_unlink = 0;
        }
@@@ -3368,6 -3166,7 +3166,7 @@@ static int btrfs_truncate_page(struct a
        pgoff_t index = from >> PAGE_CACHE_SHIFT;
        unsigned offset = from & (PAGE_CACHE_SIZE-1);
        struct page *page;
+       gfp_t mask = btrfs_alloc_write_mask(mapping);
        int ret = 0;
        u64 page_start;
        u64 page_end;
  
        ret = -ENOMEM;
  again:
-       page = find_or_create_page(mapping, index, GFP_NOFS);
+       page = find_or_create_page(mapping, index, mask);
        if (!page) {
                btrfs_delalloc_release_space(inode, PAGE_CACHE_SIZE);
                goto out;
@@@ -3613,6 -3412,8 +3412,8 @@@ void btrfs_evict_inode(struct inode *in
  {
        struct btrfs_trans_handle *trans;
        struct btrfs_root *root = BTRFS_I(inode)->root;
+       struct btrfs_block_rsv *rsv, *global_rsv;
+       u64 min_size = btrfs_calc_trunc_metadata_size(root, 1);
        unsigned long nr;
        int ret;
  
                goto no_delete;
        }
  
+       rsv = btrfs_alloc_block_rsv(root);
+       if (!rsv) {
+               btrfs_orphan_del(NULL, inode);
+               goto no_delete;
+       }
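+       /* one truncate step's worth of metadata (min_size) at a time */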
+       rsv->size = min_size;
+       global_rsv = &root->fs_info->global_block_rsv;
        btrfs_i_size_write(inode, 0);
  
+       /*
+        * This is a bit simpler than btrfs_truncate since
+        *
+        * 1) We've already reserved our space for our orphan item in the
+        *    unlink.
+        * 2) We're going to delete the inode item, so we don't need to update
+        *    it at all.
+        *
+        * So we just need to reserve some slack space in case we add bytes when
+        * doing the truncate.
+        */
        while (1) {
-               trans = btrfs_join_transaction(root);
-               BUG_ON(IS_ERR(trans));
-               trans->block_rsv = root->orphan_block_rsv;
+               ret = btrfs_block_rsv_refill(root, rsv, min_size);
+               /*
+                * Try to steal from the global reserve, since we likely
+                * won't use this space anyway; we want to try as hard as
+                * possible to make this work.
+                */
+               if (ret)
+                       ret = btrfs_block_rsv_migrate(global_rsv, rsv, min_size);
  
-               ret = btrfs_block_rsv_check(trans, root,
-                                           root->orphan_block_rsv, 0, 5);
                if (ret) {
-                       BUG_ON(ret != -EAGAIN);
-                       ret = btrfs_commit_transaction(trans, root);
-                       BUG_ON(ret);
-                       continue;
+                       printk(KERN_WARNING "Could not get space for a "
+                              "delete, will truncate on mount %d\n", ret);
+                       btrfs_orphan_del(NULL, inode);
+                       btrfs_free_block_rsv(root, rsv);
+                       goto no_delete;
                }
  
+               trans = btrfs_start_transaction(root, 0);
+               if (IS_ERR(trans)) {
+                       btrfs_orphan_del(NULL, inode);
+                       btrfs_free_block_rsv(root, rsv);
+                       goto no_delete;
+               }
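+               /* charge this truncate pass to the reserve we just refilled */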
+               trans->block_rsv = rsv;
                ret = btrfs_truncate_inode_items(trans, root, inode, 0, 0);
                if (ret != -EAGAIN)
                        break;
                btrfs_end_transaction(trans, root);
                trans = NULL;
                btrfs_btree_balance_dirty(root, nr);
        }
  
+       btrfs_free_block_rsv(root, rsv);
        if (ret == 0) {
+               trans->block_rsv = root->orphan_block_rsv;
                ret = btrfs_orphan_del(trans, inode);
                BUG_ON(ret);
        }
  
+       trans->block_rsv = &root->fs_info->trans_block_rsv;
        if (!(root == root->fs_info->tree_root ||
              root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID))
                btrfs_return_ino(root, btrfs_ino(inode));
@@@ -5795,8 -5632,7 +5632,7 @@@ again
        if (test_bit(BTRFS_ORDERED_NOCOW, &ordered->flags)) {
                ret = btrfs_ordered_update_i_size(inode, 0, ordered);
                if (!ret)
-                       ret = btrfs_update_inode(trans, root, inode);
-               err = ret;
+                       err = btrfs_update_inode(trans, root, inode);
                goto out;
        }
  
@@@ -6289,7 -6125,7 +6125,7 @@@ int btrfs_readpage(struct file *file, s
  {
        struct extent_io_tree *tree;
        tree = &BTRFS_I(page->mapping->host)->io_tree;
-       return extent_read_full_page(tree, page, btrfs_get_extent);
+       return extent_read_full_page(tree, page, btrfs_get_extent, 0);
  }
  
  static int btrfs_writepage(struct page *page, struct writeback_control *wbc)
@@@ -6541,6 -6377,7 +6377,7 @@@ static int btrfs_truncate(struct inode 
        struct btrfs_trans_handle *trans;
        unsigned long nr;
        u64 mask = root->sectorsize - 1;
+       u64 min_size = btrfs_calc_trunc_metadata_size(root, 1);
  
        ret = btrfs_truncate_page(inode->i_mapping, inode->i_size);
        if (ret)
        rsv = btrfs_alloc_block_rsv(root);
        if (!rsv)
                return -ENOMEM;
-       btrfs_add_durable_block_rsv(root->fs_info, rsv);
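+       /* as in btrfs_evict_inode: one truncate step's worth of space */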
+       rsv->size = min_size;
  
+       /*
+        * 1 for the truncate slack space
+        * 1 for the orphan item we're going to add
+        * 1 for the orphan item deletion
+        * 1 for updating the inode.
+        */
        trans = btrfs_start_transaction(root, 4);
        if (IS_ERR(trans)) {
                err = PTR_ERR(trans);
                goto out;
        }
  
-       /*
-        * Reserve space for the truncate process.  Truncate should be adding
-        * space, but if there are snapshots it may end up using space.
-        */
-       ret = btrfs_truncate_reserve_metadata(trans, root, rsv);
+       /* Migrate the slack space for the truncate to our reserve */
+       ret = btrfs_block_rsv_migrate(&root->fs_info->trans_block_rsv, rsv,
+                                     min_size);
        BUG_ON(ret);
  
        ret = btrfs_orphan_add(trans, inode);
                goto out;
        }
  
-       nr = trans->blocks_used;
-       btrfs_end_transaction(trans, root);
-       btrfs_btree_balance_dirty(root, nr);
-       /*
-        * Ok so we've already migrated our bytes over for the truncate, so here
-        * just reserve the one slot we need for updating the inode.
-        */
-       trans = btrfs_start_transaction(root, 1);
-       if (IS_ERR(trans)) {
-               err = PTR_ERR(trans);
-               goto out;
-       }
-       trans->block_rsv = rsv;
        /*
         * setattr is responsible for setting the ordered_data_close flag,
         * but that is only tested during the last file release.  That
                btrfs_add_ordered_operation(trans, root, inode);
  
        while (1) {
+               ret = btrfs_block_rsv_refill(root, rsv, min_size);
+               if (ret) {
+                       /*
+                        * This can only happen with the original transaction
+                        * we started above; on every other pass we shouldn't
+                        * have a transaction started yet.
+                        */
+                       if (ret == -EAGAIN)
+                               goto end_trans;
+                       err = ret;
+                       break;
+               }
                if (!trans) {
-                       trans = btrfs_start_transaction(root, 3);
+                       /* Just need the 1 for updating the inode */
+                       trans = btrfs_start_transaction(root, 1);
                        if (IS_ERR(trans)) {
                                err = PTR_ERR(trans);
                                goto out;
                        }
-                       ret = btrfs_truncate_reserve_metadata(trans, root,
-                                                             rsv);
-                       BUG_ON(ret);
-                       trans->block_rsv = rsv;
                }
  
+               trans->block_rsv = rsv;
                ret = btrfs_truncate_inode_items(trans, root, inode,
                                                 inode->i_size,
                                                 BTRFS_EXTENT_DATA_KEY);
                        err = ret;
                        break;
                }
+ end_trans:
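+               /* end this transaction; the next pass refills the reserve */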
                nr = trans->blocks_used;
                btrfs_end_transaction(trans, root);
                trans = NULL;
@@@ -6728,7 -6564,7 +6564,7 @@@ int btrfs_create_subvol_root(struct btr
        inode->i_op = &btrfs_dir_inode_operations;
        inode->i_fop = &btrfs_dir_file_operations;
  
 -      inode->i_nlink = 1;
 +      set_nlink(inode, 1);
        btrfs_i_size_write(inode, 0);
  
        err = btrfs_update_inode(trans, new_root, inode);
@@@ -6755,9 -6591,9 +6591,9 @@@ struct inode *btrfs_alloc_inode(struct 
        ei->last_sub_trans = 0;
        ei->logged_trans = 0;
        ei->delalloc_bytes = 0;
-       ei->reserved_bytes = 0;
        ei->disk_i_size = 0;
        ei->flags = 0;
+       ei->csum_bytes = 0;
        ei->index_cnt = (u64)-1;
        ei->last_unlink_trans = 0;
  
@@@ -6803,6 -6639,8 +6639,8 @@@ void btrfs_destroy_inode(struct inode *
        WARN_ON(inode->i_data.nrpages);
        WARN_ON(BTRFS_I(inode)->outstanding_extents);
        WARN_ON(BTRFS_I(inode)->reserved_extents);
+       WARN_ON(BTRFS_I(inode)->delalloc_bytes);
+       WARN_ON(BTRFS_I(inode)->csum_bytes);
  
        /*
         * This can happen where we create an inode, but somebody else also
@@@ -7420,7 -7258,6 +7258,6 @@@ static struct extent_io_ops btrfs_exten
        .readpage_end_io_hook = btrfs_readpage_end_io_hook,
        .writepage_end_io_hook = btrfs_writepage_end_io_hook,
        .writepage_start_hook = btrfs_writepage_start_hook,
-       .readpage_io_failed_hook = btrfs_io_failed_hook,
        .set_bit_hook = btrfs_set_bit_hook,
        .clear_bit_hook = btrfs_clear_bit_hook,
        .merge_extent_hook = btrfs_merge_extent_hook,
diff --combined fs/btrfs/tree-log.c
index 0618aa39740b35cc7cb1620e5c1cf034c18a5400,f4d81c06d48fc613061ca5192211914e7fbe23b6..3568374d419da8ee50eb17f4af5319964750614d
@@@ -276,8 -276,9 +276,9 @@@ static int process_one_buffer(struct bt
                              struct walk_control *wc, u64 gen)
  {
        if (wc->pin)
-               btrfs_pin_extent(log->fs_info->extent_root,
-                                eb->start, eb->len, 0);
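+               /* pin these blocks so they can't be handed out again while
+                * log replay still references them */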
+               btrfs_pin_extent_for_log_replay(wc->trans,
+                                               log->fs_info->extent_root,
+                                               eb->start, eb->len);
  
        if (btrfs_buffer_uptodate(eb, gen)) {
                if (wc->write)
@@@ -1030,7 -1031,7 +1031,7 @@@ static noinline int fixup_inode_link_co
        }
        btrfs_release_path(path);
        if (nlink != inode->i_nlink) {
 -              inode->i_nlink = nlink;
 +              set_nlink(inode, nlink);
                btrfs_update_inode(trans, root, inode);
        }
        BTRFS_I(inode)->index_cnt = (u64)-1;
@@@ -1760,7 -1761,7 +1761,7 @@@ static noinline int walk_down_log_tree(
  
                                WARN_ON(root_owner !=
                                        BTRFS_TREE_LOG_OBJECTID);
-                               ret = btrfs_free_reserved_extent(root,
+                               ret = btrfs_free_and_pin_reserved_extent(root,
                                                         bytenr, blocksize);
                                BUG_ON(ret);
                        }
@@@ -1828,7 -1829,7 +1829,7 @@@ static noinline int walk_up_log_tree(st
                                btrfs_tree_unlock(next);
  
                                WARN_ON(root_owner != BTRFS_TREE_LOG_OBJECTID);
-                               ret = btrfs_free_reserved_extent(root,
+                               ret = btrfs_free_and_pin_reserved_extent(root,
                                                path->nodes[*level]->start,
                                                path->nodes[*level]->len);
                                BUG_ON(ret);
@@@ -1897,7 -1898,7 +1898,7 @@@ static int walk_log_tree(struct btrfs_t
  
                        WARN_ON(log->root_key.objectid !=
                                BTRFS_TREE_LOG_OBJECTID);
-                       ret = btrfs_free_reserved_extent(log, next->start,
+                       ret = btrfs_free_and_pin_reserved_extent(log, next->start,
                                                         next->len);
                        BUG_ON(ret);
                }
@@@ -2013,10 -2014,10 +2014,10 @@@ int btrfs_sync_log(struct btrfs_trans_h
        /* wait for previous tree log sync to complete */
        if (atomic_read(&root->log_commit[(index1 + 1) % 2]))
                wait_log_commit(trans, root, root->log_transid - 1);
        while (1) {
                unsigned long batch = root->log_batch;
-               if (root->log_multiple_pids) {
+               /* when we're on an ssd, just kick the log commit out */
+               if (!btrfs_test_opt(root, SSD) && root->log_multiple_pids) {
                        mutex_unlock(&root->log_mutex);
                        schedule_timeout_uninterruptible(1);
                        mutex_lock(&root->log_mutex);
        BUG_ON(ret);
        btrfs_wait_marked_extents(log, &log->dirty_log_pages, mark);
  
-       btrfs_set_super_log_root(&root->fs_info->super_for_commit,
+       btrfs_set_super_log_root(root->fs_info->super_for_commit,
                                log_root_tree->node->start);
-       btrfs_set_super_log_root_level(&root->fs_info->super_for_commit,
+       btrfs_set_super_log_root_level(root->fs_info->super_for_commit,
                                btrfs_header_level(log_root_tree->node));
  
        log_root_tree->log_batch = 0;
diff --combined fs/btrfs/xattr.c
index 426aa464f1afc45aa5501e35af6ebb4f8ab37aa7,a76e41c04b711fd0f637d2b0f293424098acf69f..3848b04e310e4800f6768160c6ef5111734ad1d5
@@@ -127,6 -127,17 +127,17 @@@ static int do_setxattr(struct btrfs_tra
  again:
        ret = btrfs_insert_xattr_item(trans, root, path, btrfs_ino(inode),
                                      name, name_len, value, size);
+       /*
+        * If we're setting an xattr to a new value but the new value is, say,
+        * exactly BTRFS_MAX_XATTR_SIZE, we could end up with EOVERFLOW coming
+        * back from split_leaf.  This is because it thinks we'll be extending
+        * the existing item size, when really we're asking for enough space to
+        * add the item itself.  So if we get EOVERFLOW, just set ret to EEXIST
+        * and let the rest of the function figure it out.
+        */
+       if (ret == -EOVERFLOW)
+               ret = -EEXIST;
        if (ret == -EEXIST) {
                if (flags & XATTR_CREATE)
                        goto out;
@@@ -383,36 -394,36 +394,36 @@@ int btrfs_removexattr(struct dentry *de
                                XATTR_REPLACE);
  }
  
 -int btrfs_xattr_security_init(struct btrfs_trans_handle *trans,
 -                            struct inode *inode, struct inode *dir,
 -                            const struct qstr *qstr)
 +int btrfs_initxattrs(struct inode *inode, const struct xattr *xattr_array,
 +                   void *fs_info)
  {
 -      int err;
 -      size_t len;
 -      void *value;
 -      char *suffix;
 +      const struct xattr *xattr;
 +      struct btrfs_trans_handle *trans = fs_info;
        char *name;
 +      int err = 0;
  
 -      err = security_inode_init_security(inode, dir, qstr, &suffix, &value,
 -                                         &len);
 -      if (err) {
 -              if (err == -EOPNOTSUPP)
 -                      return 0;
 -              return err;
 -      }
 -
 -      name = kmalloc(XATTR_SECURITY_PREFIX_LEN + strlen(suffix) + 1,
 -                     GFP_NOFS);
 -      if (!name) {
 -              err = -ENOMEM;
 -      } else {
 +      for (xattr = xattr_array; xattr->name != NULL; xattr++) {
 +              name = kmalloc(XATTR_SECURITY_PREFIX_LEN +
 +                             strlen(xattr->name) + 1, GFP_NOFS);
 +              if (!name) {
 +                      err = -ENOMEM;
 +                      break;
 +              }
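 +              /* build the on-disk name: "security." + xattr->name */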
                strcpy(name, XATTR_SECURITY_PREFIX);
 -              strcpy(name + XATTR_SECURITY_PREFIX_LEN, suffix);
 -              err = __btrfs_setxattr(trans, inode, name, value, len, 0);
 +              strcpy(name + XATTR_SECURITY_PREFIX_LEN, xattr->name);
 +              err = __btrfs_setxattr(trans, inode, name,
 +                                     xattr->value, xattr->value_len, 0);
                kfree(name);
 +              if (err < 0)
 +                      break;
        }
 -
 -      kfree(suffix);
 -      kfree(value);
        return err;
  }
 +
 +int btrfs_xattr_security_init(struct btrfs_trans_handle *trans,
 +                            struct inode *inode, struct inode *dir,
 +                            const struct qstr *qstr)
 +{
 +      return security_inode_init_security(inode, dir, qstr,
 +                                          &btrfs_initxattrs, trans);
 +}