Btrfs: make add_pinned_bytes() take an s64 num_bytes instead of u64
fs/btrfs/extent-tree.c
index e390451c72e6cdb93492e519cea82d5d7b3dfaf9..d784ecef27c034468d965eefd54d767dc831a1da 100644
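
The commit named above changes add_pinned_bytes() to take a signed byte count, presumably so callers can pass a negative delta to subtract from a space_info's total_bytes_pinned percpu counter as well as add to it (the relocated definition appears in the second hunk below). As a toy, standalone illustration of the signedness point (not kernel code; the values are made up):

    #include <stdio.h>
    #include <stdint.h>

    /* Toy stand-in for percpu_counter_add() on total_bytes_pinned. */
    static int64_t total_bytes_pinned;

    /*
     * With an s64-style parameter the sign of the delta is explicit in the
     * prototype; with u64 a negative caller value would silently wrap.
     */
    static void add_pinned_bytes(int64_t num_bytes)
    {
            total_bytes_pinned += num_bytes;
    }

    int main(void)
    {
            add_pinned_bytes(16384);        /* pin 16K */
            add_pinned_bytes(-16384);       /* unpin 16K */
            printf("pinned: %lld\n", (long long)total_bytes_pinned); /* 0 */
            return 0;
    }
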
@@ -97,10 +97,11 @@ static int btrfs_free_reserved_bytes(struct btrfs_block_group_cache *cache,
                                     u64 num_bytes, int delalloc);
 static int block_rsv_use_bytes(struct btrfs_block_rsv *block_rsv,
                               u64 num_bytes);
-static int __reserve_metadata_bytes(struct btrfs_root *root,
+static int __reserve_metadata_bytes(struct btrfs_fs_info *fs_info,
                                    struct btrfs_space_info *space_info,
                                    u64 orig_bytes,
-                                   enum btrfs_reserve_flush_enum flush);
+                                   enum btrfs_reserve_flush_enum flush,
+                                   bool system_chunk);
 static void space_info_add_new_bytes(struct btrfs_fs_info *fs_info,
                                     struct btrfs_space_info *space_info,
                                     u64 num_bytes);
@@ -766,6 +767,26 @@ static struct btrfs_space_info *__find_space_info(struct btrfs_fs_info *info,
        return NULL;
 }
 
+static void add_pinned_bytes(struct btrfs_fs_info *fs_info, s64 num_bytes,
+                            u64 owner, u64 root_objectid)
+{
+       struct btrfs_space_info *space_info;
+       u64 flags;
+
+       if (owner < BTRFS_FIRST_FREE_OBJECTID) {
+               if (root_objectid == BTRFS_CHUNK_TREE_OBJECTID)
+                       flags = BTRFS_BLOCK_GROUP_SYSTEM;
+               else
+                       flags = BTRFS_BLOCK_GROUP_METADATA;
+       } else {
+               flags = BTRFS_BLOCK_GROUP_DATA;
+       }
+
+       space_info = __find_space_info(fs_info, flags);
+       BUG_ON(!space_info); /* Logic bug */
+       percpu_counter_add(&space_info->total_bytes_pinned, num_bytes);
+}
+
 /*
  * after adding space to the filesystem, we need to clear the full flags
  * on all the space infos.
@@ -3924,87 +3945,83 @@ static const char *alloc_name(u64 flags)
        };
 }
 
-static int update_space_info(struct btrfs_fs_info *info, u64 flags,
-                            u64 total_bytes, u64 bytes_used,
-                            u64 bytes_readonly,
-                            struct btrfs_space_info **space_info)
+static int create_space_info(struct btrfs_fs_info *info, u64 flags,
+                            struct btrfs_space_info **new)
 {
-       struct btrfs_space_info *found;
+
+       struct btrfs_space_info *space_info;
        int i;
-       int factor;
        int ret;
 
-       if (flags & (BTRFS_BLOCK_GROUP_DUP | BTRFS_BLOCK_GROUP_RAID1 |
-                    BTRFS_BLOCK_GROUP_RAID10))
-               factor = 2;
-       else
-               factor = 1;
-
-       found = __find_space_info(info, flags);
-       if (found) {
-               spin_lock(&found->lock);
-               found->total_bytes += total_bytes;
-               found->disk_total += total_bytes * factor;
-               found->bytes_used += bytes_used;
-               found->disk_used += bytes_used * factor;
-               found->bytes_readonly += bytes_readonly;
-               if (total_bytes > 0)
-                       found->full = 0;
-               space_info_add_new_bytes(info, found, total_bytes -
-                                        bytes_used - bytes_readonly);
-               spin_unlock(&found->lock);
-               *space_info = found;
-               return 0;
-       }
-       found = kzalloc(sizeof(*found), GFP_NOFS);
-       if (!found)
+       space_info = kzalloc(sizeof(*space_info), GFP_NOFS);
+       if (!space_info)
                return -ENOMEM;
 
-       ret = percpu_counter_init(&found->total_bytes_pinned, 0, GFP_KERNEL);
+       ret = percpu_counter_init(&space_info->total_bytes_pinned, 0,
+                                GFP_KERNEL);
        if (ret) {
-               kfree(found);
+               kfree(space_info);
                return ret;
        }
 
        for (i = 0; i < BTRFS_NR_RAID_TYPES; i++)
-               INIT_LIST_HEAD(&found->block_groups[i]);
-       init_rwsem(&found->groups_sem);
-       spin_lock_init(&found->lock);
-       found->flags = flags & BTRFS_BLOCK_GROUP_TYPE_MASK;
-       found->total_bytes = total_bytes;
-       found->disk_total = total_bytes * factor;
-       found->bytes_used = bytes_used;
-       found->disk_used = bytes_used * factor;
-       found->bytes_pinned = 0;
-       found->bytes_reserved = 0;
-       found->bytes_readonly = bytes_readonly;
-       found->bytes_may_use = 0;
-       found->full = 0;
-       found->max_extent_size = 0;
-       found->force_alloc = CHUNK_ALLOC_NO_FORCE;
-       found->chunk_alloc = 0;
-       found->flush = 0;
-       init_waitqueue_head(&found->wait);
-       INIT_LIST_HEAD(&found->ro_bgs);
-       INIT_LIST_HEAD(&found->tickets);
-       INIT_LIST_HEAD(&found->priority_tickets);
-
-       ret = kobject_init_and_add(&found->kobj, &space_info_ktype,
+               INIT_LIST_HEAD(&space_info->block_groups[i]);
+       init_rwsem(&space_info->groups_sem);
+       spin_lock_init(&space_info->lock);
+       space_info->flags = flags & BTRFS_BLOCK_GROUP_TYPE_MASK;
+       space_info->force_alloc = CHUNK_ALLOC_NO_FORCE;
+       init_waitqueue_head(&space_info->wait);
+       INIT_LIST_HEAD(&space_info->ro_bgs);
+       INIT_LIST_HEAD(&space_info->tickets);
+       INIT_LIST_HEAD(&space_info->priority_tickets);
+
+       ret = kobject_init_and_add(&space_info->kobj, &space_info_ktype,
                                    info->space_info_kobj, "%s",
-                                   alloc_name(found->flags));
+                                   alloc_name(space_info->flags));
        if (ret) {
-               kfree(found);
+               percpu_counter_destroy(&space_info->total_bytes_pinned);
+               kfree(space_info);
                return ret;
        }
 
-       *space_info = found;
-       list_add_rcu(&found->list, &info->space_info);
+       *new = space_info;
+       list_add_rcu(&space_info->list, &info->space_info);
        if (flags & BTRFS_BLOCK_GROUP_DATA)
-               info->data_sinfo = found;
+               info->data_sinfo = space_info;
 
        return ret;
 }
 
+static void update_space_info(struct btrfs_fs_info *info, u64 flags,
+                            u64 total_bytes, u64 bytes_used,
+                            u64 bytes_readonly,
+                            struct btrfs_space_info **space_info)
+{
+       struct btrfs_space_info *found;
+       int factor;
+
+       if (flags & (BTRFS_BLOCK_GROUP_DUP | BTRFS_BLOCK_GROUP_RAID1 |
+                    BTRFS_BLOCK_GROUP_RAID10))
+               factor = 2;
+       else
+               factor = 1;
+
+       found = __find_space_info(info, flags);
+       ASSERT(found);
+       spin_lock(&found->lock);
+       found->total_bytes += total_bytes;
+       found->disk_total += total_bytes * factor;
+       found->bytes_used += bytes_used;
+       found->disk_used += bytes_used * factor;
+       found->bytes_readonly += bytes_readonly;
+       if (total_bytes > 0)
+               found->full = 0;
+       space_info_add_new_bytes(info, found, total_bytes -
+                                bytes_used - bytes_readonly);
+       spin_unlock(&found->lock);
+       *space_info = found;
+}
+
 static void set_avail_alloc_bits(struct btrfs_fs_info *fs_info, u64 flags)
 {
        u64 extra_flags = chunk_to_extended(flags) &
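
The hunk above splits update_space_info()'s old create-or-update behaviour in two: create_space_info() allocates and registers a new space_info, while update_space_info() now only adjusts the counters and asserts the object already exists. Later hunks adjust the callers accordingly; a condensed sketch of the resulting pattern (it mirrors the btrfs_make_block_group() hunk further down; error handling trimmed, names as in the diff):

    space_info = __find_space_info(fs_info, flags);
    if (!space_info) {
            ret = create_space_info(fs_info, flags, &space_info);
            if (ret)
                    return ret;
    }
    /* ... once the block group is linked into the rbtree ... */
    update_space_info(fs_info, flags, total_bytes, bytes_used,
                      bytes_readonly, &space_info);
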
@@ -4120,7 +4137,7 @@ static u64 get_alloc_profile(struct btrfs_fs_info *fs_info, u64 orig_flags)
        return btrfs_reduce_alloc_profile(fs_info, flags);
 }
 
-u64 btrfs_get_alloc_profile(struct btrfs_root *root, int data)
+static u64 get_alloc_profile_by_root(struct btrfs_root *root, int data)
 {
        struct btrfs_fs_info *fs_info = root->fs_info;
        u64 flags;
@@ -4137,6 +4154,21 @@ u64 btrfs_get_alloc_profile(struct btrfs_root *root, int data)
        return ret;
 }
 
+u64 btrfs_data_alloc_profile(struct btrfs_fs_info *fs_info)
+{
+       return get_alloc_profile(fs_info, BTRFS_BLOCK_GROUP_DATA);
+}
+
+u64 btrfs_metadata_alloc_profile(struct btrfs_fs_info *fs_info)
+{
+       return get_alloc_profile(fs_info, BTRFS_BLOCK_GROUP_METADATA);
+}
+
+u64 btrfs_system_alloc_profile(struct btrfs_fs_info *fs_info)
+{
+       return get_alloc_profile(fs_info, BTRFS_BLOCK_GROUP_SYSTEM);
+}
+
 static u64 btrfs_space_info_used(struct btrfs_space_info *s_info,
                                 bool may_use_included)
 {
@@ -4186,7 +4218,7 @@ again:
                        data_sinfo->force_alloc = CHUNK_ALLOC_FORCE;
                        spin_unlock(&data_sinfo->lock);
 alloc:
-                       alloc_target = btrfs_get_alloc_profile(root, 1);
+                       alloc_target = btrfs_data_alloc_profile(fs_info);
                        /*
                         * It is ugly that we don't call nolock join
                         * transaction for the free space inode case here.
@@ -4462,9 +4494,8 @@ void check_system_chunk(struct btrfs_trans_handle *trans,
        }
 
        if (left < thresh) {
-               u64 flags;
+               u64 flags = btrfs_system_alloc_profile(fs_info);
 
-               flags = btrfs_get_alloc_profile(fs_info->chunk_root, 0);
                /*
                 * Ignore failure to create system chunk. We might end up not
                 * needing it, as we might not need to COW all nodes/leafs from
@@ -4505,10 +4536,10 @@ static int do_chunk_alloc(struct btrfs_trans_handle *trans,
 
        space_info = __find_space_info(fs_info, flags);
        if (!space_info) {
-               ret = update_space_info(fs_info, flags, 0, 0, 0, &space_info);
-               BUG_ON(ret); /* -ENOMEM */
+               ret = create_space_info(fs_info, flags, &space_info);
+               if (ret)
+                       return ret;
        }
-       BUG_ON(!space_info); /* Logic error */
 
 again:
        spin_lock(&space_info->lock);
@@ -4613,11 +4644,11 @@ out:
        return ret;
 }
 
-static int can_overcommit(struct btrfs_root *root,
+static int can_overcommit(struct btrfs_fs_info *fs_info,
                          struct btrfs_space_info *space_info, u64 bytes,
-                         enum btrfs_reserve_flush_enum flush)
+                         enum btrfs_reserve_flush_enum flush,
+                         bool system_chunk)
 {
-       struct btrfs_fs_info *fs_info = root->fs_info;
        struct btrfs_block_rsv *global_rsv = &fs_info->global_block_rsv;
        u64 profile;
        u64 space_size;
@@ -4628,7 +4659,11 @@ static int can_overcommit(struct btrfs_root *root,
        if (space_info->flags & BTRFS_BLOCK_GROUP_DATA)
                return 0;
 
-       profile = btrfs_get_alloc_profile(root, 0);
+       if (system_chunk)
+               profile = btrfs_system_alloc_profile(fs_info);
+       else
+               profile = btrfs_metadata_alloc_profile(fs_info);
+
        used = btrfs_space_info_used(space_info, false);
 
        /*
@@ -4645,9 +4680,7 @@ static int can_overcommit(struct btrfs_root *root,
 
        used += space_info->bytes_may_use;
 
-       spin_lock(&fs_info->free_chunk_lock);
-       avail = fs_info->free_chunk_space;
-       spin_unlock(&fs_info->free_chunk_lock);
+       avail = atomic64_read(&fs_info->free_chunk_space);
 
        /*
         * If we have dup, raid1 or raid10 then only half of the free
@@ -4715,10 +4748,9 @@ static inline int calc_reclaim_items_nr(struct btrfs_fs_info *fs_info,
 /*
  * shrink metadata reservation for delalloc
  */
-static void shrink_delalloc(struct btrfs_root *root, u64 to_reclaim, u64 orig,
-                           bool wait_ordered)
+static void shrink_delalloc(struct btrfs_fs_info *fs_info, u64 to_reclaim,
+                           u64 orig, bool wait_ordered)
 {
-       struct btrfs_fs_info *fs_info = root->fs_info;
        struct btrfs_block_rsv *block_rsv;
        struct btrfs_space_info *space_info;
        struct btrfs_trans_handle *trans;
@@ -4775,7 +4807,7 @@ skip_async:
                else
                        flush = BTRFS_RESERVE_NO_FLUSH;
                spin_lock(&space_info->lock);
-               if (can_overcommit(root, space_info, orig, flush)) {
+               if (can_overcommit(fs_info, space_info, orig, flush, false)) {
                        spin_unlock(&space_info->lock);
                        break;
                }
@@ -4837,14 +4869,14 @@ static int may_commit_transaction(struct btrfs_fs_info *fs_info,
 
        spin_lock(&delayed_rsv->lock);
        if (percpu_counter_compare(&space_info->total_bytes_pinned,
-                                  bytes - delayed_rsv->size) >= 0) {
+                                  bytes - delayed_rsv->size) < 0) {
                spin_unlock(&delayed_rsv->lock);
                return -ENOSPC;
        }
        spin_unlock(&delayed_rsv->lock);
 
 commit:
-       trans = btrfs_join_transaction(fs_info->fs_root);
+       trans = btrfs_join_transaction(fs_info->extent_root);
        if (IS_ERR(trans))
                return -ENOSPC;
 
@@ -4862,7 +4894,7 @@ static int flush_space(struct btrfs_fs_info *fs_info,
                       struct btrfs_space_info *space_info, u64 num_bytes,
                       u64 orig_bytes, int state)
 {
-       struct btrfs_root *root = fs_info->fs_root;
+       struct btrfs_root *root = fs_info->extent_root;
        struct btrfs_trans_handle *trans;
        int nr;
        int ret = 0;
@@ -4885,7 +4917,7 @@ static int flush_space(struct btrfs_fs_info *fs_info,
                break;
        case FLUSH_DELALLOC:
        case FLUSH_DELALLOC_WAIT:
-               shrink_delalloc(root, num_bytes * 2, orig_bytes,
+               shrink_delalloc(fs_info, num_bytes * 2, orig_bytes,
                                state == FLUSH_DELALLOC_WAIT);
                break;
        case ALLOC_CHUNK:
@@ -4895,7 +4927,7 @@ static int flush_space(struct btrfs_fs_info *fs_info,
                        break;
                }
                ret = do_chunk_alloc(trans, fs_info,
-                                    btrfs_get_alloc_profile(root, 0),
+                                    btrfs_metadata_alloc_profile(fs_info),
                                     CHUNK_ALLOC_NO_FORCE);
                btrfs_end_transaction(trans);
                if (ret > 0 || ret == -ENOSPC)
@@ -4916,8 +4948,9 @@ static int flush_space(struct btrfs_fs_info *fs_info,
 }
 
 static inline u64
-btrfs_calc_reclaim_metadata_size(struct btrfs_root *root,
-                                struct btrfs_space_info *space_info)
+btrfs_calc_reclaim_metadata_size(struct btrfs_fs_info *fs_info,
+                                struct btrfs_space_info *space_info,
+                                bool system_chunk)
 {
        struct reserve_ticket *ticket;
        u64 used;
@@ -4932,14 +4965,14 @@ btrfs_calc_reclaim_metadata_size(struct btrfs_root *root,
                return to_reclaim;
 
        to_reclaim = min_t(u64, num_online_cpus() * SZ_1M, SZ_16M);
-       if (can_overcommit(root, space_info, to_reclaim,
-                          BTRFS_RESERVE_FLUSH_ALL))
+       if (can_overcommit(fs_info, space_info, to_reclaim,
+                          BTRFS_RESERVE_FLUSH_ALL, system_chunk))
                return 0;
 
-       used = space_info->bytes_used + space_info->bytes_reserved +
-              space_info->bytes_pinned + space_info->bytes_readonly +
-              space_info->bytes_may_use;
-       if (can_overcommit(root, space_info, SZ_1M, BTRFS_RESERVE_FLUSH_ALL))
+       used = btrfs_space_info_used(space_info, true);
+
+       if (can_overcommit(fs_info, space_info, SZ_1M,
+                          BTRFS_RESERVE_FLUSH_ALL, system_chunk))
                expected = div_factor_fine(space_info->total_bytes, 95);
        else
                expected = div_factor_fine(space_info->total_bytes, 90);
@@ -4953,17 +4986,18 @@ btrfs_calc_reclaim_metadata_size(struct btrfs_root *root,
        return to_reclaim;
 }
 
-static inline int need_do_async_reclaim(struct btrfs_space_info *space_info,
-                                       struct btrfs_root *root, u64 used)
+static inline int need_do_async_reclaim(struct btrfs_fs_info *fs_info,
+                                       struct btrfs_space_info *space_info,
+                                       u64 used, bool system_chunk)
 {
-       struct btrfs_fs_info *fs_info = root->fs_info;
        u64 thresh = div_factor_fine(space_info->total_bytes, 98);
 
        /* If we're just plain full then async reclaim just slows us down. */
        if ((space_info->bytes_used + space_info->bytes_reserved) >= thresh)
                return 0;
 
-       if (!btrfs_calc_reclaim_metadata_size(root, space_info))
+       if (!btrfs_calc_reclaim_metadata_size(fs_info, space_info,
+                                             system_chunk))
                return 0;
 
        return (used >= thresh && !btrfs_fs_closing(fs_info) &&
@@ -5000,8 +5034,8 @@ static void btrfs_async_reclaim_metadata_space(struct work_struct *work)
        space_info = __find_space_info(fs_info, BTRFS_BLOCK_GROUP_METADATA);
 
        spin_lock(&space_info->lock);
-       to_reclaim = btrfs_calc_reclaim_metadata_size(fs_info->fs_root,
-                                                     space_info);
+       to_reclaim = btrfs_calc_reclaim_metadata_size(fs_info, space_info,
+                                                     false);
        if (!to_reclaim) {
                space_info->flush = 0;
                spin_unlock(&space_info->lock);
@@ -5023,8 +5057,9 @@ static void btrfs_async_reclaim_metadata_space(struct work_struct *work)
                        spin_unlock(&space_info->lock);
                        return;
                }
-               to_reclaim = btrfs_calc_reclaim_metadata_size(fs_info->fs_root,
-                                                             space_info);
+               to_reclaim = btrfs_calc_reclaim_metadata_size(fs_info,
+                                                             space_info,
+                                                             false);
                ticket = list_first_entry(&space_info->tickets,
                                          struct reserve_ticket, list);
                if (last_tickets_id == space_info->tickets_id) {
@@ -5062,8 +5097,8 @@ static void priority_reclaim_metadata_space(struct btrfs_fs_info *fs_info,
        int flush_state = FLUSH_DELAYED_ITEMS_NR;
 
        spin_lock(&space_info->lock);
-       to_reclaim = btrfs_calc_reclaim_metadata_size(fs_info->fs_root,
-                                                     space_info);
+       to_reclaim = btrfs_calc_reclaim_metadata_size(fs_info, space_info,
+                                                     false);
        if (!to_reclaim) {
                spin_unlock(&space_info->lock);
                return;
@@ -5142,12 +5177,12 @@ static int wait_reserve_ticket(struct btrfs_fs_info *fs_info,
  * regain reservations will be made and this will fail if there is not enough
  * space already.
  */
-static int __reserve_metadata_bytes(struct btrfs_root *root,
+static int __reserve_metadata_bytes(struct btrfs_fs_info *fs_info,
                                    struct btrfs_space_info *space_info,
                                    u64 orig_bytes,
-                                   enum btrfs_reserve_flush_enum flush)
+                                   enum btrfs_reserve_flush_enum flush,
+                                   bool system_chunk)
 {
-       struct btrfs_fs_info *fs_info = root->fs_info;
        struct reserve_ticket ticket;
        u64 used;
        int ret = 0;
@@ -5169,7 +5204,8 @@ static int __reserve_metadata_bytes(struct btrfs_root *root,
                trace_btrfs_space_reservation(fs_info, "space_info",
                                              space_info->flags, orig_bytes, 1);
                ret = 0;
-       } else if (can_overcommit(root, space_info, orig_bytes, flush)) {
+       } else if (can_overcommit(fs_info, space_info, orig_bytes, flush,
+                                 system_chunk)) {
                space_info->bytes_may_use += orig_bytes;
                trace_btrfs_space_reservation(fs_info, "space_info",
                                              space_info->flags, orig_bytes, 1);
@@ -5196,7 +5232,7 @@ static int __reserve_metadata_bytes(struct btrfs_root *root,
                                                          orig_bytes, flush,
                                                          "enospc");
                                queue_work(system_unbound_wq,
-                                          &root->fs_info->async_reclaim_work);
+                                          &fs_info->async_reclaim_work);
                        }
                } else {
                        list_add_tail(&ticket.list,
@@ -5210,7 +5246,8 @@ static int __reserve_metadata_bytes(struct btrfs_root *root,
                 * the async reclaim as we will panic.
                 */
                if (!test_bit(BTRFS_FS_LOG_RECOVERING, &fs_info->flags) &&
-                   need_do_async_reclaim(space_info, root, used) &&
+                   need_do_async_reclaim(fs_info, space_info,
+                                         used, system_chunk) &&
                    !work_busy(&fs_info->async_reclaim_work)) {
                        trace_btrfs_trigger_flush(fs_info, space_info->flags,
                                                  orig_bytes, flush, "preempt");
@@ -5268,9 +5305,10 @@ static int reserve_metadata_bytes(struct btrfs_root *root,
        struct btrfs_fs_info *fs_info = root->fs_info;
        struct btrfs_block_rsv *global_rsv = &fs_info->global_block_rsv;
        int ret;
+       bool system_chunk = (root == fs_info->chunk_root);
 
-       ret = __reserve_metadata_bytes(root, block_rsv->space_info, orig_bytes,
-                                      flush);
+       ret = __reserve_metadata_bytes(fs_info, block_rsv->space_info,
+                                      orig_bytes, flush, system_chunk);
        if (ret == -ENOSPC &&
            unlikely(root->orphan_cleanup_state == ORPHAN_CLEANUP_STARTED)) {
                if (block_rsv != global_rsv &&
@@ -5379,9 +5417,7 @@ static void space_info_add_old_bytes(struct btrfs_fs_info *fs_info,
         * overcommit, and if we can't then we just need to free up our space
         * and not satisfy any requests.
         */
-       used = space_info->bytes_used + space_info->bytes_reserved +
-               space_info->bytes_pinned + space_info->bytes_readonly +
-               space_info->bytes_may_use;
+       used = btrfs_space_info_used(space_info, true);
        if (used - num_bytes >= space_info->total_bytes)
                check_overcommit = true;
 again:
@@ -5393,8 +5429,7 @@ again:
                 * adding the ticket space would be a double count.
                 */
                if (check_overcommit &&
-                   !can_overcommit(fs_info->extent_root, space_info, 0,
-                                   flush))
+                   !can_overcommit(fs_info, space_info, 0, flush, false))
                        break;
                if (num_bytes >= ticket->bytes) {
                        list_del_init(&ticket->list);
@@ -6793,27 +6828,6 @@ int btrfs_finish_extent_commit(struct btrfs_trans_handle *trans,
        return 0;
 }
 
-static void add_pinned_bytes(struct btrfs_fs_info *fs_info, u64 num_bytes,
-                            u64 owner, u64 root_objectid)
-{
-       struct btrfs_space_info *space_info;
-       u64 flags;
-
-       if (owner < BTRFS_FIRST_FREE_OBJECTID) {
-               if (root_objectid == BTRFS_CHUNK_TREE_OBJECTID)
-                       flags = BTRFS_BLOCK_GROUP_SYSTEM;
-               else
-                       flags = BTRFS_BLOCK_GROUP_METADATA;
-       } else {
-               flags = BTRFS_BLOCK_GROUP_DATA;
-       }
-
-       space_info = __find_space_info(fs_info, flags);
-       BUG_ON(!space_info); /* Logic bug */
-       percpu_counter_add(&space_info->total_bytes_pinned, num_bytes);
-}
-
-
 static int __btrfs_free_extent(struct btrfs_trans_handle *trans,
                                struct btrfs_fs_info *info,
                                struct btrfs_delayed_ref_node *node, u64 parent,
@@ -7955,7 +7969,7 @@ int btrfs_reserve_extent(struct btrfs_root *root, u64 ram_bytes,
        u64 flags;
        int ret;
 
-       flags = btrfs_get_alloc_profile(root, is_data);
+       flags = get_alloc_profile_by_root(root, is_data);
 again:
        WARN_ON(num_bytes < fs_info->sectorsize);
        ret = find_free_extent(fs_info, ram_bytes, num_bytes, empty_size,
@@ -10058,19 +10072,9 @@ int btrfs_read_block_groups(struct btrfs_fs_info *info)
                }
 
                trace_btrfs_add_block_group(info, cache, 0);
-               ret = update_space_info(info, cache->flags, found_key.offset,
-                                       btrfs_block_group_used(&cache->item),
-                                       cache->bytes_super, &space_info);
-               if (ret) {
-                       btrfs_remove_free_space_cache(cache);
-                       spin_lock(&info->block_group_cache_lock);
-                       rb_erase(&cache->cache_node,
-                                &info->block_group_cache_tree);
-                       RB_CLEAR_NODE(&cache->cache_node);
-                       spin_unlock(&info->block_group_cache_lock);
-                       btrfs_put_block_group(cache);
-                       goto error;
-               }
+               update_space_info(info, cache->flags, found_key.offset,
+                                 btrfs_block_group_used(&cache->item),
+                                 cache->bytes_super, &space_info);
 
                cache->space_info = space_info;
 
@@ -10202,16 +10206,19 @@ int btrfs_make_block_group(struct btrfs_trans_handle *trans,
        }
 #endif
        /*
-        * Call to ensure the corresponding space_info object is created and
-        * assigned to our block group, but don't update its counters just yet.
-        * We want our bg to be added to the rbtree with its ->space_info set.
+        * Ensure the corresponding space_info object is created and
+        * assigned to our block group. We want our bg to be added to the rbtree
+        * with its ->space_info set.
         */
-       ret = update_space_info(fs_info, cache->flags, 0, 0, 0,
-                               &cache->space_info);
-       if (ret) {
-               btrfs_remove_free_space_cache(cache);
-               btrfs_put_block_group(cache);
-               return ret;
+       cache->space_info = __find_space_info(fs_info, cache->flags);
+       if (!cache->space_info) {
+               ret = create_space_info(fs_info, cache->flags,
+                                      &cache->space_info);
+               if (ret) {
+                       btrfs_remove_free_space_cache(cache);
+                       btrfs_put_block_group(cache);
+                       return ret;
+               }
        }
 
        ret = btrfs_add_block_group_cache(fs_info, cache);
@@ -10226,18 +10233,8 @@ int btrfs_make_block_group(struct btrfs_trans_handle *trans,
         * the rbtree, update the space info's counters.
         */
        trace_btrfs_add_block_group(fs_info, cache, 1);
-       ret = update_space_info(fs_info, cache->flags, size, bytes_used,
+       update_space_info(fs_info, cache->flags, size, bytes_used,
                                cache->bytes_super, &cache->space_info);
-       if (ret) {
-               btrfs_remove_free_space_cache(cache);
-               spin_lock(&fs_info->block_group_cache_lock);
-               rb_erase(&cache->cache_node,
-                        &fs_info->block_group_cache_tree);
-               RB_CLEAR_NODE(&cache->cache_node);
-               spin_unlock(&fs_info->block_group_cache_lock);
-               btrfs_put_block_group(cache);
-               return ret;
-       }
        update_global_block_rsv(fs_info);
 
        __link_block_group(cache->space_info, cache);
@@ -10785,21 +10782,21 @@ int btrfs_init_space_info(struct btrfs_fs_info *fs_info)
                mixed = 1;
 
        flags = BTRFS_BLOCK_GROUP_SYSTEM;
-       ret = update_space_info(fs_info, flags, 0, 0, 0, &space_info);
+       ret = create_space_info(fs_info, flags, &space_info);
        if (ret)
                goto out;
 
        if (mixed) {
                flags = BTRFS_BLOCK_GROUP_METADATA | BTRFS_BLOCK_GROUP_DATA;
-               ret = update_space_info(fs_info, flags, 0, 0, 0, &space_info);
+               ret = create_space_info(fs_info, flags, &space_info);
        } else {
                flags = BTRFS_BLOCK_GROUP_METADATA;
-               ret = update_space_info(fs_info, flags, 0, 0, 0, &space_info);
+               ret = create_space_info(fs_info, flags, &space_info);
                if (ret)
                        goto out;
 
                flags = BTRFS_BLOCK_GROUP_DATA;
-               ret = update_space_info(fs_info, flags, 0, 0, 0, &space_info);
+               ret = create_space_info(fs_info, flags, &space_info);
        }
 out:
        return ret;