btrfs: qgroup: catch reserved space leaks at unmount time
author    Qu Wenruo <wqu@suse.com>
          Wed, 10 Jun 2020 01:04:44 +0000 (09:04 +0800)
committer David Sterba <dsterba@suse.com>
          Mon, 27 Jul 2020 10:55:24 +0000 (12:55 +0200)
Before this patch, qgroup relies entirely on the per-inode extent io tree
to detect reserved data space leaks.

However, a previous bug has already shown how releasing a page before
btrfs_finish_ordered_io() could lead to a leak, and since the
QGROUP_RESERVED bit gets cleared without updating the qgroup rsv, such a
leak can't be detected by the per-inode extent io tree.

So this patch adds another (and hopefully the final) safety net to catch
qgroup data reserved space leaks.  The new safety net has caught all the
leaks encountered during development, so it should be useful in the real
world as well.

Reviewed-by: Josef Bacik <josef@toxicpanda.com>
Signed-off-by: Qu Wenruo <wqu@suse.com>
Reviewed-by: David Sterba <dsterba@suse.com>
Signed-off-by: David Sterba <dsterba@suse.com>
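
To make the idea concrete, below is a minimal user-space sketch of the
bookkeeping that the new unmount-time check walks.  This is a simplified
model, not the kernel code: the reservation types only mirror
enum btrfs_qgroup_rsv_type (data, per-trans metadata, prealloc metadata),
and the helper names are local stand-ins.  The point is that every
reservation must be paired with a release of the same type and size, and
anything still non-zero when the filesystem goes away is a leak.

#include <stdbool.h>
#include <stdio.h>

/* Simplified stand-in for enum btrfs_qgroup_rsv_type. */
enum rsv_type { RSV_DATA, RSV_META_PERTRANS, RSV_META_PREALLOC, RSV_LAST };

/* One qgroup's reservation counters, like btrfs_qgroup::rsv.values[]. */
struct qgroup_rsv {
	unsigned long long values[RSV_LAST];
};

static void reserve(struct qgroup_rsv *rsv, enum rsv_type type,
		    unsigned long long bytes)
{
	rsv->values[type] += bytes;
}

static void release(struct qgroup_rsv *rsv, enum rsv_type type,
		    unsigned long long bytes)
{
	rsv->values[type] -= bytes;
}

/* What the unmount-time safety net does: report any non-zero counter. */
static bool check_leak(const struct qgroup_rsv *rsv)
{
	bool leaked = false;
	int i;

	for (i = 0; i < RSV_LAST; i++) {
		if (rsv->values[i]) {
			printf("unreleased space, type %d rsv %llu\n",
			       i, rsv->values[i]);
			leaked = true;
		}
	}
	return leaked;
}

int main(void)
{
	struct qgroup_rsv rsv = { { 0 } };

	reserve(&rsv, RSV_DATA, 4096);
	reserve(&rsv, RSV_DATA, 8192);
	release(&rsv, RSV_DATA, 8192);
	/* The matching release for the first 4096 bytes is "forgotten" --
	 * exactly the kind of imbalance the unmount-time check reports. */
	return check_leak(&rsv) ? 1 : 0;
}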
fs/btrfs/disk-io.c
fs/btrfs/qgroup.c
fs/btrfs/qgroup.h

fs/btrfs/disk-io.c
index b1a148058773e486c55b899e2360627bb2d15db1..a9cf6152d175ae7d737e1a4eb469e0ffcb251043 100644
@@ -4058,6 +4058,11 @@ void __cold close_ctree(struct btrfs_fs_info *fs_info)
        ASSERT(list_empty(&fs_info->delayed_iputs));
        set_bit(BTRFS_FS_CLOSING_DONE, &fs_info->flags);
 
+       if (btrfs_check_quota_leak(fs_info)) {
+               WARN_ON(IS_ENABLED(CONFIG_BTRFS_DEBUG));
+               btrfs_err(fs_info, "qgroup reserved space leaked");
+       }
+
        btrfs_free_qgroup_config(fs_info);
        ASSERT(list_empty(&fs_info->delalloc_roots));
 
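A note on the WARN_ON(IS_ENABLED(CONFIG_BTRFS_DEBUG)) pattern above: the
warning (a stack trace in the kernel) fires only on CONFIG_BTRFS_DEBUG
builds, while the btrfs_err() message is printed unconditionally.  A small
user-space sketch of the same idiom, with BTRFS_DEBUG and the two macros
below as stand-ins rather than the kernel definitions:

#include <stdio.h>

/* Compile with -DBTRFS_DEBUG to mimic a CONFIG_BTRFS_DEBUG build. */
#ifdef BTRFS_DEBUG
#define IS_ENABLED_BTRFS_DEBUG 1
#else
#define IS_ENABLED_BTRFS_DEBUG 0
#endif

/* Minimal stand-in for the kernel's WARN_ON(): loud only when cond is true. */
#define WARN_ON(cond) do { \
	if (cond) \
		fprintf(stderr, "WARNING at %s:%d\n", __FILE__, __LINE__); \
} while (0)

int main(void)
{
	int leaked = 1;	/* pretend btrfs_check_quota_leak() returned true */

	if (leaked) {
		/* Stack-trace-style warning only on debug builds ... */
		WARN_ON(IS_ENABLED_BTRFS_DEBUG);
		/* ... but the error message is emitted unconditionally. */
		fprintf(stderr, "qgroup reserved space leaked\n");
	}
	return 0;
}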
fs/btrfs/qgroup.c
index 5bd4089ad0e1ac0609c390efdf56e185bb033b97..74eb984791098531190a8e202bdf2f06d945e871 100644
@@ -505,6 +505,49 @@ out:
        return ret < 0 ? ret : 0;
 }
 
+static u64 btrfs_qgroup_subvolid(u64 qgroupid)
+{
+       return (qgroupid & ((1ULL << BTRFS_QGROUP_LEVEL_SHIFT) - 1));
+}
+
+/*
+ * Called in close_ctree() when quota is still enabled.  This verifies we don't
+ * leak some reserved space.
+ *
+ * Return false if no reserved space is left.
+ * Return true if some reserved space is leaked.
+ */
+bool btrfs_check_quota_leak(struct btrfs_fs_info *fs_info)
+{
+       struct rb_node *node;
+       bool ret = false;
+
+       if (!test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags))
+               return ret;
+       /*
+        * Since we're unmounting, there is no race and no need to grab qgroup
+        * lock.  And here we don't go post-order to provide a more user
+        * friendly sorted result.
+        */
+       for (node = rb_first(&fs_info->qgroup_tree); node; node = rb_next(node)) {
+               struct btrfs_qgroup *qgroup;
+               int i;
+
+               qgroup = rb_entry(node, struct btrfs_qgroup, node);
+               for (i = 0; i < BTRFS_QGROUP_RSV_LAST; i++) {
+                       if (qgroup->rsv.values[i]) {
+                               ret = true;
+                               btrfs_warn(fs_info,
+               "qgroup %llu/%llu has unreleased space, type %d rsv %llu",
+                                  btrfs_qgroup_level(qgroup->qgroupid),
+                                  btrfs_qgroup_subvolid(qgroup->qgroupid),
+                                  i, qgroup->rsv.values[i]);
+                       }
+               }
+       }
+       return ret;
+}
+
 /*
  * This is called from close_ctree() or open_ctree() or btrfs_quota_disable(),
  * first two are in single-threaded paths. And for the third one, we have set
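
For reference, the %llu/%llu pair printed by the warning above is the
qgroup level and the subvolume id, both packed into a single 64-bit
qgroupid: the level sits in the bits at and above BTRFS_QGROUP_LEVEL_SHIFT,
the subvolume id in the bits below it.  A stand-alone sketch of that split,
assuming the upstream shift value of 48 and using local helper names:

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

/* Assumed to match BTRFS_QGROUP_LEVEL_SHIFT in the upstream btrfs headers. */
#define QGROUP_LEVEL_SHIFT 48

/* Mirrors btrfs_qgroup_level(): the level lives above the shift. */
static uint64_t qgroup_level(uint64_t qgroupid)
{
	return qgroupid >> QGROUP_LEVEL_SHIFT;
}

/* Mirrors the new btrfs_qgroup_subvolid(): the subvolume id is the low bits. */
static uint64_t qgroup_subvolid(uint64_t qgroupid)
{
	return qgroupid & ((UINT64_C(1) << QGROUP_LEVEL_SHIFT) - 1);
}

int main(void)
{
	/* Example: qgroup "1/5" -- level 1, subvolume id 5 -- as one id. */
	uint64_t qgroupid = (UINT64_C(1) << QGROUP_LEVEL_SHIFT) | 5;

	printf("qgroup %" PRIu64 "/%" PRIu64 "\n",
	       qgroup_level(qgroupid), qgroup_subvolid(qgroupid));
	return 0;
}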
fs/btrfs/qgroup.h
index 1bc65445946907c171eccc0fe1363b8ce2f0b754..3be5198a37195f9eee7f7461c5890642c358340c 100644
@@ -415,5 +415,6 @@ int btrfs_qgroup_add_swapped_blocks(struct btrfs_trans_handle *trans,
 int btrfs_qgroup_trace_subtree_after_cow(struct btrfs_trans_handle *trans,
                struct btrfs_root *root, struct extent_buffer *eb);
 void btrfs_qgroup_destroy_extent_records(struct btrfs_transaction *trans);
+bool btrfs_check_quota_leak(struct btrfs_fs_info *fs_info);
 
 #endif