git.proxmox.com Git - mirror_ubuntu-jammy-kernel.git/commitdiff
btrfs: extend locking to all space_info members accesses
author     Niels Dossche <dossche.niels@gmail.com>
           Fri, 25 Feb 2022 21:20:28 +0000 (22:20 +0100)
committer  Stefan Bader <stefan.bader@canonical.com>
           Fri, 20 May 2022 12:37:48 +0000 (14:37 +0200)
BugLink: https://bugs.launchpad.net/bugs/1969110
commit 06bae876634ebf837ba70ea3de532b288326103d upstream.

bytes_pinned is always accessed under space_info->lock, except in
btrfs_preempt_reclaim_metadata_space, where the other members are read
under that lock but bytes_pinned is not. The reserved members of the
rsvs are likewise accessed partly under the lock and partly outside it.
Move all these accesses under the same lock hold to ensure consistency.

This could potentially race and lead to a flush instead of a commit,
but that's not a big problem as it only affects the preemptive flush.
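
The pattern the patch enforces is easier to see outside the kernel. Below is
a minimal user-space sketch, not the btrfs code; every name in it
(demo_space_info, demo_pick_flush, the demo enum values) is made up for
illustration. It shows all fields that feed a reclaim-style decision being
sampled in a single critical section, which is what moving the spin_unlock()
below the flush-type selection achieves in the real function.

/*
 * Minimal user-space sketch (NOT the btrfs code) of the locking pattern:
 * every field that feeds into the decision is read while the same lock is
 * held, so the decision is based on one consistent snapshot.
 * Build with:  cc -pthread demo.c
 */
#include <pthread.h>
#include <stdint.h>
#include <stdio.h>

struct demo_space_info {
        pthread_mutex_t lock;
        uint64_t bytes_pinned;
        uint64_t bytes_may_use;
        uint64_t global_rsv_reserved;
        uint64_t trans_rsv_reserved;
};

enum demo_flush { DEMO_FLUSH_DELALLOC, DEMO_COMMIT_TRANS };

/*
 * Before the fix, the lock was dropped after reading bytes_may_use and the
 * rsv sizes, and bytes_pinned was read later without the lock, so the two
 * reads could come from different moments in time.  Here every read happens
 * inside one critical section.
 */
static enum demo_flush demo_pick_flush(struct demo_space_info *si)
{
        uint64_t block_rsv_size, delalloc_size = 0;
        enum demo_flush flush;

        pthread_mutex_lock(&si->lock);

        block_rsv_size = si->global_rsv_reserved + si->trans_rsv_reserved;
        if (block_rsv_size < si->bytes_may_use)
                delalloc_size = si->bytes_may_use - block_rsv_size;

        /* bytes_pinned is sampled under the same lock hold as the rest. */
        if (delalloc_size > si->bytes_pinned)
                flush = DEMO_FLUSH_DELALLOC;
        else
                flush = DEMO_COMMIT_TRANS;

        pthread_mutex_unlock(&si->lock);

        return flush;
}

int main(void)
{
        struct demo_space_info si = {
                .lock = PTHREAD_MUTEX_INITIALIZER,
                .bytes_pinned = 4096,
                .bytes_may_use = 1 << 20,
                .global_rsv_reserved = 1 << 16,
                .trans_rsv_reserved = 1 << 12,
        };

        printf("chosen flush: %d\n", demo_pick_flush(&si));
        return 0;
}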

CC: stable@vger.kernel.org # 5.15+
Reviewed-by: Johannes Thumshirn <johannes.thumshirn@wdc.com>
Reviewed-by: Josef Bacik <josef@toxicpanda.com>
Signed-off-by: Niels Dossche <niels.dossche@ugent.be>
Signed-off-by: Niels Dossche <dossche.niels@gmail.com>
Reviewed-by: David Sterba <dsterba@suse.com>
Signed-off-by: David Sterba <dsterba@suse.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
(cherry picked from commit f85ee0c845fd42b63bc10dd3e37e68115ad3a710)
Signed-off-by: Paolo Pisati <paolo.pisati@canonical.com>
fs/btrfs/space-info.c

index aa5be0b24987ad4366854d6d4ad59c2d096321a0..5ed66a794e577aa8770f7ede36b7678dfd50bad6 100644
@@ -1054,7 +1054,6 @@ static void btrfs_preempt_reclaim_metadata_space(struct work_struct *work)
                        trans_rsv->reserved;
                if (block_rsv_size < space_info->bytes_may_use)
                        delalloc_size = space_info->bytes_may_use - block_rsv_size;
-               spin_unlock(&space_info->lock);
 
                /*
                 * We don't want to include the global_rsv in our calculation,
@@ -1085,6 +1084,8 @@ static void btrfs_preempt_reclaim_metadata_space(struct work_struct *work)
                        flush = FLUSH_DELAYED_REFS_NR;
                }
 
+               spin_unlock(&space_info->lock);
+
                /*
                 * We don't want to reclaim everything, just a portion, so scale
                 * down the to_reclaim by 1/4.  If it takes us down to 0,