]> git.proxmox.com Git - mirror_ubuntu-focal-kernel.git/commitdiff
btrfs: add new helper btrfs_lock_and_flush_ordered_range
authorNikolay Borisov <nborisov@suse.com>
Tue, 7 May 2019 07:19:22 +0000 (10:19 +0300)
committerDavid Sterba <dsterba@suse.com>
Mon, 1 Jul 2019 11:34:59 +0000 (13:34 +0200)
There is a certain idiom used in multiple places in btrfs' codebase,
dealing with flushing an ordered range. Factor this in a separate
function that can be reused. Future patches will replace the existing
code with that function.

Reviewed-by: Josef Bacik <josef@toxicpanda.com>
Signed-off-by: Nikolay Borisov <nborisov@suse.com>
Reviewed-by: David Sterba <dsterba@suse.com>
Signed-off-by: David Sterba <dsterba@suse.com>
fs/btrfs/ordered-data.c
fs/btrfs/ordered-data.h

index 52889da69113c59a1f35dd31876140f81d58c888..37401cc04a6b1cf606a85ed31f71e3af512b9956 100644 (file)
@@ -962,6 +962,39 @@ out:
        return index;
 }
 
+/*
+ * btrfs_lock_and_flush_ordered_range - Lock the passed range and ensure all
+ * pending ordered extents in it are run to completion.
+ *
+ * @tree:         IO tree used for locking out other users of the range
+ * @inode:        Inode whose ordered tree is to be searched
+ * @start:        Beginning of range to flush
+ * @end:          Last byte of range to lock
+ * @cached_state: If passed, will return the extent state responsible for the
+ * locked range. It's the caller's responsibility to free the cached state.
+ *
+ * This function always returns with the given range locked, ensuring after it's
+ * called no ordered extent can be pending.
+ */
+void btrfs_lock_and_flush_ordered_range(struct extent_io_tree *tree,
+                                       struct btrfs_inode *inode, u64 start,
+                                       u64 end,
+                                       struct extent_state **cached_state)
+{
+       struct btrfs_ordered_extent *ordered;
+
+       while (1) {
+               lock_extent_bits(tree, start, end, cached_state);
+               ordered = btrfs_lookup_ordered_range(inode, start,
+                                                    end - start + 1);
+               if (!ordered)
+                       break;
+               unlock_extent_cached(tree, start, end, cached_state);
+               btrfs_start_ordered_extent(&inode->vfs_inode, ordered, 1);
+               btrfs_put_ordered_extent(ordered);
+       }
+}
+
 int __init ordered_data_init(void)
 {
        btrfs_ordered_extent_cache = kmem_cache_create("btrfs_ordered_extent",
index 4c5991c3de14961b0a94a8b105fc382d49c0434f..9b68179d580f8d6c8ea948eb109a612342868e2a 100644 (file)
@@ -188,6 +188,10 @@ u64 btrfs_wait_ordered_extents(struct btrfs_root *root, u64 nr,
                               const u64 range_start, const u64 range_len);
 u64 btrfs_wait_ordered_roots(struct btrfs_fs_info *fs_info, u64 nr,
                              const u64 range_start, const u64 range_len);
+void btrfs_lock_and_flush_ordered_range(struct extent_io_tree *tree,
+                                       struct btrfs_inode *inode, u64 start,
+                                       u64 end,
+                                       struct extent_state **cached_state);
 int __init ordered_data_init(void);
 void __cold ordered_data_exit(void);