Btrfs: adjust the write_lock_level as we unlock
author    Chris Mason <chris.mason@oracle.com>
          Mon, 19 Mar 2012 19:54:38 +0000 (15:54 -0400)
committer Chris Mason <chris.mason@oracle.com>
          Mon, 26 Mar 2012 21:04:24 +0000 (17:04 -0400)
btrfs_search_slot sometimes needs write locks on high levels of
the tree.  It remembers the highest level that needs a write lock
and will use that for all future searches through the tree in a given
call.

But very often we'll just cow the top level or the level below, and we
won't really need write locks on the root again after that.  This patch
adjusts the write lock requirement downward as levels are unlocked.

Signed-off-by: Chris Mason <chris.mason@oracle.com>
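
To illustrate the idea outside the kernel, here is a minimal stand-alone
sketch (simplified, assumed names: MAX_LEVEL, struct path and the lock
constants stand in for BTRFS_MAX_LEVEL, struct btrfs_path and the btrfs
locking helpers; the real change is in the diff below):

#include <stdio.h>

#define MAX_LEVEL  8    /* stand-in for BTRFS_MAX_LEVEL */
#define NO_LOCK    0
#define WRITE_LOCK 2

struct path {
	int locks[MAX_LEVEL];	/* lock type held at each level, 0 = none */
};

/*
 * Drop the locks above skip_level and, for every lock we give up, pull
 * *write_lock_level down so later passes through the tree settle for
 * read locks on those upper levels.
 */
static void unlock_up(struct path *p, int skip_level,
		      int min_write_lock_level, int *write_lock_level)
{
	int i;

	for (i = skip_level + 1; i < MAX_LEVEL; i++) {
		if (!p->locks[i])
			continue;
		p->locks[i] = NO_LOCK;
		if (write_lock_level &&
		    i > min_write_lock_level &&
		    i <= *write_lock_level)
			*write_lock_level = i - 1;
	}
}

int main(void)
{
	struct path p = { .locks = { 0 } };
	int min_write_lock_level = 0;
	int write_lock_level = 2;	/* levels 0..2 currently need write locks */

	/* pretend the first descent write locked levels 0, 1 and 2 */
	p.locks[0] = p.locks[1] = p.locks[2] = WRITE_LOCK;

	/* after cowing level 1 and below, level 2 can be released */
	unlock_up(&p, 1, min_write_lock_level, &write_lock_level);

	/* prints 1: the next descent only write locks levels 0 and 1 */
	printf("write_lock_level is now %d\n", write_lock_level);
	return 0;
}

In the kernel the adjustment is bounded by min_write_lock_level, which
btrfs_search_slot() initializes to the write_lock_level it computed before
descending, so the requirement is never relaxed below that starting point.
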
fs/btrfs/ctree.c

index 8af24da983b1156e888ac0a95f58e7aa0a5bb9bb..270655da11d1882e3a45088ba2264636942b72e6 100644
--- a/fs/btrfs/ctree.c
+++ b/fs/btrfs/ctree.c
@@ -1395,7 +1395,8 @@ static noinline int reada_for_balance(struct btrfs_root *root,
  * if lowest_unlock is 1, level 0 won't be unlocked
  */
 static noinline void unlock_up(struct btrfs_path *path, int level,
-                              int lowest_unlock)
+                              int lowest_unlock, int min_write_lock_level,
+                              int *write_lock_level)
 {
        int i;
        int skip_level = level;
@@ -1427,6 +1428,11 @@ static noinline void unlock_up(struct btrfs_path *path, int level,
                if (i >= lowest_unlock && i > skip_level && path->locks[i]) {
                        btrfs_tree_unlock_rw(t, path->locks[i]);
                        path->locks[i] = 0;
+                       if (write_lock_level &&
+                           i > min_write_lock_level &&
+                           i <= *write_lock_level) {
+                               *write_lock_level = i - 1;
+                       }
                }
        }
 }
@@ -1650,6 +1656,7 @@ int btrfs_search_slot(struct btrfs_trans_handle *trans, struct btrfs_root
        /* everything at write_lock_level or lower must be write locked */
        int write_lock_level = 0;
        u8 lowest_level = 0;
+       int min_write_lock_level;
 
        lowest_level = p->lowest_level;
        WARN_ON(lowest_level && ins_len > 0);
@@ -1677,6 +1684,8 @@ int btrfs_search_slot(struct btrfs_trans_handle *trans, struct btrfs_root
        if (cow && (p->keep_locks || p->lowest_level))
                write_lock_level = BTRFS_MAX_LEVEL;
 
+       min_write_lock_level = write_lock_level;
+
 again:
        /*
         * we try very hard to do read locks on the root
@@ -1808,7 +1817,8 @@ cow_done:
                                goto again;
                        }
 
-                       unlock_up(p, level, lowest_unlock);
+                       unlock_up(p, level, lowest_unlock,
+                                 min_write_lock_level, &write_lock_level);
 
                        if (level == lowest_level) {
                                if (dec)
@@ -1870,7 +1880,8 @@ cow_done:
                                }
                        }
                        if (!p->search_for_split)
-                               unlock_up(p, level, lowest_unlock);
+                               unlock_up(p, level, lowest_unlock,
+                                         min_write_lock_level, &write_lock_level);
                        goto done;
                }
        }
@@ -4108,7 +4119,7 @@ find_next_key:
                path->slots[level] = slot;
                if (level == path->lowest_level) {
                        ret = 0;
-                       unlock_up(path, level, 1);
+                       unlock_up(path, level, 1, 0, NULL);
                        goto out;
                }
                btrfs_set_path_blocking(path);
@@ -4119,7 +4130,7 @@ find_next_key:
 
                path->locks[level - 1] = BTRFS_READ_LOCK;
                path->nodes[level - 1] = cur;
-               unlock_up(path, level, 1);
+               unlock_up(path, level, 1, 0, NULL);
                btrfs_clear_path_blocking(path, NULL, 0);
        }
 out:
@@ -4355,7 +4366,7 @@ again:
        }
        ret = 0;
 done:
-       unlock_up(path, 0, 1);
+       unlock_up(path, 0, 1, 0, NULL);
        path->leave_spinning = old_spinning;
        if (!old_spinning)
                btrfs_set_path_blocking(path);