bcachefs: gc lock no longer needed for disk reservations
author    Kent Overstreet <kent.overstreet@gmail.com>
          Tue, 12 Feb 2019 03:08:09 +0000 (22:08 -0500)
committer Kent Overstreet <kent.overstreet@linux.dev>
          Sun, 22 Oct 2023 21:08:16 +0000 (17:08 -0400)
Signed-off-by: Kent Overstreet <kent.overstreet@linux.dev>
fs/bcachefs/alloc_background.c
fs/bcachefs/btree_gc.c
fs/bcachefs/btree_types.h
fs/bcachefs/btree_update_interior.c
fs/bcachefs/btree_update_leaf.c
fs/bcachefs/buckets.c
fs/bcachefs/buckets.h
fs/bcachefs/extents.c

diff --git a/fs/bcachefs/alloc_background.c b/fs/bcachefs/alloc_background.c
index 9d2e21d99e6e7ae17f2c759f8699b0005f297b51..7c57de5390b4780d9c8111023efc4024942b7989 100644
@@ -1474,7 +1474,6 @@ not_enough:
                                                           &journal_seq);
 
                                fifo_push(&ca->free[RESERVE_BTREE], bu);
-                               bucket_set_dirty(ca, bu);
                        }
                }
 
diff --git a/fs/bcachefs/btree_gc.c b/fs/bcachefs/btree_gc.c
index 391389d431c8053cac8007e45566371c36c94dd6..315f2d76947a9d2c635f5829530b8f1417b86104 100644
@@ -141,24 +141,23 @@ static int bch2_gc_mark_key(struct bch_fs *c, struct bkey_s_c k,
 
                bkey_for_each_ptr(ptrs, ptr) {
                        struct bch_dev *ca = bch_dev_bkey_exists(c, ptr->dev);
-                       size_t b = PTR_BUCKET_NR(ca, ptr);
-                       struct bucket *g = PTR_BUCKET(ca, ptr);
+                       struct bucket *g = PTR_BUCKET(ca, ptr, true);
 
                        if (mustfix_fsck_err_on(!g->gen_valid, c,
                                        "found ptr with missing gen in alloc btree,\n"
                                        "type %u gen %u",
                                        k.k->type, ptr->gen)) {
-                               g->_mark.gen = ptr->gen;
-                               g->gen_valid = 1;
-                               bucket_set_dirty(ca, b);
+                               g->_mark.gen    = ptr->gen;
+                               g->_mark.dirty  = true;
+                               g->gen_valid    = 1;
                        }
 
                        if (mustfix_fsck_err_on(gen_cmp(ptr->gen, g->mark.gen) > 0, c,
                                        "%u ptr gen in the future: %u > %u",
                                        k.k->type, ptr->gen, g->mark.gen)) {
-                               g->_mark.gen = ptr->gen;
-                               g->gen_valid = 1;
-                               bucket_set_dirty(ca, b);
+                               g->_mark.gen    = ptr->gen;
+                               g->_mark.dirty  = true;
+                               g->gen_valid    = 1;
                                set_bit(BCH_FS_FIXED_GENS, &c->flags);
                        }
                }
@@ -166,8 +165,7 @@ static int bch2_gc_mark_key(struct bch_fs *c, struct bkey_s_c k,
 
        bkey_for_each_ptr(ptrs, ptr) {
                struct bch_dev *ca = bch_dev_bkey_exists(c, ptr->dev);
-               size_t b = PTR_BUCKET_NR(ca, ptr);
-               struct bucket *g = __bucket(ca, b, true);
+               struct bucket *g = PTR_BUCKET(ca, ptr, true);
 
                if (gen_after(g->oldest_gen, ptr->gen))
                        g->oldest_gen = ptr->gen;
@@ -646,13 +644,14 @@ static int bch2_gc_start(struct bch_fs *c)
        struct bch_dev *ca;
        unsigned i;
 
+       percpu_down_write(&c->mark_lock);
+
        /*
         * indicate to stripe code that we need to allocate for the gc stripes
         * radix tree, too
         */
        gc_pos_set(c, gc_phase(GC_PHASE_START));
 
-       percpu_down_write(&c->mark_lock);
        BUG_ON(c->usage[1]);
 
        c->usage[1] = __alloc_percpu_gfp(sizeof(struct bch_fs_usage) +
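
A minimal standalone sketch of the ordering the bch2_gc_start() hunk above establishes, assuming a plain rwlock as a stand-in for the percpu mark_lock: gc publishes "started" while holding the lock for write, so code that marks buckets under the read lock either sees gc as not yet started or also updates gc's copy of the counters. All names and types below are illustrative, not bcachefs APIs.

/*
 * Standalone model, not bcachefs code: all names here are illustrative.
 * Build with: cc -pthread sketch.c
 */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

struct fs {
	pthread_rwlock_t mark_lock;	/* stands in for c->mark_lock */
	bool		 gc_running;	/* stands in for the gc position */
	long		 usage[2];	/* [0] = live counters, [1] = gc's copy */
};

/*
 * Mark paths hold mark_lock for read.  Because gc_start() flips gc_running
 * while holding it for write, a marker sees either "gc not started" (update
 * usage[0] only) or "gc started" (update both copies), never a torn state.
 * (In bcachefs the counters are per-cpu, which is what makes updating them
 * under the read side safe; this demo is single threaded.)
 */
static void mark_bucket(struct fs *c, long sectors)
{
	pthread_rwlock_rdlock(&c->mark_lock);
	c->usage[0] += sectors;
	if (c->gc_running)
		c->usage[1] += sectors;
	pthread_rwlock_unlock(&c->mark_lock);
}

static void gc_start(struct fs *c)
{
	pthread_rwlock_wrlock(&c->mark_lock);
	c->gc_running = true;	/* gc_pos_set(GC_PHASE_START) analogue */
	c->usage[1] = 0;	/* fresh counters for gc to rebuild */
	pthread_rwlock_unlock(&c->mark_lock);
}

int main(void)
{
	struct fs c = { .mark_lock = PTHREAD_RWLOCK_INITIALIZER };

	mark_bucket(&c, 8);	/* before gc: only the live copy is updated */
	gc_start(&c);
	mark_bucket(&c, 8);	/* after gc start: both copies are updated */
	printf("live=%ld gc=%ld\n", c.usage[0], c.usage[1]);
	return 0;
}
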
diff --git a/fs/bcachefs/btree_types.h b/fs/bcachefs/btree_types.h
index b5a4853451a71b4674f6c4754e55586e53fff6d1..5f0e0009ec5dd3ed762175bc85f4724774be19dc 100644
@@ -490,7 +490,6 @@ enum btree_insert_ret {
        /* leaf node needs to be split */
        BTREE_INSERT_BTREE_NODE_FULL,
        BTREE_INSERT_ENOSPC,
-       BTREE_INSERT_NEED_GC_LOCK,
        BTREE_INSERT_NEED_MARK_REPLICAS,
 };
 
diff --git a/fs/bcachefs/btree_update_interior.c b/fs/bcachefs/btree_update_interior.c
index 451b293c44a6d4bea52c4c630fa746d4f6741eae..6dff960e095da69cd4209ab82ad0d6b2b535fdda 100644
@@ -484,7 +484,7 @@ static struct btree_reserve *bch2_btree_reserve_get(struct bch_fs *c,
        struct btree *b;
        struct disk_reservation disk_res = { 0, 0 };
        unsigned sectors = nr_nodes * c->opts.btree_node_size;
-       int ret, disk_res_flags = BCH_DISK_RESERVATION_GC_LOCK_HELD;
+       int ret, disk_res_flags = 0;
 
        if (flags & BTREE_INSERT_NOFAIL)
                disk_res_flags |= BCH_DISK_RESERVATION_NOFAIL;
@@ -1947,8 +1947,7 @@ static void __bch2_btree_node_update_key(struct bch_fs *c,
        ret = bch2_disk_reservation_add(c, &as->reserve->disk_res,
                        c->opts.btree_node_size *
                        bch2_bkey_nr_ptrs(bkey_i_to_s_c(&new_key->k_i)),
-                       BCH_DISK_RESERVATION_NOFAIL|
-                       BCH_DISK_RESERVATION_GC_LOCK_HELD);
+                       BCH_DISK_RESERVATION_NOFAIL);
        BUG_ON(ret);
 
        parent = btree_node_parent(iter, b);
diff --git a/fs/bcachefs/btree_update_leaf.c b/fs/bcachefs/btree_update_leaf.c
index d1a2ac48ed29b43ae8a922780e963ed0e1458f95..5555c6e1c7cf7609cb154f6a42688763fef51ad5 100644
@@ -719,18 +719,6 @@ err:
                        ret = -EINTR;
                }
                break;
-       case BTREE_INSERT_NEED_GC_LOCK:
-               ret = -EINTR;
-
-               if (!down_read_trylock(&c->gc_lock)) {
-                       if (flags & BTREE_INSERT_NOUNLOCK)
-                               goto out;
-
-                       bch2_btree_iter_unlock(trans->entries[0].iter);
-                       down_read(&c->gc_lock);
-               }
-               up_read(&c->gc_lock);
-               break;
        case BTREE_INSERT_ENOSPC:
                ret = -ENOSPC;
                break;
diff --git a/fs/bcachefs/buckets.c b/fs/bcachefs/buckets.c
index 5a3ecbcd5ad4df4cf9c644ca9981a2953d9aabbd..9aa369c6f28eae1c69411e5904b2eb30bf5280dd 100644
@@ -407,14 +407,14 @@ static inline void update_cached_sectors(struct bch_fs *c,
 }
 
 static void __bch2_invalidate_bucket(struct bch_fs *c, struct bch_dev *ca,
-                                    size_t b, struct bucket_mark *old,
+                                    size_t b, struct bucket_mark *ret,
                                     bool gc)
 {
        struct bch_fs_usage *fs_usage = this_cpu_ptr(c->usage[gc]);
        struct bucket *g = __bucket(ca, b, gc);
-       struct bucket_mark new;
+       struct bucket_mark old, new;
 
-       *old = bucket_data_cmpxchg(c, ca, fs_usage, g, new, ({
+       old = bucket_data_cmpxchg(c, ca, fs_usage, g, new, ({
                BUG_ON(!is_available_bucket(new));
 
                new.owned_by_allocator  = true;
@@ -425,9 +425,12 @@ static void __bch2_invalidate_bucket(struct bch_fs *c, struct bch_dev *ca,
                new.gen++;
        }));
 
-       if (old->cached_sectors)
+       if (old.cached_sectors)
                update_cached_sectors(c, fs_usage, ca->dev_idx,
-                                     -old->cached_sectors);
+                                     -old.cached_sectors);
+
+       if (ret)
+               *ret = old;
 }
 
 void bch2_invalidate_bucket(struct bch_fs *c, struct bch_dev *ca,
@@ -437,6 +440,9 @@ void bch2_invalidate_bucket(struct bch_fs *c, struct bch_dev *ca,
 
        __bch2_invalidate_bucket(c, ca, b, old, false);
 
+       if (gc_visited(c, gc_phase(GC_PHASE_START)))
+               __bch2_invalidate_bucket(c, ca, b, NULL, true);
+
        if (!old->owned_by_allocator && old->cached_sectors)
                trace_invalidate(ca, bucket_to_sector(ca, b),
                                 old->cached_sectors);
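
A standalone sketch of the pattern in the hunk above, using simplified stand-in types: the out-parameter of __bch2_invalidate_bucket() becomes optional (NULL allowed), so the same helper can be run a second time against gc's copy of the bucket array once gc_visited() says gc has already passed this position. do_invalidate_bucket()/invalidate_bucket() below model __bch2_invalidate_bucket()/bch2_invalidate_bucket() only loosely.

/*
 * Standalone model, not bcachefs code: simplified stand-in types only.
 */
#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

struct bucket_mark {
	unsigned	gen;
	unsigned	cached_sectors;
	bool		owned_by_allocator;
};

struct dev {
	struct bucket_mark buckets[2][16];	/* [gc][bucket index] */
};

static bool gc_started;	/* models gc_visited(c, gc_phase(GC_PHASE_START)) */

/* models __bch2_invalidate_bucket(): @ret may be NULL */
static void do_invalidate_bucket(struct dev *ca, size_t b,
				 struct bucket_mark *ret, bool gc)
{
	struct bucket_mark old = ca->buckets[gc][b];
	struct bucket_mark new = old;

	new.owned_by_allocator	= true;
	new.cached_sectors	= 0;
	new.gen++;
	ca->buckets[gc][b] = new;

	if (ret)	/* the gc-copy caller doesn't need the old mark */
		*ret = old;
}

/* models bch2_invalidate_bucket() */
static void invalidate_bucket(struct dev *ca, size_t b, struct bucket_mark *old)
{
	do_invalidate_bucket(ca, b, old, false);

	/* if gc has already passed this position, keep its copy in sync too */
	if (gc_started)
		do_invalidate_bucket(ca, b, NULL, true);
}

int main(void)
{
	struct dev ca = { 0 };
	struct bucket_mark old;

	gc_started = true;
	ca.buckets[0][3].cached_sectors = 12;
	invalidate_bucket(&ca, 3, &old);
	printf("old cached=%u, live gen=%u, gc copy gen=%u\n",
	       old.cached_sectors, ca.buckets[0][3].gen, ca.buckets[1][3].gen);
	return 0;
}
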
@@ -1091,24 +1097,8 @@ out:
        return 0;
 
 recalculate:
-       /*
-        * GC recalculates sectors_available when it starts, so that hopefully
-        * we don't normally end up blocking here:
-        */
-
-       /*
-        * Piss fuck, we can be called from extent_insert_fixup() with btree
-        * locks held:
-        */
-
-       if (!(flags & BCH_DISK_RESERVATION_GC_LOCK_HELD)) {
-               if (!(flags & BCH_DISK_RESERVATION_BTREE_LOCKS_HELD))
-                       down_read(&c->gc_lock);
-               else if (!down_read_trylock(&c->gc_lock))
-                       return -EINTR;
-       }
-
        percpu_down_write(&c->mark_lock);
+
        sectors_available = bch2_recalc_sectors_available(c);
 
        if (sectors <= sectors_available ||
@@ -1125,9 +1115,6 @@ recalculate:
 
        percpu_up_write(&c->mark_lock);
 
-       if (!(flags & BCH_DISK_RESERVATION_GC_LOCK_HELD))
-               up_read(&c->gc_lock);
-
        return ret;
 }
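
A standalone sketch of the shape the reservation slow path takes after the hunk above, with a plain rwlock and a single counter standing in for mark_lock and the per-cpu pool: only mark_lock is taken for write to recalculate, and the function returns 0 or -ENOSPC, never -EINTR, which is what lets the NEED_GC_LOCK plumbing elsewhere in this commit go away. Illustrative names only, not the real bch2_disk_reservation_add().

/*
 * Standalone model, not bcachefs code: illustrative names only.
 * Build with: cc -pthread sketch.c
 */
#include <errno.h>
#include <pthread.h>
#include <stdio.h>

#define RESERVATION_NOFAIL	(1 << 0)

struct fs {
	pthread_rwlock_t mark_lock;
	long		 sectors_available;	/* stands in for the percpu pool */
	long		 online_reserved;
};

static long recalc_sectors_available(struct fs *c)
{
	/* bch2_recalc_sectors_available() re-sums per-cpu counters;
	 * this model just reports the single counter */
	return c->sectors_available;
}

static int disk_reservation_add(struct fs *c, long sectors, unsigned flags)
{
	int ret = 0;

	/* (fast path against a per-cpu remainder omitted) */

	pthread_rwlock_wrlock(&c->mark_lock);	/* no gc_lock taken any more */

	long avail = recalc_sectors_available(c);

	if (sectors <= avail || (flags & RESERVATION_NOFAIL)) {
		c->sectors_available = avail - sectors;
		c->online_reserved  += sectors;
	} else {
		ret = -ENOSPC;
	}

	pthread_rwlock_unlock(&c->mark_lock);
	return ret;	/* 0 or -ENOSPC; never -EINTR */
}

int main(void)
{
	struct fs c = {
		.mark_lock	   = PTHREAD_RWLOCK_INITIALIZER,
		.sectors_available = 100,
	};

	printf("reserve 64: %d\n", disk_reservation_add(&c, 64, 0));
	printf("reserve 64: %d\n", disk_reservation_add(&c, 64, 0));
	printf("NOFAIL  64: %d\n", disk_reservation_add(&c, 64, RESERVATION_NOFAIL));
	return 0;
}
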
 
diff --git a/fs/bcachefs/buckets.h b/fs/bcachefs/buckets.h
index 885280899dc64da558983013c9b1e8003daeaa25..ecc4ae22f7360408296fbbb873c8e3f9e294206a 100644
@@ -57,18 +57,6 @@ static inline struct bucket *bucket(struct bch_dev *ca, size_t b)
        return __bucket(ca, b, false);
 }
 
-static inline void bucket_set_dirty(struct bch_dev *ca, size_t b)
-{
-       struct bucket *g;
-       struct bucket_mark m;
-
-       rcu_read_lock();
-       g = bucket(ca, b);
-       bucket_cmpxchg(g, m, m.dirty = true);
-       rcu_read_unlock();
-
-}
-
 static inline void bucket_io_clock_reset(struct bch_fs *c, struct bch_dev *ca,
                                         size_t b, int rw)
 {
@@ -99,7 +87,8 @@ static inline size_t PTR_BUCKET_NR(const struct bch_dev *ca,
 }
 
 static inline struct bucket *PTR_BUCKET(struct bch_dev *ca,
-                                       const struct bch_extent_ptr *ptr)
+                                       const struct bch_extent_ptr *ptr,
+                                       bool gc)
 {
        return bucket(ca, PTR_BUCKET_NR(ca, ptr));
 }
@@ -285,8 +274,6 @@ static inline void bch2_disk_reservation_put(struct bch_fs *c,
 }
 
 #define BCH_DISK_RESERVATION_NOFAIL            (1 << 0)
-#define BCH_DISK_RESERVATION_GC_LOCK_HELD      (1 << 1)
-#define BCH_DISK_RESERVATION_BTREE_LOCKS_HELD  (1 << 2)
 
 int bch2_disk_reservation_add(struct bch_fs *,
                             struct disk_reservation *,
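
A standalone sketch of how the bool gc argument added to PTR_BUCKET() above can be used, assuming (as in the __bucket(ca, b, true) call in the btree_gc.c hunk) that it selects between the live bucket array and the copy gc rebuilds. get_bucket()/ptr_bucket() below are simplified stand-ins for __bucket()/PTR_BUCKET(), not the real definitions.

/*
 * Standalone model, not bcachefs code: simplified stand-in types only.
 */
#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

struct bucket {
	unsigned	gen;
	bool		dirty;
};

struct dev {
	struct bucket	*buckets[2];	/* [0] = live array, [1] = gc's copy */
	size_t		nbuckets;
	size_t		bucket_size;	/* sectors per bucket */
};

struct extent_ptr {
	unsigned long long offset;	/* device offset, in sectors */
};

/* models __bucket(ca, b, gc): the flag picks which array the mark goes to */
static inline struct bucket *get_bucket(struct dev *ca, size_t b, bool gc)
{
	return &ca->buckets[gc][b];
}

static inline size_t ptr_bucket_nr(const struct dev *ca,
				   const struct extent_ptr *ptr)
{
	return ptr->offset / ca->bucket_size;
}

/* post-change shape of PTR_BUCKET(): callers say which copy they want */
static inline struct bucket *ptr_bucket(struct dev *ca,
					const struct extent_ptr *ptr, bool gc)
{
	return get_bucket(ca, ptr_bucket_nr(ca, ptr), gc);
}

int main(void)
{
	struct bucket live[8] = { 0 }, gc_copy[8] = { 0 };
	struct dev ca = {
		.buckets	= { live, gc_copy },
		.nbuckets	= 8,
		.bucket_size	= 128,
	};
	struct extent_ptr ptr = { .offset = 300 };	/* falls in bucket 2 */

	ptr_bucket(&ca, &ptr, true)->dirty = true;	/* mark gc's copy only */
	printf("live dirty=%d, gc dirty=%d\n", live[2].dirty, gc_copy[2].dirty);
	return 0;
}
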
diff --git a/fs/bcachefs/extents.c b/fs/bcachefs/extents.c
index 1d96a1773f740b5e51d9b46b64c9ec48641b0e74..41194462be3053f1462e1844c0bd5610f8c9c7f4 100644
@@ -979,10 +979,8 @@ bch2_extent_can_insert(struct btree_insert *trans,
 
        if (overlap == BCH_EXTENT_OVERLAP_MIDDLE &&
            (sectors = bch2_extent_is_compressed(k))) {
-               int flags = BCH_DISK_RESERVATION_BTREE_LOCKS_HELD;
-
-               if (trans->flags & BTREE_INSERT_NOFAIL)
-                       flags |= BCH_DISK_RESERVATION_NOFAIL;
+               int flags = trans->flags & BTREE_INSERT_NOFAIL
+                       ? BCH_DISK_RESERVATION_NOFAIL : 0;
 
                switch (bch2_disk_reservation_add(trans->c,
                                trans->disk_res,
@@ -991,8 +989,6 @@ bch2_extent_can_insert(struct btree_insert *trans,
                        break;
                case -ENOSPC:
                        return BTREE_INSERT_ENOSPC;
-               case -EINTR:
-                       return BTREE_INSERT_NEED_GC_LOCK;
                default:
                        BUG();
                }