bcachefs: Kill BTREE_UPDATE_PREJOURNAL
author    Kent Overstreet <kent.overstreet@linux.dev>
          Thu, 9 Nov 2023 03:04:29 +0000 (22:04 -0500)
committer Kent Overstreet <kent.overstreet@linux.dev>
          Mon, 1 Jan 2024 16:47:37 +0000 (11:47 -0500)
With the previous patch that reworks BTREE_INSERT_JOURNAL_REPLAY, we can
now switch the btree write buffer to use it for flushing.

This has the advantage that transaction commits don't need to take a
journal reservation at all.

Signed-off-by: Kent Overstreet <kent.overstreet@linux.dev>
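
For reference, a minimal C sketch of the flush path after this patch, assembled from the btree_write_buffer.c hunks below (the helper name is made up for illustration; the real code splits this between bch2_btree_write_buffer_flush_one(), btree_write_buffered_insert() and commit_do()): the key's original journal sequence number is handed to the commit path via trans->journal_res.seq, and the commit runs with BTREE_INSERT_JOURNAL_REPLAY, so no new journal reservation is taken.

/*
 * Illustrative sketch only, not part of the patch: flush one write-buffered
 * key via the reworked BTREE_INSERT_JOURNAL_REPLAY path.  The key was already
 * journaled when it entered the write buffer, so its sequence number is passed
 * through trans->journal_res.seq instead of a new journal reservation.
 */
static int flush_one_wb_key_sketch(struct btree_trans *trans,
				   struct btree_write_buffered_key *wb,
				   unsigned commit_flags)
{
	struct btree_iter iter;
	int ret;

	bch2_trans_iter_init(trans, &iter, wb->btree, bkey_start_pos(&wb->k.k),
			     BTREE_ITER_CACHED|BTREE_ITER_INTENT);

	/* Reuse the seq the key was originally journaled at: */
	trans->journal_res.seq = wb->journal_seq;

	ret   = bch2_btree_iter_traverse(&iter) ?:
		bch2_trans_update(trans, &iter, &wb->k,
				  BTREE_UPDATE_INTERNAL_SNAPSHOT_NODE) ?:
		bch2_trans_commit(trans, NULL, NULL,
				  commit_flags|
				  BTREE_INSERT_NOCHECK_RW|
				  BTREE_INSERT_NOFAIL|
				  BTREE_INSERT_JOURNAL_REPLAY|
				  BTREE_INSERT_JOURNAL_RECLAIM);
	bch2_trans_iter_exit(trans, &iter);
	return ret;
}

Compare with the removed bch2_trans_update_seq() helper in the btree_update.c hunk below: the prejournaled seq no longer has to be carried in the btree_insert_entry at all; it simply becomes the commit's journal_res.seq.
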
fs/bcachefs/bkey_methods.h
fs/bcachefs/btree_trans_commit.c
fs/bcachefs/btree_types.h
fs/bcachefs/btree_update.c
fs/bcachefs/btree_write_buffer.c

diff --git a/fs/bcachefs/bkey_methods.h b/fs/bcachefs/bkey_methods.h
index 3a370b7087acea9bed0de0e1c565034336303d01..912adadfb4dd40a3435d7f6a82eba365a750fa67 100644
--- a/fs/bcachefs/bkey_methods.h
+++ b/fs/bcachefs/bkey_methods.h
@@ -93,7 +93,6 @@ static inline int bch2_mark_key(struct btree_trans *trans,
 enum btree_update_flags {
        __BTREE_UPDATE_INTERNAL_SNAPSHOT_NODE = __BTREE_ITER_FLAGS_END,
        __BTREE_UPDATE_NOJOURNAL,
-       __BTREE_UPDATE_PREJOURNAL,
        __BTREE_UPDATE_KEY_CACHE_RECLAIM,
 
        __BTREE_TRIGGER_NORUN,          /* Don't run triggers at all */
@@ -108,7 +107,6 @@ enum btree_update_flags {
 
 #define BTREE_UPDATE_INTERNAL_SNAPSHOT_NODE (1U << __BTREE_UPDATE_INTERNAL_SNAPSHOT_NODE)
 #define BTREE_UPDATE_NOJOURNAL         (1U << __BTREE_UPDATE_NOJOURNAL)
-#define BTREE_UPDATE_PREJOURNAL                (1U << __BTREE_UPDATE_PREJOURNAL)
 #define BTREE_UPDATE_KEY_CACHE_RECLAIM (1U << __BTREE_UPDATE_KEY_CACHE_RECLAIM)
 
 #define BTREE_TRIGGER_NORUN            (1U << __BTREE_TRIGGER_NORUN)
diff --git a/fs/bcachefs/btree_trans_commit.c b/fs/bcachefs/btree_trans_commit.c
index 6c5510c4a2c49123172868d1e3473bc475081310..403b7310d21aa36b7a29f3ab7de40e02b6268b77 100644
--- a/fs/bcachefs/btree_trans_commit.c
+++ b/fs/bcachefs/btree_trans_commit.c
@@ -778,12 +778,7 @@ bch2_trans_commit_write_locked(struct btree_trans *trans, unsigned flags,
 
        trans_for_each_update(trans, i) {
                if (!i->cached) {
-                       u64 seq = trans->journal_res.seq;
-
-                       if (i->flags & BTREE_UPDATE_PREJOURNAL)
-                               seq = i->seq;
-
-                       bch2_btree_insert_key_leaf(trans, i->path, i->k, seq);
+                       bch2_btree_insert_key_leaf(trans, i->path, i->k, trans->journal_res.seq);
                } else if (!i->key_cache_already_flushed)
                        bch2_btree_insert_key_cached(trans, flags, i);
                else {
diff --git a/fs/bcachefs/btree_types.h b/fs/bcachefs/btree_types.h
index 60453ba86c4b963777f67693352d4929ac726549..b667da4e8403648c21e31ace7aa203abb838860f 100644
--- a/fs/bcachefs/btree_types.h
+++ b/fs/bcachefs/btree_types.h
@@ -356,7 +356,6 @@ struct btree_insert_entry {
        u8                      old_btree_u64s;
        struct bkey_i           *k;
        struct btree_path       *path;
-       u64                     seq;
        /* key being overwritten: */
        struct bkey             old_k;
        const struct bch_val    *old_v;
diff --git a/fs/bcachefs/btree_update.c b/fs/bcachefs/btree_update.c
index 2fd3c8cc6f5115c19f0abe6f9066cd9a1b7245a4..82f85e3a578779fe1d67f06158998ae9ecbe0735 100644
--- a/fs/bcachefs/btree_update.c
+++ b/fs/bcachefs/btree_update.c
@@ -381,21 +381,12 @@ bch2_trans_update_by_path(struct btree_trans *trans, struct btree_path *path,
 {
        struct bch_fs *c = trans->c;
        struct btree_insert_entry *i, n;
-       u64 seq = 0;
        int cmp;
 
        EBUG_ON(!path->should_be_locked);
        EBUG_ON(trans->nr_updates >= BTREE_ITER_MAX);
        EBUG_ON(!bpos_eq(k->k.p, path->pos));
 
-       /*
-        * The transaction journal res hasn't been allocated at this point.
-        * That occurs at commit time. Reuse the seq field to pass in the seq
-        * of a prejournaled key.
-        */
-       if (flags & BTREE_UPDATE_PREJOURNAL)
-               seq = trans->journal_res.seq;
-
        n = (struct btree_insert_entry) {
                .flags          = flags,
                .bkey_type      = __btree_node_type(path->level, path->btree_id),
@@ -404,7 +395,6 @@ bch2_trans_update_by_path(struct btree_trans *trans, struct btree_path *path,
                .cached         = path->cached,
                .path           = path,
                .k              = k,
-               .seq            = seq,
                .ip_allocated   = ip,
        };
 
@@ -432,7 +422,6 @@ bch2_trans_update_by_path(struct btree_trans *trans, struct btree_path *path,
                i->cached       = n.cached;
                i->k            = n.k;
                i->path         = n.path;
-               i->seq          = n.seq;
                i->ip_allocated = n.ip_allocated;
        } else {
                array_insert_item(trans->updates, trans->nr_updates,
@@ -543,18 +532,6 @@ int __must_check bch2_trans_update(struct btree_trans *trans, struct btree_iter
        return bch2_trans_update_by_path(trans, path, k, flags, _RET_IP_);
 }
 
-/*
- * Add a transaction update for a key that has already been journaled.
- */
-int __must_check bch2_trans_update_seq(struct btree_trans *trans, u64 seq,
-                                      struct btree_iter *iter, struct bkey_i *k,
-                                      enum btree_update_flags flags)
-{
-       trans->journal_res.seq = seq;
-       return bch2_trans_update(trans, iter, k, flags|BTREE_UPDATE_NOJOURNAL|
-                                                BTREE_UPDATE_PREJOURNAL);
-}
-
 static noinline int bch2_btree_insert_clone_trans(struct btree_trans *trans,
                                                  enum btree_id btree,
                                                  struct bkey_i *k)
diff --git a/fs/bcachefs/btree_write_buffer.c b/fs/bcachefs/btree_write_buffer.c
index 9609eb18f38d6e6608c2f8e0259c37349aafe1d1..7f3147e064a5768ef95eda416044aba5c078d4c9 100644
--- a/fs/bcachefs/btree_write_buffer.c
+++ b/fs/bcachefs/btree_write_buffer.c
@@ -78,12 +78,15 @@ static int bch2_btree_write_buffer_flush_one(struct btree_trans *trans,
        }
        return 0;
 trans_commit:
-       return  bch2_trans_update_seq(trans, wb->journal_seq, iter, &wb->k,
-                                     BTREE_UPDATE_INTERNAL_SNAPSHOT_NODE) ?:
+       trans->journal_res.seq = wb->journal_seq;
+
+       return  bch2_trans_update(trans, iter, &wb->k,
+                                 BTREE_UPDATE_INTERNAL_SNAPSHOT_NODE) ?:
                bch2_trans_commit(trans, NULL, NULL,
                                  commit_flags|
                                  BTREE_INSERT_NOCHECK_RW|
                                  BTREE_INSERT_NOFAIL|
+                                 BTREE_INSERT_JOURNAL_REPLAY|
                                  BTREE_INSERT_JOURNAL_RECLAIM);
 }
 
@@ -127,9 +130,11 @@ btree_write_buffered_insert(struct btree_trans *trans,
        bch2_trans_iter_init(trans, &iter, wb->btree, bkey_start_pos(&wb->k.k),
                             BTREE_ITER_CACHED|BTREE_ITER_INTENT);
 
+       trans->journal_res.seq = wb->journal_seq;
+
        ret   = bch2_btree_iter_traverse(&iter) ?:
-               bch2_trans_update_seq(trans, wb->journal_seq, &iter, &wb->k,
-                                     BTREE_UPDATE_INTERNAL_SNAPSHOT_NODE);
+               bch2_trans_update(trans, &iter, &wb->k,
+                                 BTREE_UPDATE_INTERNAL_SNAPSHOT_NODE);
        bch2_trans_iter_exit(trans, &iter);
        return ret;
 }
@@ -262,6 +267,7 @@ slowpath:
                ret = commit_do(trans, NULL, NULL,
                                commit_flags|
                                BTREE_INSERT_NOFAIL|
+                               BTREE_INSERT_JOURNAL_REPLAY|
                                BTREE_INSERT_JOURNAL_RECLAIM,
                                btree_write_buffered_insert(trans, i));
                if (bch2_fs_fatal_err_on(ret, c, "%s: insert error %s", __func__, bch2_err_str(ret)))