bcachefs: New bucket sector count helpers
author		Kent Overstreet <kent.overstreet@linux.dev>
		Thu, 23 Nov 2023 23:05:18 +0000 (18:05 -0500)
committer	Kent Overstreet <kent.overstreet@linux.dev>
		Mon, 1 Jan 2024 16:47:38 +0000 (11:47 -0500)
This introduces bch2_bucket_sectors() and bch2_bucket_sectors_dirty(),
prep work for separately accounting stripe sectors.
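
The intended call pattern, as a minimal sketch (assuming a struct
bch_alloc_v4 pointer obtained via bch2_alloc_to_v4(), as in the call
sites below):

	unsigned total = bch2_bucket_sectors(*a);	/* dirty + cached */
	unsigned dirty = bch2_bucket_sectors_dirty(*a);	/* just dirty_sectors, for now */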

Signed-off-by: Kent Overstreet <kent.overstreet@linux.dev>
fs/bcachefs/alloc_background.c
fs/bcachefs/alloc_background.h
fs/bcachefs/buckets.c
fs/bcachefs/move.c
fs/bcachefs/movinggc.c

diff --git a/fs/bcachefs/alloc_background.c b/fs/bcachefs/alloc_background.c
index dcfe26fdb5000ed78482cb3743a095c263cd446c..3b1ddb8397b0d2a2b5f933ba21b0ba12efd04413 100644
--- a/fs/bcachefs/alloc_background.c
+++ b/fs/bcachefs/alloc_background.c
@@ -261,10 +261,8 @@ int bch2_alloc_v4_invalid(struct bch_fs *c, struct bkey_s_c k,
        case BCH_DATA_free:
        case BCH_DATA_need_gc_gens:
        case BCH_DATA_need_discard:
-               bkey_fsck_err_on(a.v->dirty_sectors ||
-                                a.v->cached_sectors ||
-                                a.v->stripe, c, err,
-                                alloc_key_empty_but_have_data,
+               bkey_fsck_err_on(bch2_bucket_sectors(*a.v) || a.v->stripe,
+                                c, err, alloc_key_empty_but_have_data,
                                 "empty data type free but have data");
                break;
        case BCH_DATA_sb:
@@ -272,22 +270,21 @@ int bch2_alloc_v4_invalid(struct bch_fs *c, struct bkey_s_c k,
        case BCH_DATA_btree:
        case BCH_DATA_user:
        case BCH_DATA_parity:
-               bkey_fsck_err_on(!a.v->dirty_sectors, c, err,
-                                alloc_key_dirty_sectors_0,
+               bkey_fsck_err_on(!bch2_bucket_sectors_dirty(*a.v),
+                                c, err, alloc_key_dirty_sectors_0,
                                 "data_type %s but dirty_sectors==0",
                                 bch2_data_types[a.v->data_type]);
                break;
        case BCH_DATA_cached:
                bkey_fsck_err_on(!a.v->cached_sectors ||
-                                a.v->dirty_sectors ||
-                                a.v->stripe, c, err,
-                                alloc_key_cached_inconsistency,
+                                bch2_bucket_sectors_dirty(*a.v) ||
+                                a.v->stripe,
+                                c, err, alloc_key_cached_inconsistency,
                                 "data type inconsistency");
 
                bkey_fsck_err_on(!a.v->io_time[READ] &&
                                 c->curr_recovery_pass > BCH_RECOVERY_PASS_check_alloc_to_lru_refs,
-                                c, err,
-                                alloc_key_cached_but_read_time_zero,
+                                c, err, alloc_key_cached_but_read_time_zero,
                                 "cached bucket with read_time == 0");
                break;
        case BCH_DATA_stripe:
@@ -790,8 +787,7 @@ int bch2_trans_mark_alloc(struct btree_trans *trans,
 
        new_a->data_type = alloc_data_type(*new_a, new_a->data_type);
 
-       if (new_a->dirty_sectors > old_a->dirty_sectors ||
-           new_a->cached_sectors > old_a->cached_sectors) {
+       if (bch2_bucket_sectors(*new_a) > bch2_bucket_sectors(*old_a)) {
                new_a->io_time[READ] = max_t(u64, 1, atomic64_read(&c->io_clock[READ].now));
                new_a->io_time[WRITE]= max_t(u64, 1, atomic64_read(&c->io_clock[WRITE].now));
                SET_BCH_ALLOC_V4_NEED_INC_GEN(new_a, true);
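
For illustration only (hypothetical values, not part of the patch): the
io_time refresh above now keys off growth in the combined sector count
rather than growth in either field individually.

	struct bch_alloc_v4 old = { .dirty_sectors = 8, .cached_sectors = 0  };
	struct bch_alloc_v4 new = { .dirty_sectors = 8, .cached_sectors = 16 };

	if (bch2_bucket_sectors(new) > bch2_bucket_sectors(old))	/* 24 > 8 */
		;	/* io_time[READ]/io_time[WRITE] are refreshed */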
diff --git a/fs/bcachefs/alloc_background.h b/fs/bcachefs/alloc_background.h
index 73faf99a222aac3b33035432666e4d9b272c6fe9..96671f166dd8053a7842fdda13e9afd14e62144b 100644
--- a/fs/bcachefs/alloc_background.h
+++ b/fs/bcachefs/alloc_background.h
@@ -71,6 +71,24 @@ static inline enum bch_data_type bucket_data_type(enum bch_data_type data_type)
        return data_type == BCH_DATA_stripe ? BCH_DATA_user : data_type;
 }
 
+static inline unsigned bch2_bucket_sectors(struct bch_alloc_v4 a)
+{
+       return a.dirty_sectors + a.cached_sectors;
+}
+
+static inline unsigned bch2_bucket_sectors_dirty(struct bch_alloc_v4 a)
+{
+       return a.dirty_sectors;
+}
+
+static inline unsigned bch2_bucket_sectors_fragmented(struct bch_dev *ca,
+                                                struct bch_alloc_v4 a)
+{
+       int d = bch2_bucket_sectors_dirty(a);
+
+       return d ? max(0, ca->mi.bucket_size - d) : 0;
+}
+
 static inline u64 alloc_lru_idx_read(struct bch_alloc_v4 a)
 {
        return a.data_type == BCH_DATA_cached ? a.io_time[READ] : 0;
@@ -90,10 +108,11 @@ static inline u64 alloc_lru_idx_fragmentation(struct bch_alloc_v4 a,
                                              struct bch_dev *ca)
 {
        if (!data_type_movable(a.data_type) ||
-           a.dirty_sectors >= ca->mi.bucket_size)
+           !bch2_bucket_sectors_fragmented(ca, a))
                return 0;
 
-       return div_u64((u64) a.dirty_sectors * (1ULL << 31), ca->mi.bucket_size);
+       u64 d = bch2_bucket_sectors_dirty(a);
+       return div_u64(d * (1ULL << 31), ca->mi.bucket_size);
 }
 
 static inline u64 alloc_freespace_genbits(struct bch_alloc_v4 a)
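
A worked example of the two calculations above, with hypothetical
numbers (ca->mi.bucket_size == 512, dirty_sectors == 384, movable data
type):

	bch2_bucket_sectors_fragmented(ca, a);	/* max(0, 512 - 384) == 128 */
	alloc_lru_idx_fragmentation(a, ca);	/* 384 * (1ULL << 31) / 512, i.e. 3/4 of the LRU scale */

A fully dirty bucket (d >= bucket_size) has zero fragmented sectors, so
alloc_lru_idx_fragmentation() still returns 0 for it, matching the old
dirty_sectors >= bucket_size early-out; a bucket with d == 0 computed to
0 either way.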
diff --git a/fs/bcachefs/buckets.c b/fs/bcachefs/buckets.c
index ee3a2f5271c3ebe8e249ba7087dbe3afc1cf9f82..bb2ded894b7ee2004844bb4e9bbc7096bbb710bc 100644
--- a/fs/bcachefs/buckets.c
+++ b/fs/bcachefs/buckets.c
@@ -277,14 +277,6 @@ void bch2_dev_usage_init(struct bch_dev *ca)
        ca->usage_base->d[BCH_DATA_free].buckets = ca->mi.nbuckets - ca->mi.first_bucket;
 }
 
-static inline int bucket_sectors_fragmented(struct bch_dev *ca,
-                                           struct bch_alloc_v4 a)
-{
-       return a.dirty_sectors
-               ? max(0, (int) ca->mi.bucket_size - (int) a.dirty_sectors)
-               : 0;
-}
-
 static void bch2_dev_usage_update(struct bch_fs *c, struct bch_dev *ca,
                                  struct bch_alloc_v4 old,
                                  struct bch_alloc_v4 new,
@@ -306,41 +298,40 @@ static void bch2_dev_usage_update(struct bch_fs *c, struct bch_dev *ca,
        u->d[old.data_type].buckets--;
        u->d[new.data_type].buckets++;
 
-       u->buckets_ec -= (int) !!old.stripe;
-       u->buckets_ec += (int) !!new.stripe;
+       u->buckets_ec -= !!old.stripe;
+       u->buckets_ec += !!new.stripe;
 
-       u->d[old.data_type].sectors -= old.dirty_sectors;
-       u->d[new.data_type].sectors += new.dirty_sectors;
+       u->d[old.data_type].sectors -= bch2_bucket_sectors_dirty(old);
+       u->d[new.data_type].sectors += bch2_bucket_sectors_dirty(new);
 
        u->d[BCH_DATA_cached].sectors += new.cached_sectors;
        u->d[BCH_DATA_cached].sectors -= old.cached_sectors;
 
-       u->d[old.data_type].fragmented -= bucket_sectors_fragmented(ca, old);
-       u->d[new.data_type].fragmented += bucket_sectors_fragmented(ca, new);
+       u->d[old.data_type].fragmented -= bch2_bucket_sectors_fragmented(ca, old);
+       u->d[new.data_type].fragmented += bch2_bucket_sectors_fragmented(ca, new);
 
        preempt_enable();
 }
 
+static inline struct bch_alloc_v4 bucket_m_to_alloc(struct bucket b)
+{
+       return (struct bch_alloc_v4) {
+               .gen            = b.gen,
+               .data_type      = b.data_type,
+               .dirty_sectors  = b.dirty_sectors,
+               .cached_sectors = b.cached_sectors,
+               .stripe         = b.stripe,
+       };
+}
+
 static void bch2_dev_usage_update_m(struct bch_fs *c, struct bch_dev *ca,
                                    struct bucket old, struct bucket new,
                                    u64 journal_seq, bool gc)
 {
-       struct bch_alloc_v4 old_a = {
-               .gen            = old.gen,
-               .data_type      = old.data_type,
-               .dirty_sectors  = old.dirty_sectors,
-               .cached_sectors = old.cached_sectors,
-               .stripe         = old.stripe,
-       };
-       struct bch_alloc_v4 new_a = {
-               .gen            = new.gen,
-               .data_type      = new.data_type,
-               .dirty_sectors  = new.dirty_sectors,
-               .cached_sectors = new.cached_sectors,
-               .stripe         = new.stripe,
-       };
-
-       bch2_dev_usage_update(c, ca, old_a, new_a, journal_seq, gc);
+       bch2_dev_usage_update(c, ca,
+                             bucket_m_to_alloc(old),
+                             bucket_m_to_alloc(new),
+                             journal_seq, gc);
 }
 
 static inline int __update_replicas(struct bch_fs *c,
@@ -640,7 +631,6 @@ int bch2_mark_metadata_bucket(struct bch_fs *c, struct bch_dev *ca,
                goto err;
        }
 
-
        g->data_type = data_type;
        g->dirty_sectors += sectors;
        new = *g;
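
A quick sketch of the new conversion helper feeding the usage
accounting (hypothetical values):

	struct bucket b = {
		.gen		= 3,
		.data_type	= BCH_DATA_user,
		.dirty_sectors	= 384,
	};
	struct bch_alloc_v4 a = bucket_m_to_alloc(b);

	/* bch2_bucket_sectors_dirty(a) == 384; bch2_dev_usage_update_m()
	 * now builds both its old and new states this way instead of
	 * open-coding the field-by-field copies */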
diff --git a/fs/bcachefs/move.c b/fs/bcachefs/move.c
index d99742fbdb19f8218ff6bd5c1a4c6f247684579b..615f18a0311a9d7004438eadbe4102ceb7d4b249 100644
--- a/fs/bcachefs/move.c
+++ b/fs/bcachefs/move.c
@@ -677,7 +677,7 @@ int __bch2_evacuate_bucket(struct moving_context *ctxt,
        }
 
        a = bch2_alloc_to_v4(k, &a_convert);
-       dirty_sectors = a->dirty_sectors;
+       dirty_sectors = bch2_bucket_sectors_dirty(*a);
        bucket_size = bch_dev_bkey_exists(c, bucket.inode)->mi.bucket_size;
        fragmentation = a->fragmentation_lru;
 
diff --git a/fs/bcachefs/movinggc.c b/fs/bcachefs/movinggc.c
index a84e79f79e5ec562fa8f9d072ef3250e60a8564f..b8d0542222c3bb71f1ee195df44394f03697ec84 100644
--- a/fs/bcachefs/movinggc.c
+++ b/fs/bcachefs/movinggc.c
@@ -91,7 +91,7 @@ static int bch2_bucket_is_movable(struct btree_trans *trans,
 
        a = bch2_alloc_to_v4(k, &_a);
        b->k.gen        = a->gen;
-       b->sectors      = a->dirty_sectors;
+       b->sectors      = bch2_bucket_sectors_dirty(*a);
 
        ret = data_type_movable(a->data_type) &&
                a->fragmentation_lru &&