bch2_trans_init(&trans, c);
for_each_btree_key(&trans, iter, BTREE_ID_ALLOC, POS_MIN, 0, k, ret)
- bch2_mark_key(c, k, true, 0, NULL, 0, 0);
+ bch2_mark_key(c, k, true, 0, NULL, 0,
+ BCH_BUCKET_MARK_NOATOMIC|
+ BCH_BUCKET_MARK_ALLOC_READ);
ret = bch2_trans_exit(&trans) ?: ret;
if (ret) {
for_each_journal_key(*journal_keys, j)
if (j->btree_id == BTREE_ID_ALLOC)
bch2_mark_key(c, bkey_i_to_s_c(j->k),
- true, 0, NULL, 0, 0);
+ true, 0, NULL, 0,
+ BCH_BUCKET_MARK_NOATOMIC|
+ BCH_BUCKET_MARK_ALLOC_READ);
percpu_down_write(&c->mark_lock);
bch2_dev_usage_from_buckets(c);
g = __bucket(ca, k.k->p.offset, gc);
u = bch2_alloc_unpack(k);
- old = bucket_data_cmpxchg(c, ca, fs_usage, g, m, ({
+ old = bucket_cmpxchg(g, m, ({
m.gen = u.gen;
m.data_type = u.data_type;
m.dirty_sectors = u.dirty_sectors;
}
}));
+ if (!(flags & BCH_BUCKET_MARK_ALLOC_READ)) /* on alloc-info read, skip per-dev usage update — usage is presumably rebuilt afterwards via bch2_dev_usage_from_buckets(); verify against recovery path */
+ bch2_dev_usage_update(c, ca, fs_usage, old, m, gc);
+
g->io_time[READ] = u.read_time;
g->io_time[WRITE] = u.write_time;
g->oldest_gen = u.oldest_gen;
#define BCH_BUCKET_MARK_GC (1 << 0)
#define BCH_BUCKET_MARK_NOATOMIC (1 << 1)
+#define BCH_BUCKET_MARK_ALLOC_READ (1 << 2) /* marking keys while reading alloc info: suppresses bch2_dev_usage_update(); usage is recomputed later (see bch2_dev_usage_from_buckets() in the read path) */
int bch2_mark_key_locked(struct bch_fs *, struct bkey_s_c,
bool, s64, struct bch_fs_usage *,