static int bch2_alloc_read_fn(struct bch_fs *c, enum btree_id id,
			      unsigned level, struct bkey_s_c k)
{
-	if (!level)
-		bch2_mark_key(c, k, 0, 0, NULL, 0,
-			      BTREE_TRIGGER_ALLOC_READ|
-			      BTREE_TRIGGER_NOATOMIC);
+	struct bch_dev *ca;
+	struct bucket *g;
+	struct bkey_alloc_unpacked u;
+
+	if (level || k.k->type != KEY_TYPE_alloc)
+		return 0;
+
+	ca = bch_dev_bkey_exists(c, k.k->p.inode);
+	g = __bucket(ca, k.k->p.offset, 0);
+	u = bch2_alloc_unpack(k);
+
+	g->_mark.gen		= u.gen;
+	g->_mark.data_type	= u.data_type;
+	g->_mark.dirty_sectors	= u.dirty_sectors;
+	g->_mark.cached_sectors	= u.cached_sectors;
+	g->io_time[READ]	= u.read_time;
+	g->io_time[WRITE]	= u.write_time;
+	g->oldest_gen		= u.oldest_gen;
+	g->gen_valid		= 1;

	return 0;
}

	unsigned i;
	int ret = 0;

+	down_read(&c->gc_lock);
	ret = bch2_btree_and_journal_walk(c, journal_keys, BTREE_ID_ALLOC,
					  NULL, bch2_alloc_read_fn);
+	up_read(&c->gc_lock);
+
	if (ret) {
		bch_err(c, "error reading alloc info: %i", ret);
		return ret;

	__BTREE_TRIGGER_GC,
	__BTREE_TRIGGER_BUCKET_INVALIDATE,
-	__BTREE_TRIGGER_ALLOC_READ,
	__BTREE_TRIGGER_NOATOMIC,
};

#define BTREE_TRIGGER_GC		(1U << __BTREE_TRIGGER_GC)
#define BTREE_TRIGGER_BUCKET_INVALIDATE	(1U << __BTREE_TRIGGER_BUCKET_INVALIDATE)
-#define BTREE_TRIGGER_ALLOC_READ	(1U << __BTREE_TRIGGER_ALLOC_READ)
#define BTREE_TRIGGER_NOATOMIC		(1U << __BTREE_TRIGGER_NOATOMIC)

static inline bool btree_node_type_needs_gc(enum btree_node_type type)

	bch2_wake_allocator(ca);
}

+__flatten
void bch2_dev_usage_from_buckets(struct bch_fs *c)
{
	struct bch_dev *ca;

		}
	}));

-	if (!(flags & BTREE_TRIGGER_ALLOC_READ))
-		bch2_dev_usage_update(c, ca, fs_usage, old_m, m, gc);
+	bch2_dev_usage_update(c, ca, fs_usage, old_m, m, gc);

	g->io_time[READ]	= u.read_time;
	g->io_time[WRITE]	= u.write_time;

		ret = __ec_stripe_mem_alloc(c, k.k->p.offset, GFP_KERNEL) ?:
			bch2_mark_key(c, k, 0, 0, NULL, 0,
-				      BTREE_TRIGGER_ALLOC_READ|
				      BTREE_TRIGGER_NOATOMIC);
		if (ret)
			return ret;