switch (type) {
case BKEY_TYPE_BTREE:
- bch2_mark_key(c, k, c->opts.btree_node_size, true, pos, NULL,
+ bch2_mark_key(c, k, c->opts.btree_node_size,
+ BCH_DATA_BTREE, pos, NULL,
0, flags|
BCH_BUCKET_MARK_MAY_MAKE_UNAVAILABLE|
BCH_BUCKET_MARK_GC_LOCK_HELD);
break;
case BKEY_TYPE_EXTENTS:
- bch2_mark_key(c, k, k.k->size, false, pos, NULL,
+ bch2_mark_key(c, k, k.k->size, BCH_DATA_USER, pos, NULL,
0, flags|
BCH_BUCKET_MARK_MAY_MAKE_UNAVAILABLE|
BCH_BUCKET_MARK_GC_LOCK_HELD);
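
The hunk above is the GC mark path (note the BCH_BUCKET_MARK_GC_LOCK_HELD flag): instead of encoding the data type in a bool, btree node pointers are now marked as BCH_DATA_BTREE with a full btree_node_size worth of sectors, and extents as BCH_DATA_USER with the key's own size.
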
for_each_pending_btree_node_free(c, as, d)
if (d->index_update_done)
bch2_mark_key(c, bkey_i_to_s_c(&d->key),
- c->opts.btree_node_size, true, pos,
+ c->opts.btree_node_size,
+ BCH_DATA_BTREE, pos,
&stats, 0,
BCH_BUCKET_MARK_MAY_MAKE_UNAVAILABLE|
BCH_BUCKET_MARK_GC_LOCK_HELD);
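
Pending btree node frees whose index update has completed are still accounted as live btree data during GC, so this site keeps the positive btree_node_size and now tags it BCH_DATA_BTREE.
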
struct bch_fs_usage tmp = { 0 };
bch2_mark_key(c, bkey_i_to_s_c(&d->key),
- -c->opts.btree_node_size, true, b
+ -c->opts.btree_node_size, BCH_DATA_BTREE, b
? gc_pos_btree_node(b)
: gc_pos_btree_root(as->btree_id),
&tmp, 0, 0);
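
Unmarking a freed node's old index entry negates the sector count but keeps the BCH_DATA_BTREE tag; the delta lands in a local bch_fs_usage tmp rather than the caller's stats.
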
BUG_ON(!pending->index_update_done);
bch2_mark_key(c, bkey_i_to_s_c(&pending->key),
- -c->opts.btree_node_size, true,
+ -c->opts.btree_node_size, BCH_DATA_BTREE,
gc_phase(GC_PHASE_PENDING_DELETE),
&stats, 0, 0);
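
The same negative-sectors pattern covers nodes on the pending-delete list; gc_phase(GC_PHASE_PENDING_DELETE) supplies the GC position, which is presumably what lets the bucket-marking code order this unmark against a concurrent GC pass.
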
__bch2_btree_set_root_inmem(c, b);
bch2_mark_key(c, bkey_i_to_s_c(&b->key),
- c->opts.btree_node_size, true,
+ c->opts.btree_node_size, BCH_DATA_BTREE,
gc_pos_btree_root(b->btree_id),
&stats, 0, 0);
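
Setting a new btree root (the __bch2_btree_set_root_inmem caller above) remarks the root node's own key, again spelling the type BCH_DATA_BTREE instead of true.
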
if (bkey_extent_is_data(&insert->k))
bch2_mark_key(c, bkey_i_to_s_c(insert),
- c->opts.btree_node_size, true,
+ c->opts.btree_node_size, BCH_DATA_BTREE,
gc_pos_btree_node(b), &stats, 0, 0);
while ((k = bch2_btree_node_iter_peek_all(node_iter, b)) &&
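
Inserting a btree pointer into an interior node marks the new child's key up front; the truncated while loop after it appears to be the node-iterator walk over the existing keys at that position.
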
bch2_btree_node_lock_write(b, iter);
bch2_mark_key(c, bkey_i_to_s_c(&new_key->k_i),
- c->opts.btree_node_size, true,
+ c->opts.btree_node_size, BCH_DATA_BTREE,
gc_pos_btree_root(b->btree_id),
&stats, 0, 0);
bch2_btree_node_free_index(as, NULL,
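
This looks like the node update_key path: the node is remarked under its new key, and the trailing bch2_btree_node_free_index call then unmarks the old one, so the accounting nets out to zero.
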
struct bkey_s_c_extent e,
const struct bch_extent_ptr *ptr,
struct bch_extent_crc_unpacked crc,
- s64 sectors, enum s_alloc type,
+ s64 sectors, enum bch_data_type data_type,
struct bch_fs_usage *stats,
u64 journal_seq, unsigned flags)
{
struct bucket_mark old, new;
struct bch_dev *ca = bch_dev_bkey_exists(c, ptr->dev);
struct bucket *g = PTR_BUCKET(ca, ptr);
- enum bch_data_type data_type = type == S_META
- ? BCH_DATA_BTREE : BCH_DATA_USER;
u64 v;
if (crc.compression_type) {
	/* ... */
}
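
This hunk is bch2_mark_pointer's parameter list: it now receives the bch_data_type directly, so the two deleted lines that re-derived it from enum s_alloc (S_META mapping to BCH_DATA_BTREE, everything else to BCH_DATA_USER) are no longer needed.
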
void bch2_mark_key(struct bch_fs *c, struct bkey_s_c k,
- s64 sectors, bool metadata,
+ s64 sectors, enum bch_data_type data_type,
struct gc_pos pos,
struct bch_fs_usage *stats,
u64 journal_seq, unsigned flags)
struct bkey_s_c_extent e = bkey_s_c_to_extent(k);
const struct bch_extent_ptr *ptr;
struct bch_extent_crc_unpacked crc;
- enum s_alloc type = metadata ? S_META : S_DIRTY;
+ enum s_alloc type = data_type == BCH_DATA_USER
+ ? S_DIRTY : S_META;
unsigned replicas = 0;
- BUG_ON(metadata && bkey_extent_is_cached(e.k));
BUG_ON(!sectors);
extent_for_each_ptr_crc(e, ptr, crc) {
- bch2_mark_pointer(c, e, ptr, crc, sectors, type,
+ bch2_mark_pointer(c, e, ptr, crc, sectors, data_type,
stats, journal_seq, flags);
replicas += !ptr->cached;
}
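
The core of the change is bch2_mark_key's signature: the bool metadata parameter becomes enum bch_data_type. Inside, the s_alloc value used for replica accounting is now derived from the enum (BCH_DATA_USER maps to S_DIRTY, anything else to S_META), and the bool-based BUG_ON goes away with the parameter it tested. A minimal, self-contained sketch of the bool-to-enum threading pattern, with hypothetical names rather than the real bcachefs API:

/* sketch.c: thread an enum through instead of a bool (hypothetical names) */
#include <stdio.h>

enum data_type { DATA_BTREE, DATA_USER };
enum s_alloc   { S_META, S_DIRTY };

/* old style: callers pass a bool, the helper re-expands it into a type */
static void mark_old(long sectors, int metadata)
{
	enum data_type t = metadata ? DATA_BTREE : DATA_USER;
	printf("old: %+ld sectors, type %d\n", sectors, t);
}

/* new style: callers state the type; derived values come from the enum */
static void mark_new(long sectors, enum data_type t)
{
	enum s_alloc a = (t == DATA_USER) ? S_DIRTY : S_META;
	printf("new: %+ld sectors, type %d, alloc counter %d\n", sectors, t, a);
}

int main(void)
{
	mark_old(256, 1);
	mark_new(256, DATA_BTREE);	/* intent visible at the call site */
	return 0;
}

The payoff is the same as in the patch: call sites read as what they mark, and adding a third data type later means a new enum value rather than another bool.
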
#define BCH_BUCKET_MARK_GC_WILL_VISIT (1 << 2)
#define BCH_BUCKET_MARK_GC_LOCK_HELD (1 << 3)
-void bch2_mark_key(struct bch_fs *, struct bkey_s_c, s64, bool, struct gc_pos,
- struct bch_fs_usage *, u64, unsigned);
+void bch2_mark_key(struct bch_fs *, struct bkey_s_c, s64, enum bch_data_type,
+ struct gc_pos, struct bch_fs_usage *, u64, unsigned);
void bch2_recalc_sectors_available(struct bch_fs *);
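
The prototype in the header changes to match, so every caller outside the marking code has to name a bch_data_type explicitly.
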
if (!sectors)
return;
- bch2_mark_key(c, k, sectors, false, gc_pos_btree_node(b),
+ bch2_mark_key(c, k, sectors, BCH_DATA_USER, gc_pos_btree_node(b),
&s->stats, s->trans->journal_res.seq, 0);
}
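
Finally, the extent-insert accounting hook passes BCH_DATA_USER along with the live sector delta and the journal sequence number, replacing the old false; with this, every caller states what kind of data it is marking and nothing downstream guesses from a bool.
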