struct bkey_s_c k, bool initial)
{
struct gc_pos pos = { 0 };
- unsigned flags = initial ? BCH_BUCKET_MARK_NOATOMIC : 0;
+ unsigned flags =
+ BCH_BUCKET_MARK_MAY_MAKE_UNAVAILABLE|
+ BCH_BUCKET_MARK_GC_LOCK_HELD|
+ (initial ? BCH_BUCKET_MARK_NOATOMIC : 0);
int ret = 0;
switch (type) {
case BKEY_TYPE_BTREE:
- if (initial) {
- ret = bch2_btree_mark_ptrs_initial(c, type, k);
- if (ret < 0)
- return ret;
- }
-
- bch2_mark_key(c, k, c->opts.btree_node_size,
- BCH_DATA_BTREE, pos, NULL,
- 0, flags|
- BCH_BUCKET_MARK_MAY_MAKE_UNAVAILABLE|
- BCH_BUCKET_MARK_GC_LOCK_HELD);
- break;
case BKEY_TYPE_EXTENTS:
if (initial) {
ret = bch2_btree_mark_ptrs_initial(c, type, k);
if (ret < 0)
return ret;
}
+ break;
+ default:
+ break;
+ }
+
+ bch2_mark_key(c, type, k, true, k.k->size,
+ pos, NULL, 0, flags);
- bch2_mark_key(c, k, k.k->size, BCH_DATA_USER, pos, NULL,
- 0, flags|
- BCH_BUCKET_MARK_MAY_MAKE_UNAVAILABLE|
- BCH_BUCKET_MARK_GC_LOCK_HELD);
+ switch (type) {
+ case BKEY_TYPE_BTREE:
+ case BKEY_TYPE_EXTENTS:
ret = bch2_btree_key_recalc_oldest_gen(c, k);
break;
default:
for_each_pending_btree_node_free(c, as, d)
if (d->index_update_done)
- bch2_mark_key(c, bkey_i_to_s_c(&d->key),
- c->opts.btree_node_size,
- BCH_DATA_BTREE, pos,
- &stats, 0,
+ bch2_mark_key(c, BKEY_TYPE_BTREE,
+ bkey_i_to_s_c(&d->key),
+ true, 0,
+ pos, &stats, 0,
BCH_BUCKET_MARK_MAY_MAKE_UNAVAILABLE|
BCH_BUCKET_MARK_GC_LOCK_HELD);
/*
if (gc_pos_cmp(c->gc_pos, gc_phase(GC_PHASE_PENDING_DELETE)) < 0) {
struct bch_fs_usage tmp = { 0 };
- bch2_mark_key(c, bkey_i_to_s_c(&d->key),
- -c->opts.btree_node_size, BCH_DATA_BTREE, b
- ? gc_pos_btree_node(b)
- : gc_pos_btree_root(as->btree_id),
- &tmp, 0, 0);
+ bch2_mark_key(c, BKEY_TYPE_BTREE,
+ bkey_i_to_s_c(&d->key),
+ false, 0, b
+ ? gc_pos_btree_node(b)
+ : gc_pos_btree_root(as->btree_id),
+ &tmp, 0, 0);
/*
* Don't apply tmp - pending deletes aren't tracked in
* bch_alloc_stats:
BUG_ON(!pending->index_update_done);
- bch2_mark_key(c, bkey_i_to_s_c(&pending->key),
- -c->opts.btree_node_size, BCH_DATA_BTREE,
- gc_phase(GC_PHASE_PENDING_DELETE),
- &stats, 0, 0);
+ bch2_mark_key(c, BKEY_TYPE_BTREE,
+ bkey_i_to_s_c(&pending->key),
+ false, 0,
+ gc_phase(GC_PHASE_PENDING_DELETE),
+ &stats, 0, 0);
/*
* Don't apply stats - pending deletes aren't tracked in
* bch_alloc_stats:
__bch2_btree_set_root_inmem(c, b);
- bch2_mark_key(c, bkey_i_to_s_c(&b->key),
- c->opts.btree_node_size, BCH_DATA_BTREE,
+ bch2_mark_key(c, BKEY_TYPE_BTREE,
+ bkey_i_to_s_c(&b->key),
+ true, 0,
gc_pos_btree_root(b->btree_id),
&stats, 0, 0);
BUG_ON(insert->k.u64s > bch_btree_keys_u64s_remaining(c, b));
if (bkey_extent_is_data(&insert->k))
- bch2_mark_key(c, bkey_i_to_s_c(insert),
- c->opts.btree_node_size, BCH_DATA_BTREE,
- gc_pos_btree_node(b), &stats, 0, 0);
+ bch2_mark_key(c, BKEY_TYPE_BTREE,
+ bkey_i_to_s_c(insert),
+ true, 0,
+ gc_pos_btree_node(b), &stats, 0, 0);
while ((k = bch2_btree_node_iter_peek_all(node_iter, b)) &&
bkey_iter_pos_cmp(b, &insert->k.p, k) > 0)
bch2_btree_node_lock_write(b, iter);
- bch2_mark_key(c, bkey_i_to_s_c(&new_key->k_i),
- c->opts.btree_node_size, BCH_DATA_BTREE,
+ bch2_mark_key(c, BKEY_TYPE_BTREE,
+ bkey_i_to_s_c(&new_key->k_i),
+ true, 0,
gc_pos_btree_root(b->btree_id),
&stats, 0, 0);
bch2_btree_node_free_index(as, NULL,
crc.uncompressed_size));
}
-/*
- * Checking against gc's position has to be done here, inside the cmpxchg()
- * loop, to avoid racing with the start of gc clearing all the marks - GC does
- * that with the gc pos seqlock held.
- */
-static void bch2_mark_pointer(struct bch_fs *c,
- struct bkey_s_c_extent e,
- struct extent_ptr_decoded p,
- s64 sectors, enum bch_data_type data_type,
- unsigned replicas,
- struct bch_fs_usage *fs_usage,
- u64 journal_seq, unsigned flags)
+static s64 ptr_disk_sectors(struct bkey_s_c_extent e,
+ struct extent_ptr_decoded p,
+ s64 sectors)
{
- struct bucket_mark old, new;
- struct bch_dev *ca = bch_dev_bkey_exists(c, p.ptr.dev);
- struct bucket *g = PTR_BUCKET(ca, &p.ptr);
- s64 uncompressed_sectors = sectors;
- u64 v;
if (p.crc.compression_type) {
unsigned old_sectors, new_sectors;
		if (sectors > 0) {
			old_sectors = 0;
			new_sectors = sectors;
		} else {
			old_sectors = e.k->size;
			new_sectors = e.k->size + sectors;
		}

		/* adjust by the change in prorated (compressed) disk sectors: */
		sectors = -__disk_sectors(p.crc, old_sectors)
			  +__disk_sectors(p.crc, new_sectors);
}
- /*
- * fs level usage (which determines free space) is in uncompressed
- * sectors, until copygc + compression is sorted out:
- *
- * note also that we always update @fs_usage, even when we otherwise
- * wouldn't do anything because gc is running - this is because the
- * caller still needs to account w.r.t. its disk reservation. It is
- * caller's responsibility to not apply @fs_usage if gc is in progress.
- */
- fs_usage->replicas
- [!p.ptr.cached && replicas ? replicas - 1 : 0].data
- [!p.ptr.cached ? data_type : BCH_DATA_CACHED] +=
- uncompressed_sectors;
+ return sectors;
+}
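/*
 * Illustrative sketch only, not part of the patch: the proration that
 * ptr_disk_sectors() performs. Charging or releasing N uncompressed sectors
 * of a compressed extent scales N by compressed_size / uncompressed_size;
 * the round-up division mirrors what __disk_sectors() is assumed to do, and
 * example_disk_sectors() is a made-up name.
 */
#include <assert.h>
#include <stdint.h>

static int64_t example_disk_sectors(int64_t live_sectors,
				    unsigned compressed_size,
				    unsigned uncompressed_size)
{
	/* DIV_ROUND_UP(live_sectors * compressed_size, uncompressed_size)
	 * for non-negative live_sectors */
	return (live_sectors * compressed_size + uncompressed_size - 1)
		/ uncompressed_size;
}

int main(void)
{
	/* 8 uncompressed sectors stored in 3 on-disk sectors: */
	assert(example_disk_sectors(8, 3, 8) == 3);
	/* dropping half the extent releases 3 - 2 = 1 on-disk sector: */
	assert(example_disk_sectors(4, 3, 8) == 2);
	return 0;
}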
+
+/*
+ * Checking against gc's position has to be done here, inside the cmpxchg()
+ * loop, to avoid racing with the start of gc clearing all the marks - GC does
+ * that with the gc pos seqlock held.
+ */
+static void bch2_mark_pointer(struct bch_fs *c,
+ struct bkey_s_c_extent e,
+ struct extent_ptr_decoded p,
+ s64 sectors, enum bch_data_type data_type,
+ struct bch_fs_usage *fs_usage,
+ u64 journal_seq, unsigned flags)
+{
+ struct bucket_mark old, new;
+ struct bch_dev *ca = bch_dev_bkey_exists(c, p.ptr.dev);
+ struct bucket *g = PTR_BUCKET(ca, &p.ptr);
+ u64 v;
if (flags & BCH_BUCKET_MARK_GC_WILL_VISIT) {
if (journal_seq)
bucket_became_unavailable(c, old, new));
}
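/*
 * Rough illustration (not bcachefs code) of the pattern described in the
 * comment above bch2_mark_pointer(): the "should gc handle this instead?"
 * check is repeated on every iteration of the compare-exchange retry loop
 * rather than once up front. Written with C11 atomics; all names here are
 * invented. bcachefs closes the remaining window with the gc position
 * seqlock mentioned in that comment.
 */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdint.h>

static bool try_mark(_Atomic uint64_t *mark, _Atomic bool *gc_will_visit,
		     uint64_t delta)
{
	uint64_t old = atomic_load(mark), new;

	do {
		/* re-checked after every failed compare-exchange */
		if (atomic_load(gc_will_visit))
			return false;	/* leave the update to gc's own pass */
		new = old + delta;
	} while (!atomic_compare_exchange_weak(mark, &old, new));

	return true;
}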
-void bch2_mark_key(struct bch_fs *c, struct bkey_s_c k,
- s64 sectors, enum bch_data_type data_type,
- struct gc_pos pos,
- struct bch_fs_usage *stats,
- u64 journal_seq, unsigned flags)
+static void bch2_mark_extent(struct bch_fs *c, struct bkey_s_c k,
+ s64 sectors, enum bch_data_type data_type,
+ struct gc_pos pos,
+ struct bch_fs_usage *stats,
+ u64 journal_seq, unsigned flags)
{
unsigned replicas = bch2_extent_nr_dirty_ptrs(k);
BUG_ON(replicas && replicas - 1 > ARRAY_SIZE(stats->replicas));
+ BUG_ON(!sectors);
+
+ switch (k.k->type) {
+ case BCH_EXTENT:
+ case BCH_EXTENT_CACHED: {
+ struct bkey_s_c_extent e = bkey_s_c_to_extent(k);
+ const union bch_extent_entry *entry;
+ struct extent_ptr_decoded p;
+
+ extent_for_each_ptr_decode(e, p, entry) {
+ s64 disk_sectors = ptr_disk_sectors(e, p, sectors);
+
+ /*
+ * fs level usage (which determines free space) is in
+ * uncompressed sectors, until copygc + compression is
+ * sorted out:
+ *
+ * note also that we always update @fs_usage, even when
+ * we otherwise wouldn't do anything because gc is
+ * running - this is because the caller still needs to
+ * account w.r.t. its disk reservation. It is caller's
+ * responsibility to not apply @fs_usage if gc is in
+ * progress.
+ */
+ stats->replicas
+ [!p.ptr.cached && replicas ? replicas - 1 : 0].data
+ [!p.ptr.cached ? data_type : BCH_DATA_CACHED] +=
+ sectors;
+
+ bch2_mark_pointer(c, e, p, disk_sectors, data_type,
+ stats, journal_seq, flags);
+ }
+ break;
+ }
+ case BCH_RESERVATION:
+ if (replicas)
+ stats->replicas[replicas - 1].persistent_reserved +=
+ sectors * replicas;
+ break;
+ }
+}
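/*
 * Worked example of the comment above (illustrative numbers, not from the
 * patch): a 2-replica extent of 8 uncompressed sectors, compressed to 3
 * on-disk sectors per copy. For each of the two dirty pointers,
 * bch2_mark_extent() adds the full 8 uncompressed sectors to
 * stats->replicas[1].data[BCH_DATA_USER], while bch2_mark_pointer() charges
 * that pointer's bucket only the 3 prorated disk sectors computed by
 * ptr_disk_sectors().
 */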
+void bch2_mark_key(struct bch_fs *c,
+ enum bkey_type type, struct bkey_s_c k,
+ bool inserting, s64 sectors,
+ struct gc_pos pos,
+ struct bch_fs_usage *stats,
+ u64 journal_seq, unsigned flags)
+{
/*
* synchronization w.r.t. GC:
*
if (!stats)
stats = this_cpu_ptr(c->usage_percpu);
- switch (k.k->type) {
- case BCH_EXTENT:
- case BCH_EXTENT_CACHED: {
- struct bkey_s_c_extent e = bkey_s_c_to_extent(k);
- const union bch_extent_entry *entry;
- struct extent_ptr_decoded p;
-
- BUG_ON(!sectors);
-
- extent_for_each_ptr_decode(e, p, entry)
- bch2_mark_pointer(c, e, p, sectors, data_type,
- replicas, stats, journal_seq, flags);
+ switch (type) {
+ case BKEY_TYPE_BTREE:
+ bch2_mark_extent(c, k, inserting
+ ? c->opts.btree_node_size
+ : -c->opts.btree_node_size,
+ BCH_DATA_BTREE,
+ pos, stats, journal_seq, flags);
break;
- }
- case BCH_RESERVATION:
- if (replicas)
- stats->replicas[replicas - 1].persistent_reserved +=
- sectors * replicas;
+ case BKEY_TYPE_EXTENTS:
+ bch2_mark_extent(c, k, sectors, BCH_DATA_USER,
+ pos, stats, journal_seq, flags);
+ break;
+ default:
break;
}
percpu_up_read(&c->usage_lock);
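/*
 * Summary of the calling-convention change, using the btree-root case from
 * the hunks above: the explicit data type argument is replaced by the bkey
 * type plus an "inserting" flag, and for btree keys the sector count is now
 * derived from c->opts.btree_node_size inside bch2_mark_key().
 */

/* old: */
bch2_mark_key(c, bkey_i_to_s_c(&b->key),
	      c->opts.btree_node_size, BCH_DATA_BTREE,
	      gc_pos_btree_root(b->btree_id),
	      &stats, 0, 0);

/* new: */
bch2_mark_key(c, BKEY_TYPE_BTREE,
	      bkey_i_to_s_c(&b->key),
	      true, 0,
	      gc_pos_btree_root(b->btree_id),
	      &stats, 0, 0);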