}
new->k.p = iter->pos;
- bch2_trans_update(trans, iter, &new->k_i);
+ bch2_trans_update(trans, iter, &new->k_i, 0);
*new_acl = acl;
acl = NULL;
err:
for_each_btree_key(&trans, iter, BTREE_ID_ALLOC, POS_MIN, 0, k, ret)
bch2_mark_key(c, k, 0, 0, NULL, 0,
- BCH_BUCKET_MARK_ALLOC_READ|
- BCH_BUCKET_MARK_NOATOMIC);
+ BTREE_TRIGGER_ALLOC_READ|
+ BTREE_TRIGGER_NOATOMIC);
ret = bch2_trans_exit(&trans) ?: ret;
if (ret) {
if (j->btree_id == BTREE_ID_ALLOC)
bch2_mark_key(c, bkey_i_to_s_c(j->k),
0, 0, NULL, 0,
- BCH_BUCKET_MARK_ALLOC_READ|
- BCH_BUCKET_MARK_NOATOMIC);
+ BTREE_TRIGGER_ALLOC_READ|
+ BTREE_TRIGGER_NOATOMIC);
percpu_down_write(&c->mark_lock);
bch2_dev_usage_from_buckets(c);
a->k.p = iter->pos;
bch2_alloc_pack(a, new_u);
- bch2_trans_update(trans, iter, &a->k_i);
+ bch2_trans_update(trans, iter, &a->k_i,
+ BTREE_TRIGGER_NORUN);
ret = bch2_trans_commit(trans, NULL, NULL,
- BTREE_INSERT_NOFAIL|
- BTREE_INSERT_NOMARK|
- flags);
+ BTREE_INSERT_NOFAIL|flags);
err:
if (ret == -EINTR)
goto retry;
ret = bch2_alloc_write_key(&trans, iter,
BTREE_INSERT_NOFAIL|
BTREE_INSERT_LAZY_RW|
- BTREE_INSERT_JOURNAL_REPLAY|
- BTREE_INSERT_NOMARK);
+ BTREE_INSERT_JOURNAL_REPLAY);
bch2_trans_exit(&trans);
return ret < 0 ? ret : 0;
}
a->k.p = iter->pos;
bch2_alloc_pack(a, u);
- bch2_trans_update(trans, iter, &a->k_i);
+ bch2_trans_update(trans, iter, &a->k_i,
+ BTREE_TRIGGER_BUCKET_INVALIDATE);
/*
* XXX:
BTREE_INSERT_NOFAIL|
BTREE_INSERT_USE_RESERVE|
BTREE_INSERT_USE_ALLOC_RESERVE|
- BTREE_INSERT_BUCKET_INVALIDATE|
flags);
if (ret == -EINTR)
goto retry;
struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
const struct bch_extent_ptr *ptr;
unsigned flags =
- BCH_BUCKET_MARK_GC|
- (initial ? BCH_BUCKET_MARK_NOATOMIC : 0);
+ BTREE_TRIGGER_GC|
+ (initial ? BTREE_TRIGGER_NOATOMIC : 0);
int ret = 0;
if (initial) {
BTREE_ITER_SLOTS, k, ret) {
percpu_down_read(&c->mark_lock);
ret = bch2_mark_overwrite(&trans, iter, k, insert, NULL,
- BCH_BUCKET_MARK_GC|
- BCH_BUCKET_MARK_NOATOMIC);
+ BTREE_TRIGGER_GC|
+ BTREE_TRIGGER_NOATOMIC);
percpu_up_read(&c->mark_lock);
if (!ret)
gc_pos_set(c, gc_phase(GC_PHASE_SB));
for_each_online_member(ca, c, i)
- bch2_mark_dev_superblock(c, ca, BCH_BUCKET_MARK_GC);
+ bch2_mark_dev_superblock(c, ca, BTREE_TRIGGER_GC);
mutex_unlock(&c->sb_lock);
}
if (d->index_update_done)
bch2_mark_key(c, bkey_i_to_s_c(&d->key),
0, 0, NULL, 0,
- BCH_BUCKET_MARK_GC);
+ BTREE_TRIGGER_GC);
mutex_unlock(&c->btree_interior_update_lock);
}
fifo_for_each_entry(i, &ca->free_inc, iter)
bch2_mark_alloc_bucket(c, ca, i, true,
gc_pos_alloc(c, NULL),
- BCH_BUCKET_MARK_GC);
+ BTREE_TRIGGER_GC);
fifo_for_each_entry(i, &ca->free[j], iter)
bch2_mark_alloc_bucket(c, ca, i, true,
gc_pos_alloc(c, NULL),
- BCH_BUCKET_MARK_GC);
+ BTREE_TRIGGER_GC);
}
spin_unlock(&c->freelist_lock);
ca = bch_dev_bkey_exists(c, ob->ptr.dev);
bch2_mark_alloc_bucket(c, ca, PTR_BUCKET_NR(ca, &ob->ptr), true,
gc_pos_alloc(c, ob),
- BCH_BUCKET_MARK_GC);
+ BTREE_TRIGGER_GC);
}
spin_unlock(&ob->lock);
}
}
struct btree_insert_entry {
+ unsigned trigger_flags;
struct bkey_i *k;
struct btree_iter *iter;
};
(1U << BKEY_TYPE_INODES)| \
(1U << BKEY_TYPE_REFLINK))
+enum btree_trigger_flags {
+ __BTREE_TRIGGER_NORUN, /* Don't run triggers at all */
+ __BTREE_TRIGGER_NOOVERWRITES, /* Don't run triggers on overwrites */
+
+ __BTREE_TRIGGER_INSERT, /* Key being marked is the one being inserted */
+ __BTREE_TRIGGER_OVERWRITE, /* Key being marked is being overwritten */
+ __BTREE_TRIGGER_OVERWRITE_SPLIT, /* Overwrite splits the existing extent in two */
+
+ __BTREE_TRIGGER_GC, /* Update the gc copies of bucket/usage info */
+ __BTREE_TRIGGER_BUCKET_INVALIDATE, /* Marking a bucket that is being invalidated */
+ __BTREE_TRIGGER_ALLOC_READ, /* Marking from the initial read of the alloc btree */
+ __BTREE_TRIGGER_NOATOMIC, /* Bucket marks may be updated non-atomically */
+};
+
+#define BTREE_TRIGGER_NORUN (1U << __BTREE_TRIGGER_NORUN)
+#define BTREE_TRIGGER_NOOVERWRITES (1U << __BTREE_TRIGGER_NOOVERWRITES)
+
+#define BTREE_TRIGGER_INSERT (1U << __BTREE_TRIGGER_INSERT)
+#define BTREE_TRIGGER_OVERWRITE (1U << __BTREE_TRIGGER_OVERWRITE)
+#define BTREE_TRIGGER_OVERWRITE_SPLIT (1U << __BTREE_TRIGGER_OVERWRITE_SPLIT)
+
+#define BTREE_TRIGGER_GC (1U << __BTREE_TRIGGER_GC)
+#define BTREE_TRIGGER_BUCKET_INVALIDATE (1U << __BTREE_TRIGGER_BUCKET_INVALIDATE)
+#define BTREE_TRIGGER_ALLOC_READ (1U << __BTREE_TRIGGER_ALLOC_READ)
+#define BTREE_TRIGGER_NOATOMIC (1U << __BTREE_TRIGGER_NOATOMIC)
+
static inline bool btree_node_type_needs_gc(enum btree_node_type type)
{
return BTREE_NODE_TYPE_HAS_TRIGGERS & (1U << type);
void bch2_btree_journal_key(struct btree_trans *, struct btree_iter *,
struct bkey_i *);
-enum {
+enum btree_insert_flags {
__BTREE_INSERT_NOUNLOCK,
__BTREE_INSERT_NOFAIL,
__BTREE_INSERT_NOCHECK_RW,
__BTREE_INSERT_USE_ALLOC_RESERVE,
__BTREE_INSERT_JOURNAL_REPLAY,
__BTREE_INSERT_JOURNAL_RESERVED,
- __BTREE_INSERT_NOMARK_OVERWRITES,
- __BTREE_INSERT_NOMARK,
- __BTREE_INSERT_BUCKET_INVALIDATE,
__BTREE_INSERT_NOWAIT,
__BTREE_INSERT_GC_LOCK_HELD,
__BCH_HASH_SET_MUST_CREATE,
#define BTREE_INSERT_JOURNAL_RESERVED (1 << __BTREE_INSERT_JOURNAL_RESERVED)
-/* Don't mark overwrites, just new key: */
-#define BTREE_INSERT_NOMARK_OVERWRITES (1 << __BTREE_INSERT_NOMARK_OVERWRITES)
-
-/* Don't call mark new key at all: */
-#define BTREE_INSERT_NOMARK (1 << __BTREE_INSERT_NOMARK)
-
-#define BTREE_INSERT_BUCKET_INVALIDATE (1 << __BTREE_INSERT_BUCKET_INVALIDATE)
-
/* Don't block on allocation failure (for new btree nodes): */
#define BTREE_INSERT_NOWAIT (1 << __BTREE_INSERT_NOWAIT)
#define BTREE_INSERT_GC_LOCK_HELD (1 << __BTREE_INSERT_GC_LOCK_HELD)
}
static inline void bch2_trans_update(struct btree_trans *trans,
- struct btree_iter *iter,
- struct bkey_i *k)
+ struct btree_iter *iter, struct bkey_i *k,
+ enum btree_trigger_flags flags)
{
EBUG_ON(trans->nr_updates >= trans->nr_iters);
iter->flags |= BTREE_ITER_KEEP_UNTIL_COMMIT;
trans->updates[trans->nr_updates++] = (struct btree_insert_entry) {
- .iter = iter, .k = k
+ .trigger_flags = flags, .iter = iter, .k = k
};
}
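
For illustration, a minimal sketch (an editor's addition, not part of the patch; trans, iter and the keys stand in for whatever the surrounding call site already provides): trigger control moves from commit-time flags to the per-update argument, so a caller that previously set BTREE_INSERT_NOMARK on the whole commit now tags only the update it applies to.

	/* ordinary update: triggers run as before */
	bch2_trans_update(trans, iter, &new->k_i, 0);

	/* suppress triggers for this one update (was BTREE_INSERT_NOMARK): */
	bch2_trans_update(trans, iter, &a->k_i, BTREE_TRIGGER_NORUN);

	ret = bch2_trans_commit(trans, NULL, NULL, BTREE_INSERT_NOFAIL);
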
gc_pos_cmp(c->gc_pos, gc_phase(GC_PHASE_PENDING_DELETE)) < 0)
bch2_mark_key_locked(c, bkey_i_to_s_c(&d->key),
0, 0, NULL, 0,
- BCH_BUCKET_MARK_OVERWRITE|
- BCH_BUCKET_MARK_GC);
+ BTREE_TRIGGER_OVERWRITE|
+ BTREE_TRIGGER_GC);
}
static void __btree_node_free(struct bch_fs *c, struct btree *b)
BUG_ON(!pending->index_update_done);
bch2_mark_key(c, bkey_i_to_s_c(&pending->key),
- 0, 0, NULL, 0, BCH_BUCKET_MARK_OVERWRITE);
+ 0, 0, NULL, 0, BTREE_TRIGGER_OVERWRITE);
if (gc_visited(c, gc_phase(GC_PHASE_PENDING_DELETE)))
bch2_mark_key(c, bkey_i_to_s_c(&pending->key),
0, 0, NULL, 0,
- BCH_BUCKET_MARK_OVERWRITE|
- BCH_BUCKET_MARK_GC);
+ BTREE_TRIGGER_OVERWRITE|
+ BTREE_TRIGGER_GC);
}
static struct btree *__bch2_btree_node_alloc(struct bch_fs *c,
bch2_mark_key_locked(c, bkey_i_to_s_c(&b->key),
0, 0, &fs_usage->u, 0,
- BCH_BUCKET_MARK_INSERT);
+ BTREE_TRIGGER_INSERT);
if (gc_visited(c, gc_pos_btree_root(b->c.btree_id)))
bch2_mark_key_locked(c, bkey_i_to_s_c(&b->key),
0, 0, NULL, 0,
- BCH_BUCKET_MARK_INSERT|
- BCH_BUCKET_MARK_GC);
+ BTREE_TRIGGER_INSERT|
+ BTREE_TRIGGER_GC);
if (old && !btree_node_fake(old))
bch2_btree_node_free_index(as, NULL,
bch2_mark_key_locked(c, bkey_i_to_s_c(insert),
0, 0, &fs_usage->u, 0,
- BCH_BUCKET_MARK_INSERT);
+ BTREE_TRIGGER_INSERT);
if (gc_visited(c, gc_pos_btree_node(b)))
bch2_mark_key_locked(c, bkey_i_to_s_c(insert),
0, 0, NULL, 0,
- BCH_BUCKET_MARK_INSERT|
- BCH_BUCKET_MARK_GC);
+ BTREE_TRIGGER_INSERT|
+ BTREE_TRIGGER_GC);
while ((k = bch2_btree_node_iter_peek_all(node_iter, b)) &&
bkey_iter_pos_cmp(b, &insert->k.p, k) > 0)
bch2_mark_key_locked(c, bkey_i_to_s_c(&new_key->k_i),
0, 0, &fs_usage->u, 0,
- BCH_BUCKET_MARK_INSERT);
+ BTREE_TRIGGER_INSERT);
if (gc_visited(c, gc_pos_btree_root(b->c.btree_id)))
bch2_mark_key_locked(c, bkey_i_to_s_c(&new_key->k_i),
0, 0, NULL, 0,
- BCH_BUCKET_MARK_INSERT||
- BCH_BUCKET_MARK_GC);
+ BTREE_TRIGGER_INSERT|
+ BTREE_TRIGGER_GC);
bch2_btree_node_free_index(as, NULL,
bkey_i_to_s_c(&b->key),
{
struct bch_fs *c = trans->c;
struct btree_insert_entry *i;
- unsigned mark_flags = trans->flags & BTREE_INSERT_BUCKET_INVALIDATE
- ? BCH_BUCKET_MARK_BUCKET_INVALIDATE
- : 0;
-
- if (unlikely(trans->flags & BTREE_INSERT_NOMARK))
- return;
trans_for_each_update(trans, i)
if (gc_visited(c, gc_pos_btree_node(i->iter->l[0].b)))
bch2_mark_update(trans, i->iter, i->k, NULL,
- mark_flags|BCH_BUCKET_MARK_GC);
+ i->trigger_flags|BTREE_TRIGGER_GC);
}
static inline int
struct bch_fs *c = trans->c;
struct bch_fs_usage_online *fs_usage = NULL;
struct btree_insert_entry *i;
- unsigned mark_flags = trans->flags & BTREE_INSERT_BUCKET_INVALIDATE
- ? BCH_BUCKET_MARK_BUCKET_INVALIDATE
- : 0;
unsigned iter, u64s = 0;
bool marking = false;
int ret;
}
trans_for_each_update(trans, i)
- if (likely(!(trans->flags & BTREE_INSERT_NOMARK)) &&
- iter_has_nontrans_triggers(i->iter))
+ if (iter_has_nontrans_triggers(i->iter))
bch2_mark_update(trans, i->iter, i->k,
- &fs_usage->u, mark_flags);
+ &fs_usage->u, i->trigger_flags);
if (marking)
bch2_trans_fs_usage_apply(trans, fs_usage);
goto out;
}
- if (likely(!(trans->flags & BTREE_INSERT_NOMARK)) &&
- iter_has_trans_triggers(i->iter)) {
- ret = bch2_trans_mark_update(trans, i->iter, i->k);
+ if (iter_has_trans_triggers(i->iter)) {
+ ret = bch2_trans_mark_update(trans, i->iter, i->k,
+ i->trigger_flags);
if (unlikely(ret)) {
if (ret == -EINTR)
trace_trans_restart_mark(trans->ip);
if (IS_ERR(iter))
return PTR_ERR(iter);
- bch2_trans_update(trans, iter, k);
+ bch2_trans_update(trans, iter, k, 0);
return 0;
}
break;
}
- bch2_trans_update(trans, iter, &delete);
+ bch2_trans_update(trans, iter, &delete, 0);
ret = bch2_trans_commit(trans, NULL, journal_seq,
BTREE_INSERT_NOFAIL);
if (ret)
bkey_init(&k.k);
k.k.p = iter->pos;
- bch2_trans_update(trans, iter, &k);
+ bch2_trans_update(trans, iter, &k, 0);
return bch2_trans_commit(trans, NULL, NULL,
BTREE_INSERT_NOFAIL|
BTREE_INSERT_USE_RESERVE|flags);
percpu_rwsem_assert_held(&c->mark_lock); \
\
for (gc = 0; gc < 2 && !ret; gc++) \
- if (!gc == !(flags & BCH_BUCKET_MARK_GC) || \
+ if (!gc == !(flags & BTREE_TRIGGER_GC) || \
(gc && gc_visited(c, pos))) \
ret = fn(c, __VA_ARGS__, gc); \
ret; \
struct bch_fs_usage *fs_usage,
u64 journal_seq, unsigned flags)
{
- bool gc = flags & BCH_BUCKET_MARK_GC;
+ bool gc = flags & BTREE_TRIGGER_GC;
struct bkey_alloc_unpacked u;
struct bch_dev *ca;
struct bucket *g;
/*
* alloc btree is read in by bch2_alloc_read, not gc:
*/
- if ((flags & BCH_BUCKET_MARK_GC) &&
- !(flags & BCH_BUCKET_MARK_BUCKET_INVALIDATE))
+ if ((flags & BTREE_TRIGGER_GC) &&
+ !(flags & BTREE_TRIGGER_BUCKET_INVALIDATE))
return 0;
ca = bch_dev_bkey_exists(c, k.k->p.inode);
}
}));
- if (!(flags & BCH_BUCKET_MARK_ALLOC_READ))
+ if (!(flags & BTREE_TRIGGER_ALLOC_READ))
bch2_dev_usage_update(c, ca, fs_usage, old, m, gc);
g->io_time[READ] = u.read_time;
* not:
*/
- if ((flags & BCH_BUCKET_MARK_BUCKET_INVALIDATE) &&
+ if ((flags & BTREE_TRIGGER_BUCKET_INVALIDATE) &&
old.cached_sectors) {
update_cached_sectors(c, fs_usage, ca->dev_idx,
-old.cached_sectors);
{
BUG_ON(!n || !d);
- if (flags & BCH_BUCKET_MARK_OVERWRITE_SPLIT) {
+ if (flags & BTREE_TRIGGER_OVERWRITE_SPLIT) {
BUG_ON(offset + -delta > old_size);
return -disk_sectors_scaled(n, d, old_size) +
disk_sectors_scaled(n, d, offset) +
disk_sectors_scaled(n, d, old_size - offset + delta);
- } else if (flags & BCH_BUCKET_MARK_OVERWRITE) {
+ } else if (flags & BTREE_TRIGGER_OVERWRITE) {
BUG_ON(offset + -delta > old_size);
return -disk_sectors_scaled(n, d, old_size) +
u64 journal_seq,
unsigned flags)
{
- bool enabled = !(flags & BCH_BUCKET_MARK_OVERWRITE);
- bool gc = flags & BCH_BUCKET_MARK_GC;
+ bool enabled = !(flags & BTREE_TRIGGER_OVERWRITE);
+ bool gc = flags & BTREE_TRIGGER_GC;
unsigned i;
for (i = 0; i < v->nr_blocks; i++) {
struct bch_fs_usage *fs_usage,
u64 journal_seq, unsigned flags)
{
- bool gc = flags & BCH_BUCKET_MARK_GC;
+ bool gc = flags & BTREE_TRIGGER_GC;
struct bucket_mark old, new;
struct bch_dev *ca = bch_dev_bkey_exists(c, p.ptr.dev);
struct bucket *g = PTR_BUCKET(ca, &p.ptr, gc);
new.data_type = data_type;
}
- if (flags & BCH_BUCKET_MARK_NOATOMIC) {
+ if (flags & BTREE_TRIGGER_NOATOMIC) {
g->_mark = new;
break;
}
unsigned *nr_data,
unsigned *nr_parity)
{
- bool gc = flags & BCH_BUCKET_MARK_GC;
+ bool gc = flags & BTREE_TRIGGER_GC;
struct stripe *m;
unsigned old, new;
int blocks_nonempty_delta;
struct bch_fs_usage *fs_usage,
u64 journal_seq, unsigned flags)
{
- bool gc = flags & BCH_BUCKET_MARK_GC;
+ bool gc = flags & BTREE_TRIGGER_GC;
struct bkey_s_c_stripe s = bkey_s_c_to_stripe(k);
size_t idx = s.k->p.offset;
struct stripe *m = genradix_ptr(&c->stripes[gc], idx);
spin_lock(&c->ec_stripes_heap_lock);
- if (!m || ((flags & BCH_BUCKET_MARK_OVERWRITE) && !m->alive)) {
+ if (!m || ((flags & BTREE_TRIGGER_OVERWRITE) && !m->alive)) {
spin_unlock(&c->ec_stripes_heap_lock);
bch_err_ratelimited(c, "error marking nonexistent stripe %zu",
idx);
return -1;
}
- if (!(flags & BCH_BUCKET_MARK_OVERWRITE)) {
+ if (!(flags & BTREE_TRIGGER_OVERWRITE)) {
m->sectors = le16_to_cpu(s.v->sectors);
m->algorithm = s.v->algorithm;
m->nr_blocks = s.v->nr_blocks;
#endif
/* gc recalculates these fields: */
- if (!(flags & BCH_BUCKET_MARK_GC)) {
+ if (!(flags & BTREE_TRIGGER_GC)) {
for (i = 0; i < s.v->nr_blocks; i++) {
m->block_sectors[i] =
stripe_blockcount_get(s.v, i);
preempt_disable();
- if (!fs_usage || (flags & BCH_BUCKET_MARK_GC))
+ if (!fs_usage || (flags & BTREE_TRIGGER_GC))
fs_usage = fs_usage_ptr(c, journal_seq,
- flags & BCH_BUCKET_MARK_GC);
+ flags & BTREE_TRIGGER_GC);
switch (k.k->type) {
case KEY_TYPE_alloc:
ret = bch2_mark_alloc(c, k, fs_usage, journal_seq, flags);
break;
case KEY_TYPE_btree_ptr:
- sectors = !(flags & BCH_BUCKET_MARK_OVERWRITE)
+ sectors = !(flags & BTREE_TRIGGER_OVERWRITE)
? c->opts.btree_node_size
: -c->opts.btree_node_size;
ret = bch2_mark_stripe(c, k, fs_usage, journal_seq, flags);
break;
case KEY_TYPE_inode:
- if (!(flags & BCH_BUCKET_MARK_OVERWRITE))
+ if (!(flags & BTREE_TRIGGER_OVERWRITE))
fs_usage->nr_inodes++;
else
fs_usage->nr_inodes--;
unsigned offset = 0;
s64 sectors = 0;
- flags |= BCH_BUCKET_MARK_OVERWRITE;
+ flags |= BTREE_TRIGGER_OVERWRITE;
if (btree_node_is_extents(b)
? bkey_cmp(new->k.p, bkey_start_pos(old.k)) <= 0
offset = bkey_start_offset(&new->k) -
bkey_start_offset(old.k);
sectors = -((s64) new->k.size);
- flags |= BCH_BUCKET_MARK_OVERWRITE_SPLIT;
+ flags |= BTREE_TRIGGER_OVERWRITE_SPLIT;
break;
}
struct bkey_packed *_k;
int ret = 0;
+ if (unlikely(flags & BTREE_TRIGGER_NORUN))
+ return 0;
+
if (!btree_node_type_needs_gc(iter->btree_id))
return 0;
bch2_mark_key_locked(c, bkey_i_to_s_c(insert),
0, insert->k.size,
fs_usage, trans->journal_res.seq,
- BCH_BUCKET_MARK_INSERT|flags);
+ BTREE_TRIGGER_INSERT|flags);
- if (unlikely(trans->flags & BTREE_INSERT_NOMARK_OVERWRITES))
+ if (unlikely(flags & BTREE_TRIGGER_NOOVERWRITES))
return 0;
/*
return new_k;
}
- bch2_trans_update(trans, iter, new_k);
+ bch2_trans_update(trans, iter, new_k, 0);
return new_k;
}
goto err;
}
- if ((flags & BCH_BUCKET_MARK_OVERWRITE) &&
+ if ((flags & BTREE_TRIGGER_OVERWRITE) &&
(bkey_start_offset(k.k) < idx ||
k.k->p.offset > idx + sectors))
goto out;
r_v = bkey_i_to_reflink_v(new_k);
le64_add_cpu(&r_v->v.refcount,
- !(flags & BCH_BUCKET_MARK_OVERWRITE) ? 1 : -1);
+ !(flags & BTREE_TRIGGER_OVERWRITE) ? 1 : -1);
if (!r_v->v.refcount) {
r_v->k.type = KEY_TYPE_deleted;
switch (k.k->type) {
case KEY_TYPE_btree_ptr:
- sectors = !(flags & BCH_BUCKET_MARK_OVERWRITE)
+ sectors = !(flags & BTREE_TRIGGER_OVERWRITE)
? c->opts.btree_node_size
: -c->opts.btree_node_size;
case KEY_TYPE_inode:
d = replicas_deltas_realloc(trans, 0);
- if (!(flags & BCH_BUCKET_MARK_OVERWRITE))
+ if (!(flags & BTREE_TRIGGER_OVERWRITE))
d->nr_inodes++;
else
d->nr_inodes--;
int bch2_trans_mark_update(struct btree_trans *trans,
struct btree_iter *iter,
- struct bkey_i *insert)
+ struct bkey_i *insert,
+ unsigned flags)
{
struct btree *b = iter->l[0].b;
struct btree_node_iter node_iter = iter->l[0].iter;
struct bkey_packed *_k;
int ret;
+ if (unlikely(flags & BTREE_TRIGGER_NORUN))
+ return 0;
+
if (!btree_node_type_needs_gc(iter->btree_id))
return 0;
ret = bch2_trans_mark_key(trans, bkey_i_to_s_c(insert),
- 0, insert->k.size, BCH_BUCKET_MARK_INSERT);
+ 0, insert->k.size, BTREE_TRIGGER_INSERT);
if (ret)
return ret;
- if (unlikely(trans->flags & BTREE_INSERT_NOMARK_OVERWRITES))
+ if (unlikely(flags & BTREE_TRIGGER_NOOVERWRITES))
return 0;
while ((_k = bch2_btree_node_iter_peek_filter(&node_iter, b,
struct bkey_s_c k;
unsigned offset = 0;
s64 sectors = 0;
- unsigned flags = BCH_BUCKET_MARK_OVERWRITE;
+ unsigned flags = BTREE_TRIGGER_OVERWRITE;
k = bkey_disassemble(b, _k, &unpacked);
offset = bkey_start_offset(&insert->k) -
bkey_start_offset(k.k);
sectors = -((s64) insert->k.size);
- flags |= BCH_BUCKET_MARK_OVERWRITE_SPLIT;
+ flags |= BTREE_TRIGGER_OVERWRITE_SPLIT;
break;
}
size_t, enum bch_data_type, unsigned,
struct gc_pos, unsigned);
-#define BCH_BUCKET_MARK_INSERT (1 << 0)
-#define BCH_BUCKET_MARK_OVERWRITE (1 << 1)
-#define BCH_BUCKET_MARK_OVERWRITE_SPLIT (1 << 2)
-#define BCH_BUCKET_MARK_BUCKET_INVALIDATE (1 << 3)
-#define BCH_BUCKET_MARK_GC (1 << 4)
-#define BCH_BUCKET_MARK_ALLOC_READ (1 << 5)
-#define BCH_BUCKET_MARK_NOATOMIC (1 << 6)
-
int bch2_mark_key_locked(struct bch_fs *, struct bkey_s_c, unsigned, s64,
struct bch_fs_usage *, u64, unsigned);
int bch2_mark_key(struct bch_fs *, struct bkey_s_c, unsigned, s64,
struct replicas_delta_list *);
int bch2_trans_mark_key(struct btree_trans *, struct bkey_s_c,
unsigned, s64, unsigned);
-int bch2_trans_mark_update(struct btree_trans *,
- struct btree_iter *iter,
- struct bkey_i *insert);
+int bch2_trans_mark_update(struct btree_trans *, struct btree_iter *iter,
+ struct bkey_i *insert, unsigned);
void bch2_trans_fs_usage_apply(struct btree_trans *, struct bch_fs_usage_online *);
/* disk reservations: */
*/
new_dst->k.p = src_iter->pos;
bch2_trans_update(trans, src_iter,
- &new_dst->k_i);
+ &new_dst->k_i, 0);
return 0;
} else {
/* If we're overwriting, we can't insert new_dst
}
}
- bch2_trans_update(trans, src_iter, &new_src->k_i);
- bch2_trans_update(trans, dst_iter, &new_dst->k_i);
+ bch2_trans_update(trans, src_iter, &new_src->k_i, 0);
+ bch2_trans_update(trans, dst_iter, &new_dst->k_i, 0);
return 0;
}
stripe->k.p = iter->pos;
- bch2_trans_update(&trans, iter, &stripe->k_i);
+ bch2_trans_update(&trans, iter, &stripe->k_i, 0);
ret = bch2_trans_commit(&trans, NULL, NULL,
BTREE_INSERT_NOFAIL);
extent_stripe_ptr_add(e, s, ec_ptr, idx);
- bch2_trans_update(&trans, iter, sk.k);
+ bch2_trans_update(&trans, iter, sk.k, 0);
ret = bch2_trans_commit(&trans, NULL, NULL,
BTREE_INSERT_NOFAIL|
spin_unlock(&c->ec_stripes_heap_lock);
- bch2_trans_update(trans, iter, &new_key->k_i);
+ bch2_trans_update(trans, iter, &new_key->k_i, 0);
return bch2_trans_commit(trans, NULL, NULL,
BTREE_INSERT_NOFAIL|flags);
bch2_mark_key(c, btree ? btree_k : journal_k,
0, 0, NULL, 0,
- BCH_BUCKET_MARK_ALLOC_READ|
- BCH_BUCKET_MARK_NOATOMIC);
+ BTREE_TRIGGER_ALLOC_READ|
+ BTREE_TRIGGER_NOATOMIC);
if (btree)
btree_k = bch2_btree_iter_next(btree_iter);
struct bpos next_pos;
struct bpos move_pos = POS(inode->v.i_ino, offset >> 9);
struct bpos atomic_end;
- unsigned commit_flags = 0;
+ unsigned trigger_flags = 0;
k = insert
? bch2_btree_iter_peek_prev(src)
bkey_start_pos(&delete.k));
}
- bch2_trans_update(&trans, dst, copy.k);
- bch2_trans_update(&trans, del ?: src, &delete);
-
if (copy.k->k.size == k.k->size) {
/*
* If we're moving the entire extent, we can skip
* running triggers:
*/
- commit_flags |= BTREE_INSERT_NOMARK;
+ trigger_flags |= BTREE_TRIGGER_NORUN;
} else {
/* We might end up splitting compressed extents: */
unsigned nr_ptrs =
BUG_ON(ret);
}
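+
+ /*
+ * Queue the updates only once trigger_flags is known, since trigger
+ * suppression is now a per-update flag rather than a commit flag:
+ */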
+ bch2_trans_update(&trans, dst, copy.k, trigger_flags);
+ bch2_trans_update(&trans, del ?: src, &delete, trigger_flags);
+
ret = bch2_trans_commit(&trans, &disk_res,
&inode->ei_journal_seq,
- BTREE_INSERT_NOFAIL|
- commit_flags);
+ BTREE_INSERT_NOFAIL);
bch2_disk_reservation_put(c, &disk_res);
bkey_err:
if (del)
bkey_init(&delete.k);
delete.k.p = k_iter->pos;
- bch2_trans_update(trans, k_iter, &delete);
+ bch2_trans_update(trans, k_iter, &delete, 0);
return bch2_hash_set(trans, desc, &h->info, k_iter->pos.inode,
tmp, BCH_HASH_SET_MUST_CREATE) ?:
BTREE_INSERT_NOFAIL|
BTREE_INSERT_LAZY_RW,
TRANS_RESET_MEM,
- (bch2_trans_update(trans, iter, &d->k_i), 0));
+ (bch2_trans_update(trans, iter, &d->k_i, 0), 0));
if (ret)
goto err;
BTREE_INSERT_NOFAIL|
BTREE_INSERT_LAZY_RW,
TRANS_RESET_MEM,
- (bch2_trans_update(&trans, iter, &n->k_i), 0));
+ (bch2_trans_update(&trans, iter, &n->k_i, 0), 0));
kfree(n);
if (ret)
goto err;
BTREE_INSERT_NOFAIL|
BTREE_INSERT_LAZY_RW,
TRANS_RESET_MEM,
- (bch2_trans_update(trans, iter, &p.inode.k_i), 0));
+ (bch2_trans_update(trans, iter, &p.inode.k_i, 0), 0));
if (ret)
bch_err(c, "error in fsck: error %i "
"updating inode", ret);
return PTR_ERR(inode_p);
bch2_inode_pack(inode_p, inode);
- bch2_trans_update(trans, iter, &inode_p->inode.k_i);
+ bch2_trans_update(trans, iter, &inode_p->inode.k_i, 0);
return 0;
}
inode_u->bi_generation = bkey_generation(k);
bch2_inode_pack(inode_p, inode_u);
- bch2_trans_update(trans, iter, &inode_p->inode.k_i);
+ bch2_trans_update(trans, iter, &inode_p->inode.k_i, 0);
return 0;
}
}
delete.v.bi_generation = cpu_to_le32(bi_generation);
}
- bch2_trans_update(&trans, iter, &delete.k_i);
+ bch2_trans_update(&trans, iter, &delete.k_i, 0);
ret = bch2_trans_commit(&trans, NULL, NULL,
BTREE_INSERT_NOFAIL);
if (delta || new_i_size) {
bch2_inode_pack(&inode_p, &inode_u);
bch2_trans_update(trans, inode_iter,
- &inode_p.inode.k_i);
+ &inode_p.inode.k_i, 0);
}
bch2_trans_iter_put(trans, inode_iter);
}
- bch2_trans_update(trans, iter, k);
+ bch2_trans_update(trans, iter, k, 0);
ret = bch2_trans_commit(trans, disk_res, journal_seq,
BTREE_INSERT_NOCHECK_RW|
if (!bch2_bkey_narrow_crcs(new.k, new_crc))
goto out;
- bch2_trans_update(&trans, iter, new.k);
+ bch2_trans_update(&trans, iter, new.k, 0);
ret = bch2_trans_commit(&trans, NULL, NULL,
BTREE_INSERT_NOFAIL|
BTREE_INSERT_NOWAIT);
bch2_btree_iter_set_pos(iter, bkey_start_pos(&sk.k->k));
- bch2_trans_update(&trans, iter, sk.k);
+ bch2_trans_update(&trans, iter, sk.k, 0);
ret = bch2_trans_commit(&trans, NULL, NULL,
BTREE_INSERT_NOFAIL);
goto next;
}
- bch2_trans_update(&trans, iter, insert);
+ bch2_trans_update(&trans, iter, insert, 0);
ret = bch2_trans_commit(&trans, &op->res,
op_journal_seq(op),
if (qdq->d_fieldmask & QC_INO_HARD)
new_quota.v.c[Q_INO].hardlimit = cpu_to_le64(qdq->d_ino_hardlimit);
- bch2_trans_update(&trans, iter, &new_quota.k_i);
+ bch2_trans_update(&trans, iter, &new_quota.k_i, 0);
ret = bch2_trans_commit(&trans, NULL, NULL, 0);
bch2_cut_front(split_iter->pos, split);
bch2_cut_back(atomic_end, split);
- bch2_trans_update(&trans, split_iter, split);
+ bch2_trans_update(&trans, split_iter, split, !remark
+ ? BTREE_TRIGGER_NORUN
+ : BTREE_TRIGGER_NOOVERWRITES);
bch2_btree_iter_set_pos(iter, split->k.p);
} while (bkey_cmp(iter->pos, k->k.p) < 0);
if (remark) {
ret = bch2_trans_mark_key(&trans, bkey_i_to_s_c(k),
0, -((s64) k->k.size),
- BCH_BUCKET_MARK_OVERWRITE) ?:
- bch2_trans_commit(&trans, &disk_res, NULL,
- BTREE_INSERT_NOFAIL|
- BTREE_INSERT_LAZY_RW|
- BTREE_INSERT_NOMARK_OVERWRITES);
- } else {
- ret = bch2_trans_commit(&trans, &disk_res, NULL,
- BTREE_INSERT_NOFAIL|
- BTREE_INSERT_LAZY_RW|
- BTREE_INSERT_JOURNAL_REPLAY|
- BTREE_INSERT_NOMARK);
+ BTREE_TRIGGER_OVERWRITE);
+ if (ret)
+ goto err;
}
- if (ret)
- goto err;
+ ret = bch2_trans_commit(&trans, &disk_res, NULL,
+ BTREE_INSERT_NOFAIL|
+ BTREE_INSERT_LAZY_RW|
+ BTREE_INSERT_JOURNAL_REPLAY);
err:
if (ret == -EINTR)
goto retry;
return bch2_trans_exit(&trans) ?: ret;
}
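+/*
+ * Journal replay inserts each key with triggers suppressed
+ * (BTREE_TRIGGER_NORUN), which replaces the old transaction-wide
+ * BTREE_INSERT_NOMARK flag:
+ */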
+static int __bch2_journal_replay_key(struct btree_trans *trans,
+ enum btree_id id, struct bkey_i *k)
+{
+ struct btree_iter *iter;
+
+ iter = bch2_trans_get_iter(trans, id, bkey_start_pos(&k->k),
+ BTREE_ITER_INTENT);
+ if (IS_ERR(iter))
+ return PTR_ERR(iter);
+
+ bch2_trans_update(trans, iter, k, BTREE_TRIGGER_NORUN);
+ return 0;
+}
+
+static int bch2_journal_replay_key(struct bch_fs *c, enum btree_id id,
+ struct bkey_i *k)
+{
+ return bch2_trans_do(c, NULL, NULL,
+ BTREE_INSERT_NOFAIL|
+ BTREE_INSERT_LAZY_RW|
+ BTREE_INSERT_JOURNAL_REPLAY,
+ __bch2_journal_replay_key(&trans, id, k));
+}
+
static int bch2_journal_replay(struct bch_fs *c,
struct journal_keys keys)
{
else if (btree_node_type_is_extents(i->btree_id))
ret = bch2_extent_replay_key(c, i->btree_id, i->k);
else
- ret = bch2_btree_insert(c, i->btree_id, i->k,
- NULL, NULL,
- BTREE_INSERT_NOFAIL|
- BTREE_INSERT_LAZY_RW|
- BTREE_INSERT_JOURNAL_REPLAY|
- BTREE_INSERT_NOMARK);
+ ret = bch2_journal_replay_key(c, i->btree_id, i->k);
if (ret) {
bch_err(c, "journal replay: error %d while replaying key",
r_v->v.refcount = 0;
memcpy(r_v->v.start, e->v.start, bkey_val_bytes(&e->k));
- bch2_trans_update(trans, reflink_iter, &r_v->k_i);
+ bch2_trans_update(trans, reflink_iter, &r_v->k_i, 0);
r_p = bch2_trans_kmalloc(trans, sizeof(*r_p));
if (IS_ERR(r_p))
set_bkey_val_bytes(&r_p->k, sizeof(r_p->v));
r_p->v.idx = cpu_to_le64(bkey_start_offset(&r_v->k));
- bch2_trans_update(trans, extent_iter, &r_p->k_i);
+ bch2_trans_update(trans, extent_iter, &r_p->k_i, 0);
err:
if (!IS_ERR(reflink_iter)) {
c->reflink_hint = reflink_iter->pos.offset;
swap(iter, slot);
insert->k.p = iter->pos;
- bch2_trans_update(trans, iter, insert);
+ bch2_trans_update(trans, iter, insert, 0);
}
goto out;
delete->k.p = iter->pos;
delete->k.type = ret ? KEY_TYPE_whiteout : KEY_TYPE_deleted;
- bch2_trans_update(trans, iter, delete);
+ bch2_trans_update(trans, iter, delete, 0);
return 0;
}
ret = bch2_btree_iter_traverse(iter);
BUG_ON(ret);
- bch2_trans_update(&trans, iter, &k.k_i);
+ bch2_trans_update(&trans, iter, &k.k_i, 0);
ret = bch2_trans_commit(&trans, NULL, NULL, 0);
BUG_ON(ret);
ret = bch2_btree_iter_traverse(iter);
BUG_ON(ret);
- bch2_trans_update(&trans, iter, &k.k_i);
+ bch2_trans_update(&trans, iter, &k.k_i, 0);
ret = bch2_trans_commit(&trans, NULL, NULL, 0);
BUG_ON(ret);
bkey_cookie_init(&k.k_i);
k.k.p = iter->pos;
- bch2_trans_update(&trans, iter, &k.k_i);
+ bch2_trans_update(&trans, iter, &k.k_i, 0);
ret = bch2_trans_commit(&trans, NULL, NULL, 0);
BUG_ON(ret);
}
BTREE_ITER_SLOTS|BTREE_ITER_INTENT, k, ret) {
insert.k.p = iter->pos;
- bch2_trans_update(&trans, iter, &insert.k_i);
+ bch2_trans_update(&trans, iter, &insert.k_i, 0);
ret = bch2_trans_commit(&trans, NULL, NULL, 0);
BUG_ON(ret);
bkey_reassemble(&u.k_i, k);
- bch2_trans_update(&trans, iter, &u.k_i);
+ bch2_trans_update(&trans, iter, &u.k_i, 0);
ret = bch2_trans_commit(&trans, NULL, NULL, 0);
BUG_ON(ret);
}