/* Slowpath, don't want it inlined into btree_iter_traverse() */
static noinline struct btree *bch2_btree_node_fill(struct bch_fs *c,
+ struct btree_trans *trans,
struct btree_iter *iter,
const struct bkey_i *k,
enum btree_id btree_id,
* Parent node must be locked, else we could read in a btree node that's
* been freed:
*/
- if (iter && !bch2_btree_node_relock(iter, level + 1)) {
- btree_trans_restart(iter->trans);
+ if (trans && !bch2_btree_node_relock(trans, iter, level + 1)) {
+ btree_trans_restart(trans);
return ERR_PTR(-EINTR);
}
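
The hunk above sets the pattern for the whole patch: helpers stop reaching
through iter->trans and instead take the transaction as an explicit argument,
and "no transaction" is now signalled by a NULL trans rather than a NULL iter.
A toy sketch of the before/after convention (the toy_* names are invented
stand-ins, not actual bcachefs code):

#include <stdbool.h>

struct toy_trans { bool restarted; };
struct toy_iter  { struct toy_trans *trans; /* back-pointer being phased out */ };

/* Before: the helper digs the transaction out of the iterator. */
static void restart_old(struct toy_iter *iter)
{
	if (iter)
		iter->trans->restarted = true;
}

/* After: trans is explicit; the iterator needs no back-pointer and
 * callers without a transaction simply pass NULL. */
static void restart_new(struct toy_trans *trans)
{
	if (trans)
		trans->restarted = true;
}
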
six_unlock_intent(&b->c.lock);
/* Unlock before doing IO: */
- if (iter && sync)
- bch2_trans_unlock(iter->trans);
+ if (trans && sync)
+ bch2_trans_unlock(trans);
bch2_btree_node_read(c, b, sync);
if (!sync)
return NULL;
- if (iter &&
- (!bch2_trans_relock(iter->trans) ||
- !bch2_btree_iter_relock_intent(iter))) {
- BUG_ON(!iter->trans->restarted);
+ if (trans &&
+ (!bch2_trans_relock(trans) ||
+ !bch2_btree_iter_relock_intent(trans, iter))) {
+ BUG_ON(!trans->restarted);
return ERR_PTR(-EINTR);
}
if (!six_relock_type(&b->c.lock, lock_type, seq)) {
- btree_trans_restart(iter->trans);
+ btree_trans_restart(trans);
return ERR_PTR(-EINTR);
}
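
Taken together these hunks preserve bch2_btree_node_fill()'s existing locking
dance, just with trans explicit: drop transaction locks before a synchronous
read, relock the transaction and the node (at its saved seq) afterwards, and
fail with -EINTR once the transaction has been marked restarted. The shape of
that dance, reduced to a compilable toy (toy_* names are illustrative only):

#include <errno.h>
#include <stdbool.h>

struct toy_trans { bool restarted; };

static void toy_unlock(struct toy_trans *t) { (void) t; }
static bool toy_relock(struct toy_trans *t) { return !t->restarted; }
static void toy_blocking_read(void) { /* stands in for bch2_btree_node_read() */ }

static int fill_sync_sketch(struct toy_trans *trans /* may be NULL */)
{
	if (trans)
		toy_unlock(trans);	/* never hold btree locks across IO */

	toy_blocking_read();

	if (trans && !toy_relock(trans))
		return -EINTR;		/* transaction restarted; caller retries */
	return 0;
}
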
* else we could read in a btree node from disk that's been
* freed:
*/
- b = bch2_btree_node_fill(c, iter, k, iter->btree_id,
+ b = bch2_btree_node_fill(c, trans, iter, k, iter->btree_id,
level, lock_type, true);
/* We raced and found the btree node in the cache */
if (btree_node_read_locked(iter, level + 1))
btree_node_unlock(iter, level + 1);
- if (!btree_node_lock(b, k->k.p, level, iter, lock_type,
+ if (!btree_node_lock(trans, iter, b, k->k.p, level, lock_type,
lock_node_check_fn, (void *) k, trace_ip)) {
if (!trans->restarted)
goto retry;
b->c.level != level ||
race_fault())) {
six_unlock_type(&b->c.lock, lock_type);
- if (bch2_btree_node_relock(iter, level + 1))
+ if (bch2_btree_node_relock(trans, iter, level + 1))
goto retry;
trace_trans_restart_btree_node_reused(trans->ip,
* should_be_locked is not set on this iterator yet, so we need
* to relock it specifically:
*/
- if (iter &&
+ if (trans &&
(!bch2_trans_relock(trans) ||
- !bch2_btree_iter_relock_intent(iter))) {
+ !bch2_btree_iter_relock_intent(trans, iter))) {
BUG_ON(!trans->restarted);
return ERR_PTR(-EINTR);
}
if (nofill)
goto out;
- b = bch2_btree_node_fill(c, NULL, k, btree_id,
+ b = bch2_btree_node_fill(c, NULL, NULL, k, btree_id,
level, SIX_LOCK_read, true);
/* We raced and found the btree node in the cache */
return b;
}
-int bch2_btree_node_prefetch(struct bch_fs *c, struct btree_iter *iter,
+int bch2_btree_node_prefetch(struct bch_fs *c,
+ struct btree_trans *trans,
+ struct btree_iter *iter,
const struct bkey_i *k,
enum btree_id btree_id, unsigned level)
{
struct btree_cache *bc = &c->btree_cache;
struct btree *b;
- BUG_ON(iter && !btree_node_locked(iter, level + 1));
+ BUG_ON(trans && !btree_node_locked(iter, level + 1));
BUG_ON(level >= BTREE_MAX_DEPTH);
b = btree_cache_find(bc, k);
if (b)
return 0;
- b = bch2_btree_node_fill(c, iter, k, btree_id, level, SIX_LOCK_read, false);
+ b = bch2_btree_node_fill(c, trans, iter, k, btree_id,
+ level, SIX_LOCK_read, false);
return PTR_ERR_OR_ZERO(b);
}
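
bch2_btree_node_prefetch() keeps its contract: a cache hit returns 0
immediately, and a miss kicks off a fill whose ERR_PTR result is collapsed by
PTR_ERR_OR_ZERO(). The updated call convention is visible in the
btree_iter_prefetch() hunk further down. For reference, a userspace model of
the ERR_PTR encoding this relies on, where the kernel reserves the top 4095
pointer values for errnos (toy_ptr_err_or_zero is a stand-in, not the kernel
macro):

#include <stdint.h>

#define TOY_MAX_ERRNO 4095

static inline long toy_ptr_err_or_zero(const void *p)
{
	/* pointers in [-4095, -1] encode errnos; everything else is valid */
	return (uintptr_t) p >= (uintptr_t) -TOY_MAX_ERRNO
		? (long) (intptr_t) p
		: 0;
}
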
bch2_btree_node_unlock_write_inlined(trans, iter, b);
}
-void __bch2_btree_node_lock_write(struct btree_trans *trans,
- struct btree_iter *iter, struct btree *b)
+void __bch2_btree_node_lock_write(struct btree_trans *trans, struct btree *b)
{
- struct btree_iter *linked;
+ struct btree_iter *iter;
unsigned readers = 0;
- EBUG_ON(!btree_node_intent_locked(iter, b->c.level));
-
- trans_for_each_iter(trans, linked)
- if (linked->l[b->c.level].b == b &&
- btree_node_read_locked(linked, b->c.level))
+ trans_for_each_iter(trans, iter)
+ if (iter->l[b->c.level].b == b &&
+ btree_node_read_locked(iter, b->c.level))
readers++;
/*
this_cpu_add(*b->c.lock.readers, readers);
}
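
Dropping the iter argument changes nothing about what the function must
compute: a write lock cannot be taken while any iterator in the transaction
holds a read lock on the node, so the loop counts read locks across all of
them; those reads are temporarily discounted while the write lock is taken,
and the this_cpu_add() above restores them afterwards. The counting step in
isolation, as a toy model (toy_* names invented):

#include <stdbool.h>

struct toy_node { unsigned level; };

struct toy_trans_iter {
	struct toy_node *node[4];	/* node pointer per level */
	bool read_locked[4];
};

/* Mirror of the trans_for_each_iter() loop: count read locks held
 * anywhere in the transaction on node b. */
static unsigned count_readers(struct toy_trans_iter *iters, unsigned n,
			      struct toy_node *b)
{
	unsigned i, readers = 0;

	for (i = 0; i < n; i++)
		if (iters[i].node[b->level] == b &&
		    iters[i].read_locked[b->level])
			readers++;
	return readers;
}
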
-bool __bch2_btree_node_relock(struct btree_iter *iter, unsigned level)
+bool __bch2_btree_node_relock(struct btree_trans *trans,
+ struct btree_iter *iter, unsigned level)
{
struct btree *b = btree_iter_node(iter, level);
int want = __btree_lock_want(iter, level);
if (six_relock_type(&b->c.lock, want, iter->l[level].lock_seq) ||
(btree_node_lock_seq_matches(iter, b, level) &&
- btree_node_lock_increment(iter->trans, b, level, want))) {
+ btree_node_lock_increment(trans, b, level, want))) {
mark_btree_node_locked(iter, level, want);
return true;
} else {
}
}
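
Relocking here can succeed two ways: six_relock_type() retakes the lock only
if the sequence number still matches the one saved in iter->l[level].lock_seq,
proving no writer touched the node in between, or btree_node_lock_increment()
piggybacks on a lock another iterator in the same transaction already holds.
A toy model of the seq-validated path (simplified; the real seq lives in the
six lock's state word):

#include <stdbool.h>

struct toy_lock {
	unsigned seq;	/* bumped whenever the node is write-locked */
	bool held;
};

static bool toy_relock(struct toy_lock *l, unsigned seq_seen)
{
	if (!l->held && l->seq == seq_seen) {
		l->held = true;	/* nothing changed since we last held it */
		return true;
	}
	return false;
}
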
-static bool bch2_btree_node_upgrade(struct btree_iter *iter, unsigned level)
+static bool bch2_btree_node_upgrade(struct btree_trans *trans,
+ struct btree_iter *iter, unsigned level)
{
struct btree *b = iter->l[level].b;
goto success;
if (btree_node_lock_seq_matches(iter, b, level) &&
- btree_node_lock_increment(iter->trans, b, level, BTREE_NODE_INTENT_LOCKED)) {
+ btree_node_lock_increment(trans, b, level, BTREE_NODE_INTENT_LOCKED)) {
btree_node_unlock(iter, level);
goto success;
}
break;
if (!(upgrade
- ? bch2_btree_node_upgrade(iter, l)
- : bch2_btree_node_relock(iter, l))) {
+ ? bch2_btree_node_upgrade(trans, iter, l)
+ : bch2_btree_node_relock(trans, iter, l))) {
(upgrade
? trace_node_upgrade_fail
: trace_node_relock_fail)(trans->ip, trace_ip,
}
/* Slowpath: */
-bool __bch2_btree_node_lock(struct btree *b, struct bpos pos,
- unsigned level, struct btree_iter *iter,
+bool __bch2_btree_node_lock(struct btree_trans *trans,
+ struct btree_iter *iter,
+ struct btree *b, struct bpos pos, unsigned level,
enum six_lock_type type,
six_lock_should_sleep_fn should_sleep_fn, void *p,
unsigned long ip)
{
- struct btree_trans *trans = iter->trans;
struct btree_iter *linked, *deadlock_iter = NULL;
u64 start_time = local_clock();
unsigned reason = 9;
/* Btree iterator locking: */
#ifdef CONFIG_BCACHEFS_DEBUG
-static void bch2_btree_iter_verify_locks(struct btree_trans *trans,
- struct btree_iter *iter)
+static void bch2_btree_iter_verify_locks(struct btree_iter *iter)
{
unsigned l;
- if (!(trans->iters_linked & (1ULL << iter->idx))) {
- BUG_ON(iter->nodes_locked);
- return;
- }
-
for (l = 0; btree_iter_node(iter, l); l++) {
if (iter->uptodate >= BTREE_ITER_NEED_RELOCK &&
!btree_node_locked(iter, l))
struct btree_iter *iter;
trans_for_each_iter(trans, iter)
- bch2_btree_iter_verify_locks(trans, iter);
+ bch2_btree_iter_verify_locks(iter);
}
#else
-static inline void bch2_btree_iter_verify_locks(struct btree_trans *trans,
- struct btree_iter *iter) {}
+static inline void bch2_btree_iter_verify_locks(struct btree_iter *iter) {}
#endif
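
With the iters_linked check gone, the debug helper only inspects the one
iterator, so it no longer needs the transaction. The invariant it enforces,
loosely modelled below (the real check compares wanted against held lock
types per level; toy_dbg_iter is an invented stand-in):

#include <assert.h>
#include <stdbool.h>

struct toy_dbg_iter {
	bool needs_relock;	/* uptodate >= BTREE_ITER_NEED_RELOCK */
	bool locked[4];
	void *node[4];
};

static void verify_locks_sketch(const struct toy_dbg_iter *iter)
{
	for (unsigned l = 0; l < 4 && iter->node[l]; l++) {
		/* a level may only be unlocked if a relock is pending */
		if (iter->needs_relock && !iter->locked[l])
			continue;
		assert(iter->locked[l]);
	}
}
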
/*
* Only for btree_cache.c - only relocks intent locks
*/
-bool bch2_btree_iter_relock_intent(struct btree_iter *iter)
+bool bch2_btree_iter_relock_intent(struct btree_trans *trans,
+ struct btree_iter *iter)
{
- struct btree_trans *trans = iter->trans;
unsigned l;
for (l = iter->level;
l < iter->locks_want && btree_iter_node(iter, l);
l++) {
- if (!bch2_btree_node_relock(iter, l)) {
+ if (!bch2_btree_node_relock(trans, iter, l)) {
trace_node_relock_fail(trans->ip, _RET_IP_,
btree_iter_type(iter) == BTREE_ITER_CACHED,
iter->btree_id, &iter->real_pos,
return ret;
}
-bool __bch2_btree_iter_upgrade(struct btree_iter *iter,
+bool __bch2_btree_iter_upgrade(struct btree_trans *trans,
+ struct btree_iter *iter,
unsigned new_locks_want)
{
- struct btree_trans *trans = iter->trans;
struct btree_iter *linked;
EBUG_ON(iter->locks_want >= new_locks_want);
}
}
- bch2_btree_trans_verify_locks(iter->trans);
+ bch2_btree_iter_verify_locks(iter);
}
void bch2_trans_downgrade(struct btree_trans *trans)
#ifdef CONFIG_BCACHEFS_DEBUG
-static void bch2_btree_iter_verify_cached(struct btree_iter *iter)
+static void bch2_btree_iter_verify_cached(struct btree_trans *trans,
+ struct btree_iter *iter)
{
struct bkey_cached *ck;
bool locked = btree_node_locked(iter, 0);
- if (!bch2_btree_node_relock(iter, 0))
+ if (!bch2_btree_node_relock(trans, iter, 0))
return;
ck = (void *) iter->l[0].b;
if (!locked)
btree_node_unlock(iter, 0);
}
-static void bch2_btree_iter_verify_level(struct btree_iter *iter,
- unsigned level)
+static void bch2_btree_iter_verify_level(struct btree_trans *trans,
+ struct btree_iter *iter, unsigned level)
{
struct btree_iter_level *l;
struct btree_node_iter tmp;
if (btree_iter_type(iter) == BTREE_ITER_CACHED) {
if (!level)
- bch2_btree_iter_verify_cached(iter);
+ bch2_btree_iter_verify_cached(trans, iter);
return;
}
if (!btree_iter_node(iter, level))
return;
- if (!bch2_btree_node_relock(iter, level))
+ if (!bch2_btree_node_relock(trans, iter, level))
return;
BUG_ON(!btree_iter_pos_in_node(iter, l->b));
break;
}
- bch2_btree_iter_verify_level(iter, i);
+ bch2_btree_iter_verify_level(trans, iter, i);
}
- bch2_btree_iter_verify_locks(trans, iter);
+ bch2_btree_iter_verify_locks(iter);
}
static void bch2_btree_iter_verify_entry_exit(struct btree_iter *iter)
return;
trans_for_each_iter_with_node(trans, b, iter)
- bch2_btree_iter_verify_level(iter, b->c.level);
+ bch2_btree_iter_verify_level(trans, iter, b->c.level);
}
#else
-static inline void bch2_btree_iter_verify_level(struct btree_iter *iter, unsigned l) {}
+static inline void bch2_btree_iter_verify_level(struct btree_trans *trans,
+ struct btree_iter *iter, unsigned l) {}
static inline void bch2_btree_iter_verify(struct btree_iter *iter) {}
static inline void bch2_btree_iter_verify_entry_exit(struct btree_iter *iter) {}
trans_for_each_iter_with_node(trans, b, linked) {
__bch2_btree_iter_fix_key_modified(linked, b, where);
- bch2_btree_iter_verify_level(linked, b->c.level);
+ bch2_btree_iter_verify_level(trans, linked, b->c.level);
}
}
__bch2_btree_node_iter_fix(linked, b,
&linked->l[b->c.level].iter, t,
where, clobber_u64s, new_u64s);
- bch2_btree_iter_verify_level(linked, b->c.level);
+ bch2_btree_iter_verify_level(trans, linked, b->c.level);
}
}
/*
* Verify that iterator for parent node points to child node:
*/
-static void btree_iter_verify_new_node(struct btree_iter *iter, struct btree *b)
+static void btree_iter_verify_new_node(struct btree_trans *trans,
+ struct btree_iter *iter, struct btree *b)
{
struct btree_iter_level *l;
unsigned plevel;
parent_locked = btree_node_locked(iter, plevel);
- if (!bch2_btree_node_relock(iter, plevel))
+ if (!bch2_btree_node_relock(trans, iter, plevel))
return;
l = &iter->l[plevel];
char buf4[100];
struct bkey uk = bkey_unpack_key(b, k);
- bch2_dump_btree_node(iter->trans->c, l->b);
+ bch2_dump_btree_node(trans->c, l->b);
bch2_bpos_to_text(&PBUF(buf1), iter->real_pos);
bch2_bkey_to_text(&PBUF(buf2), &uk);
bch2_bpos_to_text(&PBUF(buf3), b->data->min_key);
if (!parent_locked)
btree_node_unlock(iter, b->c.level + 1);
}
-static inline void __btree_iter_init(struct btree_iter *iter,
- unsigned level)
+static inline void __btree_iter_level_init(struct btree_iter *iter,
+ unsigned level)
{
struct btree_iter_level *l = &iter->l[level];
btree_iter_set_dirty(iter, BTREE_ITER_NEED_PEEK);
}
-static inline void btree_iter_node_set(struct btree_iter *iter,
- struct btree *b)
+static inline void btree_iter_level_init(struct btree_trans *trans,
+ struct btree_iter *iter,
+ struct btree *b)
{
BUG_ON(btree_iter_type(iter) == BTREE_ITER_CACHED);
- btree_iter_verify_new_node(iter, b);
+ btree_iter_verify_new_node(trans, iter, b);
EBUG_ON(!btree_iter_pos_in_node(iter, b));
EBUG_ON(b->c.lock.state.seq & 1);
iter->l[b->c.level].lock_seq = b->c.lock.state.seq;
iter->l[b->c.level].b = b;
- __btree_iter_init(iter, b->c.level);
+ __btree_iter_level_init(iter, b->c.level);
}
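
The renames (__btree_iter_init to __btree_iter_level_init, btree_iter_node_set
to btree_iter_level_init) make the naming say what the code does: initialize
one level of the iterator from a node. What gets recorded is the node pointer
plus a snapshot of the lock's sequence number, exactly what
__bch2_btree_node_relock() validates later; in the usual seqlock convention an
odd seq means a writer holds the lock, which is what the EBUG_ON above rules
out before snapshotting. In miniature (toy_* names invented):

struct toy_node  { unsigned level; unsigned lock_seq; };
struct toy_level { unsigned lock_seq; struct toy_node *b; };

/* Record node + seq so a later relock can prove the node is unchanged. */
static void level_init_sketch(struct toy_level *l, struct toy_node *b)
{
	l->lock_seq = b->lock_seq;
	l->b = b;
}
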
/*
mark_btree_node_locked(linked, b->c.level, (enum six_lock_type) t);
}
- btree_iter_node_set(linked, b);
+ btree_iter_level_init(trans, linked, b);
}
}
struct btree_iter *linked;
trans_for_each_iter_with_node(trans, b, linked)
- __btree_iter_init(linked, b->c.level);
+ __btree_iter_level_init(linked, b->c.level);
}
static int lock_root_check_fn(struct six_lock *lock, void *p)
}
lock_type = __btree_lock_want(iter, iter->level);
- if (unlikely(!btree_node_lock(b, SPOS_MAX, iter->level,
- iter, lock_type,
+ if (unlikely(!btree_node_lock(trans, iter, b, SPOS_MAX,
+ iter->level, lock_type,
lock_root_check_fn, rootp,
trace_ip))) {
if (trans->restarted)
iter->l[i].b = NULL;
mark_btree_node_locked(iter, iter->level, lock_type);
- btree_iter_node_set(iter, b);
+ btree_iter_level_init(trans, iter, b);
return 0;
}
bch2_bkey_buf_init(&tmp);
while (nr && !ret) {
- if (!bch2_btree_node_relock(iter, iter->level))
+ if (!bch2_btree_node_relock(trans, iter, iter->level))
break;
bch2_btree_node_iter_advance(&node_iter, l->b);
break;
bch2_bkey_buf_unpack(&tmp, c, l->b, k);
- ret = bch2_btree_node_prefetch(c, iter, tmp.k, iter->btree_id,
- iter->level - 1);
+ ret = bch2_btree_node_prefetch(c, trans, iter, tmp.k,
+ iter->btree_id, iter->level - 1);
}
if (!was_locked)
btree_node_unlock(iter, iter->level);
bch2_bkey_buf_exit(&tmp, c);
return ret;
}
-static noinline void btree_node_mem_ptr_set(struct btree_iter *iter,
+static noinline void btree_node_mem_ptr_set(struct btree_trans *trans,
+ struct btree_iter *iter,
unsigned plevel, struct btree *b)
{
struct btree_iter_level *l = &iter->l[plevel];
struct bkey_packed *k;
struct bch_btree_ptr_v2 *bp;
- if (!bch2_btree_node_relock(iter, plevel))
+ if (!bch2_btree_node_relock(trans, iter, plevel))
return;
k = bch2_btree_node_iter_peek_all(&l->iter, l->b);
goto err;
mark_btree_node_locked(iter, level, lock_type);
- btree_iter_node_set(iter, b);
+ btree_iter_level_init(trans, iter, b);
if (tmp.k->k.type == KEY_TYPE_btree_ptr_v2 &&
unlikely(b != btree_node_mem_ptr(tmp.k)))
- btree_node_mem_ptr_set(iter, level + 1, b);
+ btree_node_mem_ptr_set(trans, iter, level + 1, b);
if (iter->flags & BTREE_ITER_PREFETCH)
ret = btree_iter_prefetch(trans, iter);
if (btree_node_read_locked(iter, level + 1))
btree_node_unlock(iter, level + 1);
iter->level = level;
- bch2_btree_iter_verify_locks(trans, iter);
+ bch2_btree_iter_verify_locks(iter);
err:
bch2_bkey_buf_exit(&tmp, c);
return ret;
if (prev) {
if (iter->btree_id == prev->btree_id &&
iter->locks_want < prev->locks_want)
- __bch2_btree_iter_upgrade(iter, prev->locks_want);
+ __bch2_btree_iter_upgrade(trans, iter, prev->locks_want);
else if (!iter->locks_want && prev->locks_want)
- __bch2_btree_iter_upgrade(iter, 1);
+ __bch2_btree_iter_upgrade(trans, iter, 1);
}
prev = iter;
return __btree_iter_traverse_all(trans, 0, _RET_IP_);
}
-static inline bool btree_iter_good_node(struct btree_iter *iter,
+static inline bool btree_iter_good_node(struct btree_trans *trans,
+ struct btree_iter *iter,
unsigned l, int check_pos)
{
if (!is_btree_node(iter, l) ||
- !bch2_btree_node_relock(iter, l))
+ !bch2_btree_node_relock(trans, iter, l))
return false;
if (check_pos < 0 && btree_iter_pos_before_node(iter, iter->l[l].b))
return false;
if (check_pos > 0 && btree_iter_pos_after_node(iter, iter->l[l].b))
return false;
return true;
}
-static inline unsigned btree_iter_up_until_good_node(struct btree_iter *iter,
+static inline unsigned btree_iter_up_until_good_node(struct btree_trans *trans,
+ struct btree_iter *iter,
int check_pos)
{
unsigned l = iter->level;
while (btree_iter_node(iter, l) &&
- !btree_iter_good_node(iter, l, check_pos)) {
+ !btree_iter_good_node(trans, iter, l, check_pos)) {
btree_node_unlock(iter, l);
iter->l[l].b = BTREE_ITER_NO_NODE_UP;
l++;
}
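
btree_iter_up_until_good_node() is the walk-up half of traversal: starting at
the current level it drops every level whose node can no longer be relocked or
no longer covers the search position, and returns the first level that is
still good, from which traversal descends again. Its shape as a toy sketch
(toy_* names invented; toy_good_node stands in for the relock and position
checks above):

#include <stdbool.h>
#include <stddef.h>

struct toy_walk_iter {
	unsigned level;
	void *node[4];
	bool locked[4];
};

static bool toy_good_node(struct toy_walk_iter *iter, unsigned l, int check_pos)
{
	(void) check_pos;
	return iter->locked[l];
}

static unsigned up_until_good_sketch(struct toy_walk_iter *iter, int check_pos)
{
	unsigned l = iter->level;

	while (l < 4 && iter->node[l] && !toy_good_node(iter, l, check_pos)) {
		iter->locked[l] = false;	/* btree_node_unlock() */
		iter->node[l] = NULL;		/* BTREE_ITER_NO_NODE_UP */
		l++;
	}
	return l;
}
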
if (btree_iter_type(iter) == BTREE_ITER_CACHED) {
- ret = bch2_btree_iter_traverse_cached(iter);
+ ret = bch2_btree_iter_traverse_cached(trans, iter);
goto out;
}
if (unlikely(iter->level >= BTREE_MAX_DEPTH))
goto out;
- iter->level = btree_iter_up_until_good_node(iter, 0);
+ iter->level = btree_iter_up_until_good_node(trans, iter, 0);
/* If we need intent locks, take them too: */
for (l = iter->level + 1;
l < iter->locks_want && btree_iter_node(iter, l);
l++)
- if (!bch2_btree_node_relock(iter, l))
+ if (!bch2_btree_node_relock(trans, iter, l))
+ if (!bch2_btree_node_relock(trans, iter, l))
while (iter->level <= l) {
btree_node_unlock(iter, iter->level);
iter->l[iter->level].b = BTREE_ITER_NO_NODE_UP;
return;
}
- l = btree_iter_up_until_good_node(iter, cmp);
+ l = btree_iter_up_until_good_node(trans, iter, cmp);
if (btree_iter_node(iter, l)) {
/*
*/
if (cmp < 0 ||
!btree_iter_advance_to_pos(iter, &iter->l[l], 8))
- __btree_iter_init(iter, l);
+ __btree_iter_level_init(iter, l);
/* Don't leave it locked if we're not supposed to: */
if (btree_lock_want(iter, l) == BTREE_NODE_UNLOCKED)