        BUG_ON(rhashtable_remove_fast(&c->table, &ck->hash,
                                      bch2_btree_key_cache_params));
        memset(&ck->key, ~0, sizeof(ck->key));
+
+       c->nr_keys--;
}
static void bkey_cached_free(struct btree_key_cache *c,
                return NULL;
        }
+       c->nr_keys++;
+
        list_move(&ck->list, &c->clean);
        six_unlock_write(&ck->c.lock);
        bch2_journal_pin_drop(j, &ck->journal);
        bch2_journal_preres_put(j, &ck->res);
-       clear_bit(BKEY_CACHED_DIRTY, &ck->flags);
        if (!evict) {
                mutex_lock(&c->btree_key_cache.lock);
+               if (test_bit(BKEY_CACHED_DIRTY, &ck->flags)) {
+                       clear_bit(BKEY_CACHED_DIRTY, &ck->flags);
+                       c->btree_key_cache.nr_dirty--;
+               }
+
                list_move_tail(&ck->list, &c->btree_key_cache.clean);
                mutex_unlock(&c->btree_key_cache.lock);
        } else {
                six_lock_write(&ck->c.lock, NULL, NULL);
                mutex_lock(&c->btree_key_cache.lock);
+               if (test_bit(BKEY_CACHED_DIRTY, &ck->flags)) {
+                       clear_bit(BKEY_CACHED_DIRTY, &ck->flags);
+                       c->btree_key_cache.nr_dirty--;
+               }
+
                bkey_cached_evict(&c->btree_key_cache, ck);
                bkey_cached_free(&c->btree_key_cache, ck);
                mutex_unlock(&c->btree_key_cache.lock);
        if (!test_bit(BKEY_CACHED_DIRTY, &ck->flags)) {
                mutex_lock(&c->btree_key_cache.lock);
-               list_del_init(&ck->list);
+               list_move(&ck->list, &c->btree_key_cache.dirty);
                set_bit(BKEY_CACHED_DIRTY, &ck->flags);
+               c->btree_key_cache.nr_dirty++;
                mutex_unlock(&c->btree_key_cache.lock);
        }
}
#endif
-void bch2_fs_btree_key_cache_exit(struct btree_key_cache *c)
+void bch2_fs_btree_key_cache_exit(struct btree_key_cache *bc)
{
+       struct bch_fs *c = container_of(bc, struct bch_fs, btree_key_cache);
        struct bkey_cached *ck, *n;
-       mutex_lock(&c->lock);
-       list_for_each_entry_safe(ck, n, &c->clean, list) {
+       mutex_lock(&bc->lock);
+       list_splice(&bc->dirty, &bc->clean);
+
+       list_for_each_entry_safe(ck, n, &bc->clean, list) {
                kfree(ck->k);
                kfree(ck);
+               bc->nr_keys--;
        }
-       list_for_each_entry_safe(ck, n, &c->freed, list)
+
+       BUG_ON(bc->nr_dirty && !bch2_journal_error(&c->journal));
+       BUG_ON(bc->nr_keys);
+
+       list_for_each_entry_safe(ck, n, &bc->freed, list)
                kfree(ck);
-       mutex_unlock(&c->lock);
+       mutex_unlock(&bc->lock);
-       rhashtable_destroy(&c->table);
+       rhashtable_destroy(&bc->table);
}
void bch2_fs_btree_key_cache_init_early(struct btree_key_cache *c)
        mutex_init(&c->lock);
        INIT_LIST_HEAD(&c->freed);
        INIT_LIST_HEAD(&c->clean);
+       INIT_LIST_HEAD(&c->dirty);
}
int bch2_fs_btree_key_cache_init(struct btree_key_cache *c)
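The lists and counters manipulated above live in struct btree_key_cache. For reference, a minimal sketch of the layout this patch appears to rely on follows (kernel types from <linux/mutex.h>, <linux/rhashtable.h>, <linux/list.h>); the field types and comments are assumptions, not part of the patch:

struct btree_key_cache {
        struct mutex            lock;
        struct rhashtable       table;
        struct list_head        freed;          /* recycled bkey_cached objects */
        struct list_head        clean;          /* cached keys with no unflushed update */
        struct list_head        dirty;          /* cached keys with BKEY_CACHED_DIRTY set */

        size_t                  nr_keys;        /* keys currently in the hash table */
        size_t                  nr_dirty;       /* keys currently on the dirty list */
};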
        b->ob.nr = 0;
-       clear_btree_node_dirty(b);
+       clear_btree_node_dirty(c, b);
        btree_node_lock_type(c, b, SIX_LOCK_write);
        __btree_node_free(c, b);
        b = as->prealloc_nodes[--as->nr_prealloc_nodes];
        set_btree_node_accessed(b);
-       set_btree_node_dirty(b);
+       set_btree_node_dirty(c, b);
        set_btree_node_need_write(b);
        bch2_bset_init_first(b, &b->data->keys);
                closure_wake_up(&c->btree_interior_update_wait);
        }
-       clear_btree_node_dirty(b);
+       clear_btree_node_dirty(c, b);
        clear_btree_node_need_write(b);
        /*
                bch2_btree_node_iter_advance(node_iter, b);
        bch2_btree_bset_insert_key(iter, b, node_iter, insert);
-       set_btree_node_dirty(b);
+       set_btree_node_dirty(as->c, b);
        set_btree_node_need_write(b);
}
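The btree_update_interior.c hunks above only change call sites: set_btree_node_dirty() and clear_btree_node_dirty() now take the bch_fs pointer so dirty-flag transitions can be counted filesystem-wide. A minimal sketch of helpers with that shape, assuming a BTREE_NODE_dirty flag bit in b->flags and an atomic counter; the counter's name and type (c->btree_cache.dirty as atomic_long_t) are assumptions:

static inline void set_btree_node_dirty(struct bch_fs *c, struct btree *b)
{
        /* count only the clean -> dirty transition */
        if (!test_and_set_bit(BTREE_NODE_dirty, &b->flags))
                atomic_long_inc(&c->btree_cache.dirty);
}

static inline void clear_btree_node_dirty(struct bch_fs *c, struct btree *b)
{
        /* count only the dirty -> clean transition */
        if (test_and_clear_bit(BTREE_NODE_dirty, &b->flags))
                atomic_long_dec(&c->btree_cache.dirty);
}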