if (IS_ERR(c))
return c;
- mutex_lock(&c->state_lock);
+ down_write(&c->state_lock);
if (!test_bit(BCH_FS_STARTED, &c->flags)) {
- mutex_unlock(&c->state_lock);
+ up_write(&c->state_lock);
closure_put(&c->cl);
pr_err("err mounting %s: incomplete filesystem", dev_name);
return ERR_PTR(-EINVAL);
}
- mutex_unlock(&c->state_lock);
+ up_write(&c->state_lock);
set_bit(BCH_FS_BDEV_MOUNTED, &c->flags);
return c;
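Throughout the patch, the write side of the conversion is mechanical: every mutex_lock()/mutex_unlock() pair on c->state_lock becomes down_write()/up_write(), so the existing critical sections keep their exclusive semantics; the payoff comes where read-mostly paths can now take the lock shared. A minimal sketch of the pattern, assuming only <linux/rwsem.h> (example_fs and the helpers are illustrative names, not bcachefs symbols):

#include <linux/rwsem.h>

/* Illustrative stand-ins, not bcachefs symbols. */
struct example_fs {
	struct rw_semaphore state_lock;	/* was: struct mutex state_lock */
};

static void example_fs_init(struct example_fs *c)
{
	init_rwsem(&c->state_lock);	/* was: mutex_init(&c->state_lock) */
}

/* Exclusive section: translates one-for-one from the old mutex. */
static void example_state_change(struct example_fs *c)
{
	down_write(&c->state_lock);	/* was: mutex_lock(&c->state_lock) */
	/* ...mutate filesystem state... */
	up_write(&c->state_lock);	/* was: mutex_unlock(&c->state_lock) */
}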
return ret;
if (opts.read_only != c->opts.read_only) {
- mutex_lock(&c->state_lock);
+ down_write(&c->state_lock);
if (opts.read_only) {
bch2_fs_read_only(c);
} else {
ret = bch2_fs_read_write(c);
if (ret) {
bch_err(c, "error going rw: %i", ret);
- mutex_unlock(&c->state_lock);
+ up_write(&c->state_lock);
return -EINVAL;
}
}
c->opts.read_only = opts.read_only;
- mutex_unlock(&c->state_lock);
+ up_write(&c->state_lock);
}
if (opts.errors >= 0)
static void bch2_fs_read_only_work(struct work_struct *work)
{
struct bch_fs *c =
container_of(work, struct bch_fs, read_only_work);
- mutex_lock(&c->state_lock);
+ down_write(&c->state_lock);
bch2_fs_read_only(c);
- mutex_unlock(&c->state_lock);
+ up_write(&c->state_lock);
}
static void bch2_fs_read_only_async(struct bch_fs *c)
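bch2_fs_read_only_work() exists so the transition can run in process context; the async entry point only needs to queue the work item, which lets callers that cannot block on state_lock still request the change. A hedged sketch of that shape; the queue_work() call and the choice of system_long_wq are assumptions, not shown in this hunk:

#include <linux/workqueue.h>

/* Sketch: defer the read-only transition to process context, where
 * sleeping on state_lock (now via down_write()) is safe. */
static void example_read_only_async(struct bch_fs *c)
{
	queue_work(system_long_wq, &c->read_only_work);
}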
cancel_work_sync(&c->journal_seq_blacklist_gc_work);
- mutex_lock(&c->state_lock);
+ down_write(&c->state_lock);
bch2_fs_read_only(c);
- mutex_unlock(&c->state_lock);
+ up_write(&c->state_lock);
for_each_member_device(ca, c, i)
if (ca->kobj.state_in_sysfs &&
bch2_opts_create_sysfs_files(&c->opts_dir))
return "error creating sysfs objects";
- mutex_lock(&c->state_lock);
+ down_write(&c->state_lock);
err = "error creating sysfs objects";
__for_each_member_device(ca, c, i, NULL)
list_add(&c->list, &bch_fs_list);
err = NULL;
err:
- mutex_unlock(&c->state_lock);
+ up_write(&c->state_lock);
return err;
}
c->minor = -1;
c->disk_sb.fs_sb = true;
- mutex_init(&c->state_lock);
+ init_rwsem(&c->state_lock);
mutex_init(&c->sb_lock);
mutex_init(&c->replicas_gc_lock);
mutex_init(&c->btree_root_lock);
unsigned i;
int ret = -EINVAL;
- mutex_lock(&c->state_lock);
+ down_write(&c->state_lock);
BUG_ON(test_bit(BCH_FS_STARTED, &c->flags));
print_mount_opts(c);
ret = 0;
out:
- mutex_unlock(&c->state_lock);
+ up_write(&c->state_lock);
return ret;
err:
switch (ret) {
int bch2_dev_set_state(struct bch_fs *c, struct bch_dev *ca,
enum bch_member_state new_state, int flags)
{
int ret;
- mutex_lock(&c->state_lock);
+ down_write(&c->state_lock);
ret = __bch2_dev_set_state(c, ca, new_state, flags);
- mutex_unlock(&c->state_lock);
+ up_write(&c->state_lock);
return ret;
}
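This wrapper shows the convention the patch preserves: the double-underscore variant requires state_lock to be held, while the public entry point takes it exclusively around the call. A sketch of the same convention with illustrative names (the lockdep annotation is an addition, not in the original):

/* __example_set_state() requires the caller to hold state_lock;
 * example_set_state() is the self-locking public entry point. */
static int __example_set_state(struct bch_fs *c, int new_state)
{
	lockdep_assert_held(&c->state_lock);	/* documents the contract */
	/* ...perform the state transition... */
	return 0;
}

static int example_set_state(struct bch_fs *c, int new_state)
{
	int ret;

	down_write(&c->state_lock);
	ret = __example_set_state(c, new_state);
	up_write(&c->state_lock);
	return ret;
}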
unsigned dev_idx = ca->dev_idx, data;
int ret = -EINVAL;
- mutex_lock(&c->state_lock);
+ down_write(&c->state_lock);
/*
* We consume a reference to ca->ref, regardless of whether we succeed
* or fail:
*/
bch2_write_super(c);
mutex_unlock(&c->sb_lock);
- mutex_unlock(&c->state_lock);
+ up_write(&c->state_lock);
return 0;
err:
if (ca->mi.state == BCH_MEMBER_STATE_RW &&
!percpu_ref_is_zero(&ca->io_ref))
__bch2_dev_read_write(c, ca);
- mutex_unlock(&c->state_lock);
+ up_write(&c->state_lock);
return ret;
}
dev_usage_clear(ca);
- mutex_lock(&c->state_lock);
+ down_write(&c->state_lock);
mutex_lock(&c->sb_lock);
err = "insufficient space in new superblock";
goto err_late;
}
- mutex_unlock(&c->state_lock);
+ up_write(&c->state_lock);
return 0;
err_unlock:
mutex_unlock(&c->sb_lock);
- mutex_unlock(&c->state_lock);
+ up_write(&c->state_lock);
err:
if (ca)
bch2_dev_free(ca);
const char *err;
int ret;
- mutex_lock(&c->state_lock);
+ down_write(&c->state_lock);
ret = bch2_read_super(path, &opts, &sb);
if (ret) {
- mutex_unlock(&c->state_lock);
+ up_write(&c->state_lock);
return ret;
}
bch2_write_super(c);
mutex_unlock(&c->sb_lock);
- mutex_unlock(&c->state_lock);
+ up_write(&c->state_lock);
return 0;
err:
- mutex_unlock(&c->state_lock);
+ up_write(&c->state_lock);
bch2_free_super(&sb);
bch_err(c, "error bringing %s online: %s", path, err);
return -EINVAL;
int bch2_dev_offline(struct bch_fs *c, struct bch_dev *ca, int flags)
{
- mutex_lock(&c->state_lock);
+ down_write(&c->state_lock);
if (!bch2_dev_is_online(ca)) {
bch_err(ca, "Already offline");
- mutex_unlock(&c->state_lock);
+ up_write(&c->state_lock);
return 0;
}
if (!bch2_dev_state_allowed(c, ca, BCH_MEMBER_STATE_FAILED, flags)) {
bch_err(ca, "Cannot offline required disk");
- mutex_unlock(&c->state_lock);
+ up_write(&c->state_lock);
return -EINVAL;
}
__bch2_dev_offline(c, ca);
- mutex_unlock(&c->state_lock);
+ up_write(&c->state_lock);
return 0;
}
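bch2_dev_offline() appears whole, and it shows why these paths take the lock exclusively across the full check-then-act span: the bch2_dev_state_allowed() verdict would go stale if the state could change between the check and __bch2_dev_offline(). For contrast, a hypothetical (wrong) variant that drops the lock between the two:

/* Hypothetical anti-pattern, for contrast only: another writer can
 * change the device state in the window between the two lock holds,
 * invalidating the earlier check. */
static int racy_offline(struct bch_fs *c, struct bch_dev *ca, int flags)
{
	bool ok;

	down_read(&c->state_lock);
	ok = bch2_dev_state_allowed(c, ca, BCH_MEMBER_STATE_FAILED, flags);
	up_read(&c->state_lock);

	if (!ok)
		return -EINVAL;

	down_write(&c->state_lock);	/* too late: state may have changed */
	__bch2_dev_offline(c, ca);
	up_write(&c->state_lock);
	return 0;
}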
struct bch_member *mi;
int ret = 0;
- mutex_lock(&c->state_lock);
+ down_write(&c->state_lock);
if (nbuckets < ca->mi.nbuckets) {
bch_err(ca, "Cannot shrink yet");
bch2_recalc_capacity(c);
err:
- mutex_unlock(&c->state_lock);
+ up_write(&c->state_lock);
return ret;
}
goto err;
err = "bch2_dev_online() error";
- mutex_lock(&c->state_lock);
+ down_write(&c->state_lock);
for (i = 0; i < nr_devices; i++)
if (bch2_dev_attach_bdev(c, &sb[i])) {
- mutex_unlock(&c->state_lock);
+ up_write(&c->state_lock);
goto err_print;
}
- mutex_unlock(&c->state_lock);
+ up_write(&c->state_lock);
err = "insufficient devices";
if (!bch2_fs_may_start(c))
return 0;
}
-STORE(__bch2_fs)
+STORE(bch2_fs)
{
struct bch_fs *c = container_of(kobj, struct bch_fs, kobj);
if (attr == &sysfs_trigger_btree_coalesce)
bch2_coalesce(c);
- if (attr == &sysfs_trigger_gc)
+ if (attr == &sysfs_trigger_gc) {
+ down_read(&c->state_lock);
bch2_gc(c, NULL, false, false);
+ up_read(&c->state_lock);
+ }
if (attr == &sysfs_trigger_alloc_write) {
bool wrote;
sc.nr_to_scan = strtoul_or_return(buf);
c->btree_cache.shrink.scan_objects(&c->btree_cache.shrink, &sc);
}
+
#ifdef CONFIG_BCACHEFS_TESTS
if (attr == &sysfs_perf_test) {
char *tmp = kstrdup(buf, GFP_KERNEL), *p = tmp;
#endif
return size;
}
-
-STORE(bch2_fs)
-{
- struct bch_fs *c = container_of(kobj, struct bch_fs, kobj);
-
- mutex_lock(&c->state_lock);
- size = __bch2_fs_store(kobj, attr, buf, size);
- mutex_unlock(&c->state_lock);
-
- return size;
-}
SYSFS_OPS(bch2_fs);
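The deleted wrapper above used to take state_lock around every sysfs store. With the rwsem, the locking moves into the one handler that needs it, and only as a reader, presumably because gc just needs the RO/RW state and the member devices held stable while it runs; triggering gc therefore no longer excludes other readers of the filesystem state. A condensed sketch of the resulting shape (illustrative wrapper, same calls as the hunk above):

/* Shared access suffices for a long-running operation that only needs
 * the state held stable; down_write() callers still exclude it. */
static ssize_t example_trigger_gc(struct bch_fs *c)
{
	down_read(&c->state_lock);
	bch2_gc(c, NULL, false, false);
	up_read(&c->state_lock);
	return 0;
}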
struct attribute *bch2_fs_files[] = {