bch2_alloc_read_key(c, bkey_i_to_s_c(k));
}
+ percpu_down_write(&c->mark_lock);
for_each_member_device(ca, c, i)
bch2_dev_usage_from_buckets(c, ca);
+ percpu_up_write(&c->mark_lock);
mutex_lock(&c->bucket_clock[READ].lock);
for_each_member_device(ca, c, i) {
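The first hunk takes c->mark_lock, a percpu rw-semaphore, in write mode around the loop that rebuilds per-device usage from bucket marks, so the rebuild itself no longer needs the read side (see the this_cpu_ptr() hunk further down). As a rough illustration of the locking pattern only, not the bcachefs implementation, here is a minimal sketch with hypothetical names (usage_lock, usage_ctr): hot-path updaters hold the read side while touching their per-cpu slot, and a wholesale rebuild takes the write side, which waits out all readers first.

```c
/* Illustrative sketch only; names are hypothetical, not bcachefs code. */
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/percpu.h>
#include <linux/percpu-rwsem.h>

struct usage_ctr {
	u64	sectors;		/* hypothetical counter */
};

static struct percpu_rw_semaphore usage_lock;
static struct usage_ctr __percpu *usage;

static int usage_init(void)
{
	usage = alloc_percpu(struct usage_ctr);
	if (!usage)
		return -ENOMEM;
	return percpu_init_rwsem(&usage_lock);
}

/* hot path: per-cpu update under the read side */
static void usage_add(u64 sectors)
{
	percpu_down_read(&usage_lock);
	this_cpu_add(usage->sectors, sectors);
	percpu_up_read(&usage_lock);
}

/* slow path: rebuild the counters wholesale while excluding all readers */
static void usage_rebuild(u64 total)
{
	int cpu;

	percpu_down_write(&usage_lock);
	for_each_possible_cpu(cpu)
		per_cpu_ptr(usage, cpu)->sectors = 0;
	this_cpu_add(usage->sectors, total);
	percpu_up_write(&usage_lock);
}
```

The appeal of a percpu rwsem here is that the read side is close to a plain per-cpu operation in the common case, so frequent usage updates stay cheap while the rare rebuild still gets full exclusion.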
if (c) {
lockdep_assert_held(&c->sb_lock);
percpu_down_read(&c->mark_lock);
- } else {
- preempt_disable();
}
for (i = 0; i < layout->nr_superblocks; i++) {
gc_phase(GC_PHASE_SB), flags);
}
- if (c) {
+ if (c)
percpu_up_read(&c->mark_lock);
- } else {
- preempt_enable();
- }
}
static void bch2_mark_superblocks(struct bch_fs *c)
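The two hunks above drop the `} else { preempt_disable(); }` fallbacks that were used when no bch_fs is attached yet; preemption protection moves into the mark helpers themselves (see the do_mark_fn()/__bch2_mark_alloc_bucket hunk further down). A hedged sketch of the resulting shape, with hypothetical stand-in types and the marking body elided:

```c
/*
 * Sketch only, hypothetical names: roughly how the superblock-marking path
 * reads after the change.  The fs pointer may be NULL while a device is
 * being prepared before it joins a filesystem, so the mark lock is taken
 * conditionally; the NULL case no longer needs a caller-side
 * preempt_disable(), because the mark helpers now disable preemption
 * around their own per-cpu accesses.
 */
#include <linux/lockdep.h>
#include <linux/mutex.h>
#include <linux/percpu-rwsem.h>

struct fs_ctx {				/* stand-in for struct bch_fs */
	struct mutex			sb_lock;
	struct percpu_rw_semaphore	mark_lock;
};

static void mark_dev_superblock_sketch(struct fs_ctx *c)
{
	if (c) {
		lockdep_assert_held(&c->sb_lock);
		percpu_down_read(&c->mark_lock);
	}

	/* ... mark superblock and journal buckets here ... */

	if (c)
		percpu_up_read(&c->mark_lock);
}
```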
struct bucket_array *buckets;
struct bucket *g;
- percpu_down_read(&c->mark_lock);
+ /*
+ * This is only called during startup, before there's any multithreaded
+ * access to c->usage:
+ */
+ preempt_disable();
fs_usage = this_cpu_ptr(c->usage[0]);
+ preempt_enable();
+
buckets = bucket_array(ca);
for_each_bucket(g, buckets)
if (g->mark.data_type)
bch2_dev_usage_update(c, ca, fs_usage, old, g->mark, false);
- percpu_up_read(&c->mark_lock);
}
#define bucket_data_cmpxchg(c, ca, fs_usage, g, new, expr) \
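The hunk above (apparently bch2_dev_usage_from_buckets, the function called under the write lock in the first hunk) replaces the read lock on c->mark_lock with a brief preempt_disable()/preempt_enable() pair around the this_cpu_ptr() lookup, with the new comment carrying the justification: the function only runs during single-threaded startup. The short preemption-off window is still wanted because this_cpu_ptr() resolves the current CPU's slot and, with CONFIG_DEBUG_PREEMPT, complains when called from preemptible context. A minimal sketch of the same idiom, assuming a hypothetical per-cpu counter layout:

```c
/*
 * Sketch only, hypothetical names.  At startup, before any concurrent
 * updaters exist, it is enough to pin the CPU just long enough to resolve
 * the per-cpu slot; the later single-threaded accumulation needs no lock.
 * If the task migrates afterwards, the counts simply land in whichever
 * CPU's slot was picked and are summed over all CPUs when totals are read.
 */
#include <linux/percpu.h>
#include <linux/preempt.h>
#include <linux/types.h>

struct dev_usage {			/* hypothetical counter layout */
	u64	buckets;
	u64	sectors;
};

static void usage_accumulate_at_startup(struct dev_usage __percpu *usage,
					u64 buckets, u64 sectors)
{
	struct dev_usage *u;

	preempt_disable();
	u = this_cpu_ptr(usage);
	preempt_enable();

	/* single-threaded here, so plain loads and stores are sufficient */
	u->buckets += buckets;
	u->sectors += sectors;
}
```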
size_t b, bool owned_by_allocator,
struct gc_pos pos, unsigned flags)
{
+ preempt_disable();
+
do_mark_fn(__bch2_mark_alloc_bucket, c, pos, flags,
ca, b, owned_by_allocator);
+
+ preempt_enable();
}
static int bch2_mark_alloc(struct bch_fs *c, struct bkey_s_c k,
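The wrapper around __bch2_mark_alloc_bucket now brackets do_mark_fn() with preempt_disable()/preempt_enable() itself, which is what lets the call sites above lose their caller-side preempt_disable() branches: the helper that touches per-cpu state owns the preemption window. A sketch of that pattern, again with hypothetical names:

```c
/*
 * Sketch only, hypothetical names: a mark helper that touches per-cpu
 * state takes care of its own preemption window, so callers, with or
 * without a filesystem attached, need no preempt_disable() fallback.
 */
#include <linux/percpu.h>
#include <linux/preempt.h>
#include <linux/types.h>

struct alloc_stats {			/* hypothetical per-cpu stats */
	u64	buckets_alloc;
};

static void mark_alloc_bucket_sketch(struct alloc_stats __percpu *stats,
				     bool owned_by_allocator)
{
	preempt_disable();
	if (owned_by_allocator)
		this_cpu_ptr(stats)->buckets_alloc++;
	preempt_enable();
}
```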
if (c) {
percpu_down_read(&c->mark_lock);
spin_lock(&c->journal.lock);
- } else {
- preempt_disable();
}
pos = ja->nr ? (ja->cur_idx + 1) % ja->nr : 0;
if (c) {
spin_unlock(&c->journal.lock);
percpu_up_read(&c->mark_lock);
- } else {
- preempt_enable();
}
if (!new_fs)
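The journal-buckets hunk above keeps the usual nesting: c->mark_lock is taken for read before c->journal.lock and released after it, and its NULL-fs fallback to preempt_disable() is gone for the same reason as above. A sketch of the resulting acquire/release order, using stand-in types rather than the real struct bch_fs (lock initialisation omitted):

```c
/*
 * Sketch only, hypothetical names: the percpu rwsem (c->mark_lock in the
 * patch) is the outer lock, the journal spinlock the inner one, and they
 * are released in reverse order.  The fs pointer may still be NULL before
 * the device joins a filesystem; that case now relies on the mark helpers'
 * own preempt_disable() rather than a caller-side fallback.
 */
#include <linux/percpu-rwsem.h>
#include <linux/spinlock.h>

struct fs_journal_ctx {			/* stand-in for struct bch_fs */
	struct percpu_rw_semaphore	mark_lock;
	spinlock_t			journal_lock;
};

static void set_nr_journal_buckets_sketch(struct fs_journal_ctx *c)
{
	if (c) {
		percpu_down_read(&c->mark_lock);
		spin_lock(&c->journal_lock);
	}

	/* ... grow the journal bucket array, mark the new buckets ... */

	if (c) {
		spin_unlock(&c->journal_lock);
		percpu_up_read(&c->mark_lock);
	}
}
```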