&journal_seq);
fifo_push(&ca->free[RESERVE_BTREE], bu);
- bucket_set_dirty(ca, bu);
}
}
bkey_for_each_ptr(ptrs, ptr) {
struct bch_dev *ca = bch_dev_bkey_exists(c, ptr->dev);
- size_t b = PTR_BUCKET_NR(ca, ptr);
- struct bucket *g = PTR_BUCKET(ca, ptr);
+ struct bucket *g = PTR_BUCKET(ca, ptr, true);
if (mustfix_fsck_err_on(!g->gen_valid, c,
"found ptr with missing gen in alloc btree,\n"
"type %u gen %u",
k.k->type, ptr->gen)) {
- g->_mark.gen = ptr->gen;
- g->gen_valid = 1;
- bucket_set_dirty(ca, b);
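+			/* repair: take the gen from the ptr and flag the bucket dirty in memory: */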
+ g->_mark.gen = ptr->gen;
+ g->_mark.dirty = true;
+ g->gen_valid = 1;
}
if (mustfix_fsck_err_on(gen_cmp(ptr->gen, g->mark.gen) > 0, c,
"%u ptr gen in the future: %u > %u",
k.k->type, ptr->gen, g->mark.gen)) {
- g->_mark.gen = ptr->gen;
- g->gen_valid = 1;
- bucket_set_dirty(ca, b);
+ g->_mark.gen = ptr->gen;
+ g->_mark.dirty = true;
+ g->gen_valid = 1;
set_bit(BCH_FS_FIXED_GENS, &c->flags);
}
}
bkey_for_each_ptr(ptrs, ptr) {
struct bch_dev *ca = bch_dev_bkey_exists(c, ptr->dev);
- size_t b = PTR_BUCKET_NR(ca, ptr);
- struct bucket *g = __bucket(ca, b, true);
+ struct bucket *g = PTR_BUCKET(ca, ptr, true);
if (gen_after(g->oldest_gen, ptr->gen))
g->oldest_gen = ptr->gen;
struct bch_dev *ca;
unsigned i;
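+	/* take mark_lock for write before publishing the new gc position and allocating gc usage counters: */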
+ percpu_down_write(&c->mark_lock);
+
/*
* indicate to stripe code that we need to allocate for the gc stripes
* radix tree, too
*/
gc_pos_set(c, gc_phase(GC_PHASE_START));
- percpu_down_write(&c->mark_lock);
BUG_ON(c->usage[1]);
c->usage[1] = __alloc_percpu_gfp(sizeof(struct bch_fs_usage) +
/* leaf node needs to be split */
BTREE_INSERT_BTREE_NODE_FULL,
BTREE_INSERT_ENOSPC,
- BTREE_INSERT_NEED_GC_LOCK,
BTREE_INSERT_NEED_MARK_REPLICAS,
};
struct btree *b;
struct disk_reservation disk_res = { 0, 0 };
unsigned sectors = nr_nodes * c->opts.btree_node_size;
- int ret, disk_res_flags = BCH_DISK_RESERVATION_GC_LOCK_HELD;
+ int ret, disk_res_flags = 0;
if (flags & BTREE_INSERT_NOFAIL)
disk_res_flags |= BCH_DISK_RESERVATION_NOFAIL;
ret = bch2_disk_reservation_add(c, &as->reserve->disk_res,
c->opts.btree_node_size *
bch2_bkey_nr_ptrs(bkey_i_to_s_c(&new_key->k_i)),
- BCH_DISK_RESERVATION_NOFAIL|
- BCH_DISK_RESERVATION_GC_LOCK_HELD);
+ BCH_DISK_RESERVATION_NOFAIL);
BUG_ON(ret);
parent = btree_node_parent(iter, b);
ret = -EINTR;
}
break;
- case BTREE_INSERT_NEED_GC_LOCK:
- ret = -EINTR;
-
- if (!down_read_trylock(&c->gc_lock)) {
- if (flags & BTREE_INSERT_NOUNLOCK)
- goto out;
-
- bch2_btree_iter_unlock(trans->entries[0].iter);
- down_read(&c->gc_lock);
- }
- up_read(&c->gc_lock);
- break;
case BTREE_INSERT_ENOSPC:
ret = -ENOSPC;
break;
}
static void __bch2_invalidate_bucket(struct bch_fs *c, struct bch_dev *ca,
- size_t b, struct bucket_mark *old,
+ size_t b, struct bucket_mark *ret,
bool gc)
{
struct bch_fs_usage *fs_usage = this_cpu_ptr(c->usage[gc]);
struct bucket *g = __bucket(ca, b, gc);
- struct bucket_mark new;
+ struct bucket_mark old, new;
- *old = bucket_data_cmpxchg(c, ca, fs_usage, g, new, ({
+ old = bucket_data_cmpxchg(c, ca, fs_usage, g, new, ({
BUG_ON(!is_available_bucket(new));
new.owned_by_allocator = true;
new.gen++;
}));
- if (old->cached_sectors)
+ if (old.cached_sectors)
update_cached_sectors(c, fs_usage, ca->dev_idx,
- -old->cached_sectors);
+ -old.cached_sectors);
+
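+	/* ret is optional; the caller passes NULL when updating gc's copy: */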
+ if (ret)
+ *ret = old;
}
void bch2_invalidate_bucket(struct bch_fs *c, struct bch_dev *ca,
__bch2_invalidate_bucket(c, ca, b, old, false);
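+	/* if gc has started, mirror the invalidate in gc's copy of the bucket marks: */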
+ if (gc_visited(c, gc_phase(GC_PHASE_START)))
+ __bch2_invalidate_bucket(c, ca, b, NULL, true);
+
if (!old->owned_by_allocator && old->cached_sectors)
trace_invalidate(ca, bucket_to_sector(ca, b),
old->cached_sectors);
return 0;
recalculate:
- /*
- * GC recalculates sectors_available when it starts, so that hopefully
- * we don't normally end up blocking here:
- */
-
- /*
- * Piss fuck, we can be called from extent_insert_fixup() with btree
- * locks held:
- */
-
- if (!(flags & BCH_DISK_RESERVATION_GC_LOCK_HELD)) {
- if (!(flags & BCH_DISK_RESERVATION_BTREE_LOCKS_HELD))
- down_read(&c->gc_lock);
- else if (!down_read_trylock(&c->gc_lock))
- return -EINTR;
- }
-
percpu_down_write(&c->mark_lock);
+
sectors_available = bch2_recalc_sectors_available(c);
if (sectors <= sectors_available ||
percpu_up_write(&c->mark_lock);
- if (!(flags & BCH_DISK_RESERVATION_GC_LOCK_HELD))
- up_read(&c->gc_lock);
-
return ret;
}
return __bucket(ca, b, false);
}
-static inline void bucket_set_dirty(struct bch_dev *ca, size_t b)
-{
- struct bucket *g;
- struct bucket_mark m;
-
- rcu_read_lock();
- g = bucket(ca, b);
- bucket_cmpxchg(g, m, m.dirty = true);
- rcu_read_unlock();
-
-}
-
static inline void bucket_io_clock_reset(struct bch_fs *c, struct bch_dev *ca,
size_t b, int rw)
{
}
static inline struct bucket *PTR_BUCKET(struct bch_dev *ca,
- const struct bch_extent_ptr *ptr)
+ const struct bch_extent_ptr *ptr,
+ bool gc)
{
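+	/* gc selects between the normal and gc copies of the bucket array: */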
-	return bucket(ca, PTR_BUCKET_NR(ca, ptr));
+	return __bucket(ca, PTR_BUCKET_NR(ca, ptr), gc);
}
}
#define BCH_DISK_RESERVATION_NOFAIL (1 << 0)
-#define BCH_DISK_RESERVATION_GC_LOCK_HELD (1 << 1)
-#define BCH_DISK_RESERVATION_BTREE_LOCKS_HELD (1 << 2)
int bch2_disk_reservation_add(struct bch_fs *,
struct disk_reservation *,
if (overlap == BCH_EXTENT_OVERLAP_MIDDLE &&
(sectors = bch2_extent_is_compressed(k))) {
- int flags = BCH_DISK_RESERVATION_BTREE_LOCKS_HELD;
-
- if (trans->flags & BTREE_INSERT_NOFAIL)
- flags |= BCH_DISK_RESERVATION_NOFAIL;
+ int flags = trans->flags & BTREE_INSERT_NOFAIL
+ ? BCH_DISK_RESERVATION_NOFAIL : 0;
switch (bch2_disk_reservation_add(trans->c,
trans->disk_res,
break;
case -ENOSPC:
return BTREE_INSERT_ENOSPC;
- case -EINTR:
- return BTREE_INSERT_NEED_GC_LOCK;
default:
BUG();
}