	struct extent_ptr_decoded p;
	struct bch_replicas_padded r;
	s64 dirty_sectors = 0;
-	unsigned i;
	int ret;

	r.e.data_type = data_type;

			if (!stale)
				update_cached_sectors(c, fs_usage, p.ptr.dev,
						      disk_sectors);
-		} else if (!p.ec_nr) {
+		} else if (!p.has_ec) {
			dirty_sectors += disk_sectors;
			r.e.devs[r.e.nr_devs++] = p.ptr.dev;
		} else {
-			for (i = 0; i < p.ec_nr; i++) {
-				ret = bch2_mark_stripe_ptr(c, p.ec[i],
-						data_type, fs_usage,
-						disk_sectors, flags);
-				if (ret)
-					return ret;
-			}
+			ret = bch2_mark_stripe_ptr(c, p.ec,
+					data_type, fs_usage,
+					disk_sectors, flags);
+			if (ret)
+				return ret;

			r.e.nr_required = 0;
		}

	struct bch_replicas_padded r;
	s64 dirty_sectors = 0;
	bool stale;
-	unsigned i;
	int ret;

	r.e.data_type = data_type;

			if (!stale)
				update_cached_sectors_list(trans, p.ptr.dev,
							   disk_sectors);
-		} else if (!p.ec_nr) {
+		} else if (!p.has_ec) {
			dirty_sectors += disk_sectors;
			r.e.devs[r.e.nr_devs++] = p.ptr.dev;
		} else {
-			for (i = 0; i < p.ec_nr; i++) {
-				ret = bch2_trans_mark_stripe_ptr(trans, p.ec[i],
-						disk_sectors, data_type);
-				if (ret)
-					return ret;
-			}
+			ret = bch2_trans_mark_stripe_ptr(trans, p.ec,
+					disk_sectors, data_type);
+			if (ret)
+				return ret;

			r.e.nr_required = 0;
		}
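
Both marking paths above (the in-memory bch2_mark_extent hunk and the transactional bch2_trans_mark_extent hunk) make the same simplification: with at most one stripe backing an extent pointer, the per-pointer loop over p.ec[] collapses to a single guarded call. A minimal sketch of the shared shape, reusing the names from the first hunk; the wrapper function itself is hypothetical:

/*
 * Hypothetical wrapper, for illustration only: the single stripe
 * backing this pointer is accounted in one call, where the old code
 * iterated over up to four p.ec[] entries.
 */
static int mark_ptr_stripe(struct bch_fs *c, struct extent_ptr_decoded p,
			   enum bch_data_type data_type,
			   struct bch_fs_usage *fs_usage,
			   s64 disk_sectors, unsigned flags)
{
	if (!p.has_ec)
		return 0;

	return bch2_mark_stripe_ptr(c, p.ec, data_type,
				    fs_usage, disk_sectors, flags);
}
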
	closure_init_stack(&cl);

-	BUG_ON(!rbio->pick.idx ||
-	       rbio->pick.idx - 1 >= rbio->pick.ec_nr);
+	BUG_ON(!rbio->pick.has_ec);

-	stripe_idx = rbio->pick.ec[rbio->pick.idx - 1].idx;
+	stripe_idx = rbio->pick.ec.idx;

	buf = kzalloc(sizeof(*buf), GFP_NOIO);
	if (!buf)
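
With a single stripe per pointer, the reconstruct-read path no longer needs to range-check an index into pick.ec[]: pick.has_ec alone guarantees that pick.ec is meaningful. A hypothetical accessor spelling out the simplification:

/* Hypothetical accessor: pick->ec is valid iff pick->has_ec is set. */
static u64 pick_stripe_idx(const struct extent_ptr_decoded *pick)
{
	BUG_ON(!pick->has_ec);	/* caller chose the reconstruct path */

	return pick->ec.idx;	/* replaces pick->ec[pick->idx - 1].idx */
}
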
static unsigned bch2_extent_ptr_durability(struct bch_fs *c,
					   struct extent_ptr_decoded p)
{
-	unsigned i, durability = 0;
+	unsigned durability = 0;
	struct bch_dev *ca;

	if (p.ptr.cached)

	if (ca->mi.state != BCH_MEMBER_STATE_FAILED)
		durability = max_t(unsigned, durability, ca->mi.durability);

-	for (i = 0; i < p.ec_nr; i++) {
+	if (p.has_ec) {
		struct stripe *s =
-			genradix_ptr(&c->stripes[0], p.ec[i].idx);
+			genradix_ptr(&c->stripes[0], p.ec.idx);

		if (WARN_ON(!s))
-			continue;
+			goto out;

		durability = max_t(unsigned, durability, s->nr_redundant);
	}
-
+out:
	return durability;
}
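
The rewritten helper computes a pointer's durability as the maximum of the device's own durability and, when the pointer belongs to a stripe, the stripe's redundancy, bailing out through the new out label if the stripe lookup fails. A standalone model of that computation with assumed example values; the function is hypothetical and omits the FAILED-device check for brevity:

/*
 * Hypothetical model: a durability-1 device whose pointer sits in a
 * stripe with nr_redundant == 2 yields max(1, 2) == 2, since the
 * stripe can reconstruct the data even if the device is lost.
 */
static unsigned ptr_durability_model(unsigned dev_durability, bool has_ec,
				     unsigned stripe_nr_redundant)
{
	unsigned durability = dev_durability;

	if (has_ec)
		durability = max_t(unsigned, durability, stripe_nr_redundant);

	return durability;
}
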
			p.idx++;

		if (force_reconstruct_read(c) &&
-		    !p.idx && p.ec_nr)
+		    !p.idx && p.has_ec)
			p.idx++;

-		if (p.idx >= p.ec_nr + 1)
+		if (p.idx >= (unsigned) p.has_ec + 1)
			continue;

		if (ret > 0 && !ptr_better(c, p, *pick))
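
This hunk leans on the read-index convention also visible in the ec.c hunk above: p.idx == 0 reads the replica directly, and p.idx == 1 reconstructs from the stripe, which is why the old code indexed ec[] with pick.idx - 1. A pointer therefore offers (unsigned) p.has_ec + 1 ways of being read, which is exactly the new bound; a sketch with a hypothetical helper:

/*
 * Hypothetical helper making the bound explicit: option 0 is a direct
 * read; option 1 exists only for EC-backed pointers and reconstructs
 * the data from the stripe.
 */
static unsigned ptr_read_options(const struct extent_ptr_decoded *p)
{
	return (unsigned) p->has_ec + 1;
}
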
	struct bch_extent_crc_unpacked crc =
		bch2_extent_crc_unpack(&k->k, NULL);
	union bch_extent_entry *pos;
-	unsigned i;

	if (!bch2_crc_unpacked_cmp(crc, p->crc)) {
		pos = ptrs.start;

	p->ptr.type = 1 << BCH_EXTENT_ENTRY_ptr;
	__extent_entry_insert(k, pos, to_entry(&p->ptr));

-	for (i = 0; i < p->ec_nr; i++) {
-		p->ec[i].type = 1 << BCH_EXTENT_ENTRY_stripe_ptr;
-		__extent_entry_insert(k, pos, to_entry(&p->ec[i]));
+	if (p->has_ec) {
+		p->ec.type = 1 << BCH_EXTENT_ENTRY_stripe_ptr;
+		__extent_entry_insert(k, pos, to_entry(&p->ec));
	}
}

	__label__ out; \
	\
	(_ptr).idx = 0; \
-	(_ptr).ec_nr = 0; \
+	(_ptr).has_ec = false; \
	\
	__bkey_extent_entry_for_each_from(_entry, _end, _entry) \
		switch (extent_entry_type(_entry)) { \
					entry_to_crc(_entry)); \
			break; \
		case BCH_EXTENT_ENTRY_stripe_ptr: \
-			(_ptr).ec[(_ptr).ec_nr++] = _entry->stripe_ptr; \
+			(_ptr).ec = _entry->stripe_ptr; \
+			(_ptr).has_ec = true; \
			break; \
		} \
out: \
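
The decode loop now records at most one stripe pointer, overwriting (_ptr).ec and raising has_ec when a stripe_ptr entry is seen. A usage sketch, assuming a struct bkey_s_c k in scope; bkey_for_each_ptr_decode is the public wrapper around the macro above, and the pr_debug is purely illustrative:

	struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
	const union bch_extent_entry *entry;
	struct extent_ptr_decoded p;

	bkey_for_each_ptr_decode(k.k, ptrs, p, entry)
		if (p.has_ec)
			pr_debug("dev %u backed by stripe %llu\n",
				 p.ptr.dev, (u64) p.ec.idx);
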
struct extent_ptr_decoded {
	unsigned			idx;
-	unsigned			ec_nr;
+	bool				has_ec;
	struct bch_extent_crc_unpacked	crc;
	struct bch_extent_ptr		ptr;
-	struct bch_extent_stripe_ptr	ec[4];
+	struct bch_extent_stripe_ptr	ec;
};

struct bch_io_failures {
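
The struct itself shrinks from a four-entry array plus count to one entry plus a flag, so every consumer that previously looped now branches. A before/after sketch of the pattern, with a hypothetical use_stripe_ptr() consumer:

	/* before: */
	for (i = 0; i < p.ec_nr; i++)
		use_stripe_ptr(p.ec[i]);

	/* after: */
	if (p.has_ec)
		use_stripe_ptr(p.ec);
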
		if (p.ptr.cached)
			continue;

-		if (p.ec_nr) {
+		if (p.has_ec)
			r->nr_required = 0;
-			break;
-		}

		r->devs[r->nr_devs++] = p.ptr.dev;
	}
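
In the replicas entry, nr_required == 0 marks erasure-coded data: the usual rule that nr_required of the listed devices must be present does not describe a stripe, so one EC-backed pointer flips the whole entry, and, unlike the old code which broke out of the loop, the device is still appended to devs[]. A walk-through under assumed inputs (two pointers, dev 0 plain and dev 2 EC-backed; the nr_required = 1 default is an assumption about the elided setup code):

	r->nr_required = 1;		/* assumed default for replicated data */

	r->devs[r->nr_devs++] = 0;	/* plain replica: nr_required stays 1 */

	r->nr_required = 0;		/* EC pointer: stripe redundancy rules */
	r->devs[r->nr_devs++] = 2;	/* device still recorded */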