	/* i_sectors: */
	enum {
		SECTOR_UNALLOCATED,
+		SECTOR_RESERVED,
		SECTOR_DIRTY,
		SECTOR_ALLOCATED,
	} state:2;
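The new SECTOR_RESERVED state is deliberately slotted between SECTOR_UNALLOCATED and SECTOR_DIRTY (the four values still fit the 2-bit state field): the rest of the patch replaces equality tests against SECTOR_UNALLOCATED with ordered comparisons and promotes states with max_t(), so both clean states sort below SECTOR_DIRTY. A minimal standalone restatement of the ordering this relies on; the per-value comments are an illustrative reading, not the kernel's definitions:

/* Illustrative copy of the state ordering; not the kernel definitions. */
enum sector_state {
	SECTOR_UNALLOCATED,	/* hole: nothing written, nothing reserved */
	SECTOR_RESERVED,	/* covered by an on-disk reservation, no data yet */
	SECTOR_DIRTY,		/* dirty in the page cache, needs writeback */
	SECTOR_ALLOCATED,	/* backed by written, allocated extents */
};

_Static_assert(SECTOR_UNALLOCATED < SECTOR_DIRTY &&
	       SECTOR_RESERVED    < SECTOR_DIRTY,
	       "clean states must sort below SECTOR_DIRTY");
_Static_assert(SECTOR_DIRTY < SECTOR_ALLOCATED,
	       "promoting to SECTOR_DIRTY with max_t() must not demote allocated sectors");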
		s->s[i].replicas_reserved += sectors;
		res->disk.sectors -= sectors;
-		if (s->s[i].state == SECTOR_UNALLOCATED) {
-			s->s[i].state = SECTOR_DIRTY;
+		if (s->s[i].state == SECTOR_UNALLOCATED)
			dirty_sectors++;
-		}
+
+		s->s[i].state = max_t(unsigned, s->s[i].state, SECTOR_DIRTY);
	}

	if (dirty_sectors)
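In the dirtying path above, the state is now promoted with max_t() instead of being overwritten: an already-allocated sector stays SECTOR_ALLOCATED, a reserved sector becomes SECTOR_DIRTY, and dirty_sectors is still only bumped for sectors that were SECTOR_UNALLOCATED, since reserved sectors are presumably already reflected in i_sectors (the state field above is commented "i_sectors", and the accounting under the elided if (dirty_sectors) branch is what consumes the count). A small userspace sketch of the per-sector transition, with illustrative names:

#include <assert.h>

enum { S_UNALLOCATED, S_RESERVED, S_DIRTY, S_ALLOCATED };

/* Returns 1 if the sector should be added to dirty_sectors, i.e. it was
 * not yet accounted; promotes the state the same way max_t() does above. */
static int dirty_one_sector(unsigned *state)
{
	int newly_counted = (*state == S_UNALLOCATED);

	if (*state < S_DIRTY)
		*state = S_DIRTY;
	return newly_counted;
}

int main(void)
{
	unsigned s;

	s = S_UNALLOCATED; assert( dirty_one_sector(&s) && s == S_DIRTY);
	s = S_RESERVED;    assert(!dirty_one_sector(&s) && s == S_DIRTY);
	s = S_DIRTY;       assert(!dirty_one_sector(&s) && s == S_DIRTY);
	s = S_ALLOCATED;   assert(!dirty_one_sector(&s) && s == S_ALLOCATED);
	return 0;
}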
	struct bvec_iter iter;
	struct bio_vec bv;
	unsigned nr_ptrs = bch2_bkey_nr_ptrs_allocated(k);
+	unsigned state = k.k->type == KEY_TYPE_reservation
+		? SECTOR_RESERVED
+		: SECTOR_ALLOCATED;
	BUG_ON(bio->bi_iter.bi_sector < bkey_start_offset(k.k));
	BUG_ON(bio_end_sector(bio) > k.k->p.offset);
		     i < (bv.bv_offset + bv.bv_len) >> 9;
		     i++) {
			s->s[i].nr_replicas = nr_ptrs;
-			s->s[i].state = SECTOR_ALLOCATED;
+			s->s[i].state = state;
		}
	}
}
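On the read side, page state is now seeded from the type of extent that backed the read: sectors covered by a KEY_TYPE_reservation extent are remembered as SECTOR_RESERVED, anything else as SECTOR_ALLOCATED, so the dirtying and writeback paths can tell the two apart later. A trivial sketch of that mapping; the key-type values here are stand-ins, not the real bcachefs constants:

#include <assert.h>

enum { S_UNALLOCATED, S_RESERVED, S_DIRTY, S_ALLOCATED };
enum { STUB_KEY_TYPE_extent, STUB_KEY_TYPE_reservation };	/* stand-ins */

static unsigned sector_state_for_extent(unsigned key_type)
{
	return key_type == STUB_KEY_TYPE_reservation ? S_RESERVED : S_ALLOCATED;
}

int main(void)
{
	assert(sector_state_for_extent(STUB_KEY_TYPE_reservation) == S_RESERVED);
	assert(sector_state_for_extent(STUB_KEY_TYPE_extent) == S_ALLOCATED);
	return 0;
}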
	orig = *s;
	for (i = 0; i < PAGE_SECTORS; i++) {
-		if (s->s[i].state == SECTOR_UNALLOCATED)
+		if (s->s[i].state < SECTOR_DIRTY)
			continue;
		nr_replicas_this_write =
	}
	for (i = 0; i < PAGE_SECTORS; i++) {
-		if (s->s[i].state == SECTOR_UNALLOCATED)
+		if (s->s[i].state < SECTOR_DIRTY)
			continue;
		s->s[i].nr_replicas = w->opts.compression
		u64 sector;
		while (offset < PAGE_SECTORS &&
-		       orig.s[offset].state == SECTOR_UNALLOCATED)
+		       orig.s[offset].state < SECTOR_DIRTY)
			offset++;
		if (offset == PAGE_SECTORS)
		sector = ((u64) page->index << PAGE_SECTOR_SHIFT) + offset;
		while (offset + sectors < PAGE_SECTORS &&
-		       orig.s[offset + sectors].state != SECTOR_UNALLOCATED)
+		       orig.s[offset + sectors].state >= SECTOR_DIRTY)
			sectors++;
		for (i = offset; i < offset + sectors; i++) {
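Finally, the writeback loop that walks the copied page state no longer looks for runs of "not unallocated" sectors but for runs at or above SECTOR_DIRTY, so reserved-but-clean sectors are skipped exactly like holes. A standalone sketch of that run-finding; PAGE_SECTORS, the helper and the state array are stand-ins:

#include <assert.h>

enum { S_UNALLOCATED, S_RESERVED, S_DIRTY, S_ALLOCATED };

#define STUB_PAGE_SECTORS 8

/* Skip sectors that need no writeback (state < S_DIRTY) starting at *offset,
 * then return the length of the following run that does need it. */
static unsigned next_writeback_run(const unsigned *state, unsigned *offset)
{
	unsigned sectors = 0;

	while (*offset < STUB_PAGE_SECTORS && state[*offset] < S_DIRTY)
		(*offset)++;

	while (*offset + sectors < STUB_PAGE_SECTORS &&
	       state[*offset + sectors] >= S_DIRTY)
		sectors++;
	return sectors;
}

int main(void)
{
	unsigned state[STUB_PAGE_SECTORS] = {
		S_UNALLOCATED, S_RESERVED, S_DIRTY, S_ALLOCATED,
		S_RESERVED, S_DIRTY, S_DIRTY, S_UNALLOCATED,
	};
	unsigned offset = 0;

	/* First run: sectors 2-3; the leading hole and reserved sector are skipped. */
	assert(next_writeback_run(state, &offset) == 2 && offset == 2);

	/* Next run: sectors 5-6; the clean reserved sector at 4 is skipped too. */
	offset += 2;
	assert(next_writeback_run(state, &offset) == 2 && offset == 5);
	return 0;
}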