l_bluestore_gc_merged,
l_bluestore_read_eio,
l_bluestore_reads_with_retries,
+ l_bluestore_fragmentation,
l_bluestore_last
};
uint64_t min_alloc_size);
/// return a collection of extents to perform GC on
// NOTE(review): diff fragment — the return type changes from
// vector<AllocExtent> to vector<bluestore_pextent_t>; the accessor itself
// only exposes the cached extents_to_collect member, it computes nothing.
- const vector<AllocExtent>& get_extents_to_collect() const {
+ const vector<bluestore_pextent_t>& get_extents_to_collect() const {
return extents_to_collect;
}
GarbageCollector(CephContext* _cct) : cct(_cct) {}
///< copies that are affected by the
///< specific write
- vector<AllocExtent> extents_to_collect; ///< protrusive extents that should
- ///< be collected if GC takes place
+ ///< protrusive extents that should be collected if GC takes place
+ vector<bluestore_pextent_t> extents_to_collect;
boost::optional<uint64_t > used_alloc_unit; ///< last processed allocation
///< unit when traversing
// onode itself isn't written, though
modified_objects.insert(o);
}
// NOTE(review): diff fragment — removed() is renamed note_removed_object()
// and its semantics change: the onode is still erased from `onodes`, but it
// is now INSERTED into modified_objects (previously erased), i.e. a removal
// is recorded as a modification rather than forgotten. Confirm callers
// expect removed objects to appear in the modified set.
- void removed(OnodeRef& o) {
+ void note_removed_object(OnodeRef& o) {
onodes.erase(o);
- modified_objects.erase(o);
+ modified_objects.insert(o);
}
void aio_finish(BlueStore *store) override {