l_bluestore_blob_split,
l_bluestore_extent_compress,
l_bluestore_gc_merged,
+ l_bluestore_read_eio,
l_bluestore_last
};
// Look up a SharedBlob by its shared-blob id under the cache lock.
// Returns nullptr when the id is absent from sb_map, or — per the '+'
// change in this hunk — when the entry's refcount has already dropped to
// zero, i.e. the SharedBlob is logically dead and merely awaiting removal.
SharedBlobRef lookup(uint64_t sbid) {
std::lock_guard<std::mutex> l(lock);
auto p = sb_map.find(sbid);
- if (p == sb_map.end()) {
+ if (p == sb_map.end() ||
+ p->second->nref == 0) {
return nullptr;
}
return p->second;
// NOTE(review): the line below is unreachable after the return above; it
// appears to be a stray context line from a different hunk (presumably the
// tail of an add()-style registration method) — verify against full file.
sb->coll = coll;
}
// Unregister sb from sb_map. This hunk changes the contract: the old
// version returned bool and erased only when sb->nref == 0; the new version
// returns void and erases only when the map entry still points at this
// exact SharedBlob instance — guarding against a race where a new
// SharedBlob re-registered under the same sbid after this one died.
- bool remove(SharedBlob *sb) {
+ void remove(SharedBlob *sb) {
std::lock_guard<std::mutex> l(lock);
- if (sb->nref == 0) {
- assert(sb->get_parent() == this);
- sb_map.erase(sb->get_sbid());
- return true;
+ assert(sb->get_parent() == this);
+ // only remove if it still points to us
+ auto p = sb_map.find(sb->get_sbid());
+ if (p != sb_map.end() &&
+ p->second == sb) {
+ sb_map.erase(p);
}
- return false;
}
// True when no SharedBlobs are registered; takes the cache lock so the
// answer is consistent with concurrent lookup()/remove() callers.
bool empty() {
std::lock_guard<std::mutex> l(lock);
return sb_map.empty();
}
+
+ void dump(CephContext *cct, int lvl);
};
//#define CACHE_BLOB_BL // not sure if this is a win yet or not... :/
void clear();
bool empty();
+ void dump(CephContext *cct, int lvl);
+
/// return true if f true for any item
bool map_any(std::function<bool(OnodeRef)> f);
};
interval_set<uint64_t> bluefs_extents; ///< block extents owned by bluefs
interval_set<uint64_t> bluefs_extents_reclaiming; ///< currently reclaiming
- std::mutex deferred_lock, deferred_submit_lock;
+ std::mutex deferred_lock;
std::atomic<uint64_t> deferred_seq = {0};
deferred_osr_queue_t deferred_queue; ///< osr's with deferred io pending
int deferred_queue_size = 0; ///< num txc's queued across all osrs
atomic_int deferred_aggressive = {0}; ///< aggressive wakeup of kv thread
+ Finisher deferred_finisher;
int m_finisher_num = 1;
vector<Finisher*> finishers;
KVSyncThread kv_sync_thread;
std::mutex kv_lock;
std::condition_variable kv_cond;
+ bool _kv_only = false;
bool kv_sync_started = false;
bool kv_stop = false;
bool kv_finalize_started = false;
PerfCounters *logger = nullptr;
- std::mutex reap_lock;
list<CollectionRef> removed_collections;
RWLock debug_read_error_lock = {"BlueStore::debug_read_error_lock"};
int _setup_block_symlink_or_file(string name, string path, uint64_t size,
bool create);
- int _write_bdev_label(string path, bluestore_bdev_label_t label);
public:
+ static int _write_bdev_label(CephContext* cct,
+ string path, bluestore_bdev_label_t label);
static int _read_bdev_label(CephContext* cct, string path,
bluestore_bdev_label_t *label);
private:
void _assign_nid(TransContext *txc, OnodeRef o);
uint64_t _assign_blobid(TransContext *txc);
- void _dump_onode(OnodeRef o, int log_level=30);
+ void _dump_onode(const OnodeRef& o, int log_level=30);
void _dump_extent_map(ExtentMap& em, int log_level=30);
void _dump_transaction(Transaction *t, int log_level = 30);
bluestore_deferred_op_t *_get_deferred_op(TransContext *txc, OnodeRef o);
void _deferred_queue(TransContext *txc);
+public:
void deferred_try_submit();
+private:
void _deferred_submit_unlock(OpSequencer *osr);
void _deferred_aio_finish(OpSequencer *osr);
int _deferred_replay();
const PExtentVector& extents,
bool compressed,
mempool_dynamic_bitset &used_blocks,
+ uint64_t granularity,
store_statfs_t& expected_statfs);
void _buffer_cache_write(
return 0;
}
- int fsck(bool deep) override;
+ int write_meta(const std::string& key, const std::string& value) override;
+ int read_meta(const std::string& key, std::string *value) override;
+
+
+ // Consistency check only: delegates to _fsck with repair disabled.
+ int fsck(bool deep) override {
+ return _fsck(deep, false);
+ }
+ // Same scan as fsck(), but also attempts to fix what it finds.
+ int repair(bool deep) override {
+ return _fsck(deep, true);
+ }
+ // Shared implementation behind fsck()/repair(); presumably returns an
+ // error/issue count or negative errno — TODO confirm against definition.
+ int _fsck(bool deep, bool repair);
void set_cache_shards(unsigned num) override;
assert(db);
db->compact();
}
-
+ // Advertise that this store performs its own data checksumming, so
+ // callers need not layer an additional checksum on top. Always true here.
+ bool has_builtin_csum() const override {
+ return true;
+ }
+
private:
bool _debug_data_eio(const ghobject_t& o) {
if (!cct->_conf->bluestore_debug_inject_read_err) {
bool mark_unused;
bool new_blob; ///< whether new blob was created
+ bool compressed = false;
+ bufferlist compressed_bl;
+ size_t compressed_len = 0;
+
write_item(
uint64_t logical_offs,
BlobRef b,
OnodeRef o,
uint64_t offset,
set<SharedBlob*> *maybe_unshared_blobs=0);
- void _truncate(TransContext *txc,
+ int _truncate(TransContext *txc,
CollectionRef& c,
OnodeRef& o,
uint64_t offset);