template <typename I>
void AbstractWriteLog<I>::periodic_stats() {
std::lock_guard locker(m_lock);
- ldout(m_image_ctx.cct, 1) << "STATS: m_log_entries=" << m_log_entries.size()
+ update_image_cache_state();
+ ldout(m_image_ctx.cct, 5) << "STATS: m_log_entries=" << m_log_entries.size()
<< ", m_dirty_log_entries=" << m_dirty_log_entries.size()
<< ", m_free_log_entries=" << m_free_log_entries
<< ", m_bytes_allocated=" << m_bytes_allocated
template <typename I>
void AbstractWriteLog<I>::arm_periodic_stats() {
  // Caller must hold the timer lock: the timer callback below runs with
  // m_timer_lock held, and re-arming from inside the callback relies on it.
  ceph_assert(ceph_mutex_is_locked(*m_timer_lock));
  // NOTE(review): stats are now unconditionally armed (the former
  // m_periodic_stats_enabled gate was removed) because periodic_stats()
  // also persists the image cache state, not just log output.
  m_timer_ctx = new LambdaContext([this](int r) {
      /* m_timer_lock is held */
      periodic_stats();
      // Re-arm so stats fire every LOG_STATS_INTERVAL_SECONDS.
      arm_periodic_stats();
    });
  m_timer->add_event_after(LOG_STATS_INTERVAL_SECONDS, m_timer_ctx);
}
template <typename I>
// Start the thread
m_thread_pool.start();
- m_periodic_stats_enabled = m_cache_state->log_periodic_stats;
/* Do these after we drop lock */
later.add(new LambdaContext([this](int r) {
- if (m_periodic_stats_enabled) {
/* Log stats for the first time */
periodic_stats();
/* Arm periodic stats logging for the first time */
std::lock_guard timer_locker(*m_timer_lock);
arm_periodic_stats();
- }
- }));
+ }));
m_image_ctx.op_work_queue->queue(on_finish, 0);
}
+template <typename I>
+void AbstractWriteLog<I>::update_image_cache_state() {
+ using klass = AbstractWriteLog<I>;
+ Context *ctx = util::create_context_callback<
+ klass, &klass::handle_update_image_cache_state>(this);
+ update_image_cache_state(ctx);
+}
+
/* Snapshot the in-memory cache counters into m_cache_state and persist them
 * asynchronously; on_finish is completed by write_image_cache_state().
 * Must be called with m_lock held so the byte counters are read atomically
 * with respect to each other. */
template <typename I>
void AbstractWriteLog<I>::update_image_cache_state(Context *on_finish) {
  ldout(m_image_ctx.cct, 10) << dendl;

  ceph_assert(ceph_mutex_is_locked_by_me(m_lock));
  m_cache_state->allocated_bytes = m_bytes_allocated;
  m_cache_state->cached_bytes = m_bytes_cached;
  m_cache_state->dirty_bytes = m_bytes_dirty;
  m_cache_state->free_bytes = m_bytes_allocated_cap - m_bytes_allocated;
  m_cache_state->hits_full = m_perfcounter->get(l_librbd_pwl_rd_hit_req);
  m_cache_state->hits_partial = m_perfcounter->get(l_librbd_pwl_rd_part_hit_req);
  // Misses are derived: total reads minus full and partial hits.
  m_cache_state->misses = m_perfcounter->get(l_librbd_pwl_rd_req) -
      m_cache_state->hits_full - m_cache_state->hits_partial;
  m_cache_state->hit_bytes = m_perfcounter->get(l_librbd_pwl_rd_hit_bytes);
  // Miss bytes likewise derived from total read bytes minus hit bytes.
  m_cache_state->miss_bytes = m_perfcounter->get(l_librbd_pwl_rd_bytes) -
      m_cache_state->hit_bytes;
  m_cache_state->write_image_cache_state(on_finish);
}
+template <typename I>
+void AbstractWriteLog<I>::handle_update_image_cache_state(int r) {
+ CephContext *cct = m_image_ctx.cct;
+ ldout(cct, 10) << "r=" << r << dendl;
+
+ if (r < 0) {
+ lderr(cct) << "failed to update image cache state: " << cpp_strerror(r)
+ << dendl;
+ return;
+ }
+}
+
template <typename I>
void AbstractWriteLog<I>::init(Context *on_finish) {
CephContext *cct = m_image_ctx.cct;
Context *ctx = new LambdaContext(
[this, on_finish](int r) {
if (r >= 0) {
+ std::lock_guard locker(m_lock);
update_image_cache_state(on_finish);
} else {
on_finish->complete(r);
Context *ctx = new LambdaContext(
[this, on_finish](int r) {
+ if (m_perfcounter) {
+ perf_stop();
+ }
ldout(m_image_ctx.cct, 6) << "shutdown complete" << dendl;
m_image_ctx.op_work_queue->queue(on_finish, r);
});
[this, ctx](int r) {
ldout(m_image_ctx.cct, 6) << "image cache cleaned" << dendl;
Context *next_ctx = override_ctx(r, ctx);
- bool periodic_stats_enabled = m_periodic_stats_enabled;
- m_periodic_stats_enabled = false;
-
- if (periodic_stats_enabled) {
- /* Log stats one last time if they were enabled */
- periodic_stats();
- }
- {
- std::lock_guard locker(m_lock);
- check_image_cache_state_clean();
- m_wake_up_enabled = false;
- m_cache_state->clean = true;
- m_log_entries.clear();
-
- remove_pool_file();
+ periodic_stats();
- if (m_perfcounter) {
- perf_stop();
- }
- }
+ std::lock_guard locker(m_lock);
+ check_image_cache_state_clean();
+ m_wake_up_enabled = false;
+ m_log_entries.clear();
+ m_cache_state->clean = true;
+ m_cache_state->empty = true;
+ remove_pool_file();
update_image_cache_state(next_ctx);
});
ctx = new LambdaContext(
/* Flush all writes to OSDs (unless disabled) and wait for all
in-progress flush writes to complete */
ldout(m_image_ctx.cct, 6) << "flushing" << dendl;
- if (m_periodic_stats_enabled) {
- periodic_stats();
- }
+ periodic_stats();
}
flush_dirty_entries(next_ctx);
});
std::lock_guard locker(m_lock);
m_unpublished_reserves -= published_reserves;
m_dirty_log_entries.splice(m_dirty_log_entries.end(), dirty_entries);
+ if (m_cache_state->clean && !this->m_dirty_log_entries.empty()) {
+ m_cache_state->clean = false;
+ update_image_cache_state();
+ }
}
op->complete(result);
m_perfcounter->tinc(l_librbd_pwl_log_op_dis_to_app_t,
if (allocated_req && front_req && allocated) {
/* Push dispatch of the first allocated req to a wq */
m_work_queue.queue(new LambdaContext(
- [this, allocated_req](int r) {
+ [allocated_req](int r) {
allocated_req->dispatch();
}), 0);
allocated_req = nullptr;
ldout(cct, 20) << "Nothing new to flush" << dendl;
/* Do flush complete only when all flush ops are finished */
all_clean = !m_flush_ops_in_flight;
+ if (!m_cache_state->clean && all_clean) {
+ m_cache_state->clean = true;
+ update_image_cache_state();
+ }
break;
}
/* This sync point will acquire no more sub-ops. Activation needs
* to acquire m_lock, so defer to later*/
later.add(new LambdaContext(
- [this, old_sync_point](int r) {
+ [old_sync_point](int r) {
old_sync_point->prior_persisted_gather_activate();
}));
}
* now has its finisher. If the sub is already complete, activation will
* complete the Gather. The finisher will acquire m_lock, so we'll activate
* this when we release m_lock.*/
- later.add(new LambdaContext([this, to_append](int r) {
+ later.add(new LambdaContext([to_append](int r) {
to_append->persist_gather_activate();
}));
std::lock_guard locker(m_lock);
flushing = (0 != m_flush_ops_in_flight);
all_clean = m_dirty_log_entries.empty();
+ if (!m_cache_state->clean && all_clean && !flushing) {
+ m_cache_state->clean = true;
+ update_image_cache_state();
+ }
stop_flushing = (m_shutting_down);
}