// COPYING file in the root directory) and Apache 2.0 License
// (found in the LICENSE.Apache file in the root directory).
//
+// Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
+//
// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file. See the AUTHORS file for names of contributors.
#include "db/internal_stats.h"
-#ifndef __STDC_FORMAT_MACROS
-#define __STDC_FORMAT_MACROS
-#endif
-
-#include <inttypes.h>
#include <algorithm>
+#include <cinttypes>
+#include <cstddef>
#include <limits>
+#include <sstream>
#include <string>
#include <utility>
#include <vector>
+#include "cache/cache_entry_roles.h"
+#include "cache/cache_entry_stats.h"
#include "db/column_family.h"
-#include "db/db_impl.h"
-#include "table/block_based_table_factory.h"
+#include "db/db_impl/db_impl.h"
+#include "port/port.h"
+#include "rocksdb/system_clock.h"
+#include "rocksdb/table.h"
+#include "table/block_based/cachable_entry.h"
+#include "util/hash_containers.h"
#include "util/string_util.h"
-namespace rocksdb {
+namespace ROCKSDB_NAMESPACE {
#ifndef ROCKSDB_LITE
{LevelStatType::AVG_SEC, LevelStat{"AvgSec", "Avg(sec)"}},
{LevelStatType::KEY_IN, LevelStat{"KeyIn", "KeyIn"}},
{LevelStatType::KEY_DROP, LevelStat{"KeyDrop", "KeyDrop"}},
+ {LevelStatType::R_BLOB_GB, LevelStat{"RblobGB", "Rblob(GB)"}},
+ {LevelStatType::W_BLOB_GB, LevelStat{"WblobGB", "Wblob(GB)"}},
+};
+
+const std::map<InternalStats::InternalDBStatsType, DBStatInfo>
+ InternalStats::db_stats_type_to_info = {
+ {InternalStats::kIntStatsWalFileBytes,
+ DBStatInfo{"db.wal_bytes_written"}},
+ {InternalStats::kIntStatsWalFileSynced, DBStatInfo{"db.wal_syncs"}},
+ {InternalStats::kIntStatsBytesWritten,
+ DBStatInfo{"db.user_bytes_written"}},
+ {InternalStats::kIntStatsNumKeysWritten,
+ DBStatInfo{"db.user_keys_written"}},
+ {InternalStats::kIntStatsWriteDoneByOther,
+ DBStatInfo{"db.user_writes_by_other"}},
+ {InternalStats::kIntStatsWriteDoneBySelf,
+ DBStatInfo{"db.user_writes_by_self"}},
+ {InternalStats::kIntStatsWriteWithWal,
+ DBStatInfo{"db.user_writes_with_wal"}},
+ {InternalStats::kIntStatsWriteStallMicros,
+ DBStatInfo{"db.user_write_stall_micros"}},
};
namespace {
const std::string& group_by) {
int written_size =
snprintf(buf, len, "\n** Compaction Stats [%s] **\n", cf_name.c_str());
+ written_size = std::min(written_size, static_cast<int>(len));
auto hdr = [](LevelStatType t) {
return InternalStats::compaction_level_stats.at(t).header_name.c_str();
};
int line_size = snprintf(
buf + written_size, len - written_size,
- "%s %s %s %s %s %s %s %s %s %s %s %s %s %s %s %s %s %s %s\n",
+ "%s %s %s %s %s %s %s %s %s %s %s %s %s %s %s %s %s %s %s %s "
+ "%s\n",
// Note that we skip COMPACTED_FILES and merge it with Files column
group_by.c_str(), hdr(LevelStatType::NUM_FILES),
hdr(LevelStatType::SIZE_BYTES), hdr(LevelStatType::SCORE),
hdr(LevelStatType::WRITE_MBPS), hdr(LevelStatType::COMP_SEC),
hdr(LevelStatType::COMP_CPU_SEC), hdr(LevelStatType::COMP_COUNT),
hdr(LevelStatType::AVG_SEC), hdr(LevelStatType::KEY_IN),
- hdr(LevelStatType::KEY_DROP));
+ hdr(LevelStatType::KEY_DROP), hdr(LevelStatType::R_BLOB_GB),
+ hdr(LevelStatType::W_BLOB_GB));
written_size += line_size;
+ written_size = std::min(written_size, static_cast<int>(len));
snprintf(buf + written_size, len - written_size, "%s\n",
std::string(line_size, '-').c_str());
}
int num_files, int being_compacted,
double total_file_size, double score, double w_amp,
const InternalStats::CompactionStats& stats) {
- uint64_t bytes_read =
- stats.bytes_read_non_output_levels + stats.bytes_read_output_level;
- int64_t bytes_new = stats.bytes_written - stats.bytes_read_output_level;
- double elapsed = (stats.micros + 1) / kMicrosInSec;
+ const uint64_t bytes_read = stats.bytes_read_non_output_levels +
+ stats.bytes_read_output_level +
+ stats.bytes_read_blob;
+ const uint64_t bytes_written = stats.bytes_written + stats.bytes_written_blob;
+ const int64_t bytes_new = stats.bytes_written - stats.bytes_read_output_level;
+ const double elapsed = (stats.micros + 1) / kMicrosInSec;
(*level_stats)[LevelStatType::NUM_FILES] = num_files;
(*level_stats)[LevelStatType::COMPACTED_FILES] = being_compacted;
(*level_stats)[LevelStatType::MOVED_GB] = stats.bytes_moved / kGB;
(*level_stats)[LevelStatType::WRITE_AMP] = w_amp;
(*level_stats)[LevelStatType::READ_MBPS] = bytes_read / kMB / elapsed;
- (*level_stats)[LevelStatType::WRITE_MBPS] =
- stats.bytes_written / kMB / elapsed;
+ (*level_stats)[LevelStatType::WRITE_MBPS] = bytes_written / kMB / elapsed;
(*level_stats)[LevelStatType::COMP_SEC] = stats.micros / kMicrosInSec;
(*level_stats)[LevelStatType::COMP_CPU_SEC] = stats.cpu_micros / kMicrosInSec;
(*level_stats)[LevelStatType::COMP_COUNT] = stats.count;
static_cast<double>(stats.num_input_records);
(*level_stats)[LevelStatType::KEY_DROP] =
static_cast<double>(stats.num_dropped_records);
+ (*level_stats)[LevelStatType::R_BLOB_GB] = stats.bytes_read_blob / kGB;
+ (*level_stats)[LevelStatType::W_BLOB_GB] = stats.bytes_written_blob / kGB;
}
void PrintLevelStats(char* buf, size_t len, const std::string& name,
"%9d " /* Comp(cnt) */
"%8.3f " /* Avg(sec) */
"%7s " /* KeyIn */
- "%6s\n", /* KeyDrop */
+ "%6s " /* KeyDrop */
+ "%9.1f " /* Rblob(GB) */
+ "%9.1f\n", /* Wblob(GB) */
name.c_str(), static_cast<int>(stat_value.at(LevelStatType::NUM_FILES)),
static_cast<int>(stat_value.at(LevelStatType::COMPACTED_FILES)),
BytesToHumanString(
.c_str(),
NumberToHumanString(
static_cast<std::int64_t>(stat_value.at(LevelStatType::KEY_DROP)))
- .c_str());
+ .c_str(),
+ stat_value.at(LevelStatType::R_BLOB_GB),
+ stat_value.at(LevelStatType::W_BLOB_GB));
}
void PrintLevelStats(char* buf, size_t len, const std::string& name,
static const std::string cf_file_histogram = "cf-file-histogram";
static const std::string dbstats = "dbstats";
static const std::string levelstats = "levelstats";
+static const std::string block_cache_entry_stats = "block-cache-entry-stats";
+static const std::string fast_block_cache_entry_stats =
+ "fast-block-cache-entry-stats";
static const std::string num_immutable_mem_table = "num-immutable-mem-table";
static const std::string num_immutable_mem_table_flushed =
"num-immutable-mem-table-flushed";
"is-file-deletions-enabled";
static const std::string num_snapshots = "num-snapshots";
static const std::string oldest_snapshot_time = "oldest-snapshot-time";
+static const std::string oldest_snapshot_sequence = "oldest-snapshot-sequence";
static const std::string num_live_versions = "num-live-versions";
static const std::string current_version_number =
"current-super-version-number";
static const std::string base_level_str = "base-level";
static const std::string total_sst_files_size = "total-sst-files-size";
static const std::string live_sst_files_size = "live-sst-files-size";
+static const std::string live_sst_files_size_at_temperature =
+ "live-sst-files-size-at-temperature";
static const std::string estimate_pending_comp_bytes =
"estimate-pending-compaction-bytes";
static const std::string aggregated_table_properties =
static const std::string block_cache_usage = "block-cache-usage";
static const std::string block_cache_pinned_usage = "block-cache-pinned-usage";
static const std::string options_statistics = "options-statistics";
+static const std::string num_blob_files = "num-blob-files";
+static const std::string blob_stats = "blob-stats";
+static const std::string total_blob_file_size = "total-blob-file-size";
+static const std::string live_blob_file_size = "live-blob-file-size";
+static const std::string live_blob_file_garbage_size =
+ "live-blob-file-garbage-size";
+static const std::string blob_cache_capacity = "blob-cache-capacity";
+static const std::string blob_cache_usage = "blob-cache-usage";
+static const std::string blob_cache_pinned_usage = "blob-cache-pinned-usage";
const std::string DB::Properties::kNumFilesAtLevelPrefix =
rocksdb_prefix + num_files_at_level_prefix;
rocksdb_prefix + cf_file_histogram;
const std::string DB::Properties::kDBStats = rocksdb_prefix + dbstats;
const std::string DB::Properties::kLevelStats = rocksdb_prefix + levelstats;
+const std::string DB::Properties::kBlockCacheEntryStats =
+ rocksdb_prefix + block_cache_entry_stats;
+const std::string DB::Properties::kFastBlockCacheEntryStats =
+ rocksdb_prefix + fast_block_cache_entry_stats;
const std::string DB::Properties::kNumImmutableMemTable =
rocksdb_prefix + num_immutable_mem_table;
const std::string DB::Properties::kNumImmutableMemTableFlushed =
rocksdb_prefix + num_snapshots;
const std::string DB::Properties::kOldestSnapshotTime =
rocksdb_prefix + oldest_snapshot_time;
+const std::string DB::Properties::kOldestSnapshotSequence =
+ rocksdb_prefix + oldest_snapshot_sequence;
const std::string DB::Properties::kNumLiveVersions =
rocksdb_prefix + num_live_versions;
const std::string DB::Properties::kCurrentSuperVersionNumber =
rocksdb_prefix + block_cache_pinned_usage;
const std::string DB::Properties::kOptionsStatistics =
rocksdb_prefix + options_statistics;
-
-const std::unordered_map<std::string, DBPropertyInfo>
+const std::string DB::Properties::kLiveSstFilesSizeAtTemperature =
+ rocksdb_prefix + live_sst_files_size_at_temperature;
+const std::string DB::Properties::kNumBlobFiles =
+ rocksdb_prefix + num_blob_files;
+const std::string DB::Properties::kBlobStats = rocksdb_prefix + blob_stats;
+const std::string DB::Properties::kTotalBlobFileSize =
+ rocksdb_prefix + total_blob_file_size;
+const std::string DB::Properties::kLiveBlobFileSize =
+ rocksdb_prefix + live_blob_file_size;
+const std::string DB::Properties::kLiveBlobFileGarbageSize =
+ rocksdb_prefix + live_blob_file_garbage_size;
+const std::string DB::Properties::kBlobCacheCapacity =
+ rocksdb_prefix + blob_cache_capacity;
+const std::string DB::Properties::kBlobCacheUsage =
+ rocksdb_prefix + blob_cache_usage;
+const std::string DB::Properties::kBlobCachePinnedUsage =
+ rocksdb_prefix + blob_cache_pinned_usage;
+
+const std::string InternalStats::kPeriodicCFStats =
+ DB::Properties::kCFStats + ".periodic";
+const int InternalStats::kMaxNoChangePeriodSinceDump = 8;
+
+const UnorderedMap<std::string, DBPropertyInfo>
InternalStats::ppt_name_to_info = {
{DB::Properties::kNumFilesAtLevelPrefix,
{false, &InternalStats::HandleNumFilesAtLevel, nullptr, nullptr,
{DB::Properties::kCFStats,
{false, &InternalStats::HandleCFStats, nullptr,
&InternalStats::HandleCFMapStats, nullptr}},
+ {InternalStats::kPeriodicCFStats,
+ {false, &InternalStats::HandleCFStatsPeriodic, nullptr, nullptr,
+ nullptr}},
{DB::Properties::kCFStatsNoFileHistogram,
{false, &InternalStats::HandleCFStatsNoFileHistogram, nullptr, nullptr,
nullptr}},
{false, &InternalStats::HandleCFFileHistogram, nullptr, nullptr,
nullptr}},
{DB::Properties::kDBStats,
- {false, &InternalStats::HandleDBStats, nullptr, nullptr, nullptr}},
+ {false, &InternalStats::HandleDBStats, nullptr,
+ &InternalStats::HandleDBMapStats, nullptr}},
+ {DB::Properties::kBlockCacheEntryStats,
+ {true, &InternalStats::HandleBlockCacheEntryStats, nullptr,
+ &InternalStats::HandleBlockCacheEntryStatsMap, nullptr}},
+ {DB::Properties::kFastBlockCacheEntryStats,
+ {true, &InternalStats::HandleFastBlockCacheEntryStats, nullptr,
+ &InternalStats::HandleFastBlockCacheEntryStatsMap, nullptr}},
{DB::Properties::kSSTables,
{false, &InternalStats::HandleSsTables, nullptr, nullptr, nullptr}},
{DB::Properties::kAggregatedTableProperties,
{false, &InternalStats::HandleAggregatedTableProperties, nullptr,
- nullptr, nullptr}},
+ &InternalStats::HandleAggregatedTablePropertiesMap, nullptr}},
{DB::Properties::kAggregatedTablePropertiesAtLevel,
{false, &InternalStats::HandleAggregatedTablePropertiesAtLevel,
- nullptr, nullptr, nullptr}},
+ nullptr, &InternalStats::HandleAggregatedTablePropertiesAtLevelMap,
+ nullptr}},
{DB::Properties::kNumImmutableMemTable,
{false, nullptr, &InternalStats::HandleNumImmutableMemTable, nullptr,
nullptr}},
{DB::Properties::kOldestSnapshotTime,
{false, nullptr, &InternalStats::HandleOldestSnapshotTime, nullptr,
nullptr}},
+ {DB::Properties::kOldestSnapshotSequence,
+ {false, nullptr, &InternalStats::HandleOldestSnapshotSequence, nullptr,
+ nullptr}},
{DB::Properties::kNumLiveVersions,
{false, nullptr, &InternalStats::HandleNumLiveVersions, nullptr,
nullptr}},
{DB::Properties::kLiveSstFilesSize,
{false, nullptr, &InternalStats::HandleLiveSstFilesSize, nullptr,
nullptr}},
+ {DB::Properties::kLiveSstFilesSizeAtTemperature,
+ {false, &InternalStats::HandleLiveSstFilesSizeAtTemperature, nullptr,
+ nullptr, nullptr}},
{DB::Properties::kEstimatePendingCompactionBytes,
{false, nullptr, &InternalStats::HandleEstimatePendingCompactionBytes,
nullptr, nullptr}},
{false, nullptr, &InternalStats::HandleBlockCachePinnedUsage, nullptr,
nullptr}},
{DB::Properties::kOptionsStatistics,
- {false, nullptr, nullptr, nullptr,
+ {true, nullptr, nullptr, nullptr,
&DBImpl::GetPropertyHandleOptionsStatistics}},
+ {DB::Properties::kNumBlobFiles,
+ {false, nullptr, &InternalStats::HandleNumBlobFiles, nullptr,
+ nullptr}},
+ {DB::Properties::kBlobStats,
+ {false, &InternalStats::HandleBlobStats, nullptr, nullptr, nullptr}},
+ {DB::Properties::kTotalBlobFileSize,
+ {false, nullptr, &InternalStats::HandleTotalBlobFileSize, nullptr,
+ nullptr}},
+ {DB::Properties::kLiveBlobFileSize,
+ {false, nullptr, &InternalStats::HandleLiveBlobFileSize, nullptr,
+ nullptr}},
+ {DB::Properties::kLiveBlobFileGarbageSize,
+ {false, nullptr, &InternalStats::HandleLiveBlobFileGarbageSize,
+ nullptr, nullptr}},
+ {DB::Properties::kBlobCacheCapacity,
+ {false, nullptr, &InternalStats::HandleBlobCacheCapacity, nullptr,
+ nullptr}},
+ {DB::Properties::kBlobCacheUsage,
+ {false, nullptr, &InternalStats::HandleBlobCacheUsage, nullptr,
+ nullptr}},
+ {DB::Properties::kBlobCachePinnedUsage,
+ {false, nullptr, &InternalStats::HandleBlobCachePinnedUsage, nullptr,
+ nullptr}},
};
+// Per-column-family internal stats tracker.
+//
+// Records construction time from `clock` (for uptime reporting) and, when
+// GetBlockCacheForStats() yields a block cache, eagerly obtains or creates
+// the shared CacheEntryStatsCollector for that cache so later property
+// reads can reuse it. Failure to obtain a collector is tolerated: the
+// collector pointer stays null and the entry-stats properties report false.
+InternalStats::InternalStats(int num_levels, SystemClock* clock,
+                             ColumnFamilyData* cfd)
+    : db_stats_{},
+      cf_stats_value_{},
+      cf_stats_count_{},
+      comp_stats_(num_levels),
+      comp_stats_by_pri_(Env::Priority::TOTAL),
+      file_read_latency_(num_levels),
+      has_cf_change_since_dump_(true),
+      bg_error_count_(0),
+      number_levels_(num_levels),
+      clock_(clock),
+      cfd_(cfd),
+      started_at_(clock->NowMicros()) {
+  Cache* block_cache = GetBlockCacheForStats();
+  if (block_cache) {
+    // Extract or create stats collector. Could fail in rare cases.
+    Status s = CacheEntryStatsCollector<CacheEntryRoleStats>::GetShared(
+        block_cache, clock_, &cache_entry_stats_collector_);
+    if (s.ok()) {
+      assert(cache_entry_stats_collector_);
+    } else {
+      assert(!cache_entry_stats_collector_);
+    }
+  }
+}
+
+// Test-only accessor: trigger a collection (with foreground or background
+// rate limits) and copy the latest CacheEntryRoleStats into *stats.
+// *stats is left untouched when no collector exists (e.g. no block cache).
+void InternalStats::TEST_GetCacheEntryRoleStats(CacheEntryRoleStats* stats,
+                                                bool foreground) {
+  CollectCacheEntryStats(foreground);
+  if (cache_entry_stats_collector_) {
+    cache_entry_stats_collector_->GetStats(stats);
+  }
+}
+
+// Refresh block cache entry stats via the shared collector, subject to
+// rate limiting. `foreground` requests fresher data (shorter minimum
+// interval) at a higher CPU budget; background collections are throttled
+// more aggressively.
+void InternalStats::CollectCacheEntryStats(bool foreground) {
+  // This function is safe to call from any thread because
+  // cache_entry_stats_collector_ field is const after constructor
+  // and ->GetStats does its own synchronization, which also suffices for
+  // cache_entry_stats_.
+
+  if (!cache_entry_stats_collector_) {
+    return;  // nothing to do (e.g. no block cache)
+  }
+
+  // For "background" collections, strictly cap the collection time by
+  // expanding effective cache TTL. For foreground, be more aggressive about
+  // getting latest data.
+  int min_interval_seconds = foreground ? 10 : 180;
+  // 1/500 = max of 0.2% of one CPU thread
+  int min_interval_factor = foreground ? 10 : 500;
+  cache_entry_stats_collector_->CollectStats(min_interval_seconds,
+                                             min_interval_factor);
+}
+
+// Returns the per-entry callback used while scanning the block cache: it
+// maps each entry's deleter to a CacheEntryRole via role_map_ (unknown
+// deleters fall back to kMisc) and accumulates the entry count and total
+// charge for that role.
+// NOTE(review): the lambda captures `this` by reference, so it must not
+// outlive this CacheEntryRoleStats object — confirm at call sites.
+std::function<void(const Slice&, void*, size_t, Cache::DeleterFn)>
+InternalStats::CacheEntryRoleStats::GetEntryCallback() {
+  return [&](const Slice& /*key*/, void* /*value*/, size_t charge,
+             Cache::DeleterFn deleter) {
+    auto e = role_map_.find(deleter);
+    size_t role_idx;
+    if (e == role_map_.end()) {
+      // Deleter not registered to any known role; attribute to kMisc.
+      role_idx = static_cast<size_t>(CacheEntryRole::kMisc);
+    } else {
+      role_idx = static_cast<size_t>(e->second);
+    }
+    entry_counts[role_idx]++;
+    total_charges[role_idx] += charge;
+  };
+}
+
+// Called at the start of a cache scan: resets all accumulators, records
+// the start time, snapshots the deleter->role mapping, and captures
+// identifying/summary info about the cache (a "name@address#pid" id,
+// capacity, usage, hash table size, and occupancy).
+void InternalStats::CacheEntryRoleStats::BeginCollection(
+    Cache* cache, SystemClock*, uint64_t start_time_micros) {
+  Clear();
+  last_start_time_micros_ = start_time_micros;
+  ++collection_count;
+  role_map_ = CopyCacheDeleterRoleMap();
+  std::ostringstream str;
+  str << cache->Name() << "@" << static_cast<void*>(cache) << "#"
+      << port::GetProcessID();
+  cache_id = str.str();
+  cache_capacity = cache->GetCapacity();
+  cache_usage = cache->GetUsage();
+  table_size = cache->GetTableAddressCount();
+  occupancy = cache->GetOccupancyCount();
+}
+
+// Called when a cache scan finishes; records the end timestamp used for
+// duration and staleness reporting.
+void InternalStats::CacheEntryRoleStats::EndCollection(
+    Cache*, SystemClock*, uint64_t end_time_micros) {
+  last_end_time_micros_ = end_time_micros;
+}
+
+// Called when a scan is skipped due to rate limiting; counts how many
+// times the last collected results have been re-served unchanged.
+void InternalStats::CacheEntryRoleStats::SkippedCollection() {
+  ++copies_of_last_collection;
+}
+
+// Duration of the last completed collection in microseconds, or 0 when the
+// end time does not follow the start time (e.g. no collection finished yet).
+uint64_t InternalStats::CacheEntryRoleStats::GetLastDurationMicros() const {
+  if (last_end_time_micros_ > last_start_time_micros_) {
+    return last_end_time_micros_ - last_start_time_micros_;
+  } else {
+    return 0U;
+  }
+}
+
+// Render a human-readable two-part summary: a cache-wide line (id,
+// capacity, usage, table size, occupancy, collection counters, age) then
+// one "(count,size,portion)" tuple per role that has at least one entry.
+std::string InternalStats::CacheEntryRoleStats::ToString(
+    SystemClock* clock) const {
+  std::ostringstream str;
+  str << "Block cache " << cache_id
+      << " capacity: " << BytesToHumanString(cache_capacity)
+      << " usage: " << BytesToHumanString(cache_usage)
+      << " table_size: " << table_size << " occupancy: " << occupancy
+      << " collections: " << collection_count
+      << " last_copies: " << copies_of_last_collection
+      << " last_secs: " << (GetLastDurationMicros() / 1000000.0)
+      << " secs_since: "
+      << ((clock->NowMicros() - last_end_time_micros_) / 1000000U) << "\n";
+  str << "Block cache entry stats(count,size,portion):";
+  for (size_t i = 0; i < kNumCacheEntryRoles; ++i) {
+    if (entry_counts[i] > 0) {
+      str << " " << kCacheEntryRoleToCamelString[i] << "(" << entry_counts[i]
+          << "," << BytesToHumanString(total_charges[i]) << ","
+          << (100.0 * total_charges[i] / cache_capacity) << "%)";
+    }
+  }
+  str << "\n";
+  return str.str();
+}
+
+// Fill *values with the string-map form of these stats, keyed via
+// BlockCacheEntryStatsMapKeys. Clears any existing contents first and,
+// unlike ToString(), emits every role even when its entry count is zero.
+void InternalStats::CacheEntryRoleStats::ToMap(
+    std::map<std::string, std::string>* values, SystemClock* clock) const {
+  values->clear();
+  auto& v = *values;
+  v[BlockCacheEntryStatsMapKeys::CacheId()] = cache_id;
+  v[BlockCacheEntryStatsMapKeys::CacheCapacityBytes()] =
+      std::to_string(cache_capacity);
+  v[BlockCacheEntryStatsMapKeys::LastCollectionDurationSeconds()] =
+      std::to_string(GetLastDurationMicros() / 1000000.0);
+  v[BlockCacheEntryStatsMapKeys::LastCollectionAgeSeconds()] =
+      std::to_string((clock->NowMicros() - last_end_time_micros_) / 1000000U);
+  for (size_t i = 0; i < kNumCacheEntryRoles; ++i) {
+    auto role = static_cast<CacheEntryRole>(i);
+    v[BlockCacheEntryStatsMapKeys::EntryCount(role)] =
+        std::to_string(entry_counts[i]);
+    v[BlockCacheEntryStatsMapKeys::UsedBytes(role)] =
+        std::to_string(total_charges[i]);
+    v[BlockCacheEntryStatsMapKeys::UsedPercent(role)] =
+        std::to_string(100.0 * total_charges[i] / cache_capacity);
+  }
+}
+
+// Shared implementation for the string-valued (fast-)block-cache-entry-stats
+// properties. Returns false when there is no collector (e.g. no block
+// cache); `fast` selects the looser background collection rate limits.
+bool InternalStats::HandleBlockCacheEntryStatsInternal(std::string* value,
+                                                       bool fast) {
+  if (!cache_entry_stats_collector_) {
+    return false;
+  }
+  CollectCacheEntryStats(!fast /* foreground */);
+  CacheEntryRoleStats stats;
+  cache_entry_stats_collector_->GetStats(&stats);
+  *value = stats.ToString(clock_);
+  return true;
+}
+
+// Map-valued counterpart of HandleBlockCacheEntryStatsInternal: same
+// collection behavior, results delivered via CacheEntryRoleStats::ToMap.
+bool InternalStats::HandleBlockCacheEntryStatsMapInternal(
+    std::map<std::string, std::string>* values, bool fast) {
+  if (!cache_entry_stats_collector_) {
+    return false;
+  }
+  CollectCacheEntryStats(!fast /* foreground */);
+  CacheEntryRoleStats stats;
+  cache_entry_stats_collector_->GetStats(&stats);
+  stats.ToMap(values, clock_);
+  return true;
+}
+
+// Property handler: "block-cache-entry-stats" (string form, foreground).
+bool InternalStats::HandleBlockCacheEntryStats(std::string* value,
+                                               Slice /*suffix*/) {
+  return HandleBlockCacheEntryStatsInternal(value, false /* fast */);
+}
+
+// Property handler: "block-cache-entry-stats" (map form, foreground).
+bool InternalStats::HandleBlockCacheEntryStatsMap(
+    std::map<std::string, std::string>* values, Slice /*suffix*/) {
+  return HandleBlockCacheEntryStatsMapInternal(values, false /* fast */);
+}
+
+// Property handler: "fast-block-cache-entry-stats" (string form; tolerates
+// staler data in exchange for background-level rate limiting).
+bool InternalStats::HandleFastBlockCacheEntryStats(std::string* value,
+                                                   Slice /*suffix*/) {
+  return HandleBlockCacheEntryStatsInternal(value, true /* fast */);
+}
+
+// Property handler: "fast-block-cache-entry-stats" (map form).
+bool InternalStats::HandleFastBlockCacheEntryStatsMap(
+    std::map<std::string, std::string>* values, Slice /*suffix*/) {
+  return HandleBlockCacheEntryStatsMapInternal(values, true /* fast */);
+}
+
+// Property handler: `suffix` is parsed as a decimal temperature value;
+// returns (as a decimal string) the total size of live SST files in the
+// current version whose temperature matches. Fails (returns false) when
+// the suffix is not a plain decimal number.
+bool InternalStats::HandleLiveSstFilesSizeAtTemperature(std::string* value,
+                                                        Slice suffix) {
+  uint64_t temperature;
+  bool ok = ConsumeDecimalNumber(&suffix, &temperature) && suffix.empty();
+  if (!ok) {
+    return false;
+  }
+
+  // Sum matching file sizes across every level of the current version.
+  uint64_t size = 0;
+  const auto* vstorage = cfd_->current()->storage_info();
+  for (int level = 0; level < vstorage->num_levels(); level++) {
+    for (const auto& file_meta : vstorage->LevelFiles(level)) {
+      if (static_cast<uint8_t>(file_meta->temperature) == temperature) {
+        size += file_meta->fd.GetFileSize();
+      }
+    }
+  }
+
+  *value = std::to_string(size);
+  return true;
+}
+
+// Property handler: number of blob files in the current version.
+bool InternalStats::HandleNumBlobFiles(uint64_t* value, DBImpl* /*db*/,
+                                       Version* /*version*/) {
+  assert(value);
+  assert(cfd_);
+
+  const auto* current = cfd_->current();
+  assert(current);
+
+  const auto* vstorage = current->storage_info();
+  assert(vstorage);
+
+  const auto& blob_files = vstorage->GetBlobFiles();
+
+  *value = blob_files.size();
+
+  return true;
+}
+
+// Property handler: appends a human-readable blob summary (file count,
+// total size, garbage size, space amplification) for the current version.
+// Note: appends to *value rather than replacing it.
+bool InternalStats::HandleBlobStats(std::string* value, Slice /*suffix*/) {
+  assert(value);
+  assert(cfd_);
+
+  const auto* current = cfd_->current();
+  assert(current);
+
+  const auto* vstorage = current->storage_info();
+  assert(vstorage);
+
+  const auto blob_st = vstorage->GetBlobStats();
+
+  std::ostringstream oss;
+
+  oss << "Number of blob files: " << vstorage->GetBlobFiles().size()
+      << "\nTotal size of blob files: " << blob_st.total_file_size
+      << "\nTotal size of garbage in blob files: " << blob_st.total_garbage_size
+      << "\nBlob file space amplification: " << blob_st.space_amp << '\n';
+
+  value->append(oss.str());
+
+  return true;
+}
+
+// Property handler: total blob file size from the column family's own
+// accounting (presumably covers all tracked blob files, not just those in
+// the current version — compare HandleLiveBlobFileSize; verify in cfd).
+bool InternalStats::HandleTotalBlobFileSize(uint64_t* value, DBImpl* /*db*/,
+                                            Version* /*version*/) {
+  assert(value);
+  assert(cfd_);
+
+  *value = cfd_->GetTotalBlobFileSize();
+
+  return true;
+}
+
+// Property handler: total size of blob files in the current (live) version.
+bool InternalStats::HandleLiveBlobFileSize(uint64_t* value, DBImpl* /*db*/,
+                                           Version* /*version*/) {
+  assert(value);
+  assert(cfd_);
+
+  const auto* current = cfd_->current();
+  assert(current);
+
+  const auto* vstorage = current->storage_info();
+  assert(vstorage);
+
+  *value = vstorage->GetBlobStats().total_file_size;
+
+  return true;
+}
+
+// Property handler: total garbage bytes in the current version's blob files.
+bool InternalStats::HandleLiveBlobFileGarbageSize(uint64_t* value,
+                                                  DBImpl* /*db*/,
+                                                  Version* /*version*/) {
+  assert(value);
+  assert(cfd_);
+
+  const auto* current = cfd_->current();
+  assert(current);
+
+  const auto* vstorage = current->storage_info();
+  assert(vstorage);
+
+  *value = vstorage->GetBlobStats().total_garbage_size;
+
+  return true;
+}
+
+// Returns the blob cache configured for this column family, or nullptr
+// when none is configured.
+Cache* InternalStats::GetBlobCacheForStats() {
+  return cfd_->ioptions()->blob_cache.get();
+}
+
+// Property handler: blob cache capacity; false when no blob cache exists.
+bool InternalStats::HandleBlobCacheCapacity(uint64_t* value, DBImpl* /*db*/,
+                                            Version* /*version*/) {
+  Cache* blob_cache = GetBlobCacheForStats();
+  if (blob_cache) {
+    *value = static_cast<uint64_t>(blob_cache->GetCapacity());
+    return true;
+  }
+  return false;
+}
+
+// Property handler: blob cache usage; false when no blob cache exists.
+bool InternalStats::HandleBlobCacheUsage(uint64_t* value, DBImpl* /*db*/,
+                                         Version* /*version*/) {
+  Cache* blob_cache = GetBlobCacheForStats();
+  if (blob_cache) {
+    *value = static_cast<uint64_t>(blob_cache->GetUsage());
+    return true;
+  }
+  return false;
+}
+
+// Property handler: blob cache pinned usage; false when no blob cache.
+bool InternalStats::HandleBlobCachePinnedUsage(uint64_t* value, DBImpl* /*db*/,
+                                               Version* /*version*/) {
+  Cache* blob_cache = GetBlobCacheForStats();
+  if (blob_cache) {
+    *value = static_cast<uint64_t>(blob_cache->GetPinnedUsage());
+    return true;
+  }
+  return false;
+}
+
const DBPropertyInfo* GetPropertyInfo(const Slice& property) {
std::string ppt_name = GetPropertyNameAndArg(property).first.ToString();
auto ppt_info_iter = InternalStats::ppt_name_to_info.find(ppt_name);
}
bool InternalStats::GetMapProperty(const DBPropertyInfo& property_info,
- const Slice& /*property*/,
+ const Slice& property,
std::map<std::string, std::string>* value) {
assert(value != nullptr);
assert(property_info.handle_map != nullptr);
- return (this->*(property_info.handle_map))(value);
+ Slice arg = GetPropertyNameAndArg(property).second;
+ return (this->*(property_info.handle_map))(value, arg);
}
bool InternalStats::GetIntProperty(const DBPropertyInfo& property_info,
if (!ok || level >= static_cast<uint64_t>(number_levels_)) {
return false;
}
- *value = ToString(
+ *value = std::to_string(
vstorage->GetEstimatedCompressionRatioAtLevel(static_cast<int>(level)));
return true;
}
}
bool InternalStats::HandleCFMapStats(
- std::map<std::string, std::string>* cf_stats) {
+ std::map<std::string, std::string>* cf_stats, Slice /*suffix*/) {
DumpCFMapStats(cf_stats);
return true;
}
return true;
}
+// Property handler for kPeriodicCFStats: dump CF stats only when something
+// changed since the last dump, but never go more than
+// kMaxNoChangePeriodSinceDump consecutive quiet periods without dumping.
+bool InternalStats::HandleCFStatsPeriodic(std::string* value,
+                                          Slice /*suffix*/) {
+  bool has_change = has_cf_change_since_dump_;
+  if (!has_change) {
+    // If file histogram changes, there is activity in this period too.
+    uint64_t new_histogram_num = 0;
+    for (int level = 0; level < number_levels_; level++) {
+      new_histogram_num += file_read_latency_[level].num();
+    }
+    new_histogram_num += blob_file_read_latency_.num();
+    if (new_histogram_num != last_histogram_num) {
+      has_change = true;
+      last_histogram_num = new_histogram_num;
+    }
+  }
+  if (has_change) {
+    no_cf_change_period_since_dump_ = 0;
+    has_cf_change_since_dump_ = false;
+  } else if (no_cf_change_period_since_dump_++ > 0) {
+    // Second or later consecutive quiet period: skip the dump (still
+    // returning success with *value unmodified). Once the counter reaches
+    // the cap, reset it so the next period dumps unconditionally. The
+    // first quiet period (counter was 0) falls through and still dumps.
+    if (no_cf_change_period_since_dump_ == kMaxNoChangePeriodSinceDump) {
+      // Next periodic, we need to dump stats even if there is no change.
+      no_cf_change_period_since_dump_ = 0;
+    }
+    return true;
+  }
+
+  DumpCFStatsNoFileHistogram(/*is_periodic=*/true, value);
+  DumpCFFileHistogram(value);
+  return true;
+}
+
bool InternalStats::HandleCFStatsNoFileHistogram(std::string* value,
Slice /*suffix*/) {
- DumpCFStatsNoFileHistogram(value);
+ DumpCFStatsNoFileHistogram(/*is_periodic=*/false, value);
return true;
}
return true;
}
+// Property handler: map form of DB-level stats (see DumpDBMapStats).
+bool InternalStats::HandleDBMapStats(
+    std::map<std::string, std::string>* db_stats, Slice /*suffix*/) {
+  DumpDBMapStats(db_stats);
+  return true;
+}
+
bool InternalStats::HandleDBStats(std::string* value, Slice /*suffix*/) {
DumpDBStats(value);
return true;
return true;
}
-bool InternalStats::HandleAggregatedTablePropertiesAtLevel(std::string* value,
+// File-local helper: convert a uint64-valued map to its decimal-string
+// form for map-property output.
+static std::map<std::string, std::string> MapUint64ValuesToString(
+    const std::map<std::string, uint64_t>& from) {
+  std::map<std::string, std::string> to;
+  for (const auto& e : from) {
+    to[e.first] = std::to_string(e.second);
+  }
+  return to;
+}
+
+// Map-form property handler: aggregated table properties across all levels
+// of the current version, values rendered as decimal strings. Returns
+// false when property aggregation fails.
+bool InternalStats::HandleAggregatedTablePropertiesMap(
+    std::map<std::string, std::string>* values, Slice /*suffix*/) {
+  std::shared_ptr<const TableProperties> tp;
+  auto s = cfd_->current()->GetAggregatedTableProperties(&tp);
+  if (!s.ok()) {
+    return false;
+  }
+  *values = MapUint64ValuesToString(tp->GetAggregatablePropertiesAsMap());
+  return true;
+}
+
+bool InternalStats::HandleAggregatedTablePropertiesAtLevel(std::string* values,
Slice suffix) {
uint64_t level;
bool ok = ConsumeDecimalNumber(&suffix, &level) && suffix.empty();
if (!s.ok()) {
return false;
}
- *value = tp->ToString();
+ *values = tp->ToString();
+ return true;
+}
+
+// Map-form property handler: `suffix` is the level number; returns that
+// level's aggregatable table properties as decimal strings. Fails on a
+// malformed suffix, an out-of-range level, or an aggregation error.
+bool InternalStats::HandleAggregatedTablePropertiesAtLevelMap(
+    std::map<std::string, std::string>* values, Slice suffix) {
+  uint64_t level;
+  bool ok = ConsumeDecimalNumber(&suffix, &level) && suffix.empty();
+  if (!ok || static_cast<int>(level) >= number_levels_) {
+    return false;
+  }
+  std::shared_ptr<const TableProperties> tp;
+  auto s = cfd_->current()->GetAggregatedTableProperties(
+      &tp, static_cast<int>(level));
+  if (!s.ok()) {
+    return false;
+  }
+  *values = MapUint64ValuesToString(tp->GetAggregatablePropertiesAsMap());
+  return true;
+}
bool InternalStats::HandleMemTableFlushPending(uint64_t* value, DBImpl* /*db*/,
Version* /*version*/) {
- // Return number of mem tables that are ready to flush (made immutable)
*value = (cfd_->imm()->IsFlushPending() ? 1 : 0);
return true;
}
bool InternalStats::HandleCurSizeActiveMemTable(uint64_t* value, DBImpl* /*db*/,
Version* /*version*/) {
// Current size of the active memtable
- *value = cfd_->mem()->ApproximateMemoryUsage();
+ // Using ApproximateMemoryUsageFast to avoid the need for synchronization
+ *value = cfd_->mem()->ApproximateMemoryUsageFast();
return true;
}
bool InternalStats::HandleCurSizeAllMemTables(uint64_t* value, DBImpl* /*db*/,
Version* /*version*/) {
// Current size of the active memtable + immutable memtables
- *value = cfd_->mem()->ApproximateMemoryUsage() +
+ // Using ApproximateMemoryUsageFast to avoid the need for synchronization
+ *value = cfd_->mem()->ApproximateMemoryUsageFast() +
cfd_->imm()->ApproximateUnflushedMemTablesMemoryUsage();
return true;
}
bool InternalStats::HandleSizeAllMemTables(uint64_t* value, DBImpl* /*db*/,
Version* /*version*/) {
- *value = cfd_->mem()->ApproximateMemoryUsage() +
+ // Using ApproximateMemoryUsageFast to avoid the need for synchronization
+ *value = cfd_->mem()->ApproximateMemoryUsageFast() +
cfd_->imm()->ApproximateMemoryUsage();
return true;
}
return true;
}
+// Property handler: sequence number of the oldest snapshot the DB still
+// holds (from the DB-wide snapshot list).
+bool InternalStats::HandleOldestSnapshotSequence(uint64_t* value, DBImpl* db,
+                                                 Version* /*version*/) {
+  *value = static_cast<uint64_t>(db->snapshots().GetOldestSnapshotSequence());
+  return true;
+}
+
bool InternalStats::HandleNumLiveVersions(uint64_t* value, DBImpl* /*db*/,
Version* /*version*/) {
*value = cfd_->GetNumLiveVersions();
bool InternalStats::HandleIsFileDeletionsEnabled(uint64_t* value, DBImpl* db,
Version* /*version*/) {
- *value = db->IsFileDeletionsEnabled();
+ *value = db->IsFileDeletionsEnabled() ? 1 : 0;
return true;
}
return *value > 0 && *value < std::numeric_limits<uint64_t>::max();
}
-bool InternalStats::HandleBlockCacheStat(Cache** block_cache) {
- assert(block_cache != nullptr);
- auto* table_factory = cfd_->ioptions()->table_factory;
+Cache* InternalStats::GetBlockCacheForStats() {
+ auto* table_factory = cfd_->ioptions()->table_factory.get();
assert(table_factory != nullptr);
- if (BlockBasedTableFactory::kName != table_factory->Name()) {
- return false;
- }
- auto* table_options =
- reinterpret_cast<BlockBasedTableOptions*>(table_factory->GetOptions());
- if (table_options == nullptr) {
- return false;
- }
- *block_cache = table_options->block_cache.get();
- if (table_options->no_block_cache || *block_cache == nullptr) {
- return false;
- }
- return true;
+ return table_factory->GetOptions<Cache>(TableFactory::kBlockCacheOpts());
}
bool InternalStats::HandleBlockCacheCapacity(uint64_t* value, DBImpl* /*db*/,
Version* /*version*/) {
- Cache* block_cache;
- bool ok = HandleBlockCacheStat(&block_cache);
- if (!ok) {
- return false;
+ Cache* block_cache = GetBlockCacheForStats();
+ if (block_cache) {
+ *value = static_cast<uint64_t>(block_cache->GetCapacity());
+ return true;
}
- *value = static_cast<uint64_t>(block_cache->GetCapacity());
- return true;
+ return false;
}
bool InternalStats::HandleBlockCacheUsage(uint64_t* value, DBImpl* /*db*/,
Version* /*version*/) {
- Cache* block_cache;
- bool ok = HandleBlockCacheStat(&block_cache);
- if (!ok) {
- return false;
+ Cache* block_cache = GetBlockCacheForStats();
+ if (block_cache) {
+ *value = static_cast<uint64_t>(block_cache->GetUsage());
+ return true;
}
- *value = static_cast<uint64_t>(block_cache->GetUsage());
- return true;
+ return false;
}
bool InternalStats::HandleBlockCachePinnedUsage(uint64_t* value, DBImpl* /*db*/,
Version* /*version*/) {
- Cache* block_cache;
- bool ok = HandleBlockCacheStat(&block_cache);
- if (!ok) {
- return false;
+ Cache* block_cache = GetBlockCacheForStats();
+ if (block_cache) {
+ *value = static_cast<uint64_t>(block_cache->GetPinnedUsage());
+ return true;
}
- *value = static_cast<uint64_t>(block_cache->GetPinnedUsage());
- return true;
+ return false;
+}
+
+void InternalStats::DumpDBMapStats(
+ std::map<std::string, std::string>* db_stats) {
+ for (int i = 0; i < static_cast<int>(kIntStatsNumMax); ++i) {
+ InternalDBStatsType type = static_cast<InternalDBStatsType>(i);
+ (*db_stats)[db_stats_type_to_info.at(type).property_name] =
+ std::to_string(GetDBStats(type));
+ }
+ double seconds_up = (clock_->NowMicros() - started_at_) / kMicrosInSec;
+ (*db_stats)["db.uptime"] = std::to_string(seconds_up);
}
void InternalStats::DumpDBStats(std::string* value) {
char buf[1000];
// DB-level stats, only available from default column family
- double seconds_up = (env_->NowMicros() - started_at_ + 1) / kMicrosInSec;
+ double seconds_up = (clock_->NowMicros() - started_at_) / kMicrosInSec;
double interval_seconds_up = seconds_up - db_stats_snapshot_.seconds_up;
snprintf(buf, sizeof(buf),
"\n** DB Stats **\nUptime(secs): %.1f total, %.1f interval\n",
seconds_up, interval_seconds_up);
value->append(buf);
// Cumulative
- uint64_t user_bytes_written = GetDBStats(InternalStats::BYTES_WRITTEN);
- uint64_t num_keys_written = GetDBStats(InternalStats::NUMBER_KEYS_WRITTEN);
- uint64_t write_other = GetDBStats(InternalStats::WRITE_DONE_BY_OTHER);
- uint64_t write_self = GetDBStats(InternalStats::WRITE_DONE_BY_SELF);
- uint64_t wal_bytes = GetDBStats(InternalStats::WAL_FILE_BYTES);
- uint64_t wal_synced = GetDBStats(InternalStats::WAL_FILE_SYNCED);
- uint64_t write_with_wal = GetDBStats(InternalStats::WRITE_WITH_WAL);
- uint64_t write_stall_micros = GetDBStats(InternalStats::WRITE_STALL_MICROS);
+ uint64_t user_bytes_written =
+ GetDBStats(InternalStats::kIntStatsBytesWritten);
+ uint64_t num_keys_written =
+ GetDBStats(InternalStats::kIntStatsNumKeysWritten);
+ uint64_t write_other = GetDBStats(InternalStats::kIntStatsWriteDoneByOther);
+ uint64_t write_self = GetDBStats(InternalStats::kIntStatsWriteDoneBySelf);
+ uint64_t wal_bytes = GetDBStats(InternalStats::kIntStatsWalFileBytes);
+ uint64_t wal_synced = GetDBStats(InternalStats::kIntStatsWalFileSynced);
+ uint64_t write_with_wal = GetDBStats(InternalStats::kIntStatsWriteWithWal);
+ uint64_t write_stall_micros =
+ GetDBStats(InternalStats::kIntStatsWriteStallMicros);
const int kHumanMicrosLen = 32;
char human_micros[kHumanMicrosLen];
NumberToHumanString(write_other + write_self).c_str(),
NumberToHumanString(num_keys_written).c_str(),
NumberToHumanString(write_self).c_str(),
- (write_other + write_self) / static_cast<double>(write_self + 1),
- user_bytes_written / kGB, user_bytes_written / kMB / seconds_up);
+ (write_other + write_self) /
+ std::max(1.0, static_cast<double>(write_self)),
+ user_bytes_written / kGB,
+ user_bytes_written / kMB / std::max(seconds_up, 0.001));
value->append(buf);
// WAL
snprintf(buf, sizeof(buf),
"%.2f writes per sync, written: %.2f GB, %.2f MB/s\n",
NumberToHumanString(write_with_wal).c_str(),
NumberToHumanString(wal_synced).c_str(),
- write_with_wal / static_cast<double>(wal_synced + 1),
- wal_bytes / kGB, wal_bytes / kMB / seconds_up);
+ write_with_wal / std::max(1.0, static_cast<double>(wal_synced)),
+ wal_bytes / kGB, wal_bytes / kMB / std::max(seconds_up, 0.001));
value->append(buf);
// Stall
AppendHumanMicros(write_stall_micros, human_micros, kHumanMicrosLen, true);
NumberToHumanString(interval_num_keys_written).c_str(),
NumberToHumanString(interval_write_self).c_str(),
static_cast<double>(interval_write_other + interval_write_self) /
- (interval_write_self + 1),
+ std::max(1.0, static_cast<double>(interval_write_self)),
(user_bytes_written - db_stats_snapshot_.ingest_bytes) / kMB,
(user_bytes_written - db_stats_snapshot_.ingest_bytes) / kMB /
std::max(interval_seconds_up, 0.001)),
uint64_t interval_wal_synced = wal_synced - db_stats_snapshot_.wal_synced;
uint64_t interval_wal_bytes = wal_bytes - db_stats_snapshot_.wal_bytes;
- snprintf(
- buf, sizeof(buf),
- "Interval WAL: %s writes, %s syncs, "
- "%.2f writes per sync, written: %.2f MB, %.2f MB/s\n",
- NumberToHumanString(interval_write_with_wal).c_str(),
- NumberToHumanString(interval_wal_synced).c_str(),
- interval_write_with_wal / static_cast<double>(interval_wal_synced + 1),
- interval_wal_bytes / kGB,
- interval_wal_bytes / kMB / std::max(interval_seconds_up, 0.001));
+ snprintf(buf, sizeof(buf),
+ "Interval WAL: %s writes, %s syncs, "
+ "%.2f writes per sync, written: %.2f GB, %.2f MB/s\n",
+ NumberToHumanString(interval_write_with_wal).c_str(),
+ NumberToHumanString(interval_wal_synced).c_str(),
+ interval_write_with_wal /
+ std::max(1.0, static_cast<double>(interval_wal_synced)),
+ interval_wal_bytes / kGB,
+ interval_wal_bytes / kMB / std::max(interval_seconds_up, 0.001));
value->append(buf);
// Stall
*/
void InternalStats::DumpCFMapStats(
std::map<std::string, std::string>* cf_stats) {
+ const VersionStorageInfo* vstorage = cfd_->current()->storage_info();
CompactionStats compaction_stats_sum;
std::map<int, std::map<LevelStatType, double>> levels_stats;
- DumpCFMapStats(&levels_stats, &compaction_stats_sum);
+ DumpCFMapStats(vstorage, &levels_stats, &compaction_stats_sum);
for (auto const& level_ent : levels_stats) {
auto level_str =
- level_ent.first == -1 ? "Sum" : "L" + ToString(level_ent.first);
+ level_ent.first == -1 ? "Sum" : "L" + std::to_string(level_ent.first);
for (auto const& stat_ent : level_ent.second) {
auto stat_type = stat_ent.first;
auto key_str =
}
void InternalStats::DumpCFMapStats(
+ const VersionStorageInfo* vstorage,
std::map<int, std::map<LevelStatType, double>>* levels_stats,
CompactionStats* compaction_stats_sum) {
- const VersionStorageInfo* vstorage = cfd_->current()->storage_info();
+ assert(vstorage);
int num_levels_to_check =
(cfd_->ioptions()->compaction_style != kCompactionStyleFIFO)
int files = vstorage->NumLevelFiles(level);
total_files += files;
total_files_being_compacted += files_being_compacted[level];
- if (comp_stats_[level].micros > 0 || files > 0) {
+ if (comp_stats_[level].micros > 0 || comp_stats_[level].cpu_micros > 0 ||
+ files > 0) {
compaction_stats_sum->Add(comp_stats_[level]);
total_file_size += vstorage->NumLevelBytes(level);
uint64_t input_bytes;
if (level == 0) {
input_bytes = curr_ingest;
} else {
- input_bytes = comp_stats_[level].bytes_read_non_output_levels;
+ input_bytes = comp_stats_[level].bytes_read_non_output_levels +
+ comp_stats_[level].bytes_read_blob;
}
double w_amp =
(input_bytes == 0)
? 0.0
- : static_cast<double>(comp_stats_[level].bytes_written) /
+ : static_cast<double>(comp_stats_[level].bytes_written +
+ comp_stats_[level].bytes_written_blob) /
input_bytes;
std::map<LevelStatType, double> level_stats;
PrepareLevelStats(&level_stats, files, files_being_compacted[level],
}
}
// Cumulative summary
- double w_amp = compaction_stats_sum->bytes_written /
- static_cast<double>(curr_ingest + 1);
+ double w_amp = (0 == curr_ingest)
+ ? 0.0
+ : (compaction_stats_sum->bytes_written +
+ compaction_stats_sum->bytes_written_blob) /
+ static_cast<double>(curr_ingest);
// Stats summary across levels
std::map<LevelStatType, double> sum_stats;
PrepareLevelStats(&sum_stats, total_files, total_files_being_compacted,
}
void InternalStats::DumpCFStats(std::string* value) {
- DumpCFStatsNoFileHistogram(value);
+ DumpCFStatsNoFileHistogram(/*is_periodic=*/false, value);
DumpCFFileHistogram(value);
}
-void InternalStats::DumpCFStatsNoFileHistogram(std::string* value) {
+void InternalStats::DumpCFStatsNoFileHistogram(bool is_periodic,
+ std::string* value) {
char buf[2000];
// Per-ColumnFamily stats
PrintLevelStatsHeader(buf, sizeof(buf), cfd_->GetName(), "Level");
value->append(buf);
// Print stats for each level
+ const VersionStorageInfo* vstorage = cfd_->current()->storage_info();
std::map<int, std::map<LevelStatType, double>> levels_stats;
CompactionStats compaction_stats_sum;
- DumpCFMapStats(&levels_stats, &compaction_stats_sum);
+ DumpCFMapStats(vstorage, &levels_stats, &compaction_stats_sum);
for (int l = 0; l < number_levels_; ++l) {
if (levels_stats.find(l) != levels_stats.end()) {
- PrintLevelStats(buf, sizeof(buf), "L" + ToString(l), levels_stats[l]);
+ PrintLevelStats(buf, sizeof(buf), "L" + std::to_string(l),
+ levels_stats[l]);
value->append(buf);
}
}
CompactionStats interval_stats(compaction_stats_sum);
interval_stats.Subtract(cf_stats_snapshot_.comp_stats);
double w_amp =
- interval_stats.bytes_written / static_cast<double>(interval_ingest);
+ (interval_stats.bytes_written + interval_stats.bytes_written_blob) /
+ static_cast<double>(interval_ingest);
PrintLevelStats(buf, sizeof(buf), "Int", 0, 0, 0, 0, w_amp, interval_stats);
value->append(buf);
}
}
- double seconds_up = (env_->NowMicros() - started_at_ + 1) / kMicrosInSec;
+ const auto blob_st = vstorage->GetBlobStats();
+
+ snprintf(buf, sizeof(buf),
+ "\nBlob file count: %" ROCKSDB_PRIszt
+ ", total size: %.1f GB, garbage size: %.1f GB, space amp: %.1f\n\n",
+ vstorage->GetBlobFiles().size(), blob_st.total_file_size / kGB,
+ blob_st.total_garbage_size / kGB, blob_st.space_amp);
+ value->append(buf);
+
+ uint64_t now_micros = clock_->NowMicros();
+ double seconds_up = (now_micros - started_at_) / kMicrosInSec;
double interval_seconds_up = seconds_up - cf_stats_snapshot_.seconds_up;
snprintf(buf, sizeof(buf), "Uptime(secs): %.1f total, %.1f interval\n",
seconds_up, interval_seconds_up);
uint64_t compact_micros = 0;
for (int level = 0; level < number_levels_; level++) {
compact_bytes_read += comp_stats_[level].bytes_read_output_level +
- comp_stats_[level].bytes_read_non_output_levels;
- compact_bytes_write += comp_stats_[level].bytes_written;
+ comp_stats_[level].bytes_read_non_output_levels +
+ comp_stats_[level].bytes_read_blob;
+ compact_bytes_write += comp_stats_[level].bytes_written +
+ comp_stats_[level].bytes_written_blob;
compact_micros += comp_stats_[level].micros;
}
snprintf(buf, sizeof(buf),
"Cumulative compaction: %.2f GB write, %.2f MB/s write, "
"%.2f GB read, %.2f MB/s read, %.1f seconds\n",
- compact_bytes_write / kGB, compact_bytes_write / kMB / seconds_up,
- compact_bytes_read / kGB, compact_bytes_read / kMB / seconds_up,
+ compact_bytes_write / kGB,
+ compact_bytes_write / kMB / std::max(seconds_up, 0.001),
+ compact_bytes_read / kGB,
+ compact_bytes_read / kMB / std::max(seconds_up, 0.001),
compact_micros / kMicrosInSec);
value->append(buf);
interval_compact_bytes_read / kMB / std::max(interval_seconds_up, 0.001),
interval_compact_micros / kMicrosInSec);
value->append(buf);
- cf_stats_snapshot_.compact_bytes_write = compact_bytes_write;
- cf_stats_snapshot_.compact_bytes_read = compact_bytes_read;
- cf_stats_snapshot_.compact_micros = compact_micros;
+ if (is_periodic) {
+ cf_stats_snapshot_.compact_bytes_write = compact_bytes_write;
+ cf_stats_snapshot_.compact_bytes_read = compact_bytes_read;
+ cf_stats_snapshot_.compact_micros = compact_micros;
+ }
snprintf(buf, sizeof(buf),
"Stalls(count): %" PRIu64
total_stall_count - cf_stats_snapshot_.stall_count);
value->append(buf);
- cf_stats_snapshot_.seconds_up = seconds_up;
- cf_stats_snapshot_.ingest_bytes_flush = flush_ingest;
- cf_stats_snapshot_.ingest_bytes_addfile = add_file_ingest;
- cf_stats_snapshot_.ingest_files_addfile = ingest_files_addfile;
- cf_stats_snapshot_.ingest_l0_files_addfile = ingest_l0_files_addfile;
- cf_stats_snapshot_.ingest_keys_addfile = ingest_keys_addfile;
- cf_stats_snapshot_.comp_stats = compaction_stats_sum;
- cf_stats_snapshot_.stall_count = total_stall_count;
+ if (is_periodic) {
+ cf_stats_snapshot_.seconds_up = seconds_up;
+ cf_stats_snapshot_.ingest_bytes_flush = flush_ingest;
+ cf_stats_snapshot_.ingest_bytes_addfile = add_file_ingest;
+ cf_stats_snapshot_.ingest_files_addfile = ingest_files_addfile;
+ cf_stats_snapshot_.ingest_l0_files_addfile = ingest_l0_files_addfile;
+ cf_stats_snapshot_.ingest_keys_addfile = ingest_keys_addfile;
+ cf_stats_snapshot_.comp_stats = compaction_stats_sum;
+ cf_stats_snapshot_.stall_count = total_stall_count;
+ }
+
+ // Do not gather cache entry stats during CFStats because DB
+ // mutex is held. Only dump last cached collection (rely on DB
+ // periodic stats dump to update)
+ if (cache_entry_stats_collector_) {
+ CacheEntryRoleStats stats;
+    // GetStats() is thread-safe, so it is safe to call here even though
+    // the DB mutex is held (see comment above); it only reads the
+    // last cached collection.
+    cache_entry_stats_collector_->GetStats(&stats);
+
+ constexpr uint64_t kDayInMicros = uint64_t{86400} * 1000000U;
+
+    // Skip if stats are extremely old (> 1 day), including the case where
+    // they have never been populated (last_end_time_micros_ still zero).
+ if (now_micros - stats.last_end_time_micros_ < kDayInMicros) {
+ value->append(stats.ToString(clock_));
+ }
+ }
}
void InternalStats::DumpCFFileHistogram(std::string* value) {
- char buf[2000];
- snprintf(buf, sizeof(buf),
- "\n** File Read Latency Histogram By Level [%s] **\n",
- cfd_->GetName().c_str());
- value->append(buf);
+ assert(value);
+ assert(cfd_);
+
+ std::ostringstream oss;
+ oss << "\n** File Read Latency Histogram By Level [" << cfd_->GetName()
+ << "] **\n";
for (int level = 0; level < number_levels_; level++) {
if (!file_read_latency_[level].Empty()) {
- char buf2[5000];
- snprintf(buf2, sizeof(buf2),
- "** Level %d read latency histogram (micros):\n%s\n", level,
- file_read_latency_[level].ToString().c_str());
- value->append(buf2);
+ oss << "** Level " << level << " read latency histogram (micros):\n"
+ << file_read_latency_[level].ToString() << '\n';
}
}
+
+ if (!blob_file_read_latency_.Empty()) {
+ oss << "** Blob file read latency histogram (micros):\n"
+ << blob_file_read_latency_.ToString() << '\n';
+ }
+
+ value->append(oss.str());
}
#else
#endif // !ROCKSDB_LITE
-} // namespace rocksdb
+} // namespace ROCKSDB_NAMESPACE