1 // This source code is licensed under both the GPLv2 (found in the
2 // COPYING file in the root directory) and Apache 2.0 License
3 // (found in the LICENSE.Apache file in the root directory).
5 // Copyright (c) 2011 The LevelDB Authors. All rights reserved.
6 // Use of this source code is governed by a BSD-style license that can be
7 // found in the LICENSE file. See the AUTHORS file for names of contributors.
9 #include "db/internal_stats.h"
11 #ifndef __STDC_FORMAT_MACROS
12 #define __STDC_FORMAT_MACROS
22 #include "db/column_family.h"
23 #include "db/db_impl.h"
24 #include "table/block_based_table_factory.h"
25 #include "util/string_util.h"
31 const std::map
<LevelStatType
, LevelStat
> InternalStats::compaction_level_stats
=
33 {LevelStatType::NUM_FILES
, LevelStat
{"NumFiles", "Files"}},
34 {LevelStatType::COMPACTED_FILES
,
35 LevelStat
{"CompactedFiles", "CompactedFiles"}},
36 {LevelStatType::SIZE_BYTES
, LevelStat
{"SizeBytes", "Size"}},
37 {LevelStatType::SCORE
, LevelStat
{"Score", "Score"}},
38 {LevelStatType::READ_GB
, LevelStat
{"ReadGB", "Read(GB)"}},
39 {LevelStatType::RN_GB
, LevelStat
{"RnGB", "Rn(GB)"}},
40 {LevelStatType::RNP1_GB
, LevelStat
{"Rnp1GB", "Rnp1(GB)"}},
41 {LevelStatType::WRITE_GB
, LevelStat
{"WriteGB", "Write(GB)"}},
42 {LevelStatType::W_NEW_GB
, LevelStat
{"WnewGB", "Wnew(GB)"}},
43 {LevelStatType::MOVED_GB
, LevelStat
{"MovedGB", "Moved(GB)"}},
44 {LevelStatType::WRITE_AMP
, LevelStat
{"WriteAmp", "W-Amp"}},
45 {LevelStatType::READ_MBPS
, LevelStat
{"ReadMBps", "Rd(MB/s)"}},
46 {LevelStatType::WRITE_MBPS
, LevelStat
{"WriteMBps", "Wr(MB/s)"}},
47 {LevelStatType::COMP_SEC
, LevelStat
{"CompSec", "Comp(sec)"}},
48 {LevelStatType::COMP_CPU_SEC
,
49 LevelStat
{"CompMergeCPU", "CompMergeCPU(sec)"}},
50 {LevelStatType::COMP_COUNT
, LevelStat
{"CompCount", "Comp(cnt)"}},
51 {LevelStatType::AVG_SEC
, LevelStat
{"AvgSec", "Avg(sec)"}},
52 {LevelStatType::KEY_IN
, LevelStat
{"KeyIn", "KeyIn"}},
53 {LevelStatType::KEY_DROP
, LevelStat
{"KeyDrop", "KeyDrop"}},
// Unit conversion constants used throughout the stats formatting below.
const double kMB = 1048576.0;        // bytes per mebibyte
const double kGB = kMB * 1024;       // bytes per gibibyte
const double kMicrosInSec = 1000000.0;  // microseconds per second
61 void PrintLevelStatsHeader(char* buf
, size_t len
, const std::string
& cf_name
,
62 const std::string
& group_by
) {
64 snprintf(buf
, len
, "\n** Compaction Stats [%s] **\n", cf_name
.c_str());
65 auto hdr
= [](LevelStatType t
) {
66 return InternalStats::compaction_level_stats
.at(t
).header_name
.c_str();
68 int line_size
= snprintf(
69 buf
+ written_size
, len
- written_size
,
70 "%s %s %s %s %s %s %s %s %s %s %s %s %s %s %s %s %s %s %s\n",
71 // Note that we skip COMPACTED_FILES and merge it with Files column
72 group_by
.c_str(), hdr(LevelStatType::NUM_FILES
),
73 hdr(LevelStatType::SIZE_BYTES
), hdr(LevelStatType::SCORE
),
74 hdr(LevelStatType::READ_GB
), hdr(LevelStatType::RN_GB
),
75 hdr(LevelStatType::RNP1_GB
), hdr(LevelStatType::WRITE_GB
),
76 hdr(LevelStatType::W_NEW_GB
), hdr(LevelStatType::MOVED_GB
),
77 hdr(LevelStatType::WRITE_AMP
), hdr(LevelStatType::READ_MBPS
),
78 hdr(LevelStatType::WRITE_MBPS
), hdr(LevelStatType::COMP_SEC
),
79 hdr(LevelStatType::COMP_CPU_SEC
), hdr(LevelStatType::COMP_COUNT
),
80 hdr(LevelStatType::AVG_SEC
), hdr(LevelStatType::KEY_IN
),
81 hdr(LevelStatType::KEY_DROP
));
83 written_size
+= line_size
;
84 snprintf(buf
+ written_size
, len
- written_size
, "%s\n",
85 std::string(line_size
, '-').c_str());
88 void PrepareLevelStats(std::map
<LevelStatType
, double>* level_stats
,
89 int num_files
, int being_compacted
,
90 double total_file_size
, double score
, double w_amp
,
91 const InternalStats::CompactionStats
& stats
) {
93 stats
.bytes_read_non_output_levels
+ stats
.bytes_read_output_level
;
94 int64_t bytes_new
= stats
.bytes_written
- stats
.bytes_read_output_level
;
95 double elapsed
= (stats
.micros
+ 1) / kMicrosInSec
;
97 (*level_stats
)[LevelStatType::NUM_FILES
] = num_files
;
98 (*level_stats
)[LevelStatType::COMPACTED_FILES
] = being_compacted
;
99 (*level_stats
)[LevelStatType::SIZE_BYTES
] = total_file_size
;
100 (*level_stats
)[LevelStatType::SCORE
] = score
;
101 (*level_stats
)[LevelStatType::READ_GB
] = bytes_read
/ kGB
;
102 (*level_stats
)[LevelStatType::RN_GB
] =
103 stats
.bytes_read_non_output_levels
/ kGB
;
104 (*level_stats
)[LevelStatType::RNP1_GB
] = stats
.bytes_read_output_level
/ kGB
;
105 (*level_stats
)[LevelStatType::WRITE_GB
] = stats
.bytes_written
/ kGB
;
106 (*level_stats
)[LevelStatType::W_NEW_GB
] = bytes_new
/ kGB
;
107 (*level_stats
)[LevelStatType::MOVED_GB
] = stats
.bytes_moved
/ kGB
;
108 (*level_stats
)[LevelStatType::WRITE_AMP
] = w_amp
;
109 (*level_stats
)[LevelStatType::READ_MBPS
] = bytes_read
/ kMB
/ elapsed
;
110 (*level_stats
)[LevelStatType::WRITE_MBPS
] =
111 stats
.bytes_written
/ kMB
/ elapsed
;
112 (*level_stats
)[LevelStatType::COMP_SEC
] = stats
.micros
/ kMicrosInSec
;
113 (*level_stats
)[LevelStatType::COMP_CPU_SEC
] = stats
.cpu_micros
/ kMicrosInSec
;
114 (*level_stats
)[LevelStatType::COMP_COUNT
] = stats
.count
;
115 (*level_stats
)[LevelStatType::AVG_SEC
] =
116 stats
.count
== 0 ? 0 : stats
.micros
/ kMicrosInSec
/ stats
.count
;
117 (*level_stats
)[LevelStatType::KEY_IN
] =
118 static_cast<double>(stats
.num_input_records
);
119 (*level_stats
)[LevelStatType::KEY_DROP
] =
120 static_cast<double>(stats
.num_dropped_records
);
123 void PrintLevelStats(char* buf
, size_t len
, const std::string
& name
,
124 const std::map
<LevelStatType
, double>& stat_value
) {
128 "%6d/%-3d " /* Files */
131 "%8.1f " /* Read(GB) */
132 "%7.1f " /* Rn(GB) */
133 "%8.1f " /* Rnp1(GB) */
134 "%9.1f " /* Write(GB) */
135 "%8.1f " /* Wnew(GB) */
136 "%9.1f " /* Moved(GB) */
138 "%8.1f " /* Rd(MB/s) */
139 "%8.1f " /* Wr(MB/s) */
140 "%9.2f " /* Comp(sec) */
141 "%17.2f " /* CompMergeCPU(sec) */
142 "%9d " /* Comp(cnt) */
143 "%8.3f " /* Avg(sec) */
145 "%6s\n", /* KeyDrop */
146 name
.c_str(), static_cast<int>(stat_value
.at(LevelStatType::NUM_FILES
)),
147 static_cast<int>(stat_value
.at(LevelStatType::COMPACTED_FILES
)),
149 static_cast<uint64_t>(stat_value
.at(LevelStatType::SIZE_BYTES
)))
151 stat_value
.at(LevelStatType::SCORE
),
152 stat_value
.at(LevelStatType::READ_GB
),
153 stat_value
.at(LevelStatType::RN_GB
),
154 stat_value
.at(LevelStatType::RNP1_GB
),
155 stat_value
.at(LevelStatType::WRITE_GB
),
156 stat_value
.at(LevelStatType::W_NEW_GB
),
157 stat_value
.at(LevelStatType::MOVED_GB
),
158 stat_value
.at(LevelStatType::WRITE_AMP
),
159 stat_value
.at(LevelStatType::READ_MBPS
),
160 stat_value
.at(LevelStatType::WRITE_MBPS
),
161 stat_value
.at(LevelStatType::COMP_SEC
),
162 stat_value
.at(LevelStatType::COMP_CPU_SEC
),
163 static_cast<int>(stat_value
.at(LevelStatType::COMP_COUNT
)),
164 stat_value
.at(LevelStatType::AVG_SEC
),
166 static_cast<std::int64_t>(stat_value
.at(LevelStatType::KEY_IN
)))
169 static_cast<std::int64_t>(stat_value
.at(LevelStatType::KEY_DROP
)))
173 void PrintLevelStats(char* buf
, size_t len
, const std::string
& name
,
174 int num_files
, int being_compacted
, double total_file_size
,
175 double score
, double w_amp
,
176 const InternalStats::CompactionStats
& stats
) {
177 std::map
<LevelStatType
, double> level_stats
;
178 PrepareLevelStats(&level_stats
, num_files
, being_compacted
, total_file_size
,
179 score
, w_amp
, stats
);
180 PrintLevelStats(buf
, len
, name
, level_stats
);
183 // Assumes that trailing numbers represent an optional argument. This requires
184 // property names to not end with numbers.
185 std::pair
<Slice
, Slice
> GetPropertyNameAndArg(const Slice
& property
) {
186 Slice name
= property
, arg
= property
;
188 while (sfx_len
< property
.size() &&
189 isdigit(property
[property
.size() - sfx_len
- 1])) {
192 name
.remove_suffix(sfx_len
);
193 arg
.remove_prefix(property
.size() - sfx_len
);
196 } // anonymous namespace
// Property-name suffix strings (without the "rocksdb." prefix). Each one is
// combined with rocksdb_prefix below to form the full user-visible property
// name exposed via DB::Properties.
static const std::string rocksdb_prefix = "rocksdb.";

static const std::string num_files_at_level_prefix = "num-files-at-level";
static const std::string compression_ratio_at_level_prefix =
    "compression-ratio-at-level";
static const std::string allstats = "stats";
static const std::string sstables = "sstables";
static const std::string cfstats = "cfstats";
static const std::string cfstats_no_file_histogram =
    "cfstats-no-file-histogram";
static const std::string cf_file_histogram = "cf-file-histogram";
static const std::string dbstats = "dbstats";
static const std::string levelstats = "levelstats";
static const std::string num_immutable_mem_table = "num-immutable-mem-table";
static const std::string num_immutable_mem_table_flushed =
    "num-immutable-mem-table-flushed";
static const std::string mem_table_flush_pending = "mem-table-flush-pending";
static const std::string compaction_pending = "compaction-pending";
static const std::string background_errors = "background-errors";
static const std::string cur_size_active_mem_table =
    "cur-size-active-mem-table";
static const std::string cur_size_all_mem_tables = "cur-size-all-mem-tables";
static const std::string size_all_mem_tables = "size-all-mem-tables";
static const std::string num_entries_active_mem_table =
    "num-entries-active-mem-table";
static const std::string num_entries_imm_mem_tables =
    "num-entries-imm-mem-tables";
static const std::string num_deletes_active_mem_table =
    "num-deletes-active-mem-table";
static const std::string num_deletes_imm_mem_tables =
    "num-deletes-imm-mem-tables";
static const std::string estimate_num_keys = "estimate-num-keys";
static const std::string estimate_table_readers_mem =
    "estimate-table-readers-mem";
static const std::string is_file_deletions_enabled =
    "is-file-deletions-enabled";
static const std::string num_snapshots = "num-snapshots";
static const std::string oldest_snapshot_time = "oldest-snapshot-time";
static const std::string num_live_versions = "num-live-versions";
// Note: variable name says "version" but the property is the *super* version
// number (see the string value).
static const std::string current_version_number =
    "current-super-version-number";
static const std::string estimate_live_data_size = "estimate-live-data-size";
static const std::string min_log_number_to_keep_str = "min-log-number-to-keep";
static const std::string min_obsolete_sst_number_to_keep_str =
    "min-obsolete-sst-number-to-keep";
static const std::string base_level_str = "base-level";
static const std::string total_sst_files_size = "total-sst-files-size";
static const std::string live_sst_files_size = "live-sst-files-size";
static const std::string estimate_pending_comp_bytes =
    "estimate-pending-compaction-bytes";
static const std::string aggregated_table_properties =
    "aggregated-table-properties";
static const std::string aggregated_table_properties_at_level =
    aggregated_table_properties + "-at-level";
static const std::string num_running_compactions = "num-running-compactions";
static const std::string num_running_flushes = "num-running-flushes";
static const std::string actual_delayed_write_rate =
    "actual-delayed-write-rate";
static const std::string is_write_stopped = "is-write-stopped";
static const std::string estimate_oldest_key_time = "estimate-oldest-key-time";
static const std::string block_cache_capacity = "block-cache-capacity";
static const std::string block_cache_usage = "block-cache-usage";
static const std::string block_cache_pinned_usage = "block-cache-pinned-usage";
static const std::string options_statistics = "options-statistics";
// Definitions of the public DB::Properties constants (declared in the public
// header): each full property name is the "rocksdb." prefix concatenated
// with the matching suffix string defined above.
const std::string DB::Properties::kNumFilesAtLevelPrefix =
    rocksdb_prefix + num_files_at_level_prefix;
const std::string DB::Properties::kCompressionRatioAtLevelPrefix =
    rocksdb_prefix + compression_ratio_at_level_prefix;
const std::string DB::Properties::kStats = rocksdb_prefix + allstats;
const std::string DB::Properties::kSSTables = rocksdb_prefix + sstables;
const std::string DB::Properties::kCFStats = rocksdb_prefix + cfstats;
const std::string DB::Properties::kCFStatsNoFileHistogram =
    rocksdb_prefix + cfstats_no_file_histogram;
const std::string DB::Properties::kCFFileHistogram =
    rocksdb_prefix + cf_file_histogram;
const std::string DB::Properties::kDBStats = rocksdb_prefix + dbstats;
const std::string DB::Properties::kLevelStats = rocksdb_prefix + levelstats;
const std::string DB::Properties::kNumImmutableMemTable =
    rocksdb_prefix + num_immutable_mem_table;
const std::string DB::Properties::kNumImmutableMemTableFlushed =
    rocksdb_prefix + num_immutable_mem_table_flushed;
const std::string DB::Properties::kMemTableFlushPending =
    rocksdb_prefix + mem_table_flush_pending;
const std::string DB::Properties::kCompactionPending =
    rocksdb_prefix + compaction_pending;
const std::string DB::Properties::kNumRunningCompactions =
    rocksdb_prefix + num_running_compactions;
const std::string DB::Properties::kNumRunningFlushes =
    rocksdb_prefix + num_running_flushes;
const std::string DB::Properties::kBackgroundErrors =
    rocksdb_prefix + background_errors;
const std::string DB::Properties::kCurSizeActiveMemTable =
    rocksdb_prefix + cur_size_active_mem_table;
const std::string DB::Properties::kCurSizeAllMemTables =
    rocksdb_prefix + cur_size_all_mem_tables;
const std::string DB::Properties::kSizeAllMemTables =
    rocksdb_prefix + size_all_mem_tables;
const std::string DB::Properties::kNumEntriesActiveMemTable =
    rocksdb_prefix + num_entries_active_mem_table;
const std::string DB::Properties::kNumEntriesImmMemTables =
    rocksdb_prefix + num_entries_imm_mem_tables;
const std::string DB::Properties::kNumDeletesActiveMemTable =
    rocksdb_prefix + num_deletes_active_mem_table;
const std::string DB::Properties::kNumDeletesImmMemTables =
    rocksdb_prefix + num_deletes_imm_mem_tables;
const std::string DB::Properties::kEstimateNumKeys =
    rocksdb_prefix + estimate_num_keys;
const std::string DB::Properties::kEstimateTableReadersMem =
    rocksdb_prefix + estimate_table_readers_mem;
const std::string DB::Properties::kIsFileDeletionsEnabled =
    rocksdb_prefix + is_file_deletions_enabled;
const std::string DB::Properties::kNumSnapshots =
    rocksdb_prefix + num_snapshots;
const std::string DB::Properties::kOldestSnapshotTime =
    rocksdb_prefix + oldest_snapshot_time;
const std::string DB::Properties::kNumLiveVersions =
    rocksdb_prefix + num_live_versions;
const std::string DB::Properties::kCurrentSuperVersionNumber =
    rocksdb_prefix + current_version_number;
const std::string DB::Properties::kEstimateLiveDataSize =
    rocksdb_prefix + estimate_live_data_size;
const std::string DB::Properties::kMinLogNumberToKeep =
    rocksdb_prefix + min_log_number_to_keep_str;
const std::string DB::Properties::kMinObsoleteSstNumberToKeep =
    rocksdb_prefix + min_obsolete_sst_number_to_keep_str;
const std::string DB::Properties::kTotalSstFilesSize =
    rocksdb_prefix + total_sst_files_size;
const std::string DB::Properties::kLiveSstFilesSize =
    rocksdb_prefix + live_sst_files_size;
const std::string DB::Properties::kBaseLevel = rocksdb_prefix + base_level_str;
const std::string DB::Properties::kEstimatePendingCompactionBytes =
    rocksdb_prefix + estimate_pending_comp_bytes;
const std::string DB::Properties::kAggregatedTableProperties =
    rocksdb_prefix + aggregated_table_properties;
const std::string DB::Properties::kAggregatedTablePropertiesAtLevel =
    rocksdb_prefix + aggregated_table_properties_at_level;
const std::string DB::Properties::kActualDelayedWriteRate =
    rocksdb_prefix + actual_delayed_write_rate;
const std::string DB::Properties::kIsWriteStopped =
    rocksdb_prefix + is_write_stopped;
const std::string DB::Properties::kEstimateOldestKeyTime =
    rocksdb_prefix + estimate_oldest_key_time;
const std::string DB::Properties::kBlockCacheCapacity =
    rocksdb_prefix + block_cache_capacity;
const std::string DB::Properties::kBlockCacheUsage =
    rocksdb_prefix + block_cache_usage;
const std::string DB::Properties::kBlockCachePinnedUsage =
    rocksdb_prefix + block_cache_pinned_usage;
const std::string DB::Properties::kOptionsStatistics =
    rocksdb_prefix + options_statistics;
350 const std::unordered_map
<std::string
, DBPropertyInfo
>
351 InternalStats::ppt_name_to_info
= {
352 {DB::Properties::kNumFilesAtLevelPrefix
,
353 {false, &InternalStats::HandleNumFilesAtLevel
, nullptr, nullptr,
355 {DB::Properties::kCompressionRatioAtLevelPrefix
,
356 {false, &InternalStats::HandleCompressionRatioAtLevelPrefix
, nullptr,
358 {DB::Properties::kLevelStats
,
359 {false, &InternalStats::HandleLevelStats
, nullptr, nullptr, nullptr}},
360 {DB::Properties::kStats
,
361 {false, &InternalStats::HandleStats
, nullptr, nullptr, nullptr}},
362 {DB::Properties::kCFStats
,
363 {false, &InternalStats::HandleCFStats
, nullptr,
364 &InternalStats::HandleCFMapStats
, nullptr}},
365 {DB::Properties::kCFStatsNoFileHistogram
,
366 {false, &InternalStats::HandleCFStatsNoFileHistogram
, nullptr, nullptr,
368 {DB::Properties::kCFFileHistogram
,
369 {false, &InternalStats::HandleCFFileHistogram
, nullptr, nullptr,
371 {DB::Properties::kDBStats
,
372 {false, &InternalStats::HandleDBStats
, nullptr, nullptr, nullptr}},
373 {DB::Properties::kSSTables
,
374 {false, &InternalStats::HandleSsTables
, nullptr, nullptr, nullptr}},
375 {DB::Properties::kAggregatedTableProperties
,
376 {false, &InternalStats::HandleAggregatedTableProperties
, nullptr,
378 {DB::Properties::kAggregatedTablePropertiesAtLevel
,
379 {false, &InternalStats::HandleAggregatedTablePropertiesAtLevel
,
380 nullptr, nullptr, nullptr}},
381 {DB::Properties::kNumImmutableMemTable
,
382 {false, nullptr, &InternalStats::HandleNumImmutableMemTable
, nullptr,
384 {DB::Properties::kNumImmutableMemTableFlushed
,
385 {false, nullptr, &InternalStats::HandleNumImmutableMemTableFlushed
,
387 {DB::Properties::kMemTableFlushPending
,
388 {false, nullptr, &InternalStats::HandleMemTableFlushPending
, nullptr,
390 {DB::Properties::kCompactionPending
,
391 {false, nullptr, &InternalStats::HandleCompactionPending
, nullptr,
393 {DB::Properties::kBackgroundErrors
,
394 {false, nullptr, &InternalStats::HandleBackgroundErrors
, nullptr,
396 {DB::Properties::kCurSizeActiveMemTable
,
397 {false, nullptr, &InternalStats::HandleCurSizeActiveMemTable
, nullptr,
399 {DB::Properties::kCurSizeAllMemTables
,
400 {false, nullptr, &InternalStats::HandleCurSizeAllMemTables
, nullptr,
402 {DB::Properties::kSizeAllMemTables
,
403 {false, nullptr, &InternalStats::HandleSizeAllMemTables
, nullptr,
405 {DB::Properties::kNumEntriesActiveMemTable
,
406 {false, nullptr, &InternalStats::HandleNumEntriesActiveMemTable
,
408 {DB::Properties::kNumEntriesImmMemTables
,
409 {false, nullptr, &InternalStats::HandleNumEntriesImmMemTables
, nullptr,
411 {DB::Properties::kNumDeletesActiveMemTable
,
412 {false, nullptr, &InternalStats::HandleNumDeletesActiveMemTable
,
414 {DB::Properties::kNumDeletesImmMemTables
,
415 {false, nullptr, &InternalStats::HandleNumDeletesImmMemTables
, nullptr,
417 {DB::Properties::kEstimateNumKeys
,
418 {false, nullptr, &InternalStats::HandleEstimateNumKeys
, nullptr,
420 {DB::Properties::kEstimateTableReadersMem
,
421 {true, nullptr, &InternalStats::HandleEstimateTableReadersMem
, nullptr,
423 {DB::Properties::kIsFileDeletionsEnabled
,
424 {false, nullptr, &InternalStats::HandleIsFileDeletionsEnabled
, nullptr,
426 {DB::Properties::kNumSnapshots
,
427 {false, nullptr, &InternalStats::HandleNumSnapshots
, nullptr,
429 {DB::Properties::kOldestSnapshotTime
,
430 {false, nullptr, &InternalStats::HandleOldestSnapshotTime
, nullptr,
432 {DB::Properties::kNumLiveVersions
,
433 {false, nullptr, &InternalStats::HandleNumLiveVersions
, nullptr,
435 {DB::Properties::kCurrentSuperVersionNumber
,
436 {false, nullptr, &InternalStats::HandleCurrentSuperVersionNumber
,
438 {DB::Properties::kEstimateLiveDataSize
,
439 {true, nullptr, &InternalStats::HandleEstimateLiveDataSize
, nullptr,
441 {DB::Properties::kMinLogNumberToKeep
,
442 {false, nullptr, &InternalStats::HandleMinLogNumberToKeep
, nullptr,
444 {DB::Properties::kMinObsoleteSstNumberToKeep
,
445 {false, nullptr, &InternalStats::HandleMinObsoleteSstNumberToKeep
,
447 {DB::Properties::kBaseLevel
,
448 {false, nullptr, &InternalStats::HandleBaseLevel
, nullptr, nullptr}},
449 {DB::Properties::kTotalSstFilesSize
,
450 {false, nullptr, &InternalStats::HandleTotalSstFilesSize
, nullptr,
452 {DB::Properties::kLiveSstFilesSize
,
453 {false, nullptr, &InternalStats::HandleLiveSstFilesSize
, nullptr,
455 {DB::Properties::kEstimatePendingCompactionBytes
,
456 {false, nullptr, &InternalStats::HandleEstimatePendingCompactionBytes
,
458 {DB::Properties::kNumRunningFlushes
,
459 {false, nullptr, &InternalStats::HandleNumRunningFlushes
, nullptr,
461 {DB::Properties::kNumRunningCompactions
,
462 {false, nullptr, &InternalStats::HandleNumRunningCompactions
, nullptr,
464 {DB::Properties::kActualDelayedWriteRate
,
465 {false, nullptr, &InternalStats::HandleActualDelayedWriteRate
, nullptr,
467 {DB::Properties::kIsWriteStopped
,
468 {false, nullptr, &InternalStats::HandleIsWriteStopped
, nullptr,
470 {DB::Properties::kEstimateOldestKeyTime
,
471 {false, nullptr, &InternalStats::HandleEstimateOldestKeyTime
, nullptr,
473 {DB::Properties::kBlockCacheCapacity
,
474 {false, nullptr, &InternalStats::HandleBlockCacheCapacity
, nullptr,
476 {DB::Properties::kBlockCacheUsage
,
477 {false, nullptr, &InternalStats::HandleBlockCacheUsage
, nullptr,
479 {DB::Properties::kBlockCachePinnedUsage
,
480 {false, nullptr, &InternalStats::HandleBlockCachePinnedUsage
, nullptr,
482 {DB::Properties::kOptionsStatistics
,
483 {false, nullptr, nullptr, nullptr,
484 &DBImpl::GetPropertyHandleOptionsStatistics
}},
487 const DBPropertyInfo
* GetPropertyInfo(const Slice
& property
) {
488 std::string ppt_name
= GetPropertyNameAndArg(property
).first
.ToString();
489 auto ppt_info_iter
= InternalStats::ppt_name_to_info
.find(ppt_name
);
490 if (ppt_info_iter
== InternalStats::ppt_name_to_info
.end()) {
493 return &ppt_info_iter
->second
;
496 bool InternalStats::GetStringProperty(const DBPropertyInfo
& property_info
,
497 const Slice
& property
,
498 std::string
* value
) {
499 assert(value
!= nullptr);
500 assert(property_info
.handle_string
!= nullptr);
501 Slice arg
= GetPropertyNameAndArg(property
).second
;
502 return (this->*(property_info
.handle_string
))(value
, arg
);
505 bool InternalStats::GetMapProperty(const DBPropertyInfo
& property_info
,
506 const Slice
& /*property*/,
507 std::map
<std::string
, std::string
>* value
) {
508 assert(value
!= nullptr);
509 assert(property_info
.handle_map
!= nullptr);
510 return (this->*(property_info
.handle_map
))(value
);
513 bool InternalStats::GetIntProperty(const DBPropertyInfo
& property_info
,
514 uint64_t* value
, DBImpl
* db
) {
515 assert(value
!= nullptr);
516 assert(property_info
.handle_int
!= nullptr &&
517 !property_info
.need_out_of_mutex
);
518 db
->mutex_
.AssertHeld();
519 return (this->*(property_info
.handle_int
))(value
, db
, nullptr /* version */);
522 bool InternalStats::GetIntPropertyOutOfMutex(
523 const DBPropertyInfo
& property_info
, Version
* version
, uint64_t* value
) {
524 assert(value
!= nullptr);
525 assert(property_info
.handle_int
!= nullptr &&
526 property_info
.need_out_of_mutex
);
527 return (this->*(property_info
.handle_int
))(value
, nullptr /* db */, version
);
530 bool InternalStats::HandleNumFilesAtLevel(std::string
* value
, Slice suffix
) {
532 const auto* vstorage
= cfd_
->current()->storage_info();
533 bool ok
= ConsumeDecimalNumber(&suffix
, &level
) && suffix
.empty();
534 if (!ok
|| static_cast<int>(level
) >= number_levels_
) {
538 snprintf(buf
, sizeof(buf
), "%d",
539 vstorage
->NumLevelFiles(static_cast<int>(level
)));
545 bool InternalStats::HandleCompressionRatioAtLevelPrefix(std::string
* value
,
548 const auto* vstorage
= cfd_
->current()->storage_info();
549 bool ok
= ConsumeDecimalNumber(&suffix
, &level
) && suffix
.empty();
550 if (!ok
|| level
>= static_cast<uint64_t>(number_levels_
)) {
554 vstorage
->GetEstimatedCompressionRatioAtLevel(static_cast<int>(level
)));
558 bool InternalStats::HandleLevelStats(std::string
* value
, Slice
/*suffix*/) {
560 const auto* vstorage
= cfd_
->current()->storage_info();
561 snprintf(buf
, sizeof(buf
),
562 "Level Files Size(MB)\n"
563 "--------------------\n");
566 for (int level
= 0; level
< number_levels_
; level
++) {
567 snprintf(buf
, sizeof(buf
), "%3d %8d %8.0f\n", level
,
568 vstorage
->NumLevelFiles(level
),
569 vstorage
->NumLevelBytes(level
) / kMB
);
575 bool InternalStats::HandleStats(std::string
* value
, Slice suffix
) {
576 if (!HandleCFStats(value
, suffix
)) {
579 if (!HandleDBStats(value
, suffix
)) {
585 bool InternalStats::HandleCFMapStats(
586 std::map
<std::string
, std::string
>* cf_stats
) {
587 DumpCFMapStats(cf_stats
);
591 bool InternalStats::HandleCFStats(std::string
* value
, Slice
/*suffix*/) {
596 bool InternalStats::HandleCFStatsNoFileHistogram(std::string
* value
,
598 DumpCFStatsNoFileHistogram(value
);
602 bool InternalStats::HandleCFFileHistogram(std::string
* value
,
604 DumpCFFileHistogram(value
);
608 bool InternalStats::HandleDBStats(std::string
* value
, Slice
/*suffix*/) {
613 bool InternalStats::HandleSsTables(std::string
* value
, Slice
/*suffix*/) {
614 auto* current
= cfd_
->current();
615 *value
= current
->DebugString(true, true);
619 bool InternalStats::HandleAggregatedTableProperties(std::string
* value
,
621 std::shared_ptr
<const TableProperties
> tp
;
622 auto s
= cfd_
->current()->GetAggregatedTableProperties(&tp
);
626 *value
= tp
->ToString();
630 bool InternalStats::HandleAggregatedTablePropertiesAtLevel(std::string
* value
,
633 bool ok
= ConsumeDecimalNumber(&suffix
, &level
) && suffix
.empty();
634 if (!ok
|| static_cast<int>(level
) >= number_levels_
) {
637 std::shared_ptr
<const TableProperties
> tp
;
638 auto s
= cfd_
->current()->GetAggregatedTableProperties(
639 &tp
, static_cast<int>(level
));
643 *value
= tp
->ToString();
647 bool InternalStats::HandleNumImmutableMemTable(uint64_t* value
, DBImpl
* /*db*/,
648 Version
* /*version*/) {
649 *value
= cfd_
->imm()->NumNotFlushed();
653 bool InternalStats::HandleNumImmutableMemTableFlushed(uint64_t* value
,
655 Version
* /*version*/) {
656 *value
= cfd_
->imm()->NumFlushed();
660 bool InternalStats::HandleMemTableFlushPending(uint64_t* value
, DBImpl
* /*db*/,
661 Version
* /*version*/) {
662 // Return number of mem tables that are ready to flush (made immutable)
663 *value
= (cfd_
->imm()->IsFlushPending() ? 1 : 0);
667 bool InternalStats::HandleNumRunningFlushes(uint64_t* value
, DBImpl
* db
,
668 Version
* /*version*/) {
669 *value
= db
->num_running_flushes();
673 bool InternalStats::HandleCompactionPending(uint64_t* value
, DBImpl
* /*db*/,
674 Version
* /*version*/) {
675 // 1 if the system already determines at least one compaction is needed.
677 const auto* vstorage
= cfd_
->current()->storage_info();
678 *value
= (cfd_
->compaction_picker()->NeedsCompaction(vstorage
) ? 1 : 0);
682 bool InternalStats::HandleNumRunningCompactions(uint64_t* value
, DBImpl
* db
,
683 Version
* /*version*/) {
684 *value
= db
->num_running_compactions_
;
688 bool InternalStats::HandleBackgroundErrors(uint64_t* value
, DBImpl
* /*db*/,
689 Version
* /*version*/) {
690 // Accumulated number of errors in background flushes or compactions.
691 *value
= GetBackgroundErrorCount();
695 bool InternalStats::HandleCurSizeActiveMemTable(uint64_t* value
, DBImpl
* /*db*/,
696 Version
* /*version*/) {
697 // Current size of the active memtable
698 *value
= cfd_
->mem()->ApproximateMemoryUsage();
702 bool InternalStats::HandleCurSizeAllMemTables(uint64_t* value
, DBImpl
* /*db*/,
703 Version
* /*version*/) {
704 // Current size of the active memtable + immutable memtables
705 *value
= cfd_
->mem()->ApproximateMemoryUsage() +
706 cfd_
->imm()->ApproximateUnflushedMemTablesMemoryUsage();
710 bool InternalStats::HandleSizeAllMemTables(uint64_t* value
, DBImpl
* /*db*/,
711 Version
* /*version*/) {
712 *value
= cfd_
->mem()->ApproximateMemoryUsage() +
713 cfd_
->imm()->ApproximateMemoryUsage();
717 bool InternalStats::HandleNumEntriesActiveMemTable(uint64_t* value
,
719 Version
* /*version*/) {
720 // Current number of entires in the active memtable
721 *value
= cfd_
->mem()->num_entries();
725 bool InternalStats::HandleNumEntriesImmMemTables(uint64_t* value
,
727 Version
* /*version*/) {
728 // Current number of entries in the immutable memtables
729 *value
= cfd_
->imm()->current()->GetTotalNumEntries();
733 bool InternalStats::HandleNumDeletesActiveMemTable(uint64_t* value
,
735 Version
* /*version*/) {
736 // Current number of entires in the active memtable
737 *value
= cfd_
->mem()->num_deletes();
741 bool InternalStats::HandleNumDeletesImmMemTables(uint64_t* value
,
743 Version
* /*version*/) {
744 // Current number of entries in the immutable memtables
745 *value
= cfd_
->imm()->current()->GetTotalNumDeletes();
749 bool InternalStats::HandleEstimateNumKeys(uint64_t* value
, DBImpl
* /*db*/,
750 Version
* /*version*/) {
751 // Estimate number of entries in the column family:
752 // Use estimated entries in tables + total entries in memtables.
753 const auto* vstorage
= cfd_
->current()->storage_info();
754 uint64_t estimate_keys
= cfd_
->mem()->num_entries() +
755 cfd_
->imm()->current()->GetTotalNumEntries() +
756 vstorage
->GetEstimatedActiveKeys();
757 uint64_t estimate_deletes
=
758 cfd_
->mem()->num_deletes() + cfd_
->imm()->current()->GetTotalNumDeletes();
759 *value
= estimate_keys
> estimate_deletes
* 2
760 ? estimate_keys
- (estimate_deletes
* 2)
765 bool InternalStats::HandleNumSnapshots(uint64_t* value
, DBImpl
* db
,
766 Version
* /*version*/) {
767 *value
= db
->snapshots().count();
771 bool InternalStats::HandleOldestSnapshotTime(uint64_t* value
, DBImpl
* db
,
772 Version
* /*version*/) {
773 *value
= static_cast<uint64_t>(db
->snapshots().GetOldestSnapshotTime());
777 bool InternalStats::HandleNumLiveVersions(uint64_t* value
, DBImpl
* /*db*/,
778 Version
* /*version*/) {
779 *value
= cfd_
->GetNumLiveVersions();
783 bool InternalStats::HandleCurrentSuperVersionNumber(uint64_t* value
,
785 Version
* /*version*/) {
786 *value
= cfd_
->GetSuperVersionNumber();
790 bool InternalStats::HandleIsFileDeletionsEnabled(uint64_t* value
, DBImpl
* db
,
791 Version
* /*version*/) {
792 *value
= db
->IsFileDeletionsEnabled();
796 bool InternalStats::HandleBaseLevel(uint64_t* value
, DBImpl
* /*db*/,
797 Version
* /*version*/) {
798 const auto* vstorage
= cfd_
->current()->storage_info();
799 *value
= vstorage
->base_level();
803 bool InternalStats::HandleTotalSstFilesSize(uint64_t* value
, DBImpl
* /*db*/,
804 Version
* /*version*/) {
805 *value
= cfd_
->GetTotalSstFilesSize();
809 bool InternalStats::HandleLiveSstFilesSize(uint64_t* value
, DBImpl
* /*db*/,
810 Version
* /*version*/) {
811 *value
= cfd_
->GetLiveSstFilesSize();
815 bool InternalStats::HandleEstimatePendingCompactionBytes(uint64_t* value
,
817 Version
* /*version*/) {
818 const auto* vstorage
= cfd_
->current()->storage_info();
819 *value
= vstorage
->estimated_compaction_needed_bytes();
823 bool InternalStats::HandleEstimateTableReadersMem(uint64_t* value
,
826 *value
= (version
== nullptr) ? 0 : version
->GetMemoryUsageByTableReaders();
830 bool InternalStats::HandleEstimateLiveDataSize(uint64_t* value
, DBImpl
* /*db*/,
832 const auto* vstorage
= version
->storage_info();
833 *value
= vstorage
->EstimateLiveDataSize();
837 bool InternalStats::HandleMinLogNumberToKeep(uint64_t* value
, DBImpl
* db
,
838 Version
* /*version*/) {
839 *value
= db
->MinLogNumberToKeep();
843 bool InternalStats::HandleMinObsoleteSstNumberToKeep(uint64_t* value
,
845 Version
* /*version*/) {
846 *value
= db
->MinObsoleteSstNumberToKeep();
850 bool InternalStats::HandleActualDelayedWriteRate(uint64_t* value
, DBImpl
* db
,
851 Version
* /*version*/) {
852 const WriteController
& wc
= db
->write_controller();
853 if (!wc
.NeedsDelay()) {
856 *value
= wc
.delayed_write_rate();
861 bool InternalStats::HandleIsWriteStopped(uint64_t* value
, DBImpl
* db
,
862 Version
* /*version*/) {
863 *value
= db
->write_controller().IsStopped() ? 1 : 0;
867 bool InternalStats::HandleEstimateOldestKeyTime(uint64_t* value
, DBImpl
* /*db*/,
868 Version
* /*version*/) {
869 // TODO(yiwu): The property is currently available for fifo compaction
870 // with allow_compaction = false. This is because we don't propagate
871 // oldest_key_time on compaction.
872 if (cfd_
->ioptions()->compaction_style
!= kCompactionStyleFIFO
||
873 cfd_
->GetCurrentMutableCFOptions()
874 ->compaction_options_fifo
.allow_compaction
) {
878 TablePropertiesCollection collection
;
879 auto s
= cfd_
->current()->GetPropertiesOfAllTables(&collection
);
883 *value
= std::numeric_limits
<uint64_t>::max();
884 for (auto& p
: collection
) {
885 *value
= std::min(*value
, p
.second
->oldest_key_time
);
891 *value
= std::min({cfd_
->mem()->ApproximateOldestKeyTime(),
892 cfd_
->imm()->ApproximateOldestKeyTime(), *value
});
894 return *value
> 0 && *value
< std::numeric_limits
<uint64_t>::max();
897 bool InternalStats::HandleBlockCacheStat(Cache
** block_cache
) {
898 assert(block_cache
!= nullptr);
899 auto* table_factory
= cfd_
->ioptions()->table_factory
;
900 assert(table_factory
!= nullptr);
901 if (BlockBasedTableFactory::kName
!= table_factory
->Name()) {
904 auto* table_options
=
905 reinterpret_cast<BlockBasedTableOptions
*>(table_factory
->GetOptions());
906 if (table_options
== nullptr) {
909 *block_cache
= table_options
->block_cache
.get();
910 if (table_options
->no_block_cache
|| *block_cache
== nullptr) {
916 bool InternalStats::HandleBlockCacheCapacity(uint64_t* value
, DBImpl
* /*db*/,
917 Version
* /*version*/) {
919 bool ok
= HandleBlockCacheStat(&block_cache
);
923 *value
= static_cast<uint64_t>(block_cache
->GetCapacity());
927 bool InternalStats::HandleBlockCacheUsage(uint64_t* value
, DBImpl
* /*db*/,
928 Version
* /*version*/) {
930 bool ok
= HandleBlockCacheStat(&block_cache
);
934 *value
= static_cast<uint64_t>(block_cache
->GetUsage());
938 bool InternalStats::HandleBlockCachePinnedUsage(uint64_t* value
, DBImpl
* /*db*/,
939 Version
* /*version*/) {
941 bool ok
= HandleBlockCacheStat(&block_cache
);
945 *value
= static_cast<uint64_t>(block_cache
->GetPinnedUsage());
949 void InternalStats::DumpDBStats(std::string
* value
) {
951 // DB-level stats, only available from default column family
952 double seconds_up
= (env_
->NowMicros() - started_at_
+ 1) / kMicrosInSec
;
953 double interval_seconds_up
= seconds_up
- db_stats_snapshot_
.seconds_up
;
954 snprintf(buf
, sizeof(buf
),
955 "\n** DB Stats **\nUptime(secs): %.1f total, %.1f interval\n",
956 seconds_up
, interval_seconds_up
);
959 uint64_t user_bytes_written
= GetDBStats(InternalStats::BYTES_WRITTEN
);
960 uint64_t num_keys_written
= GetDBStats(InternalStats::NUMBER_KEYS_WRITTEN
);
961 uint64_t write_other
= GetDBStats(InternalStats::WRITE_DONE_BY_OTHER
);
962 uint64_t write_self
= GetDBStats(InternalStats::WRITE_DONE_BY_SELF
);
963 uint64_t wal_bytes
= GetDBStats(InternalStats::WAL_FILE_BYTES
);
964 uint64_t wal_synced
= GetDBStats(InternalStats::WAL_FILE_SYNCED
);
965 uint64_t write_with_wal
= GetDBStats(InternalStats::WRITE_WITH_WAL
);
966 uint64_t write_stall_micros
= GetDBStats(InternalStats::WRITE_STALL_MICROS
);
968 const int kHumanMicrosLen
= 32;
969 char human_micros
[kHumanMicrosLen
];
972 // writes: total number of write requests.
973 // keys: total number of key updates issued by all the write requests
974 // commit groups: number of group commits issued to the DB. Each group can
975 // contain one or more writes.
976 // so writes/keys is the average number of put in multi-put or put
977 // writes/groups is the average group commit size.
979 // The format is the same for interval stats.
980 snprintf(buf
, sizeof(buf
),
981 "Cumulative writes: %s writes, %s keys, %s commit groups, "
982 "%.1f writes per commit group, ingest: %.2f GB, %.2f MB/s\n",
983 NumberToHumanString(write_other
+ write_self
).c_str(),
984 NumberToHumanString(num_keys_written
).c_str(),
985 NumberToHumanString(write_self
).c_str(),
986 (write_other
+ write_self
) / static_cast<double>(write_self
+ 1),
987 user_bytes_written
/ kGB
, user_bytes_written
/ kMB
/ seconds_up
);
990 snprintf(buf
, sizeof(buf
),
991 "Cumulative WAL: %s writes, %s syncs, "
992 "%.2f writes per sync, written: %.2f GB, %.2f MB/s\n",
993 NumberToHumanString(write_with_wal
).c_str(),
994 NumberToHumanString(wal_synced
).c_str(),
995 write_with_wal
/ static_cast<double>(wal_synced
+ 1),
996 wal_bytes
/ kGB
, wal_bytes
/ kMB
/ seconds_up
);
999 AppendHumanMicros(write_stall_micros
, human_micros
, kHumanMicrosLen
, true);
1000 snprintf(buf
, sizeof(buf
), "Cumulative stall: %s, %.1f percent\n",
1002 // 10000 = divide by 1M to get secs, then multiply by 100 for pct
1003 write_stall_micros
/ 10000.0 / std::max(seconds_up
, 0.001));
1007 uint64_t interval_write_other
= write_other
- db_stats_snapshot_
.write_other
;
1008 uint64_t interval_write_self
= write_self
- db_stats_snapshot_
.write_self
;
1009 uint64_t interval_num_keys_written
=
1010 num_keys_written
- db_stats_snapshot_
.num_keys_written
;
1013 "Interval writes: %s writes, %s keys, %s commit groups, "
1014 "%.1f writes per commit group, ingest: %.2f MB, %.2f MB/s\n",
1015 NumberToHumanString(interval_write_other
+ interval_write_self
).c_str(),
1016 NumberToHumanString(interval_num_keys_written
).c_str(),
1017 NumberToHumanString(interval_write_self
).c_str(),
1018 static_cast<double>(interval_write_other
+ interval_write_self
) /
1019 (interval_write_self
+ 1),
1020 (user_bytes_written
- db_stats_snapshot_
.ingest_bytes
) / kMB
,
1021 (user_bytes_written
- db_stats_snapshot_
.ingest_bytes
) / kMB
/
1022 std::max(interval_seconds_up
, 0.001)),
1025 uint64_t interval_write_with_wal
=
1026 write_with_wal
- db_stats_snapshot_
.write_with_wal
;
1027 uint64_t interval_wal_synced
= wal_synced
- db_stats_snapshot_
.wal_synced
;
1028 uint64_t interval_wal_bytes
= wal_bytes
- db_stats_snapshot_
.wal_bytes
;
1032 "Interval WAL: %s writes, %s syncs, "
1033 "%.2f writes per sync, written: %.2f MB, %.2f MB/s\n",
1034 NumberToHumanString(interval_write_with_wal
).c_str(),
1035 NumberToHumanString(interval_wal_synced
).c_str(),
1036 interval_write_with_wal
/ static_cast<double>(interval_wal_synced
+ 1),
1037 interval_wal_bytes
/ kGB
,
1038 interval_wal_bytes
/ kMB
/ std::max(interval_seconds_up
, 0.001));
1042 AppendHumanMicros(write_stall_micros
- db_stats_snapshot_
.write_stall_micros
,
1043 human_micros
, kHumanMicrosLen
, true);
1044 snprintf(buf
, sizeof(buf
), "Interval stall: %s, %.1f percent\n", human_micros
,
1045 // 10000 = divide by 1M to get secs, then multiply by 100 for pct
1046 (write_stall_micros
- db_stats_snapshot_
.write_stall_micros
) /
1047 10000.0 / std::max(interval_seconds_up
, 0.001));
1050 db_stats_snapshot_
.seconds_up
= seconds_up
;
1051 db_stats_snapshot_
.ingest_bytes
= user_bytes_written
;
1052 db_stats_snapshot_
.write_other
= write_other
;
1053 db_stats_snapshot_
.write_self
= write_self
;
1054 db_stats_snapshot_
.num_keys_written
= num_keys_written
;
1055 db_stats_snapshot_
.wal_bytes
= wal_bytes
;
1056 db_stats_snapshot_
.wal_synced
= wal_synced
;
1057 db_stats_snapshot_
.write_with_wal
= write_with_wal
;
1058 db_stats_snapshot_
.write_stall_micros
= write_stall_micros
;
1062 * Dump Compaction Level stats to a map of stat name with "compaction." prefix
1063 * to value in double as string. The level in stat name is represented with
1064 * a prefix "Lx" where "x" is the level number. A special level "Sum"
1065 * represents the sum of a stat for all levels.
1066 * The result also contains IO stall counters which keys start with "io_stalls."
1067 * and values represent uint64 encoded as strings.
1069 void InternalStats::DumpCFMapStats(
1070 std::map
<std::string
, std::string
>* cf_stats
) {
1071 CompactionStats compaction_stats_sum
;
1072 std::map
<int, std::map
<LevelStatType
, double>> levels_stats
;
1073 DumpCFMapStats(&levels_stats
, &compaction_stats_sum
);
1074 for (auto const& level_ent
: levels_stats
) {
1076 level_ent
.first
== -1 ? "Sum" : "L" + ToString(level_ent
.first
);
1077 for (auto const& stat_ent
: level_ent
.second
) {
1078 auto stat_type
= stat_ent
.first
;
1080 "compaction." + level_str
+ "." +
1081 InternalStats::compaction_level_stats
.at(stat_type
).property_name
;
1082 (*cf_stats
)[key_str
] = std::to_string(stat_ent
.second
);
1086 DumpCFMapStatsIOStalls(cf_stats
);
1089 void InternalStats::DumpCFMapStats(
1090 std::map
<int, std::map
<LevelStatType
, double>>* levels_stats
,
1091 CompactionStats
* compaction_stats_sum
) {
1092 const VersionStorageInfo
* vstorage
= cfd_
->current()->storage_info();
1094 int num_levels_to_check
=
1095 (cfd_
->ioptions()->compaction_style
!= kCompactionStyleFIFO
)
1096 ? vstorage
->num_levels() - 1
1099 // Compaction scores are sorted based on its value. Restore them to the
1101 std::vector
<double> compaction_score(number_levels_
, 0);
1102 for (int i
= 0; i
< num_levels_to_check
; ++i
) {
1103 compaction_score
[vstorage
->CompactionScoreLevel(i
)] =
1104 vstorage
->CompactionScore(i
);
1106 // Count # of files being compacted for each level
1107 std::vector
<int> files_being_compacted(number_levels_
, 0);
1108 for (int level
= 0; level
< number_levels_
; ++level
) {
1109 for (auto* f
: vstorage
->LevelFiles(level
)) {
1110 if (f
->being_compacted
) {
1111 ++files_being_compacted
[level
];
1116 int total_files
= 0;
1117 int total_files_being_compacted
= 0;
1118 double total_file_size
= 0;
1119 uint64_t flush_ingest
= cf_stats_value_
[BYTES_FLUSHED
];
1120 uint64_t add_file_ingest
= cf_stats_value_
[BYTES_INGESTED_ADD_FILE
];
1121 uint64_t curr_ingest
= flush_ingest
+ add_file_ingest
;
1122 for (int level
= 0; level
< number_levels_
; level
++) {
1123 int files
= vstorage
->NumLevelFiles(level
);
1124 total_files
+= files
;
1125 total_files_being_compacted
+= files_being_compacted
[level
];
1126 if (comp_stats_
[level
].micros
> 0 || files
> 0) {
1127 compaction_stats_sum
->Add(comp_stats_
[level
]);
1128 total_file_size
+= vstorage
->NumLevelBytes(level
);
1129 uint64_t input_bytes
;
1131 input_bytes
= curr_ingest
;
1133 input_bytes
= comp_stats_
[level
].bytes_read_non_output_levels
;
1138 : static_cast<double>(comp_stats_
[level
].bytes_written
) /
1140 std::map
<LevelStatType
, double> level_stats
;
1141 PrepareLevelStats(&level_stats
, files
, files_being_compacted
[level
],
1142 static_cast<double>(vstorage
->NumLevelBytes(level
)),
1143 compaction_score
[level
], w_amp
, comp_stats_
[level
]);
1144 (*levels_stats
)[level
] = level_stats
;
1147 // Cumulative summary
1148 double w_amp
= compaction_stats_sum
->bytes_written
/
1149 static_cast<double>(curr_ingest
+ 1);
1150 // Stats summary across levels
1151 std::map
<LevelStatType
, double> sum_stats
;
1152 PrepareLevelStats(&sum_stats
, total_files
, total_files_being_compacted
,
1153 total_file_size
, 0, w_amp
, *compaction_stats_sum
);
1154 (*levels_stats
)[-1] = sum_stats
; // -1 is for the Sum level
1157 void InternalStats::DumpCFMapStatsByPriority(
1158 std::map
<int, std::map
<LevelStatType
, double>>* priorities_stats
) {
1159 for (size_t priority
= 0; priority
< comp_stats_by_pri_
.size(); priority
++) {
1160 if (comp_stats_by_pri_
[priority
].micros
> 0) {
1161 std::map
<LevelStatType
, double> priority_stats
;
1162 PrepareLevelStats(&priority_stats
, 0 /* num_files */,
1163 0 /* being_compacted */, 0 /* total_file_size */,
1164 0 /* compaction_score */, 0 /* w_amp */,
1165 comp_stats_by_pri_
[priority
]);
1166 (*priorities_stats
)[static_cast<int>(priority
)] = priority_stats
;
1171 void InternalStats::DumpCFMapStatsIOStalls(
1172 std::map
<std::string
, std::string
>* cf_stats
) {
1173 (*cf_stats
)["io_stalls.level0_slowdown"] =
1174 std::to_string(cf_stats_count_
[L0_FILE_COUNT_LIMIT_SLOWDOWNS
]);
1175 (*cf_stats
)["io_stalls.level0_slowdown_with_compaction"] =
1176 std::to_string(cf_stats_count_
[LOCKED_L0_FILE_COUNT_LIMIT_SLOWDOWNS
]);
1177 (*cf_stats
)["io_stalls.level0_numfiles"] =
1178 std::to_string(cf_stats_count_
[L0_FILE_COUNT_LIMIT_STOPS
]);
1179 (*cf_stats
)["io_stalls.level0_numfiles_with_compaction"] =
1180 std::to_string(cf_stats_count_
[LOCKED_L0_FILE_COUNT_LIMIT_STOPS
]);
1181 (*cf_stats
)["io_stalls.stop_for_pending_compaction_bytes"] =
1182 std::to_string(cf_stats_count_
[PENDING_COMPACTION_BYTES_LIMIT_STOPS
]);
1183 (*cf_stats
)["io_stalls.slowdown_for_pending_compaction_bytes"] =
1184 std::to_string(cf_stats_count_
[PENDING_COMPACTION_BYTES_LIMIT_SLOWDOWNS
]);
1185 (*cf_stats
)["io_stalls.memtable_compaction"] =
1186 std::to_string(cf_stats_count_
[MEMTABLE_LIMIT_STOPS
]);
1187 (*cf_stats
)["io_stalls.memtable_slowdown"] =
1188 std::to_string(cf_stats_count_
[MEMTABLE_LIMIT_SLOWDOWNS
]);
1190 uint64_t total_stop
= cf_stats_count_
[L0_FILE_COUNT_LIMIT_STOPS
] +
1191 cf_stats_count_
[PENDING_COMPACTION_BYTES_LIMIT_STOPS
] +
1192 cf_stats_count_
[MEMTABLE_LIMIT_STOPS
];
1194 uint64_t total_slowdown
=
1195 cf_stats_count_
[L0_FILE_COUNT_LIMIT_SLOWDOWNS
] +
1196 cf_stats_count_
[PENDING_COMPACTION_BYTES_LIMIT_SLOWDOWNS
] +
1197 cf_stats_count_
[MEMTABLE_LIMIT_SLOWDOWNS
];
1199 (*cf_stats
)["io_stalls.total_stop"] = std::to_string(total_stop
);
1200 (*cf_stats
)["io_stalls.total_slowdown"] = std::to_string(total_slowdown
);
1203 void InternalStats::DumpCFStats(std::string
* value
) {
1204 DumpCFStatsNoFileHistogram(value
);
1205 DumpCFFileHistogram(value
);
1208 void InternalStats::DumpCFStatsNoFileHistogram(std::string
* value
) {
1210 // Per-ColumnFamily stats
1211 PrintLevelStatsHeader(buf
, sizeof(buf
), cfd_
->GetName(), "Level");
1214 // Print stats for each level
1215 std::map
<int, std::map
<LevelStatType
, double>> levels_stats
;
1216 CompactionStats compaction_stats_sum
;
1217 DumpCFMapStats(&levels_stats
, &compaction_stats_sum
);
1218 for (int l
= 0; l
< number_levels_
; ++l
) {
1219 if (levels_stats
.find(l
) != levels_stats
.end()) {
1220 PrintLevelStats(buf
, sizeof(buf
), "L" + ToString(l
), levels_stats
[l
]);
1225 // Print sum of level stats
1226 PrintLevelStats(buf
, sizeof(buf
), "Sum", levels_stats
[-1]);
1229 uint64_t flush_ingest
= cf_stats_value_
[BYTES_FLUSHED
];
1230 uint64_t add_file_ingest
= cf_stats_value_
[BYTES_INGESTED_ADD_FILE
];
1231 uint64_t ingest_files_addfile
= cf_stats_value_
[INGESTED_NUM_FILES_TOTAL
];
1232 uint64_t ingest_l0_files_addfile
=
1233 cf_stats_value_
[INGESTED_LEVEL0_NUM_FILES_TOTAL
];
1234 uint64_t ingest_keys_addfile
= cf_stats_value_
[INGESTED_NUM_KEYS_TOTAL
];
1235 // Cumulative summary
1236 uint64_t total_stall_count
=
1237 cf_stats_count_
[L0_FILE_COUNT_LIMIT_SLOWDOWNS
] +
1238 cf_stats_count_
[L0_FILE_COUNT_LIMIT_STOPS
] +
1239 cf_stats_count_
[PENDING_COMPACTION_BYTES_LIMIT_SLOWDOWNS
] +
1240 cf_stats_count_
[PENDING_COMPACTION_BYTES_LIMIT_STOPS
] +
1241 cf_stats_count_
[MEMTABLE_LIMIT_STOPS
] +
1242 cf_stats_count_
[MEMTABLE_LIMIT_SLOWDOWNS
];
1244 uint64_t interval_flush_ingest
=
1245 flush_ingest
- cf_stats_snapshot_
.ingest_bytes_flush
;
1246 uint64_t interval_add_file_inget
=
1247 add_file_ingest
- cf_stats_snapshot_
.ingest_bytes_addfile
;
1248 uint64_t interval_ingest
=
1249 interval_flush_ingest
+ interval_add_file_inget
+ 1;
1250 CompactionStats
interval_stats(compaction_stats_sum
);
1251 interval_stats
.Subtract(cf_stats_snapshot_
.comp_stats
);
1253 interval_stats
.bytes_written
/ static_cast<double>(interval_ingest
);
1254 PrintLevelStats(buf
, sizeof(buf
), "Int", 0, 0, 0, 0, w_amp
, interval_stats
);
1257 PrintLevelStatsHeader(buf
, sizeof(buf
), cfd_
->GetName(), "Priority");
1259 std::map
<int, std::map
<LevelStatType
, double>> priorities_stats
;
1260 DumpCFMapStatsByPriority(&priorities_stats
);
1261 for (size_t priority
= 0; priority
< comp_stats_by_pri_
.size(); ++priority
) {
1262 if (priorities_stats
.find(static_cast<int>(priority
)) !=
1263 priorities_stats
.end()) {
1266 Env::PriorityToString(static_cast<Env::Priority
>(priority
)),
1267 priorities_stats
[static_cast<int>(priority
)]);
1272 double seconds_up
= (env_
->NowMicros() - started_at_
+ 1) / kMicrosInSec
;
1273 double interval_seconds_up
= seconds_up
- cf_stats_snapshot_
.seconds_up
;
1274 snprintf(buf
, sizeof(buf
), "Uptime(secs): %.1f total, %.1f interval\n",
1275 seconds_up
, interval_seconds_up
);
1277 snprintf(buf
, sizeof(buf
), "Flush(GB): cumulative %.3f, interval %.3f\n",
1278 flush_ingest
/ kGB
, interval_flush_ingest
/ kGB
);
1280 snprintf(buf
, sizeof(buf
), "AddFile(GB): cumulative %.3f, interval %.3f\n",
1281 add_file_ingest
/ kGB
, interval_add_file_inget
/ kGB
);
1284 uint64_t interval_ingest_files_addfile
=
1285 ingest_files_addfile
- cf_stats_snapshot_
.ingest_files_addfile
;
1286 snprintf(buf
, sizeof(buf
),
1287 "AddFile(Total Files): cumulative %" PRIu64
", interval %" PRIu64
1289 ingest_files_addfile
, interval_ingest_files_addfile
);
1292 uint64_t interval_ingest_l0_files_addfile
=
1293 ingest_l0_files_addfile
- cf_stats_snapshot_
.ingest_l0_files_addfile
;
1294 snprintf(buf
, sizeof(buf
),
1295 "AddFile(L0 Files): cumulative %" PRIu64
", interval %" PRIu64
"\n",
1296 ingest_l0_files_addfile
, interval_ingest_l0_files_addfile
);
1299 uint64_t interval_ingest_keys_addfile
=
1300 ingest_keys_addfile
- cf_stats_snapshot_
.ingest_keys_addfile
;
1301 snprintf(buf
, sizeof(buf
),
1302 "AddFile(Keys): cumulative %" PRIu64
", interval %" PRIu64
"\n",
1303 ingest_keys_addfile
, interval_ingest_keys_addfile
);
1307 uint64_t compact_bytes_read
= 0;
1308 uint64_t compact_bytes_write
= 0;
1309 uint64_t compact_micros
= 0;
1310 for (int level
= 0; level
< number_levels_
; level
++) {
1311 compact_bytes_read
+= comp_stats_
[level
].bytes_read_output_level
+
1312 comp_stats_
[level
].bytes_read_non_output_levels
;
1313 compact_bytes_write
+= comp_stats_
[level
].bytes_written
;
1314 compact_micros
+= comp_stats_
[level
].micros
;
1317 snprintf(buf
, sizeof(buf
),
1318 "Cumulative compaction: %.2f GB write, %.2f MB/s write, "
1319 "%.2f GB read, %.2f MB/s read, %.1f seconds\n",
1320 compact_bytes_write
/ kGB
, compact_bytes_write
/ kMB
/ seconds_up
,
1321 compact_bytes_read
/ kGB
, compact_bytes_read
/ kMB
/ seconds_up
,
1322 compact_micros
/ kMicrosInSec
);
1325 // Compaction interval
1326 uint64_t interval_compact_bytes_write
=
1327 compact_bytes_write
- cf_stats_snapshot_
.compact_bytes_write
;
1328 uint64_t interval_compact_bytes_read
=
1329 compact_bytes_read
- cf_stats_snapshot_
.compact_bytes_read
;
1330 uint64_t interval_compact_micros
=
1331 compact_micros
- cf_stats_snapshot_
.compact_micros
;
1335 "Interval compaction: %.2f GB write, %.2f MB/s write, "
1336 "%.2f GB read, %.2f MB/s read, %.1f seconds\n",
1337 interval_compact_bytes_write
/ kGB
,
1338 interval_compact_bytes_write
/ kMB
/ std::max(interval_seconds_up
, 0.001),
1339 interval_compact_bytes_read
/ kGB
,
1340 interval_compact_bytes_read
/ kMB
/ std::max(interval_seconds_up
, 0.001),
1341 interval_compact_micros
/ kMicrosInSec
);
1343 cf_stats_snapshot_
.compact_bytes_write
= compact_bytes_write
;
1344 cf_stats_snapshot_
.compact_bytes_read
= compact_bytes_read
;
1345 cf_stats_snapshot_
.compact_micros
= compact_micros
;
1347 snprintf(buf
, sizeof(buf
),
1348 "Stalls(count): %" PRIu64
1349 " level0_slowdown, "
1351 " level0_slowdown_with_compaction, "
1353 " level0_numfiles, "
1355 " level0_numfiles_with_compaction, "
1357 " stop for pending_compaction_bytes, "
1359 " slowdown for pending_compaction_bytes, "
1361 " memtable_compaction, "
1363 " memtable_slowdown, "
1364 "interval %" PRIu64
" total count\n",
1365 cf_stats_count_
[L0_FILE_COUNT_LIMIT_SLOWDOWNS
],
1366 cf_stats_count_
[LOCKED_L0_FILE_COUNT_LIMIT_SLOWDOWNS
],
1367 cf_stats_count_
[L0_FILE_COUNT_LIMIT_STOPS
],
1368 cf_stats_count_
[LOCKED_L0_FILE_COUNT_LIMIT_STOPS
],
1369 cf_stats_count_
[PENDING_COMPACTION_BYTES_LIMIT_STOPS
],
1370 cf_stats_count_
[PENDING_COMPACTION_BYTES_LIMIT_SLOWDOWNS
],
1371 cf_stats_count_
[MEMTABLE_LIMIT_STOPS
],
1372 cf_stats_count_
[MEMTABLE_LIMIT_SLOWDOWNS
],
1373 total_stall_count
- cf_stats_snapshot_
.stall_count
);
1376 cf_stats_snapshot_
.seconds_up
= seconds_up
;
1377 cf_stats_snapshot_
.ingest_bytes_flush
= flush_ingest
;
1378 cf_stats_snapshot_
.ingest_bytes_addfile
= add_file_ingest
;
1379 cf_stats_snapshot_
.ingest_files_addfile
= ingest_files_addfile
;
1380 cf_stats_snapshot_
.ingest_l0_files_addfile
= ingest_l0_files_addfile
;
1381 cf_stats_snapshot_
.ingest_keys_addfile
= ingest_keys_addfile
;
1382 cf_stats_snapshot_
.comp_stats
= compaction_stats_sum
;
1383 cf_stats_snapshot_
.stall_count
= total_stall_count
;
1386 void InternalStats::DumpCFFileHistogram(std::string
* value
) {
1388 snprintf(buf
, sizeof(buf
),
1389 "\n** File Read Latency Histogram By Level [%s] **\n",
1390 cfd_
->GetName().c_str());
1393 for (int level
= 0; level
< number_levels_
; level
++) {
1394 if (!file_read_latency_
[level
].Empty()) {
1396 snprintf(buf2
, sizeof(buf2
),
1397 "** Level %d read latency histogram (micros):\n%s\n", level
,
1398 file_read_latency_
[level
].ToString().c_str());
1399 value
->append(buf2
);
1406 const DBPropertyInfo
* GetPropertyInfo(const Slice
& /*property*/) {
1410 #endif // !ROCKSDB_LITE
1412 } // namespace rocksdb