1 // This source code is licensed under both the GPLv2 (found in the
2 // COPYING file in the root directory) and Apache 2.0 License
3 // (found in the LICENSE.Apache file in the root directory).
5 // Copyright (c) 2011 The LevelDB Authors. All rights reserved.
6 // Use of this source code is governed by a BSD-style license that can be
7 // found in the LICENSE file. See the AUTHORS file for names of contributors.
9 #include "db/internal_stats.h"
11 #ifndef __STDC_FORMAT_MACROS
12 #define __STDC_FORMAT_MACROS
22 #include "db/column_family.h"
23 #include "db/db_impl.h"
24 #include "table/block_based_table_factory.h"
25 #include "util/string_util.h"
31 const std::map
<LevelStatType
, LevelStat
> InternalStats::compaction_level_stats
=
33 {LevelStatType::NUM_FILES
, LevelStat
{"NumFiles", "Files"}},
34 {LevelStatType::COMPACTED_FILES
,
35 LevelStat
{"CompactedFiles", "CompactedFiles"}},
36 {LevelStatType::SIZE_BYTES
, LevelStat
{"SizeBytes", "Size"}},
37 {LevelStatType::SCORE
, LevelStat
{"Score", "Score"}},
38 {LevelStatType::READ_GB
, LevelStat
{"ReadGB", "Read(GB)"}},
39 {LevelStatType::RN_GB
, LevelStat
{"RnGB", "Rn(GB)"}},
40 {LevelStatType::RNP1_GB
, LevelStat
{"Rnp1GB", "Rnp1(GB)"}},
41 {LevelStatType::WRITE_GB
, LevelStat
{"WriteGB", "Write(GB)"}},
42 {LevelStatType::W_NEW_GB
, LevelStat
{"WnewGB", "Wnew(GB)"}},
43 {LevelStatType::MOVED_GB
, LevelStat
{"MovedGB", "Moved(GB)"}},
44 {LevelStatType::WRITE_AMP
, LevelStat
{"WriteAmp", "W-Amp"}},
45 {LevelStatType::READ_MBPS
, LevelStat
{"ReadMBps", "Rd(MB/s)"}},
46 {LevelStatType::WRITE_MBPS
, LevelStat
{"WriteMBps", "Wr(MB/s)"}},
47 {LevelStatType::COMP_SEC
, LevelStat
{"CompSec", "Comp(sec)"}},
48 {LevelStatType::COMP_COUNT
, LevelStat
{"CompCount", "Comp(cnt)"}},
49 {LevelStatType::AVG_SEC
, LevelStat
{"AvgSec", "Avg(sec)"}},
50 {LevelStatType::KEY_IN
, LevelStat
{"KeyIn", "KeyIn"}},
51 {LevelStatType::KEY_DROP
, LevelStat
{"KeyDrop", "KeyDrop"}},
// Unit-conversion constants used throughout the stats dump code.
const double kMB = 1048576.0;          // bytes per mebibyte
const double kGB = kMB * 1024;         // bytes per gibibyte
const double kMicrosInSec = 1000000.0; // microseconds per second
59 void PrintLevelStatsHeader(char* buf
, size_t len
, const std::string
& cf_name
) {
61 snprintf(buf
, len
, "\n** Compaction Stats [%s] **\n", cf_name
.c_str());
62 auto hdr
= [](LevelStatType t
) {
63 return InternalStats::compaction_level_stats
.at(t
).header_name
.c_str();
65 int line_size
= snprintf(
66 buf
+ written_size
, len
- written_size
,
67 "Level %s %s %s %s %s %s %s %s %s %s %s %s %s %s %s %s %s\n",
68 // Note that we skip COMPACTED_FILES and merge it with Files column
69 hdr(LevelStatType::NUM_FILES
), hdr(LevelStatType::SIZE_BYTES
),
70 hdr(LevelStatType::SCORE
), hdr(LevelStatType::READ_GB
),
71 hdr(LevelStatType::RN_GB
), hdr(LevelStatType::RNP1_GB
),
72 hdr(LevelStatType::WRITE_GB
), hdr(LevelStatType::W_NEW_GB
),
73 hdr(LevelStatType::MOVED_GB
), hdr(LevelStatType::WRITE_AMP
),
74 hdr(LevelStatType::READ_MBPS
), hdr(LevelStatType::WRITE_MBPS
),
75 hdr(LevelStatType::COMP_SEC
), hdr(LevelStatType::COMP_COUNT
),
76 hdr(LevelStatType::AVG_SEC
), hdr(LevelStatType::KEY_IN
),
77 hdr(LevelStatType::KEY_DROP
));
79 written_size
+= line_size
;
80 snprintf(buf
+ written_size
, len
- written_size
, "%s\n",
81 std::string(line_size
, '-').c_str());
84 void PrepareLevelStats(std::map
<LevelStatType
, double>* level_stats
,
85 int num_files
, int being_compacted
,
86 double total_file_size
, double score
, double w_amp
,
87 const InternalStats::CompactionStats
& stats
) {
89 stats
.bytes_read_non_output_levels
+ stats
.bytes_read_output_level
;
90 int64_t bytes_new
= stats
.bytes_written
- stats
.bytes_read_output_level
;
91 double elapsed
= (stats
.micros
+ 1) / kMicrosInSec
;
93 (*level_stats
)[LevelStatType::NUM_FILES
] = num_files
;
94 (*level_stats
)[LevelStatType::COMPACTED_FILES
] = being_compacted
;
95 (*level_stats
)[LevelStatType::SIZE_BYTES
] = total_file_size
;
96 (*level_stats
)[LevelStatType::SCORE
] = score
;
97 (*level_stats
)[LevelStatType::READ_GB
] = bytes_read
/ kGB
;
98 (*level_stats
)[LevelStatType::RN_GB
] =
99 stats
.bytes_read_non_output_levels
/ kGB
;
100 (*level_stats
)[LevelStatType::RNP1_GB
] = stats
.bytes_read_output_level
/ kGB
;
101 (*level_stats
)[LevelStatType::WRITE_GB
] = stats
.bytes_written
/ kGB
;
102 (*level_stats
)[LevelStatType::W_NEW_GB
] = bytes_new
/ kGB
;
103 (*level_stats
)[LevelStatType::MOVED_GB
] = stats
.bytes_moved
/ kGB
;
104 (*level_stats
)[LevelStatType::WRITE_AMP
] = w_amp
;
105 (*level_stats
)[LevelStatType::READ_MBPS
] = bytes_read
/ kMB
/ elapsed
;
106 (*level_stats
)[LevelStatType::WRITE_MBPS
] =
107 stats
.bytes_written
/ kMB
/ elapsed
;
108 (*level_stats
)[LevelStatType::COMP_SEC
] = stats
.micros
/ kMicrosInSec
;
109 (*level_stats
)[LevelStatType::COMP_COUNT
] = stats
.count
;
110 (*level_stats
)[LevelStatType::AVG_SEC
] =
111 stats
.count
== 0 ? 0 : stats
.micros
/ kMicrosInSec
/ stats
.count
;
112 (*level_stats
)[LevelStatType::KEY_IN
] =
113 static_cast<double>(stats
.num_input_records
);
114 (*level_stats
)[LevelStatType::KEY_DROP
] =
115 static_cast<double>(stats
.num_dropped_records
);
118 void PrintLevelStats(char* buf
, size_t len
, const std::string
& name
,
119 const std::map
<LevelStatType
, double>& stat_value
) {
123 "%6d/%-3d " /* Files */
126 "%8.1f " /* Read(GB) */
127 "%7.1f " /* Rn(GB) */
128 "%8.1f " /* Rnp1(GB) */
129 "%9.1f " /* Write(GB) */
130 "%8.1f " /* Wnew(GB) */
131 "%9.1f " /* Moved(GB) */
133 "%8.1f " /* Rd(MB/s) */
134 "%8.1f " /* Wr(MB/s) */
135 "%9.0f " /* Comp(sec) */
136 "%9d " /* Comp(cnt) */
137 "%8.3f " /* Avg(sec) */
139 "%6s\n", /* KeyDrop */
140 name
.c_str(), static_cast<int>(stat_value
.at(LevelStatType::NUM_FILES
)),
141 static_cast<int>(stat_value
.at(LevelStatType::COMPACTED_FILES
)),
143 static_cast<uint64_t>(stat_value
.at(LevelStatType::SIZE_BYTES
)))
145 stat_value
.at(LevelStatType::SCORE
),
146 stat_value
.at(LevelStatType::READ_GB
),
147 stat_value
.at(LevelStatType::RN_GB
),
148 stat_value
.at(LevelStatType::RNP1_GB
),
149 stat_value
.at(LevelStatType::WRITE_GB
),
150 stat_value
.at(LevelStatType::W_NEW_GB
),
151 stat_value
.at(LevelStatType::MOVED_GB
),
152 stat_value
.at(LevelStatType::WRITE_AMP
),
153 stat_value
.at(LevelStatType::READ_MBPS
),
154 stat_value
.at(LevelStatType::WRITE_MBPS
),
155 stat_value
.at(LevelStatType::COMP_SEC
),
156 static_cast<int>(stat_value
.at(LevelStatType::COMP_COUNT
)),
157 stat_value
.at(LevelStatType::AVG_SEC
),
159 static_cast<std::int64_t>(stat_value
.at(LevelStatType::KEY_IN
)))
162 static_cast<std::int64_t>(stat_value
.at(LevelStatType::KEY_DROP
)))
166 void PrintLevelStats(char* buf
, size_t len
, const std::string
& name
,
167 int num_files
, int being_compacted
, double total_file_size
,
168 double score
, double w_amp
,
169 const InternalStats::CompactionStats
& stats
) {
170 std::map
<LevelStatType
, double> level_stats
;
171 PrepareLevelStats(&level_stats
, num_files
, being_compacted
, total_file_size
,
172 score
, w_amp
, stats
);
173 PrintLevelStats(buf
, len
, name
, level_stats
);
176 // Assumes that trailing numbers represent an optional argument. This requires
177 // property names to not end with numbers.
178 std::pair
<Slice
, Slice
> GetPropertyNameAndArg(const Slice
& property
) {
179 Slice name
= property
, arg
= property
;
181 while (sfx_len
< property
.size() &&
182 isdigit(property
[property
.size() - sfx_len
- 1])) {
185 name
.remove_suffix(sfx_len
);
186 arg
.remove_prefix(property
.size() - sfx_len
);
189 } // anonymous namespace
// Property-name pieces: `rocksdb_prefix` plus one suffix string per
// supported property. The public DB::Properties constants below are built
// by concatenating the prefix with these suffixes.
static const std::string rocksdb_prefix = "rocksdb.";

static const std::string num_files_at_level_prefix = "num-files-at-level";
static const std::string compression_ratio_at_level_prefix =
    "compression-ratio-at-level";
static const std::string allstats = "stats";
static const std::string sstables = "sstables";
static const std::string cfstats = "cfstats";
static const std::string cfstats_no_file_histogram =
    "cfstats-no-file-histogram";
static const std::string cf_file_histogram = "cf-file-histogram";
static const std::string dbstats = "dbstats";
static const std::string levelstats = "levelstats";
static const std::string num_immutable_mem_table = "num-immutable-mem-table";
static const std::string num_immutable_mem_table_flushed =
    "num-immutable-mem-table-flushed";
static const std::string mem_table_flush_pending = "mem-table-flush-pending";
static const std::string compaction_pending = "compaction-pending";
static const std::string background_errors = "background-errors";
static const std::string cur_size_active_mem_table =
    "cur-size-active-mem-table";
static const std::string cur_size_all_mem_tables = "cur-size-all-mem-tables";
static const std::string size_all_mem_tables = "size-all-mem-tables";
static const std::string num_entries_active_mem_table =
    "num-entries-active-mem-table";
static const std::string num_entries_imm_mem_tables =
    "num-entries-imm-mem-tables";
static const std::string num_deletes_active_mem_table =
    "num-deletes-active-mem-table";
static const std::string num_deletes_imm_mem_tables =
    "num-deletes-imm-mem-tables";
static const std::string estimate_num_keys = "estimate-num-keys";
static const std::string estimate_table_readers_mem =
    "estimate-table-readers-mem";
static const std::string is_file_deletions_enabled =
    "is-file-deletions-enabled";
static const std::string num_snapshots = "num-snapshots";
static const std::string oldest_snapshot_time = "oldest-snapshot-time";
static const std::string num_live_versions = "num-live-versions";
static const std::string current_version_number =
    "current-super-version-number";
static const std::string estimate_live_data_size = "estimate-live-data-size";
static const std::string min_log_number_to_keep_str = "min-log-number-to-keep";
static const std::string base_level_str = "base-level";
static const std::string total_sst_files_size = "total-sst-files-size";
static const std::string live_sst_files_size = "live-sst-files-size";
static const std::string estimate_pending_comp_bytes =
    "estimate-pending-compaction-bytes";
static const std::string aggregated_table_properties =
    "aggregated-table-properties";
static const std::string aggregated_table_properties_at_level =
    aggregated_table_properties + "-at-level";
static const std::string num_running_compactions = "num-running-compactions";
static const std::string num_running_flushes = "num-running-flushes";
static const std::string actual_delayed_write_rate =
    "actual-delayed-write-rate";
static const std::string is_write_stopped = "is-write-stopped";
static const std::string estimate_oldest_key_time = "estimate-oldest-key-time";
static const std::string block_cache_capacity = "block-cache-capacity";
static const std::string block_cache_usage = "block-cache-usage";
static const std::string block_cache_pinned_usage = "block-cache-pinned-usage";
static const std::string options_statistics = "options-statistics";
254 const std::string
DB::Properties::kNumFilesAtLevelPrefix
=
255 rocksdb_prefix
+ num_files_at_level_prefix
;
256 const std::string
DB::Properties::kCompressionRatioAtLevelPrefix
=
257 rocksdb_prefix
+ compression_ratio_at_level_prefix
;
258 const std::string
DB::Properties::kStats
= rocksdb_prefix
+ allstats
;
259 const std::string
DB::Properties::kSSTables
= rocksdb_prefix
+ sstables
;
260 const std::string
DB::Properties::kCFStats
= rocksdb_prefix
+ cfstats
;
261 const std::string
DB::Properties::kCFStatsNoFileHistogram
=
262 rocksdb_prefix
+ cfstats_no_file_histogram
;
263 const std::string
DB::Properties::kCFFileHistogram
=
264 rocksdb_prefix
+ cf_file_histogram
;
265 const std::string
DB::Properties::kDBStats
= rocksdb_prefix
+ dbstats
;
266 const std::string
DB::Properties::kLevelStats
= rocksdb_prefix
+ levelstats
;
267 const std::string
DB::Properties::kNumImmutableMemTable
=
268 rocksdb_prefix
+ num_immutable_mem_table
;
269 const std::string
DB::Properties::kNumImmutableMemTableFlushed
=
270 rocksdb_prefix
+ num_immutable_mem_table_flushed
;
271 const std::string
DB::Properties::kMemTableFlushPending
=
272 rocksdb_prefix
+ mem_table_flush_pending
;
273 const std::string
DB::Properties::kCompactionPending
=
274 rocksdb_prefix
+ compaction_pending
;
275 const std::string
DB::Properties::kNumRunningCompactions
=
276 rocksdb_prefix
+ num_running_compactions
;
277 const std::string
DB::Properties::kNumRunningFlushes
=
278 rocksdb_prefix
+ num_running_flushes
;
279 const std::string
DB::Properties::kBackgroundErrors
=
280 rocksdb_prefix
+ background_errors
;
281 const std::string
DB::Properties::kCurSizeActiveMemTable
=
282 rocksdb_prefix
+ cur_size_active_mem_table
;
283 const std::string
DB::Properties::kCurSizeAllMemTables
=
284 rocksdb_prefix
+ cur_size_all_mem_tables
;
285 const std::string
DB::Properties::kSizeAllMemTables
=
286 rocksdb_prefix
+ size_all_mem_tables
;
287 const std::string
DB::Properties::kNumEntriesActiveMemTable
=
288 rocksdb_prefix
+ num_entries_active_mem_table
;
289 const std::string
DB::Properties::kNumEntriesImmMemTables
=
290 rocksdb_prefix
+ num_entries_imm_mem_tables
;
291 const std::string
DB::Properties::kNumDeletesActiveMemTable
=
292 rocksdb_prefix
+ num_deletes_active_mem_table
;
293 const std::string
DB::Properties::kNumDeletesImmMemTables
=
294 rocksdb_prefix
+ num_deletes_imm_mem_tables
;
295 const std::string
DB::Properties::kEstimateNumKeys
=
296 rocksdb_prefix
+ estimate_num_keys
;
297 const std::string
DB::Properties::kEstimateTableReadersMem
=
298 rocksdb_prefix
+ estimate_table_readers_mem
;
299 const std::string
DB::Properties::kIsFileDeletionsEnabled
=
300 rocksdb_prefix
+ is_file_deletions_enabled
;
301 const std::string
DB::Properties::kNumSnapshots
=
302 rocksdb_prefix
+ num_snapshots
;
303 const std::string
DB::Properties::kOldestSnapshotTime
=
304 rocksdb_prefix
+ oldest_snapshot_time
;
305 const std::string
DB::Properties::kNumLiveVersions
=
306 rocksdb_prefix
+ num_live_versions
;
307 const std::string
DB::Properties::kCurrentSuperVersionNumber
=
308 rocksdb_prefix
+ current_version_number
;
309 const std::string
DB::Properties::kEstimateLiveDataSize
=
310 rocksdb_prefix
+ estimate_live_data_size
;
311 const std::string
DB::Properties::kMinLogNumberToKeep
=
312 rocksdb_prefix
+ min_log_number_to_keep_str
;
313 const std::string
DB::Properties::kTotalSstFilesSize
=
314 rocksdb_prefix
+ total_sst_files_size
;
315 const std::string
DB::Properties::kLiveSstFilesSize
=
316 rocksdb_prefix
+ live_sst_files_size
;
317 const std::string
DB::Properties::kBaseLevel
= rocksdb_prefix
+ base_level_str
;
318 const std::string
DB::Properties::kEstimatePendingCompactionBytes
=
319 rocksdb_prefix
+ estimate_pending_comp_bytes
;
320 const std::string
DB::Properties::kAggregatedTableProperties
=
321 rocksdb_prefix
+ aggregated_table_properties
;
322 const std::string
DB::Properties::kAggregatedTablePropertiesAtLevel
=
323 rocksdb_prefix
+ aggregated_table_properties_at_level
;
324 const std::string
DB::Properties::kActualDelayedWriteRate
=
325 rocksdb_prefix
+ actual_delayed_write_rate
;
326 const std::string
DB::Properties::kIsWriteStopped
=
327 rocksdb_prefix
+ is_write_stopped
;
328 const std::string
DB::Properties::kEstimateOldestKeyTime
=
329 rocksdb_prefix
+ estimate_oldest_key_time
;
330 const std::string
DB::Properties::kBlockCacheCapacity
=
331 rocksdb_prefix
+ block_cache_capacity
;
332 const std::string
DB::Properties::kBlockCacheUsage
=
333 rocksdb_prefix
+ block_cache_usage
;
334 const std::string
DB::Properties::kBlockCachePinnedUsage
=
335 rocksdb_prefix
+ block_cache_pinned_usage
;
336 const std::string
DB::Properties::kOptionsStatistics
=
337 rocksdb_prefix
+ options_statistics
;
339 const std::unordered_map
<std::string
, DBPropertyInfo
>
340 InternalStats::ppt_name_to_info
= {
341 {DB::Properties::kNumFilesAtLevelPrefix
,
342 {false, &InternalStats::HandleNumFilesAtLevel
, nullptr, nullptr,
344 {DB::Properties::kCompressionRatioAtLevelPrefix
,
345 {false, &InternalStats::HandleCompressionRatioAtLevelPrefix
, nullptr,
347 {DB::Properties::kLevelStats
,
348 {false, &InternalStats::HandleLevelStats
, nullptr, nullptr, nullptr}},
349 {DB::Properties::kStats
,
350 {false, &InternalStats::HandleStats
, nullptr, nullptr, nullptr}},
351 {DB::Properties::kCFStats
,
352 {false, &InternalStats::HandleCFStats
, nullptr,
353 &InternalStats::HandleCFMapStats
, nullptr}},
354 {DB::Properties::kCFStatsNoFileHistogram
,
355 {false, &InternalStats::HandleCFStatsNoFileHistogram
, nullptr, nullptr,
357 {DB::Properties::kCFFileHistogram
,
358 {false, &InternalStats::HandleCFFileHistogram
, nullptr, nullptr,
360 {DB::Properties::kDBStats
,
361 {false, &InternalStats::HandleDBStats
, nullptr, nullptr, nullptr}},
362 {DB::Properties::kSSTables
,
363 {false, &InternalStats::HandleSsTables
, nullptr, nullptr, nullptr}},
364 {DB::Properties::kAggregatedTableProperties
,
365 {false, &InternalStats::HandleAggregatedTableProperties
, nullptr,
367 {DB::Properties::kAggregatedTablePropertiesAtLevel
,
368 {false, &InternalStats::HandleAggregatedTablePropertiesAtLevel
,
369 nullptr, nullptr, nullptr}},
370 {DB::Properties::kNumImmutableMemTable
,
371 {false, nullptr, &InternalStats::HandleNumImmutableMemTable
, nullptr,
373 {DB::Properties::kNumImmutableMemTableFlushed
,
374 {false, nullptr, &InternalStats::HandleNumImmutableMemTableFlushed
,
376 {DB::Properties::kMemTableFlushPending
,
377 {false, nullptr, &InternalStats::HandleMemTableFlushPending
, nullptr,
379 {DB::Properties::kCompactionPending
,
380 {false, nullptr, &InternalStats::HandleCompactionPending
, nullptr,
382 {DB::Properties::kBackgroundErrors
,
383 {false, nullptr, &InternalStats::HandleBackgroundErrors
, nullptr,
385 {DB::Properties::kCurSizeActiveMemTable
,
386 {false, nullptr, &InternalStats::HandleCurSizeActiveMemTable
, nullptr,
388 {DB::Properties::kCurSizeAllMemTables
,
389 {false, nullptr, &InternalStats::HandleCurSizeAllMemTables
, nullptr,
391 {DB::Properties::kSizeAllMemTables
,
392 {false, nullptr, &InternalStats::HandleSizeAllMemTables
, nullptr,
394 {DB::Properties::kNumEntriesActiveMemTable
,
395 {false, nullptr, &InternalStats::HandleNumEntriesActiveMemTable
,
397 {DB::Properties::kNumEntriesImmMemTables
,
398 {false, nullptr, &InternalStats::HandleNumEntriesImmMemTables
, nullptr,
400 {DB::Properties::kNumDeletesActiveMemTable
,
401 {false, nullptr, &InternalStats::HandleNumDeletesActiveMemTable
,
403 {DB::Properties::kNumDeletesImmMemTables
,
404 {false, nullptr, &InternalStats::HandleNumDeletesImmMemTables
, nullptr,
406 {DB::Properties::kEstimateNumKeys
,
407 {false, nullptr, &InternalStats::HandleEstimateNumKeys
, nullptr,
409 {DB::Properties::kEstimateTableReadersMem
,
410 {true, nullptr, &InternalStats::HandleEstimateTableReadersMem
, nullptr,
412 {DB::Properties::kIsFileDeletionsEnabled
,
413 {false, nullptr, &InternalStats::HandleIsFileDeletionsEnabled
, nullptr,
415 {DB::Properties::kNumSnapshots
,
416 {false, nullptr, &InternalStats::HandleNumSnapshots
, nullptr,
418 {DB::Properties::kOldestSnapshotTime
,
419 {false, nullptr, &InternalStats::HandleOldestSnapshotTime
, nullptr,
421 {DB::Properties::kNumLiveVersions
,
422 {false, nullptr, &InternalStats::HandleNumLiveVersions
, nullptr,
424 {DB::Properties::kCurrentSuperVersionNumber
,
425 {false, nullptr, &InternalStats::HandleCurrentSuperVersionNumber
,
427 {DB::Properties::kEstimateLiveDataSize
,
428 {true, nullptr, &InternalStats::HandleEstimateLiveDataSize
, nullptr,
430 {DB::Properties::kMinLogNumberToKeep
,
431 {false, nullptr, &InternalStats::HandleMinLogNumberToKeep
, nullptr,
433 {DB::Properties::kBaseLevel
,
434 {false, nullptr, &InternalStats::HandleBaseLevel
, nullptr, nullptr}},
435 {DB::Properties::kTotalSstFilesSize
,
436 {false, nullptr, &InternalStats::HandleTotalSstFilesSize
, nullptr,
438 {DB::Properties::kLiveSstFilesSize
,
439 {false, nullptr, &InternalStats::HandleLiveSstFilesSize
, nullptr,
441 {DB::Properties::kEstimatePendingCompactionBytes
,
442 {false, nullptr, &InternalStats::HandleEstimatePendingCompactionBytes
,
444 {DB::Properties::kNumRunningFlushes
,
445 {false, nullptr, &InternalStats::HandleNumRunningFlushes
, nullptr,
447 {DB::Properties::kNumRunningCompactions
,
448 {false, nullptr, &InternalStats::HandleNumRunningCompactions
, nullptr,
450 {DB::Properties::kActualDelayedWriteRate
,
451 {false, nullptr, &InternalStats::HandleActualDelayedWriteRate
, nullptr,
453 {DB::Properties::kIsWriteStopped
,
454 {false, nullptr, &InternalStats::HandleIsWriteStopped
, nullptr,
456 {DB::Properties::kEstimateOldestKeyTime
,
457 {false, nullptr, &InternalStats::HandleEstimateOldestKeyTime
, nullptr,
459 {DB::Properties::kBlockCacheCapacity
,
460 {false, nullptr, &InternalStats::HandleBlockCacheCapacity
, nullptr,
462 {DB::Properties::kBlockCacheUsage
,
463 {false, nullptr, &InternalStats::HandleBlockCacheUsage
, nullptr,
465 {DB::Properties::kBlockCachePinnedUsage
,
466 {false, nullptr, &InternalStats::HandleBlockCachePinnedUsage
, nullptr,
468 {DB::Properties::kOptionsStatistics
,
469 {false, nullptr, nullptr, nullptr,
470 &DBImpl::GetPropertyHandleOptionsStatistics
}},
473 const DBPropertyInfo
* GetPropertyInfo(const Slice
& property
) {
474 std::string ppt_name
= GetPropertyNameAndArg(property
).first
.ToString();
475 auto ppt_info_iter
= InternalStats::ppt_name_to_info
.find(ppt_name
);
476 if (ppt_info_iter
== InternalStats::ppt_name_to_info
.end()) {
479 return &ppt_info_iter
->second
;
482 bool InternalStats::GetStringProperty(const DBPropertyInfo
& property_info
,
483 const Slice
& property
,
484 std::string
* value
) {
485 assert(value
!= nullptr);
486 assert(property_info
.handle_string
!= nullptr);
487 Slice arg
= GetPropertyNameAndArg(property
).second
;
488 return (this->*(property_info
.handle_string
))(value
, arg
);
491 bool InternalStats::GetMapProperty(const DBPropertyInfo
& property_info
,
492 const Slice
& /*property*/,
493 std::map
<std::string
, std::string
>* value
) {
494 assert(value
!= nullptr);
495 assert(property_info
.handle_map
!= nullptr);
496 return (this->*(property_info
.handle_map
))(value
);
499 bool InternalStats::GetIntProperty(const DBPropertyInfo
& property_info
,
500 uint64_t* value
, DBImpl
* db
) {
501 assert(value
!= nullptr);
502 assert(property_info
.handle_int
!= nullptr &&
503 !property_info
.need_out_of_mutex
);
504 db
->mutex_
.AssertHeld();
505 return (this->*(property_info
.handle_int
))(value
, db
, nullptr /* version */);
508 bool InternalStats::GetIntPropertyOutOfMutex(
509 const DBPropertyInfo
& property_info
, Version
* version
, uint64_t* value
) {
510 assert(value
!= nullptr);
511 assert(property_info
.handle_int
!= nullptr &&
512 property_info
.need_out_of_mutex
);
513 return (this->*(property_info
.handle_int
))(value
, nullptr /* db */, version
);
516 bool InternalStats::HandleNumFilesAtLevel(std::string
* value
, Slice suffix
) {
518 const auto* vstorage
= cfd_
->current()->storage_info();
519 bool ok
= ConsumeDecimalNumber(&suffix
, &level
) && suffix
.empty();
520 if (!ok
|| static_cast<int>(level
) >= number_levels_
) {
524 snprintf(buf
, sizeof(buf
), "%d",
525 vstorage
->NumLevelFiles(static_cast<int>(level
)));
531 bool InternalStats::HandleCompressionRatioAtLevelPrefix(std::string
* value
,
534 const auto* vstorage
= cfd_
->current()->storage_info();
535 bool ok
= ConsumeDecimalNumber(&suffix
, &level
) && suffix
.empty();
536 if (!ok
|| level
>= static_cast<uint64_t>(number_levels_
)) {
540 vstorage
->GetEstimatedCompressionRatioAtLevel(static_cast<int>(level
)));
544 bool InternalStats::HandleLevelStats(std::string
* value
, Slice
/*suffix*/) {
546 const auto* vstorage
= cfd_
->current()->storage_info();
547 snprintf(buf
, sizeof(buf
),
548 "Level Files Size(MB)\n"
549 "--------------------\n");
552 for (int level
= 0; level
< number_levels_
; level
++) {
553 snprintf(buf
, sizeof(buf
), "%3d %8d %8.0f\n", level
,
554 vstorage
->NumLevelFiles(level
),
555 vstorage
->NumLevelBytes(level
) / kMB
);
561 bool InternalStats::HandleStats(std::string
* value
, Slice suffix
) {
562 if (!HandleCFStats(value
, suffix
)) {
565 if (!HandleDBStats(value
, suffix
)) {
571 bool InternalStats::HandleCFMapStats(
572 std::map
<std::string
, std::string
>* cf_stats
) {
573 DumpCFMapStats(cf_stats
);
577 bool InternalStats::HandleCFStats(std::string
* value
, Slice
/*suffix*/) {
582 bool InternalStats::HandleCFStatsNoFileHistogram(std::string
* value
,
584 DumpCFStatsNoFileHistogram(value
);
588 bool InternalStats::HandleCFFileHistogram(std::string
* value
,
590 DumpCFFileHistogram(value
);
594 bool InternalStats::HandleDBStats(std::string
* value
, Slice
/*suffix*/) {
599 bool InternalStats::HandleSsTables(std::string
* value
, Slice
/*suffix*/) {
600 auto* current
= cfd_
->current();
601 *value
= current
->DebugString(true, true);
605 bool InternalStats::HandleAggregatedTableProperties(std::string
* value
,
607 std::shared_ptr
<const TableProperties
> tp
;
608 auto s
= cfd_
->current()->GetAggregatedTableProperties(&tp
);
612 *value
= tp
->ToString();
616 bool InternalStats::HandleAggregatedTablePropertiesAtLevel(std::string
* value
,
619 bool ok
= ConsumeDecimalNumber(&suffix
, &level
) && suffix
.empty();
620 if (!ok
|| static_cast<int>(level
) >= number_levels_
) {
623 std::shared_ptr
<const TableProperties
> tp
;
624 auto s
= cfd_
->current()->GetAggregatedTableProperties(
625 &tp
, static_cast<int>(level
));
629 *value
= tp
->ToString();
633 bool InternalStats::HandleNumImmutableMemTable(uint64_t* value
, DBImpl
* /*db*/,
634 Version
* /*version*/) {
635 *value
= cfd_
->imm()->NumNotFlushed();
639 bool InternalStats::HandleNumImmutableMemTableFlushed(uint64_t* value
,
641 Version
* /*version*/) {
642 *value
= cfd_
->imm()->NumFlushed();
646 bool InternalStats::HandleMemTableFlushPending(uint64_t* value
, DBImpl
* /*db*/,
647 Version
* /*version*/) {
648 // Return number of mem tables that are ready to flush (made immutable)
649 *value
= (cfd_
->imm()->IsFlushPending() ? 1 : 0);
653 bool InternalStats::HandleNumRunningFlushes(uint64_t* value
, DBImpl
* db
,
654 Version
* /*version*/) {
655 *value
= db
->num_running_flushes();
659 bool InternalStats::HandleCompactionPending(uint64_t* value
, DBImpl
* /*db*/,
660 Version
* /*version*/) {
661 // 1 if the system already determines at least one compaction is needed.
663 const auto* vstorage
= cfd_
->current()->storage_info();
664 *value
= (cfd_
->compaction_picker()->NeedsCompaction(vstorage
) ? 1 : 0);
668 bool InternalStats::HandleNumRunningCompactions(uint64_t* value
, DBImpl
* db
,
669 Version
* /*version*/) {
670 *value
= db
->num_running_compactions_
;
674 bool InternalStats::HandleBackgroundErrors(uint64_t* value
, DBImpl
* /*db*/,
675 Version
* /*version*/) {
676 // Accumulated number of errors in background flushes or compactions.
677 *value
= GetBackgroundErrorCount();
681 bool InternalStats::HandleCurSizeActiveMemTable(uint64_t* value
, DBImpl
* /*db*/,
682 Version
* /*version*/) {
683 // Current size of the active memtable
684 *value
= cfd_
->mem()->ApproximateMemoryUsage();
688 bool InternalStats::HandleCurSizeAllMemTables(uint64_t* value
, DBImpl
* /*db*/,
689 Version
* /*version*/) {
690 // Current size of the active memtable + immutable memtables
691 *value
= cfd_
->mem()->ApproximateMemoryUsage() +
692 cfd_
->imm()->ApproximateUnflushedMemTablesMemoryUsage();
696 bool InternalStats::HandleSizeAllMemTables(uint64_t* value
, DBImpl
* /*db*/,
697 Version
* /*version*/) {
698 *value
= cfd_
->mem()->ApproximateMemoryUsage() +
699 cfd_
->imm()->ApproximateMemoryUsage();
703 bool InternalStats::HandleNumEntriesActiveMemTable(uint64_t* value
,
705 Version
* /*version*/) {
706 // Current number of entires in the active memtable
707 *value
= cfd_
->mem()->num_entries();
711 bool InternalStats::HandleNumEntriesImmMemTables(uint64_t* value
,
713 Version
* /*version*/) {
714 // Current number of entries in the immutable memtables
715 *value
= cfd_
->imm()->current()->GetTotalNumEntries();
719 bool InternalStats::HandleNumDeletesActiveMemTable(uint64_t* value
,
721 Version
* /*version*/) {
722 // Current number of entires in the active memtable
723 *value
= cfd_
->mem()->num_deletes();
727 bool InternalStats::HandleNumDeletesImmMemTables(uint64_t* value
,
729 Version
* /*version*/) {
730 // Current number of entries in the immutable memtables
731 *value
= cfd_
->imm()->current()->GetTotalNumDeletes();
735 bool InternalStats::HandleEstimateNumKeys(uint64_t* value
, DBImpl
* /*db*/,
736 Version
* /*version*/) {
737 // Estimate number of entries in the column family:
738 // Use estimated entries in tables + total entries in memtables.
739 const auto* vstorage
= cfd_
->current()->storage_info();
740 uint64_t estimate_keys
= cfd_
->mem()->num_entries() +
741 cfd_
->imm()->current()->GetTotalNumEntries() +
742 vstorage
->GetEstimatedActiveKeys();
743 uint64_t estimate_deletes
=
744 cfd_
->mem()->num_deletes() + cfd_
->imm()->current()->GetTotalNumDeletes();
745 *value
= estimate_keys
> estimate_deletes
* 2
746 ? estimate_keys
- (estimate_deletes
* 2)
751 bool InternalStats::HandleNumSnapshots(uint64_t* value
, DBImpl
* db
,
752 Version
* /*version*/) {
753 *value
= db
->snapshots().count();
757 bool InternalStats::HandleOldestSnapshotTime(uint64_t* value
, DBImpl
* db
,
758 Version
* /*version*/) {
759 *value
= static_cast<uint64_t>(db
->snapshots().GetOldestSnapshotTime());
763 bool InternalStats::HandleNumLiveVersions(uint64_t* value
, DBImpl
* /*db*/,
764 Version
* /*version*/) {
765 *value
= cfd_
->GetNumLiveVersions();
769 bool InternalStats::HandleCurrentSuperVersionNumber(uint64_t* value
,
771 Version
* /*version*/) {
772 *value
= cfd_
->GetSuperVersionNumber();
776 bool InternalStats::HandleIsFileDeletionsEnabled(uint64_t* value
, DBImpl
* db
,
777 Version
* /*version*/) {
778 *value
= db
->IsFileDeletionsEnabled();
782 bool InternalStats::HandleBaseLevel(uint64_t* value
, DBImpl
* /*db*/,
783 Version
* /*version*/) {
784 const auto* vstorage
= cfd_
->current()->storage_info();
785 *value
= vstorage
->base_level();
789 bool InternalStats::HandleTotalSstFilesSize(uint64_t* value
, DBImpl
* /*db*/,
790 Version
* /*version*/) {
791 *value
= cfd_
->GetTotalSstFilesSize();
795 bool InternalStats::HandleLiveSstFilesSize(uint64_t* value
, DBImpl
* /*db*/,
796 Version
* /*version*/) {
797 *value
= cfd_
->GetLiveSstFilesSize();
801 bool InternalStats::HandleEstimatePendingCompactionBytes(uint64_t* value
,
803 Version
* /*version*/) {
804 const auto* vstorage
= cfd_
->current()->storage_info();
805 *value
= vstorage
->estimated_compaction_needed_bytes();
809 bool InternalStats::HandleEstimateTableReadersMem(uint64_t* value
,
812 *value
= (version
== nullptr) ? 0 : version
->GetMemoryUsageByTableReaders();
816 bool InternalStats::HandleEstimateLiveDataSize(uint64_t* value
, DBImpl
* /*db*/,
818 const auto* vstorage
= version
->storage_info();
819 *value
= vstorage
->EstimateLiveDataSize();
823 bool InternalStats::HandleMinLogNumberToKeep(uint64_t* value
, DBImpl
* db
,
824 Version
* /*version*/) {
825 *value
= db
->MinLogNumberToKeep();
829 bool InternalStats::HandleActualDelayedWriteRate(uint64_t* value
, DBImpl
* db
,
830 Version
* /*version*/) {
831 const WriteController
& wc
= db
->write_controller();
832 if (!wc
.NeedsDelay()) {
835 *value
= wc
.delayed_write_rate();
840 bool InternalStats::HandleIsWriteStopped(uint64_t* value
, DBImpl
* db
,
841 Version
* /*version*/) {
842 *value
= db
->write_controller().IsStopped() ? 1 : 0;
846 bool InternalStats::HandleEstimateOldestKeyTime(uint64_t* value
, DBImpl
* /*db*/,
847 Version
* /*version*/) {
848 // TODO(yiwu): The property is currently available for fifo compaction
849 // with allow_compaction = false. This is because we don't propagate
850 // oldest_key_time on compaction.
851 if (cfd_
->ioptions()->compaction_style
!= kCompactionStyleFIFO
||
852 cfd_
->GetCurrentMutableCFOptions()
853 ->compaction_options_fifo
.allow_compaction
) {
857 TablePropertiesCollection collection
;
858 auto s
= cfd_
->current()->GetPropertiesOfAllTables(&collection
);
862 *value
= std::numeric_limits
<uint64_t>::max();
863 for (auto& p
: collection
) {
864 *value
= std::min(*value
, p
.second
->oldest_key_time
);
870 *value
= std::min({cfd_
->mem()->ApproximateOldestKeyTime(),
871 cfd_
->imm()->ApproximateOldestKeyTime(), *value
});
873 return *value
> 0 && *value
< std::numeric_limits
<uint64_t>::max();
876 bool InternalStats::HandleBlockCacheStat(Cache
** block_cache
) {
877 assert(block_cache
!= nullptr);
878 auto* table_factory
= cfd_
->ioptions()->table_factory
;
879 assert(table_factory
!= nullptr);
880 if (BlockBasedTableFactory::kName
!= table_factory
->Name()) {
883 auto* table_options
=
884 reinterpret_cast<BlockBasedTableOptions
*>(table_factory
->GetOptions());
885 if (table_options
== nullptr) {
888 *block_cache
= table_options
->block_cache
.get();
889 if (table_options
->no_block_cache
|| *block_cache
== nullptr) {
895 bool InternalStats::HandleBlockCacheCapacity(uint64_t* value
, DBImpl
* /*db*/,
896 Version
* /*version*/) {
898 bool ok
= HandleBlockCacheStat(&block_cache
);
902 *value
= static_cast<uint64_t>(block_cache
->GetCapacity());
906 bool InternalStats::HandleBlockCacheUsage(uint64_t* value
, DBImpl
* /*db*/,
907 Version
* /*version*/) {
909 bool ok
= HandleBlockCacheStat(&block_cache
);
913 *value
= static_cast<uint64_t>(block_cache
->GetUsage());
917 bool InternalStats::HandleBlockCachePinnedUsage(uint64_t* value
, DBImpl
* /*db*/,
918 Version
* /*version*/) {
920 bool ok
= HandleBlockCacheStat(&block_cache
);
924 *value
= static_cast<uint64_t>(block_cache
->GetPinnedUsage());
928 void InternalStats::DumpDBStats(std::string
* value
) {
930 // DB-level stats, only available from default column family
931 double seconds_up
= (env_
->NowMicros() - started_at_
+ 1) / kMicrosInSec
;
932 double interval_seconds_up
= seconds_up
- db_stats_snapshot_
.seconds_up
;
933 snprintf(buf
, sizeof(buf
),
934 "\n** DB Stats **\nUptime(secs): %.1f total, %.1f interval\n",
935 seconds_up
, interval_seconds_up
);
938 uint64_t user_bytes_written
= GetDBStats(InternalStats::BYTES_WRITTEN
);
939 uint64_t num_keys_written
= GetDBStats(InternalStats::NUMBER_KEYS_WRITTEN
);
940 uint64_t write_other
= GetDBStats(InternalStats::WRITE_DONE_BY_OTHER
);
941 uint64_t write_self
= GetDBStats(InternalStats::WRITE_DONE_BY_SELF
);
942 uint64_t wal_bytes
= GetDBStats(InternalStats::WAL_FILE_BYTES
);
943 uint64_t wal_synced
= GetDBStats(InternalStats::WAL_FILE_SYNCED
);
944 uint64_t write_with_wal
= GetDBStats(InternalStats::WRITE_WITH_WAL
);
945 uint64_t write_stall_micros
= GetDBStats(InternalStats::WRITE_STALL_MICROS
);
947 const int kHumanMicrosLen
= 32;
948 char human_micros
[kHumanMicrosLen
];
951 // writes: total number of write requests.
952 // keys: total number of key updates issued by all the write requests
953 // commit groups: number of group commits issued to the DB. Each group can
954 // contain one or more writes.
955 // so writes/keys is the average number of put in multi-put or put
956 // writes/groups is the average group commit size.
958 // The format is the same for interval stats.
959 snprintf(buf
, sizeof(buf
),
960 "Cumulative writes: %s writes, %s keys, %s commit groups, "
961 "%.1f writes per commit group, ingest: %.2f GB, %.2f MB/s\n",
962 NumberToHumanString(write_other
+ write_self
).c_str(),
963 NumberToHumanString(num_keys_written
).c_str(),
964 NumberToHumanString(write_self
).c_str(),
965 (write_other
+ write_self
) / static_cast<double>(write_self
+ 1),
966 user_bytes_written
/ kGB
, user_bytes_written
/ kMB
/ seconds_up
);
969 snprintf(buf
, sizeof(buf
),
970 "Cumulative WAL: %s writes, %s syncs, "
971 "%.2f writes per sync, written: %.2f GB, %.2f MB/s\n",
972 NumberToHumanString(write_with_wal
).c_str(),
973 NumberToHumanString(wal_synced
).c_str(),
974 write_with_wal
/ static_cast<double>(wal_synced
+ 1),
975 wal_bytes
/ kGB
, wal_bytes
/ kMB
/ seconds_up
);
978 AppendHumanMicros(write_stall_micros
, human_micros
, kHumanMicrosLen
, true);
979 snprintf(buf
, sizeof(buf
), "Cumulative stall: %s, %.1f percent\n",
981 // 10000 = divide by 1M to get secs, then multiply by 100 for pct
982 write_stall_micros
/ 10000.0 / std::max(seconds_up
, 0.001));
986 uint64_t interval_write_other
= write_other
- db_stats_snapshot_
.write_other
;
987 uint64_t interval_write_self
= write_self
- db_stats_snapshot_
.write_self
;
988 uint64_t interval_num_keys_written
=
989 num_keys_written
- db_stats_snapshot_
.num_keys_written
;
992 "Interval writes: %s writes, %s keys, %s commit groups, "
993 "%.1f writes per commit group, ingest: %.2f MB, %.2f MB/s\n",
994 NumberToHumanString(interval_write_other
+ interval_write_self
).c_str(),
995 NumberToHumanString(interval_num_keys_written
).c_str(),
996 NumberToHumanString(interval_write_self
).c_str(),
997 static_cast<double>(interval_write_other
+ interval_write_self
) /
998 (interval_write_self
+ 1),
999 (user_bytes_written
- db_stats_snapshot_
.ingest_bytes
) / kMB
,
1000 (user_bytes_written
- db_stats_snapshot_
.ingest_bytes
) / kMB
/
1001 std::max(interval_seconds_up
, 0.001)),
1004 uint64_t interval_write_with_wal
=
1005 write_with_wal
- db_stats_snapshot_
.write_with_wal
;
1006 uint64_t interval_wal_synced
= wal_synced
- db_stats_snapshot_
.wal_synced
;
1007 uint64_t interval_wal_bytes
= wal_bytes
- db_stats_snapshot_
.wal_bytes
;
1011 "Interval WAL: %s writes, %s syncs, "
1012 "%.2f writes per sync, written: %.2f MB, %.2f MB/s\n",
1013 NumberToHumanString(interval_write_with_wal
).c_str(),
1014 NumberToHumanString(interval_wal_synced
).c_str(),
1015 interval_write_with_wal
/ static_cast<double>(interval_wal_synced
+ 1),
1016 interval_wal_bytes
/ kGB
,
1017 interval_wal_bytes
/ kMB
/ std::max(interval_seconds_up
, 0.001));
1021 AppendHumanMicros(write_stall_micros
- db_stats_snapshot_
.write_stall_micros
,
1022 human_micros
, kHumanMicrosLen
, true);
1023 snprintf(buf
, sizeof(buf
), "Interval stall: %s, %.1f percent\n", human_micros
,
1024 // 10000 = divide by 1M to get secs, then multiply by 100 for pct
1025 (write_stall_micros
- db_stats_snapshot_
.write_stall_micros
) /
1026 10000.0 / std::max(interval_seconds_up
, 0.001));
1029 db_stats_snapshot_
.seconds_up
= seconds_up
;
1030 db_stats_snapshot_
.ingest_bytes
= user_bytes_written
;
1031 db_stats_snapshot_
.write_other
= write_other
;
1032 db_stats_snapshot_
.write_self
= write_self
;
1033 db_stats_snapshot_
.num_keys_written
= num_keys_written
;
1034 db_stats_snapshot_
.wal_bytes
= wal_bytes
;
1035 db_stats_snapshot_
.wal_synced
= wal_synced
;
1036 db_stats_snapshot_
.write_with_wal
= write_with_wal
;
1037 db_stats_snapshot_
.write_stall_micros
= write_stall_micros
;
1041 * Dump Compaction Level stats to a map of stat name with "compaction." prefix
1042 * to value in double as string. The level in stat name is represented with
1043 * a prefix "Lx" where "x" is the level number. A special level "Sum"
1044 * represents the sum of a stat for all levels.
1045 * The result also contains IO stall counters which keys start with "io_stalls."
1046 * and values represent uint64 encoded as strings.
1048 void InternalStats::DumpCFMapStats(
1049 std::map
<std::string
, std::string
>* cf_stats
) {
1050 CompactionStats compaction_stats_sum
;
1051 std::map
<int, std::map
<LevelStatType
, double>> levels_stats
;
1052 DumpCFMapStats(&levels_stats
, &compaction_stats_sum
);
1053 for (auto const& level_ent
: levels_stats
) {
1055 level_ent
.first
== -1 ? "Sum" : "L" + ToString(level_ent
.first
);
1056 for (auto const& stat_ent
: level_ent
.second
) {
1057 auto stat_type
= stat_ent
.first
;
1059 "compaction." + level_str
+ "." +
1060 InternalStats::compaction_level_stats
.at(stat_type
).property_name
;
1061 (*cf_stats
)[key_str
] = std::to_string(stat_ent
.second
);
1065 DumpCFMapStatsIOStalls(cf_stats
);
1068 void InternalStats::DumpCFMapStats(
1069 std::map
<int, std::map
<LevelStatType
, double>>* levels_stats
,
1070 CompactionStats
* compaction_stats_sum
) {
1071 const VersionStorageInfo
* vstorage
= cfd_
->current()->storage_info();
1073 int num_levels_to_check
=
1074 (cfd_
->ioptions()->compaction_style
!= kCompactionStyleFIFO
)
1075 ? vstorage
->num_levels() - 1
1078 // Compaction scores are sorted based on its value. Restore them to the
1080 std::vector
<double> compaction_score(number_levels_
, 0);
1081 for (int i
= 0; i
< num_levels_to_check
; ++i
) {
1082 compaction_score
[vstorage
->CompactionScoreLevel(i
)] =
1083 vstorage
->CompactionScore(i
);
1085 // Count # of files being compacted for each level
1086 std::vector
<int> files_being_compacted(number_levels_
, 0);
1087 for (int level
= 0; level
< number_levels_
; ++level
) {
1088 for (auto* f
: vstorage
->LevelFiles(level
)) {
1089 if (f
->being_compacted
) {
1090 ++files_being_compacted
[level
];
1095 int total_files
= 0;
1096 int total_files_being_compacted
= 0;
1097 double total_file_size
= 0;
1098 uint64_t flush_ingest
= cf_stats_value_
[BYTES_FLUSHED
];
1099 uint64_t add_file_ingest
= cf_stats_value_
[BYTES_INGESTED_ADD_FILE
];
1100 uint64_t curr_ingest
= flush_ingest
+ add_file_ingest
;
1101 for (int level
= 0; level
< number_levels_
; level
++) {
1102 int files
= vstorage
->NumLevelFiles(level
);
1103 total_files
+= files
;
1104 total_files_being_compacted
+= files_being_compacted
[level
];
1105 if (comp_stats_
[level
].micros
> 0 || files
> 0) {
1106 compaction_stats_sum
->Add(comp_stats_
[level
]);
1107 total_file_size
+= vstorage
->NumLevelBytes(level
);
1108 uint64_t input_bytes
;
1110 input_bytes
= curr_ingest
;
1112 input_bytes
= comp_stats_
[level
].bytes_read_non_output_levels
;
1117 : static_cast<double>(comp_stats_
[level
].bytes_written
) /
1119 std::map
<LevelStatType
, double> level_stats
;
1120 PrepareLevelStats(&level_stats
, files
, files_being_compacted
[level
],
1121 static_cast<double>(vstorage
->NumLevelBytes(level
)),
1122 compaction_score
[level
], w_amp
, comp_stats_
[level
]);
1123 (*levels_stats
)[level
] = level_stats
;
1126 // Cumulative summary
1127 double w_amp
= compaction_stats_sum
->bytes_written
/
1128 static_cast<double>(curr_ingest
+ 1);
1129 // Stats summary across levels
1130 std::map
<LevelStatType
, double> sum_stats
;
1131 PrepareLevelStats(&sum_stats
, total_files
, total_files_being_compacted
,
1132 total_file_size
, 0, w_amp
, *compaction_stats_sum
);
1133 (*levels_stats
)[-1] = sum_stats
; // -1 is for the Sum level
1136 void InternalStats::DumpCFMapStatsIOStalls(
1137 std::map
<std::string
, std::string
>* cf_stats
) {
1138 (*cf_stats
)["io_stalls.level0_slowdown"] =
1139 std::to_string(cf_stats_count_
[L0_FILE_COUNT_LIMIT_SLOWDOWNS
]);
1140 (*cf_stats
)["io_stalls.level0_slowdown_with_compaction"] =
1141 std::to_string(cf_stats_count_
[LOCKED_L0_FILE_COUNT_LIMIT_SLOWDOWNS
]);
1142 (*cf_stats
)["io_stalls.level0_numfiles"] =
1143 std::to_string(cf_stats_count_
[L0_FILE_COUNT_LIMIT_STOPS
]);
1144 (*cf_stats
)["io_stalls.level0_numfiles_with_compaction"] =
1145 std::to_string(cf_stats_count_
[LOCKED_L0_FILE_COUNT_LIMIT_STOPS
]);
1146 (*cf_stats
)["io_stalls.stop_for_pending_compaction_bytes"] =
1147 std::to_string(cf_stats_count_
[PENDING_COMPACTION_BYTES_LIMIT_STOPS
]);
1148 (*cf_stats
)["io_stalls.slowdown_for_pending_compaction_bytes"] =
1149 std::to_string(cf_stats_count_
[PENDING_COMPACTION_BYTES_LIMIT_SLOWDOWNS
]);
1150 (*cf_stats
)["io_stalls.memtable_compaction"] =
1151 std::to_string(cf_stats_count_
[MEMTABLE_LIMIT_STOPS
]);
1152 (*cf_stats
)["io_stalls.memtable_slowdown"] =
1153 std::to_string(cf_stats_count_
[MEMTABLE_LIMIT_SLOWDOWNS
]);
1155 uint64_t total_stop
= cf_stats_count_
[L0_FILE_COUNT_LIMIT_STOPS
] +
1156 cf_stats_count_
[PENDING_COMPACTION_BYTES_LIMIT_STOPS
] +
1157 cf_stats_count_
[MEMTABLE_LIMIT_STOPS
];
1159 uint64_t total_slowdown
=
1160 cf_stats_count_
[L0_FILE_COUNT_LIMIT_SLOWDOWNS
] +
1161 cf_stats_count_
[PENDING_COMPACTION_BYTES_LIMIT_SLOWDOWNS
] +
1162 cf_stats_count_
[MEMTABLE_LIMIT_SLOWDOWNS
];
1164 (*cf_stats
)["io_stalls.total_stop"] = std::to_string(total_stop
);
1165 (*cf_stats
)["io_stalls.total_slowdown"] = std::to_string(total_slowdown
);
1168 void InternalStats::DumpCFStats(std::string
* value
) {
1169 DumpCFStatsNoFileHistogram(value
);
1170 DumpCFFileHistogram(value
);
1173 void InternalStats::DumpCFStatsNoFileHistogram(std::string
* value
) {
1175 // Per-ColumnFamily stats
1176 PrintLevelStatsHeader(buf
, sizeof(buf
), cfd_
->GetName());
1179 // Print stats for each level
1180 std::map
<int, std::map
<LevelStatType
, double>> levels_stats
;
1181 CompactionStats compaction_stats_sum
;
1182 DumpCFMapStats(&levels_stats
, &compaction_stats_sum
);
1183 for (int l
= 0; l
< number_levels_
; ++l
) {
1184 if (levels_stats
.find(l
) != levels_stats
.end()) {
1185 PrintLevelStats(buf
, sizeof(buf
), "L" + ToString(l
), levels_stats
[l
]);
1190 // Print sum of level stats
1191 PrintLevelStats(buf
, sizeof(buf
), "Sum", levels_stats
[-1]);
1194 uint64_t flush_ingest
= cf_stats_value_
[BYTES_FLUSHED
];
1195 uint64_t add_file_ingest
= cf_stats_value_
[BYTES_INGESTED_ADD_FILE
];
1196 uint64_t ingest_files_addfile
= cf_stats_value_
[INGESTED_NUM_FILES_TOTAL
];
1197 uint64_t ingest_l0_files_addfile
=
1198 cf_stats_value_
[INGESTED_LEVEL0_NUM_FILES_TOTAL
];
1199 uint64_t ingest_keys_addfile
= cf_stats_value_
[INGESTED_NUM_KEYS_TOTAL
];
1200 // Cumulative summary
1201 uint64_t total_stall_count
=
1202 cf_stats_count_
[L0_FILE_COUNT_LIMIT_SLOWDOWNS
] +
1203 cf_stats_count_
[L0_FILE_COUNT_LIMIT_STOPS
] +
1204 cf_stats_count_
[PENDING_COMPACTION_BYTES_LIMIT_SLOWDOWNS
] +
1205 cf_stats_count_
[PENDING_COMPACTION_BYTES_LIMIT_STOPS
] +
1206 cf_stats_count_
[MEMTABLE_LIMIT_STOPS
] +
1207 cf_stats_count_
[MEMTABLE_LIMIT_SLOWDOWNS
];
1209 uint64_t interval_flush_ingest
=
1210 flush_ingest
- cf_stats_snapshot_
.ingest_bytes_flush
;
1211 uint64_t interval_add_file_inget
=
1212 add_file_ingest
- cf_stats_snapshot_
.ingest_bytes_addfile
;
1213 uint64_t interval_ingest
=
1214 interval_flush_ingest
+ interval_add_file_inget
+ 1;
1215 CompactionStats
interval_stats(compaction_stats_sum
);
1216 interval_stats
.Subtract(cf_stats_snapshot_
.comp_stats
);
1218 interval_stats
.bytes_written
/ static_cast<double>(interval_ingest
);
1219 PrintLevelStats(buf
, sizeof(buf
), "Int", 0, 0, 0, 0, w_amp
, interval_stats
);
1222 double seconds_up
= (env_
->NowMicros() - started_at_
+ 1) / kMicrosInSec
;
1223 double interval_seconds_up
= seconds_up
- cf_stats_snapshot_
.seconds_up
;
1224 snprintf(buf
, sizeof(buf
), "Uptime(secs): %.1f total, %.1f interval\n",
1225 seconds_up
, interval_seconds_up
);
1227 snprintf(buf
, sizeof(buf
), "Flush(GB): cumulative %.3f, interval %.3f\n",
1228 flush_ingest
/ kGB
, interval_flush_ingest
/ kGB
);
1230 snprintf(buf
, sizeof(buf
), "AddFile(GB): cumulative %.3f, interval %.3f\n",
1231 add_file_ingest
/ kGB
, interval_add_file_inget
/ kGB
);
1234 uint64_t interval_ingest_files_addfile
=
1235 ingest_files_addfile
- cf_stats_snapshot_
.ingest_files_addfile
;
1236 snprintf(buf
, sizeof(buf
),
1237 "AddFile(Total Files): cumulative %" PRIu64
", interval %" PRIu64
1239 ingest_files_addfile
, interval_ingest_files_addfile
);
1242 uint64_t interval_ingest_l0_files_addfile
=
1243 ingest_l0_files_addfile
- cf_stats_snapshot_
.ingest_l0_files_addfile
;
1244 snprintf(buf
, sizeof(buf
),
1245 "AddFile(L0 Files): cumulative %" PRIu64
", interval %" PRIu64
"\n",
1246 ingest_l0_files_addfile
, interval_ingest_l0_files_addfile
);
1249 uint64_t interval_ingest_keys_addfile
=
1250 ingest_keys_addfile
- cf_stats_snapshot_
.ingest_keys_addfile
;
1251 snprintf(buf
, sizeof(buf
),
1252 "AddFile(Keys): cumulative %" PRIu64
", interval %" PRIu64
"\n",
1253 ingest_keys_addfile
, interval_ingest_keys_addfile
);
1257 uint64_t compact_bytes_read
= 0;
1258 uint64_t compact_bytes_write
= 0;
1259 uint64_t compact_micros
= 0;
1260 for (int level
= 0; level
< number_levels_
; level
++) {
1261 compact_bytes_read
+= comp_stats_
[level
].bytes_read_output_level
+
1262 comp_stats_
[level
].bytes_read_non_output_levels
;
1263 compact_bytes_write
+= comp_stats_
[level
].bytes_written
;
1264 compact_micros
+= comp_stats_
[level
].micros
;
1267 snprintf(buf
, sizeof(buf
),
1268 "Cumulative compaction: %.2f GB write, %.2f MB/s write, "
1269 "%.2f GB read, %.2f MB/s read, %.1f seconds\n",
1270 compact_bytes_write
/ kGB
, compact_bytes_write
/ kMB
/ seconds_up
,
1271 compact_bytes_read
/ kGB
, compact_bytes_read
/ kMB
/ seconds_up
,
1272 compact_micros
/ kMicrosInSec
);
1275 // Compaction interval
1276 uint64_t interval_compact_bytes_write
=
1277 compact_bytes_write
- cf_stats_snapshot_
.compact_bytes_write
;
1278 uint64_t interval_compact_bytes_read
=
1279 compact_bytes_read
- cf_stats_snapshot_
.compact_bytes_read
;
1280 uint64_t interval_compact_micros
=
1281 compact_micros
- cf_stats_snapshot_
.compact_micros
;
1285 "Interval compaction: %.2f GB write, %.2f MB/s write, "
1286 "%.2f GB read, %.2f MB/s read, %.1f seconds\n",
1287 interval_compact_bytes_write
/ kGB
,
1288 interval_compact_bytes_write
/ kMB
/ std::max(interval_seconds_up
, 0.001),
1289 interval_compact_bytes_read
/ kGB
,
1290 interval_compact_bytes_read
/ kMB
/ std::max(interval_seconds_up
, 0.001),
1291 interval_compact_micros
/ kMicrosInSec
);
1293 cf_stats_snapshot_
.compact_bytes_write
= compact_bytes_write
;
1294 cf_stats_snapshot_
.compact_bytes_read
= compact_bytes_read
;
1295 cf_stats_snapshot_
.compact_micros
= compact_micros
;
1297 snprintf(buf
, sizeof(buf
),
1298 "Stalls(count): %" PRIu64
1299 " level0_slowdown, "
1301 " level0_slowdown_with_compaction, "
1303 " level0_numfiles, "
1305 " level0_numfiles_with_compaction, "
1307 " stop for pending_compaction_bytes, "
1309 " slowdown for pending_compaction_bytes, "
1311 " memtable_compaction, "
1313 " memtable_slowdown, "
1314 "interval %" PRIu64
" total count\n",
1315 cf_stats_count_
[L0_FILE_COUNT_LIMIT_SLOWDOWNS
],
1316 cf_stats_count_
[LOCKED_L0_FILE_COUNT_LIMIT_SLOWDOWNS
],
1317 cf_stats_count_
[L0_FILE_COUNT_LIMIT_STOPS
],
1318 cf_stats_count_
[LOCKED_L0_FILE_COUNT_LIMIT_STOPS
],
1319 cf_stats_count_
[PENDING_COMPACTION_BYTES_LIMIT_STOPS
],
1320 cf_stats_count_
[PENDING_COMPACTION_BYTES_LIMIT_SLOWDOWNS
],
1321 cf_stats_count_
[MEMTABLE_LIMIT_STOPS
],
1322 cf_stats_count_
[MEMTABLE_LIMIT_SLOWDOWNS
],
1323 total_stall_count
- cf_stats_snapshot_
.stall_count
);
1326 cf_stats_snapshot_
.seconds_up
= seconds_up
;
1327 cf_stats_snapshot_
.ingest_bytes_flush
= flush_ingest
;
1328 cf_stats_snapshot_
.ingest_bytes_addfile
= add_file_ingest
;
1329 cf_stats_snapshot_
.ingest_files_addfile
= ingest_files_addfile
;
1330 cf_stats_snapshot_
.ingest_l0_files_addfile
= ingest_l0_files_addfile
;
1331 cf_stats_snapshot_
.ingest_keys_addfile
= ingest_keys_addfile
;
1332 cf_stats_snapshot_
.comp_stats
= compaction_stats_sum
;
1333 cf_stats_snapshot_
.stall_count
= total_stall_count
;
1336 void InternalStats::DumpCFFileHistogram(std::string
* value
) {
1338 snprintf(buf
, sizeof(buf
),
1339 "\n** File Read Latency Histogram By Level [%s] **\n",
1340 cfd_
->GetName().c_str());
1343 for (int level
= 0; level
< number_levels_
; level
++) {
1344 if (!file_read_latency_
[level
].Empty()) {
1346 snprintf(buf2
, sizeof(buf2
),
1347 "** Level %d read latency histogram (micros):\n%s\n", level
,
1348 file_read_latency_
[level
].ToString().c_str());
1349 value
->append(buf2
);
1356 const DBPropertyInfo
* GetPropertyInfo(const Slice
& /*property*/) {
1360 #endif // !ROCKSDB_LITE
1362 } // namespace rocksdb