// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree. An additional grant
// of patent rights can be found in the PATENTS file in the same directory.
//
// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file. See the AUTHORS file for names of contributors.
8 | ||
9 | #include "db/internal_stats.h" | |
10 | ||
11 | #ifndef __STDC_FORMAT_MACROS | |
12 | #define __STDC_FORMAT_MACROS | |
13 | #endif | |
14 | ||
15 | #include <inttypes.h> | |
16 | #include <string> | |
17 | #include <algorithm> | |
18 | #include <utility> | |
19 | #include <vector> | |
20 | #include "db/column_family.h" | |
21 | ||
22 | #include "db/db_impl.h" | |
23 | #include "util/string_util.h" | |
24 | ||
25 | namespace rocksdb { | |
26 | ||
27 | #ifndef ROCKSDB_LITE | |
28 | ||
29 | const std::map<LevelStatType, LevelStat> InternalStats::compaction_level_stats = | |
30 | { | |
31 | {LevelStatType::NUM_FILES, LevelStat{"NumFiles", "Files"}}, | |
32 | {LevelStatType::COMPACTED_FILES, | |
33 | LevelStat{"CompactedFiles", "CompactedFiles"}}, | |
34 | {LevelStatType::SIZE_BYTES, LevelStat{"SizeBytes", "Size"}}, | |
35 | {LevelStatType::SCORE, LevelStat{"Score", "Score"}}, | |
36 | {LevelStatType::READ_GB, LevelStat{"ReadGB", "Read(GB)"}}, | |
37 | {LevelStatType::RN_GB, LevelStat{"RnGB", "Rn(GB)"}}, | |
38 | {LevelStatType::RNP1_GB, LevelStat{"Rnp1GB", "Rnp1(GB)"}}, | |
39 | {LevelStatType::WRITE_GB, LevelStat{"WriteGB", "Write(GB)"}}, | |
40 | {LevelStatType::W_NEW_GB, LevelStat{"WnewGB", "Wnew(GB)"}}, | |
41 | {LevelStatType::MOVED_GB, LevelStat{"MovedGB", "Moved(GB)"}}, | |
42 | {LevelStatType::WRITE_AMP, LevelStat{"WriteAmp", "W-Amp"}}, | |
43 | {LevelStatType::READ_MBPS, LevelStat{"ReadMBps", "Rd(MB/s)"}}, | |
44 | {LevelStatType::WRITE_MBPS, LevelStat{"WriteMBps", "Wr(MB/s)"}}, | |
45 | {LevelStatType::COMP_SEC, LevelStat{"CompSec", "Comp(sec)"}}, | |
46 | {LevelStatType::COMP_COUNT, LevelStat{"CompCount", "Comp(cnt)"}}, | |
47 | {LevelStatType::AVG_SEC, LevelStat{"AvgSec", "Avg(sec)"}}, | |
48 | {LevelStatType::KEY_IN, LevelStat{"KeyIn", "KeyIn"}}, | |
49 | {LevelStatType::KEY_DROP, LevelStat{"KeyDrop", "KeyDrop"}}, | |
50 | }; | |
51 | ||
52 | namespace { | |
// Unit-conversion constants used when rendering stats output.
const double kMB = 1048576.0;
const double kGB = kMB * 1024;
const double kMicrosInSec = 1000000.0;
56 | ||
57 | void PrintLevelStatsHeader(char* buf, size_t len, const std::string& cf_name) { | |
58 | int written_size = | |
59 | snprintf(buf, len, "\n** Compaction Stats [%s] **\n", cf_name.c_str()); | |
60 | auto hdr = [](LevelStatType t) { | |
61 | return InternalStats::compaction_level_stats.at(t).header_name.c_str(); | |
62 | }; | |
63 | int line_size = snprintf( | |
64 | buf + written_size, len - written_size, | |
65 | "Level %s %s %s %s %s %s %s %s %s %s %s %s %s %s %s %s %s\n", | |
66 | // Note that we skip COMPACTED_FILES and merge it with Files column | |
67 | hdr(LevelStatType::NUM_FILES), hdr(LevelStatType::SIZE_BYTES), | |
68 | hdr(LevelStatType::SCORE), hdr(LevelStatType::READ_GB), | |
69 | hdr(LevelStatType::RN_GB), hdr(LevelStatType::RNP1_GB), | |
70 | hdr(LevelStatType::WRITE_GB), hdr(LevelStatType::W_NEW_GB), | |
71 | hdr(LevelStatType::MOVED_GB), hdr(LevelStatType::WRITE_AMP), | |
72 | hdr(LevelStatType::READ_MBPS), hdr(LevelStatType::WRITE_MBPS), | |
73 | hdr(LevelStatType::COMP_SEC), hdr(LevelStatType::COMP_COUNT), | |
74 | hdr(LevelStatType::AVG_SEC), hdr(LevelStatType::KEY_IN), | |
75 | hdr(LevelStatType::KEY_DROP)); | |
76 | ||
77 | written_size += line_size; | |
78 | snprintf(buf + written_size, len - written_size, "%s\n", | |
79 | std::string(line_size, '-').c_str()); | |
80 | } | |
81 | ||
// Fills *level_stats with one row of the compaction stats table.
// num_files / being_compacted: file counts for this level.
// total_file_size: level size in bytes. score: compaction score.
// w_amp: write amplification. stats: accumulated compaction counters.
void PrepareLevelStats(std::map<LevelStatType, double>* level_stats,
                       int num_files, int being_compacted,
                       double total_file_size, double score, double w_amp,
                       const InternalStats::CompactionStats& stats) {
  uint64_t bytes_read =
      stats.bytes_read_non_output_levels + stats.bytes_read_output_level;
  // Net new bytes: output written minus what was re-read from the output
  // level. Can be negative, hence the signed type.
  int64_t bytes_new =
      stats.bytes_written - stats.bytes_read_output_level;
  // +1 micro avoids division by zero in the MB/s rates below.
  double elapsed = (stats.micros + 1) / kMicrosInSec;

  (*level_stats)[LevelStatType::NUM_FILES] = num_files;
  (*level_stats)[LevelStatType::COMPACTED_FILES] = being_compacted;
  (*level_stats)[LevelStatType::SIZE_BYTES] = total_file_size;
  (*level_stats)[LevelStatType::SCORE] = score;
  (*level_stats)[LevelStatType::READ_GB] = bytes_read / kGB;
  (*level_stats)[LevelStatType::RN_GB] =
      stats.bytes_read_non_output_levels / kGB;
  (*level_stats)[LevelStatType::RNP1_GB] = stats.bytes_read_output_level / kGB;
  (*level_stats)[LevelStatType::WRITE_GB] = stats.bytes_written / kGB;
  (*level_stats)[LevelStatType::W_NEW_GB] = bytes_new / kGB;
  (*level_stats)[LevelStatType::MOVED_GB] = stats.bytes_moved / kGB;
  (*level_stats)[LevelStatType::WRITE_AMP] = w_amp;
  (*level_stats)[LevelStatType::READ_MBPS] = bytes_read / kMB / elapsed;
  (*level_stats)[LevelStatType::WRITE_MBPS] =
      stats.bytes_written / kMB / elapsed;
  (*level_stats)[LevelStatType::COMP_SEC] = stats.micros / kMicrosInSec;
  (*level_stats)[LevelStatType::COMP_COUNT] = stats.count;
  (*level_stats)[LevelStatType::AVG_SEC] =
      stats.count == 0 ? 0 : stats.micros / kMicrosInSec / stats.count;
  (*level_stats)[LevelStatType::KEY_IN] =
      static_cast<double>(stats.num_input_records);
  (*level_stats)[LevelStatType::KEY_DROP] =
      static_cast<double>(stats.num_dropped_records);
}
116 | ||
// Formats one row of the compaction stats table into buf. Column order and
// widths mirror the header produced by PrintLevelStatsHeader(); NUM_FILES
// and COMPACTED_FILES are merged into a single "a/b" Files column.
void PrintLevelStats(char* buf, size_t len, const std::string& name,
                     const std::map<LevelStatType, double>& stat_value) {
  snprintf(buf, len,
           "%4s "      /* Level */
           "%6d/%-3d " /* Files */
           "%8s "      /* Size */
           "%5.1f "    /* Score */
           "%8.1f "    /* Read(GB) */
           "%7.1f "    /* Rn(GB) */
           "%8.1f "    /* Rnp1(GB) */
           "%9.1f "    /* Write(GB) */
           "%8.1f "    /* Wnew(GB) */
           "%9.1f "    /* Moved(GB) */
           "%5.1f "    /* W-Amp */
           "%8.1f "    /* Rd(MB/s) */
           "%8.1f "    /* Wr(MB/s) */
           "%9.0f "    /* Comp(sec) */
           "%9d "      /* Comp(cnt) */
           "%8.3f "    /* Avg(sec) */
           "%7s "      /* KeyIn */
           "%6s\n",    /* KeyDrop */
           name.c_str(),
           static_cast<int>(stat_value.at(LevelStatType::NUM_FILES)),
           static_cast<int>(stat_value.at(LevelStatType::COMPACTED_FILES)),
           BytesToHumanString(
               static_cast<uint64_t>(stat_value.at(LevelStatType::SIZE_BYTES)))
               .c_str(),
           stat_value.at(LevelStatType::SCORE),
           stat_value.at(LevelStatType::READ_GB),
           stat_value.at(LevelStatType::RN_GB),
           stat_value.at(LevelStatType::RNP1_GB),
           stat_value.at(LevelStatType::WRITE_GB),
           stat_value.at(LevelStatType::W_NEW_GB),
           stat_value.at(LevelStatType::MOVED_GB),
           stat_value.at(LevelStatType::WRITE_AMP),
           stat_value.at(LevelStatType::READ_MBPS),
           stat_value.at(LevelStatType::WRITE_MBPS),
           stat_value.at(LevelStatType::COMP_SEC),
           static_cast<int>(stat_value.at(LevelStatType::COMP_COUNT)),
           stat_value.at(LevelStatType::AVG_SEC),
           NumberToHumanString(
               static_cast<std::int64_t>(stat_value.at(LevelStatType::KEY_IN)))
               .c_str(),
           NumberToHumanString(static_cast<std::int64_t>(
                                   stat_value.at(LevelStatType::KEY_DROP)))
               .c_str());
}
164 | ||
165 | void PrintLevelStats(char* buf, size_t len, const std::string& name, | |
166 | int num_files, int being_compacted, double total_file_size, | |
167 | double score, double w_amp, | |
168 | const InternalStats::CompactionStats& stats) { | |
169 | std::map<LevelStatType, double> level_stats; | |
170 | PrepareLevelStats(&level_stats, num_files, being_compacted, total_file_size, | |
171 | score, w_amp, stats); | |
172 | PrintLevelStats(buf, len, name, level_stats); | |
173 | } | |
174 | ||
175 | // Assumes that trailing numbers represent an optional argument. This requires | |
176 | // property names to not end with numbers. | |
177 | std::pair<Slice, Slice> GetPropertyNameAndArg(const Slice& property) { | |
178 | Slice name = property, arg = property; | |
179 | size_t sfx_len = 0; | |
180 | while (sfx_len < property.size() && | |
181 | isdigit(property[property.size() - sfx_len - 1])) { | |
182 | ++sfx_len; | |
183 | } | |
184 | name.remove_suffix(sfx_len); | |
185 | arg.remove_prefix(property.size() - sfx_len); | |
186 | return {name, arg}; | |
187 | } | |
188 | } // anonymous namespace | |
189 | ||
// Property-name suffixes. Each public property name below is formed as
// rocksdb_prefix + one of these suffixes.
static const std::string rocksdb_prefix = "rocksdb.";

static const std::string num_files_at_level_prefix = "num-files-at-level";
static const std::string compression_ratio_at_level_prefix =
    "compression-ratio-at-level";
static const std::string allstats = "stats";
static const std::string sstables = "sstables";
static const std::string cfstats = "cfstats";
static const std::string cfstats_no_file_histogram =
    "cfstats-no-file-histogram";
static const std::string cf_file_histogram = "cf-file-histogram";
static const std::string dbstats = "dbstats";
static const std::string levelstats = "levelstats";
static const std::string num_immutable_mem_table = "num-immutable-mem-table";
static const std::string num_immutable_mem_table_flushed =
    "num-immutable-mem-table-flushed";
static const std::string mem_table_flush_pending = "mem-table-flush-pending";
static const std::string compaction_pending = "compaction-pending";
static const std::string background_errors = "background-errors";
static const std::string cur_size_active_mem_table =
    "cur-size-active-mem-table";
static const std::string cur_size_all_mem_tables = "cur-size-all-mem-tables";
static const std::string size_all_mem_tables = "size-all-mem-tables";
static const std::string num_entries_active_mem_table =
    "num-entries-active-mem-table";
static const std::string num_entries_imm_mem_tables =
    "num-entries-imm-mem-tables";
static const std::string num_deletes_active_mem_table =
    "num-deletes-active-mem-table";
static const std::string num_deletes_imm_mem_tables =
    "num-deletes-imm-mem-tables";
static const std::string estimate_num_keys = "estimate-num-keys";
static const std::string estimate_table_readers_mem =
    "estimate-table-readers-mem";
static const std::string is_file_deletions_enabled =
    "is-file-deletions-enabled";
static const std::string num_snapshots = "num-snapshots";
static const std::string oldest_snapshot_time = "oldest-snapshot-time";
static const std::string num_live_versions = "num-live-versions";
static const std::string current_version_number =
    "current-super-version-number";
static const std::string estimate_live_data_size = "estimate-live-data-size";
static const std::string min_log_number_to_keep = "min-log-number-to-keep";
static const std::string base_level = "base-level";
static const std::string total_sst_files_size = "total-sst-files-size";
static const std::string estimate_pending_comp_bytes =
    "estimate-pending-compaction-bytes";
static const std::string aggregated_table_properties =
    "aggregated-table-properties";
static const std::string aggregated_table_properties_at_level =
    aggregated_table_properties + "-at-level";
static const std::string num_running_compactions = "num-running-compactions";
static const std::string num_running_flushes = "num-running-flushes";
static const std::string actual_delayed_write_rate =
    "actual-delayed-write-rate";
static const std::string is_write_stopped = "is-write-stopped";
246 | ||
// Definitions of the public DB::Properties constants: each is the
// "rocksdb." prefix plus the matching suffix declared above.
const std::string DB::Properties::kNumFilesAtLevelPrefix =
    rocksdb_prefix + num_files_at_level_prefix;
const std::string DB::Properties::kCompressionRatioAtLevelPrefix =
    rocksdb_prefix + compression_ratio_at_level_prefix;
const std::string DB::Properties::kStats = rocksdb_prefix + allstats;
const std::string DB::Properties::kSSTables = rocksdb_prefix + sstables;
const std::string DB::Properties::kCFStats = rocksdb_prefix + cfstats;
const std::string DB::Properties::kCFStatsNoFileHistogram =
    rocksdb_prefix + cfstats_no_file_histogram;
const std::string DB::Properties::kCFFileHistogram =
    rocksdb_prefix + cf_file_histogram;
const std::string DB::Properties::kDBStats = rocksdb_prefix + dbstats;
const std::string DB::Properties::kLevelStats = rocksdb_prefix + levelstats;
const std::string DB::Properties::kNumImmutableMemTable =
    rocksdb_prefix + num_immutable_mem_table;
const std::string DB::Properties::kNumImmutableMemTableFlushed =
    rocksdb_prefix + num_immutable_mem_table_flushed;
const std::string DB::Properties::kMemTableFlushPending =
    rocksdb_prefix + mem_table_flush_pending;
const std::string DB::Properties::kCompactionPending =
    rocksdb_prefix + compaction_pending;
const std::string DB::Properties::kNumRunningCompactions =
    rocksdb_prefix + num_running_compactions;
const std::string DB::Properties::kNumRunningFlushes =
    rocksdb_prefix + num_running_flushes;
const std::string DB::Properties::kBackgroundErrors =
    rocksdb_prefix + background_errors;
const std::string DB::Properties::kCurSizeActiveMemTable =
    rocksdb_prefix + cur_size_active_mem_table;
const std::string DB::Properties::kCurSizeAllMemTables =
    rocksdb_prefix + cur_size_all_mem_tables;
const std::string DB::Properties::kSizeAllMemTables =
    rocksdb_prefix + size_all_mem_tables;
const std::string DB::Properties::kNumEntriesActiveMemTable =
    rocksdb_prefix + num_entries_active_mem_table;
const std::string DB::Properties::kNumEntriesImmMemTables =
    rocksdb_prefix + num_entries_imm_mem_tables;
const std::string DB::Properties::kNumDeletesActiveMemTable =
    rocksdb_prefix + num_deletes_active_mem_table;
const std::string DB::Properties::kNumDeletesImmMemTables =
    rocksdb_prefix + num_deletes_imm_mem_tables;
const std::string DB::Properties::kEstimateNumKeys =
    rocksdb_prefix + estimate_num_keys;
const std::string DB::Properties::kEstimateTableReadersMem =
    rocksdb_prefix + estimate_table_readers_mem;
const std::string DB::Properties::kIsFileDeletionsEnabled =
    rocksdb_prefix + is_file_deletions_enabled;
const std::string DB::Properties::kNumSnapshots =
    rocksdb_prefix + num_snapshots;
const std::string DB::Properties::kOldestSnapshotTime =
    rocksdb_prefix + oldest_snapshot_time;
const std::string DB::Properties::kNumLiveVersions =
    rocksdb_prefix + num_live_versions;
const std::string DB::Properties::kCurrentSuperVersionNumber =
    rocksdb_prefix + current_version_number;
const std::string DB::Properties::kEstimateLiveDataSize =
    rocksdb_prefix + estimate_live_data_size;
const std::string DB::Properties::kMinLogNumberToKeep =
    rocksdb_prefix + min_log_number_to_keep;
const std::string DB::Properties::kTotalSstFilesSize =
    rocksdb_prefix + total_sst_files_size;
const std::string DB::Properties::kBaseLevel = rocksdb_prefix + base_level;
const std::string DB::Properties::kEstimatePendingCompactionBytes =
    rocksdb_prefix + estimate_pending_comp_bytes;
const std::string DB::Properties::kAggregatedTableProperties =
    rocksdb_prefix + aggregated_table_properties;
const std::string DB::Properties::kAggregatedTablePropertiesAtLevel =
    rocksdb_prefix + aggregated_table_properties_at_level;
const std::string DB::Properties::kActualDelayedWriteRate =
    rocksdb_prefix + actual_delayed_write_rate;
const std::string DB::Properties::kIsWriteStopped =
    rocksdb_prefix + is_write_stopped;
319 | ||
// Maps each property name (without any trailing numeric argument) to its
// DBPropertyInfo. Initializer fields, in the order used by the accessors
// below: {need_out_of_mutex, handle_string, handle_int, handle_map}.
const std::unordered_map<std::string, DBPropertyInfo>
    InternalStats::ppt_name_to_info = {
        {DB::Properties::kNumFilesAtLevelPrefix,
         {false, &InternalStats::HandleNumFilesAtLevel, nullptr, nullptr}},
        {DB::Properties::kCompressionRatioAtLevelPrefix,
         {false, &InternalStats::HandleCompressionRatioAtLevelPrefix, nullptr,
          nullptr}},
        {DB::Properties::kLevelStats,
         {false, &InternalStats::HandleLevelStats, nullptr, nullptr}},
        {DB::Properties::kStats,
         {false, &InternalStats::HandleStats, nullptr, nullptr}},
        {DB::Properties::kCFStats,
         {false, &InternalStats::HandleCFStats, nullptr,
          &InternalStats::HandleCFMapStats}},
        {DB::Properties::kCFStatsNoFileHistogram,
         {false, &InternalStats::HandleCFStatsNoFileHistogram, nullptr,
          nullptr}},
        {DB::Properties::kCFFileHistogram,
         {false, &InternalStats::HandleCFFileHistogram, nullptr, nullptr}},
        {DB::Properties::kDBStats,
         {false, &InternalStats::HandleDBStats, nullptr, nullptr}},
        {DB::Properties::kSSTables,
         {false, &InternalStats::HandleSsTables, nullptr, nullptr}},
        {DB::Properties::kAggregatedTableProperties,
         {false, &InternalStats::HandleAggregatedTableProperties, nullptr,
          nullptr}},
        {DB::Properties::kAggregatedTablePropertiesAtLevel,
         {false, &InternalStats::HandleAggregatedTablePropertiesAtLevel,
          nullptr, nullptr}},
        {DB::Properties::kNumImmutableMemTable,
         {false, nullptr, &InternalStats::HandleNumImmutableMemTable, nullptr}},
        {DB::Properties::kNumImmutableMemTableFlushed,
         {false, nullptr, &InternalStats::HandleNumImmutableMemTableFlushed,
          nullptr}},
        {DB::Properties::kMemTableFlushPending,
         {false, nullptr, &InternalStats::HandleMemTableFlushPending, nullptr}},
        {DB::Properties::kCompactionPending,
         {false, nullptr, &InternalStats::HandleCompactionPending, nullptr}},
        {DB::Properties::kBackgroundErrors,
         {false, nullptr, &InternalStats::HandleBackgroundErrors, nullptr}},
        {DB::Properties::kCurSizeActiveMemTable,
         {false, nullptr, &InternalStats::HandleCurSizeActiveMemTable,
          nullptr}},
        {DB::Properties::kCurSizeAllMemTables,
         {false, nullptr, &InternalStats::HandleCurSizeAllMemTables, nullptr}},
        {DB::Properties::kSizeAllMemTables,
         {false, nullptr, &InternalStats::HandleSizeAllMemTables, nullptr}},
        {DB::Properties::kNumEntriesActiveMemTable,
         {false, nullptr, &InternalStats::HandleNumEntriesActiveMemTable,
          nullptr}},
        {DB::Properties::kNumEntriesImmMemTables,
         {false, nullptr, &InternalStats::HandleNumEntriesImmMemTables,
          nullptr}},
        {DB::Properties::kNumDeletesActiveMemTable,
         {false, nullptr, &InternalStats::HandleNumDeletesActiveMemTable,
          nullptr}},
        {DB::Properties::kNumDeletesImmMemTables,
         {false, nullptr, &InternalStats::HandleNumDeletesImmMemTables,
          nullptr}},
        {DB::Properties::kEstimateNumKeys,
         {false, nullptr, &InternalStats::HandleEstimateNumKeys, nullptr}},
        // need_out_of_mutex == true: handler must run without the DB mutex.
        {DB::Properties::kEstimateTableReadersMem,
         {true, nullptr, &InternalStats::HandleEstimateTableReadersMem,
          nullptr}},
        {DB::Properties::kIsFileDeletionsEnabled,
         {false, nullptr, &InternalStats::HandleIsFileDeletionsEnabled,
          nullptr}},
        {DB::Properties::kNumSnapshots,
         {false, nullptr, &InternalStats::HandleNumSnapshots, nullptr}},
        {DB::Properties::kOldestSnapshotTime,
         {false, nullptr, &InternalStats::HandleOldestSnapshotTime, nullptr}},
        {DB::Properties::kNumLiveVersions,
         {false, nullptr, &InternalStats::HandleNumLiveVersions, nullptr}},
        {DB::Properties::kCurrentSuperVersionNumber,
         {false, nullptr, &InternalStats::HandleCurrentSuperVersionNumber,
          nullptr}},
        {DB::Properties::kEstimateLiveDataSize,
         {true, nullptr, &InternalStats::HandleEstimateLiveDataSize, nullptr}},
        {DB::Properties::kMinLogNumberToKeep,
         {false, nullptr, &InternalStats::HandleMinLogNumberToKeep, nullptr}},
        {DB::Properties::kBaseLevel,
         {false, nullptr, &InternalStats::HandleBaseLevel, nullptr}},
        {DB::Properties::kTotalSstFilesSize,
         {false, nullptr, &InternalStats::HandleTotalSstFilesSize, nullptr}},
        {DB::Properties::kEstimatePendingCompactionBytes,
         {false, nullptr, &InternalStats::HandleEstimatePendingCompactionBytes,
          nullptr}},
        {DB::Properties::kNumRunningFlushes,
         {false, nullptr, &InternalStats::HandleNumRunningFlushes, nullptr}},
        {DB::Properties::kNumRunningCompactions,
         {false, nullptr, &InternalStats::HandleNumRunningCompactions,
          nullptr}},
        {DB::Properties::kActualDelayedWriteRate,
         {false, nullptr, &InternalStats::HandleActualDelayedWriteRate,
          nullptr}},
        {DB::Properties::kIsWriteStopped,
         {false, nullptr, &InternalStats::HandleIsWriteStopped, nullptr}},
};
418 | ||
419 | const DBPropertyInfo* GetPropertyInfo(const Slice& property) { | |
420 | std::string ppt_name = GetPropertyNameAndArg(property).first.ToString(); | |
421 | auto ppt_info_iter = InternalStats::ppt_name_to_info.find(ppt_name); | |
422 | if (ppt_info_iter == InternalStats::ppt_name_to_info.end()) { | |
423 | return nullptr; | |
424 | } | |
425 | return &ppt_info_iter->second; | |
426 | } | |
427 | ||
428 | bool InternalStats::GetStringProperty(const DBPropertyInfo& property_info, | |
429 | const Slice& property, | |
430 | std::string* value) { | |
431 | assert(value != nullptr); | |
432 | assert(property_info.handle_string != nullptr); | |
433 | Slice arg = GetPropertyNameAndArg(property).second; | |
434 | return (this->*(property_info.handle_string))(value, arg); | |
435 | } | |
436 | ||
437 | bool InternalStats::GetMapProperty(const DBPropertyInfo& property_info, | |
438 | const Slice& property, | |
439 | std::map<std::string, double>* value) { | |
440 | assert(value != nullptr); | |
441 | assert(property_info.handle_map != nullptr); | |
442 | return (this->*(property_info.handle_map))(value); | |
443 | } | |
444 | ||
// Dispatches an integer-valued property whose handler must run while
// holding the DB mutex (need_out_of_mutex == false). `db` is asserted to
// hold the mutex and is forwarded to the handler.
bool InternalStats::GetIntProperty(const DBPropertyInfo& property_info,
                                   uint64_t* value, DBImpl* db) {
  assert(value != nullptr);
  assert(property_info.handle_int != nullptr &&
         !property_info.need_out_of_mutex);
  db->mutex_.AssertHeld();
  return (this->*(property_info.handle_int))(value, db, nullptr /* version */);
}
453 | ||
// Counterpart of GetIntProperty for handlers flagged need_out_of_mutex:
// runs without the DB mutex and passes a pinned `version` instead of `db`.
bool InternalStats::GetIntPropertyOutOfMutex(
    const DBPropertyInfo& property_info, Version* version, uint64_t* value) {
  assert(value != nullptr);
  assert(property_info.handle_int != nullptr &&
         property_info.need_out_of_mutex);
  return (this->*(property_info.handle_int))(value, nullptr /* db */, version);
}
461 | ||
462 | bool InternalStats::HandleNumFilesAtLevel(std::string* value, Slice suffix) { | |
463 | uint64_t level; | |
464 | const auto* vstorage = cfd_->current()->storage_info(); | |
465 | bool ok = ConsumeDecimalNumber(&suffix, &level) && suffix.empty(); | |
466 | if (!ok || static_cast<int>(level) >= number_levels_) { | |
467 | return false; | |
468 | } else { | |
469 | char buf[100]; | |
470 | snprintf(buf, sizeof(buf), "%d", | |
471 | vstorage->NumLevelFiles(static_cast<int>(level))); | |
472 | *value = buf; | |
473 | return true; | |
474 | } | |
475 | } | |
476 | ||
477 | bool InternalStats::HandleCompressionRatioAtLevelPrefix(std::string* value, | |
478 | Slice suffix) { | |
479 | uint64_t level; | |
480 | const auto* vstorage = cfd_->current()->storage_info(); | |
481 | bool ok = ConsumeDecimalNumber(&suffix, &level) && suffix.empty(); | |
482 | if (!ok || level >= static_cast<uint64_t>(number_levels_)) { | |
483 | return false; | |
484 | } | |
485 | *value = ToString( | |
486 | vstorage->GetEstimatedCompressionRatioAtLevel(static_cast<int>(level))); | |
487 | return true; | |
488 | } | |
489 | ||
490 | bool InternalStats::HandleLevelStats(std::string* value, Slice suffix) { | |
491 | char buf[1000]; | |
492 | const auto* vstorage = cfd_->current()->storage_info(); | |
493 | snprintf(buf, sizeof(buf), | |
494 | "Level Files Size(MB)\n" | |
495 | "--------------------\n"); | |
496 | value->append(buf); | |
497 | ||
498 | for (int level = 0; level < number_levels_; level++) { | |
499 | snprintf(buf, sizeof(buf), "%3d %8d %8.0f\n", level, | |
500 | vstorage->NumLevelFiles(level), | |
501 | vstorage->NumLevelBytes(level) / kMB); | |
502 | value->append(buf); | |
503 | } | |
504 | return true; | |
505 | } | |
506 | ||
507 | bool InternalStats::HandleStats(std::string* value, Slice suffix) { | |
508 | if (!HandleCFStats(value, suffix)) { | |
509 | return false; | |
510 | } | |
511 | if (!HandleDBStats(value, suffix)) { | |
512 | return false; | |
513 | } | |
514 | return true; | |
515 | } | |
516 | ||
// The handlers below delegate to the corresponding Dump* helper; they exist
// so the property table can hold uniform member-function pointers. The
// `suffix` parameter is unused by all of them.

bool InternalStats::HandleCFMapStats(std::map<std::string, double>* cf_stats) {
  DumpCFMapStats(cf_stats);
  return true;
}

bool InternalStats::HandleCFStats(std::string* value, Slice suffix) {
  DumpCFStats(value);
  return true;
}

bool InternalStats::HandleCFStatsNoFileHistogram(std::string* value,
                                                 Slice suffix) {
  DumpCFStatsNoFileHistogram(value);
  return true;
}

bool InternalStats::HandleCFFileHistogram(std::string* value, Slice suffix) {
  DumpCFFileHistogram(value);
  return true;
}

bool InternalStats::HandleDBStats(std::string* value, Slice suffix) {
  DumpDBStats(value);
  return true;
}
542 | ||
543 | bool InternalStats::HandleSsTables(std::string* value, Slice suffix) { | |
544 | auto* current = cfd_->current(); | |
545 | *value = current->DebugString(); | |
546 | return true; | |
547 | } | |
548 | ||
549 | bool InternalStats::HandleAggregatedTableProperties(std::string* value, | |
550 | Slice suffix) { | |
551 | std::shared_ptr<const TableProperties> tp; | |
552 | auto s = cfd_->current()->GetAggregatedTableProperties(&tp); | |
553 | if (!s.ok()) { | |
554 | return false; | |
555 | } | |
556 | *value = tp->ToString(); | |
557 | return true; | |
558 | } | |
559 | ||
560 | bool InternalStats::HandleAggregatedTablePropertiesAtLevel(std::string* value, | |
561 | Slice suffix) { | |
562 | uint64_t level; | |
563 | bool ok = ConsumeDecimalNumber(&suffix, &level) && suffix.empty(); | |
564 | if (!ok || static_cast<int>(level) >= number_levels_) { | |
565 | return false; | |
566 | } | |
567 | std::shared_ptr<const TableProperties> tp; | |
568 | auto s = cfd_->current()->GetAggregatedTableProperties( | |
569 | &tp, static_cast<int>(level)); | |
570 | if (!s.ok()) { | |
571 | return false; | |
572 | } | |
573 | *value = tp->ToString(); | |
574 | return true; | |
575 | } | |
576 | ||
// Integer-property handlers. All share the (value, db, version) signature
// required by the property table; `version` is unused by these (they run
// under the DB mutex), and `db` is only read where noted.

// Number of immutable memtables not yet flushed.
bool InternalStats::HandleNumImmutableMemTable(uint64_t* value, DBImpl* db,
                                               Version* version) {
  *value = cfd_->imm()->NumNotFlushed();
  return true;
}

// Number of immutable memtables already flushed but still kept in memory.
bool InternalStats::HandleNumImmutableMemTableFlushed(uint64_t* value,
                                                      DBImpl* db,
                                                      Version* version) {
  *value = cfd_->imm()->NumFlushed();
  return true;
}

bool InternalStats::HandleMemTableFlushPending(uint64_t* value, DBImpl* db,
                                               Version* version) {
  // Return number of mem tables that are ready to flush (made immutable)
  *value = (cfd_->imm()->IsFlushPending() ? 1 : 0);
  return true;
}

// Number of currently running flushes (DB-wide; reads `db`).
bool InternalStats::HandleNumRunningFlushes(uint64_t* value, DBImpl* db,
                                            Version* version) {
  *value = db->num_running_flushes();
  return true;
}

bool InternalStats::HandleCompactionPending(uint64_t* value, DBImpl* db,
                                            Version* version) {
  // 1 if the system already determines at least one compaction is needed.
  // 0 otherwise,
  const auto* vstorage = cfd_->current()->storage_info();
  *value = (cfd_->compaction_picker()->NeedsCompaction(vstorage) ? 1 : 0);
  return true;
}

// Number of currently running compactions (DB-wide; reads `db`).
bool InternalStats::HandleNumRunningCompactions(uint64_t* value, DBImpl* db,
                                                Version* version) {
  *value = db->num_running_compactions_;
  return true;
}

bool InternalStats::HandleBackgroundErrors(uint64_t* value, DBImpl* db,
                                           Version* version) {
  // Accumulated number of errors in background flushes or compactions.
  *value = GetBackgroundErrorCount();
  return true;
}
624 | ||
bool InternalStats::HandleCurSizeActiveMemTable(uint64_t* value, DBImpl* db,
                                                Version* version) {
  // Current size of the active memtable
  *value = cfd_->mem()->ApproximateMemoryUsage();
  return true;
}

bool InternalStats::HandleCurSizeAllMemTables(uint64_t* value, DBImpl* db,
                                              Version* version) {
  // Current size of the active memtable + immutable memtables
  *value = cfd_->mem()->ApproximateMemoryUsage() +
           cfd_->imm()->ApproximateUnflushedMemTablesMemoryUsage();
  return true;
}

// Size of all memtables, including those already flushed but still pinned
// in memory (note: imm()->ApproximateMemoryUsage(), not only unflushed).
bool InternalStats::HandleSizeAllMemTables(uint64_t* value, DBImpl* db,
                                           Version* version) {
  *value = cfd_->mem()->ApproximateMemoryUsage() +
           cfd_->imm()->ApproximateMemoryUsage();
  return true;
}

bool InternalStats::HandleNumEntriesActiveMemTable(uint64_t* value, DBImpl* db,
                                                   Version* version) {
  // Current number of entries in the active memtable
  *value = cfd_->mem()->num_entries();
  return true;
}

bool InternalStats::HandleNumEntriesImmMemTables(uint64_t* value, DBImpl* db,
                                                 Version* version) {
  // Current number of entries in the immutable memtables
  *value = cfd_->imm()->current()->GetTotalNumEntries();
  return true;
}

bool InternalStats::HandleNumDeletesActiveMemTable(uint64_t* value, DBImpl* db,
                                                   Version* version) {
  // Current number of delete entries in the active memtable
  *value = cfd_->mem()->num_deletes();
  return true;
}

bool InternalStats::HandleNumDeletesImmMemTables(uint64_t* value, DBImpl* db,
                                                 Version* version) {
  // Current number of delete entries in the immutable memtables
  *value = cfd_->imm()->current()->GetTotalNumDeletes();
  return true;
}
674 | ||
675 | bool InternalStats::HandleEstimateNumKeys(uint64_t* value, DBImpl* db, | |
676 | Version* version) { | |
677 | // Estimate number of entries in the column family: | |
678 | // Use estimated entries in tables + total entries in memtables. | |
679 | const auto* vstorage = cfd_->current()->storage_info(); | |
680 | *value = cfd_->mem()->num_entries() + | |
681 | cfd_->imm()->current()->GetTotalNumEntries() - | |
682 | (cfd_->mem()->num_deletes() + | |
683 | cfd_->imm()->current()->GetTotalNumDeletes()) * | |
684 | 2 + | |
685 | vstorage->GetEstimatedActiveKeys(); | |
686 | return true; | |
687 | } | |
688 | ||
689 | bool InternalStats::HandleNumSnapshots(uint64_t* value, DBImpl* db, | |
690 | Version* version) { | |
691 | *value = db->snapshots().count(); | |
692 | return true; | |
693 | } | |
694 | ||
695 | bool InternalStats::HandleOldestSnapshotTime(uint64_t* value, DBImpl* db, | |
696 | Version* version) { | |
697 | *value = static_cast<uint64_t>(db->snapshots().GetOldestSnapshotTime()); | |
698 | return true; | |
699 | } | |
700 | ||
701 | bool InternalStats::HandleNumLiveVersions(uint64_t* value, DBImpl* db, | |
702 | Version* version) { | |
703 | *value = cfd_->GetNumLiveVersions(); | |
704 | return true; | |
705 | } | |
706 | ||
707 | bool InternalStats::HandleCurrentSuperVersionNumber(uint64_t* value, DBImpl* db, | |
708 | Version* version) { | |
709 | *value = cfd_->GetSuperVersionNumber(); | |
710 | return true; | |
711 | } | |
712 | ||
713 | bool InternalStats::HandleIsFileDeletionsEnabled(uint64_t* value, DBImpl* db, | |
714 | Version* version) { | |
715 | *value = db->IsFileDeletionsEnabled(); | |
716 | return true; | |
717 | } | |
718 | ||
719 | bool InternalStats::HandleBaseLevel(uint64_t* value, DBImpl* db, | |
720 | Version* version) { | |
721 | const auto* vstorage = cfd_->current()->storage_info(); | |
722 | *value = vstorage->base_level(); | |
723 | return true; | |
724 | } | |
725 | ||
726 | bool InternalStats::HandleTotalSstFilesSize(uint64_t* value, DBImpl* db, | |
727 | Version* version) { | |
728 | *value = cfd_->GetTotalSstFilesSize(); | |
729 | return true; | |
730 | } | |
731 | ||
732 | bool InternalStats::HandleEstimatePendingCompactionBytes(uint64_t* value, | |
733 | DBImpl* db, | |
734 | Version* version) { | |
735 | const auto* vstorage = cfd_->current()->storage_info(); | |
736 | *value = vstorage->estimated_compaction_needed_bytes(); | |
737 | return true; | |
738 | } | |
739 | ||
740 | bool InternalStats::HandleEstimateTableReadersMem(uint64_t* value, DBImpl* db, | |
741 | Version* version) { | |
742 | *value = (version == nullptr) ? 0 : version->GetMemoryUsageByTableReaders(); | |
743 | return true; | |
744 | } | |
745 | ||
746 | bool InternalStats::HandleEstimateLiveDataSize(uint64_t* value, DBImpl* db, | |
747 | Version* version) { | |
748 | const auto* vstorage = cfd_->current()->storage_info(); | |
749 | *value = vstorage->EstimateLiveDataSize(); | |
750 | return true; | |
751 | } | |
752 | ||
753 | bool InternalStats::HandleMinLogNumberToKeep(uint64_t* value, DBImpl* db, | |
754 | Version* version) { | |
755 | *value = db->MinLogNumberToKeep(); | |
756 | return true; | |
757 | } | |
758 | ||
759 | bool InternalStats::HandleActualDelayedWriteRate(uint64_t* value, DBImpl* db, | |
760 | Version* version) { | |
761 | const WriteController& wc = db->write_controller(); | |
762 | if (!wc.NeedsDelay()) { | |
763 | *value = 0; | |
764 | } else { | |
765 | *value = wc.delayed_write_rate(); | |
766 | } | |
767 | return true; | |
768 | } | |
769 | ||
770 | bool InternalStats::HandleIsWriteStopped(uint64_t* value, DBImpl* db, | |
771 | Version* version) { | |
772 | *value = db->write_controller().IsStopped() ? 1 : 0; | |
773 | return true; | |
774 | } | |
775 | ||
776 | void InternalStats::DumpDBStats(std::string* value) { | |
777 | char buf[1000]; | |
778 | // DB-level stats, only available from default column family | |
779 | double seconds_up = (env_->NowMicros() - started_at_ + 1) / kMicrosInSec; | |
780 | double interval_seconds_up = seconds_up - db_stats_snapshot_.seconds_up; | |
781 | snprintf(buf, sizeof(buf), | |
782 | "\n** DB Stats **\nUptime(secs): %.1f total, %.1f interval\n", | |
783 | seconds_up, interval_seconds_up); | |
784 | value->append(buf); | |
785 | // Cumulative | |
786 | uint64_t user_bytes_written = GetDBStats(InternalStats::BYTES_WRITTEN); | |
787 | uint64_t num_keys_written = GetDBStats(InternalStats::NUMBER_KEYS_WRITTEN); | |
788 | uint64_t write_other = GetDBStats(InternalStats::WRITE_DONE_BY_OTHER); | |
789 | uint64_t write_self = GetDBStats(InternalStats::WRITE_DONE_BY_SELF); | |
790 | uint64_t wal_bytes = GetDBStats(InternalStats::WAL_FILE_BYTES); | |
791 | uint64_t wal_synced = GetDBStats(InternalStats::WAL_FILE_SYNCED); | |
792 | uint64_t write_with_wal = GetDBStats(InternalStats::WRITE_WITH_WAL); | |
793 | uint64_t write_stall_micros = GetDBStats(InternalStats::WRITE_STALL_MICROS); | |
794 | ||
795 | const int kHumanMicrosLen = 32; | |
796 | char human_micros[kHumanMicrosLen]; | |
797 | ||
798 | // Data | |
799 | // writes: total number of write requests. | |
800 | // keys: total number of key updates issued by all the write requests | |
801 | // commit groups: number of group commits issued to the DB. Each group can | |
802 | // contain one or more writes. | |
803 | // so writes/keys is the average number of put in multi-put or put | |
804 | // writes/groups is the average group commit size. | |
805 | // | |
806 | // The format is the same for interval stats. | |
807 | snprintf(buf, sizeof(buf), | |
808 | "Cumulative writes: %s writes, %s keys, %s commit groups, " | |
809 | "%.1f writes per commit group, ingest: %.2f GB, %.2f MB/s\n", | |
810 | NumberToHumanString(write_other + write_self).c_str(), | |
811 | NumberToHumanString(num_keys_written).c_str(), | |
812 | NumberToHumanString(write_self).c_str(), | |
813 | (write_other + write_self) / static_cast<double>(write_self + 1), | |
814 | user_bytes_written / kGB, user_bytes_written / kMB / seconds_up); | |
815 | value->append(buf); | |
816 | // WAL | |
817 | snprintf(buf, sizeof(buf), | |
818 | "Cumulative WAL: %s writes, %s syncs, " | |
819 | "%.2f writes per sync, written: %.2f GB, %.2f MB/s\n", | |
820 | NumberToHumanString(write_with_wal).c_str(), | |
821 | NumberToHumanString(wal_synced).c_str(), | |
822 | write_with_wal / static_cast<double>(wal_synced + 1), | |
823 | wal_bytes / kGB, wal_bytes / kMB / seconds_up); | |
824 | value->append(buf); | |
825 | // Stall | |
826 | AppendHumanMicros(write_stall_micros, human_micros, kHumanMicrosLen, true); | |
827 | snprintf(buf, sizeof(buf), | |
828 | "Cumulative stall: %s, %.1f percent\n", | |
829 | human_micros, | |
830 | // 10000 = divide by 1M to get secs, then multiply by 100 for pct | |
831 | write_stall_micros / 10000.0 / std::max(seconds_up, 0.001)); | |
832 | value->append(buf); | |
833 | ||
834 | // Interval | |
835 | uint64_t interval_write_other = write_other - db_stats_snapshot_.write_other; | |
836 | uint64_t interval_write_self = write_self - db_stats_snapshot_.write_self; | |
837 | uint64_t interval_num_keys_written = | |
838 | num_keys_written - db_stats_snapshot_.num_keys_written; | |
839 | snprintf(buf, sizeof(buf), | |
840 | "Interval writes: %s writes, %s keys, %s commit groups, " | |
841 | "%.1f writes per commit group, ingest: %.2f MB, %.2f MB/s\n", | |
842 | NumberToHumanString( | |
843 | interval_write_other + interval_write_self).c_str(), | |
844 | NumberToHumanString(interval_num_keys_written).c_str(), | |
845 | NumberToHumanString(interval_write_self).c_str(), | |
846 | static_cast<double>(interval_write_other + interval_write_self) / | |
847 | (interval_write_self + 1), | |
848 | (user_bytes_written - db_stats_snapshot_.ingest_bytes) / kMB, | |
849 | (user_bytes_written - db_stats_snapshot_.ingest_bytes) / kMB / | |
850 | std::max(interval_seconds_up, 0.001)), | |
851 | value->append(buf); | |
852 | ||
853 | uint64_t interval_write_with_wal = | |
854 | write_with_wal - db_stats_snapshot_.write_with_wal; | |
855 | uint64_t interval_wal_synced = wal_synced - db_stats_snapshot_.wal_synced; | |
856 | uint64_t interval_wal_bytes = wal_bytes - db_stats_snapshot_.wal_bytes; | |
857 | ||
858 | snprintf(buf, sizeof(buf), | |
859 | "Interval WAL: %s writes, %s syncs, " | |
860 | "%.2f writes per sync, written: %.2f MB, %.2f MB/s\n", | |
861 | NumberToHumanString(interval_write_with_wal).c_str(), | |
862 | NumberToHumanString(interval_wal_synced).c_str(), | |
863 | interval_write_with_wal / | |
864 | static_cast<double>(interval_wal_synced + 1), | |
865 | interval_wal_bytes / kGB, | |
866 | interval_wal_bytes / kMB / std::max(interval_seconds_up, 0.001)); | |
867 | value->append(buf); | |
868 | ||
869 | // Stall | |
870 | AppendHumanMicros( | |
871 | write_stall_micros - db_stats_snapshot_.write_stall_micros, | |
872 | human_micros, kHumanMicrosLen, true); | |
873 | snprintf(buf, sizeof(buf), | |
874 | "Interval stall: %s, %.1f percent\n", | |
875 | human_micros, | |
876 | // 10000 = divide by 1M to get secs, then multiply by 100 for pct | |
877 | (write_stall_micros - db_stats_snapshot_.write_stall_micros) / | |
878 | 10000.0 / std::max(interval_seconds_up, 0.001)); | |
879 | value->append(buf); | |
880 | ||
881 | db_stats_snapshot_.seconds_up = seconds_up; | |
882 | db_stats_snapshot_.ingest_bytes = user_bytes_written; | |
883 | db_stats_snapshot_.write_other = write_other; | |
884 | db_stats_snapshot_.write_self = write_self; | |
885 | db_stats_snapshot_.num_keys_written = num_keys_written; | |
886 | db_stats_snapshot_.wal_bytes = wal_bytes; | |
887 | db_stats_snapshot_.wal_synced = wal_synced; | |
888 | db_stats_snapshot_.write_with_wal = write_with_wal; | |
889 | db_stats_snapshot_.write_stall_micros = write_stall_micros; | |
890 | } | |
891 | ||
892 | /** | |
893 | * Dump Compaction Level stats to a map of stat name to value in double. | |
894 | * The level in stat name is represented with a prefix "Lx" where "x" | |
895 | * is the level number. A special level "Sum" represents the sum of a stat | |
896 | * for all levels. | |
897 | */ | |
898 | void InternalStats::DumpCFMapStats(std::map<std::string, double>* cf_stats) { | |
899 | CompactionStats compaction_stats_sum(0); | |
900 | std::map<int, std::map<LevelStatType, double>> levels_stats; | |
901 | DumpCFMapStats(&levels_stats, &compaction_stats_sum); | |
902 | for (auto const& level_ent : levels_stats) { | |
903 | auto level_str = | |
904 | level_ent.first == -1 ? "Sum" : "L" + ToString(level_ent.first); | |
905 | for (auto const& stat_ent : level_ent.second) { | |
906 | auto stat_type = stat_ent.first; | |
907 | auto key_str = | |
908 | level_str + "." + | |
909 | InternalStats::compaction_level_stats.at(stat_type).property_name; | |
910 | (*cf_stats)[key_str] = stat_ent.second; | |
911 | } | |
912 | } | |
913 | } | |
914 | ||
// Collects per-level compaction statistics into *levels_stats (keyed by
// level number; -1 holds the across-levels sum) and accumulates the raw
// compaction stats of every populated level into *compaction_stats_sum.
void InternalStats::DumpCFMapStats(
    std::map<int, std::map<LevelStatType, double>>* levels_stats,
    CompactionStats* compaction_stats_sum) {
  const VersionStorageInfo* vstorage = cfd_->current()->storage_info();

  // FIFO compaction keeps everything in a single level, so only one score
  // is meaningful; otherwise all levels but the last can be compacted.
  int num_levels_to_check =
      (cfd_->ioptions()->compaction_style != kCompactionStyleFIFO)
          ? vstorage->num_levels() - 1
          : 1;

  // Compaction scores are sorted based on its value. Restore them to the
  // level order
  std::vector<double> compaction_score(number_levels_, 0);
  for (int i = 0; i < num_levels_to_check; ++i) {
    compaction_score[vstorage->CompactionScoreLevel(i)] =
        vstorage->CompactionScore(i);
  }
  // Count # of files being compacted for each level
  std::vector<int> files_being_compacted(number_levels_, 0);
  for (int level = 0; level < number_levels_; ++level) {
    for (auto* f : vstorage->LevelFiles(level)) {
      if (f->being_compacted) {
        ++files_being_compacted[level];
      }
    }
  }

  int total_files = 0;
  int total_files_being_compacted = 0;
  double total_file_size = 0;
  uint64_t flush_ingest = cf_stats_value_[BYTES_FLUSHED];
  uint64_t add_file_ingest = cf_stats_value_[BYTES_INGESTED_ADD_FILE];
  // Everything that entered L0: flushed memtables plus ingested files.
  uint64_t curr_ingest = flush_ingest + add_file_ingest;
  for (int level = 0; level < number_levels_; level++) {
    int files = vstorage->NumLevelFiles(level);
    total_files += files;
    total_files_being_compacted += files_being_compacted[level];
    // Emit stats only for levels that have files or have seen compaction.
    if (comp_stats_[level].micros > 0 || files > 0) {
      compaction_stats_sum->Add(comp_stats_[level]);
      total_file_size += vstorage->NumLevelBytes(level);
      // For L0 the "input" is the ingest stream; for other levels it is the
      // bytes read from the non-output levels during compaction.
      uint64_t input_bytes;
      if (level == 0) {
        input_bytes = curr_ingest;
      } else {
        input_bytes = comp_stats_[level].bytes_read_non_output_levels;
      }
      // Write amplification = bytes written / bytes that entered the level.
      double w_amp =
          (input_bytes == 0)
              ? 0.0
              : static_cast<double>(comp_stats_[level].bytes_written) /
                    input_bytes;
      std::map<LevelStatType, double> level_stats;
      PrepareLevelStats(&level_stats, files, files_being_compacted[level],
                        static_cast<double>(vstorage->NumLevelBytes(level)),
                        compaction_score[level], w_amp, comp_stats_[level]);
      (*levels_stats)[level] = level_stats;
    }
  }
  // Cumulative summary
  // "+ 1" guards against division by zero when nothing was ingested yet.
  double w_amp = compaction_stats_sum->bytes_written /
                 static_cast<double>(curr_ingest + 1);
  // Stats summary across levels
  std::map<LevelStatType, double> sum_stats;
  PrepareLevelStats(&sum_stats, total_files, total_files_being_compacted,
                    total_file_size, 0, w_amp, *compaction_stats_sum);
  (*levels_stats)[-1] = sum_stats;  // -1 is for the Sum level
}
982 | ||
// Appends the full per-column-family stats dump: the per-level compaction
// table followed by the per-level file read latency histograms.
void InternalStats::DumpCFStats(std::string* value) {
  DumpCFStatsNoFileHistogram(value);
  DumpCFFileHistogram(value);
}
987 | ||
988 | void InternalStats::DumpCFStatsNoFileHistogram(std::string* value) { | |
989 | char buf[2000]; | |
990 | // Per-ColumnFamily stats | |
991 | PrintLevelStatsHeader(buf, sizeof(buf), cfd_->GetName()); | |
992 | value->append(buf); | |
993 | ||
994 | // Print stats for each level | |
995 | std::map<int, std::map<LevelStatType, double>> levels_stats; | |
996 | CompactionStats compaction_stats_sum(0); | |
997 | DumpCFMapStats(&levels_stats, &compaction_stats_sum); | |
998 | for (int l = 0; l < number_levels_; ++l) { | |
999 | if (levels_stats.find(l) != levels_stats.end()) { | |
1000 | PrintLevelStats(buf, sizeof(buf), "L" + ToString(l), levels_stats[l]); | |
1001 | value->append(buf); | |
1002 | } | |
1003 | } | |
1004 | ||
1005 | // Print sum of level stats | |
1006 | PrintLevelStats(buf, sizeof(buf), "Sum", levels_stats[-1]); | |
1007 | value->append(buf); | |
1008 | ||
1009 | uint64_t flush_ingest = cf_stats_value_[BYTES_FLUSHED]; | |
1010 | uint64_t add_file_ingest = cf_stats_value_[BYTES_INGESTED_ADD_FILE]; | |
1011 | uint64_t ingest_files_addfile = cf_stats_value_[INGESTED_NUM_FILES_TOTAL]; | |
1012 | uint64_t ingest_l0_files_addfile = | |
1013 | cf_stats_value_[INGESTED_LEVEL0_NUM_FILES_TOTAL]; | |
1014 | uint64_t ingest_keys_addfile = cf_stats_value_[INGESTED_NUM_KEYS_TOTAL]; | |
1015 | // Cumulative summary | |
1016 | uint64_t total_stall_count = | |
1017 | cf_stats_count_[LEVEL0_SLOWDOWN_TOTAL] + | |
1018 | cf_stats_count_[LEVEL0_NUM_FILES_TOTAL] + | |
1019 | cf_stats_count_[SOFT_PENDING_COMPACTION_BYTES_LIMIT] + | |
1020 | cf_stats_count_[HARD_PENDING_COMPACTION_BYTES_LIMIT] + | |
1021 | cf_stats_count_[MEMTABLE_COMPACTION] + cf_stats_count_[MEMTABLE_SLOWDOWN]; | |
1022 | // Interval summary | |
1023 | uint64_t interval_flush_ingest = | |
1024 | flush_ingest - cf_stats_snapshot_.ingest_bytes_flush; | |
1025 | uint64_t interval_add_file_inget = | |
1026 | add_file_ingest - cf_stats_snapshot_.ingest_bytes_addfile; | |
1027 | uint64_t interval_ingest = | |
1028 | interval_flush_ingest + interval_add_file_inget + 1; | |
1029 | CompactionStats interval_stats(compaction_stats_sum); | |
1030 | interval_stats.Subtract(cf_stats_snapshot_.comp_stats); | |
1031 | double w_amp = | |
1032 | interval_stats.bytes_written / static_cast<double>(interval_ingest); | |
1033 | PrintLevelStats(buf, sizeof(buf), "Int", 0, 0, 0, 0, w_amp, interval_stats); | |
1034 | value->append(buf); | |
1035 | ||
1036 | double seconds_up = (env_->NowMicros() - started_at_ + 1) / kMicrosInSec; | |
1037 | double interval_seconds_up = seconds_up - cf_stats_snapshot_.seconds_up; | |
1038 | snprintf(buf, sizeof(buf), "Uptime(secs): %.1f total, %.1f interval\n", | |
1039 | seconds_up, interval_seconds_up); | |
1040 | value->append(buf); | |
1041 | snprintf(buf, sizeof(buf), "Flush(GB): cumulative %.3f, interval %.3f\n", | |
1042 | flush_ingest / kGB, interval_flush_ingest / kGB); | |
1043 | value->append(buf); | |
1044 | snprintf(buf, sizeof(buf), "AddFile(GB): cumulative %.3f, interval %.3f\n", | |
1045 | add_file_ingest / kGB, interval_add_file_inget / kGB); | |
1046 | value->append(buf); | |
1047 | ||
1048 | uint64_t interval_ingest_files_addfile = | |
1049 | ingest_files_addfile - cf_stats_snapshot_.ingest_files_addfile; | |
1050 | snprintf(buf, sizeof(buf), "AddFile(Total Files): cumulative %" PRIu64 | |
1051 | ", interval %" PRIu64 "\n", | |
1052 | ingest_files_addfile, interval_ingest_files_addfile); | |
1053 | value->append(buf); | |
1054 | ||
1055 | uint64_t interval_ingest_l0_files_addfile = | |
1056 | ingest_l0_files_addfile - cf_stats_snapshot_.ingest_l0_files_addfile; | |
1057 | snprintf(buf, sizeof(buf), | |
1058 | "AddFile(L0 Files): cumulative %" PRIu64 ", interval %" PRIu64 "\n", | |
1059 | ingest_l0_files_addfile, interval_ingest_l0_files_addfile); | |
1060 | value->append(buf); | |
1061 | ||
1062 | uint64_t interval_ingest_keys_addfile = | |
1063 | ingest_keys_addfile - cf_stats_snapshot_.ingest_keys_addfile; | |
1064 | snprintf(buf, sizeof(buf), | |
1065 | "AddFile(Keys): cumulative %" PRIu64 ", interval %" PRIu64 "\n", | |
1066 | ingest_keys_addfile, interval_ingest_keys_addfile); | |
1067 | value->append(buf); | |
1068 | ||
1069 | // Compact | |
1070 | uint64_t compact_bytes_read = 0; | |
1071 | uint64_t compact_bytes_write = 0; | |
1072 | uint64_t compact_micros = 0; | |
1073 | for (int level = 0; level < number_levels_; level++) { | |
1074 | compact_bytes_read += comp_stats_[level].bytes_read_output_level + | |
1075 | comp_stats_[level].bytes_read_non_output_levels; | |
1076 | compact_bytes_write += comp_stats_[level].bytes_written; | |
1077 | compact_micros += comp_stats_[level].micros; | |
1078 | } | |
1079 | ||
1080 | snprintf(buf, sizeof(buf), | |
1081 | "Cumulative compaction: %.2f GB write, %.2f MB/s write, " | |
1082 | "%.2f GB read, %.2f MB/s read, %.1f seconds\n", | |
1083 | compact_bytes_write / kGB, compact_bytes_write / kMB / seconds_up, | |
1084 | compact_bytes_read / kGB, compact_bytes_read / kMB / seconds_up, | |
1085 | compact_micros / kMicrosInSec); | |
1086 | value->append(buf); | |
1087 | ||
1088 | // Compaction interval | |
1089 | uint64_t interval_compact_bytes_write = | |
1090 | compact_bytes_write - cf_stats_snapshot_.compact_bytes_write; | |
1091 | uint64_t interval_compact_bytes_read = | |
1092 | compact_bytes_read - cf_stats_snapshot_.compact_bytes_read; | |
1093 | uint64_t interval_compact_micros = | |
1094 | compact_micros - cf_stats_snapshot_.compact_micros; | |
1095 | ||
1096 | snprintf( | |
1097 | buf, sizeof(buf), | |
1098 | "Interval compaction: %.2f GB write, %.2f MB/s write, " | |
1099 | "%.2f GB read, %.2f MB/s read, %.1f seconds\n", | |
1100 | interval_compact_bytes_write / kGB, | |
1101 | interval_compact_bytes_write / kMB / std::max(interval_seconds_up, 0.001), | |
1102 | interval_compact_bytes_read / kGB, | |
1103 | interval_compact_bytes_read / kMB / std::max(interval_seconds_up, 0.001), | |
1104 | interval_compact_micros / kMicrosInSec); | |
1105 | value->append(buf); | |
1106 | cf_stats_snapshot_.compact_bytes_write = compact_bytes_write; | |
1107 | cf_stats_snapshot_.compact_bytes_read = compact_bytes_read; | |
1108 | cf_stats_snapshot_.compact_micros = compact_micros; | |
1109 | ||
1110 | snprintf(buf, sizeof(buf), "Stalls(count): %" PRIu64 | |
1111 | " level0_slowdown, " | |
1112 | "%" PRIu64 | |
1113 | " level0_slowdown_with_compaction, " | |
1114 | "%" PRIu64 | |
1115 | " level0_numfiles, " | |
1116 | "%" PRIu64 | |
1117 | " level0_numfiles_with_compaction, " | |
1118 | "%" PRIu64 | |
1119 | " stop for pending_compaction_bytes, " | |
1120 | "%" PRIu64 | |
1121 | " slowdown for pending_compaction_bytes, " | |
1122 | "%" PRIu64 | |
1123 | " memtable_compaction, " | |
1124 | "%" PRIu64 | |
1125 | " memtable_slowdown, " | |
1126 | "interval %" PRIu64 " total count\n", | |
1127 | cf_stats_count_[LEVEL0_SLOWDOWN_TOTAL], | |
1128 | cf_stats_count_[LEVEL0_SLOWDOWN_WITH_COMPACTION], | |
1129 | cf_stats_count_[LEVEL0_NUM_FILES_TOTAL], | |
1130 | cf_stats_count_[LEVEL0_NUM_FILES_WITH_COMPACTION], | |
1131 | cf_stats_count_[HARD_PENDING_COMPACTION_BYTES_LIMIT], | |
1132 | cf_stats_count_[SOFT_PENDING_COMPACTION_BYTES_LIMIT], | |
1133 | cf_stats_count_[MEMTABLE_COMPACTION], | |
1134 | cf_stats_count_[MEMTABLE_SLOWDOWN], | |
1135 | total_stall_count - cf_stats_snapshot_.stall_count); | |
1136 | value->append(buf); | |
1137 | ||
1138 | cf_stats_snapshot_.ingest_bytes_flush = flush_ingest; | |
1139 | cf_stats_snapshot_.ingest_bytes_addfile = add_file_ingest; | |
1140 | cf_stats_snapshot_.ingest_files_addfile = ingest_files_addfile; | |
1141 | cf_stats_snapshot_.ingest_l0_files_addfile = ingest_l0_files_addfile; | |
1142 | cf_stats_snapshot_.ingest_keys_addfile = ingest_keys_addfile; | |
1143 | cf_stats_snapshot_.comp_stats = compaction_stats_sum; | |
1144 | cf_stats_snapshot_.stall_count = total_stall_count; | |
1145 | } | |
1146 | ||
1147 | void InternalStats::DumpCFFileHistogram(std::string* value) { | |
1148 | char buf[2000]; | |
1149 | snprintf(buf, sizeof(buf), | |
1150 | "\n** File Read Latency Histogram By Level [%s] **\n", | |
1151 | cfd_->GetName().c_str()); | |
1152 | value->append(buf); | |
1153 | ||
1154 | for (int level = 0; level < number_levels_; level++) { | |
1155 | if (!file_read_latency_[level].Empty()) { | |
1156 | char buf2[5000]; | |
1157 | snprintf(buf2, sizeof(buf2), | |
1158 | "** Level %d read latency histogram (micros):\n%s\n", level, | |
1159 | file_read_latency_[level].ToString().c_str()); | |
1160 | value->append(buf2); | |
1161 | } | |
1162 | } | |
1163 | } | |
1164 | ||
1165 | #else | |
1166 | ||
// ROCKSDB_LITE stub: no DB properties are supported, so every lookup
// reports "unknown property".
const DBPropertyInfo* GetPropertyInfo(const Slice& property) { return nullptr; }
1168 | ||
1169 | #endif // !ROCKSDB_LITE | |
1170 | ||
1171 | } // namespace rocksdb |