// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
// This source code is licensed under both the GPLv2 (found in the
// COPYING file in the root directory) and Apache 2.0 License
// (found in the LICENSE.Apache file in the root directory).
//
6 | #include "monitoring/statistics.h" | |
7 | ||
8 | #include <algorithm> | |
9 | #include <cinttypes> | |
10 | #include <cstdio> | |
11 | #include "port/likely.h" | |
12 | #include "rocksdb/statistics.h" | |
13 | ||
14 | namespace ROCKSDB_NAMESPACE { | |
15 | ||
// The order of items listed in Tickers should be the same as
// the order listed in TickersNameMap
const std::vector<std::pair<Tickers, std::string>> TickersNameMap = {
    {BLOCK_CACHE_MISS, "rocksdb.block.cache.miss"},
    {BLOCK_CACHE_HIT, "rocksdb.block.cache.hit"},
    {BLOCK_CACHE_ADD, "rocksdb.block.cache.add"},
    {BLOCK_CACHE_ADD_FAILURES, "rocksdb.block.cache.add.failures"},
    {BLOCK_CACHE_INDEX_MISS, "rocksdb.block.cache.index.miss"},
    {BLOCK_CACHE_INDEX_HIT, "rocksdb.block.cache.index.hit"},
    {BLOCK_CACHE_INDEX_ADD, "rocksdb.block.cache.index.add"},
    {BLOCK_CACHE_INDEX_BYTES_INSERT, "rocksdb.block.cache.index.bytes.insert"},
    {BLOCK_CACHE_INDEX_BYTES_EVICT, "rocksdb.block.cache.index.bytes.evict"},
    {BLOCK_CACHE_FILTER_MISS, "rocksdb.block.cache.filter.miss"},
    {BLOCK_CACHE_FILTER_HIT, "rocksdb.block.cache.filter.hit"},
    {BLOCK_CACHE_FILTER_ADD, "rocksdb.block.cache.filter.add"},
    {BLOCK_CACHE_FILTER_BYTES_INSERT,
     "rocksdb.block.cache.filter.bytes.insert"},
    {BLOCK_CACHE_FILTER_BYTES_EVICT, "rocksdb.block.cache.filter.bytes.evict"},
    {BLOCK_CACHE_DATA_MISS, "rocksdb.block.cache.data.miss"},
    {BLOCK_CACHE_DATA_HIT, "rocksdb.block.cache.data.hit"},
    {BLOCK_CACHE_DATA_ADD, "rocksdb.block.cache.data.add"},
    {BLOCK_CACHE_DATA_BYTES_INSERT, "rocksdb.block.cache.data.bytes.insert"},
    {BLOCK_CACHE_BYTES_READ, "rocksdb.block.cache.bytes.read"},
    {BLOCK_CACHE_BYTES_WRITE, "rocksdb.block.cache.bytes.write"},
    {BLOOM_FILTER_USEFUL, "rocksdb.bloom.filter.useful"},
    {BLOOM_FILTER_FULL_POSITIVE, "rocksdb.bloom.filter.full.positive"},
    {BLOOM_FILTER_FULL_TRUE_POSITIVE,
     "rocksdb.bloom.filter.full.true.positive"},
    {BLOOM_FILTER_MICROS, "rocksdb.bloom.filter.micros"},
    {PERSISTENT_CACHE_HIT, "rocksdb.persistent.cache.hit"},
    {PERSISTENT_CACHE_MISS, "rocksdb.persistent.cache.miss"},
    {SIM_BLOCK_CACHE_HIT, "rocksdb.sim.block.cache.hit"},
    {SIM_BLOCK_CACHE_MISS, "rocksdb.sim.block.cache.miss"},
    {MEMTABLE_HIT, "rocksdb.memtable.hit"},
    {MEMTABLE_MISS, "rocksdb.memtable.miss"},
    {GET_HIT_L0, "rocksdb.l0.hit"},
    {GET_HIT_L1, "rocksdb.l1.hit"},
    {GET_HIT_L2_AND_UP, "rocksdb.l2andup.hit"},
    {COMPACTION_KEY_DROP_NEWER_ENTRY, "rocksdb.compaction.key.drop.new"},
    {COMPACTION_KEY_DROP_OBSOLETE, "rocksdb.compaction.key.drop.obsolete"},
    {COMPACTION_KEY_DROP_RANGE_DEL, "rocksdb.compaction.key.drop.range_del"},
    {COMPACTION_KEY_DROP_USER, "rocksdb.compaction.key.drop.user"},
    {COMPACTION_RANGE_DEL_DROP_OBSOLETE,
     "rocksdb.compaction.range_del.drop.obsolete"},
    {COMPACTION_OPTIMIZED_DEL_DROP_OBSOLETE,
     "rocksdb.compaction.optimized.del.drop.obsolete"},
    {COMPACTION_CANCELLED, "rocksdb.compaction.cancelled"},
    {NUMBER_KEYS_WRITTEN, "rocksdb.number.keys.written"},
    {NUMBER_KEYS_READ, "rocksdb.number.keys.read"},
    {NUMBER_KEYS_UPDATED, "rocksdb.number.keys.updated"},
    {BYTES_WRITTEN, "rocksdb.bytes.written"},
    {BYTES_READ, "rocksdb.bytes.read"},
    {NUMBER_DB_SEEK, "rocksdb.number.db.seek"},
    {NUMBER_DB_NEXT, "rocksdb.number.db.next"},
    {NUMBER_DB_PREV, "rocksdb.number.db.prev"},
    {NUMBER_DB_SEEK_FOUND, "rocksdb.number.db.seek.found"},
    {NUMBER_DB_NEXT_FOUND, "rocksdb.number.db.next.found"},
    {NUMBER_DB_PREV_FOUND, "rocksdb.number.db.prev.found"},
    {ITER_BYTES_READ, "rocksdb.db.iter.bytes.read"},
    {NO_FILE_CLOSES, "rocksdb.no.file.closes"},
    {NO_FILE_OPENS, "rocksdb.no.file.opens"},
    {NO_FILE_ERRORS, "rocksdb.no.file.errors"},
    {STALL_L0_SLOWDOWN_MICROS, "rocksdb.l0.slowdown.micros"},
    {STALL_MEMTABLE_COMPACTION_MICROS, "rocksdb.memtable.compaction.micros"},
    {STALL_L0_NUM_FILES_MICROS, "rocksdb.l0.num.files.stall.micros"},
    {STALL_MICROS, "rocksdb.stall.micros"},
    {DB_MUTEX_WAIT_MICROS, "rocksdb.db.mutex.wait.micros"},
    {RATE_LIMIT_DELAY_MILLIS, "rocksdb.rate.limit.delay.millis"},
    {NO_ITERATORS, "rocksdb.num.iterators"},
    {NUMBER_MULTIGET_CALLS, "rocksdb.number.multiget.get"},
    {NUMBER_MULTIGET_KEYS_READ, "rocksdb.number.multiget.keys.read"},
    {NUMBER_MULTIGET_BYTES_READ, "rocksdb.number.multiget.bytes.read"},
    {NUMBER_FILTERED_DELETES, "rocksdb.number.deletes.filtered"},
    {NUMBER_MERGE_FAILURES, "rocksdb.number.merge.failures"},
    {BLOOM_FILTER_PREFIX_CHECKED, "rocksdb.bloom.filter.prefix.checked"},
    {BLOOM_FILTER_PREFIX_USEFUL, "rocksdb.bloom.filter.prefix.useful"},
    {NUMBER_OF_RESEEKS_IN_ITERATION, "rocksdb.number.reseeks.iteration"},
    {GET_UPDATES_SINCE_CALLS, "rocksdb.getupdatessince.calls"},
    {BLOCK_CACHE_COMPRESSED_MISS, "rocksdb.block.cachecompressed.miss"},
    {BLOCK_CACHE_COMPRESSED_HIT, "rocksdb.block.cachecompressed.hit"},
    {BLOCK_CACHE_COMPRESSED_ADD, "rocksdb.block.cachecompressed.add"},
    {BLOCK_CACHE_COMPRESSED_ADD_FAILURES,
     "rocksdb.block.cachecompressed.add.failures"},
    {WAL_FILE_SYNCED, "rocksdb.wal.synced"},
    {WAL_FILE_BYTES, "rocksdb.wal.bytes"},
    {WRITE_DONE_BY_SELF, "rocksdb.write.self"},
    {WRITE_DONE_BY_OTHER, "rocksdb.write.other"},
    {WRITE_TIMEDOUT, "rocksdb.write.timeout"},
    {WRITE_WITH_WAL, "rocksdb.write.wal"},
    {COMPACT_READ_BYTES, "rocksdb.compact.read.bytes"},
    {COMPACT_WRITE_BYTES, "rocksdb.compact.write.bytes"},
    {FLUSH_WRITE_BYTES, "rocksdb.flush.write.bytes"},
    {NUMBER_DIRECT_LOAD_TABLE_PROPERTIES,
     "rocksdb.number.direct.load.table.properties"},
    {NUMBER_SUPERVERSION_ACQUIRES, "rocksdb.number.superversion_acquires"},
    {NUMBER_SUPERVERSION_RELEASES, "rocksdb.number.superversion_releases"},
    {NUMBER_SUPERVERSION_CLEANUPS, "rocksdb.number.superversion_cleanups"},
    {NUMBER_BLOCK_COMPRESSED, "rocksdb.number.block.compressed"},
    {NUMBER_BLOCK_DECOMPRESSED, "rocksdb.number.block.decompressed"},
    {NUMBER_BLOCK_NOT_COMPRESSED, "rocksdb.number.block.not_compressed"},
    {MERGE_OPERATION_TOTAL_TIME, "rocksdb.merge.operation.time.nanos"},
    {FILTER_OPERATION_TOTAL_TIME, "rocksdb.filter.operation.time.nanos"},
    {ROW_CACHE_HIT, "rocksdb.row.cache.hit"},
    {ROW_CACHE_MISS, "rocksdb.row.cache.miss"},
    {READ_AMP_ESTIMATE_USEFUL_BYTES, "rocksdb.read.amp.estimate.useful.bytes"},
    {READ_AMP_TOTAL_READ_BYTES, "rocksdb.read.amp.total.read.bytes"},
    {NUMBER_RATE_LIMITER_DRAINS, "rocksdb.number.rate_limiter.drains"},
    {NUMBER_ITER_SKIP, "rocksdb.number.iter.skip"},
    {BLOB_DB_NUM_PUT, "rocksdb.blobdb.num.put"},
    {BLOB_DB_NUM_WRITE, "rocksdb.blobdb.num.write"},
    {BLOB_DB_NUM_GET, "rocksdb.blobdb.num.get"},
    {BLOB_DB_NUM_MULTIGET, "rocksdb.blobdb.num.multiget"},
    {BLOB_DB_NUM_SEEK, "rocksdb.blobdb.num.seek"},
    {BLOB_DB_NUM_NEXT, "rocksdb.blobdb.num.next"},
    {BLOB_DB_NUM_PREV, "rocksdb.blobdb.num.prev"},
    {BLOB_DB_NUM_KEYS_WRITTEN, "rocksdb.blobdb.num.keys.written"},
    {BLOB_DB_NUM_KEYS_READ, "rocksdb.blobdb.num.keys.read"},
    {BLOB_DB_BYTES_WRITTEN, "rocksdb.blobdb.bytes.written"},
    {BLOB_DB_BYTES_READ, "rocksdb.blobdb.bytes.read"},
    {BLOB_DB_WRITE_INLINED, "rocksdb.blobdb.write.inlined"},
    {BLOB_DB_WRITE_INLINED_TTL, "rocksdb.blobdb.write.inlined.ttl"},
    {BLOB_DB_WRITE_BLOB, "rocksdb.blobdb.write.blob"},
    {BLOB_DB_WRITE_BLOB_TTL, "rocksdb.blobdb.write.blob.ttl"},
    {BLOB_DB_BLOB_FILE_BYTES_WRITTEN, "rocksdb.blobdb.blob.file.bytes.written"},
    {BLOB_DB_BLOB_FILE_BYTES_READ, "rocksdb.blobdb.blob.file.bytes.read"},
    {BLOB_DB_BLOB_FILE_SYNCED, "rocksdb.blobdb.blob.file.synced"},
    {BLOB_DB_BLOB_INDEX_EXPIRED_COUNT,
     "rocksdb.blobdb.blob.index.expired.count"},
    {BLOB_DB_BLOB_INDEX_EXPIRED_SIZE, "rocksdb.blobdb.blob.index.expired.size"},
    {BLOB_DB_BLOB_INDEX_EVICTED_COUNT,
     "rocksdb.blobdb.blob.index.evicted.count"},
    {BLOB_DB_BLOB_INDEX_EVICTED_SIZE, "rocksdb.blobdb.blob.index.evicted.size"},
    {BLOB_DB_GC_NUM_FILES, "rocksdb.blobdb.gc.num.files"},
    {BLOB_DB_GC_NUM_NEW_FILES, "rocksdb.blobdb.gc.num.new.files"},
    {BLOB_DB_GC_FAILURES, "rocksdb.blobdb.gc.failures"},
    {BLOB_DB_GC_NUM_KEYS_OVERWRITTEN, "rocksdb.blobdb.gc.num.keys.overwritten"},
    {BLOB_DB_GC_NUM_KEYS_EXPIRED, "rocksdb.blobdb.gc.num.keys.expired"},
    {BLOB_DB_GC_NUM_KEYS_RELOCATED, "rocksdb.blobdb.gc.num.keys.relocated"},
    {BLOB_DB_GC_BYTES_OVERWRITTEN, "rocksdb.blobdb.gc.bytes.overwritten"},
    {BLOB_DB_GC_BYTES_EXPIRED, "rocksdb.blobdb.gc.bytes.expired"},
    {BLOB_DB_GC_BYTES_RELOCATED, "rocksdb.blobdb.gc.bytes.relocated"},
    {BLOB_DB_FIFO_NUM_FILES_EVICTED, "rocksdb.blobdb.fifo.num.files.evicted"},
    {BLOB_DB_FIFO_NUM_KEYS_EVICTED, "rocksdb.blobdb.fifo.num.keys.evicted"},
    {BLOB_DB_FIFO_BYTES_EVICTED, "rocksdb.blobdb.fifo.bytes.evicted"},
    {TXN_PREPARE_MUTEX_OVERHEAD, "rocksdb.txn.overhead.mutex.prepare"},
    {TXN_OLD_COMMIT_MAP_MUTEX_OVERHEAD,
     "rocksdb.txn.overhead.mutex.old.commit.map"},
    {TXN_DUPLICATE_KEY_OVERHEAD, "rocksdb.txn.overhead.duplicate.key"},
    {TXN_SNAPSHOT_MUTEX_OVERHEAD, "rocksdb.txn.overhead.mutex.snapshot"},
    {TXN_GET_TRY_AGAIN, "rocksdb.txn.get.tryagain"},
    {NUMBER_MULTIGET_KEYS_FOUND, "rocksdb.number.multiget.keys.found"},
    {NO_ITERATOR_CREATED, "rocksdb.num.iterator.created"},
    {NO_ITERATOR_DELETED, "rocksdb.num.iterator.deleted"},
    {BLOCK_CACHE_COMPRESSION_DICT_MISS,
     "rocksdb.block.cache.compression.dict.miss"},
    {BLOCK_CACHE_COMPRESSION_DICT_HIT,
     "rocksdb.block.cache.compression.dict.hit"},
    {BLOCK_CACHE_COMPRESSION_DICT_ADD,
     "rocksdb.block.cache.compression.dict.add"},
    {BLOCK_CACHE_COMPRESSION_DICT_BYTES_INSERT,
     "rocksdb.block.cache.compression.dict.bytes.insert"},
    {BLOCK_CACHE_COMPRESSION_DICT_BYTES_EVICT,
     "rocksdb.block.cache.compression.dict.bytes.evict"},
};
180 | ||
// The order of items listed in Histograms should be the same as
// the order listed in HistogramsNameMap
const std::vector<std::pair<Histograms, std::string>> HistogramsNameMap = {
    {DB_GET, "rocksdb.db.get.micros"},
    {DB_WRITE, "rocksdb.db.write.micros"},
    {COMPACTION_TIME, "rocksdb.compaction.times.micros"},
    {COMPACTION_CPU_TIME, "rocksdb.compaction.times.cpu_micros"},
    {SUBCOMPACTION_SETUP_TIME, "rocksdb.subcompaction.setup.times.micros"},
    {TABLE_SYNC_MICROS, "rocksdb.table.sync.micros"},
    {COMPACTION_OUTFILE_SYNC_MICROS, "rocksdb.compaction.outfile.sync.micros"},
    {WAL_FILE_SYNC_MICROS, "rocksdb.wal.file.sync.micros"},
    {MANIFEST_FILE_SYNC_MICROS, "rocksdb.manifest.file.sync.micros"},
    {TABLE_OPEN_IO_MICROS, "rocksdb.table.open.io.micros"},
    {DB_MULTIGET, "rocksdb.db.multiget.micros"},
    {READ_BLOCK_COMPACTION_MICROS, "rocksdb.read.block.compaction.micros"},
    {READ_BLOCK_GET_MICROS, "rocksdb.read.block.get.micros"},
    {WRITE_RAW_BLOCK_MICROS, "rocksdb.write.raw.block.micros"},
    {STALL_L0_SLOWDOWN_COUNT, "rocksdb.l0.slowdown.count"},
    {STALL_MEMTABLE_COMPACTION_COUNT, "rocksdb.memtable.compaction.count"},
    {STALL_L0_NUM_FILES_COUNT, "rocksdb.num.files.stall.count"},
    {HARD_RATE_LIMIT_DELAY_COUNT, "rocksdb.hard.rate.limit.delay.count"},
    {SOFT_RATE_LIMIT_DELAY_COUNT, "rocksdb.soft.rate.limit.delay.count"},
    {NUM_FILES_IN_SINGLE_COMPACTION, "rocksdb.numfiles.in.singlecompaction"},
    {DB_SEEK, "rocksdb.db.seek.micros"},
    {WRITE_STALL, "rocksdb.db.write.stall"},
    {SST_READ_MICROS, "rocksdb.sst.read.micros"},
    {NUM_SUBCOMPACTIONS_SCHEDULED, "rocksdb.num.subcompactions.scheduled"},
    {BYTES_PER_READ, "rocksdb.bytes.per.read"},
    {BYTES_PER_WRITE, "rocksdb.bytes.per.write"},
    {BYTES_PER_MULTIGET, "rocksdb.bytes.per.multiget"},
    {BYTES_COMPRESSED, "rocksdb.bytes.compressed"},
    {BYTES_DECOMPRESSED, "rocksdb.bytes.decompressed"},
    {COMPRESSION_TIMES_NANOS, "rocksdb.compression.times.nanos"},
    {DECOMPRESSION_TIMES_NANOS, "rocksdb.decompression.times.nanos"},
    {READ_NUM_MERGE_OPERANDS, "rocksdb.read.num.merge_operands"},
    {BLOB_DB_KEY_SIZE, "rocksdb.blobdb.key.size"},
    {BLOB_DB_VALUE_SIZE, "rocksdb.blobdb.value.size"},
    {BLOB_DB_WRITE_MICROS, "rocksdb.blobdb.write.micros"},
    {BLOB_DB_GET_MICROS, "rocksdb.blobdb.get.micros"},
    {BLOB_DB_MULTIGET_MICROS, "rocksdb.blobdb.multiget.micros"},
    {BLOB_DB_SEEK_MICROS, "rocksdb.blobdb.seek.micros"},
    {BLOB_DB_NEXT_MICROS, "rocksdb.blobdb.next.micros"},
    {BLOB_DB_PREV_MICROS, "rocksdb.blobdb.prev.micros"},
    {BLOB_DB_BLOB_FILE_WRITE_MICROS, "rocksdb.blobdb.blob.file.write.micros"},
    {BLOB_DB_BLOB_FILE_READ_MICROS, "rocksdb.blobdb.blob.file.read.micros"},
    {BLOB_DB_BLOB_FILE_SYNC_MICROS, "rocksdb.blobdb.blob.file.sync.micros"},
    {BLOB_DB_GC_MICROS, "rocksdb.blobdb.gc.micros"},
    {BLOB_DB_COMPRESSION_MICROS, "rocksdb.blobdb.compression.micros"},
    {BLOB_DB_DECOMPRESSION_MICROS, "rocksdb.blobdb.decompression.micros"},
    {FLUSH_TIME, "rocksdb.db.flush.micros"},
    {SST_BATCH_SIZE, "rocksdb.sst.batch.size"},
};
231 | ||
232 | std::shared_ptr<Statistics> CreateDBStatistics() { | |
233 | return std::make_shared<StatisticsImpl>(nullptr); | |
234 | } | |
235 | ||
// `stats`, if non-null, additionally receives every ticker/histogram update
// recorded here (see recordTick / recordInHistogram / setTickerCount below).
StatisticsImpl::StatisticsImpl(std::shared_ptr<Statistics> stats)
    : stats_(std::move(stats)) {}

StatisticsImpl::~StatisticsImpl() {}
240 | ||
241 | uint64_t StatisticsImpl::getTickerCount(uint32_t tickerType) const { | |
242 | MutexLock lock(&aggregate_lock_); | |
243 | return getTickerCountLocked(tickerType); | |
244 | } | |
245 | ||
246 | uint64_t StatisticsImpl::getTickerCountLocked(uint32_t tickerType) const { | |
247 | assert(tickerType < TICKER_ENUM_MAX); | |
248 | uint64_t res = 0; | |
249 | for (size_t core_idx = 0; core_idx < per_core_stats_.Size(); ++core_idx) { | |
250 | res += per_core_stats_.AccessAtCore(core_idx)->tickers_[tickerType]; | |
251 | } | |
252 | return res; | |
253 | } | |
254 | ||
255 | void StatisticsImpl::histogramData(uint32_t histogramType, | |
256 | HistogramData* const data) const { | |
257 | MutexLock lock(&aggregate_lock_); | |
258 | getHistogramImplLocked(histogramType)->Data(data); | |
259 | } | |
260 | ||
261 | std::unique_ptr<HistogramImpl> StatisticsImpl::getHistogramImplLocked( | |
262 | uint32_t histogramType) const { | |
263 | assert(histogramType < HISTOGRAM_ENUM_MAX); | |
264 | std::unique_ptr<HistogramImpl> res_hist(new HistogramImpl()); | |
265 | for (size_t core_idx = 0; core_idx < per_core_stats_.Size(); ++core_idx) { | |
266 | res_hist->Merge( | |
267 | per_core_stats_.AccessAtCore(core_idx)->histograms_[histogramType]); | |
268 | } | |
269 | return res_hist; | |
270 | } | |
271 | ||
272 | std::string StatisticsImpl::getHistogramString(uint32_t histogramType) const { | |
273 | MutexLock lock(&aggregate_lock_); | |
274 | return getHistogramImplLocked(histogramType)->ToString(); | |
275 | } | |
276 | ||
277 | void StatisticsImpl::setTickerCount(uint32_t tickerType, uint64_t count) { | |
278 | { | |
279 | MutexLock lock(&aggregate_lock_); | |
280 | setTickerCountLocked(tickerType, count); | |
281 | } | |
282 | if (stats_ && tickerType < TICKER_ENUM_MAX) { | |
283 | stats_->setTickerCount(tickerType, count); | |
284 | } | |
285 | } | |
286 | ||
287 | void StatisticsImpl::setTickerCountLocked(uint32_t tickerType, uint64_t count) { | |
288 | assert(tickerType < TICKER_ENUM_MAX); | |
289 | for (size_t core_idx = 0; core_idx < per_core_stats_.Size(); ++core_idx) { | |
290 | if (core_idx == 0) { | |
291 | per_core_stats_.AccessAtCore(core_idx)->tickers_[tickerType] = count; | |
292 | } else { | |
293 | per_core_stats_.AccessAtCore(core_idx)->tickers_[tickerType] = 0; | |
294 | } | |
295 | } | |
296 | } | |
297 | ||
298 | uint64_t StatisticsImpl::getAndResetTickerCount(uint32_t tickerType) { | |
299 | uint64_t sum = 0; | |
300 | { | |
301 | MutexLock lock(&aggregate_lock_); | |
302 | assert(tickerType < TICKER_ENUM_MAX); | |
303 | for (size_t core_idx = 0; core_idx < per_core_stats_.Size(); ++core_idx) { | |
304 | sum += | |
305 | per_core_stats_.AccessAtCore(core_idx)->tickers_[tickerType].exchange( | |
306 | 0, std::memory_order_relaxed); | |
307 | } | |
308 | } | |
309 | if (stats_ && tickerType < TICKER_ENUM_MAX) { | |
310 | stats_->setTickerCount(tickerType, 0); | |
311 | } | |
312 | return sum; | |
313 | } | |
314 | ||
315 | void StatisticsImpl::recordTick(uint32_t tickerType, uint64_t count) { | |
316 | assert(tickerType < TICKER_ENUM_MAX); | |
317 | per_core_stats_.Access()->tickers_[tickerType].fetch_add( | |
318 | count, std::memory_order_relaxed); | |
319 | if (stats_ && tickerType < TICKER_ENUM_MAX) { | |
320 | stats_->recordTick(tickerType, count); | |
321 | } | |
322 | } | |
323 | ||
324 | void StatisticsImpl::recordInHistogram(uint32_t histogramType, uint64_t value) { | |
325 | assert(histogramType < HISTOGRAM_ENUM_MAX); | |
326 | if (get_stats_level() <= StatsLevel::kExceptHistogramOrTimers) { | |
327 | return; | |
328 | } | |
329 | per_core_stats_.Access()->histograms_[histogramType].Add(value); | |
330 | if (stats_ && histogramType < HISTOGRAM_ENUM_MAX) { | |
331 | stats_->recordInHistogram(histogramType, value); | |
332 | } | |
333 | } | |
334 | ||
335 | Status StatisticsImpl::Reset() { | |
336 | MutexLock lock(&aggregate_lock_); | |
337 | for (uint32_t i = 0; i < TICKER_ENUM_MAX; ++i) { | |
338 | setTickerCountLocked(i, 0); | |
339 | } | |
340 | for (uint32_t i = 0; i < HISTOGRAM_ENUM_MAX; ++i) { | |
341 | for (size_t core_idx = 0; core_idx < per_core_stats_.Size(); ++core_idx) { | |
342 | per_core_stats_.AccessAtCore(core_idx)->histograms_[i].Clear(); | |
343 | } | |
344 | } | |
345 | return Status::OK(); | |
346 | } | |
347 | ||
namespace {

// a buffer size used for temp string buffers; large enough for one formatted
// ticker or histogram line in ToString() below
const int kTmpStrBufferSize = 200;

}  // namespace
354 | ||
355 | std::string StatisticsImpl::ToString() const { | |
356 | MutexLock lock(&aggregate_lock_); | |
357 | std::string res; | |
358 | res.reserve(20000); | |
359 | for (const auto& t : TickersNameMap) { | |
360 | assert(t.first < TICKER_ENUM_MAX); | |
361 | char buffer[kTmpStrBufferSize]; | |
362 | snprintf(buffer, kTmpStrBufferSize, "%s COUNT : %" PRIu64 "\n", | |
363 | t.second.c_str(), getTickerCountLocked(t.first)); | |
364 | res.append(buffer); | |
365 | } | |
366 | for (const auto& h : HistogramsNameMap) { | |
367 | assert(h.first < HISTOGRAM_ENUM_MAX); | |
368 | char buffer[kTmpStrBufferSize]; | |
369 | HistogramData hData; | |
370 | getHistogramImplLocked(h.first)->Data(&hData); | |
371 | // don't handle failures - buffer should always be big enough and arguments | |
372 | // should be provided correctly | |
373 | int ret = | |
374 | snprintf(buffer, kTmpStrBufferSize, | |
375 | "%s P50 : %f P95 : %f P99 : %f P100 : %f COUNT : %" PRIu64 | |
376 | " SUM : %" PRIu64 "\n", | |
377 | h.second.c_str(), hData.median, hData.percentile95, | |
378 | hData.percentile99, hData.max, hData.count, hData.sum); | |
379 | if (ret < 0 || ret >= kTmpStrBufferSize) { | |
380 | assert(false); | |
381 | continue; | |
382 | } | |
383 | res.append(buffer); | |
384 | } | |
385 | res.shrink_to_fit(); | |
386 | return res; | |
387 | } | |
388 | ||
389 | bool StatisticsImpl::getTickerMap( | |
390 | std::map<std::string, uint64_t>* stats_map) const { | |
391 | assert(stats_map); | |
392 | if (!stats_map) return false; | |
393 | stats_map->clear(); | |
394 | MutexLock lock(&aggregate_lock_); | |
395 | for (const auto& t : TickersNameMap) { | |
396 | assert(t.first < TICKER_ENUM_MAX); | |
397 | (*stats_map)[t.second.c_str()] = getTickerCountLocked(t.first); | |
398 | } | |
399 | return true; | |
400 | } | |
401 | ||
402 | bool StatisticsImpl::HistEnabledForType(uint32_t type) const { | |
403 | return type < HISTOGRAM_ENUM_MAX; | |
404 | } | |
405 | ||
406 | } // namespace ROCKSDB_NAMESPACE |