1 // Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
2 // This source code is licensed under both the GPLv2 (found in the
3 // COPYING file in the root directory) and Apache 2.0 License
4 // (found in the LICENSE.Apache file in the root directory).
6 // Copyright (c) 2011 The LevelDB Authors. All rights reserved.
7 // Use of this source code is governed by a BSD-style license that can be
8 // found in the LICENSE file. See the AUTHORS file for names of contributors.
15 #include "db/db_test_util.h"
16 #include "options/cf_options.h"
17 #include "port/stack_trace.h"
18 #include "rocksdb/listener.h"
19 #include "rocksdb/options.h"
20 #include "rocksdb/perf_context.h"
21 #include "rocksdb/perf_level.h"
22 #include "rocksdb/table.h"
23 #include "table/block_based/block.h"
24 #include "table/format.h"
25 #include "table/meta_blocks.h"
26 #include "table/table_builder.h"
27 #include "test_util/mock_time_env.h"
28 #include "util/random.h"
29 #include "util/string_util.h"
31 namespace ROCKSDB_NAMESPACE
{
33 class DBPropertiesTest
: public DBTestBase
{
36 : DBTestBase("db_properties_test", /*env_do_fsync=*/false) {}
38 void AssertDbStats(const std::map
<std::string
, std::string
>& db_stats
,
39 double expected_uptime
, int expected_user_bytes_written
,
40 int expected_wal_bytes_written
,
41 int expected_user_writes_by_self
,
42 int expected_user_writes_with_wal
) {
43 ASSERT_EQ(std::to_string(expected_uptime
), db_stats
.at("db.uptime"));
44 ASSERT_EQ(std::to_string(expected_wal_bytes_written
),
45 db_stats
.at("db.wal_bytes_written"));
46 ASSERT_EQ("0", db_stats
.at("db.wal_syncs"));
47 ASSERT_EQ(std::to_string(expected_user_bytes_written
),
48 db_stats
.at("db.user_bytes_written"));
49 ASSERT_EQ("0", db_stats
.at("db.user_writes_by_other"));
50 ASSERT_EQ(std::to_string(expected_user_writes_by_self
),
51 db_stats
.at("db.user_writes_by_self"));
52 ASSERT_EQ(std::to_string(expected_user_writes_with_wal
),
53 db_stats
.at("db.user_writes_with_wal"));
54 ASSERT_EQ("0", db_stats
.at("db.user_write_stall_micros"));
// Exercises mem-table entry-count properties and the
// "rocksdb.is-file-deletions-enabled" property across option configurations.
// NOTE(review): the trailing "} while (ChangeOptions());" implies a leading
// "do {" plus the Options / std::string num declarations and the expected-value
// asserts were dropped by this extraction — restore from upstream before use.
59 TEST_F(DBPropertiesTest
, Empty
) {
// Small write buffer so one large value fills the memtable.
63 options
.write_buffer_size
= 100000; // Small write buffer
64 options
.allow_concurrent_memtable_write
= false;
65 options
= CurrentOptions(options
);
66 CreateAndReopenWithCF({"pikachu"}, options
);
69 ASSERT_TRUE(dbfull()->GetProperty(
70 handles_
[1], "rocksdb.num-entries-active-mem-table", &num
));
73 ASSERT_OK(Put(1, "foo", "v1"));
74 ASSERT_EQ("v1", Get(1, "foo"));
75 ASSERT_TRUE(dbfull()->GetProperty(
76 handles_
[1], "rocksdb.num-entries-active-mem-table", &num
));
// Block SST syncing so flushed data stays pending while properties are read.
80 env_
->delay_sstable_sync_
.store(true, std::memory_order_release
);
81 ASSERT_OK(Put(1, "k1", std::string(100000, 'x'))); // Fill memtable
82 ASSERT_TRUE(dbfull()->GetProperty(
83 handles_
[1], "rocksdb.num-entries-active-mem-table", &num
));
86 ASSERT_OK(Put(1, "k2", std::string(100000, 'y'))); // Trigger compaction
87 ASSERT_TRUE(dbfull()->GetProperty(
88 handles_
[1], "rocksdb.num-entries-active-mem-table", &num
));
91 ASSERT_EQ("v1", Get(1, "foo"));
93 env_
->delay_sstable_sync_
.store(false, std::memory_order_release
);
// Disable is ref-counted: three Disable calls need matching Enables.
// NOTE(review): the ASSERT_TRUE( wrappers and the ASSERT_EQ checks of num
// around each GetProperty below were dropped by this extraction.
95 ASSERT_OK(db_
->DisableFileDeletions());
97 dbfull()->GetProperty("rocksdb.is-file-deletions-enabled", &num
));
100 ASSERT_OK(db_
->DisableFileDeletions());
102 dbfull()->GetProperty("rocksdb.is-file-deletions-enabled", &num
));
105 ASSERT_OK(db_
->DisableFileDeletions());
107 dbfull()->GetProperty("rocksdb.is-file-deletions-enabled", &num
));
110 ASSERT_OK(db_
->EnableFileDeletions(false));
112 dbfull()->GetProperty("rocksdb.is-file-deletions-enabled", &num
));
115 ASSERT_OK(db_
->EnableFileDeletions());
117 dbfull()->GetProperty("rocksdb.is-file-deletions-enabled", &num
));
119 } while (ChangeOptions());
// Verifies "rocksdb.current-super-version-number" advances across a write
// and (presumably) a flush. NOTE(review): the uint64_t v1/v2/v3 declarations,
// ASSERT_TRUE( wrappers, the Flush call and the v1/v2/v3 comparison asserts
// were dropped by this extraction.
122 TEST_F(DBPropertiesTest
, CurrentVersionNumber
) {
125 dbfull()->GetIntProperty("rocksdb.current-super-version-number", &v1
));
126 ASSERT_OK(Put("12345678", ""));
128 dbfull()->GetIntProperty("rocksdb.current-super-version-number", &v2
));
131 dbfull()->GetIntProperty("rocksdb.current-super-version-number", &v3
));
// Checks that GetAggregatedIntProperty sums an int property over all column
// families: manual per-handle summation must equal the aggregated API result.
// NOTE(review): several lines (Random rnd, loop closing braces, the manual_sum
// accumulation, and &api_sum argument) were dropped by this extraction.
137 TEST_F(DBPropertiesTest
, GetAggregatedIntPropertyTest
) {
138 const int kKeySize
= 100;
139 const int kValueSize
= 500;
140 const int kKeyNum
= 100;
144 options
.create_if_missing
= true;
// Buffer sized to hold ~10% of the keys, so memtables stay partially full.
145 options
.write_buffer_size
= (kKeySize
+ kValueSize
) * kKeyNum
/ 10;
146 // Make them never flush
147 options
.min_write_buffer_number_to_merge
= 1000;
148 options
.max_write_buffer_number
= 1000;
149 options
= CurrentOptions(options
);
150 CreateAndReopenWithCF({"one", "two", "three", "four"}, options
);
// Populate every column family with random keys/values.
153 for (auto* handle
: handles_
) {
154 for (int i
= 0; i
< kKeyNum
; ++i
) {
155 ASSERT_OK(db_
->Put(WriteOptions(), handle
, rnd
.RandomString(kKeySize
),
156 rnd
.RandomString(kValueSize
)));
160 uint64_t manual_sum
= 0;
161 uint64_t api_sum
= 0;
// Sum kSizeAllMemTables per handle, then compare with the aggregated API.
163 for (auto* handle
: handles_
) {
165 db_
->GetIntProperty(handle
, DB::Properties::kSizeAllMemTables
, &value
));
168 ASSERT_TRUE(db_
->GetAggregatedIntProperty(DB::Properties::kSizeAllMemTables
,
170 ASSERT_GT(manual_sum
, 0);
171 ASSERT_EQ(manual_sum
, api_sum
);
// kDBStats is a string property, so the aggregated-int API must reject it.
173 ASSERT_FALSE(db_
->GetAggregatedIntProperty(DB::Properties::kDBStats
, &value
));
175 uint64_t before_flush_trm
;
176 uint64_t after_flush_trm
;
177 for (auto* handle
: handles_
) {
178 ASSERT_TRUE(db_
->GetAggregatedIntProperty(
179 DB::Properties::kEstimateTableReadersMem
, &before_flush_trm
));
181 // Issue flush and expect larger memory usage of table readers.
182 ASSERT_OK(db_
->Flush(FlushOptions(), handle
));
184 ASSERT_TRUE(db_
->GetAggregatedIntProperty(
185 DB::Properties::kEstimateTableReadersMem
, &after_flush_trm
));
186 ASSERT_GT(after_flush_trm
, before_flush_trm
);
191 void ResetTableProperties(TableProperties
* tp
) {
195 tp
->raw_key_size
= 0;
196 tp
->raw_value_size
= 0;
197 tp
->num_data_blocks
= 0;
199 tp
->num_deletions
= 0;
200 tp
->num_merge_operands
= 0;
201 tp
->num_range_deletions
= 0;
204 void ParseTablePropertiesString(std::string tp_string
, TableProperties
* tp
) {
206 std::replace(tp_string
.begin(), tp_string
.end(), ';', ' ');
207 std::replace(tp_string
.begin(), tp_string
.end(), '=', ' ');
208 ResetTableProperties(tp
);
209 sscanf(tp_string
.c_str(),
210 "# data blocks %" SCNu64
" # entries %" SCNu64
" # deletions %" SCNu64
211 " # merge operands %" SCNu64
" # range deletions %" SCNu64
212 " raw key size %" SCNu64
213 " raw average key size %lf "
214 " raw value size %" SCNu64
215 " raw average value size %lf "
216 " data block size %" SCNu64
" index block size (user-key? %" SCNu64
217 ", delta-value? %" SCNu64
") %" SCNu64
" filter block size %" SCNu64
,
218 &tp
->num_data_blocks
, &tp
->num_entries
, &tp
->num_deletions
,
219 &tp
->num_merge_operands
, &tp
->num_range_deletions
, &tp
->raw_key_size
,
220 &dummy_double
, &tp
->raw_value_size
, &dummy_double
, &tp
->data_size
,
221 &tp
->index_key_is_user_key
, &tp
->index_value_is_delta_encoded
,
222 &tp
->index_size
, &tp
->filter_size
);
225 void VerifySimilar(uint64_t a
, uint64_t b
, double bias
) {
226 ASSERT_EQ(a
== 0U, b
== 0U);
230 double dbl_a
= static_cast<double>(a
);
231 double dbl_b
= static_cast<double>(b
);
233 ASSERT_LT(static_cast<double>(dbl_a
- dbl_b
) / (dbl_a
+ dbl_b
), bias
);
235 ASSERT_LT(static_cast<double>(dbl_b
- dbl_a
) / (dbl_a
+ dbl_b
), bias
);
239 void VerifyTableProperties(
240 const TableProperties
& base_tp
, const TableProperties
& new_tp
,
241 double filter_size_bias
= CACHE_LINE_SIZE
>= 256 ? 0.18 : 0.1,
242 double index_size_bias
= 0.1, double data_size_bias
= 0.1,
243 double num_data_blocks_bias
= 0.05) {
244 VerifySimilar(base_tp
.data_size
, new_tp
.data_size
, data_size_bias
);
245 VerifySimilar(base_tp
.index_size
, new_tp
.index_size
, index_size_bias
);
246 VerifySimilar(base_tp
.filter_size
, new_tp
.filter_size
, filter_size_bias
);
247 VerifySimilar(base_tp
.num_data_blocks
, new_tp
.num_data_blocks
,
248 num_data_blocks_bias
);
250 ASSERT_EQ(base_tp
.raw_key_size
, new_tp
.raw_key_size
);
251 ASSERT_EQ(base_tp
.raw_value_size
, new_tp
.raw_value_size
);
252 ASSERT_EQ(base_tp
.num_entries
, new_tp
.num_entries
);
253 ASSERT_EQ(base_tp
.num_deletions
, new_tp
.num_deletions
);
254 ASSERT_EQ(base_tp
.num_range_deletions
, new_tp
.num_range_deletions
);
256 // Merge operands may become Puts, so we only have an upper bound the exact
257 // number of merge operands.
258 ASSERT_GE(base_tp
.num_merge_operands
, new_tp
.num_merge_operands
);
261 void GetExpectedTableProperties(
262 TableProperties
* expected_tp
, const int kKeySize
, const int kValueSize
,
263 const int kPutsPerTable
, const int kDeletionsPerTable
,
264 const int kMergeOperandsPerTable
, const int kRangeDeletionsPerTable
,
265 const int kTableCount
, const int kBloomBitsPerKey
, const size_t kBlockSize
,
266 const bool index_key_is_user_key
, const bool value_delta_encoding
) {
267 const int kKeysPerTable
=
268 kPutsPerTable
+ kDeletionsPerTable
+ kMergeOperandsPerTable
;
269 const int kPutCount
= kTableCount
* kPutsPerTable
;
270 const int kDeletionCount
= kTableCount
* kDeletionsPerTable
;
271 const int kMergeCount
= kTableCount
* kMergeOperandsPerTable
;
272 const int kRangeDeletionCount
= kTableCount
* kRangeDeletionsPerTable
;
273 const int kKeyCount
=
274 kPutCount
+ kDeletionCount
+ kMergeCount
+ kRangeDeletionCount
;
275 const int kAvgSuccessorSize
= kKeySize
/ 5;
276 const int kEncodingSavePerKey
= kKeySize
/ 4;
277 expected_tp
->raw_key_size
= kKeyCount
* (kKeySize
+ 8);
278 expected_tp
->raw_value_size
=
279 (kPutCount
+ kMergeCount
+ kRangeDeletionCount
) * kValueSize
;
280 expected_tp
->num_entries
= kKeyCount
;
281 expected_tp
->num_deletions
= kDeletionCount
+ kRangeDeletionCount
;
282 expected_tp
->num_merge_operands
= kMergeCount
;
283 expected_tp
->num_range_deletions
= kRangeDeletionCount
;
284 expected_tp
->num_data_blocks
=
286 (kKeysPerTable
* (kKeySize
- kEncodingSavePerKey
+ kValueSize
)) /
288 expected_tp
->data_size
=
289 kTableCount
* (kKeysPerTable
* (kKeySize
+ 8 + kValueSize
));
290 expected_tp
->index_size
=
291 expected_tp
->num_data_blocks
*
292 (kAvgSuccessorSize
+ (index_key_is_user_key
? 0 : 8) -
293 // discount 1 byte as value size is not encoded in value delta encoding
294 (value_delta_encoding
? 1 : 0));
295 expected_tp
->filter_size
=
296 kTableCount
* ((kKeysPerTable
* kBloomBitsPerKey
+ 7) / 8 +
297 /*average-ish overhead*/ CACHE_LINE_SIZE
/ 2);
299 } // anonymous namespace
// Sanity-checks InternalStats' property table: no property name ends in a
// digit (digits are reserved for the level suffix of per-level properties),
// and each entry has exactly one handler kind set.
// NOTE(review): the "int count = 0;" declaration and loop braces were
// dropped by this extraction.
301 TEST_F(DBPropertiesTest
, ValidatePropertyInfo
) {
302 for (const auto& ppt_name_and_info
: InternalStats::ppt_name_to_info
) {
303 // If C++ gets a std::string_literal, this would be better to check at
304 // compile-time using static_assert.
305 ASSERT_TRUE(ppt_name_and_info
.first
.empty() ||
306 !isdigit(ppt_name_and_info
.first
.back()));
// Exactly one of the three handler pointers must be non-null.
309 count
+= (ppt_name_and_info
.second
.handle_string
== nullptr) ? 0 : 1;
310 count
+= (ppt_name_and_info
.second
.handle_int
== nullptr) ? 0 : 1;
311 count
+= (ppt_name_and_info
.second
.handle_string_dbimpl
== nullptr) ? 0 : 1;
312 ASSERT_TRUE(count
== 1);
// NOTE(review): the "int key = 0;" declaration, loop closing braces and the
// Reopen(options) before the second check were dropped by this extraction.
316 TEST_F(DBPropertiesTest
, ValidateSampleNumber
) {
317 // When "max_open_files" is -1, we read all the files for
318 // "rocksdb.estimate-num-keys" computation, which is the ground truth.
319 // Otherwise, we sample 20 newest files to make an estimation.
320 // Formula: latest_20_files_active_key_ratio * total_files
321 Options options
= CurrentOptions();
322 options
.disable_auto_compactions
= true;
323 options
.level0_stop_writes_trigger
= 1000;
324 DestroyAndReopen(options
);
// First pass: 20 files x 2 rows, second pass: 10 files x 1 row (50 keys
// total), so sampling the 20 newest files estimates 45.
326 for (int files
= 20; files
>= 10; files
-= 10) {
327 for (int i
= 0; i
< files
; i
++) {
328 int rows
= files
/ 10;
329 for (int j
= 0; j
< rows
; j
++) {
330 ASSERT_OK(db_
->Put(WriteOptions(), std::to_string(++key
), "foo"));
332 ASSERT_OK(db_
->Flush(FlushOptions()));
// Sampled estimate with limited max_open_files.
337 ASSERT_TRUE(dbfull()->GetProperty("rocksdb.estimate-num-keys", &num
));
338 ASSERT_EQ("45", num
);
// Ground truth with all files readable.
339 options
.max_open_files
= -1;
341 ASSERT_TRUE(dbfull()->GetProperty("rocksdb.estimate-num-keys", &num
));
342 ASSERT_EQ("50", num
);
// Writes kTableCount tables of puts/deletes/merges/range-deletes, then checks
// the "rocksdb.aggregated-table-properties" string against the analytic
// expectation from GetExpectedTableProperties().
// NOTE(review): Random rnd, loop closing braces and the DeleteRange
// start/end argument line were dropped by this extraction.
345 TEST_F(DBPropertiesTest
, AggregatedTableProperties
) {
346 for (int kTableCount
= 40; kTableCount
<= 100; kTableCount
+= 30) {
347 const int kDeletionsPerTable
= 0;
348 const int kMergeOperandsPerTable
= 15;
349 const int kRangeDeletionsPerTable
= 5;
350 const int kPutsPerTable
= 100;
351 const int kKeySize
= 80;
352 const int kValueSize
= 200;
353 const int kBloomBitsPerKey
= 20;
355 Options options
= CurrentOptions();
356 options
.level0_file_num_compaction_trigger
= 8;
// No compression so data_size is predictable from raw key/value sizes.
357 options
.compression
= kNoCompression
;
358 options
.create_if_missing
= true;
359 options
.merge_operator
.reset(new TestPutOperator());
361 BlockBasedTableOptions table_options
;
362 table_options
.filter_policy
.reset(
363 NewBloomFilterPolicy(kBloomBitsPerKey
, false));
364 table_options
.block_size
= 1024;
365 options
.table_factory
.reset(NewBlockBasedTableFactory(table_options
));
367 DestroyAndReopen(options
);
369 // Hold open a snapshot to prevent range tombstones from being compacted
371 ManagedSnapshot
snapshot(db_
);
// One flush per iteration => kTableCount SST files.
374 for (int table
= 1; table
<= kTableCount
; ++table
) {
375 for (int i
= 0; i
< kPutsPerTable
; ++i
) {
376 ASSERT_OK(db_
->Put(WriteOptions(), rnd
.RandomString(kKeySize
),
377 rnd
.RandomString(kValueSize
)));
379 for (int i
= 0; i
< kDeletionsPerTable
; i
++) {
380 ASSERT_OK(db_
->Delete(WriteOptions(), rnd
.RandomString(kKeySize
)));
382 for (int i
= 0; i
< kMergeOperandsPerTable
; i
++) {
383 ASSERT_OK(db_
->Merge(WriteOptions(), rnd
.RandomString(kKeySize
),
384 rnd
.RandomString(kValueSize
)));
386 for (int i
= 0; i
< kRangeDeletionsPerTable
; i
++) {
387 std::string start
= rnd
.RandomString(kKeySize
);
388 std::string end
= start
;
389 end
.resize(kValueSize
);
390 ASSERT_OK(db_
->DeleteRange(WriteOptions(), db_
->DefaultColumnFamily(),
393 ASSERT_OK(db_
->Flush(FlushOptions()));
// Parse the aggregated property string and compare with expectations.
395 std::string property
;
396 db_
->GetProperty(DB::Properties::kAggregatedTableProperties
, &property
);
397 TableProperties output_tp
;
398 ParseTablePropertiesString(property
, &output_tp
);
399 bool index_key_is_user_key
= output_tp
.index_key_is_user_key
> 0;
400 bool value_is_delta_encoded
= output_tp
.index_value_is_delta_encoded
> 0;
402 TableProperties expected_tp
;
403 GetExpectedTableProperties(
404 &expected_tp
, kKeySize
, kValueSize
, kPutsPerTable
, kDeletionsPerTable
,
405 kMergeOperandsPerTable
, kRangeDeletionsPerTable
, kTableCount
,
406 kBloomBitsPerKey
, table_options
.block_size
, index_key_is_user_key
,
407 value_is_delta_encoded
);
409 VerifyTableProperties(expected_tp
, output_tp
);
// Verifies that per-level read latency histograms appear in "rocksdb.cfstats"
// and "rocksdb.cf-file-histogram" only for levels that were actually read,
// survive reopen, and are cleared by ResetStats().
// NOTE(review): several lines (Random rnd, int key_index = 0, Get(...) bodies
// inside the read loops, closing braces, Flush calls, and some ASSERT_TRUE(
// wrappers) were dropped by this extraction.
413 TEST_F(DBPropertiesTest
, ReadLatencyHistogramByLevel
) {
414 Options options
= CurrentOptions();
415 options
.write_buffer_size
= 110 << 10;
416 options
.level0_file_num_compaction_trigger
= 6;
417 options
.num_levels
= 4;
418 options
.compression
= kNoCompression
;
419 options
.max_bytes_for_level_base
= 4500 << 10;
420 options
.target_file_size_base
= 98 << 10;
421 options
.max_write_buffer_number
= 2;
422 options
.statistics
= ROCKSDB_NAMESPACE::CreateDBStatistics();
423 options
.max_open_files
= 11; // Make sure no preloading of table readers
425 // RocksDB sanitizes max open files to at least 20. Modify it back.
426 ROCKSDB_NAMESPACE::SyncPoint::GetInstance()->SetCallBack(
427 "SanitizeOptions::AfterChangeMaxOpenFiles", [&](void* arg
) {
428 int* max_open_files
= static_cast<int*>(arg
);
429 *max_open_files
= 11;
431 ROCKSDB_NAMESPACE::SyncPoint::GetInstance()->EnableProcessing();
433 BlockBasedTableOptions table_options
;
434 table_options
.no_block_cache
= true;
436 CreateAndReopenWithCF({"pikachu"}, options
);
// Generate enough files to populate levels 0 and 1 (but not 2).
439 for (int num
= 0; num
< 8; num
++) {
440 ASSERT_OK(Put("foo", "bar"));
441 GenerateNewFile(&rnd
, &key_index
);
442 ASSERT_OK(dbfull()->TEST_WaitForCompact());
444 ASSERT_OK(dbfull()->TEST_WaitForCompact());
447 ASSERT_TRUE(dbfull()->GetProperty("rocksdb.dbstats", &prop
));
449 // Get() after flushes; see latency histogram tracked.
450 for (int key
= 0; key
< key_index
; key
++) {
453 ASSERT_TRUE(dbfull()->GetProperty("rocksdb.cfstats", &prop
));
454 ASSERT_NE(std::string::npos
, prop
.find("** Level 0 read latency histogram"));
455 ASSERT_NE(std::string::npos
, prop
.find("** Level 1 read latency histogram"));
456 ASSERT_EQ(std::string::npos
, prop
.find("** Level 2 read latency histogram"));
458 // Reopen and issue Get(); see the latency tracked.
459 ReopenWithColumnFamilies({"default", "pikachu"}, options
);
460 ASSERT_OK(dbfull()->TEST_WaitForCompact());
461 for (int key
= 0; key
< key_index
; key
++) {
465 // Test for getting immutable_db_options_.statistics
466 ASSERT_TRUE(dbfull()->GetProperty(dbfull()->DefaultColumnFamily(),
467 "rocksdb.options-statistics", &prop
));
468 ASSERT_NE(std::string::npos
, prop
.find("rocksdb.block.cache.miss"));
469 ASSERT_EQ(std::string::npos
, prop
.find("rocksdb.db.f.micros"));
471 ASSERT_TRUE(dbfull()->GetProperty(dbfull()->DefaultColumnFamily(),
472 "rocksdb.cf-file-histogram", &prop
));
473 ASSERT_NE(std::string::npos
, prop
.find("** Level 0 read latency histogram"));
474 ASSERT_NE(std::string::npos
, prop
.find("** Level 1 read latency histogram"));
475 ASSERT_EQ(std::string::npos
, prop
.find("** Level 2 read latency histogram"));
477 // Reopen and issue iterating; see the latency tracked.
478 ReopenWithColumnFamilies({"default", "pikachu"}, options
);
479 ROCKSDB_NAMESPACE::SyncPoint::GetInstance()->DisableProcessing();
// Fresh reopen with nothing read yet: no histograms expected.
480 ASSERT_TRUE(dbfull()->GetProperty("rocksdb.cf-file-histogram", &prop
));
481 ASSERT_EQ(std::string::npos
, prop
.find("** Level 0 read latency histogram"));
482 ASSERT_EQ(std::string::npos
, prop
.find("** Level 1 read latency histogram"));
483 ASSERT_EQ(std::string::npos
, prop
.find("** Level 2 read latency histogram"));
485 std::unique_ptr
<Iterator
> iter(db_
->NewIterator(ReadOptions()));
486 for (iter
->Seek(Key(0)); iter
->Valid(); iter
->Next()) {
488 ASSERT_OK(iter
->status());
490 ASSERT_TRUE(dbfull()->GetProperty("rocksdb.cf-file-histogram", &prop
));
491 ASSERT_NE(std::string::npos
, prop
.find("** Level 0 read latency histogram"));
492 ASSERT_NE(std::string::npos
, prop
.find("** Level 1 read latency histogram"));
493 ASSERT_EQ(std::string::npos
, prop
.find("** Level 2 read latency histogram"));
495 // CF 1 should show no histogram.
497 dbfull()->GetProperty(handles_
[1], "rocksdb.cf-file-histogram", &prop
));
498 ASSERT_EQ(std::string::npos
, prop
.find("** Level 0 read latency histogram"));
499 ASSERT_EQ(std::string::npos
, prop
.find("** Level 1 read latency histogram"));
500 ASSERT_EQ(std::string::npos
, prop
.find("** Level 2 read latency histogram"));
501 // Put something and read it back; CF 1 should show a histogram.
502 ASSERT_OK(Put(1, "foo", "bar"));
504 ASSERT_OK(dbfull()->TEST_WaitForCompact());
505 ASSERT_EQ("bar", Get(1, "foo"));
508 dbfull()->GetProperty(handles_
[1], "rocksdb.cf-file-histogram", &prop
));
509 ASSERT_NE(std::string::npos
, prop
.find("** Level 0 read latency histogram"));
510 ASSERT_EQ(std::string::npos
, prop
.find("** Level 1 read latency histogram"));
511 ASSERT_EQ(std::string::npos
, prop
.find("** Level 2 read latency histogram"));
513 // options.max_open_files preloads table readers.
514 options
.max_open_files
= -1;
515 ReopenWithColumnFamilies({"default", "pikachu"}, options
);
516 ASSERT_TRUE(dbfull()->GetProperty(dbfull()->DefaultColumnFamily(),
517 "rocksdb.cf-file-histogram", &prop
));
518 ASSERT_NE(std::string::npos
, prop
.find("** Level 0 read latency histogram"));
519 ASSERT_NE(std::string::npos
, prop
.find("** Level 1 read latency histogram"));
520 ASSERT_EQ(std::string::npos
, prop
.find("** Level 2 read latency histogram"));
521 for (int key
= 0; key
< key_index
; key
++) {
524 ASSERT_TRUE(dbfull()->GetProperty("rocksdb.cfstats", &prop
));
525 ASSERT_NE(std::string::npos
, prop
.find("** Level 0 read latency histogram"));
526 ASSERT_NE(std::string::npos
, prop
.find("** Level 1 read latency histogram"));
527 ASSERT_EQ(std::string::npos
, prop
.find("** Level 2 read latency histogram"));
529 // Clear internal stats
530 ASSERT_OK(dbfull()->ResetStats());
531 ASSERT_TRUE(dbfull()->GetProperty("rocksdb.cfstats", &prop
));
532 ASSERT_EQ(std::string::npos
, prop
.find("** Level 0 read latency histogram"));
533 ASSERT_EQ(std::string::npos
, prop
.find("** Level 1 read latency histogram"));
534 ASSERT_EQ(std::string::npos
, prop
.find("** Level 2 read latency histogram"));
// Checks that the per-level aggregated-table-properties strings sum to the
// whole-DB aggregated property, and that the whole stays close to the
// analytic expectation. NOTE(review): Random rnd, loop closing braces, the
// DeleteRange start/end argument line and the trailing bias arguments /
// closing braces of the final VerifyTableProperties call were dropped by
// this extraction.
537 TEST_F(DBPropertiesTest
, AggregatedTablePropertiesAtLevel
) {
538 const int kTableCount
= 100;
539 const int kDeletionsPerTable
= 0;
540 const int kMergeOperandsPerTable
= 2;
541 const int kRangeDeletionsPerTable
= 2;
542 const int kPutsPerTable
= 10;
543 const int kKeySize
= 50;
544 const int kValueSize
= 400;
545 const int kMaxLevel
= 7;
546 const int kBloomBitsPerKey
= 20;
548 Options options
= CurrentOptions();
549 options
.level0_file_num_compaction_trigger
= 8;
550 options
.compression
= kNoCompression
;
551 options
.create_if_missing
= true;
552 options
.level0_file_num_compaction_trigger
= 2;
// Small file/level sizes so data spreads over several levels.
553 options
.target_file_size_base
= 8192;
554 options
.max_bytes_for_level_base
= 10000;
555 options
.max_bytes_for_level_multiplier
= 2;
556 // This ensures there is no compaction happening when we call GetProperty().
557 options
.disable_auto_compactions
= true;
558 options
.merge_operator
.reset(new TestPutOperator());
560 BlockBasedTableOptions table_options
;
561 table_options
.filter_policy
.reset(
562 NewBloomFilterPolicy(kBloomBitsPerKey
, false));
563 table_options
.block_size
= 1024;
564 options
.table_factory
.reset(NewBlockBasedTableFactory(table_options
));
566 DestroyAndReopen(options
);
568 // Hold open a snapshot to prevent range tombstones from being compacted away.
569 ManagedSnapshot
snapshot(db_
);
571 std::string level_tp_strings
[kMaxLevel
];
572 std::string tp_string
;
573 TableProperties level_tps
[kMaxLevel
];
574 TableProperties tp
, sum_tp
, expected_tp
;
575 for (int table
= 1; table
<= kTableCount
; ++table
) {
576 for (int i
= 0; i
< kPutsPerTable
; ++i
) {
577 ASSERT_OK(db_
->Put(WriteOptions(), rnd
.RandomString(kKeySize
),
578 rnd
.RandomString(kValueSize
)));
580 for (int i
= 0; i
< kDeletionsPerTable
; i
++) {
581 ASSERT_OK(db_
->Delete(WriteOptions(), rnd
.RandomString(kKeySize
)));
583 for (int i
= 0; i
< kMergeOperandsPerTable
; i
++) {
584 ASSERT_OK(db_
->Merge(WriteOptions(), rnd
.RandomString(kKeySize
),
585 rnd
.RandomString(kValueSize
)));
587 for (int i
= 0; i
< kRangeDeletionsPerTable
; i
++) {
588 std::string start
= rnd
.RandomString(kKeySize
);
589 std::string end
= start
;
590 end
.resize(kValueSize
);
591 ASSERT_OK(db_
->DeleteRange(WriteOptions(), db_
->DefaultColumnFamily(),
594 ASSERT_OK(db_
->Flush(FlushOptions()));
595 ASSERT_OK(db_
->CompactRange(CompactRangeOptions(), nullptr, nullptr));
// Accumulate the parsed per-level properties into sum_tp.
596 ResetTableProperties(&sum_tp
);
597 for (int level
= 0; level
< kMaxLevel
; ++level
) {
598 db_
->GetProperty(DB::Properties::kAggregatedTablePropertiesAtLevel
+
599 std::to_string(level
),
600 &level_tp_strings
[level
]);
601 ParseTablePropertiesString(level_tp_strings
[level
], &level_tps
[level
]);
602 sum_tp
.data_size
+= level_tps
[level
].data_size
;
603 sum_tp
.index_size
+= level_tps
[level
].index_size
;
604 sum_tp
.filter_size
+= level_tps
[level
].filter_size
;
605 sum_tp
.raw_key_size
+= level_tps
[level
].raw_key_size
;
606 sum_tp
.raw_value_size
+= level_tps
[level
].raw_value_size
;
607 sum_tp
.num_data_blocks
+= level_tps
[level
].num_data_blocks
;
608 sum_tp
.num_entries
+= level_tps
[level
].num_entries
;
609 sum_tp
.num_deletions
+= level_tps
[level
].num_deletions
;
610 sum_tp
.num_merge_operands
+= level_tps
[level
].num_merge_operands
;
611 sum_tp
.num_range_deletions
+= level_tps
[level
].num_range_deletions
;
// Per-level sums must equal the whole-DB aggregated property exactly.
613 db_
->GetProperty(DB::Properties::kAggregatedTableProperties
, &tp_string
);
614 ParseTablePropertiesString(tp_string
, &tp
);
615 bool index_key_is_user_key
= tp
.index_key_is_user_key
> 0;
616 bool value_is_delta_encoded
= tp
.index_value_is_delta_encoded
> 0;
617 ASSERT_EQ(sum_tp
.data_size
, tp
.data_size
);
618 ASSERT_EQ(sum_tp
.index_size
, tp
.index_size
);
619 ASSERT_EQ(sum_tp
.filter_size
, tp
.filter_size
);
620 ASSERT_EQ(sum_tp
.raw_key_size
, tp
.raw_key_size
);
621 ASSERT_EQ(sum_tp
.raw_value_size
, tp
.raw_value_size
);
622 ASSERT_EQ(sum_tp
.num_data_blocks
, tp
.num_data_blocks
);
623 ASSERT_EQ(sum_tp
.num_entries
, tp
.num_entries
);
624 ASSERT_EQ(sum_tp
.num_deletions
, tp
.num_deletions
);
625 ASSERT_EQ(sum_tp
.num_merge_operands
, tp
.num_merge_operands
);
626 ASSERT_EQ(sum_tp
.num_range_deletions
, tp
.num_range_deletions
);
628 GetExpectedTableProperties(
629 &expected_tp
, kKeySize
, kValueSize
, kPutsPerTable
, kDeletionsPerTable
,
630 kMergeOperandsPerTable
, kRangeDeletionsPerTable
, table
,
631 kBloomBitsPerKey
, table_options
.block_size
, index_key_is_user_key
,
632 value_is_delta_encoded
);
633 // Gives larger bias here as index block size, filter block size,
634 // and data block size become much harder to estimate in this test.
635 VerifyTableProperties(expected_tp
, tp
, CACHE_LINE_SIZE
>= 256 ? 0.6 : 0.5,
// Tracks mem-table properties (immutable count, entry/delete counts, active
// size, estimate-num-keys) as large values force memtable switches, and
// checks get_from_memtable_count via the perf context.
// NOTE(review): a leading "do {", the std::string num declaration, the Get()
// calls that drive the perf-context counters, the expected-value asserts on
// num, and various closing braces were dropped by this extraction.
641 TEST_F(DBPropertiesTest
, NumImmutableMemTable
) {
643 Options options
= CurrentOptions();
644 WriteOptions writeOpt
= WriteOptions();
645 writeOpt
.disableWAL
= true;
646 options
.max_write_buffer_number
= 4;
647 options
.min_write_buffer_number_to_merge
= 3;
648 options
.write_buffer_size
= 1000000;
649 options
.max_write_buffer_size_to_maintain
=
650 5 * static_cast<int64_t>(options
.write_buffer_size
);
651 CreateAndReopenWithCF({"pikachu"}, options
);
// Each value is twice the write buffer, so every Put fills a memtable.
653 std::string
big_value(1000000 * 2, 'x');
656 SetPerfLevel(kEnableTime
);
657 ASSERT_TRUE(GetPerfLevel() == kEnableTime
);
659 ASSERT_OK(dbfull()->Put(writeOpt
, handles_
[1], "k1", big_value
));
660 ASSERT_TRUE(dbfull()->GetProperty(handles_
[1],
661 "rocksdb.num-immutable-mem-table", &num
));
663 ASSERT_TRUE(dbfull()->GetProperty(
664 handles_
[1], DB::Properties::kNumImmutableMemTableFlushed
, &num
));
666 ASSERT_TRUE(dbfull()->GetProperty(
667 handles_
[1], "rocksdb.num-entries-active-mem-table", &num
));
669 get_perf_context()->Reset();
671 ASSERT_EQ(1, static_cast<int>(get_perf_context()->get_from_memtable_count
));
// Second big Put: one memtable becomes immutable.
673 ASSERT_OK(dbfull()->Put(writeOpt
, handles_
[1], "k2", big_value
));
674 ASSERT_TRUE(dbfull()->GetProperty(handles_
[1],
675 "rocksdb.num-immutable-mem-table", &num
));
677 ASSERT_TRUE(dbfull()->GetProperty(
678 handles_
[1], "rocksdb.num-entries-active-mem-table", &num
));
680 ASSERT_TRUE(dbfull()->GetProperty(
681 handles_
[1], "rocksdb.num-entries-imm-mem-tables", &num
));
684 get_perf_context()->Reset();
686 ASSERT_EQ(2, static_cast<int>(get_perf_context()->get_from_memtable_count
));
687 get_perf_context()->Reset();
689 ASSERT_EQ(1, static_cast<int>(get_perf_context()->get_from_memtable_count
));
// Third big Put: two immutable memtables pending.
691 ASSERT_OK(dbfull()->Put(writeOpt
, handles_
[1], "k3", big_value
));
692 ASSERT_TRUE(dbfull()->GetProperty(
693 handles_
[1], "rocksdb.cur-size-active-mem-table", &num
));
694 ASSERT_TRUE(dbfull()->GetProperty(handles_
[1],
695 "rocksdb.num-immutable-mem-table", &num
));
697 ASSERT_TRUE(dbfull()->GetProperty(
698 handles_
[1], "rocksdb.num-entries-active-mem-table", &num
));
700 ASSERT_TRUE(dbfull()->GetProperty(
701 handles_
[1], "rocksdb.num-entries-imm-mem-tables", &num
));
703 get_perf_context()->Reset();
705 ASSERT_EQ(2, static_cast<int>(get_perf_context()->get_from_memtable_count
));
706 get_perf_context()->Reset();
708 ASSERT_EQ(1, static_cast<int>(get_perf_context()->get_from_memtable_count
));
709 get_perf_context()->Reset();
711 ASSERT_EQ(3, static_cast<int>(get_perf_context()->get_from_memtable_count
));
// After flush: immutable count drops, flushed count rises.
714 ASSERT_TRUE(dbfull()->GetProperty(handles_
[1],
715 "rocksdb.num-immutable-mem-table", &num
));
717 ASSERT_TRUE(dbfull()->GetProperty(
718 handles_
[1], DB::Properties::kNumImmutableMemTableFlushed
, &num
));
720 ASSERT_TRUE(dbfull()->GetIntProperty(
721 handles_
[1], "rocksdb.cur-size-active-mem-table", &value
));
722 // "192" is the size of the metadata of two empty skiplists, this would
723 // break if we change the default skiplist implementation
724 ASSERT_GE(value
, 192);
727 uint64_t base_total_size
;
728 ASSERT_TRUE(dbfull()->GetIntProperty(
729 handles_
[1], "rocksdb.estimate-num-keys", &base_total_size
));
// Two deletes + one put in the active memtable: 2 deletes, 3 entries.
731 ASSERT_OK(dbfull()->Delete(writeOpt
, handles_
[1], "k2"));
732 ASSERT_OK(dbfull()->Put(writeOpt
, handles_
[1], "k3", ""));
733 ASSERT_OK(dbfull()->Delete(writeOpt
, handles_
[1], "k3"));
734 ASSERT_TRUE(dbfull()->GetIntProperty(
735 handles_
[1], "rocksdb.num-deletes-active-mem-table", &int_num
));
736 ASSERT_EQ(int_num
, 2U);
737 ASSERT_TRUE(dbfull()->GetIntProperty(
738 handles_
[1], "rocksdb.num-entries-active-mem-table", &int_num
));
739 ASSERT_EQ(int_num
, 3U);
// The two big Puts push the previous entries into immutable memtables.
741 ASSERT_OK(dbfull()->Put(writeOpt
, handles_
[1], "k2", big_value
));
742 ASSERT_OK(dbfull()->Put(writeOpt
, handles_
[1], "k2", big_value
));
743 ASSERT_TRUE(dbfull()->GetIntProperty(
744 handles_
[1], "rocksdb.num-entries-imm-mem-tables", &int_num
));
745 ASSERT_EQ(int_num
, 4U);
746 ASSERT_TRUE(dbfull()->GetIntProperty(
747 handles_
[1], "rocksdb.num-deletes-imm-mem-tables", &int_num
));
748 ASSERT_EQ(int_num
, 2U);
750 ASSERT_TRUE(dbfull()->GetIntProperty(
751 handles_
[1], "rocksdb.estimate-num-keys", &int_num
));
752 ASSERT_EQ(int_num
, base_total_size
+ 1);
754 SetPerfLevel(kDisable
);
755 ASSERT_TRUE(GetPerfLevel() == kDisable
);
756 } while (ChangeCompactOptions());
759 // TODO(techdept) : Disabled flaky test #12863555
760 TEST_F(DBPropertiesTest
, DISABLED_GetProperty
) {
761 // Set sizes to both background thread pool to be 1 and block them.
762 env_
->SetBackgroundThreads(1, Env::HIGH
);
763 env_
->SetBackgroundThreads(1, Env::LOW
);
764 test::SleepingBackgroundTask sleeping_task_low
;
765 env_
->Schedule(&test::SleepingBackgroundTask::DoSleepTask
, &sleeping_task_low
,
767 test::SleepingBackgroundTask sleeping_task_high
;
768 env_
->Schedule(&test::SleepingBackgroundTask::DoSleepTask
,
769 &sleeping_task_high
, Env::Priority::HIGH
);
771 Options options
= CurrentOptions();
772 WriteOptions writeOpt
= WriteOptions();
773 writeOpt
.disableWAL
= true;
774 options
.compaction_style
= kCompactionStyleUniversal
;
775 options
.level0_file_num_compaction_trigger
= 1;
776 options
.compaction_options_universal
.size_ratio
= 50;
777 options
.max_background_compactions
= 1;
778 options
.max_background_flushes
= 1;
779 options
.max_write_buffer_number
= 10;
780 options
.min_write_buffer_number_to_merge
= 1;
781 options
.max_write_buffer_size_to_maintain
= 0;
782 options
.write_buffer_size
= 1000000;
785 std::string
big_value(1000000 * 2, 'x');
788 SetPerfLevel(kEnableTime
);
791 dbfull()->GetIntProperty("rocksdb.estimate-table-readers-mem", &int_num
));
792 ASSERT_EQ(int_num
, 0U);
794 dbfull()->GetIntProperty("rocksdb.estimate-live-data-size", &int_num
));
795 ASSERT_EQ(int_num
, 0U);
797 ASSERT_OK(dbfull()->Put(writeOpt
, "k1", big_value
));
798 ASSERT_TRUE(dbfull()->GetProperty("rocksdb.num-immutable-mem-table", &num
));
800 ASSERT_TRUE(dbfull()->GetProperty("rocksdb.mem-table-flush-pending", &num
));
802 ASSERT_TRUE(dbfull()->GetProperty("rocksdb.compaction-pending", &num
));
804 ASSERT_TRUE(dbfull()->GetProperty("rocksdb.estimate-num-keys", &num
));
806 get_perf_context()->Reset();
808 ASSERT_OK(dbfull()->Put(writeOpt
, "k2", big_value
));
809 ASSERT_TRUE(dbfull()->GetProperty("rocksdb.num-immutable-mem-table", &num
));
811 ASSERT_OK(dbfull()->Delete(writeOpt
, "k-non-existing"));
812 ASSERT_OK(dbfull()->Put(writeOpt
, "k3", big_value
));
813 ASSERT_TRUE(dbfull()->GetProperty("rocksdb.num-immutable-mem-table", &num
));
815 ASSERT_TRUE(dbfull()->GetProperty("rocksdb.mem-table-flush-pending", &num
));
817 ASSERT_TRUE(dbfull()->GetProperty("rocksdb.compaction-pending", &num
));
819 ASSERT_TRUE(dbfull()->GetProperty("rocksdb.estimate-num-keys", &num
));
821 // Verify the same set of properties through GetIntProperty
823 dbfull()->GetIntProperty("rocksdb.num-immutable-mem-table", &int_num
));
824 ASSERT_EQ(int_num
, 2U);
826 dbfull()->GetIntProperty("rocksdb.mem-table-flush-pending", &int_num
));
827 ASSERT_EQ(int_num
, 1U);
828 ASSERT_TRUE(dbfull()->GetIntProperty("rocksdb.compaction-pending", &int_num
));
829 ASSERT_EQ(int_num
, 0U);
830 ASSERT_TRUE(dbfull()->GetIntProperty("rocksdb.estimate-num-keys", &int_num
));
831 ASSERT_EQ(int_num
, 2U);
834 dbfull()->GetIntProperty("rocksdb.estimate-table-readers-mem", &int_num
));
835 ASSERT_EQ(int_num
, 0U);
837 sleeping_task_high
.WakeUp();
838 sleeping_task_high
.WaitUntilDone();
839 dbfull()->TEST_WaitForFlushMemTable();
841 ASSERT_OK(dbfull()->Put(writeOpt
, "k4", big_value
));
842 ASSERT_OK(dbfull()->Put(writeOpt
, "k5", big_value
));
843 dbfull()->TEST_WaitForFlushMemTable();
844 ASSERT_TRUE(dbfull()->GetProperty("rocksdb.mem-table-flush-pending", &num
));
846 ASSERT_TRUE(dbfull()->GetProperty("rocksdb.compaction-pending", &num
));
848 ASSERT_TRUE(dbfull()->GetProperty("rocksdb.estimate-num-keys", &num
));
852 dbfull()->GetIntProperty("rocksdb.estimate-table-readers-mem", &int_num
));
853 ASSERT_GT(int_num
, 0U);
855 sleeping_task_low
.WakeUp();
856 sleeping_task_low
.WaitUntilDone();
858 // Wait for compaction to be done. This is important because otherwise RocksDB
859 // might schedule a compaction when reopening the database, failing assertion
861 ASSERT_OK(dbfull()->TEST_WaitForCompact());
862 options
.max_open_files
= 10;
864 // After reopening, no table reader is loaded, so no memory for table readers
866 dbfull()->GetIntProperty("rocksdb.estimate-table-readers-mem", &int_num
));
867 ASSERT_EQ(int_num
, 0U); // (A)
868 ASSERT_TRUE(dbfull()->GetIntProperty("rocksdb.estimate-num-keys", &int_num
));
869 ASSERT_GT(int_num
, 0U);
871 // After reading a key, at least one table reader is loaded.
874 dbfull()->GetIntProperty("rocksdb.estimate-table-readers-mem", &int_num
));
875 ASSERT_GT(int_num
, 0U);
877 // Test rocksdb.num-live-versions
879 options
.level0_file_num_compaction_trigger
= 20;
882 dbfull()->GetIntProperty("rocksdb.num-live-versions", &int_num
));
883 ASSERT_EQ(int_num
, 1U);
885 // Use an iterator to hold current version
886 std::unique_ptr
<Iterator
> iter1(dbfull()->NewIterator(ReadOptions()));
888 ASSERT_OK(dbfull()->Put(writeOpt
, "k6", big_value
));
891 dbfull()->GetIntProperty("rocksdb.num-live-versions", &int_num
));
892 ASSERT_EQ(int_num
, 2U);
894 // Use an iterator to hold current version
895 std::unique_ptr
<Iterator
> iter2(dbfull()->NewIterator(ReadOptions()));
897 ASSERT_OK(dbfull()->Put(writeOpt
, "k7", big_value
));
900 dbfull()->GetIntProperty("rocksdb.num-live-versions", &int_num
));
901 ASSERT_EQ(int_num
, 3U);
905 dbfull()->GetIntProperty("rocksdb.num-live-versions", &int_num
));
906 ASSERT_EQ(int_num
, 2U);
910 dbfull()->GetIntProperty("rocksdb.num-live-versions", &int_num
));
911 ASSERT_EQ(int_num
, 1U);
915 TEST_F(DBPropertiesTest
, ApproximateMemoryUsage
) {
916 const int kNumRounds
= 10;
917 // TODO(noetzli) kFlushesPerRound does not really correlate with how many
919 const int kFlushesPerRound
= 10;
920 const int kWritesPerFlush
= 10;
921 const int kKeySize
= 100;
922 const int kValueSize
= 1000;
924 options
.write_buffer_size
= 1000; // small write buffer
925 options
.min_write_buffer_number_to_merge
= 4;
926 options
.compression
= kNoCompression
;
927 options
.create_if_missing
= true;
928 options
= CurrentOptions(options
);
929 DestroyAndReopen(options
);
933 std::vector
<Iterator
*> iters
;
936 uint64_t unflushed_mem
;
938 uint64_t prev_all_mem
;
940 // Phase 0. The verify the initial value of all these properties are the same
941 // as we have no mem-tables.
942 dbfull()->GetIntProperty("rocksdb.cur-size-active-mem-table", &active_mem
);
943 dbfull()->GetIntProperty("rocksdb.cur-size-all-mem-tables", &unflushed_mem
);
944 dbfull()->GetIntProperty("rocksdb.size-all-mem-tables", &all_mem
);
945 ASSERT_EQ(all_mem
, active_mem
);
946 ASSERT_EQ(all_mem
, unflushed_mem
);
948 // Phase 1. Simply issue Put() and expect "cur-size-all-mem-tables" equals to
949 // "size-all-mem-tables"
950 for (int r
= 0; r
< kNumRounds
; ++r
) {
951 for (int f
= 0; f
< kFlushesPerRound
; ++f
) {
952 for (int w
= 0; w
< kWritesPerFlush
; ++w
) {
954 Put(rnd
.RandomString(kKeySize
), rnd
.RandomString(kValueSize
)));
957 // Make sure that there is no flush between getting the two properties.
958 ASSERT_OK(dbfull()->TEST_WaitForFlushMemTable());
959 dbfull()->GetIntProperty("rocksdb.cur-size-all-mem-tables", &unflushed_mem
);
960 dbfull()->GetIntProperty("rocksdb.size-all-mem-tables", &all_mem
);
961 // in no iterator case, these two number should be the same.
962 ASSERT_EQ(unflushed_mem
, all_mem
);
964 prev_all_mem
= all_mem
;
966 // Phase 2. Keep issuing Put() but also create new iterators. This time we
967 // expect "size-all-mem-tables" > "cur-size-all-mem-tables".
968 for (int r
= 0; r
< kNumRounds
; ++r
) {
969 iters
.push_back(db_
->NewIterator(ReadOptions()));
970 for (int f
= 0; f
< kFlushesPerRound
; ++f
) {
971 for (int w
= 0; w
< kWritesPerFlush
; ++w
) {
973 Put(rnd
.RandomString(kKeySize
), rnd
.RandomString(kValueSize
)));
976 // Force flush to prevent flush from happening between getting the
977 // properties or after getting the properties and before the new round.
980 // In the second round, add iterators.
981 dbfull()->GetIntProperty("rocksdb.cur-size-active-mem-table", &active_mem
);
982 dbfull()->GetIntProperty("rocksdb.cur-size-all-mem-tables", &unflushed_mem
);
983 dbfull()->GetIntProperty("rocksdb.size-all-mem-tables", &all_mem
);
984 ASSERT_GT(all_mem
, active_mem
);
985 ASSERT_GT(all_mem
, unflushed_mem
);
986 ASSERT_GT(all_mem
, prev_all_mem
);
987 prev_all_mem
= all_mem
;
990 // Phase 3. Delete iterators and expect "size-all-mem-tables" shrinks
991 // whenever we release an iterator.
992 for (auto* iter
: iters
) {
993 ASSERT_OK(iter
->status());
995 dbfull()->GetIntProperty("rocksdb.size-all-mem-tables", &all_mem
);
996 // Expect the size shrinking
997 ASSERT_LT(all_mem
, prev_all_mem
);
998 prev_all_mem
= all_mem
;
1001 // Expect all these three counters to be the same.
1002 dbfull()->GetIntProperty("rocksdb.cur-size-active-mem-table", &active_mem
);
1003 dbfull()->GetIntProperty("rocksdb.cur-size-all-mem-tables", &unflushed_mem
);
1004 dbfull()->GetIntProperty("rocksdb.size-all-mem-tables", &all_mem
);
1005 ASSERT_EQ(active_mem
, unflushed_mem
);
1006 ASSERT_EQ(unflushed_mem
, all_mem
);
1008 // Phase 5. Reopen, and expect all these three counters to be the same again.
1010 dbfull()->GetIntProperty("rocksdb.cur-size-active-mem-table", &active_mem
);
1011 dbfull()->GetIntProperty("rocksdb.cur-size-all-mem-tables", &unflushed_mem
);
1012 dbfull()->GetIntProperty("rocksdb.size-all-mem-tables", &all_mem
);
1013 ASSERT_EQ(active_mem
, unflushed_mem
);
1014 ASSERT_EQ(unflushed_mem
, all_mem
);
1017 TEST_F(DBPropertiesTest
, EstimatePendingCompBytes
) {
1018 // Set sizes to both background thread pool to be 1 and block them.
1019 env_
->SetBackgroundThreads(1, Env::HIGH
);
1020 env_
->SetBackgroundThreads(1, Env::LOW
);
1021 test::SleepingBackgroundTask sleeping_task_low
;
1022 env_
->Schedule(&test::SleepingBackgroundTask::DoSleepTask
, &sleeping_task_low
,
1023 Env::Priority::LOW
);
1025 Options options
= CurrentOptions();
1026 WriteOptions writeOpt
= WriteOptions();
1027 writeOpt
.disableWAL
= true;
1028 options
.compaction_style
= kCompactionStyleLevel
;
1029 options
.level0_file_num_compaction_trigger
= 2;
1030 options
.max_background_compactions
= 1;
1031 options
.max_background_flushes
= 1;
1032 options
.max_write_buffer_number
= 10;
1033 options
.min_write_buffer_number_to_merge
= 1;
1034 options
.max_write_buffer_size_to_maintain
= 0;
1035 options
.write_buffer_size
= 1000000;
1038 std::string
big_value(1000000 * 2, 'x');
1042 ASSERT_OK(dbfull()->Put(writeOpt
, "k1", big_value
));
1044 ASSERT_TRUE(dbfull()->GetIntProperty(
1045 "rocksdb.estimate-pending-compaction-bytes", &int_num
));
1046 ASSERT_EQ(int_num
, 0U);
1048 ASSERT_OK(dbfull()->Put(writeOpt
, "k2", big_value
));
1050 ASSERT_TRUE(dbfull()->GetIntProperty(
1051 "rocksdb.estimate-pending-compaction-bytes", &int_num
));
1052 ASSERT_GT(int_num
, 0U);
1054 ASSERT_OK(dbfull()->Put(writeOpt
, "k3", big_value
));
1056 ASSERT_TRUE(dbfull()->GetIntProperty(
1057 "rocksdb.estimate-pending-compaction-bytes", &int_num
));
1058 ASSERT_GT(int_num
, 0U);
1060 sleeping_task_low
.WakeUp();
1061 sleeping_task_low
.WaitUntilDone();
1063 ASSERT_OK(dbfull()->TEST_WaitForCompact());
1064 ASSERT_TRUE(dbfull()->GetIntProperty(
1065 "rocksdb.estimate-pending-compaction-bytes", &int_num
));
1066 ASSERT_EQ(int_num
, 0U);
1069 TEST_F(DBPropertiesTest
, EstimateCompressionRatio
) {
1070 if (!Snappy_Supported()) {
1073 const int kNumL0Files
= 3;
1074 const int kNumEntriesPerFile
= 1000;
1076 Options options
= CurrentOptions();
1077 options
.disable_auto_compactions
= true;
1078 options
.num_levels
= 3;
1081 ASSERT_OK(db_
->SetOptions(
1082 {{"compression_per_level", "kNoCompression:kSnappyCompression"}}));
1083 auto opts
= db_
->GetOptions();
1084 ASSERT_EQ(opts
.compression_per_level
.size(), 2);
1085 ASSERT_EQ(opts
.compression_per_level
[0], kNoCompression
);
1086 ASSERT_EQ(opts
.compression_per_level
[1], kSnappyCompression
);
1088 // compression ratio is -1.0 when no open files at level
1089 ASSERT_EQ(CompressionRatioAtLevel(0), -1.0);
1091 const std::string
kVal(100, 'a');
1092 for (int i
= 0; i
< kNumL0Files
; ++i
) {
1093 for (int j
= 0; j
< kNumEntriesPerFile
; ++j
) {
1094 // Put common data ("key") at end to prevent delta encoding from
1095 // compressing the key effectively
1096 std::string key
= std::to_string(i
) + std::to_string(j
) + "key";
1097 ASSERT_OK(dbfull()->Put(WriteOptions(), key
, kVal
));
1102 // no compression at L0, so ratio is less than one
1103 ASSERT_LT(CompressionRatioAtLevel(0), 1.0);
1104 ASSERT_GT(CompressionRatioAtLevel(0), 0.0);
1105 ASSERT_EQ(CompressionRatioAtLevel(1), -1.0);
1107 ASSERT_OK(dbfull()->TEST_CompactRange(0, nullptr, nullptr));
1109 ASSERT_EQ(CompressionRatioAtLevel(0), -1.0);
1110 // Data at L1 should be highly compressed thanks to Snappy and redundant data
1111 // in values (ratio is 12.846 as of 4/19/2016).
1112 ASSERT_GT(CompressionRatioAtLevel(1), 10.0);
1115 #endif // ROCKSDB_LITE
1117 class CountingUserTblPropCollector
: public TablePropertiesCollector
{
1119 const char* Name() const override
{ return "CountingUserTblPropCollector"; }
1121 Status
Finish(UserCollectedProperties
* properties
) override
{
1122 std::string encoded
;
1123 PutVarint32(&encoded
, count_
);
1124 *properties
= UserCollectedProperties
{
1125 {"CountingUserTblPropCollector", message_
},
1128 return Status::OK();
1131 Status
AddUserKey(const Slice
& /*user_key*/, const Slice
& /*value*/,
1132 EntryType
/*type*/, SequenceNumber
/*seq*/,
1133 uint64_t /*file_size*/) override
{
1135 return Status::OK();
1138 UserCollectedProperties
GetReadableProperties() const override
{
1139 return UserCollectedProperties
{};
1143 std::string message_
= "Rocksdb";
1144 uint32_t count_
= 0;
1147 class CountingUserTblPropCollectorFactory
1148 : public TablePropertiesCollectorFactory
{
1150 explicit CountingUserTblPropCollectorFactory(
1151 uint32_t expected_column_family_id
)
1152 : expected_column_family_id_(expected_column_family_id
),
1154 TablePropertiesCollector
* CreateTablePropertiesCollector(
1155 TablePropertiesCollectorFactory::Context context
) override
{
1156 EXPECT_EQ(expected_column_family_id_
, context
.column_family_id
);
1158 return new CountingUserTblPropCollector();
1160 const char* Name() const override
{
1161 return "CountingUserTblPropCollectorFactory";
1163 void set_expected_column_family_id(uint32_t v
) {
1164 expected_column_family_id_
= v
;
1166 uint32_t expected_column_family_id_
;
1167 uint32_t num_created_
;
1170 class CountingDeleteTabPropCollector
: public TablePropertiesCollector
{
1172 const char* Name() const override
{ return "CountingDeleteTabPropCollector"; }
1174 Status
AddUserKey(const Slice
& /*user_key*/, const Slice
& /*value*/,
1175 EntryType type
, SequenceNumber
/*seq*/,
1176 uint64_t /*file_size*/) override
{
1177 if (type
== kEntryDelete
) {
1180 return Status::OK();
1183 bool NeedCompact() const override
{ return num_deletes_
> 10; }
1185 UserCollectedProperties
GetReadableProperties() const override
{
1186 return UserCollectedProperties
{};
1189 Status
Finish(UserCollectedProperties
* properties
) override
{
1191 UserCollectedProperties
{{"num_delete", std::to_string(num_deletes_
)}};
1192 return Status::OK();
1196 uint32_t num_deletes_
= 0;
1199 class CountingDeleteTabPropCollectorFactory
1200 : public TablePropertiesCollectorFactory
{
1202 TablePropertiesCollector
* CreateTablePropertiesCollector(
1203 TablePropertiesCollectorFactory::Context
/*context*/) override
{
1204 return new CountingDeleteTabPropCollector();
1206 const char* Name() const override
{
1207 return "CountingDeleteTabPropCollectorFactory";
1211 class BlockCountingTablePropertiesCollector
: public TablePropertiesCollector
{
1213 static const std::string kNumSampledBlocksPropertyName
;
1215 const char* Name() const override
{
1216 return "BlockCountingTablePropertiesCollector";
1219 Status
Finish(UserCollectedProperties
* properties
) override
{
1220 (*properties
)[kNumSampledBlocksPropertyName
] =
1221 std::to_string(num_sampled_blocks_
);
1222 return Status::OK();
1225 Status
AddUserKey(const Slice
& /*user_key*/, const Slice
& /*value*/,
1226 EntryType
/*type*/, SequenceNumber
/*seq*/,
1227 uint64_t /*file_size*/) override
{
1228 return Status::OK();
1231 void BlockAdd(uint64_t /* block_uncomp_bytes */,
1232 uint64_t block_compressed_bytes_fast
,
1233 uint64_t block_compressed_bytes_slow
) override
{
1234 if (block_compressed_bytes_fast
> 0 || block_compressed_bytes_slow
> 0) {
1235 num_sampled_blocks_
++;
1239 UserCollectedProperties
GetReadableProperties() const override
{
1240 return UserCollectedProperties
{
1241 {kNumSampledBlocksPropertyName
, std::to_string(num_sampled_blocks_
)},
1246 uint32_t num_sampled_blocks_
= 0;
1250 BlockCountingTablePropertiesCollector::kNumSampledBlocksPropertyName
=
1253 class BlockCountingTablePropertiesCollectorFactory
1254 : public TablePropertiesCollectorFactory
{
1256 const char* Name() const override
{
1257 return "BlockCountingTablePropertiesCollectorFactory";
1260 TablePropertiesCollector
* CreateTablePropertiesCollector(
1261 TablePropertiesCollectorFactory::Context
/* context */) override
{
1262 return new BlockCountingTablePropertiesCollector();
1266 #ifndef ROCKSDB_LITE
1267 TEST_F(DBPropertiesTest
, GetUserDefinedTableProperties
) {
1268 Options options
= CurrentOptions();
1269 options
.level0_file_num_compaction_trigger
= (1 << 30);
1270 options
.table_properties_collector_factories
.resize(1);
1271 std::shared_ptr
<CountingUserTblPropCollectorFactory
> collector_factory
=
1272 std::make_shared
<CountingUserTblPropCollectorFactory
>(0);
1273 options
.table_properties_collector_factories
[0] = collector_factory
;
1276 for (int table
= 0; table
< 4; ++table
) {
1277 for (int i
= 0; i
< 10 + table
; ++i
) {
1279 db_
->Put(WriteOptions(), std::to_string(table
* 100 + i
), "val"));
1281 ASSERT_OK(db_
->Flush(FlushOptions()));
1284 TablePropertiesCollection props
;
1285 ASSERT_OK(db_
->GetPropertiesOfAllTables(&props
));
1286 ASSERT_EQ(4U, props
.size());
1288 for (const auto& item
: props
) {
1289 auto& user_collected
= item
.second
->user_collected_properties
;
1290 ASSERT_TRUE(user_collected
.find("CountingUserTblPropCollector") !=
1291 user_collected
.end());
1292 ASSERT_EQ(user_collected
.at("CountingUserTblPropCollector"), "Rocksdb");
1293 ASSERT_TRUE(user_collected
.find("Count") != user_collected
.end());
1294 Slice
key(user_collected
.at("Count"));
1296 ASSERT_TRUE(GetVarint32(&key
, &count
));
1299 ASSERT_EQ(10u + 11u + 12u + 13u, sum
);
1301 ASSERT_GT(collector_factory
->num_created_
, 0U);
1302 collector_factory
->num_created_
= 0;
1303 ASSERT_OK(dbfull()->TEST_CompactRange(0, nullptr, nullptr));
1304 ASSERT_GT(collector_factory
->num_created_
, 0U);
1306 #endif // ROCKSDB_LITE
1308 TEST_F(DBPropertiesTest
, UserDefinedTablePropertiesContext
) {
1309 Options options
= CurrentOptions();
1310 options
.level0_file_num_compaction_trigger
= 3;
1311 options
.table_properties_collector_factories
.resize(1);
1312 std::shared_ptr
<CountingUserTblPropCollectorFactory
> collector_factory
=
1313 std::make_shared
<CountingUserTblPropCollectorFactory
>(1);
1314 options
.table_properties_collector_factories
[0] = collector_factory
,
1315 CreateAndReopenWithCF({"pikachu"}, options
);
1317 for (int table
= 0; table
< 2; ++table
) {
1318 for (int i
= 0; i
< 10 + table
; ++i
) {
1319 ASSERT_OK(Put(1, std::to_string(table
* 100 + i
), "val"));
1321 ASSERT_OK(Flush(1));
1323 ASSERT_GT(collector_factory
->num_created_
, 0U);
1325 collector_factory
->num_created_
= 0;
1326 // Trigger automatic compactions.
1327 for (int table
= 0; table
< 3; ++table
) {
1328 for (int i
= 0; i
< 10 + table
; ++i
) {
1329 ASSERT_OK(Put(1, std::to_string(table
* 100 + i
), "val"));
1331 ASSERT_OK(Flush(1));
1332 ASSERT_OK(dbfull()->TEST_WaitForCompact());
1334 ASSERT_GT(collector_factory
->num_created_
, 0U);
1336 collector_factory
->num_created_
= 0;
1337 ASSERT_OK(dbfull()->TEST_CompactRange(0, nullptr, nullptr, handles_
[1]));
1338 ASSERT_GT(collector_factory
->num_created_
, 0U);
1340 // Come back to write to default column family
1341 collector_factory
->num_created_
= 0;
1342 collector_factory
->set_expected_column_family_id(0); // default CF
1343 // Create 4 tables in default column family
1344 for (int table
= 0; table
< 2; ++table
) {
1345 for (int i
= 0; i
< 10 + table
; ++i
) {
1346 ASSERT_OK(Put(std::to_string(table
* 100 + i
), "val"));
1350 ASSERT_GT(collector_factory
->num_created_
, 0U);
1352 collector_factory
->num_created_
= 0;
1353 // Trigger automatic compactions.
1354 for (int table
= 0; table
< 3; ++table
) {
1355 for (int i
= 0; i
< 10 + table
; ++i
) {
1356 ASSERT_OK(Put(std::to_string(table
* 100 + i
), "val"));
1359 ASSERT_OK(dbfull()->TEST_WaitForCompact());
1361 ASSERT_GT(collector_factory
->num_created_
, 0U);
1363 collector_factory
->num_created_
= 0;
1364 ASSERT_OK(dbfull()->TEST_CompactRange(0, nullptr, nullptr));
1365 ASSERT_GT(collector_factory
->num_created_
, 0U);
1368 #ifndef ROCKSDB_LITE
1369 TEST_F(DBPropertiesTest
, TablePropertiesNeedCompactTest
) {
1373 options
.create_if_missing
= true;
1374 options
.write_buffer_size
= 4096;
1375 options
.max_write_buffer_number
= 8;
1376 options
.level0_file_num_compaction_trigger
= 2;
1377 options
.level0_slowdown_writes_trigger
= 2;
1378 options
.level0_stop_writes_trigger
= 4;
1379 options
.target_file_size_base
= 2048;
1380 options
.max_bytes_for_level_base
= 10240;
1381 options
.max_bytes_for_level_multiplier
= 4;
1382 options
.soft_pending_compaction_bytes_limit
= 1024 * 1024;
1383 options
.num_levels
= 8;
1386 std::shared_ptr
<TablePropertiesCollectorFactory
> collector_factory
=
1387 std::make_shared
<CountingDeleteTabPropCollectorFactory
>();
1388 options
.table_properties_collector_factories
.resize(1);
1389 options
.table_properties_collector_factories
[0] = collector_factory
;
1391 DestroyAndReopen(options
);
1393 const int kMaxKey
= 1000;
1394 for (int i
= 0; i
< kMaxKey
; i
++) {
1395 ASSERT_OK(Put(Key(i
), rnd
.RandomString(102)));
1396 ASSERT_OK(Put(Key(kMaxKey
+ i
), rnd
.RandomString(102)));
1399 ASSERT_OK(dbfull()->TEST_WaitForCompact());
1400 if (NumTableFilesAtLevel(0) == 1) {
1401 // Clear Level 0 so that when later flush a file with deletions,
1402 // we don't trigger an organic compaction.
1403 ASSERT_OK(Put(Key(0), ""));
1404 ASSERT_OK(Put(Key(kMaxKey
* 2), ""));
1406 ASSERT_OK(dbfull()->TEST_WaitForCompact());
1408 ASSERT_EQ(NumTableFilesAtLevel(0), 0);
1412 std::unique_ptr
<Iterator
> iter(db_
->NewIterator(ReadOptions()));
1413 iter
->Seek(Key(kMaxKey
- 100));
1414 while (iter
->Valid() && iter
->key().compare(Key(kMaxKey
+ 100)) < 0) {
1418 ASSERT_OK(iter
->status());
1422 ASSERT_OK(Delete(Key(0)));
1423 for (int i
= kMaxKey
- 100; i
< kMaxKey
+ 100; i
++) {
1424 ASSERT_OK(Delete(Key(i
)));
1426 ASSERT_OK(Delete(Key(kMaxKey
* 2)));
1429 ASSERT_OK(dbfull()->TEST_WaitForCompact());
1432 SetPerfLevel(kEnableCount
);
1433 get_perf_context()->Reset();
1435 std::unique_ptr
<Iterator
> iter(db_
->NewIterator(ReadOptions()));
1436 iter
->Seek(Key(kMaxKey
- 100));
1437 while (iter
->Valid() && iter
->key().compare(Key(kMaxKey
+ 100)) < 0) {
1440 ASSERT_OK(iter
->status());
1442 ASSERT_LT(get_perf_context()->internal_delete_skipped_count
, 30u);
1443 ASSERT_LT(get_perf_context()->internal_key_skipped_count
, 30u);
1444 SetPerfLevel(kDisable
);
1448 TEST_F(DBPropertiesTest
, NeedCompactHintPersistentTest
) {
1452 options
.create_if_missing
= true;
1453 options
.max_write_buffer_number
= 8;
1454 options
.level0_file_num_compaction_trigger
= 10;
1455 options
.level0_slowdown_writes_trigger
= 10;
1456 options
.level0_stop_writes_trigger
= 10;
1457 options
.disable_auto_compactions
= true;
1460 std::shared_ptr
<TablePropertiesCollectorFactory
> collector_factory
=
1461 std::make_shared
<CountingDeleteTabPropCollectorFactory
>();
1462 options
.table_properties_collector_factories
.resize(1);
1463 options
.table_properties_collector_factories
[0] = collector_factory
;
1465 DestroyAndReopen(options
);
1467 const int kMaxKey
= 100;
1468 for (int i
= 0; i
< kMaxKey
; i
++) {
1469 ASSERT_OK(Put(Key(i
), ""));
1472 ASSERT_OK(dbfull()->TEST_WaitForFlushMemTable());
1474 for (int i
= 1; i
< kMaxKey
- 1; i
++) {
1475 ASSERT_OK(Delete(Key(i
)));
1478 ASSERT_OK(dbfull()->TEST_WaitForFlushMemTable());
1479 ASSERT_EQ(NumTableFilesAtLevel(0), 2);
1481 // Restart the DB. Although number of files didn't reach
1482 // options.level0_file_num_compaction_trigger, compaction should
1483 // still be triggered because of the need-compaction hint.
1484 options
.disable_auto_compactions
= false;
1486 ASSERT_OK(dbfull()->TEST_WaitForCompact());
1487 ASSERT_EQ(NumTableFilesAtLevel(0), 0);
1489 SetPerfLevel(kEnableCount
);
1490 get_perf_context()->Reset();
1492 std::unique_ptr
<Iterator
> iter(db_
->NewIterator(ReadOptions()));
1493 for (iter
->Seek(Key(0)); iter
->Valid(); iter
->Next()) {
1496 ASSERT_OK(iter
->status());
1498 ASSERT_EQ(get_perf_context()->internal_delete_skipped_count
, 0);
1499 // We iterate every key twice. Is it a bug?
1500 ASSERT_LE(get_perf_context()->internal_key_skipped_count
, 2);
1501 SetPerfLevel(kDisable
);
1505 // Excluded from RocksDB lite tests due to `GetPropertiesOfAllTables()` usage.
1506 TEST_F(DBPropertiesTest
, BlockAddForCompressionSampling
) {
1507 // Sampled compression requires at least one of the following four types.
1508 if (!Snappy_Supported() && !Zlib_Supported() && !LZ4_Supported() &&
1509 !ZSTD_Supported()) {
1513 Options options
= CurrentOptions();
1514 options
.disable_auto_compactions
= true;
1515 options
.table_properties_collector_factories
.emplace_back(
1516 std::make_shared
<BlockCountingTablePropertiesCollectorFactory
>());
1518 for (bool sample_for_compression
: {false, true}) {
1519 // For simplicity/determinism, sample 100% when enabled, or 0% when disabled
1520 options
.sample_for_compression
= sample_for_compression
? 1 : 0;
1522 DestroyAndReopen(options
);
1524 // Setup the following LSM:
1529 // L0_0 was created by flush. L1_0 was created by compaction. Each file
1530 // contains one data block.
1531 for (int i
= 0; i
< 3; ++i
) {
1532 ASSERT_OK(Put("a", "val"));
1533 ASSERT_OK(Put("b", "val"));
1536 ASSERT_OK(db_
->CompactRange(CompactRangeOptions(), nullptr, nullptr));
1540 // A `BlockAdd()` should have been seen for files generated by flush or
1541 // compaction when `sample_for_compression` is enabled.
1542 TablePropertiesCollection file_to_props
;
1543 ASSERT_OK(db_
->GetPropertiesOfAllTables(&file_to_props
));
1544 ASSERT_EQ(2, file_to_props
.size());
1545 for (const auto& file_and_props
: file_to_props
) {
1546 auto& user_props
= file_and_props
.second
->user_collected_properties
;
1547 ASSERT_TRUE(user_props
.find(BlockCountingTablePropertiesCollector::
1548 kNumSampledBlocksPropertyName
) !=
1550 ASSERT_EQ(user_props
.at(BlockCountingTablePropertiesCollector::
1551 kNumSampledBlocksPropertyName
),
1552 std::to_string(sample_for_compression
? 1 : 0));
1557 class CompressionSamplingDBPropertiesTest
1558 : public DBPropertiesTest
,
1559 public ::testing::WithParamInterface
<bool> {
1561 CompressionSamplingDBPropertiesTest() : fast_(GetParam()) {}
1567 INSTANTIATE_TEST_CASE_P(CompressionSamplingDBPropertiesTest
,
1568 CompressionSamplingDBPropertiesTest
, ::testing::Bool());
1570 // Excluded from RocksDB lite tests due to `GetPropertiesOfAllTables()` usage.
1571 TEST_P(CompressionSamplingDBPropertiesTest
,
1572 EstimateDataSizeWithCompressionSampling
) {
1573 Options options
= CurrentOptions();
1575 // One of the following light compression libraries must be present.
1576 if (LZ4_Supported()) {
1577 options
.compression
= kLZ4Compression
;
1578 } else if (Snappy_Supported()) {
1579 options
.compression
= kSnappyCompression
;
1584 // One of the following heavy compression libraries must be present.
1585 if (ZSTD_Supported()) {
1586 options
.compression
= kZSTD
;
1587 } else if (Zlib_Supported()) {
1588 options
.compression
= kZlibCompression
;
1593 options
.disable_auto_compactions
= true;
1594 // For simplicity/determinism, sample 100%.
1595 options
.sample_for_compression
= 1;
1598 // Setup the following LSM:
1603 // L0_0 was created by flush. L1_0 was created by compaction. Each file
1604 // contains one data block. The value consists of compressible data so the
1605 // data block should be stored compressed.
1606 std::string
val(1024, 'a');
1607 for (int i
= 0; i
< 3; ++i
) {
1608 ASSERT_OK(Put("a", val
));
1609 ASSERT_OK(Put("b", val
));
1612 ASSERT_OK(db_
->CompactRange(CompactRangeOptions(), nullptr, nullptr));
1616 TablePropertiesCollection file_to_props
;
1617 ASSERT_OK(db_
->GetPropertiesOfAllTables(&file_to_props
));
1618 ASSERT_EQ(2, file_to_props
.size());
1619 for (const auto& file_and_props
: file_to_props
) {
1620 ASSERT_GT(file_and_props
.second
->data_size
, 0);
1622 ASSERT_EQ(file_and_props
.second
->data_size
,
1623 file_and_props
.second
->fast_compression_estimated_data_size
);
1625 ASSERT_EQ(file_and_props
.second
->data_size
,
1626 file_and_props
.second
->slow_compression_estimated_data_size
);
1631 TEST_F(DBPropertiesTest
, EstimateNumKeysUnderflow
) {
1632 Options options
= CurrentOptions();
1634 ASSERT_OK(Put("foo", "bar"));
1635 ASSERT_OK(Delete("foo"));
1636 ASSERT_OK(Delete("foo"));
1637 uint64_t num_keys
= 0;
1638 ASSERT_TRUE(dbfull()->GetIntProperty("rocksdb.estimate-num-keys", &num_keys
));
1639 ASSERT_EQ(0, num_keys
);
1642 TEST_F(DBPropertiesTest
, EstimateOldestKeyTime
) {
1643 uint64_t oldest_key_time
= 0;
1644 Options options
= CurrentOptions();
1645 SetTimeElapseOnlySleepOnReopen(&options
);
1647 // "rocksdb.estimate-oldest-key-time" only available to fifo compaction.
1648 for (auto compaction
: {kCompactionStyleLevel
, kCompactionStyleUniversal
,
1649 kCompactionStyleNone
}) {
1650 options
.compaction_style
= compaction
;
1651 options
.create_if_missing
= true;
1652 DestroyAndReopen(options
);
1653 ASSERT_OK(Put("foo", "bar"));
1654 ASSERT_FALSE(dbfull()->GetIntProperty(
1655 DB::Properties::kEstimateOldestKeyTime
, &oldest_key_time
));
1658 int64_t mock_start_time
;
1659 ASSERT_OK(env_
->GetCurrentTime(&mock_start_time
));
1661 options
.compaction_style
= kCompactionStyleFIFO
;
1663 options
.max_open_files
= -1;
1664 options
.compaction_options_fifo
.allow_compaction
= false;
1665 DestroyAndReopen(options
);
1667 env_
->MockSleepForSeconds(100);
1668 ASSERT_OK(Put("k1", "v1"));
1669 ASSERT_TRUE(dbfull()->GetIntProperty(DB::Properties::kEstimateOldestKeyTime
,
1671 ASSERT_EQ(100, oldest_key_time
- mock_start_time
);
1673 ASSERT_EQ("1", FilesPerLevel());
1674 ASSERT_TRUE(dbfull()->GetIntProperty(DB::Properties::kEstimateOldestKeyTime
,
1676 ASSERT_EQ(100, oldest_key_time
- mock_start_time
);
1678 env_
->MockSleepForSeconds(100); // -> 200
1679 ASSERT_OK(Put("k2", "v2"));
1681 ASSERT_EQ("2", FilesPerLevel());
1682 ASSERT_TRUE(dbfull()->GetIntProperty(DB::Properties::kEstimateOldestKeyTime
,
1684 ASSERT_EQ(100, oldest_key_time
- mock_start_time
);
1686 env_
->MockSleepForSeconds(100); // -> 300
1687 ASSERT_OK(Put("k3", "v3"));
1689 ASSERT_EQ("3", FilesPerLevel());
1690 ASSERT_TRUE(dbfull()->GetIntProperty(DB::Properties::kEstimateOldestKeyTime
,
1692 ASSERT_EQ(100, oldest_key_time
- mock_start_time
);
1694 env_
->MockSleepForSeconds(150); // -> 450
1695 ASSERT_OK(dbfull()->CompactRange(CompactRangeOptions(), nullptr, nullptr));
1696 ASSERT_EQ("2", FilesPerLevel());
1697 ASSERT_TRUE(dbfull()->GetIntProperty(DB::Properties::kEstimateOldestKeyTime
,
1699 ASSERT_EQ(200, oldest_key_time
- mock_start_time
);
1701 env_
->MockSleepForSeconds(100); // -> 550
1702 ASSERT_OK(dbfull()->CompactRange(CompactRangeOptions(), nullptr, nullptr));
1703 ASSERT_EQ("1", FilesPerLevel());
1704 ASSERT_TRUE(dbfull()->GetIntProperty(DB::Properties::kEstimateOldestKeyTime
,
1706 ASSERT_EQ(300, oldest_key_time
- mock_start_time
);
1708 env_
->MockSleepForSeconds(100); // -> 650
1709 ASSERT_OK(dbfull()->CompactRange(CompactRangeOptions(), nullptr, nullptr));
1710 ASSERT_EQ("", FilesPerLevel());
1711 ASSERT_FALSE(dbfull()->GetIntProperty(DB::Properties::kEstimateOldestKeyTime
,
1715 TEST_F(DBPropertiesTest
, SstFilesSize
) {
1716 struct TestListener
: public EventListener
{
1717 void OnCompactionCompleted(DB
* db
,
1718 const CompactionJobInfo
& /*info*/) override
{
1719 assert(callback_triggered
== false);
1720 assert(size_before_compaction
> 0);
1721 callback_triggered
= true;
1722 uint64_t total_sst_size
= 0;
1723 uint64_t live_sst_size
= 0;
1724 bool ok
= db
->GetIntProperty(DB::Properties::kTotalSstFilesSize
,
1727 // total_sst_size include files before and after compaction.
1728 ASSERT_GT(total_sst_size
, size_before_compaction
);
1730 db
->GetIntProperty(DB::Properties::kLiveSstFilesSize
, &live_sst_size
);
1732 // live_sst_size only include files after compaction.
1733 ASSERT_GT(live_sst_size
, 0);
1734 ASSERT_LT(live_sst_size
, size_before_compaction
);
1737 uint64_t size_before_compaction
= 0;
1738 bool callback_triggered
= false;
1740 std::shared_ptr
<TestListener
> listener
= std::make_shared
<TestListener
>();
1743 options
.env
= CurrentOptions().env
;
1744 options
.disable_auto_compactions
= true;
1745 options
.listeners
.push_back(listener
);
1748 for (int i
= 0; i
< 10; i
++) {
1749 ASSERT_OK(Put("key" + std::to_string(i
), std::string(1000, 'v')));
1752 for (int i
= 0; i
< 5; i
++) {
1753 ASSERT_OK(Delete("key" + std::to_string(i
)));
1757 bool ok
= db_
->GetIntProperty(DB::Properties::kTotalSstFilesSize
, &sst_size
);
1759 ASSERT_GT(sst_size
, 0);
1760 listener
->size_before_compaction
= sst_size
;
1761 // Compact to clean all keys and trigger listener.
1762 ASSERT_OK(db_
->CompactRange(CompactRangeOptions(), nullptr, nullptr));
1763 ASSERT_TRUE(listener
->callback_triggered
);
1766 TEST_F(DBPropertiesTest
, MinObsoleteSstNumberToKeep
) {
1767 class TestListener
: public EventListener
{
1769 void OnTableFileCreated(const TableFileCreationInfo
& info
) override
{
1770 if (info
.reason
== TableFileCreationReason::kCompaction
) {
1771 // Verify the property indicates that SSTs created by a running
1772 // compaction cannot be deleted.
1773 uint64_t created_file_num
;
1774 FileType created_file_type
;
1775 std::string filename
=
1776 info
.file_path
.substr(info
.file_path
.rfind('/') + 1);
1778 ParseFileName(filename
, &created_file_num
, &created_file_type
));
1779 ASSERT_EQ(kTableFile
, created_file_type
);
1781 uint64_t keep_sst_lower_bound
;
1783 db_
->GetIntProperty(DB::Properties::kMinObsoleteSstNumberToKeep
,
1784 &keep_sst_lower_bound
));
1786 ASSERT_LE(keep_sst_lower_bound
, created_file_num
);
1791 void SetDB(DB
* db
) { db_
= db
; }
1793 int GetNumCompactions() { return num_compactions_
; }
1795 // True if we've verified the property for at least one output file
1796 bool Validated() { return validated_
; }
1799 int num_compactions_
= 0;
1800 bool validated_
= false;
1804 const int kNumL0Files
= 4;
1806 std::shared_ptr
<TestListener
> listener
= std::make_shared
<TestListener
>();
1808 Options options
= CurrentOptions();
1809 options
.listeners
.push_back(listener
);
1810 options
.level0_file_num_compaction_trigger
= kNumL0Files
;
1811 DestroyAndReopen(options
);
1812 listener
->SetDB(db_
);
1814 for (int i
= 0; i
< kNumL0Files
; ++i
) {
1815 // Make sure they overlap in keyspace to prevent trivial move
1816 ASSERT_OK(Put("key1", "val"));
1817 ASSERT_OK(Put("key2", "val"));
1820 ASSERT_OK(dbfull()->TEST_WaitForCompact());
1821 ASSERT_TRUE(listener
->Validated());
1824 TEST_F(DBPropertiesTest
, BlobCacheProperties
) {
1828 options
.env
= CurrentOptions().env
;
1830 // Test with empty blob cache.
1831 constexpr size_t kCapacity
= 100;
1833 co
.capacity
= kCapacity
;
1834 co
.num_shard_bits
= 0;
1835 co
.metadata_charge_policy
= kDontChargeCacheMetadata
;
1836 auto blob_cache
= NewLRUCache(co
);
1837 options
.blob_cache
= blob_cache
;
1841 ASSERT_TRUE(db_
->GetIntProperty(DB::Properties::kBlobCacheCapacity
, &value
));
1842 ASSERT_EQ(kCapacity
, value
);
1843 ASSERT_TRUE(db_
->GetIntProperty(DB::Properties::kBlobCacheUsage
, &value
));
1844 ASSERT_EQ(0, value
);
1846 db_
->GetIntProperty(DB::Properties::kBlobCachePinnedUsage
, &value
));
1847 ASSERT_EQ(0, value
);
1849 // Insert unpinned blob to the cache and check size.
1850 constexpr size_t kSize1
= 70;
1851 ASSERT_OK(blob_cache
->Insert("blob1", nullptr /*value*/, kSize1
,
1852 nullptr /*deleter*/));
1853 ASSERT_TRUE(db_
->GetIntProperty(DB::Properties::kBlobCacheCapacity
, &value
));
1854 ASSERT_EQ(kCapacity
, value
);
1855 ASSERT_TRUE(db_
->GetIntProperty(DB::Properties::kBlobCacheUsage
, &value
));
1856 ASSERT_EQ(kSize1
, value
);
1858 db_
->GetIntProperty(DB::Properties::kBlobCachePinnedUsage
, &value
));
1859 ASSERT_EQ(0, value
);
1861 // Insert pinned blob to the cache and check size.
1862 constexpr size_t kSize2
= 60;
1863 Cache::Handle
* blob2
= nullptr;
1864 ASSERT_OK(blob_cache
->Insert("blob2", nullptr /*value*/, kSize2
,
1865 nullptr /*deleter*/, &blob2
));
1866 ASSERT_NE(nullptr, blob2
);
1867 ASSERT_TRUE(db_
->GetIntProperty(DB::Properties::kBlobCacheCapacity
, &value
));
1868 ASSERT_EQ(kCapacity
, value
);
1869 ASSERT_TRUE(db_
->GetIntProperty(DB::Properties::kBlobCacheUsage
, &value
));
1870 // blob1 is evicted.
1871 ASSERT_EQ(kSize2
, value
);
1873 db_
->GetIntProperty(DB::Properties::kBlobCachePinnedUsage
, &value
));
1874 ASSERT_EQ(kSize2
, value
);
1876 // Insert another pinned blob to make the cache over-sized.
1877 constexpr size_t kSize3
= 80;
1878 Cache::Handle
* blob3
= nullptr;
1879 ASSERT_OK(blob_cache
->Insert("blob3", nullptr /*value*/, kSize3
,
1880 nullptr /*deleter*/, &blob3
));
1881 ASSERT_NE(nullptr, blob3
);
1882 ASSERT_TRUE(db_
->GetIntProperty(DB::Properties::kBlobCacheCapacity
, &value
));
1883 ASSERT_EQ(kCapacity
, value
);
1884 ASSERT_TRUE(db_
->GetIntProperty(DB::Properties::kBlobCacheUsage
, &value
));
1885 ASSERT_EQ(kSize2
+ kSize3
, value
);
1887 db_
->GetIntProperty(DB::Properties::kBlobCachePinnedUsage
, &value
));
1888 ASSERT_EQ(kSize2
+ kSize3
, value
);
1890 // Check size after release.
1891 blob_cache
->Release(blob2
);
1892 blob_cache
->Release(blob3
);
1893 ASSERT_TRUE(db_
->GetIntProperty(DB::Properties::kBlobCacheCapacity
, &value
));
1894 ASSERT_EQ(kCapacity
, value
);
1895 ASSERT_TRUE(db_
->GetIntProperty(DB::Properties::kBlobCacheUsage
, &value
));
1896 // blob2 will be evicted, while blob3 remain in cache after release.
1897 ASSERT_EQ(kSize3
, value
);
1899 db_
->GetIntProperty(DB::Properties::kBlobCachePinnedUsage
, &value
));
1900 ASSERT_EQ(0, value
);
1903 TEST_F(DBPropertiesTest
, BlockCacheProperties
) {
1907 options
.env
= CurrentOptions().env
;
1909 // Block cache properties are not available for tables other than
1910 // block-based table.
1911 options
.table_factory
.reset(NewPlainTableFactory());
1914 db_
->GetIntProperty(DB::Properties::kBlockCacheCapacity
, &value
));
1915 ASSERT_FALSE(db_
->GetIntProperty(DB::Properties::kBlockCacheUsage
, &value
));
1917 db_
->GetIntProperty(DB::Properties::kBlockCachePinnedUsage
, &value
));
1919 options
.table_factory
.reset(NewCuckooTableFactory());
1922 db_
->GetIntProperty(DB::Properties::kBlockCacheCapacity
, &value
));
1923 ASSERT_FALSE(db_
->GetIntProperty(DB::Properties::kBlockCacheUsage
, &value
));
1925 db_
->GetIntProperty(DB::Properties::kBlockCachePinnedUsage
, &value
));
1927 // Block cache properties are not available if block cache is not used.
1928 BlockBasedTableOptions table_options
;
1929 table_options
.no_block_cache
= true;
1930 options
.table_factory
.reset(NewBlockBasedTableFactory(table_options
));
1933 db_
->GetIntProperty(DB::Properties::kBlockCacheCapacity
, &value
));
1934 ASSERT_FALSE(db_
->GetIntProperty(DB::Properties::kBlockCacheUsage
, &value
));
1936 db_
->GetIntProperty(DB::Properties::kBlockCachePinnedUsage
, &value
));
1938 // Test with empty block cache.
1939 constexpr size_t kCapacity
= 100;
1941 co
.capacity
= kCapacity
;
1942 co
.num_shard_bits
= 0;
1943 co
.metadata_charge_policy
= kDontChargeCacheMetadata
;
1944 auto block_cache
= NewLRUCache(co
);
1945 table_options
.block_cache
= block_cache
;
1946 table_options
.no_block_cache
= false;
1947 options
.table_factory
.reset(NewBlockBasedTableFactory(table_options
));
1949 ASSERT_TRUE(db_
->GetIntProperty(DB::Properties::kBlockCacheCapacity
, &value
));
1950 ASSERT_EQ(kCapacity
, value
);
1951 ASSERT_TRUE(db_
->GetIntProperty(DB::Properties::kBlockCacheUsage
, &value
));
1952 ASSERT_EQ(0, value
);
1954 db_
->GetIntProperty(DB::Properties::kBlockCachePinnedUsage
, &value
));
1955 ASSERT_EQ(0, value
);
1957 // Insert unpinned item to the cache and check size.
1958 constexpr size_t kSize1
= 50;
1959 ASSERT_OK(block_cache
->Insert("item1", nullptr /*value*/, kSize1
,
1960 nullptr /*deleter*/));
1961 ASSERT_TRUE(db_
->GetIntProperty(DB::Properties::kBlockCacheCapacity
, &value
));
1962 ASSERT_EQ(kCapacity
, value
);
1963 ASSERT_TRUE(db_
->GetIntProperty(DB::Properties::kBlockCacheUsage
, &value
));
1964 ASSERT_EQ(kSize1
, value
);
1966 db_
->GetIntProperty(DB::Properties::kBlockCachePinnedUsage
, &value
));
1967 ASSERT_EQ(0, value
);
1969 // Insert pinned item to the cache and check size.
1970 constexpr size_t kSize2
= 30;
1971 Cache::Handle
* item2
= nullptr;
1972 ASSERT_OK(block_cache
->Insert("item2", nullptr /*value*/, kSize2
,
1973 nullptr /*deleter*/, &item2
));
1974 ASSERT_NE(nullptr, item2
);
1975 ASSERT_TRUE(db_
->GetIntProperty(DB::Properties::kBlockCacheCapacity
, &value
));
1976 ASSERT_EQ(kCapacity
, value
);
1977 ASSERT_TRUE(db_
->GetIntProperty(DB::Properties::kBlockCacheUsage
, &value
));
1978 ASSERT_EQ(kSize1
+ kSize2
, value
);
1980 db_
->GetIntProperty(DB::Properties::kBlockCachePinnedUsage
, &value
));
1981 ASSERT_EQ(kSize2
, value
);
1983 // Insert another pinned item to make the cache over-sized.
1984 constexpr size_t kSize3
= 80;
1985 Cache::Handle
* item3
= nullptr;
1986 ASSERT_OK(block_cache
->Insert("item3", nullptr /*value*/, kSize3
,
1987 nullptr /*deleter*/, &item3
));
1988 ASSERT_NE(nullptr, item2
);
1989 ASSERT_TRUE(db_
->GetIntProperty(DB::Properties::kBlockCacheCapacity
, &value
));
1990 ASSERT_EQ(kCapacity
, value
);
1991 ASSERT_TRUE(db_
->GetIntProperty(DB::Properties::kBlockCacheUsage
, &value
));
1992 // Item 1 is evicted.
1993 ASSERT_EQ(kSize2
+ kSize3
, value
);
1995 db_
->GetIntProperty(DB::Properties::kBlockCachePinnedUsage
, &value
));
1996 ASSERT_EQ(kSize2
+ kSize3
, value
);
1998 // Check size after release.
1999 block_cache
->Release(item2
);
2000 block_cache
->Release(item3
);
2001 ASSERT_TRUE(db_
->GetIntProperty(DB::Properties::kBlockCacheCapacity
, &value
));
2002 ASSERT_EQ(kCapacity
, value
);
2003 ASSERT_TRUE(db_
->GetIntProperty(DB::Properties::kBlockCacheUsage
, &value
));
2004 // item2 will be evicted, while item3 remain in cache after release.
2005 ASSERT_EQ(kSize3
, value
);
2007 db_
->GetIntProperty(DB::Properties::kBlockCachePinnedUsage
, &value
));
2008 ASSERT_EQ(0, value
);
2011 TEST_F(DBPropertiesTest
, GetMapPropertyDbStats
) {
2012 auto mock_clock
= std::make_shared
<MockSystemClock
>(env_
->GetSystemClock());
2013 CompositeEnvWrapper
env(env_
, mock_clock
);
2015 Options opts
= CurrentOptions();
2020 std::map
<std::string
, std::string
> db_stats
;
2021 ASSERT_TRUE(db_
->GetMapProperty(DB::Properties::kDBStats
, &db_stats
));
2022 AssertDbStats(db_stats
, 0.0 /* expected_uptime */,
2023 0 /* expected_user_bytes_written */,
2024 0 /* expected_wal_bytes_written */,
2025 0 /* expected_user_writes_by_self */,
2026 0 /* expected_user_writes_with_wal */);
2030 mock_clock
->SleepForMicroseconds(1500000);
2032 std::map
<std::string
, std::string
> db_stats
;
2033 ASSERT_TRUE(db_
->GetMapProperty(DB::Properties::kDBStats
, &db_stats
));
2034 AssertDbStats(db_stats
, 1.5 /* expected_uptime */,
2035 0 /* expected_user_bytes_written */,
2036 0 /* expected_wal_bytes_written */,
2037 0 /* expected_user_writes_by_self */,
2038 0 /* expected_user_writes_with_wal */);
2041 int expected_user_bytes_written
= 0;
2043 // Write with WAL disabled.
2044 WriteOptions write_opts
;
2045 write_opts
.disableWAL
= true;
2048 ASSERT_OK(batch
.Put("key", "val"));
2049 expected_user_bytes_written
+= static_cast<int>(batch
.GetDataSize());
2051 ASSERT_OK(db_
->Write(write_opts
, &batch
));
2053 std::map
<std::string
, std::string
> db_stats
;
2054 ASSERT_TRUE(db_
->GetMapProperty(DB::Properties::kDBStats
, &db_stats
));
2055 AssertDbStats(db_stats
, 1.5 /* expected_uptime */,
2056 expected_user_bytes_written
,
2057 0 /* expected_wal_bytes_written */,
2058 1 /* expected_user_writes_by_self */,
2059 0 /* expected_user_writes_with_wal */);
2062 int expected_wal_bytes_written
= 0;
2064 // Write with WAL enabled.
2066 ASSERT_OK(batch
.Delete("key"));
2067 expected_user_bytes_written
+= static_cast<int>(batch
.GetDataSize());
2068 expected_wal_bytes_written
+= static_cast<int>(batch
.GetDataSize());
2070 ASSERT_OK(db_
->Write(WriteOptions(), &batch
));
2072 std::map
<std::string
, std::string
> db_stats
;
2073 ASSERT_TRUE(db_
->GetMapProperty(DB::Properties::kDBStats
, &db_stats
));
2074 AssertDbStats(db_stats
, 1.5 /* expected_uptime */,
2075 expected_user_bytes_written
, expected_wal_bytes_written
,
2076 2 /* expected_user_writes_by_self */,
2077 1 /* expected_user_writes_with_wal */);
2083 TEST_F(DBPropertiesTest
, GetMapPropertyBlockCacheEntryStats
) {
2084 // Currently only verifies the expected properties are present
2085 std::map
<std::string
, std::string
> values
;
2087 db_
->GetMapProperty(DB::Properties::kBlockCacheEntryStats
, &values
));
2089 ASSERT_TRUE(values
.find(BlockCacheEntryStatsMapKeys::CacheId()) !=
2091 ASSERT_TRUE(values
.find(BlockCacheEntryStatsMapKeys::CacheCapacityBytes()) !=
2095 BlockCacheEntryStatsMapKeys::LastCollectionDurationSeconds()) !=
2098 values
.find(BlockCacheEntryStatsMapKeys::LastCollectionAgeSeconds()) !=
2100 for (size_t i
= 0; i
< kNumCacheEntryRoles
; ++i
) {
2101 CacheEntryRole role
= static_cast<CacheEntryRole
>(i
);
2102 ASSERT_TRUE(values
.find(BlockCacheEntryStatsMapKeys::EntryCount(role
)) !=
2104 ASSERT_TRUE(values
.find(BlockCacheEntryStatsMapKeys::UsedBytes(role
)) !=
2106 ASSERT_TRUE(values
.find(BlockCacheEntryStatsMapKeys::UsedPercent(role
)) !=
2110 // There should be no extra values in the map.
2111 ASSERT_EQ(3 * kNumCacheEntryRoles
+ 4, values
.size());
2115 std::string
PopMetaIndexKey(InternalIterator
* meta_iter
) {
2116 Status s
= meta_iter
->status();
2118 return s
.ToString();
2119 } else if (meta_iter
->Valid()) {
2120 std::string rv
= meta_iter
->key().ToString();
2128 } // anonymous namespace
2130 TEST_F(DBPropertiesTest
, TableMetaIndexKeys
) {
2131 // This is to detect unexpected churn in metaindex block keys. This is more
2132 // of a "table test" but table_test.cc doesn't depend on db_test_util.h and
2133 // we need ChangeOptions() for broad coverage.
2134 constexpr int kKeyCount
= 100;
2137 options
= CurrentOptions(options
);
2138 DestroyAndReopen(options
);
2140 // Create an SST file
2141 for (int key
= 0; key
< kKeyCount
; key
++) {
2142 ASSERT_OK(Put(Key(key
), "val"));
2146 // Find its file number
2147 std::vector
<LiveFileMetaData
> files
;
2148 db_
->GetLiveFilesMetaData(&files
);
2150 ASSERT_EQ(1, files
.size());
2152 // Open it for inspection
2153 std::string sst_file
=
2154 files
[0].directory
+ "/" + files
[0].relative_filename
;
2155 std::unique_ptr
<FSRandomAccessFile
> f
;
2156 ASSERT_OK(env_
->GetFileSystem()->NewRandomAccessFile(
2157 sst_file
, FileOptions(), &f
, nullptr));
2158 std::unique_ptr
<RandomAccessFileReader
> r
;
2159 r
.reset(new RandomAccessFileReader(std::move(f
), sst_file
));
2160 uint64_t file_size
= 0;
2161 ASSERT_OK(env_
->GetFileSize(sst_file
, &file_size
));
2165 ASSERT_OK(ReadMetaIndexBlockInFile(r
.get(), file_size
, 0U,
2166 ImmutableOptions(options
), &bc
));
2167 Block
metaindex_block(std::move(bc
));
2168 std::unique_ptr
<InternalIterator
> meta_iter
;
2169 meta_iter
.reset(metaindex_block
.NewMetaIterator());
2170 meta_iter
->SeekToFirst();
2172 if (strcmp(options
.table_factory
->Name(),
2173 TableFactory::kBlockBasedTableName()) == 0) {
2174 auto bbto
= options
.table_factory
->GetOptions
<BlockBasedTableOptions
>();
2175 if (bbto
->filter_policy
) {
2176 if (bbto
->partition_filters
) {
2177 // The key names are intentionally hard-coded here to detect
2178 // accidental regression on compatibility.
2179 EXPECT_EQ("partitionedfilter.rocksdb.BuiltinBloomFilter",
2180 PopMetaIndexKey(meta_iter
.get()));
2182 EXPECT_EQ("fullfilter.rocksdb.BuiltinBloomFilter",
2183 PopMetaIndexKey(meta_iter
.get()));
2186 if (bbto
->index_type
== BlockBasedTableOptions::kHashSearch
) {
2187 EXPECT_EQ("rocksdb.hashindex.metadata",
2188 PopMetaIndexKey(meta_iter
.get()));
2189 EXPECT_EQ("rocksdb.hashindex.prefixes",
2190 PopMetaIndexKey(meta_iter
.get()));
2193 EXPECT_EQ("rocksdb.properties", PopMetaIndexKey(meta_iter
.get()));
2194 EXPECT_EQ("NOT_FOUND", PopMetaIndexKey(meta_iter
.get()));
2195 } while (ChangeOptions());
2198 #endif // ROCKSDB_LITE
2200 } // namespace ROCKSDB_NAMESPACE
2202 int main(int argc
, char** argv
) {
2203 ROCKSDB_NAMESPACE::port::InstallStackTraceHandler();
2204 ::testing::InitGoogleTest(&argc
, argv
);
2205 return RUN_ALL_TESTS();