1 // Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
2 // This source code is licensed under both the GPLv2 (found in the
3 // COPYING file in the root directory) and Apache 2.0 License
4 // (found in the LICENSE.Apache file in the root directory).
6 // Copyright (c) 2011 The LevelDB Authors. All rights reserved.
7 // Use of this source code is governed by a BSD-style license that can be
8 // found in the LICENSE file. See the AUTHORS file for names of contributors.
15 #include "db/db_test_util.h"
16 #include "port/stack_trace.h"
17 #include "rocksdb/listener.h"
18 #include "rocksdb/options.h"
19 #include "rocksdb/perf_context.h"
20 #include "rocksdb/perf_level.h"
21 #include "rocksdb/table.h"
22 #include "util/random.h"
23 #include "util/string_util.h"
25 namespace ROCKSDB_NAMESPACE
{
27 class DBPropertiesTest
: public DBTestBase
{
30 : DBTestBase("/db_properties_test", /*env_do_fsync=*/false) {}
// TEST: Empty — checks memtable entry-count properties on an empty/small DB
// and the "rocksdb.is-file-deletions-enabled" property across nested
// Disable/EnableFileDeletions calls, under every option configuration
// (ChangeOptions loop).
// NOTE(review): this block was line-mangled and lost lines during extraction
// (the embedded original line numbers skip, e.g. 41 -> 44); the text below is
// kept byte-identical — recover the full test from upstream
// db/db_properties_test.cc before compiling.
34 TEST_F(DBPropertiesTest
, Empty
) {
38 options
.write_buffer_size
= 100000; // Small write buffer
39 options
.allow_concurrent_memtable_write
= false;
40 options
= CurrentOptions(options
);
41 CreateAndReopenWithCF({"pikachu"}, options
);
// Query num-entries-active-mem-table before and after a Put.
44 ASSERT_TRUE(dbfull()->GetProperty(
45 handles_
[1], "rocksdb.num-entries-active-mem-table", &num
));
48 ASSERT_OK(Put(1, "foo", "v1"));
49 ASSERT_EQ("v1", Get(1, "foo"));
50 ASSERT_TRUE(dbfull()->GetProperty(
51 handles_
[1], "rocksdb.num-entries-active-mem-table", &num
));
// Block sstable syncs so flushed data stays pending while we probe.
55 env_
->delay_sstable_sync_
.store(true, std::memory_order_release
);
56 ASSERT_OK(Put(1, "k1", std::string(100000, 'x'))); // Fill memtable
57 ASSERT_TRUE(dbfull()->GetProperty(
58 handles_
[1], "rocksdb.num-entries-active-mem-table", &num
));
61 ASSERT_OK(Put(1, "k2", std::string(100000, 'y'))); // Trigger compaction
62 ASSERT_TRUE(dbfull()->GetProperty(
63 handles_
[1], "rocksdb.num-entries-active-mem-table", &num
));
66 ASSERT_EQ("v1", Get(1, "foo"));
68 env_
->delay_sstable_sync_
.store(false, std::memory_order_release
);
// Nested DisableFileDeletions calls: property should reflect the
// disable refcount until a final (force) EnableFileDeletions.
70 ASSERT_OK(db_
->DisableFileDeletions());
72 dbfull()->GetProperty("rocksdb.is-file-deletions-enabled", &num
));
75 ASSERT_OK(db_
->DisableFileDeletions());
77 dbfull()->GetProperty("rocksdb.is-file-deletions-enabled", &num
));
80 ASSERT_OK(db_
->DisableFileDeletions());
82 dbfull()->GetProperty("rocksdb.is-file-deletions-enabled", &num
));
85 ASSERT_OK(db_
->EnableFileDeletions(false));
87 dbfull()->GetProperty("rocksdb.is-file-deletions-enabled", &num
));
90 ASSERT_OK(db_
->EnableFileDeletions());
92 dbfull()->GetProperty("rocksdb.is-file-deletions-enabled", &num
));
94 } while (ChangeOptions());
// TEST: CurrentVersionNumber — verifies that
// "rocksdb.current-super-version-number" advances across a write that
// installs a new super version.
// NOTE(review): line-mangled with dropped lines (original numbering skips);
// kept byte-identical — restore from upstream db/db_properties_test.cc.
97 TEST_F(DBPropertiesTest
, CurrentVersionNumber
) {
100 dbfull()->GetIntProperty("rocksdb.current-super-version-number", &v1
));
101 ASSERT_OK(Put("12345678", ""));
103 dbfull()->GetIntProperty("rocksdb.current-super-version-number", &v2
));
106 dbfull()->GetIntProperty("rocksdb.current-super-version-number", &v3
));
// TEST: GetAggregatedIntPropertyTest — sums kSizeAllMemTables over every
// column family by hand and checks GetAggregatedIntProperty returns the same
// total; also checks a non-int property (kDBStats) is rejected, and that
// kEstimateTableReadersMem grows after flushing each CF.
// NOTE(review): line-mangled with dropped lines (original numbering skips);
// kept byte-identical — restore from upstream db/db_properties_test.cc.
112 TEST_F(DBPropertiesTest
, GetAggregatedIntPropertyTest
) {
113 const int kKeySize
= 100;
114 const int kValueSize
= 500;
115 const int kKeyNum
= 100;
119 options
.create_if_missing
= true;
120 options
.write_buffer_size
= (kKeySize
+ kValueSize
) * kKeyNum
/ 10;
121 // Make them never flush
122 options
.min_write_buffer_number_to_merge
= 1000;
123 options
.max_write_buffer_number
= 1000;
124 options
= CurrentOptions(options
);
125 CreateAndReopenWithCF({"one", "two", "three", "four"}, options
);
// Load every CF with random keys so each memtable has nonzero size.
128 for (auto* handle
: handles_
) {
129 for (int i
= 0; i
< kKeyNum
; ++i
) {
130 ASSERT_OK(db_
->Put(WriteOptions(), handle
, rnd
.RandomString(kKeySize
),
131 rnd
.RandomString(kValueSize
)));
// Manual per-CF sum vs. the aggregated API result.
135 uint64_t manual_sum
= 0;
136 uint64_t api_sum
= 0;
138 for (auto* handle
: handles_
) {
140 db_
->GetIntProperty(handle
, DB::Properties::kSizeAllMemTables
, &value
));
143 ASSERT_TRUE(db_
->GetAggregatedIntProperty(DB::Properties::kSizeAllMemTables
,
145 ASSERT_GT(manual_sum
, 0);
146 ASSERT_EQ(manual_sum
, api_sum
);
// kDBStats is a string property; aggregation as int must fail.
148 ASSERT_FALSE(db_
->GetAggregatedIntProperty(DB::Properties::kDBStats
, &value
));
150 uint64_t before_flush_trm
;
151 uint64_t after_flush_trm
;
152 for (auto* handle
: handles_
) {
153 ASSERT_TRUE(db_
->GetAggregatedIntProperty(
154 DB::Properties::kEstimateTableReadersMem
, &before_flush_trm
));
156 // Issue flush and expect larger memory usage of table readers.
157 ASSERT_OK(db_
->Flush(FlushOptions(), handle
));
159 ASSERT_TRUE(db_
->GetAggregatedIntProperty(
160 DB::Properties::kEstimateTableReadersMem
, &after_flush_trm
));
161 ASSERT_GT(after_flush_trm
, before_flush_trm
);
166 void ResetTableProperties(TableProperties
* tp
) {
170 tp
->raw_key_size
= 0;
171 tp
->raw_value_size
= 0;
172 tp
->num_data_blocks
= 0;
174 tp
->num_deletions
= 0;
175 tp
->num_merge_operands
= 0;
176 tp
->num_range_deletions
= 0;
179 void ParseTablePropertiesString(std::string tp_string
, TableProperties
* tp
) {
181 std::replace(tp_string
.begin(), tp_string
.end(), ';', ' ');
182 std::replace(tp_string
.begin(), tp_string
.end(), '=', ' ');
183 ResetTableProperties(tp
);
184 sscanf(tp_string
.c_str(),
185 "# data blocks %" SCNu64
" # entries %" SCNu64
" # deletions %" SCNu64
186 " # merge operands %" SCNu64
" # range deletions %" SCNu64
187 " raw key size %" SCNu64
188 " raw average key size %lf "
189 " raw value size %" SCNu64
190 " raw average value size %lf "
191 " data block size %" SCNu64
" index block size (user-key? %" SCNu64
192 ", delta-value? %" SCNu64
") %" SCNu64
" filter block size %" SCNu64
,
193 &tp
->num_data_blocks
, &tp
->num_entries
, &tp
->num_deletions
,
194 &tp
->num_merge_operands
, &tp
->num_range_deletions
, &tp
->raw_key_size
,
195 &dummy_double
, &tp
->raw_value_size
, &dummy_double
, &tp
->data_size
,
196 &tp
->index_key_is_user_key
, &tp
->index_value_is_delta_encoded
,
197 &tp
->index_size
, &tp
->filter_size
);
200 void VerifySimilar(uint64_t a
, uint64_t b
, double bias
) {
201 ASSERT_EQ(a
== 0U, b
== 0U);
205 double dbl_a
= static_cast<double>(a
);
206 double dbl_b
= static_cast<double>(b
);
208 ASSERT_LT(static_cast<double>(dbl_a
- dbl_b
) / (dbl_a
+ dbl_b
), bias
);
210 ASSERT_LT(static_cast<double>(dbl_b
- dbl_a
) / (dbl_a
+ dbl_b
), bias
);
214 void VerifyTableProperties(
215 const TableProperties
& base_tp
, const TableProperties
& new_tp
,
216 double filter_size_bias
= CACHE_LINE_SIZE
>= 256 ? 0.15 : 0.1,
217 double index_size_bias
= 0.1, double data_size_bias
= 0.1,
218 double num_data_blocks_bias
= 0.05) {
219 VerifySimilar(base_tp
.data_size
, new_tp
.data_size
, data_size_bias
);
220 VerifySimilar(base_tp
.index_size
, new_tp
.index_size
, index_size_bias
);
221 VerifySimilar(base_tp
.filter_size
, new_tp
.filter_size
, filter_size_bias
);
222 VerifySimilar(base_tp
.num_data_blocks
, new_tp
.num_data_blocks
,
223 num_data_blocks_bias
);
225 ASSERT_EQ(base_tp
.raw_key_size
, new_tp
.raw_key_size
);
226 ASSERT_EQ(base_tp
.raw_value_size
, new_tp
.raw_value_size
);
227 ASSERT_EQ(base_tp
.num_entries
, new_tp
.num_entries
);
228 ASSERT_EQ(base_tp
.num_deletions
, new_tp
.num_deletions
);
229 ASSERT_EQ(base_tp
.num_range_deletions
, new_tp
.num_range_deletions
);
231 // Merge operands may become Puts, so we only have an upper bound the exact
232 // number of merge operands.
233 ASSERT_GE(base_tp
.num_merge_operands
, new_tp
.num_merge_operands
);
236 void GetExpectedTableProperties(
237 TableProperties
* expected_tp
, const int kKeySize
, const int kValueSize
,
238 const int kPutsPerTable
, const int kDeletionsPerTable
,
239 const int kMergeOperandsPerTable
, const int kRangeDeletionsPerTable
,
240 const int kTableCount
, const int kBloomBitsPerKey
, const size_t kBlockSize
,
241 const bool index_key_is_user_key
, const bool value_delta_encoding
) {
242 const int kKeysPerTable
=
243 kPutsPerTable
+ kDeletionsPerTable
+ kMergeOperandsPerTable
;
244 const int kPutCount
= kTableCount
* kPutsPerTable
;
245 const int kDeletionCount
= kTableCount
* kDeletionsPerTable
;
246 const int kMergeCount
= kTableCount
* kMergeOperandsPerTable
;
247 const int kRangeDeletionCount
= kTableCount
* kRangeDeletionsPerTable
;
248 const int kKeyCount
= kPutCount
+ kDeletionCount
+ kMergeCount
+ kRangeDeletionCount
;
249 const int kAvgSuccessorSize
= kKeySize
/ 5;
250 const int kEncodingSavePerKey
= kKeySize
/ 4;
251 expected_tp
->raw_key_size
= kKeyCount
* (kKeySize
+ 8);
252 expected_tp
->raw_value_size
=
253 (kPutCount
+ kMergeCount
+ kRangeDeletionCount
) * kValueSize
;
254 expected_tp
->num_entries
= kKeyCount
;
255 expected_tp
->num_deletions
= kDeletionCount
+ kRangeDeletionCount
;
256 expected_tp
->num_merge_operands
= kMergeCount
;
257 expected_tp
->num_range_deletions
= kRangeDeletionCount
;
258 expected_tp
->num_data_blocks
=
259 kTableCount
* (kKeysPerTable
* (kKeySize
- kEncodingSavePerKey
+ kValueSize
)) /
261 expected_tp
->data_size
=
262 kTableCount
* (kKeysPerTable
* (kKeySize
+ 8 + kValueSize
));
263 expected_tp
->index_size
=
264 expected_tp
->num_data_blocks
*
265 (kAvgSuccessorSize
+ (index_key_is_user_key
? 0 : 8) -
266 // discount 1 byte as value size is not encoded in value delta encoding
267 (value_delta_encoding
? 1 : 0));
268 expected_tp
->filter_size
=
269 kTableCount
* ((kKeysPerTable
* kBloomBitsPerKey
+ 7) / 8 +
270 /*average-ish overhead*/ CACHE_LINE_SIZE
/ 2);
272 } // anonymous namespace
// TEST: ValidatePropertyInfo — sanity-checks InternalStats::ppt_name_to_info:
// property names must not end in a digit (suffix-number properties are
// handled separately), and each entry must define exactly one handler
// (string, int, or string_dbimpl).
// NOTE(review): line-mangled with dropped lines (original numbering skips);
// kept byte-identical — restore from upstream db/db_properties_test.cc.
274 TEST_F(DBPropertiesTest
, ValidatePropertyInfo
) {
275 for (const auto& ppt_name_and_info
: InternalStats::ppt_name_to_info
) {
276 // If C++ gets a std::string_literal, this would be better to check at
277 // compile-time using static_assert.
278 ASSERT_TRUE(ppt_name_and_info
.first
.empty() ||
279 !isdigit(ppt_name_and_info
.first
.back()));
// Exactly one of the three handler pointers should be non-null.
282 count
+= (ppt_name_and_info
.second
.handle_string
== nullptr) ? 0 : 1;
283 count
+= (ppt_name_and_info
.second
.handle_int
== nullptr) ? 0 : 1;
284 count
+= (ppt_name_and_info
.second
.handle_string_dbimpl
== nullptr) ? 0 : 1;
285 ASSERT_TRUE(count
== 1);
// TEST: ValidateSampleNumber — checks "rocksdb.estimate-num-keys": with a
// limited max_open_files the estimate samples the 20 newest files ("45"),
// while max_open_files == -1 reads every file and yields the exact count
// ("50").
// NOTE(review): line-mangled with dropped lines (original numbering skips);
// kept byte-identical — restore from upstream db/db_properties_test.cc.
289 TEST_F(DBPropertiesTest
, ValidateSampleNumber
) {
290 // When "max_open_files" is -1, we read all the files for
291 // "rocksdb.estimate-num-keys" computation, which is the ground truth.
292 // Otherwise, we sample 20 newest files to make an estimation.
293 // Formula: lastest_20_files_active_key_ratio * total_files
294 Options options
= CurrentOptions();
295 options
.disable_auto_compactions
= true;
296 options
.level0_stop_writes_trigger
= 1000;
297 DestroyAndReopen(options
);
// Two batches of files with different keys-per-file ratios.
299 for (int files
= 20; files
>= 10; files
-= 10) {
300 for (int i
= 0; i
< files
; i
++) {
301 int rows
= files
/ 10;
302 for (int j
= 0; j
< rows
; j
++) {
303 ASSERT_OK(db_
->Put(WriteOptions(), std::to_string(++key
), "foo"));
305 ASSERT_OK(db_
->Flush(FlushOptions()));
310 ASSERT_TRUE(dbfull()->GetProperty("rocksdb.estimate-num-keys", &num
));
311 ASSERT_EQ("45", num
);
312 options
.max_open_files
= -1;
314 ASSERT_TRUE(dbfull()->GetProperty("rocksdb.estimate-num-keys", &num
));
315 ASSERT_EQ("50", num
);
// TEST: AggregatedTableProperties — writes kTableCount SSTs with a known mix
// of puts/deletes/merges/range-deletes, reads back
// DB::Properties::kAggregatedTableProperties, parses it with
// ParseTablePropertiesString(), and compares against the analytical model in
// GetExpectedTableProperties() via VerifyTableProperties().
// NOTE(review): line-mangled with dropped lines (original numbering skips);
// kept byte-identical — restore from upstream db/db_properties_test.cc.
318 TEST_F(DBPropertiesTest
, AggregatedTableProperties
) {
319 for (int kTableCount
= 40; kTableCount
<= 100; kTableCount
+= 30) {
320 const int kDeletionsPerTable
= 5;
321 const int kMergeOperandsPerTable
= 15;
322 const int kRangeDeletionsPerTable
= 5;
323 const int kPutsPerTable
= 100;
324 const int kKeySize
= 80;
325 const int kValueSize
= 200;
326 const int kBloomBitsPerKey
= 20;
328 Options options
= CurrentOptions();
329 options
.level0_file_num_compaction_trigger
= 8;
330 options
.compression
= kNoCompression
;
331 options
.create_if_missing
= true;
332 options
.preserve_deletes
= true;
333 options
.merge_operator
.reset(new TestPutOperator());
335 BlockBasedTableOptions table_options
;
336 table_options
.filter_policy
.reset(
337 NewBloomFilterPolicy(kBloomBitsPerKey
, false));
338 table_options
.block_size
= 1024;
339 options
.table_factory
.reset(NewBlockBasedTableFactory(table_options
));
341 DestroyAndReopen(options
);
343 // Hold open a snapshot to prevent range tombstones from being compacted
345 ManagedSnapshot
snapshot(db_
);
// One flush per iteration -> kTableCount tables with the fixed op mix.
348 for (int table
= 1; table
<= kTableCount
; ++table
) {
349 for (int i
= 0; i
< kPutsPerTable
; ++i
) {
350 ASSERT_OK(db_
->Put(WriteOptions(), rnd
.RandomString(kKeySize
),
351 rnd
.RandomString(kValueSize
)));
353 for (int i
= 0; i
< kDeletionsPerTable
; i
++) {
354 ASSERT_OK(db_
->Delete(WriteOptions(), rnd
.RandomString(kKeySize
)));
356 for (int i
= 0; i
< kMergeOperandsPerTable
; i
++) {
357 ASSERT_OK(db_
->Merge(WriteOptions(), rnd
.RandomString(kKeySize
),
358 rnd
.RandomString(kValueSize
)));
360 for (int i
= 0; i
< kRangeDeletionsPerTable
; i
++) {
361 std::string start
= rnd
.RandomString(kKeySize
);
362 std::string end
= start
;
363 end
.resize(kValueSize
);
364 ASSERT_OK(db_
->DeleteRange(WriteOptions(), db_
->DefaultColumnFamily(),
367 ASSERT_OK(db_
->Flush(FlushOptions()));
// Parse the aggregated-properties string and compare to the model.
369 std::string property
;
370 db_
->GetProperty(DB::Properties::kAggregatedTableProperties
, &property
);
371 TableProperties output_tp
;
372 ParseTablePropertiesString(property
, &output_tp
);
373 bool index_key_is_user_key
= output_tp
.index_key_is_user_key
> 0;
374 bool value_is_delta_encoded
= output_tp
.index_value_is_delta_encoded
> 0;
376 TableProperties expected_tp
;
377 GetExpectedTableProperties(
378 &expected_tp
, kKeySize
, kValueSize
, kPutsPerTable
, kDeletionsPerTable
,
379 kMergeOperandsPerTable
, kRangeDeletionsPerTable
, kTableCount
,
380 kBloomBitsPerKey
, table_options
.block_size
, index_key_is_user_key
,
381 value_is_delta_encoded
);
383 VerifyTableProperties(expected_tp
, output_tp
);
// TEST: ReadLatencyHistogramByLevel — builds a two-level LSM, then checks
// that "rocksdb.cfstats" / "rocksdb.cf-file-histogram" report per-level read
// latency histograms for levels that were read (Get and iteration), that the
// histograms disappear after reopen or ResetStats, and that a second CF only
// gets a histogram once it is read.
// NOTE(review): line-mangled with dropped lines (original numbering skips);
// kept byte-identical — restore from upstream db/db_properties_test.cc.
387 TEST_F(DBPropertiesTest
, ReadLatencyHistogramByLevel
) {
388 Options options
= CurrentOptions();
389 options
.write_buffer_size
= 110 << 10;
390 options
.level0_file_num_compaction_trigger
= 6;
391 options
.num_levels
= 4;
392 options
.compression
= kNoCompression
;
393 options
.max_bytes_for_level_base
= 4500 << 10;
394 options
.target_file_size_base
= 98 << 10;
395 options
.max_write_buffer_number
= 2;
396 options
.statistics
= ROCKSDB_NAMESPACE::CreateDBStatistics();
397 options
.max_open_files
= 11; // Make sure no proloading of table readers
399 // RocksDB sanitize max open files to at least 20. Modify it back.
400 ROCKSDB_NAMESPACE::SyncPoint::GetInstance()->SetCallBack(
401 "SanitizeOptions::AfterChangeMaxOpenFiles", [&](void* arg
) {
402 int* max_open_files
= static_cast<int*>(arg
);
403 *max_open_files
= 11;
405 ROCKSDB_NAMESPACE::SyncPoint::GetInstance()->EnableProcessing();
407 BlockBasedTableOptions table_options
;
408 table_options
.no_block_cache
= true;
410 CreateAndReopenWithCF({"pikachu"}, options
);
// Generate files to build up levels 0 and 1.
413 for (int num
= 0; num
< 8; num
++) {
414 ASSERT_OK(Put("foo", "bar"));
415 GenerateNewFile(&rnd
, &key_index
);
416 ASSERT_OK(dbfull()->TEST_WaitForCompact());
418 ASSERT_OK(dbfull()->TEST_WaitForCompact());
421 ASSERT_TRUE(dbfull()->GetProperty("rocksdb.dbstats", &prop
));
423 // Get() after flushes, See latency histogram tracked.
424 for (int key
= 0; key
< key_index
; key
++) {
427 ASSERT_TRUE(dbfull()->GetProperty("rocksdb.cfstats", &prop
));
428 ASSERT_NE(std::string::npos
, prop
.find("** Level 0 read latency histogram"));
429 ASSERT_NE(std::string::npos
, prop
.find("** Level 1 read latency histogram"));
430 ASSERT_EQ(std::string::npos
, prop
.find("** Level 2 read latency histogram"));
432 // Reopen and issue Get(). See thee latency tracked
433 ReopenWithColumnFamilies({"default", "pikachu"}, options
);
434 ASSERT_OK(dbfull()->TEST_WaitForCompact());
435 for (int key
= 0; key
< key_index
; key
++) {
439 // Test for getting immutable_db_options_.statistics
440 ASSERT_TRUE(dbfull()->GetProperty(dbfull()->DefaultColumnFamily(),
441 "rocksdb.options-statistics", &prop
));
442 ASSERT_NE(std::string::npos
, prop
.find("rocksdb.block.cache.miss"));
443 ASSERT_EQ(std::string::npos
, prop
.find("rocksdb.db.f.micros"));
445 ASSERT_TRUE(dbfull()->GetProperty(dbfull()->DefaultColumnFamily(),
446 "rocksdb.cf-file-histogram", &prop
));
447 ASSERT_NE(std::string::npos
, prop
.find("** Level 0 read latency histogram"));
448 ASSERT_NE(std::string::npos
, prop
.find("** Level 1 read latency histogram"));
449 ASSERT_EQ(std::string::npos
, prop
.find("** Level 2 read latency histogram"));
451 // Reopen and issue iterating. See thee latency tracked
452 ReopenWithColumnFamilies({"default", "pikachu"}, options
);
453 ROCKSDB_NAMESPACE::SyncPoint::GetInstance()->DisableProcessing();
454 ASSERT_TRUE(dbfull()->GetProperty("rocksdb.cf-file-histogram", &prop
));
455 ASSERT_EQ(std::string::npos
, prop
.find("** Level 0 read latency histogram"));
456 ASSERT_EQ(std::string::npos
, prop
.find("** Level 1 read latency histogram"));
457 ASSERT_EQ(std::string::npos
, prop
.find("** Level 2 read latency histogram"));
// Iterating repopulates the read-latency histograms.
459 std::unique_ptr
<Iterator
> iter(db_
->NewIterator(ReadOptions()));
460 for (iter
->Seek(Key(0)); iter
->Valid(); iter
->Next()) {
462 ASSERT_OK(iter
->status());
464 ASSERT_TRUE(dbfull()->GetProperty("rocksdb.cf-file-histogram", &prop
));
465 ASSERT_NE(std::string::npos
, prop
.find("** Level 0 read latency histogram"));
466 ASSERT_NE(std::string::npos
, prop
.find("** Level 1 read latency histogram"));
467 ASSERT_EQ(std::string::npos
, prop
.find("** Level 2 read latency histogram"));
469 // CF 1 should show no histogram.
471 dbfull()->GetProperty(handles_
[1], "rocksdb.cf-file-histogram", &prop
));
472 ASSERT_EQ(std::string::npos
, prop
.find("** Level 0 read latency histogram"));
473 ASSERT_EQ(std::string::npos
, prop
.find("** Level 1 read latency histogram"));
474 ASSERT_EQ(std::string::npos
, prop
.find("** Level 2 read latency histogram"));
475 // put something and read it back , CF 1 should show histogram.
476 ASSERT_OK(Put(1, "foo", "bar"));
478 ASSERT_OK(dbfull()->TEST_WaitForCompact());
479 ASSERT_EQ("bar", Get(1, "foo"));
482 dbfull()->GetProperty(handles_
[1], "rocksdb.cf-file-histogram", &prop
));
483 ASSERT_NE(std::string::npos
, prop
.find("** Level 0 read latency histogram"));
484 ASSERT_EQ(std::string::npos
, prop
.find("** Level 1 read latency histogram"));
485 ASSERT_EQ(std::string::npos
, prop
.find("** Level 2 read latency histogram"));
487 // options.max_open_files preloads table readers.
488 options
.max_open_files
= -1;
489 ReopenWithColumnFamilies({"default", "pikachu"}, options
);
490 ASSERT_TRUE(dbfull()->GetProperty(dbfull()->DefaultColumnFamily(),
491 "rocksdb.cf-file-histogram", &prop
));
492 ASSERT_NE(std::string::npos
, prop
.find("** Level 0 read latency histogram"));
493 ASSERT_NE(std::string::npos
, prop
.find("** Level 1 read latency histogram"));
494 ASSERT_EQ(std::string::npos
, prop
.find("** Level 2 read latency histogram"));
495 for (int key
= 0; key
< key_index
; key
++) {
498 ASSERT_TRUE(dbfull()->GetProperty("rocksdb.cfstats", &prop
));
499 ASSERT_NE(std::string::npos
, prop
.find("** Level 0 read latency histogram"));
500 ASSERT_NE(std::string::npos
, prop
.find("** Level 1 read latency histogram"));
501 ASSERT_EQ(std::string::npos
, prop
.find("** Level 2 read latency histogram"));
503 // Clear internal stats
504 ASSERT_OK(dbfull()->ResetStats());
505 ASSERT_TRUE(dbfull()->GetProperty("rocksdb.cfstats", &prop
));
506 ASSERT_EQ(std::string::npos
, prop
.find("** Level 0 read latency histogram"));
507 ASSERT_EQ(std::string::npos
, prop
.find("** Level 1 read latency histogram"));
508 ASSERT_EQ(std::string::npos
, prop
.find("** Level 2 read latency histogram"));
// TEST: AggregatedTablePropertiesAtLevel — after each flush+compaction,
// sums the per-level kAggregatedTablePropertiesAtLevel results over all
// levels and checks the sum exactly equals the whole-DB
// kAggregatedTableProperties; also compares against the analytical model
// with loose tolerances.
// NOTE(review): line-mangled with dropped lines (original numbering skips);
// kept byte-identical — restore from upstream db/db_properties_test.cc.
511 TEST_F(DBPropertiesTest
, AggregatedTablePropertiesAtLevel
) {
512 const int kTableCount
= 100;
513 const int kDeletionsPerTable
= 2;
514 const int kMergeOperandsPerTable
= 2;
515 const int kRangeDeletionsPerTable
= 2;
516 const int kPutsPerTable
= 10;
517 const int kKeySize
= 50;
518 const int kValueSize
= 400;
519 const int kMaxLevel
= 7;
520 const int kBloomBitsPerKey
= 20;
522 Options options
= CurrentOptions();
523 options
.level0_file_num_compaction_trigger
= 8;
524 options
.compression
= kNoCompression
;
525 options
.create_if_missing
= true;
526 options
.level0_file_num_compaction_trigger
= 2;
527 options
.target_file_size_base
= 8192;
528 options
.max_bytes_for_level_base
= 10000;
529 options
.max_bytes_for_level_multiplier
= 2;
530 // This ensures there no compaction happening when we call GetProperty().
531 options
.disable_auto_compactions
= true;
532 options
.preserve_deletes
= true;
533 options
.merge_operator
.reset(new TestPutOperator());
535 BlockBasedTableOptions table_options
;
536 table_options
.filter_policy
.reset(
537 NewBloomFilterPolicy(kBloomBitsPerKey
, false));
538 table_options
.block_size
= 1024;
539 options
.table_factory
.reset(NewBlockBasedTableFactory(table_options
));
541 DestroyAndReopen(options
);
543 // Hold open a snapshot to prevent range tombstones from being compacted away.
544 ManagedSnapshot
snapshot(db_
);
546 std::string level_tp_strings
[kMaxLevel
];
547 std::string tp_string
;
548 TableProperties level_tps
[kMaxLevel
];
549 TableProperties tp
, sum_tp
, expected_tp
;
550 for (int table
= 1; table
<= kTableCount
; ++table
) {
551 for (int i
= 0; i
< kPutsPerTable
; ++i
) {
552 ASSERT_OK(db_
->Put(WriteOptions(), rnd
.RandomString(kKeySize
),
553 rnd
.RandomString(kValueSize
)));
555 for (int i
= 0; i
< kDeletionsPerTable
; i
++) {
556 ASSERT_OK(db_
->Delete(WriteOptions(), rnd
.RandomString(kKeySize
)));
558 for (int i
= 0; i
< kMergeOperandsPerTable
; i
++) {
559 ASSERT_OK(db_
->Merge(WriteOptions(), rnd
.RandomString(kKeySize
),
560 rnd
.RandomString(kValueSize
)));
562 for (int i
= 0; i
< kRangeDeletionsPerTable
; i
++) {
563 std::string start
= rnd
.RandomString(kKeySize
);
564 std::string end
= start
;
565 end
.resize(kValueSize
);
566 ASSERT_OK(db_
->DeleteRange(WriteOptions(), db_
->DefaultColumnFamily(),
569 ASSERT_OK(db_
->Flush(FlushOptions()));
570 ASSERT_OK(db_
->CompactRange(CompactRangeOptions(), nullptr, nullptr));
// Accumulate the per-level properties into sum_tp.
571 ResetTableProperties(&sum_tp
);
572 for (int level
= 0; level
< kMaxLevel
; ++level
) {
574 DB::Properties::kAggregatedTablePropertiesAtLevel
+ ToString(level
),
575 &level_tp_strings
[level
]);
576 ParseTablePropertiesString(level_tp_strings
[level
], &level_tps
[level
]);
577 sum_tp
.data_size
+= level_tps
[level
].data_size
;
578 sum_tp
.index_size
+= level_tps
[level
].index_size
;
579 sum_tp
.filter_size
+= level_tps
[level
].filter_size
;
580 sum_tp
.raw_key_size
+= level_tps
[level
].raw_key_size
;
581 sum_tp
.raw_value_size
+= level_tps
[level
].raw_value_size
;
582 sum_tp
.num_data_blocks
+= level_tps
[level
].num_data_blocks
;
583 sum_tp
.num_entries
+= level_tps
[level
].num_entries
;
584 sum_tp
.num_deletions
+= level_tps
[level
].num_deletions
;
585 sum_tp
.num_merge_operands
+= level_tps
[level
].num_merge_operands
;
586 sum_tp
.num_range_deletions
+= level_tps
[level
].num_range_deletions
;
// Per-level sums must exactly match the whole-DB aggregate.
588 db_
->GetProperty(DB::Properties::kAggregatedTableProperties
, &tp_string
);
589 ParseTablePropertiesString(tp_string
, &tp
);
590 bool index_key_is_user_key
= tp
.index_key_is_user_key
> 0;
591 bool value_is_delta_encoded
= tp
.index_value_is_delta_encoded
> 0;
592 ASSERT_EQ(sum_tp
.data_size
, tp
.data_size
);
593 ASSERT_EQ(sum_tp
.index_size
, tp
.index_size
);
594 ASSERT_EQ(sum_tp
.filter_size
, tp
.filter_size
);
595 ASSERT_EQ(sum_tp
.raw_key_size
, tp
.raw_key_size
);
596 ASSERT_EQ(sum_tp
.raw_value_size
, tp
.raw_value_size
);
597 ASSERT_EQ(sum_tp
.num_data_blocks
, tp
.num_data_blocks
);
598 ASSERT_EQ(sum_tp
.num_entries
, tp
.num_entries
);
599 ASSERT_EQ(sum_tp
.num_deletions
, tp
.num_deletions
);
600 ASSERT_EQ(sum_tp
.num_merge_operands
, tp
.num_merge_operands
);
601 ASSERT_EQ(sum_tp
.num_range_deletions
, tp
.num_range_deletions
);
603 GetExpectedTableProperties(
604 &expected_tp
, kKeySize
, kValueSize
, kPutsPerTable
, kDeletionsPerTable
,
605 kMergeOperandsPerTable
, kRangeDeletionsPerTable
, table
,
606 kBloomBitsPerKey
, table_options
.block_size
, index_key_is_user_key
,
607 value_is_delta_encoded
);
608 // Gives larger bias here as index block size, filter block size,
609 // and data block size become much harder to estimate in this test.
610 VerifyTableProperties(expected_tp
, tp
, 0.5, 0.4, 0.4, 0.25);
// TEST: NumImmutableMemTable — tracks "rocksdb.num-immutable-mem-table" and
// related memtable properties (entry/delete counts, active-memtable size,
// estimate-num-keys) as large Puts roll the active memtable into immutable
// ones, verifying via perf context how many memtables each Get touched.
// Runs under every compaction-option configuration (ChangeCompactOptions).
// NOTE(review): line-mangled with dropped lines (original numbering skips);
// kept byte-identical — restore from upstream db/db_properties_test.cc.
615 TEST_F(DBPropertiesTest
, NumImmutableMemTable
) {
617 Options options
= CurrentOptions();
618 WriteOptions writeOpt
= WriteOptions();
619 writeOpt
.disableWAL
= true;
620 options
.max_write_buffer_number
= 4;
621 options
.min_write_buffer_number_to_merge
= 3;
622 options
.write_buffer_size
= 1000000;
623 options
.max_write_buffer_size_to_maintain
=
624 5 * static_cast<int64_t>(options
.write_buffer_size
);
625 CreateAndReopenWithCF({"pikachu"}, options
);
627 std::string
big_value(1000000 * 2, 'x');
630 SetPerfLevel(kEnableTime
);
631 ASSERT_TRUE(GetPerfLevel() == kEnableTime
);
// First big Put: one active memtable, no immutable ones yet.
633 ASSERT_OK(dbfull()->Put(writeOpt
, handles_
[1], "k1", big_value
));
634 ASSERT_TRUE(dbfull()->GetProperty(handles_
[1],
635 "rocksdb.num-immutable-mem-table", &num
));
637 ASSERT_TRUE(dbfull()->GetProperty(
638 handles_
[1], DB::Properties::kNumImmutableMemTableFlushed
, &num
));
640 ASSERT_TRUE(dbfull()->GetProperty(
641 handles_
[1], "rocksdb.num-entries-active-mem-table", &num
));
643 get_perf_context()->Reset();
645 ASSERT_EQ(1, static_cast<int>(get_perf_context()->get_from_memtable_count
));
// Second big Put rolls the first memtable to immutable.
647 ASSERT_OK(dbfull()->Put(writeOpt
, handles_
[1], "k2", big_value
));
648 ASSERT_TRUE(dbfull()->GetProperty(handles_
[1],
649 "rocksdb.num-immutable-mem-table", &num
));
651 ASSERT_TRUE(dbfull()->GetProperty(
652 handles_
[1], "rocksdb.num-entries-active-mem-table", &num
));
654 ASSERT_TRUE(dbfull()->GetProperty(
655 handles_
[1], "rocksdb.num-entries-imm-mem-tables", &num
));
658 get_perf_context()->Reset();
660 ASSERT_EQ(2, static_cast<int>(get_perf_context()->get_from_memtable_count
));
661 get_perf_context()->Reset();
663 ASSERT_EQ(1, static_cast<int>(get_perf_context()->get_from_memtable_count
));
// Third big Put: two immutable memtables plus the active one.
665 ASSERT_OK(dbfull()->Put(writeOpt
, handles_
[1], "k3", big_value
));
666 ASSERT_TRUE(dbfull()->GetProperty(
667 handles_
[1], "rocksdb.cur-size-active-mem-table", &num
));
668 ASSERT_TRUE(dbfull()->GetProperty(handles_
[1],
669 "rocksdb.num-immutable-mem-table", &num
));
671 ASSERT_TRUE(dbfull()->GetProperty(
672 handles_
[1], "rocksdb.num-entries-active-mem-table", &num
));
674 ASSERT_TRUE(dbfull()->GetProperty(
675 handles_
[1], "rocksdb.num-entries-imm-mem-tables", &num
));
677 get_perf_context()->Reset();
679 ASSERT_EQ(2, static_cast<int>(get_perf_context()->get_from_memtable_count
));
680 get_perf_context()->Reset();
682 ASSERT_EQ(1, static_cast<int>(get_perf_context()->get_from_memtable_count
));
683 get_perf_context()->Reset();
685 ASSERT_EQ(3, static_cast<int>(get_perf_context()->get_from_memtable_count
));
688 ASSERT_TRUE(dbfull()->GetProperty(handles_
[1],
689 "rocksdb.num-immutable-mem-table", &num
));
691 ASSERT_TRUE(dbfull()->GetProperty(
692 handles_
[1], DB::Properties::kNumImmutableMemTableFlushed
, &num
));
694 ASSERT_TRUE(dbfull()->GetIntProperty(
695 handles_
[1], "rocksdb.cur-size-active-mem-table", &value
));
696 // "192" is the size of the metadata of two empty skiplists, this would
697 // break if we change the default skiplist implementation
698 ASSERT_GE(value
, 192);
701 uint64_t base_total_size
;
702 ASSERT_TRUE(dbfull()->GetIntProperty(
703 handles_
[1], "rocksdb.estimate-num-keys", &base_total_size
));
// Deletes and re-puts: check delete/entry counters in active memtable.
705 ASSERT_OK(dbfull()->Delete(writeOpt
, handles_
[1], "k2"));
706 ASSERT_OK(dbfull()->Put(writeOpt
, handles_
[1], "k3", ""));
707 ASSERT_OK(dbfull()->Delete(writeOpt
, handles_
[1], "k3"));
708 ASSERT_TRUE(dbfull()->GetIntProperty(
709 handles_
[1], "rocksdb.num-deletes-active-mem-table", &int_num
));
710 ASSERT_EQ(int_num
, 2U);
711 ASSERT_TRUE(dbfull()->GetIntProperty(
712 handles_
[1], "rocksdb.num-entries-active-mem-table", &int_num
));
713 ASSERT_EQ(int_num
, 3U);
715 ASSERT_OK(dbfull()->Put(writeOpt
, handles_
[1], "k2", big_value
));
716 ASSERT_OK(dbfull()->Put(writeOpt
, handles_
[1], "k2", big_value
));
717 ASSERT_TRUE(dbfull()->GetIntProperty(
718 handles_
[1], "rocksdb.num-entries-imm-mem-tables", &int_num
));
719 ASSERT_EQ(int_num
, 4U);
720 ASSERT_TRUE(dbfull()->GetIntProperty(
721 handles_
[1], "rocksdb.num-deletes-imm-mem-tables", &int_num
));
722 ASSERT_EQ(int_num
, 2U);
724 ASSERT_TRUE(dbfull()->GetIntProperty(
725 handles_
[1], "rocksdb.estimate-num-keys", &int_num
));
726 ASSERT_EQ(int_num
, base_total_size
+ 1);
728 SetPerfLevel(kDisable
);
729 ASSERT_TRUE(GetPerfLevel() == kDisable
);
730 } while (ChangeCompactOptions());
// TEST: DISABLED_GetProperty (flaky, disabled) — blocks the HIGH/LOW
// background thread pools with sleeping tasks so flush/compaction state is
// deterministic, then walks through memtable/flush/compaction/num-keys
// properties via both GetProperty and GetIntProperty; finally exercises
// "rocksdb.estimate-table-readers-mem" around reopen and
// "rocksdb.num-live-versions" with iterators pinning old versions.
// NOTE(review): line-mangled with dropped lines (original numbering skips);
// kept byte-identical — restore from upstream db/db_properties_test.cc.
733 // TODO(techdept) : Disabled flaky test #12863555
734 TEST_F(DBPropertiesTest
, DISABLED_GetProperty
) {
735 // Set sizes to both background thread pool to be 1 and block them.
736 env_
->SetBackgroundThreads(1, Env::HIGH
);
737 env_
->SetBackgroundThreads(1, Env::LOW
);
738 test::SleepingBackgroundTask sleeping_task_low
;
739 env_
->Schedule(&test::SleepingBackgroundTask::DoSleepTask
, &sleeping_task_low
,
741 test::SleepingBackgroundTask sleeping_task_high
;
742 env_
->Schedule(&test::SleepingBackgroundTask::DoSleepTask
,
743 &sleeping_task_high
, Env::Priority::HIGH
);
745 Options options
= CurrentOptions();
746 WriteOptions writeOpt
= WriteOptions();
747 writeOpt
.disableWAL
= true;
748 options
.compaction_style
= kCompactionStyleUniversal
;
749 options
.level0_file_num_compaction_trigger
= 1;
750 options
.compaction_options_universal
.size_ratio
= 50;
751 options
.max_background_compactions
= 1;
752 options
.max_background_flushes
= 1;
753 options
.max_write_buffer_number
= 10;
754 options
.min_write_buffer_number_to_merge
= 1;
755 options
.max_write_buffer_size_to_maintain
= 0;
756 options
.write_buffer_size
= 1000000;
759 std::string
big_value(1000000 * 2, 'x');
762 SetPerfLevel(kEnableTime
);
// Empty DB: no table readers loaded, no live data.
765 dbfull()->GetIntProperty("rocksdb.estimate-table-readers-mem", &int_num
));
766 ASSERT_EQ(int_num
, 0U);
768 dbfull()->GetIntProperty("rocksdb.estimate-live-data-size", &int_num
));
769 ASSERT_EQ(int_num
, 0U);
771 ASSERT_OK(dbfull()->Put(writeOpt
, "k1", big_value
));
772 ASSERT_TRUE(dbfull()->GetProperty("rocksdb.num-immutable-mem-table", &num
));
774 ASSERT_TRUE(dbfull()->GetProperty("rocksdb.mem-table-flush-pending", &num
));
776 ASSERT_TRUE(dbfull()->GetProperty("rocksdb.compaction-pending", &num
));
778 ASSERT_TRUE(dbfull()->GetProperty("rocksdb.estimate-num-keys", &num
));
780 get_perf_context()->Reset();
782 ASSERT_OK(dbfull()->Put(writeOpt
, "k2", big_value
));
783 ASSERT_TRUE(dbfull()->GetProperty("rocksdb.num-immutable-mem-table", &num
));
785 ASSERT_OK(dbfull()->Delete(writeOpt
, "k-non-existing"));
786 ASSERT_OK(dbfull()->Put(writeOpt
, "k3", big_value
));
787 ASSERT_TRUE(dbfull()->GetProperty("rocksdb.num-immutable-mem-table", &num
));
789 ASSERT_TRUE(dbfull()->GetProperty("rocksdb.mem-table-flush-pending", &num
));
791 ASSERT_TRUE(dbfull()->GetProperty("rocksdb.compaction-pending", &num
));
793 ASSERT_TRUE(dbfull()->GetProperty("rocksdb.estimate-num-keys", &num
));
795 // Verify the same set of properties through GetIntProperty
797 dbfull()->GetIntProperty("rocksdb.num-immutable-mem-table", &int_num
));
798 ASSERT_EQ(int_num
, 2U);
800 dbfull()->GetIntProperty("rocksdb.mem-table-flush-pending", &int_num
));
801 ASSERT_EQ(int_num
, 1U);
802 ASSERT_TRUE(dbfull()->GetIntProperty("rocksdb.compaction-pending", &int_num
));
803 ASSERT_EQ(int_num
, 0U);
804 ASSERT_TRUE(dbfull()->GetIntProperty("rocksdb.estimate-num-keys", &int_num
));
805 ASSERT_EQ(int_num
, 2U);
808 dbfull()->GetIntProperty("rocksdb.estimate-table-readers-mem", &int_num
));
809 ASSERT_EQ(int_num
, 0U);
// Unblock the flush (HIGH) pool and let memtables flush.
811 sleeping_task_high
.WakeUp();
812 sleeping_task_high
.WaitUntilDone();
813 dbfull()->TEST_WaitForFlushMemTable();
815 ASSERT_OK(dbfull()->Put(writeOpt
, "k4", big_value
));
816 ASSERT_OK(dbfull()->Put(writeOpt
, "k5", big_value
));
817 dbfull()->TEST_WaitForFlushMemTable();
818 ASSERT_TRUE(dbfull()->GetProperty("rocksdb.mem-table-flush-pending", &num
));
820 ASSERT_TRUE(dbfull()->GetProperty("rocksdb.compaction-pending", &num
));
822 ASSERT_TRUE(dbfull()->GetProperty("rocksdb.estimate-num-keys", &num
));
826 dbfull()->GetIntProperty("rocksdb.estimate-table-readers-mem", &int_num
));
827 ASSERT_GT(int_num
, 0U);
// Unblock the compaction (LOW) pool.
829 sleeping_task_low
.WakeUp();
830 sleeping_task_low
.WaitUntilDone();
832 // Wait for compaction to be done. This is important because otherwise RocksDB
833 // might schedule a compaction when reopening the database, failing assertion
835 ASSERT_OK(dbfull()->TEST_WaitForCompact());
836 options
.max_open_files
= 10;
838 // After reopening, no table reader is loaded, so no memory for table readers
840 dbfull()->GetIntProperty("rocksdb.estimate-table-readers-mem", &int_num
));
841 ASSERT_EQ(int_num
, 0U); // (A)
842 ASSERT_TRUE(dbfull()->GetIntProperty("rocksdb.estimate-num-keys", &int_num
));
843 ASSERT_GT(int_num
, 0U);
845 // After reading a key, at least one table reader is loaded.
848 dbfull()->GetIntProperty("rocksdb.estimate-table-readers-mem", &int_num
));
849 ASSERT_GT(int_num
, 0U);
851 // Test rocksdb.num-live-versions
853 options
.level0_file_num_compaction_trigger
= 20;
856 dbfull()->GetIntProperty("rocksdb.num-live-versions", &int_num
));
857 ASSERT_EQ(int_num
, 1U);
859 // Use an iterator to hold current version
860 std::unique_ptr
<Iterator
> iter1(dbfull()->NewIterator(ReadOptions()));
862 ASSERT_OK(dbfull()->Put(writeOpt
, "k6", big_value
));
865 dbfull()->GetIntProperty("rocksdb.num-live-versions", &int_num
));
866 ASSERT_EQ(int_num
, 2U);
868 // Use an iterator to hold current version
869 std::unique_ptr
<Iterator
> iter2(dbfull()->NewIterator(ReadOptions()));
871 ASSERT_OK(dbfull()->Put(writeOpt
, "k7", big_value
));
874 dbfull()->GetIntProperty("rocksdb.num-live-versions", &int_num
));
875 ASSERT_EQ(int_num
, 3U);
// Releasing the iterators drops the pinned versions one at a time.
879 dbfull()->GetIntProperty("rocksdb.num-live-versions", &int_num
));
880 ASSERT_EQ(int_num
, 2U);
884 dbfull()->GetIntProperty("rocksdb.num-live-versions", &int_num
));
885 ASSERT_EQ(int_num
, 1U);
889 TEST_F(DBPropertiesTest
, ApproximateMemoryUsage
) {
890 const int kNumRounds
= 10;
891 // TODO(noetzli) kFlushesPerRound does not really correlate with how many
893 const int kFlushesPerRound
= 10;
894 const int kWritesPerFlush
= 10;
895 const int kKeySize
= 100;
896 const int kValueSize
= 1000;
898 options
.write_buffer_size
= 1000; // small write buffer
899 options
.min_write_buffer_number_to_merge
= 4;
900 options
.compression
= kNoCompression
;
901 options
.create_if_missing
= true;
902 options
= CurrentOptions(options
);
903 DestroyAndReopen(options
);
907 std::vector
<Iterator
*> iters
;
910 uint64_t unflushed_mem
;
912 uint64_t prev_all_mem
;
914 // Phase 0. The verify the initial value of all these properties are the same
915 // as we have no mem-tables.
916 dbfull()->GetIntProperty("rocksdb.cur-size-active-mem-table", &active_mem
);
917 dbfull()->GetIntProperty("rocksdb.cur-size-all-mem-tables", &unflushed_mem
);
918 dbfull()->GetIntProperty("rocksdb.size-all-mem-tables", &all_mem
);
919 ASSERT_EQ(all_mem
, active_mem
);
920 ASSERT_EQ(all_mem
, unflushed_mem
);
922 // Phase 1. Simply issue Put() and expect "cur-size-all-mem-tables" equals to
923 // "size-all-mem-tables"
924 for (int r
= 0; r
< kNumRounds
; ++r
) {
925 for (int f
= 0; f
< kFlushesPerRound
; ++f
) {
926 for (int w
= 0; w
< kWritesPerFlush
; ++w
) {
928 Put(rnd
.RandomString(kKeySize
), rnd
.RandomString(kValueSize
)));
931 // Make sure that there is no flush between getting the two properties.
932 ASSERT_OK(dbfull()->TEST_WaitForFlushMemTable());
933 dbfull()->GetIntProperty("rocksdb.cur-size-all-mem-tables", &unflushed_mem
);
934 dbfull()->GetIntProperty("rocksdb.size-all-mem-tables", &all_mem
);
935 // in no iterator case, these two number should be the same.
936 ASSERT_EQ(unflushed_mem
, all_mem
);
938 prev_all_mem
= all_mem
;
940 // Phase 2. Keep issuing Put() but also create new iterators. This time we
941 // expect "size-all-mem-tables" > "cur-size-all-mem-tables".
942 for (int r
= 0; r
< kNumRounds
; ++r
) {
943 iters
.push_back(db_
->NewIterator(ReadOptions()));
944 for (int f
= 0; f
< kFlushesPerRound
; ++f
) {
945 for (int w
= 0; w
< kWritesPerFlush
; ++w
) {
947 Put(rnd
.RandomString(kKeySize
), rnd
.RandomString(kValueSize
)));
950 // Force flush to prevent flush from happening between getting the
951 // properties or after getting the properties and before the new round.
954 // In the second round, add iterators.
955 dbfull()->GetIntProperty("rocksdb.cur-size-active-mem-table", &active_mem
);
956 dbfull()->GetIntProperty("rocksdb.cur-size-all-mem-tables", &unflushed_mem
);
957 dbfull()->GetIntProperty("rocksdb.size-all-mem-tables", &all_mem
);
958 ASSERT_GT(all_mem
, active_mem
);
959 ASSERT_GT(all_mem
, unflushed_mem
);
960 ASSERT_GT(all_mem
, prev_all_mem
);
961 prev_all_mem
= all_mem
;
964 // Phase 3. Delete iterators and expect "size-all-mem-tables" shrinks
965 // whenever we release an iterator.
966 for (auto* iter
: iters
) {
967 ASSERT_OK(iter
->status());
969 dbfull()->GetIntProperty("rocksdb.size-all-mem-tables", &all_mem
);
970 // Expect the size shrinking
971 ASSERT_LT(all_mem
, prev_all_mem
);
972 prev_all_mem
= all_mem
;
975 // Expect all these three counters to be the same.
976 dbfull()->GetIntProperty("rocksdb.cur-size-active-mem-table", &active_mem
);
977 dbfull()->GetIntProperty("rocksdb.cur-size-all-mem-tables", &unflushed_mem
);
978 dbfull()->GetIntProperty("rocksdb.size-all-mem-tables", &all_mem
);
979 ASSERT_EQ(active_mem
, unflushed_mem
);
980 ASSERT_EQ(unflushed_mem
, all_mem
);
982 // Phase 5. Reopen, and expect all these three counters to be the same again.
984 dbfull()->GetIntProperty("rocksdb.cur-size-active-mem-table", &active_mem
);
985 dbfull()->GetIntProperty("rocksdb.cur-size-all-mem-tables", &unflushed_mem
);
986 dbfull()->GetIntProperty("rocksdb.size-all-mem-tables", &all_mem
);
987 ASSERT_EQ(active_mem
, unflushed_mem
);
988 ASSERT_EQ(unflushed_mem
, all_mem
);
991 TEST_F(DBPropertiesTest
, EstimatePendingCompBytes
) {
992 // Set sizes to both background thread pool to be 1 and block them.
993 env_
->SetBackgroundThreads(1, Env::HIGH
);
994 env_
->SetBackgroundThreads(1, Env::LOW
);
995 test::SleepingBackgroundTask sleeping_task_low
;
996 env_
->Schedule(&test::SleepingBackgroundTask::DoSleepTask
, &sleeping_task_low
,
999 Options options
= CurrentOptions();
1000 WriteOptions writeOpt
= WriteOptions();
1001 writeOpt
.disableWAL
= true;
1002 options
.compaction_style
= kCompactionStyleLevel
;
1003 options
.level0_file_num_compaction_trigger
= 2;
1004 options
.max_background_compactions
= 1;
1005 options
.max_background_flushes
= 1;
1006 options
.max_write_buffer_number
= 10;
1007 options
.min_write_buffer_number_to_merge
= 1;
1008 options
.max_write_buffer_size_to_maintain
= 0;
1009 options
.write_buffer_size
= 1000000;
1012 std::string
big_value(1000000 * 2, 'x');
1016 ASSERT_OK(dbfull()->Put(writeOpt
, "k1", big_value
));
1018 ASSERT_TRUE(dbfull()->GetIntProperty(
1019 "rocksdb.estimate-pending-compaction-bytes", &int_num
));
1020 ASSERT_EQ(int_num
, 0U);
1022 ASSERT_OK(dbfull()->Put(writeOpt
, "k2", big_value
));
1024 ASSERT_TRUE(dbfull()->GetIntProperty(
1025 "rocksdb.estimate-pending-compaction-bytes", &int_num
));
1026 ASSERT_GT(int_num
, 0U);
1028 ASSERT_OK(dbfull()->Put(writeOpt
, "k3", big_value
));
1030 ASSERT_TRUE(dbfull()->GetIntProperty(
1031 "rocksdb.estimate-pending-compaction-bytes", &int_num
));
1032 ASSERT_GT(int_num
, 0U);
1034 sleeping_task_low
.WakeUp();
1035 sleeping_task_low
.WaitUntilDone();
1037 ASSERT_OK(dbfull()->TEST_WaitForCompact());
1038 ASSERT_TRUE(dbfull()->GetIntProperty(
1039 "rocksdb.estimate-pending-compaction-bytes", &int_num
));
1040 ASSERT_EQ(int_num
, 0U);
1043 TEST_F(DBPropertiesTest
, EstimateCompressionRatio
) {
1044 if (!Snappy_Supported()) {
1047 const int kNumL0Files
= 3;
1048 const int kNumEntriesPerFile
= 1000;
1050 Options options
= CurrentOptions();
1051 options
.compression_per_level
= {kNoCompression
, kSnappyCompression
};
1052 options
.disable_auto_compactions
= true;
1053 options
.num_levels
= 2;
1056 // compression ratio is -1.0 when no open files at level
1057 ASSERT_EQ(CompressionRatioAtLevel(0), -1.0);
1059 const std::string
kVal(100, 'a');
1060 for (int i
= 0; i
< kNumL0Files
; ++i
) {
1061 for (int j
= 0; j
< kNumEntriesPerFile
; ++j
) {
1062 // Put common data ("key") at end to prevent delta encoding from
1063 // compressing the key effectively
1064 std::string key
= ToString(i
) + ToString(j
) + "key";
1065 ASSERT_OK(dbfull()->Put(WriteOptions(), key
, kVal
));
1070 // no compression at L0, so ratio is less than one
1071 ASSERT_LT(CompressionRatioAtLevel(0), 1.0);
1072 ASSERT_GT(CompressionRatioAtLevel(0), 0.0);
1073 ASSERT_EQ(CompressionRatioAtLevel(1), -1.0);
1075 ASSERT_OK(dbfull()->TEST_CompactRange(0, nullptr, nullptr));
1077 ASSERT_EQ(CompressionRatioAtLevel(0), -1.0);
1078 // Data at L1 should be highly compressed thanks to Snappy and redundant data
1079 // in values (ratio is 12.846 as of 4/19/2016).
1080 ASSERT_GT(CompressionRatioAtLevel(1), 10.0);
1083 #endif // ROCKSDB_LITE
1085 class CountingUserTblPropCollector
: public TablePropertiesCollector
{
1087 const char* Name() const override
{ return "CountingUserTblPropCollector"; }
1089 Status
Finish(UserCollectedProperties
* properties
) override
{
1090 std::string encoded
;
1091 PutVarint32(&encoded
, count_
);
1092 *properties
= UserCollectedProperties
{
1093 {"CountingUserTblPropCollector", message_
}, {"Count", encoded
},
1095 return Status::OK();
1098 Status
AddUserKey(const Slice
& /*user_key*/, const Slice
& /*value*/,
1099 EntryType
/*type*/, SequenceNumber
/*seq*/,
1100 uint64_t /*file_size*/) override
{
1102 return Status::OK();
1105 UserCollectedProperties
GetReadableProperties() const override
{
1106 return UserCollectedProperties
{};
1110 std::string message_
= "Rocksdb";
1111 uint32_t count_
= 0;
1114 class CountingUserTblPropCollectorFactory
1115 : public TablePropertiesCollectorFactory
{
1117 explicit CountingUserTblPropCollectorFactory(
1118 uint32_t expected_column_family_id
)
1119 : expected_column_family_id_(expected_column_family_id
),
1121 TablePropertiesCollector
* CreateTablePropertiesCollector(
1122 TablePropertiesCollectorFactory::Context context
) override
{
1123 EXPECT_EQ(expected_column_family_id_
, context
.column_family_id
);
1125 return new CountingUserTblPropCollector();
1127 const char* Name() const override
{
1128 return "CountingUserTblPropCollectorFactory";
1130 void set_expected_column_family_id(uint32_t v
) {
1131 expected_column_family_id_
= v
;
1133 uint32_t expected_column_family_id_
;
1134 uint32_t num_created_
;
1137 class CountingDeleteTabPropCollector
: public TablePropertiesCollector
{
1139 const char* Name() const override
{ return "CountingDeleteTabPropCollector"; }
1141 Status
AddUserKey(const Slice
& /*user_key*/, const Slice
& /*value*/,
1142 EntryType type
, SequenceNumber
/*seq*/,
1143 uint64_t /*file_size*/) override
{
1144 if (type
== kEntryDelete
) {
1147 return Status::OK();
1150 bool NeedCompact() const override
{ return num_deletes_
> 10; }
1152 UserCollectedProperties
GetReadableProperties() const override
{
1153 return UserCollectedProperties
{};
1156 Status
Finish(UserCollectedProperties
* properties
) override
{
1158 UserCollectedProperties
{{"num_delete", ToString(num_deletes_
)}};
1159 return Status::OK();
1163 uint32_t num_deletes_
= 0;
1166 class CountingDeleteTabPropCollectorFactory
1167 : public TablePropertiesCollectorFactory
{
1169 TablePropertiesCollector
* CreateTablePropertiesCollector(
1170 TablePropertiesCollectorFactory::Context
/*context*/) override
{
1171 return new CountingDeleteTabPropCollector();
1173 const char* Name() const override
{
1174 return "CountingDeleteTabPropCollectorFactory";
1178 #ifndef ROCKSDB_LITE
1179 TEST_F(DBPropertiesTest
, GetUserDefinedTableProperties
) {
1180 Options options
= CurrentOptions();
1181 options
.level0_file_num_compaction_trigger
= (1 << 30);
1182 options
.table_properties_collector_factories
.resize(1);
1183 std::shared_ptr
<CountingUserTblPropCollectorFactory
> collector_factory
=
1184 std::make_shared
<CountingUserTblPropCollectorFactory
>(0);
1185 options
.table_properties_collector_factories
[0] = collector_factory
;
1188 for (int table
= 0; table
< 4; ++table
) {
1189 for (int i
= 0; i
< 10 + table
; ++i
) {
1190 ASSERT_OK(db_
->Put(WriteOptions(), ToString(table
* 100 + i
), "val"));
1192 ASSERT_OK(db_
->Flush(FlushOptions()));
1195 TablePropertiesCollection props
;
1196 ASSERT_OK(db_
->GetPropertiesOfAllTables(&props
));
1197 ASSERT_EQ(4U, props
.size());
1199 for (const auto& item
: props
) {
1200 auto& user_collected
= item
.second
->user_collected_properties
;
1201 ASSERT_TRUE(user_collected
.find("CountingUserTblPropCollector") !=
1202 user_collected
.end());
1203 ASSERT_EQ(user_collected
.at("CountingUserTblPropCollector"), "Rocksdb");
1204 ASSERT_TRUE(user_collected
.find("Count") != user_collected
.end());
1205 Slice
key(user_collected
.at("Count"));
1207 ASSERT_TRUE(GetVarint32(&key
, &count
));
1210 ASSERT_EQ(10u + 11u + 12u + 13u, sum
);
1212 ASSERT_GT(collector_factory
->num_created_
, 0U);
1213 collector_factory
->num_created_
= 0;
1214 ASSERT_OK(dbfull()->TEST_CompactRange(0, nullptr, nullptr));
1215 ASSERT_GT(collector_factory
->num_created_
, 0U);
1217 #endif // ROCKSDB_LITE
1219 TEST_F(DBPropertiesTest
, UserDefinedTablePropertiesContext
) {
1220 Options options
= CurrentOptions();
1221 options
.level0_file_num_compaction_trigger
= 3;
1222 options
.table_properties_collector_factories
.resize(1);
1223 std::shared_ptr
<CountingUserTblPropCollectorFactory
> collector_factory
=
1224 std::make_shared
<CountingUserTblPropCollectorFactory
>(1);
1225 options
.table_properties_collector_factories
[0] = collector_factory
,
1226 CreateAndReopenWithCF({"pikachu"}, options
);
1228 for (int table
= 0; table
< 2; ++table
) {
1229 for (int i
= 0; i
< 10 + table
; ++i
) {
1230 ASSERT_OK(Put(1, ToString(table
* 100 + i
), "val"));
1232 ASSERT_OK(Flush(1));
1234 ASSERT_GT(collector_factory
->num_created_
, 0U);
1236 collector_factory
->num_created_
= 0;
1237 // Trigger automatic compactions.
1238 for (int table
= 0; table
< 3; ++table
) {
1239 for (int i
= 0; i
< 10 + table
; ++i
) {
1240 ASSERT_OK(Put(1, ToString(table
* 100 + i
), "val"));
1242 ASSERT_OK(Flush(1));
1243 ASSERT_OK(dbfull()->TEST_WaitForCompact());
1245 ASSERT_GT(collector_factory
->num_created_
, 0U);
1247 collector_factory
->num_created_
= 0;
1248 ASSERT_OK(dbfull()->TEST_CompactRange(0, nullptr, nullptr, handles_
[1]));
1249 ASSERT_GT(collector_factory
->num_created_
, 0U);
1251 // Come back to write to default column family
1252 collector_factory
->num_created_
= 0;
1253 collector_factory
->set_expected_column_family_id(0); // default CF
1254 // Create 4 tables in default column family
1255 for (int table
= 0; table
< 2; ++table
) {
1256 for (int i
= 0; i
< 10 + table
; ++i
) {
1257 ASSERT_OK(Put(ToString(table
* 100 + i
), "val"));
1261 ASSERT_GT(collector_factory
->num_created_
, 0U);
1263 collector_factory
->num_created_
= 0;
1264 // Trigger automatic compactions.
1265 for (int table
= 0; table
< 3; ++table
) {
1266 for (int i
= 0; i
< 10 + table
; ++i
) {
1267 ASSERT_OK(Put(ToString(table
* 100 + i
), "val"));
1270 ASSERT_OK(dbfull()->TEST_WaitForCompact());
1272 ASSERT_GT(collector_factory
->num_created_
, 0U);
1274 collector_factory
->num_created_
= 0;
1275 ASSERT_OK(dbfull()->TEST_CompactRange(0, nullptr, nullptr));
1276 ASSERT_GT(collector_factory
->num_created_
, 0U);
1279 #ifndef ROCKSDB_LITE
1280 TEST_F(DBPropertiesTest
, TablePropertiesNeedCompactTest
) {
1284 options
.create_if_missing
= true;
1285 options
.write_buffer_size
= 4096;
1286 options
.max_write_buffer_number
= 8;
1287 options
.level0_file_num_compaction_trigger
= 2;
1288 options
.level0_slowdown_writes_trigger
= 2;
1289 options
.level0_stop_writes_trigger
= 4;
1290 options
.target_file_size_base
= 2048;
1291 options
.max_bytes_for_level_base
= 10240;
1292 options
.max_bytes_for_level_multiplier
= 4;
1293 options
.soft_pending_compaction_bytes_limit
= 1024 * 1024;
1294 options
.num_levels
= 8;
1297 std::shared_ptr
<TablePropertiesCollectorFactory
> collector_factory
=
1298 std::make_shared
<CountingDeleteTabPropCollectorFactory
>();
1299 options
.table_properties_collector_factories
.resize(1);
1300 options
.table_properties_collector_factories
[0] = collector_factory
;
1302 DestroyAndReopen(options
);
1304 const int kMaxKey
= 1000;
1305 for (int i
= 0; i
< kMaxKey
; i
++) {
1306 ASSERT_OK(Put(Key(i
), rnd
.RandomString(102)));
1307 ASSERT_OK(Put(Key(kMaxKey
+ i
), rnd
.RandomString(102)));
1310 ASSERT_OK(dbfull()->TEST_WaitForCompact());
1311 if (NumTableFilesAtLevel(0) == 1) {
1312 // Clear Level 0 so that when later flush a file with deletions,
1313 // we don't trigger an organic compaction.
1314 ASSERT_OK(Put(Key(0), ""));
1315 ASSERT_OK(Put(Key(kMaxKey
* 2), ""));
1317 ASSERT_OK(dbfull()->TEST_WaitForCompact());
1319 ASSERT_EQ(NumTableFilesAtLevel(0), 0);
1323 std::unique_ptr
<Iterator
> iter(db_
->NewIterator(ReadOptions()));
1324 iter
->Seek(Key(kMaxKey
- 100));
1325 while (iter
->Valid() && iter
->key().compare(Key(kMaxKey
+ 100)) < 0) {
1329 ASSERT_OK(iter
->status());
1333 ASSERT_OK(Delete(Key(0)));
1334 for (int i
= kMaxKey
- 100; i
< kMaxKey
+ 100; i
++) {
1335 ASSERT_OK(Delete(Key(i
)));
1337 ASSERT_OK(Delete(Key(kMaxKey
* 2)));
1340 ASSERT_OK(dbfull()->TEST_WaitForCompact());
1343 SetPerfLevel(kEnableCount
);
1344 get_perf_context()->Reset();
1346 std::unique_ptr
<Iterator
> iter(db_
->NewIterator(ReadOptions()));
1347 iter
->Seek(Key(kMaxKey
- 100));
1348 while (iter
->Valid() && iter
->key().compare(Key(kMaxKey
+ 100)) < 0) {
1351 ASSERT_OK(iter
->status());
1353 ASSERT_LT(get_perf_context()->internal_delete_skipped_count
, 30u);
1354 ASSERT_LT(get_perf_context()->internal_key_skipped_count
, 30u);
1355 SetPerfLevel(kDisable
);
1359 TEST_F(DBPropertiesTest
, NeedCompactHintPersistentTest
) {
1363 options
.create_if_missing
= true;
1364 options
.max_write_buffer_number
= 8;
1365 options
.level0_file_num_compaction_trigger
= 10;
1366 options
.level0_slowdown_writes_trigger
= 10;
1367 options
.level0_stop_writes_trigger
= 10;
1368 options
.disable_auto_compactions
= true;
1371 std::shared_ptr
<TablePropertiesCollectorFactory
> collector_factory
=
1372 std::make_shared
<CountingDeleteTabPropCollectorFactory
>();
1373 options
.table_properties_collector_factories
.resize(1);
1374 options
.table_properties_collector_factories
[0] = collector_factory
;
1376 DestroyAndReopen(options
);
1378 const int kMaxKey
= 100;
1379 for (int i
= 0; i
< kMaxKey
; i
++) {
1380 ASSERT_OK(Put(Key(i
), ""));
1383 ASSERT_OK(dbfull()->TEST_WaitForFlushMemTable());
1385 for (int i
= 1; i
< kMaxKey
- 1; i
++) {
1386 ASSERT_OK(Delete(Key(i
)));
1389 ASSERT_OK(dbfull()->TEST_WaitForFlushMemTable());
1390 ASSERT_EQ(NumTableFilesAtLevel(0), 2);
1392 // Restart the DB. Although number of files didn't reach
1393 // options.level0_file_num_compaction_trigger, compaction should
1394 // still be triggered because of the need-compaction hint.
1395 options
.disable_auto_compactions
= false;
1397 ASSERT_OK(dbfull()->TEST_WaitForCompact());
1398 ASSERT_EQ(NumTableFilesAtLevel(0), 0);
1400 SetPerfLevel(kEnableCount
);
1401 get_perf_context()->Reset();
1403 std::unique_ptr
<Iterator
> iter(db_
->NewIterator(ReadOptions()));
1404 for (iter
->Seek(Key(0)); iter
->Valid(); iter
->Next()) {
1407 ASSERT_OK(iter
->status());
1409 ASSERT_EQ(get_perf_context()->internal_delete_skipped_count
, 0);
1410 // We iterate every key twice. Is it a bug?
1411 ASSERT_LE(get_perf_context()->internal_key_skipped_count
, 2);
1412 SetPerfLevel(kDisable
);
1416 TEST_F(DBPropertiesTest
, EstimateNumKeysUnderflow
) {
1417 Options options
= CurrentOptions();
1419 ASSERT_OK(Put("foo", "bar"));
1420 ASSERT_OK(Delete("foo"));
1421 ASSERT_OK(Delete("foo"));
1422 uint64_t num_keys
= 0;
1423 ASSERT_TRUE(dbfull()->GetIntProperty("rocksdb.estimate-num-keys", &num_keys
));
1424 ASSERT_EQ(0, num_keys
);
1427 TEST_F(DBPropertiesTest
, EstimateOldestKeyTime
) {
1428 uint64_t oldest_key_time
= 0;
1429 Options options
= CurrentOptions();
1430 SetTimeElapseOnlySleepOnReopen(&options
);
1432 // "rocksdb.estimate-oldest-key-time" only available to fifo compaction.
1433 for (auto compaction
: {kCompactionStyleLevel
, kCompactionStyleUniversal
,
1434 kCompactionStyleNone
}) {
1435 options
.compaction_style
= compaction
;
1436 options
.create_if_missing
= true;
1437 DestroyAndReopen(options
);
1438 ASSERT_OK(Put("foo", "bar"));
1439 ASSERT_FALSE(dbfull()->GetIntProperty(
1440 DB::Properties::kEstimateOldestKeyTime
, &oldest_key_time
));
1443 int64_t mock_start_time
;
1444 ASSERT_OK(env_
->GetCurrentTime(&mock_start_time
));
1446 options
.compaction_style
= kCompactionStyleFIFO
;
1448 options
.compaction_options_fifo
.allow_compaction
= false;
1449 DestroyAndReopen(options
);
1451 env_
->MockSleepForSeconds(100);
1452 ASSERT_OK(Put("k1", "v1"));
1453 ASSERT_TRUE(dbfull()->GetIntProperty(DB::Properties::kEstimateOldestKeyTime
,
1455 ASSERT_EQ(100, oldest_key_time
- mock_start_time
);
1457 ASSERT_EQ("1", FilesPerLevel());
1458 ASSERT_TRUE(dbfull()->GetIntProperty(DB::Properties::kEstimateOldestKeyTime
,
1460 ASSERT_EQ(100, oldest_key_time
- mock_start_time
);
1462 env_
->MockSleepForSeconds(100); // -> 200
1463 ASSERT_OK(Put("k2", "v2"));
1465 ASSERT_EQ("2", FilesPerLevel());
1466 ASSERT_TRUE(dbfull()->GetIntProperty(DB::Properties::kEstimateOldestKeyTime
,
1468 ASSERT_EQ(100, oldest_key_time
- mock_start_time
);
1470 env_
->MockSleepForSeconds(100); // -> 300
1471 ASSERT_OK(Put("k3", "v3"));
1473 ASSERT_EQ("3", FilesPerLevel());
1474 ASSERT_TRUE(dbfull()->GetIntProperty(DB::Properties::kEstimateOldestKeyTime
,
1476 ASSERT_EQ(100, oldest_key_time
- mock_start_time
);
1478 env_
->MockSleepForSeconds(150); // -> 450
1479 ASSERT_OK(dbfull()->CompactRange(CompactRangeOptions(), nullptr, nullptr));
1480 ASSERT_EQ("2", FilesPerLevel());
1481 ASSERT_TRUE(dbfull()->GetIntProperty(DB::Properties::kEstimateOldestKeyTime
,
1483 ASSERT_EQ(200, oldest_key_time
- mock_start_time
);
1485 env_
->MockSleepForSeconds(100); // -> 550
1486 ASSERT_OK(dbfull()->CompactRange(CompactRangeOptions(), nullptr, nullptr));
1487 ASSERT_EQ("1", FilesPerLevel());
1488 ASSERT_TRUE(dbfull()->GetIntProperty(DB::Properties::kEstimateOldestKeyTime
,
1490 ASSERT_EQ(300, oldest_key_time
- mock_start_time
);
1492 env_
->MockSleepForSeconds(100); // -> 650
1493 ASSERT_OK(dbfull()->CompactRange(CompactRangeOptions(), nullptr, nullptr));
1494 ASSERT_EQ("", FilesPerLevel());
1495 ASSERT_FALSE(dbfull()->GetIntProperty(DB::Properties::kEstimateOldestKeyTime
,
1499 TEST_F(DBPropertiesTest
, SstFilesSize
) {
1500 struct TestListener
: public EventListener
{
1501 void OnCompactionCompleted(DB
* db
,
1502 const CompactionJobInfo
& /*info*/) override
{
1503 assert(callback_triggered
== false);
1504 assert(size_before_compaction
> 0);
1505 callback_triggered
= true;
1506 uint64_t total_sst_size
= 0;
1507 uint64_t live_sst_size
= 0;
1508 bool ok
= db
->GetIntProperty(DB::Properties::kTotalSstFilesSize
,
1511 // total_sst_size include files before and after compaction.
1512 ASSERT_GT(total_sst_size
, size_before_compaction
);
1514 db
->GetIntProperty(DB::Properties::kLiveSstFilesSize
, &live_sst_size
);
1516 // live_sst_size only include files after compaction.
1517 ASSERT_GT(live_sst_size
, 0);
1518 ASSERT_LT(live_sst_size
, size_before_compaction
);
1521 uint64_t size_before_compaction
= 0;
1522 bool callback_triggered
= false;
1524 std::shared_ptr
<TestListener
> listener
= std::make_shared
<TestListener
>();
1527 options
.env
= CurrentOptions().env
;
1528 options
.disable_auto_compactions
= true;
1529 options
.listeners
.push_back(listener
);
1532 for (int i
= 0; i
< 10; i
++) {
1533 ASSERT_OK(Put("key" + ToString(i
), std::string(1000, 'v')));
1536 for (int i
= 0; i
< 5; i
++) {
1537 ASSERT_OK(Delete("key" + ToString(i
)));
1541 bool ok
= db_
->GetIntProperty(DB::Properties::kTotalSstFilesSize
, &sst_size
);
1543 ASSERT_GT(sst_size
, 0);
1544 listener
->size_before_compaction
= sst_size
;
1545 // Compact to clean all keys and trigger listener.
1546 ASSERT_OK(db_
->CompactRange(CompactRangeOptions(), nullptr, nullptr));
1547 ASSERT_TRUE(listener
->callback_triggered
);
1550 TEST_F(DBPropertiesTest
, MinObsoleteSstNumberToKeep
) {
1551 class TestListener
: public EventListener
{
1553 void OnTableFileCreated(const TableFileCreationInfo
& info
) override
{
1554 if (info
.reason
== TableFileCreationReason::kCompaction
) {
1555 // Verify the property indicates that SSTs created by a running
1556 // compaction cannot be deleted.
1557 uint64_t created_file_num
;
1558 FileType created_file_type
;
1559 std::string filename
=
1560 info
.file_path
.substr(info
.file_path
.rfind('/') + 1);
1562 ParseFileName(filename
, &created_file_num
, &created_file_type
));
1563 ASSERT_EQ(kTableFile
, created_file_type
);
1565 uint64_t keep_sst_lower_bound
;
1567 db_
->GetIntProperty(DB::Properties::kMinObsoleteSstNumberToKeep
,
1568 &keep_sst_lower_bound
));
1570 ASSERT_LE(keep_sst_lower_bound
, created_file_num
);
1575 void SetDB(DB
* db
) { db_
= db
; }
1577 int GetNumCompactions() { return num_compactions_
; }
1579 // True if we've verified the property for at least one output file
1580 bool Validated() { return validated_
; }
1583 int num_compactions_
= 0;
1584 bool validated_
= false;
1588 const int kNumL0Files
= 4;
1590 std::shared_ptr
<TestListener
> listener
= std::make_shared
<TestListener
>();
1592 Options options
= CurrentOptions();
1593 options
.listeners
.push_back(listener
);
1594 options
.level0_file_num_compaction_trigger
= kNumL0Files
;
1595 DestroyAndReopen(options
);
1596 listener
->SetDB(db_
);
1598 for (int i
= 0; i
< kNumL0Files
; ++i
) {
1599 // Make sure they overlap in keyspace to prevent trivial move
1600 ASSERT_OK(Put("key1", "val"));
1601 ASSERT_OK(Put("key2", "val"));
1604 ASSERT_OK(dbfull()->TEST_WaitForCompact());
1605 ASSERT_TRUE(listener
->Validated());
1608 TEST_F(DBPropertiesTest
, BlockCacheProperties
) {
1612 options
.env
= CurrentOptions().env
;
1614 // Block cache properties are not available for tables other than
1615 // block-based table.
1616 options
.table_factory
.reset(NewPlainTableFactory());
1619 db_
->GetIntProperty(DB::Properties::kBlockCacheCapacity
, &value
));
1620 ASSERT_FALSE(db_
->GetIntProperty(DB::Properties::kBlockCacheUsage
, &value
));
1622 db_
->GetIntProperty(DB::Properties::kBlockCachePinnedUsage
, &value
));
1624 options
.table_factory
.reset(NewCuckooTableFactory());
1627 db_
->GetIntProperty(DB::Properties::kBlockCacheCapacity
, &value
));
1628 ASSERT_FALSE(db_
->GetIntProperty(DB::Properties::kBlockCacheUsage
, &value
));
1630 db_
->GetIntProperty(DB::Properties::kBlockCachePinnedUsage
, &value
));
1632 // Block cache properties are not available if block cache is not used.
1633 BlockBasedTableOptions table_options
;
1634 table_options
.no_block_cache
= true;
1635 options
.table_factory
.reset(NewBlockBasedTableFactory(table_options
));
1638 db_
->GetIntProperty(DB::Properties::kBlockCacheCapacity
, &value
));
1639 ASSERT_FALSE(db_
->GetIntProperty(DB::Properties::kBlockCacheUsage
, &value
));
1641 db_
->GetIntProperty(DB::Properties::kBlockCachePinnedUsage
, &value
));
1643 // Test with empty block cache.
1644 constexpr size_t kCapacity
= 100;
1646 co
.capacity
= kCapacity
;
1647 co
.num_shard_bits
= 0;
1648 co
.metadata_charge_policy
= kDontChargeCacheMetadata
;
1649 auto block_cache
= NewLRUCache(co
);
1650 table_options
.block_cache
= block_cache
;
1651 table_options
.no_block_cache
= false;
1652 options
.table_factory
.reset(NewBlockBasedTableFactory(table_options
));
1654 ASSERT_TRUE(db_
->GetIntProperty(DB::Properties::kBlockCacheCapacity
, &value
));
1655 ASSERT_EQ(kCapacity
, value
);
1656 ASSERT_TRUE(db_
->GetIntProperty(DB::Properties::kBlockCacheUsage
, &value
));
1657 ASSERT_EQ(0, value
);
1659 db_
->GetIntProperty(DB::Properties::kBlockCachePinnedUsage
, &value
));
1660 ASSERT_EQ(0, value
);
1662 // Insert unpinned item to the cache and check size.
1663 constexpr size_t kSize1
= 50;
1664 ASSERT_OK(block_cache
->Insert("item1", nullptr /*value*/, kSize1
,
1665 nullptr /*deleter*/));
1666 ASSERT_TRUE(db_
->GetIntProperty(DB::Properties::kBlockCacheCapacity
, &value
));
1667 ASSERT_EQ(kCapacity
, value
);
1668 ASSERT_TRUE(db_
->GetIntProperty(DB::Properties::kBlockCacheUsage
, &value
));
1669 ASSERT_EQ(kSize1
, value
);
1671 db_
->GetIntProperty(DB::Properties::kBlockCachePinnedUsage
, &value
));
1672 ASSERT_EQ(0, value
);
1674 // Insert pinned item to the cache and check size.
1675 constexpr size_t kSize2
= 30;
1676 Cache::Handle
* item2
= nullptr;
1677 ASSERT_OK(block_cache
->Insert("item2", nullptr /*value*/, kSize2
,
1678 nullptr /*deleter*/, &item2
));
1679 ASSERT_NE(nullptr, item2
);
1680 ASSERT_TRUE(db_
->GetIntProperty(DB::Properties::kBlockCacheCapacity
, &value
));
1681 ASSERT_EQ(kCapacity
, value
);
1682 ASSERT_TRUE(db_
->GetIntProperty(DB::Properties::kBlockCacheUsage
, &value
));
1683 ASSERT_EQ(kSize1
+ kSize2
, value
);
1685 db_
->GetIntProperty(DB::Properties::kBlockCachePinnedUsage
, &value
));
1686 ASSERT_EQ(kSize2
, value
);
1688 // Insert another pinned item to make the cache over-sized.
1689 constexpr size_t kSize3
= 80;
1690 Cache::Handle
* item3
= nullptr;
1691 ASSERT_OK(block_cache
->Insert("item3", nullptr /*value*/, kSize3
,
1692 nullptr /*deleter*/, &item3
));
1693 ASSERT_NE(nullptr, item2
);
1694 ASSERT_TRUE(db_
->GetIntProperty(DB::Properties::kBlockCacheCapacity
, &value
));
1695 ASSERT_EQ(kCapacity
, value
);
1696 ASSERT_TRUE(db_
->GetIntProperty(DB::Properties::kBlockCacheUsage
, &value
));
1697 // Item 1 is evicted.
1698 ASSERT_EQ(kSize2
+ kSize3
, value
);
1700 db_
->GetIntProperty(DB::Properties::kBlockCachePinnedUsage
, &value
));
1701 ASSERT_EQ(kSize2
+ kSize3
, value
);
1703 // Check size after release.
1704 block_cache
->Release(item2
);
1705 block_cache
->Release(item3
);
1706 ASSERT_TRUE(db_
->GetIntProperty(DB::Properties::kBlockCacheCapacity
, &value
));
1707 ASSERT_EQ(kCapacity
, value
);
1708 ASSERT_TRUE(db_
->GetIntProperty(DB::Properties::kBlockCacheUsage
, &value
));
1709 // item2 will be evicted, while item3 remain in cache after release.
1710 ASSERT_EQ(kSize3
, value
);
1712 db_
->GetIntProperty(DB::Properties::kBlockCachePinnedUsage
, &value
));
1713 ASSERT_EQ(0, value
);
1716 #endif // ROCKSDB_LITE
1717 } // namespace ROCKSDB_NAMESPACE
1719 int main(int argc
, char** argv
) {
1720 ROCKSDB_NAMESPACE::port::InstallStackTraceHandler();
1721 ::testing::InitGoogleTest(&argc
, argv
);
1722 return RUN_ALL_TESTS();