#include <string>
#include "db/db_test_util.h"
+#include "options/cf_options.h"
#include "port/stack_trace.h"
#include "rocksdb/listener.h"
#include "rocksdb/options.h"
#include "rocksdb/perf_context.h"
#include "rocksdb/perf_level.h"
#include "rocksdb/table.h"
+#include "table/block_based/block.h"
+#include "table/format.h"
+#include "table/meta_blocks.h"
+#include "table/table_builder.h"
+#include "test_util/mock_time_env.h"
#include "util/random.h"
#include "util/string_util.h"
// Fixture for DB property tests. Uses a per-test DB directory and disables
// fsync in the test Env (`env_do_fsync=false`) for speed.
class DBPropertiesTest : public DBTestBase {
 public:
  DBPropertiesTest()
      : DBTestBase("db_properties_test", /*env_do_fsync=*/false) {}

  // Checks the map form of the "rocksdb.dbstats" property against expected
  // counter values. Stats not parameterized here (WAL syncs, writes by other
  // threads, write stall micros) are asserted to be zero, so this helper is
  // only suitable for tests that never trigger those paths.
  void AssertDbStats(const std::map<std::string, std::string>& db_stats,
                     double expected_uptime, int expected_user_bytes_written,
                     int expected_wal_bytes_written,
                     int expected_user_writes_by_self,
                     int expected_user_writes_with_wal) {
    ASSERT_EQ(std::to_string(expected_uptime), db_stats.at("db.uptime"));
    ASSERT_EQ(std::to_string(expected_wal_bytes_written),
              db_stats.at("db.wal_bytes_written"));
    ASSERT_EQ("0", db_stats.at("db.wal_syncs"));
    ASSERT_EQ(std::to_string(expected_user_bytes_written),
              db_stats.at("db.user_bytes_written"));
    ASSERT_EQ("0", db_stats.at("db.user_writes_by_other"));
    ASSERT_EQ(std::to_string(expected_user_writes_by_self),
              db_stats.at("db.user_writes_by_self"));
    ASSERT_EQ(std::to_string(expected_user_writes_with_wal),
              db_stats.at("db.user_writes_with_wal"));
    ASSERT_EQ("0", db_stats.at("db.user_write_stall_micros"));
  }
};
#ifndef ROCKSDB_LITE
void VerifyTableProperties(
const TableProperties& base_tp, const TableProperties& new_tp,
- double filter_size_bias = CACHE_LINE_SIZE >= 256 ? 0.15 : 0.1,
+ double filter_size_bias = CACHE_LINE_SIZE >= 256 ? 0.18 : 0.1,
double index_size_bias = 0.1, double data_size_bias = 0.1,
double num_data_blocks_bias = 0.05) {
VerifySimilar(base_tp.data_size, new_tp.data_size, data_size_bias);
const int kDeletionCount = kTableCount * kDeletionsPerTable;
const int kMergeCount = kTableCount * kMergeOperandsPerTable;
const int kRangeDeletionCount = kTableCount * kRangeDeletionsPerTable;
- const int kKeyCount = kPutCount + kDeletionCount + kMergeCount + kRangeDeletionCount;
+ const int kKeyCount =
+ kPutCount + kDeletionCount + kMergeCount + kRangeDeletionCount;
const int kAvgSuccessorSize = kKeySize / 5;
const int kEncodingSavePerKey = kKeySize / 4;
expected_tp->raw_key_size = kKeyCount * (kKeySize + 8);
expected_tp->num_merge_operands = kMergeCount;
expected_tp->num_range_deletions = kRangeDeletionCount;
expected_tp->num_data_blocks =
- kTableCount * (kKeysPerTable * (kKeySize - kEncodingSavePerKey + kValueSize)) /
+ kTableCount *
+ (kKeysPerTable * (kKeySize - kEncodingSavePerKey + kValueSize)) /
kBlockSize;
expected_tp->data_size =
kTableCount * (kKeysPerTable * (kKeySize + 8 + kValueSize));
TEST_F(DBPropertiesTest, AggregatedTableProperties) {
for (int kTableCount = 40; kTableCount <= 100; kTableCount += 30) {
- const int kDeletionsPerTable = 5;
+ const int kDeletionsPerTable = 0;
const int kMergeOperandsPerTable = 15;
const int kRangeDeletionsPerTable = 5;
const int kPutsPerTable = 100;
options.level0_file_num_compaction_trigger = 8;
options.compression = kNoCompression;
options.create_if_missing = true;
- options.preserve_deletes = true;
options.merge_operator.reset(new TestPutOperator());
BlockBasedTableOptions table_options;
TEST_F(DBPropertiesTest, AggregatedTablePropertiesAtLevel) {
const int kTableCount = 100;
- const int kDeletionsPerTable = 2;
+ const int kDeletionsPerTable = 0;
const int kMergeOperandsPerTable = 2;
const int kRangeDeletionsPerTable = 2;
const int kPutsPerTable = 10;
options.max_bytes_for_level_multiplier = 2;
// This ensures there no compaction happening when we call GetProperty().
options.disable_auto_compactions = true;
- options.preserve_deletes = true;
options.merge_operator.reset(new TestPutOperator());
BlockBasedTableOptions table_options;
ASSERT_OK(db_->CompactRange(CompactRangeOptions(), nullptr, nullptr));
ResetTableProperties(&sum_tp);
for (int level = 0; level < kMaxLevel; ++level) {
- db_->GetProperty(
- DB::Properties::kAggregatedTablePropertiesAtLevel + ToString(level),
- &level_tp_strings[level]);
+ db_->GetProperty(DB::Properties::kAggregatedTablePropertiesAtLevel +
+ std::to_string(level),
+ &level_tp_strings[level]);
ParseTablePropertiesString(level_tp_strings[level], &level_tps[level]);
sum_tp.data_size += level_tps[level].data_size;
sum_tp.index_size += level_tps[level].index_size;
value_is_delta_encoded);
// Gives larger bias here as index block size, filter block size,
// and data block size become much harder to estimate in this test.
- VerifyTableProperties(expected_tp, tp, 0.5, 0.4, 0.4, 0.25);
+ VerifyTableProperties(expected_tp, tp, CACHE_LINE_SIZE >= 256 ? 0.6 : 0.5,
+ 0.5, 0.5, 0.25);
}
}
}
const int kNumEntriesPerFile = 1000;
Options options = CurrentOptions();
- options.compression_per_level = {kNoCompression, kSnappyCompression};
options.disable_auto_compactions = true;
- options.num_levels = 2;
+ options.num_levels = 3;
Reopen(options);
+ ASSERT_OK(db_->SetOptions(
+ {{"compression_per_level", "kNoCompression:kSnappyCompression"}}));
+ auto opts = db_->GetOptions();
+ ASSERT_EQ(opts.compression_per_level.size(), 2);
+ ASSERT_EQ(opts.compression_per_level[0], kNoCompression);
+ ASSERT_EQ(opts.compression_per_level[1], kSnappyCompression);
+
// compression ratio is -1.0 when no open files at level
ASSERT_EQ(CompressionRatioAtLevel(0), -1.0);
for (int j = 0; j < kNumEntriesPerFile; ++j) {
// Put common data ("key") at end to prevent delta encoding from
// compressing the key effectively
- std::string key = ToString(i) + ToString(j) + "key";
+ std::string key = std::to_string(i) + std::to_string(j) + "key";
ASSERT_OK(dbfull()->Put(WriteOptions(), key, kVal));
}
ASSERT_OK(Flush());
std::string encoded;
PutVarint32(&encoded, count_);
*properties = UserCollectedProperties{
- {"CountingUserTblPropCollector", message_}, {"Count", encoded},
+ {"CountingUserTblPropCollector", message_},
+ {"Count", encoded},
};
return Status::OK();
}
Status Finish(UserCollectedProperties* properties) override {
*properties =
- UserCollectedProperties{{"num_delete", ToString(num_deletes_)}};
+ UserCollectedProperties{{"num_delete", std::to_string(num_deletes_)}};
return Status::OK();
}
}
};
// A `TablePropertiesCollector` that counts how many blocks were sampled for
// compression while building a table file, and publishes that count as a
// user-collected property under `kNumSampledBlocksPropertyName`.
class BlockCountingTablePropertiesCollector : public TablePropertiesCollector {
 public:
  static const std::string kNumSampledBlocksPropertyName;

  const char* Name() const override {
    return "BlockCountingTablePropertiesCollector";
  }

  // Called at table-build completion; emits the sampled-block count.
  Status Finish(UserCollectedProperties* properties) override {
    (*properties)[kNumSampledBlocksPropertyName] =
        std::to_string(num_sampled_blocks_);
    return Status::OK();
  }

  // Key/value contents are irrelevant to this collector; accept everything.
  Status AddUserKey(const Slice& /*user_key*/, const Slice& /*value*/,
                    EntryType /*type*/, SequenceNumber /*seq*/,
                    uint64_t /*file_size*/) override {
    return Status::OK();
  }

  // Invoked once per added block. A nonzero fast or slow compressed-size
  // estimate indicates the block was sampled for compression.
  void BlockAdd(uint64_t /* block_uncomp_bytes */,
                uint64_t block_compressed_bytes_fast,
                uint64_t block_compressed_bytes_slow) override {
    if (block_compressed_bytes_fast > 0 || block_compressed_bytes_slow > 0) {
      num_sampled_blocks_++;
    }
  }

  UserCollectedProperties GetReadableProperties() const override {
    return UserCollectedProperties{
        {kNumSampledBlocksPropertyName, std::to_string(num_sampled_blocks_)},
    };
  }

 private:
  // Number of blocks for which a compression sample was taken.
  uint32_t num_sampled_blocks_ = 0;
};
+
// Out-of-class definition for the property-name constant declared above.
const std::string
    BlockCountingTablePropertiesCollector::kNumSampledBlocksPropertyName =
        "NumSampledBlocks";

// Factory so the collector can be installed via
// `Options::table_properties_collector_factories`.
class BlockCountingTablePropertiesCollectorFactory
    : public TablePropertiesCollectorFactory {
 public:
  const char* Name() const override {
    return "BlockCountingTablePropertiesCollectorFactory";
  }

  TablePropertiesCollector* CreateTablePropertiesCollector(
      TablePropertiesCollectorFactory::Context /* context */) override {
    return new BlockCountingTablePropertiesCollector();
  }
};
+
#ifndef ROCKSDB_LITE
TEST_F(DBPropertiesTest, GetUserDefinedTableProperties) {
Options options = CurrentOptions();
// Create 4 tables
for (int table = 0; table < 4; ++table) {
for (int i = 0; i < 10 + table; ++i) {
- ASSERT_OK(db_->Put(WriteOptions(), ToString(table * 100 + i), "val"));
+ ASSERT_OK(
+ db_->Put(WriteOptions(), std::to_string(table * 100 + i), "val"));
}
ASSERT_OK(db_->Flush(FlushOptions()));
}
// Create 2 files
for (int table = 0; table < 2; ++table) {
for (int i = 0; i < 10 + table; ++i) {
- ASSERT_OK(Put(1, ToString(table * 100 + i), "val"));
+ ASSERT_OK(Put(1, std::to_string(table * 100 + i), "val"));
}
ASSERT_OK(Flush(1));
}
// Trigger automatic compactions.
for (int table = 0; table < 3; ++table) {
for (int i = 0; i < 10 + table; ++i) {
- ASSERT_OK(Put(1, ToString(table * 100 + i), "val"));
+ ASSERT_OK(Put(1, std::to_string(table * 100 + i), "val"));
}
ASSERT_OK(Flush(1));
ASSERT_OK(dbfull()->TEST_WaitForCompact());
// Create 4 tables in default column family
for (int table = 0; table < 2; ++table) {
for (int i = 0; i < 10 + table; ++i) {
- ASSERT_OK(Put(ToString(table * 100 + i), "val"));
+ ASSERT_OK(Put(std::to_string(table * 100 + i), "val"));
}
ASSERT_OK(Flush());
}
// Trigger automatic compactions.
for (int table = 0; table < 3; ++table) {
for (int i = 0; i < 10 + table; ++i) {
- ASSERT_OK(Put(ToString(table * 100 + i), "val"));
+ ASSERT_OK(Put(std::to_string(table * 100 + i), "val"));
}
ASSERT_OK(Flush());
ASSERT_OK(dbfull()->TEST_WaitForCompact());
}
}
// Excluded from RocksDB lite tests due to `GetPropertiesOfAllTables()` usage.
// Verifies that `TablePropertiesCollector::BlockAdd()` reports nonzero
// compressed sizes exactly when `sample_for_compression` is enabled.
TEST_F(DBPropertiesTest, BlockAddForCompressionSampling) {
  // Sampled compression requires at least one of the following four types.
  if (!Snappy_Supported() && !Zlib_Supported() && !LZ4_Supported() &&
      !ZSTD_Supported()) {
    return;
  }

  Options options = CurrentOptions();
  options.disable_auto_compactions = true;
  options.table_properties_collector_factories.emplace_back(
      std::make_shared<BlockCountingTablePropertiesCollectorFactory>());

  for (bool sample_for_compression : {false, true}) {
    // For simplicity/determinism, sample 100% when enabled, or 0% when disabled
    options.sample_for_compression = sample_for_compression ? 1 : 0;

    DestroyAndReopen(options);

    // Setup the following LSM:
    //
    // L0_0 ["a", "b"]
    // L1_0 ["a", "b"]
    //
    // L0_0 was created by flush. L1_0 was created by compaction. Each file
    // contains one data block.
    for (int i = 0; i < 3; ++i) {
      ASSERT_OK(Put("a", "val"));
      ASSERT_OK(Put("b", "val"));
      ASSERT_OK(Flush());
      if (i == 1) {
        ASSERT_OK(db_->CompactRange(CompactRangeOptions(), nullptr, nullptr));
      }
    }

    // A `BlockAdd()` should have been seen for files generated by flush or
    // compaction when `sample_for_compression` is enabled.
    TablePropertiesCollection file_to_props;
    ASSERT_OK(db_->GetPropertiesOfAllTables(&file_to_props));
    ASSERT_EQ(2, file_to_props.size());
    for (const auto& file_and_props : file_to_props) {
      auto& user_props = file_and_props.second->user_collected_properties;
      ASSERT_TRUE(user_props.find(BlockCountingTablePropertiesCollector::
                                      kNumSampledBlocksPropertyName) !=
                  user_props.end());
      // One data block per file => exactly one sample when enabled.
      ASSERT_EQ(user_props.at(BlockCountingTablePropertiesCollector::
                                  kNumSampledBlocksPropertyName),
                std::to_string(sample_for_compression ? 1 : 0));
    }
  }
}
+
// Parameterized over the compression family used for sampling estimates:
// `true` selects a "fast" (light) algorithm, `false` a "slow" (heavy) one.
class CompressionSamplingDBPropertiesTest
    : public DBPropertiesTest,
      public ::testing::WithParamInterface<bool> {
 public:
  CompressionSamplingDBPropertiesTest() : fast_(GetParam()) {}

 protected:
  const bool fast_;
};

INSTANTIATE_TEST_CASE_P(CompressionSamplingDBPropertiesTest,
                        CompressionSamplingDBPropertiesTest, ::testing::Bool());
+
// Excluded from RocksDB lite tests due to `GetPropertiesOfAllTables()` usage.
// With 100% sampling and the data already stored compressed, the sampled
// compression estimate should equal the actual data size.
TEST_P(CompressionSamplingDBPropertiesTest,
       EstimateDataSizeWithCompressionSampling) {
  Options options = CurrentOptions();
  if (fast_) {
    // One of the following light compression libraries must be present.
    if (LZ4_Supported()) {
      options.compression = kLZ4Compression;
    } else if (Snappy_Supported()) {
      options.compression = kSnappyCompression;
    } else {
      return;
    }
  } else {
    // One of the following heavy compression libraries must be present.
    if (ZSTD_Supported()) {
      options.compression = kZSTD;
    } else if (Zlib_Supported()) {
      options.compression = kZlibCompression;
    } else {
      return;
    }
  }
  options.disable_auto_compactions = true;
  // For simplicity/determinism, sample 100%.
  options.sample_for_compression = 1;
  Reopen(options);

  // Setup the following LSM:
  //
  // L0_0 ["a", "b"]
  // L1_0 ["a", "b"]
  //
  // L0_0 was created by flush. L1_0 was created by compaction. Each file
  // contains one data block. The value consists of compressible data so the
  // data block should be stored compressed.
  std::string val(1024, 'a');
  for (int i = 0; i < 3; ++i) {
    ASSERT_OK(Put("a", val));
    ASSERT_OK(Put("b", val));
    ASSERT_OK(Flush());
    if (i == 1) {
      ASSERT_OK(db_->CompactRange(CompactRangeOptions(), nullptr, nullptr));
    }
  }

  TablePropertiesCollection file_to_props;
  ASSERT_OK(db_->GetPropertiesOfAllTables(&file_to_props));
  ASSERT_EQ(2, file_to_props.size());
  for (const auto& file_and_props : file_to_props) {
    ASSERT_GT(file_and_props.second->data_size, 0);
    // The estimate from the matching sampler (fast vs. slow) should agree
    // exactly with the on-disk data size since everything was sampled.
    if (fast_) {
      ASSERT_EQ(file_and_props.second->data_size,
                file_and_props.second->fast_compression_estimated_data_size);
    } else {
      ASSERT_EQ(file_and_props.second->data_size,
                file_and_props.second->slow_compression_estimated_data_size);
    }
  }
}
+
TEST_F(DBPropertiesTest, EstimateNumKeysUnderflow) {
Options options = CurrentOptions();
Reopen(options);
options.compaction_style = kCompactionStyleFIFO;
options.ttl = 300;
+ options.max_open_files = -1;
options.compaction_options_fifo.allow_compaction = false;
DestroyAndReopen(options);
Reopen(options);
for (int i = 0; i < 10; i++) {
- ASSERT_OK(Put("key" + ToString(i), std::string(1000, 'v')));
+ ASSERT_OK(Put("key" + std::to_string(i), std::string(1000, 'v')));
}
ASSERT_OK(Flush());
for (int i = 0; i < 5; i++) {
- ASSERT_OK(Delete("key" + ToString(i)));
+ ASSERT_OK(Delete("key" + std::to_string(i)));
}
ASSERT_OK(Flush());
uint64_t sst_size;
ASSERT_TRUE(listener->Validated());
}
// Verifies the blob-cache int properties (capacity/usage/pinned usage) track
// a strict-capacity-less LRU cache through insert, pin, and release. The
// expected values depend on LRU eviction order, so the insertion sequence
// below must not be reordered.
TEST_F(DBPropertiesTest, BlobCacheProperties) {
  Options options;
  uint64_t value;

  options.env = CurrentOptions().env;

  // Test with empty blob cache.
  constexpr size_t kCapacity = 100;
  LRUCacheOptions co;
  co.capacity = kCapacity;
  co.num_shard_bits = 0;
  co.metadata_charge_policy = kDontChargeCacheMetadata;
  auto blob_cache = NewLRUCache(co);
  options.blob_cache = blob_cache;

  Reopen(options);

  ASSERT_TRUE(db_->GetIntProperty(DB::Properties::kBlobCacheCapacity, &value));
  ASSERT_EQ(kCapacity, value);
  ASSERT_TRUE(db_->GetIntProperty(DB::Properties::kBlobCacheUsage, &value));
  ASSERT_EQ(0, value);
  ASSERT_TRUE(
      db_->GetIntProperty(DB::Properties::kBlobCachePinnedUsage, &value));
  ASSERT_EQ(0, value);

  // Insert unpinned blob to the cache and check size.
  constexpr size_t kSize1 = 70;
  ASSERT_OK(blob_cache->Insert("blob1", nullptr /*value*/, kSize1,
                               nullptr /*deleter*/));
  ASSERT_TRUE(db_->GetIntProperty(DB::Properties::kBlobCacheCapacity, &value));
  ASSERT_EQ(kCapacity, value);
  ASSERT_TRUE(db_->GetIntProperty(DB::Properties::kBlobCacheUsage, &value));
  ASSERT_EQ(kSize1, value);
  ASSERT_TRUE(
      db_->GetIntProperty(DB::Properties::kBlobCachePinnedUsage, &value));
  ASSERT_EQ(0, value);

  // Insert pinned blob to the cache and check size.
  constexpr size_t kSize2 = 60;
  Cache::Handle* blob2 = nullptr;
  ASSERT_OK(blob_cache->Insert("blob2", nullptr /*value*/, kSize2,
                               nullptr /*deleter*/, &blob2));
  ASSERT_NE(nullptr, blob2);
  ASSERT_TRUE(db_->GetIntProperty(DB::Properties::kBlobCacheCapacity, &value));
  ASSERT_EQ(kCapacity, value);
  ASSERT_TRUE(db_->GetIntProperty(DB::Properties::kBlobCacheUsage, &value));
  // blob1 is evicted.
  ASSERT_EQ(kSize2, value);
  ASSERT_TRUE(
      db_->GetIntProperty(DB::Properties::kBlobCachePinnedUsage, &value));
  ASSERT_EQ(kSize2, value);

  // Insert another pinned blob to make the cache over-sized.
  constexpr size_t kSize3 = 80;
  Cache::Handle* blob3 = nullptr;
  ASSERT_OK(blob_cache->Insert("blob3", nullptr /*value*/, kSize3,
                               nullptr /*deleter*/, &blob3));
  ASSERT_NE(nullptr, blob3);
  ASSERT_TRUE(db_->GetIntProperty(DB::Properties::kBlobCacheCapacity, &value));
  ASSERT_EQ(kCapacity, value);
  ASSERT_TRUE(db_->GetIntProperty(DB::Properties::kBlobCacheUsage, &value));
  // Pinned entries cannot be evicted, so usage exceeds capacity here.
  ASSERT_EQ(kSize2 + kSize3, value);
  ASSERT_TRUE(
      db_->GetIntProperty(DB::Properties::kBlobCachePinnedUsage, &value));
  ASSERT_EQ(kSize2 + kSize3, value);

  // Check size after release.
  blob_cache->Release(blob2);
  blob_cache->Release(blob3);
  ASSERT_TRUE(db_->GetIntProperty(DB::Properties::kBlobCacheCapacity, &value));
  ASSERT_EQ(kCapacity, value);
  ASSERT_TRUE(db_->GetIntProperty(DB::Properties::kBlobCacheUsage, &value));
  // blob2 will be evicted, while blob3 remain in cache after release.
  ASSERT_EQ(kSize3, value);
  ASSERT_TRUE(
      db_->GetIntProperty(DB::Properties::kBlobCachePinnedUsage, &value));
  ASSERT_EQ(0, value);
}
+
TEST_F(DBPropertiesTest, BlockCacheProperties) {
Options options;
uint64_t value;
ASSERT_EQ(0, value);
}
// Verifies the map form of "rocksdb.dbstats" using a mock clock: uptime
// advances only when the clock is advanced, and user/WAL byte counters track
// WriteBatch sizes for writes with and without WAL.
TEST_F(DBPropertiesTest, GetMapPropertyDbStats) {
  auto mock_clock = std::make_shared<MockSystemClock>(env_->GetSystemClock());
  CompositeEnvWrapper env(env_, mock_clock);

  Options opts = CurrentOptions();
  opts.env = &env;
  Reopen(opts);

  {
    // Freshly opened DB: everything should be zero.
    std::map<std::string, std::string> db_stats;
    ASSERT_TRUE(db_->GetMapProperty(DB::Properties::kDBStats, &db_stats));
    AssertDbStats(db_stats, 0.0 /* expected_uptime */,
                  0 /* expected_user_bytes_written */,
                  0 /* expected_wal_bytes_written */,
                  0 /* expected_user_writes_by_self */,
                  0 /* expected_user_writes_with_wal */);
  }

  {
    // Advancing the mock clock by 1.5s should be reflected in uptime only.
    mock_clock->SleepForMicroseconds(1500000);

    std::map<std::string, std::string> db_stats;
    ASSERT_TRUE(db_->GetMapProperty(DB::Properties::kDBStats, &db_stats));
    AssertDbStats(db_stats, 1.5 /* expected_uptime */,
                  0 /* expected_user_bytes_written */,
                  0 /* expected_wal_bytes_written */,
                  0 /* expected_user_writes_by_self */,
                  0 /* expected_user_writes_with_wal */);
  }

  int expected_user_bytes_written = 0;
  {
    // Write with WAL disabled.
    WriteOptions write_opts;
    write_opts.disableWAL = true;

    WriteBatch batch;
    ASSERT_OK(batch.Put("key", "val"));
    expected_user_bytes_written += static_cast<int>(batch.GetDataSize());

    ASSERT_OK(db_->Write(write_opts, &batch));

    // User bytes advance; WAL bytes and with-WAL write count do not.
    std::map<std::string, std::string> db_stats;
    ASSERT_TRUE(db_->GetMapProperty(DB::Properties::kDBStats, &db_stats));
    AssertDbStats(db_stats, 1.5 /* expected_uptime */,
                  expected_user_bytes_written,
                  0 /* expected_wal_bytes_written */,
                  1 /* expected_user_writes_by_self */,
                  0 /* expected_user_writes_with_wal */);
  }

  int expected_wal_bytes_written = 0;
  {
    // Write with WAL enabled.
    WriteBatch batch;
    ASSERT_OK(batch.Delete("key"));
    expected_user_bytes_written += static_cast<int>(batch.GetDataSize());
    expected_wal_bytes_written += static_cast<int>(batch.GetDataSize());

    ASSERT_OK(db_->Write(WriteOptions(), &batch));

    // Both user and WAL byte counters advance now.
    std::map<std::string, std::string> db_stats;
    ASSERT_TRUE(db_->GetMapProperty(DB::Properties::kDBStats, &db_stats));
    AssertDbStats(db_stats, 1.5 /* expected_uptime */,
                  expected_user_bytes_written, expected_wal_bytes_written,
                  2 /* expected_user_writes_by_self */,
                  1 /* expected_user_writes_with_wal */);
  }

  // Close before `env` (stack-allocated) goes out of scope.
  Close();
}
+
// Verifies the block cache entry stats map property contains exactly the
// expected keys: four general entries plus three per cache-entry role.
TEST_F(DBPropertiesTest, GetMapPropertyBlockCacheEntryStats) {
  // Currently only verifies the expected properties are present
  std::map<std::string, std::string> values;
  ASSERT_TRUE(
      db_->GetMapProperty(DB::Properties::kBlockCacheEntryStats, &values));

  ASSERT_TRUE(values.find(BlockCacheEntryStatsMapKeys::CacheId()) !=
              values.end());
  ASSERT_TRUE(values.find(BlockCacheEntryStatsMapKeys::CacheCapacityBytes()) !=
              values.end());
  ASSERT_TRUE(
      values.find(
          BlockCacheEntryStatsMapKeys::LastCollectionDurationSeconds()) !=
      values.end());
  ASSERT_TRUE(
      values.find(BlockCacheEntryStatsMapKeys::LastCollectionAgeSeconds()) !=
      values.end());
  for (size_t i = 0; i < kNumCacheEntryRoles; ++i) {
    CacheEntryRole role = static_cast<CacheEntryRole>(i);
    ASSERT_TRUE(values.find(BlockCacheEntryStatsMapKeys::EntryCount(role)) !=
                values.end());
    ASSERT_TRUE(values.find(BlockCacheEntryStatsMapKeys::UsedBytes(role)) !=
                values.end());
    ASSERT_TRUE(values.find(BlockCacheEntryStatsMapKeys::UsedPercent(role)) !=
                values.end());
  }

  // There should be no extra values in the map.
  ASSERT_EQ(3 * kNumCacheEntryRoles + 4, values.size());
}
+
+namespace {
+std::string PopMetaIndexKey(InternalIterator* meta_iter) {
+ Status s = meta_iter->status();
+ if (!s.ok()) {
+ return s.ToString();
+ } else if (meta_iter->Valid()) {
+ std::string rv = meta_iter->key().ToString();
+ meta_iter->Next();
+ return rv;
+ } else {
+ return "NOT_FOUND";
+ }
+}
+
+} // anonymous namespace
+
// Verifies the exact set and order of metaindex block keys in an SST file
// across option variants, to detect unintended churn in on-disk key names.
TEST_F(DBPropertiesTest, TableMetaIndexKeys) {
  // This is to detect unexpected churn in metaindex block keys. This is more
  // of a "table test" but table_test.cc doesn't depend on db_test_util.h and
  // we need ChangeOptions() for broad coverage.
  constexpr int kKeyCount = 100;
  do {
    Options options;
    options = CurrentOptions(options);
    DestroyAndReopen(options);

    // Create an SST file
    for (int key = 0; key < kKeyCount; key++) {
      ASSERT_OK(Put(Key(key), "val"));
    }
    ASSERT_OK(Flush());

    // Find its file number
    std::vector<LiveFileMetaData> files;
    db_->GetLiveFilesMetaData(&files);
    // 1 SST file
    ASSERT_EQ(1, files.size());

    // Open it for inspection
    std::string sst_file =
        files[0].directory + "/" + files[0].relative_filename;
    std::unique_ptr<FSRandomAccessFile> f;
    ASSERT_OK(env_->GetFileSystem()->NewRandomAccessFile(
        sst_file, FileOptions(), &f, nullptr));
    std::unique_ptr<RandomAccessFileReader> r;
    r.reset(new RandomAccessFileReader(std::move(f), sst_file));
    uint64_t file_size = 0;
    ASSERT_OK(env_->GetFileSize(sst_file, &file_size));

    // Read metaindex
    BlockContents bc;
    ASSERT_OK(ReadMetaIndexBlockInFile(r.get(), file_size, 0U,
                                       ImmutableOptions(options), &bc));
    Block metaindex_block(std::move(bc));
    std::unique_ptr<InternalIterator> meta_iter;
    meta_iter.reset(metaindex_block.NewMetaIterator());
    meta_iter->SeekToFirst();

    if (strcmp(options.table_factory->Name(),
               TableFactory::kBlockBasedTableName()) == 0) {
      auto bbto = options.table_factory->GetOptions<BlockBasedTableOptions>();
      if (bbto->filter_policy) {
        if (bbto->partition_filters) {
          // The key names are intentionally hard-coded here to detect
          // accidental regression on compatibility.
          EXPECT_EQ("partitionedfilter.rocksdb.BuiltinBloomFilter",
                    PopMetaIndexKey(meta_iter.get()));
        } else {
          EXPECT_EQ("fullfilter.rocksdb.BuiltinBloomFilter",
                    PopMetaIndexKey(meta_iter.get()));
        }
      }
      if (bbto->index_type == BlockBasedTableOptions::kHashSearch) {
        EXPECT_EQ("rocksdb.hashindex.metadata",
                  PopMetaIndexKey(meta_iter.get()));
        EXPECT_EQ("rocksdb.hashindex.prefixes",
                  PopMetaIndexKey(meta_iter.get()));
      }
    }
    // Every table has a properties block; it must be the last metaindex key.
    EXPECT_EQ("rocksdb.properties", PopMetaIndexKey(meta_iter.get()));
    EXPECT_EQ("NOT_FOUND", PopMetaIndexKey(meta_iter.get()));
  } while (ChangeOptions());
}
+
#endif // ROCKSDB_LITE
+
} // namespace ROCKSDB_NAMESPACE
int main(int argc, char** argv) {