#endif
#include "cache/lru_cache.h"
-#include "db/blob_index.h"
+#include "db/blob/blob_index.h"
#include "db/db_impl/db_impl.h"
#include "db/db_test_util.h"
#include "db/dbformat.h"
#include "rocksdb/utilities/checkpoint.h"
#include "rocksdb/utilities/optimistic_transaction_db.h"
#include "rocksdb/utilities/write_batch_with_index.h"
-#include "table/block_based/block_based_table_factory.h"
#include "table/mock_table.h"
-#include "table/plain/plain_table_factory.h"
#include "table/scoped_arena_iterator.h"
#include "test_util/sync_point.h"
#include "test_util/testharness.h"
#include "test_util/testutil.h"
#include "util/compression.h"
#include "util/mutexlock.h"
+#include "util/random.h"
#include "util/rate_limiter.h"
#include "util/string_util.h"
#include "utilities/merge_operators.h"
namespace ROCKSDB_NAMESPACE {
+// Note that whole DBTest and its child classes disable fsync on files
+// and directories for speed.
+// If fsync needs to be covered in a test, put it in other places.
class DBTest : public DBTestBase {
public:
- DBTest() : DBTestBase("/db_test") {}
+ DBTest() : DBTestBase("/db_test", /*env_do_fsync=*/false) {}
};
class DBTestWithParam
// TEST_FlushMemTable() is not supported in ROCKSDB_LITE
#ifndef ROCKSDB_LITE
- DBImpl* dbi = reinterpret_cast<DBImpl*>(db);
+ DBImpl* dbi = static_cast_with_check<DBImpl>(db);
ASSERT_OK(dbi->TEST_FlushMemTable());
for (size_t i = 0; i < 3; ++i) {
ASSERT_TRUE(!iterator->Valid());
delete iterator;
- DBImpl* dbi = reinterpret_cast<DBImpl*>(db);
+ DBImpl* dbi = static_cast_with_check<DBImpl>(db);
ASSERT_OK(dbi->TEST_FlushMemTable());
for (size_t i = 0; i < 3; ++i) {
WriteOptions wo;
// this should fill up 2 memtables
for (int k = 0; k < 5000; ++k) {
- ASSERT_OK(db_->Put(wo, handles_[a & 1], RandomString(&rnd, 13), ""));
+ ASSERT_OK(db_->Put(wo, handles_[a & 1], rnd.RandomString(13), ""));
}
};
bool Filter(int /*level*/, const Slice& /*key*/, const Slice& /*value*/,
std::string* /*new_value*/,
bool* /*value_changed*/) const override {
- db_test->env_->addon_time_.fetch_add(1000);
+ db_test->env_->MockSleepForMicroseconds(1000);
return true;
}
std::vector<std::string> values;
// Write 120KB (12 values, each 10K)
for (int i = 0; i < 12; i++) {
- values.push_back(DBTestBase::RandomString(&rnd, 10000));
+ values.push_back(rnd.RandomString(10000));
ASSERT_OK(self->Put(DBTestBase::Key(i), values[i]));
}
self->dbfull()->TEST_WaitForFlushMemTable();
// generate one more file in level-0, and should trigger level-0 compaction
std::vector<std::string> values;
for (int i = 0; i < 12; i++) {
- values.push_back(DBTestBase::RandomString(&rnd, 10000));
+ values.push_back(rnd.RandomString(10000));
ASSERT_OK(self->Put(DBTestBase::Key(i), values[i]));
}
self->dbfull()->TEST_WaitForCompact();
Random rnd(301);
std::string value =
- RandomString(&rnd, static_cast<int>(2 * options.write_buffer_size));
+ rnd.RandomString(static_cast<int>(2 * options.write_buffer_size));
for (int i = 0; i < 5 * kMaxFiles; i++) {
ASSERT_OK(Put(1, "key", value));
ASSERT_LE(TotalTableFiles(1), kMaxFiles);
const int N = 128;
Random rnd(301);
for (int i = 0; i < N; i++) {
- ASSERT_OK(Put(Key(i), RandomString(&rnd, 1024)));
+ ASSERT_OK(Put(Key(i), rnd.RandomString(1024)));
}
uint64_t size;
ASSERT_EQ(size, 0);
for (int i = 0; i < N; i++) {
- ASSERT_OK(Put(Key(1000 + i), RandomString(&rnd, 1024)));
+ ASSERT_OK(Put(Key(1000 + i), rnd.RandomString(1024)));
}
start = Key(500);
keys[i * 3 + 1] = i * 5 + 1;
keys[i * 3 + 2] = i * 5 + 2;
}
- std::random_shuffle(std::begin(keys), std::end(keys));
+ // MemTable entry counting is estimated and can vary greatly depending on
+ // layout. Thus, using deterministic seed for test stability.
+ RandomShuffle(std::begin(keys), std::end(keys), rnd.Next());
for (int i = 0; i < N * 3; i++) {
- ASSERT_OK(Put(Key(keys[i] + 1000), RandomString(&rnd, 1024)));
+ ASSERT_OK(Put(Key(keys[i] + 1000), rnd.RandomString(1024)));
}
start = Key(100);
Flush();
for (int i = 0; i < N; i++) {
- ASSERT_OK(Put(Key(i + 1000), RandomString(&rnd, 1024)));
+ ASSERT_OK(Put(Key(i + 1000), rnd.RandomString(1024)));
}
start = Key(1050);
}
TEST_F(DBTest, ApproximateSizesFilesWithErrorMargin) {
+ // Roughly 4 keys per data block, 1000 keys per file,
+ // with filter substantially larger than a data block
+ BlockBasedTableOptions table_options;
+ table_options.filter_policy.reset(NewBloomFilterPolicy(16));
+ table_options.block_size = 100;
Options options = CurrentOptions();
- options.write_buffer_size = 1024 * 1024;
+ options.table_factory.reset(NewBlockBasedTableFactory(table_options));
+ options.write_buffer_size = 24 * 1024;
options.compression = kNoCompression;
options.create_if_missing = true;
- options.target_file_size_base = 1024 * 1024;
+ options.target_file_size_base = 24 * 1024;
DestroyAndReopen(options);
const auto default_cf = db_->DefaultColumnFamily();
const int N = 64000;
Random rnd(301);
for (int i = 0; i < N; i++) {
- ASSERT_OK(Put(Key(i), RandomString(&rnd, 1024)));
+ ASSERT_OK(Put(Key(i), rnd.RandomString(24)));
}
// Flush everything to files
Flush();
// Write more keys
for (int i = N; i < (N + N / 4); i++) {
- ASSERT_OK(Put(Key(i), RandomString(&rnd, 1024)));
+ ASSERT_OK(Put(Key(i), rnd.RandomString(24)));
}
// Flush everything to files again
Flush();
// Wait for compaction to finish
ASSERT_OK(dbfull()->TEST_WaitForCompact());
- const std::string start = Key(0);
- const std::string end = Key(2 * N);
- const Range r(start, end);
+ {
+ const std::string start = Key(0);
+ const std::string end = Key(2 * N);
+ const Range r(start, end);
- SizeApproximationOptions size_approx_options;
- size_approx_options.include_memtabtles = false;
- size_approx_options.include_files = true;
- size_approx_options.files_size_error_margin = -1.0; // disabled
+ SizeApproximationOptions size_approx_options;
+ size_approx_options.include_memtabtles = false;
+ size_approx_options.include_files = true;
+ size_approx_options.files_size_error_margin = -1.0; // disabled
- // Get the precise size without any approximation heuristic
- uint64_t size;
- db_->GetApproximateSizes(size_approx_options, default_cf, &r, 1, &size);
- ASSERT_NE(size, 0);
+ // Get the precise size without any approximation heuristic
+ uint64_t size;
+ db_->GetApproximateSizes(size_approx_options, default_cf, &r, 1, &size);
+ ASSERT_NE(size, 0);
- // Get the size with an approximation heuristic
- uint64_t size2;
- const double error_margin = 0.2;
- size_approx_options.files_size_error_margin = error_margin;
- db_->GetApproximateSizes(size_approx_options, default_cf, &r, 1, &size2);
- ASSERT_LT(size2, size * (1 + error_margin));
- ASSERT_GT(size2, size * (1 - error_margin));
+ // Get the size with an approximation heuristic
+ uint64_t size2;
+ const double error_margin = 0.2;
+ size_approx_options.files_size_error_margin = error_margin;
+ db_->GetApproximateSizes(size_approx_options, default_cf, &r, 1, &size2);
+ ASSERT_LT(size2, size * (1 + error_margin));
+ ASSERT_GT(size2, size * (1 - error_margin));
+ }
+
+ {
+ // Ensure that metadata is not falsely attributed only to the last data in
+ // the file. (In some applications, filters can be large portion of data
+ // size.)
+ // Perform many queries over small range, enough to ensure crossing file
+ // boundary, and make sure we never see a spike for large filter.
+ for (int i = 0; i < 3000; i += 10) {
+ const std::string start = Key(i);
+ const std::string end = Key(i + 11); // overlap by 1 key
+ const Range r(start, end);
+ uint64_t size;
+ db_->GetApproximateSizes(&r, 1, &size);
+ ASSERT_LE(size, 11 * 100);
+ }
+ }
}
TEST_F(DBTest, GetApproximateMemTableStats) {
const int N = 128;
Random rnd(301);
for (int i = 0; i < N; i++) {
- ASSERT_OK(Put(Key(i), RandomString(&rnd, 1024)));
+ ASSERT_OK(Put(Key(i), rnd.RandomString(1024)));
}
uint64_t count;
ASSERT_EQ(size, 0);
for (int i = 0; i < N; i++) {
- ASSERT_OK(Put(Key(1000 + i), RandomString(&rnd, 1024)));
+ ASSERT_OK(Put(Key(1000 + i), rnd.RandomString(1024)));
}
start = Key(100);
static const int S2 = 105000; // Allow some expansion from metadata
Random rnd(301);
for (int i = 0; i < N; i++) {
- ASSERT_OK(Put(1, Key(i), RandomString(&rnd, S1)));
+ ASSERT_OK(Put(1, Key(i), rnd.RandomString(S1)));
}
// 0 because GetApproximateSizes() does not account for memtable space
CreateAndReopenWithCF({"pikachu"}, options);
Random rnd(301);
- std::string big1 = RandomString(&rnd, 100000);
- ASSERT_OK(Put(1, Key(0), RandomString(&rnd, 10000)));
- ASSERT_OK(Put(1, Key(1), RandomString(&rnd, 10000)));
+ std::string big1 = rnd.RandomString(100000);
+ ASSERT_OK(Put(1, Key(0), rnd.RandomString(10000)));
+ ASSERT_OK(Put(1, Key(1), rnd.RandomString(10000)));
ASSERT_OK(Put(1, Key(2), big1));
- ASSERT_OK(Put(1, Key(3), RandomString(&rnd, 10000)));
+ ASSERT_OK(Put(1, Key(3), rnd.RandomString(10000)));
ASSERT_OK(Put(1, Key(4), big1));
- ASSERT_OK(Put(1, Key(5), RandomString(&rnd, 10000)));
- ASSERT_OK(Put(1, Key(6), RandomString(&rnd, 300000)));
- ASSERT_OK(Put(1, Key(7), RandomString(&rnd, 10000)));
+ ASSERT_OK(Put(1, Key(5), rnd.RandomString(10000)));
+ ASSERT_OK(Put(1, Key(6), rnd.RandomString(300000)));
+ ASSERT_OK(Put(1, Key(7), rnd.RandomString(10000)));
// Check sizes across recovery by reopening a few times
for (int run = 0; run < 3; run++) {
ASSERT_TRUE(Between(Size("", Key(2), 1), 20000, 21000));
ASSERT_TRUE(Between(Size("", Key(3), 1), 120000, 121000));
ASSERT_TRUE(Between(Size("", Key(4), 1), 130000, 131000));
- ASSERT_TRUE(Between(Size("", Key(5), 1), 230000, 231000));
- ASSERT_TRUE(Between(Size("", Key(6), 1), 240000, 241000));
- ASSERT_TRUE(Between(Size("", Key(7), 1), 540000, 541000));
- ASSERT_TRUE(Between(Size("", Key(8), 1), 550000, 560000));
+ ASSERT_TRUE(Between(Size("", Key(5), 1), 230000, 232000));
+ ASSERT_TRUE(Between(Size("", Key(6), 1), 240000, 242000));
+    // Ensure some overhead is accounted for, even without including all of
+    // the file's metadata in the estimate
+ ASSERT_TRUE(Between(Size("", Key(7), 1), 540500, 545000));
+ ASSERT_TRUE(Between(Size("", Key(8), 1), 550500, 555000));
- ASSERT_TRUE(Between(Size(Key(3), Key(5), 1), 110000, 111000));
+ ASSERT_TRUE(Between(Size(Key(3), Key(5), 1), 110100, 111000));
dbfull()->TEST_CompactRange(0, nullptr, nullptr, handles_[1]);
}
#ifndef ROCKSDB_LITE
TEST_F(DBTest, Snapshot) {
+ env_->SetMockSleep();
anon::OptionsOverride options_override;
options_override.skip_policy = kSkipNoSnapshot;
do {
Put(0, "foo", "0v2");
Put(1, "foo", "1v2");
- env_->addon_time_.fetch_add(1);
+ env_->MockSleepForSeconds(1);
const Snapshot* s2 = db_->GetSnapshot();
ASSERT_EQ(2U, GetNumSnapshots());
Random rnd(301);
FillLevels("a", "z", 1);
- std::string big = RandomString(&rnd, 50000);
+ std::string big = rnd.RandomString(50000);
Put(1, "foo", big);
Put(1, "pastfoo", "v");
const Snapshot* snapshot = db_->GetSnapshot();
ASSERT_EQ(NumTableFilesAtLevel(0, 1), 0);
std::vector<std::string> values;
for (int i = 0; i < 80; i++) {
- values.push_back(RandomString(&rnd, 100000));
+ values.push_back(rnd.RandomString(100000));
ASSERT_OK(Put((i < 40), Key(i), values[i]));
}
// copy these files to a new snapshot directory
std::string snapdir = dbname_ + ".snapdir/";
- ASSERT_OK(env_->CreateDirIfMissing(snapdir));
+ if (env_->FileExists(snapdir).ok()) {
+ ASSERT_OK(DestroyDir(env_, snapdir));
+ }
+ ASSERT_OK(env_->CreateDir(snapdir));
for (size_t i = 0; i < files.size(); i++) {
// our clients require that GetLiveFiles returns
// overwrite one key, this key should not appear in the snapshot
std::vector<std::string> extras;
for (unsigned int i = 0; i < 1; i++) {
- extras.push_back(RandomString(&rnd, 100000));
+ extras.push_back(rnd.RandomString(100000));
ASSERT_OK(Put(0, Key(i), extras[i]));
}
Close();
} while (ChangeCompactOptions());
}
+
+TEST_F(DBTest, GetLiveBlobFiles) {
+ VersionSet* const versions = dbfull()->TEST_GetVersionSet();
+ assert(versions);
+ assert(versions->GetColumnFamilySet());
+
+ ColumnFamilyData* const cfd = versions->GetColumnFamilySet()->GetDefault();
+ assert(cfd);
+
+ Version* const version = cfd->current();
+ assert(version);
+
+ VersionStorageInfo* const storage_info = version->storage_info();
+ assert(storage_info);
+
+ // Add a live blob file.
+ constexpr uint64_t blob_file_number = 234;
+ constexpr uint64_t total_blob_count = 555;
+ constexpr uint64_t total_blob_bytes = 66666;
+ constexpr char checksum_method[] = "CRC32";
+ constexpr char checksum_value[] = "3d87ff57";
+
+ auto shared_meta = SharedBlobFileMetaData::Create(
+ blob_file_number, total_blob_count, total_blob_bytes, checksum_method,
+ checksum_value);
+
+ constexpr uint64_t garbage_blob_count = 0;
+ constexpr uint64_t garbage_blob_bytes = 0;
+
+ auto meta = BlobFileMetaData::Create(std::move(shared_meta),
+ BlobFileMetaData::LinkedSsts(),
+ garbage_blob_count, garbage_blob_bytes);
+
+ storage_info->AddBlobFile(std::move(meta));
+
+ // Make sure it appears in the results returned by GetLiveFiles.
+ uint64_t manifest_size = 0;
+ std::vector<std::string> files;
+ ASSERT_OK(dbfull()->GetLiveFiles(files, &manifest_size));
+
+ ASSERT_FALSE(files.empty());
+ ASSERT_EQ(files[0], BlobFileName("", blob_file_number));
+}
#endif
TEST_F(DBTest, PurgeInfoLogs) {
Options options = CurrentOptions();
options.keep_log_file_num = 5;
options.create_if_missing = true;
+ options.env = env_;
for (int mode = 0; mode <= 1; mode++) {
if (mode == 1) {
options.db_log_dir = dbname_ + "_logs";
Status SyncWAL() override { return Status::OK(); }
-#ifndef ROCKSDB_LITE
Status DisableFileDeletions() override { return Status::OK(); }
Status EnableFileDeletions(bool /*force*/) override { return Status::OK(); }
+#ifndef ROCKSDB_LITE
+
Status GetLiveFiles(std::vector<std::string>&, uint64_t* /*size*/,
bool /*flush_memtable*/ = true) override {
return Status::OK();
}
+ Status GetLiveFilesChecksumInfo(
+ FileChecksumList* /*checksum_list*/) override {
+ return Status::OK();
+ }
+
Status GetSortedWalFiles(VectorLogPtr& /*files*/) override {
return Status::OK();
}
return Status::OK();
}
+ Status GetDbSessionId(std::string& /*session_id*/) const override {
+ return Status::OK();
+ }
+
SequenceNumber GetLatestSequenceNumber() const override { return 0; }
bool SetPreserveDeletesSequenceNumber(SequenceNumber /*seqnum*/) override {
fprintf(stderr, "step %d: Value mismatch for key '%s': '%s' vs. '%s'\n",
step, EscapeString(miter->key()).c_str(),
EscapeString(miter->value()).c_str(),
- EscapeString(miter->value()).c_str());
+ EscapeString(dbiter->value()).c_str());
ok = false;
}
}
}
if (p < 45) { // Put
k = RandomKey(&rnd, minimum);
- v = RandomString(&rnd,
- rnd.OneIn(20) ? 100 + rnd.Uniform(100) : rnd.Uniform(8));
+ v = rnd.RandomString(rnd.OneIn(20) ? 100 + rnd.Uniform(100)
+ : rnd.Uniform(8));
ASSERT_OK(model.Put(WriteOptions(), k, v));
ASSERT_OK(db_->Put(WriteOptions(), k, v));
} else if (p < 90) { // Delete
// we have multiple entries in the write batch for the same key
}
if (rnd.OneIn(2)) {
- v = RandomString(&rnd, rnd.Uniform(10));
+ v = rnd.RandomString(rnd.Uniform(10));
b.Put(k, v);
} else {
b.Delete(k);
Random rnd(301);
for (int i = 0; i < 6; ++i) {
for (int j = 0; j < 110; ++j) {
- ASSERT_OK(Put(ToString(i * 100 + j), RandomString(&rnd, 980)));
+ ASSERT_OK(Put(ToString(i * 100 + j), rnd.RandomString(980)));
}
// flush should happen here
ASSERT_OK(dbfull()->TEST_WaitForFlushMemTable());
for (int i = 0; i < 60; i++) {
// Generate and flush a file about 20KB.
for (int j = 0; j < 20; j++) {
- ASSERT_OK(Put(ToString(i * 20 + j), RandomString(&rnd, 980)));
+ ASSERT_OK(Put(ToString(i * 20 + j), rnd.RandomString(980)));
}
Flush();
ASSERT_OK(dbfull()->TEST_WaitForCompact());
for (int i = 0; i < 60; i++) {
// Generate and flush a file about 20KB.
for (int j = 0; j < 20; j++) {
- ASSERT_OK(Put(ToString(i * 20 + j + 2000), RandomString(&rnd, 980)));
+ ASSERT_OK(Put(ToString(i * 20 + j + 2000), rnd.RandomString(980)));
}
Flush();
ASSERT_OK(dbfull()->TEST_WaitForCompact());
Random rnd(301);
for (int i = 0; i < 3; i++) {
// Each file contains a different key which will be dropped later.
- ASSERT_OK(Put("a" + ToString(i), RandomString(&rnd, 500)));
+ ASSERT_OK(Put("a" + ToString(i), rnd.RandomString(500)));
ASSERT_OK(Put("key" + ToString(i), ""));
- ASSERT_OK(Put("z" + ToString(i), RandomString(&rnd, 500)));
+ ASSERT_OK(Put("z" + ToString(i), rnd.RandomString(500)));
Flush();
ASSERT_OK(dbfull()->TEST_WaitForCompact());
}
}
for (int i = 0; i < 3; i++) {
// Each file contains a different key which will be dropped later.
- ASSERT_OK(Put("a" + ToString(i), RandomString(&rnd, 500)));
+ ASSERT_OK(Put("a" + ToString(i), rnd.RandomString(500)));
ASSERT_OK(Delete("key" + ToString(i)));
- ASSERT_OK(Put("z" + ToString(i), RandomString(&rnd, 500)));
+ ASSERT_OK(Put("z" + ToString(i), rnd.RandomString(500)));
Flush();
ASSERT_OK(dbfull()->TEST_WaitForCompact());
}
options.arena_block_size = 4096;
options.compression = kNoCompression;
options.create_if_missing = true;
- env_->time_elapse_only_sleep_ = false;
+ env_->SetMockSleep();
options.env = env_;
// Test to make sure that all files with expired ttl are deleted on next
// manual compaction.
{
- env_->addon_time_.store(0);
+ // NOTE: Presumed unnecessary and removed: resetting mock time in env
+
options.compaction_options_fifo.max_table_files_size = 150 << 10; // 150KB
options.compaction_options_fifo.allow_compaction = false;
options.ttl = 1 * 60 * 60 ; // 1 hour
for (int i = 0; i < 10; i++) {
// Generate and flush a file about 10KB.
for (int j = 0; j < 10; j++) {
- ASSERT_OK(Put(ToString(i * 20 + j), RandomString(&rnd, 980)));
+ ASSERT_OK(Put(ToString(i * 20 + j), rnd.RandomString(980)));
}
Flush();
ASSERT_OK(dbfull()->TEST_WaitForCompact());
ASSERT_EQ(NumTableFilesAtLevel(0), 10);
// Sleep for 2 hours -- which is much greater than TTL.
- // Note: Couldn't use SleepForMicroseconds because it takes an int instead
- // of uint64_t. Hence used addon_time_ directly.
- // env_->SleepForMicroseconds(2 * 60 * 60 * 1000 * 1000);
- env_->addon_time_.fetch_add(2 * 60 * 60);
+ env_->MockSleepForSeconds(2 * 60 * 60);
// Since no flushes and compactions have run, the db should still be in
// the same state even after considerable time has passed.
for (int i = 0; i < 10; i++) {
// Generate and flush a file about 10KB.
for (int j = 0; j < 10; j++) {
- ASSERT_OK(Put(ToString(i * 20 + j), RandomString(&rnd, 980)));
+ ASSERT_OK(Put(ToString(i * 20 + j), rnd.RandomString(980)));
}
Flush();
ASSERT_OK(dbfull()->TEST_WaitForCompact());
ASSERT_EQ(NumTableFilesAtLevel(0), 10);
// Sleep for 2 hours -- which is much greater than TTL.
- env_->addon_time_.fetch_add(2 * 60 * 60);
+ env_->MockSleepForSeconds(2 * 60 * 60);
// Just to make sure that we are in the same state even after sleeping.
ASSERT_OK(dbfull()->TEST_WaitForCompact());
ASSERT_EQ(NumTableFilesAtLevel(0), 10);
// Create 1 more file to trigger TTL compaction. The old files are dropped.
for (int i = 0; i < 1; i++) {
for (int j = 0; j < 10; j++) {
- ASSERT_OK(Put(ToString(i * 20 + j), RandomString(&rnd, 980)));
+ ASSERT_OK(Put(ToString(i * 20 + j), rnd.RandomString(980)));
}
Flush();
}
for (int i = 0; i < 3; i++) {
// Generate and flush a file about 10KB.
for (int j = 0; j < 10; j++) {
- ASSERT_OK(Put(ToString(i * 20 + j), RandomString(&rnd, 980)));
+ ASSERT_OK(Put(ToString(i * 20 + j), rnd.RandomString(980)));
}
Flush();
ASSERT_OK(dbfull()->TEST_WaitForCompact());
ASSERT_EQ(NumTableFilesAtLevel(0), 3);
// Sleep for 2 hours -- which is much greater than TTL.
- env_->addon_time_.fetch_add(2 * 60 * 60);
+ env_->MockSleepForSeconds(2 * 60 * 60);
// Just to make sure that we are in the same state even after sleeping.
ASSERT_OK(dbfull()->TEST_WaitForCompact());
ASSERT_EQ(NumTableFilesAtLevel(0), 3);
for (int i = 0; i < 5; i++) {
for (int j = 0; j < 140; j++) {
- ASSERT_OK(Put(ToString(i * 20 + j), RandomString(&rnd, 980)));
+ ASSERT_OK(Put(ToString(i * 20 + j), rnd.RandomString(980)));
}
Flush();
ASSERT_OK(dbfull()->TEST_WaitForCompact());
for (int i = 0; i < 10; i++) {
// Generate and flush a file about 10KB.
for (int j = 0; j < 10; j++) {
- ASSERT_OK(Put(ToString(i * 20 + j), RandomString(&rnd, 980)));
+ ASSERT_OK(Put(ToString(i * 20 + j), rnd.RandomString(980)));
}
Flush();
ASSERT_OK(dbfull()->TEST_WaitForCompact());
ASSERT_EQ(NumTableFilesAtLevel(0), 5);
// Sleep for 2 hours -- which is much greater than TTL.
- env_->addon_time_.fetch_add(2 * 60 * 60);
+ env_->MockSleepForSeconds(2 * 60 * 60);
// Just to make sure that we are in the same state even after sleeping.
ASSERT_OK(dbfull()->TEST_WaitForCompact());
ASSERT_EQ(NumTableFilesAtLevel(0), 5);
// Create 10 more files. The old 5 files are dropped as their ttl expired.
for (int i = 0; i < 10; i++) {
for (int j = 0; j < 10; j++) {
- ASSERT_OK(Put(ToString(i * 20 + j), RandomString(&rnd, 980)));
+ ASSERT_OK(Put(ToString(i * 20 + j), rnd.RandomString(980)));
}
Flush();
ASSERT_OK(dbfull()->TEST_WaitForCompact());
for (int i = 0; i < 60; i++) {
// Generate and flush a file about 20KB.
for (int j = 0; j < 20; j++) {
- ASSERT_OK(Put(ToString(i * 20 + j), RandomString(&rnd, 980)));
+ ASSERT_OK(Put(ToString(i * 20 + j), rnd.RandomString(980)));
}
Flush();
ASSERT_OK(dbfull()->TEST_WaitForCompact());
for (int i = 0; i < 60; i++) {
// Generate and flush a file about 20KB.
for (int j = 0; j < 20; j++) {
- ASSERT_OK(Put(ToString(i * 20 + j + 2000), RandomString(&rnd, 980)));
+ ASSERT_OK(Put(ToString(i * 20 + j + 2000), rnd.RandomString(980)));
}
Flush();
ASSERT_OK(dbfull()->TEST_WaitForCompact());
uint64_t start = env_->NowMicros();
// Write ~96M data
for (int64_t i = 0; i < (96 << 10); ++i) {
- ASSERT_OK(
- Put(RandomString(&rnd, 32), RandomString(&rnd, (1 << 10) + 1), wo));
+ ASSERT_OK(Put(rnd.RandomString(32), rnd.RandomString((1 << 10) + 1), wo));
}
uint64_t elapsed = env_->NowMicros() - start;
double raw_rate = env_->bytes_written_ * 1000000.0 / elapsed;
start = env_->NowMicros();
// Write ~96M data
for (int64_t i = 0; i < (96 << 10); ++i) {
- ASSERT_OK(
- Put(RandomString(&rnd, 32), RandomString(&rnd, (1 << 10) + 1), wo));
+ ASSERT_OK(Put(rnd.RandomString(32), rnd.RandomString((1 << 10) + 1), wo));
}
rate_limiter_drains =
TestGetTickerCount(options, NUMBER_RATE_LIMITER_DRAINS) -
start = env_->NowMicros();
// Write ~96M data
for (int64_t i = 0; i < (96 << 10); ++i) {
- ASSERT_OK(
- Put(RandomString(&rnd, 32), RandomString(&rnd, (1 << 10) + 1), wo));
+ ASSERT_OK(Put(rnd.RandomString(32), rnd.RandomString((1 << 10) + 1), wo));
}
elapsed = env_->NowMicros() - start;
rate_limiter_drains =
DestroyAndReopen(options);
ASSERT_EQ(db_->GetOptions().allow_mmap_reads, false);
- options.table_factory.reset(new PlainTableFactory());
+ options.table_factory.reset(NewPlainTableFactory());
options.prefix_extractor.reset(NewNoopTransform());
Destroy(options);
ASSERT_TRUE(!TryReopen(options).IsNotSupported());
TEST_F(DBTest, ConcurrentFlushWAL) {
const size_t cnt = 100;
Options options;
+ options.env = env_;
WriteOptions wopt;
ReadOptions ropt;
for (bool two_write_queues : {false, true}) {
const int kNumPutsBeforeWaitForFlush = 64;
Random rnd(301);
for (int i = 0; i < size; i++) {
- ASSERT_OK(Put(Key(i), RandomString(&rnd, 1024)));
+ ASSERT_OK(Put(Key(i), rnd.RandomString(1024)));
// The following condition prevents a race condition between flush jobs
// acquiring work and this thread filling up multiple memtables. Without
ROCKSDB_NAMESPACE::SyncPoint::GetInstance()->EnableProcessing();
while (!sleeping_task_low.WokenUp() && count < 256) {
- ASSERT_OK(Put(Key(count), RandomString(&rnd, 1024), WriteOptions()));
+ ASSERT_OK(Put(Key(count), rnd.RandomString(1024), WriteOptions()));
count++;
}
ASSERT_GT(static_cast<double>(count), 128 * 0.8);
Env::Priority::LOW);
count = 0;
while (!sleeping_task_low.WokenUp() && count < 1024) {
- ASSERT_OK(Put(Key(count), RandomString(&rnd, 1024), WriteOptions()));
+ ASSERT_OK(Put(Key(count), rnd.RandomString(1024), WriteOptions()));
count++;
}
// Windows fails this test. Will tune in the future and figure out
count = 0;
while (!sleeping_task_low.WokenUp() && count < 1024) {
- ASSERT_OK(Put(Key(count), RandomString(&rnd, 1024), WriteOptions()));
+ ASSERT_OK(Put(Key(count), rnd.RandomString(1024), WriteOptions()));
count++;
}
// Windows fails this test. Will tune in the future and figure out
for (int file = 0; file < kNumL0Files; ++file) {
for (int key = 0; key < kEntriesPerBuffer; ++key) {
ASSERT_OK(Put(ToString(key + file * kEntriesPerBuffer),
- RandomString(&rnd, kTestValueSize)));
+ rnd.RandomString(kTestValueSize)));
}
Flush();
}
ASSERT_EQ("1,1,1", FilesPerLevel(1));
// Compaction range overlaps files
- Compact(1, "p1", "p9");
+ Compact(1, "p", "q");
ASSERT_EQ("0,0,1", FilesPerLevel(1));
// Populate a different range
int operation_count[ThreadStatus::NUM_OP_TYPES] = {0};
for (int file = 0; file < 16 * kNumL0Files; ++file) {
for (int k = 0; k < kEntriesPerBuffer; ++k) {
- ASSERT_OK(Put(ToString(key++), RandomString(&rnd, kTestValueSize)));
+ ASSERT_OK(Put(ToString(key++), rnd.RandomString(kTestValueSize)));
}
Status s = env_->GetThreadList(&thread_list);
int operation_count[ThreadStatus::NUM_OP_TYPES] = {0};
for (int file = 0; file < 16 * kNumL0Files; ++file) {
for (int k = 0; k < kEntriesPerBuffer; ++k) {
- ASSERT_OK(Put(ToString(key++), RandomString(&rnd, kTestValueSize)));
+ ASSERT_OK(Put(ToString(key++), rnd.RandomString(kTestValueSize)));
}
Status s = env_->GetThreadList(&thread_list);
for (int i = 0; i < kNKeys; i++) {
keys[i] = i;
}
- std::random_shuffle(std::begin(keys), std::end(keys));
+ RandomShuffle(std::begin(keys), std::end(keys));
Random rnd(301);
Options options;
+ options.env = env_;
options.create_if_missing = true;
options.db_write_buffer_size = 20480;
options.write_buffer_size = 20480;
for (int i = 0; i < kNKeys; i++) {
keys[i] = i;
}
- std::random_shuffle(std::begin(keys), std::end(keys));
+ RandomShuffle(std::begin(keys), std::end(keys));
Random rnd(301);
Options options;
ROCKSDB_NAMESPACE::SyncPoint::GetInstance()->EnableProcessing();
for (int i = 0; i < 100; i++) {
- std::string value = RandomString(&rnd, 200);
+ std::string value = rnd.RandomString(200);
ASSERT_OK(Put(Key(keys[i]), value));
if (i % 25 == 24) {
Flush();
ROCKSDB_NAMESPACE::SyncPoint::GetInstance()->EnableProcessing();
for (int i = 101; i < 500; i++) {
- std::string value = RandomString(&rnd, 200);
+ std::string value = rnd.RandomString(200);
ASSERT_OK(Put(Key(keys[i]), value));
if (i % 100 == 99) {
Flush();
auto gen_l0_kb = [this](int start, int size, int stride) {
Random rnd(301);
for (int i = 0; i < size; i++) {
- ASSERT_OK(Put(Key(start + stride * i), RandomString(&rnd, 1024)));
+ ASSERT_OK(Put(Key(start + stride * i), rnd.RandomString(1024)));
}
dbfull()->TEST_WaitForFlushMemTable();
};
Random rnd(301);
WriteOptions wo;
while (count < 64) {
- ASSERT_OK(Put(Key(count), RandomString(&rnd, 1024), wo));
+ ASSERT_OK(Put(Key(count), rnd.RandomString(1024), wo));
dbfull()->TEST_FlushMemTable(true, true);
count++;
if (dbfull()->TEST_write_controler().IsStopped()) {
sleeping_task_low.WaitUntilSleeping();
count = 0;
while (count < 64) {
- ASSERT_OK(Put(Key(count), RandomString(&rnd, 1024), wo));
+ ASSERT_OK(Put(Key(count), rnd.RandomString(1024), wo));
dbfull()->TEST_FlushMemTable(true, true);
count++;
if (dbfull()->TEST_write_controler().IsStopped()) {
ASSERT_EQ(NumTableFilesAtLevel(0), 0);
for (int i = 0; i < 4; ++i) {
- ASSERT_OK(Put(Key(i), RandomString(&rnd, 1024)));
+ ASSERT_OK(Put(Key(i), rnd.RandomString(1024)));
// Wait for compaction so that put won't stop
dbfull()->TEST_FlushMemTable(true);
}
ASSERT_EQ(NumTableFilesAtLevel(0), 0);
for (int i = 0; i < 4; ++i) {
- ASSERT_OK(Put(Key(i), RandomString(&rnd, 1024)));
+ ASSERT_OK(Put(Key(i), rnd.RandomString(1024)));
// Wait for compaction so that put won't stop
dbfull()->TEST_FlushMemTable(true);
}
Options options;
options.ttl = 0;
options.create_if_missing = true;
+ options.env = env_;
DestroyAndReopen(options);
// Initial defaults
TEST_F(DBTest, DynamicUniversalCompactionOptions) {
Options options;
options.create_if_missing = true;
+ options.env = env_;
DestroyAndReopen(options);
// Initial defaults
DestroyAndReopen(options);
Random rnd(301);
- const int kCDTKeysPerBuffer = 4;
- const int kTestSize = kCDTKeysPerBuffer * 4096;
- const int kTotalIteration = 100;
+ constexpr int kCDTKeysPerBuffer = 4;
+ constexpr int kTestSize = kCDTKeysPerBuffer * 4096;
+ constexpr int kTotalIteration = 20;
// the second half of the test involves in random failure
// of file creation.
- const int kRandomFailureTest = kTotalIteration / 2;
+ constexpr int kRandomFailureTest = kTotalIteration / 2;
+
std::vector<std::string> values;
for (int i = 0; i < kTestSize; ++i) {
values.push_back("NOT_FOUND");
}
for (int k = 0; k < kTestSize; ++k) {
// here we expect some of the Put fails.
- std::string value = RandomString(&rnd, 100);
+ std::string value = rnd.RandomString(100);
Status s = Put(Key(k), Slice(value));
if (s.ok()) {
// update the latest successful put
int key1 = key_start + 1;
int key2 = key_start + 2;
Random rnd(301);
- ASSERT_OK(Put(Key(key0), RandomString(&rnd, 8)));
+ ASSERT_OK(Put(Key(key0), rnd.RandomString(8)));
for (int i = 0; i < 10; ++i) {
- ASSERT_OK(Put(Key(key1), RandomString(&rnd, 8)));
+ ASSERT_OK(Put(Key(key1), rnd.RandomString(8)));
}
- ASSERT_OK(Put(Key(key2), RandomString(&rnd, 8)));
+ ASSERT_OK(Put(Key(key2), rnd.RandomString(8)));
std::unique_ptr<Iterator> iter(db_->NewIterator(ReadOptions()));
iter->Seek(Key(key1));
ASSERT_TRUE(iter->Valid());
ASSERT_OK(dbfull()->TEST_GetLatestMutableCFOptions(handles_[1],
&mutable_cf_options));
ASSERT_TRUE(mutable_cf_options.report_bg_io_stats);
+ ASSERT_TRUE(mutable_cf_options.check_flush_compaction_key_order);
+
+ ASSERT_OK(dbfull()->SetOptions(
+ handles_[1], {{"check_flush_compaction_key_order", "false"}}));
+ ASSERT_OK(dbfull()->TEST_GetLatestMutableCFOptions(handles_[1],
+ &mutable_cf_options));
+ ASSERT_FALSE(mutable_cf_options.check_flush_compaction_key_order);
}
#endif // ROCKSDB_LITE
Random rnd(301);
for (int i = 0; i < kNumKeysWritten; ++i) {
// compressible string
- ASSERT_OK(Put(Key(i), RandomString(&rnd, 128) + std::string(128, 'a')));
+ ASSERT_OK(Put(Key(i), rnd.RandomString(128) + std::string(128, 'a')));
}
table_options.format_version = first_table_version == 1 ? 2 : 1;
public:
explicit DelayedMergeOperator(DBTest* d) : db_test_(d) {}
- bool FullMergeV2(const MergeOperationInput& /*merge_in*/,
+ bool FullMergeV2(const MergeOperationInput& merge_in,
MergeOperationOutput* merge_out) const override {
- db_test_->env_->addon_time_.fetch_add(1000);
+ db_test_->env_->MockSleepForMicroseconds(1000 *
+ merge_in.operand_list.size());
merge_out->new_value = "";
return true;
}
// Enable time profiling
SetPerfLevel(kEnableTime);
- this->env_->addon_time_.store(0);
- this->env_->time_elapse_only_sleep_ = true;
- this->env_->no_slowdown_ = true;
Options options = CurrentOptions();
options.statistics = ROCKSDB_NAMESPACE::CreateDBStatistics();
options.merge_operator.reset(new DelayedMergeOperator(this));
+ SetTimeElapseOnlySleepOnReopen(&options);
DestroyAndReopen(options);
+ // NOTE: Presumed unnecessary and removed: resetting mock time in env
+
ASSERT_EQ(TestGetTickerCount(options, MERGE_OPERATION_TOTAL_TIME), 0);
db_->Put(WriteOptions(), "foo", one);
ASSERT_OK(Flush());
std::string result;
db_->Get(opt, "foo", &result);
- ASSERT_EQ(1000000, TestGetTickerCount(options, MERGE_OPERATION_TOTAL_TIME));
+ ASSERT_EQ(2000000, TestGetTickerCount(options, MERGE_OPERATION_TOTAL_TIME));
ReadOptions read_options;
std::unique_ptr<Iterator> iter(db_->NewIterator(read_options));
}
ASSERT_EQ(1, count);
- ASSERT_EQ(2000000, TestGetTickerCount(options, MERGE_OPERATION_TOTAL_TIME));
+ ASSERT_EQ(4000000, TestGetTickerCount(options, MERGE_OPERATION_TOTAL_TIME));
#ifdef ROCKSDB_USING_THREAD_STATUS
ASSERT_GT(TestGetTickerCount(options, FLUSH_WRITE_BYTES), 0);
#endif // ROCKSDB_USING_THREAD_STATUS
- this->env_->time_elapse_only_sleep_ = false;
}
#ifndef ROCKSDB_LITE
options.compaction_filter_factory = std::make_shared<KeepFilterFactory>();
options.statistics = ROCKSDB_NAMESPACE::CreateDBStatistics();
options.merge_operator.reset(new DelayedMergeOperator(this));
- options.compaction_style = kCompactionStyleUniversal;
+ options.disable_auto_compactions = true;
options.max_subcompactions = max_subcompactions_;
+ SetTimeElapseOnlySleepOnReopen(&options);
DestroyAndReopen(options);
- for (int i = 0; i < 1000; i++) {
+ constexpr unsigned n = 1000;
+ for (unsigned i = 0; i < n; i++) {
ASSERT_OK(db_->Merge(WriteOptions(), "foo", "TEST"));
ASSERT_OK(Flush());
}
dbfull()->TEST_WaitForFlushMemTable();
- dbfull()->TEST_WaitForCompact();
- ASSERT_NE(TestGetTickerCount(options, MERGE_OPERATION_TOTAL_TIME), 0);
+ CompactRangeOptions cro;
+ cro.exclusive_manual_compaction = exclusive_manual_compaction_;
+ ASSERT_OK(db_->CompactRange(cro, nullptr, nullptr));
+
+ ASSERT_EQ(uint64_t{n} * 1000000U,
+ TestGetTickerCount(options, MERGE_OPERATION_TOTAL_TIME));
}
TEST_P(DBTestWithParam, FilterCompactionTimeTest) {
options.statistics = ROCKSDB_NAMESPACE::CreateDBStatistics();
options.statistics->set_stats_level(kExceptTimeForMutex);
options.max_subcompactions = max_subcompactions_;
+ SetTimeElapseOnlySleepOnReopen(&options);
DestroyAndReopen(options);
+ unsigned n = 0;
// put some data
for (int table = 0; table < 4; ++table) {
for (int i = 0; i < 10 + table; ++i) {
Put(ToString(table * 100 + i), "val");
+ ++n;
}
Flush();
}
Iterator* itr = db_->NewIterator(ReadOptions());
itr->SeekToFirst();
- ASSERT_NE(TestGetTickerCount(options, FILTER_OPERATION_TOTAL_TIME), 0);
+ ASSERT_EQ(uint64_t{n} * 1000000U,
+ TestGetTickerCount(options, FILTER_OPERATION_TOTAL_TIME));
delete itr;
}
#endif // ROCKSDB_LITE
#endif // ROCKSDB_LITE
#ifndef ROCKSDB_LITE
-TEST_F(DBTest, SuggestCompactRangeTest) {
+TEST_F(DBTest, DISABLED_SuggestCompactRangeTest) {
class CompactionFilterFactoryGetContext : public CompactionFilterFactory {
public:
std::unique_ptr<CompactionFilter> CreateCompactionFilter(
ASSERT_EQ(1, NumTableFilesAtLevel(1));
}
+
TEST_F(DBTest, PromoteL0) {
Options options = CurrentOptions();
options.disable_auto_compactions = true;
std::map<int32_t, std::string> values;
for (const auto& range : ranges) {
for (int32_t j = range.first; j < range.second; j++) {
- values[j] = RandomString(&rnd, value_size);
+ values[j] = rnd.RandomString(value_size);
ASSERT_OK(Put(Key(j), values[j]));
}
ASSERT_OK(Flush());
Random rnd(301);
for (int i = 0; i < kNumL0Files; ++i) {
- ASSERT_OK(Put(Key(0), RandomString(&rnd, 1024)));
+ ASSERT_OK(Put(Key(0), rnd.RandomString(1024)));
Flush();
}
ASSERT_EQ(NumTableFilesAtLevel(0), kNumL0Files);
for (int i = 0; i < 2; ++i) {
// put two keys to ensure no trivial move
for (int j = 0; j < 2; ++j) {
- ASSERT_OK(Put(Key(j), RandomString(&rnd, 1024)));
+ ASSERT_OK(Put(Key(j), rnd.RandomString(1024)));
}
ASSERT_OK(Flush());
}
for (int i = 0; i < kNumL0Files; ++i) {
// put two keys to ensure no trivial move
for (int j = 0; j < 2; ++j) {
- ASSERT_OK(Put(Key(j), RandomString(&rnd, 1024)));
+ ASSERT_OK(Put(Key(j), rnd.RandomString(1024)));
}
ASSERT_OK(Flush());
}
for (int i = 0; i < 2; ++i) {
// put two keys to ensure no trivial move
for (int j = 0; j < 2; ++j) {
- ASSERT_OK(Put(Key(j), RandomString(&rnd, 1024)));
+ ASSERT_OK(Put(Key(j), rnd.RandomString(1024)));
}
ASSERT_OK(Flush());
}
// generate enough files to trigger compaction
for (int i = 0; i < 20; ++i) {
for (int j = 0; j < 2; ++j) {
- ASSERT_OK(Put(Key(j), RandomString(&rnd, 1024)));
+ ASSERT_OK(Put(Key(j), rnd.RandomString(1024)));
}
ASSERT_OK(Flush());
}
Options options = CurrentOptions();
env_->SetBackgroundThreads(1, Env::LOW);
options.env = env_;
- env_->no_slowdown_ = true;
options.write_buffer_size = 100000000;
options.max_write_buffer_number = 256;
options.max_background_compactions = 1;
options.memtable_factory.reset(
new SpecialSkipListFactory(kEntriesPerMemTable));
+ SetTimeElapseOnlySleepOnReopen(&options);
CreateAndReopenWithCF({"pikachu"}, options);
// Block compactions
kIncSlowdownRatio * kIncSlowdownRatio);
}
// Estimate the total sleep time fall into the rough range.
- ASSERT_GT(env_->addon_time_.load(),
- static_cast<int64_t>(estimated_sleep_time / 2));
- ASSERT_LT(env_->addon_time_.load(),
- static_cast<int64_t>(estimated_sleep_time * 2));
+ ASSERT_GT(env_->NowMicros(), estimated_sleep_time / 2);
+ ASSERT_LT(env_->NowMicros(), estimated_sleep_time * 2);
- env_->no_slowdown_ = false;
ROCKSDB_NAMESPACE::SyncPoint::GetInstance()->DisableProcessing();
sleeping_task_low.WakeUp();
sleeping_task_low.WaitUntilDone();
threads.emplace_back([&]() {
Random rnd(301);
for (int i = 0; i < 10000; ++i) {
- Put(RandomString(&rnd, 10), RandomString(&rnd, 10));
+ Put(rnd.RandomString(10), rnd.RandomString(10));
}
done.store(true);
});
Options options = CurrentOptions();
options.max_open_files = -1;
- env_->time_elapse_only_sleep_ = false;
+ env_->SetMockSleep();
options.env = env_;
- env_->addon_time_.store(0);
+ // NOTE: Presumed unnecessary and removed: resetting mock time in env
+
DestroyAndReopen(options);
bool set_file_creation_time_to_zero = true;
const uint64_t uint_time_1 = static_cast<uint64_t>(time_1);
// Add 50 hours
- env_->addon_time_.fetch_add(50 * 60 * 60);
+ env_->MockSleepForSeconds(50 * 60 * 60);
int64_t time_2 = 0;
env_->GetCurrentTime(&time_2);
for (int i = 0; i < kNumLevelFiles; ++i) {
for (int j = 0; j < kNumKeysPerFile; ++j) {
ASSERT_OK(
- Put(Key(i * kNumKeysPerFile + j), RandomString(&rnd, kValueSize)));
+ Put(Key(i * kNumKeysPerFile + j), rnd.RandomString(kValueSize)));
}
Flush();
}
set_file_creation_time_to_zero = false;
options = CurrentOptions();
options.max_open_files = -1;
- env_->time_elapse_only_sleep_ = false;
options.env = env_;
- env_->addon_time_.store(0);
+ // NOTE: Presumed unnecessary and removed: resetting mock time in env
+
DestroyAndReopen(options);
for (int i = 0; i < kNumLevelFiles; ++i) {
for (int j = 0; j < kNumKeysPerFile; ++j) {
ASSERT_OK(
- Put(Key(i * kNumKeysPerFile + j), RandomString(&rnd, kValueSize)));
+ Put(Key(i * kNumKeysPerFile + j), rnd.RandomString(kValueSize)));
}
Flush();
}
ROCKSDB_NAMESPACE::SyncPoint::GetInstance()->DisableProcessing();
}
+// Checks that with max_write_buffer_size_to_maintain set, total memtable
+// memory (active + maintained immutables) does not keep growing unbounded:
+// after the limit is exceeded once, size_all_mem_table must drop back on
+// the next iteration.
+TEST_F(DBTest, MemoryUsageWithMaxWriteBufferSizeToMaintain) {
+ Options options = CurrentOptions();
+ options.max_write_buffer_size_to_maintain = 10000;
+ options.write_buffer_size = 160000;
+ Reopen(options);
+ Random rnd(301);
+ bool memory_limit_exceeded = false;
+ uint64_t size_all_mem_table = 0;
+ uint64_t cur_active_mem = 0;
+ for (int i = 0; i < 1000; i++) {
+ std::string value = rnd.RandomString(1000);
+ ASSERT_OK(Put("keykey_" + std::to_string(i), value));
+
+ dbfull()->TEST_WaitForFlushMemTable();
+
+ // Sample both total memtable memory and the active memtable's size so
+ // the check below can distinguish maintained-immutable growth from a
+ // merely large active memtable.
+ ASSERT_TRUE(db_->GetIntProperty(db_->DefaultColumnFamily(),
+ DB::Properties::kSizeAllMemTables,
+ &size_all_mem_table));
+ ASSERT_TRUE(db_->GetIntProperty(db_->DefaultColumnFamily(),
+ DB::Properties::kCurSizeActiveMemTable,
+ &cur_active_mem));
+
+ // Errors out if memory usage keeps on increasing beyond the limit.
+ // Once the memory limit is exceeded, memory_limit_exceeded is set; if
+ // size_all_mem_table does not drop back down on the next write, the
+ // ASSERT_FALSE below fails (not expected behaviour). If memory usage
+ // drops, memory_limit_exceeded is reset to false.
+ if ((size_all_mem_table > cur_active_mem) &&
+ (cur_active_mem >=
+ static_cast<uint64_t>(options.max_write_buffer_size_to_maintain)) &&
+ (size_all_mem_table > options.max_write_buffer_size_to_maintain +
+ options.write_buffer_size)) {
+ ASSERT_FALSE(memory_limit_exceeded);
+ memory_limit_exceeded = true;
+ } else {
+ memory_limit_exceeded = false;
+ }
+ }
+}
+
#endif
} // namespace ROCKSDB_NAMESPACE