#include "port/port.h"
#include "port/stack_trace.h"
#include "rocksdb/sst_file_manager.h"
+#include "util/random.h"
namespace ROCKSDB_NAMESPACE {
class DBSSTTest : public DBTestBase {
public:
- DBSSTTest() : DBTestBase("/db_sst_test") {}
+ // NOTE(review): second DBTestBase argument presumably controls whether the
+ // test Env performs real fsyncs (name suggests so) — confirm against the
+ // DBTestBase constructor before relying on it.
+ DBSSTTest() : DBTestBase("/db_sst_test", /*env_do_fsync=*/true) {}
};
#ifndef ROCKSDB_LITE
// Just open the DB with the option set to true and check that we don't crash.
Options options;
+ options.env = env_;
options.skip_checking_sst_file_sizes_on_db_open = true;
Reopen(options);
for (int i = 0; i < 2; ++i) {
// Create 1MB sst file
for (int j = 0; j < 100; ++j) {
- ASSERT_OK(Put(Key(i * 50 + j), RandomString(&rnd, 10 * 1024)));
+ ASSERT_OK(Put(Key(i * 50 + j), rnd.RandomString(10 * 1024)));
}
ASSERT_OK(Flush());
}
for (int i = 0; i < 2; ++i) {
// Create 1MB sst file
for (int j = 0; j < 100; ++j) {
- ASSERT_OK(Put(Key(i * 50 + j), RandomString(&rnd, 10 * 1024)));
+ ASSERT_OK(Put(Key(i * 50 + j), rnd.RandomString(10 * 1024)));
}
ASSERT_OK(Flush());
}
// write_buffer_size. The flush will be blocked with block_first_time
// pending_file is protecting all the files created after
for (int j = 0; j < 256; ++j) {
- ASSERT_OK(Put(Key(j), RandomString(&rnd, 10 * 1024)));
+ ASSERT_OK(Put(Key(j), rnd.RandomString(10 * 1024)));
}
blocking_thread.WaitUntilSleeping();
dbfull()->TEST_WaitForFlushMemTable();
dbfull()->TEST_WaitForCompact();
// Verify that we are tracking all sst files in dbname_
- ASSERT_EQ(sfm->GetTrackedFiles(), GetAllSSTFiles());
+ std::unordered_map<std::string, uint64_t> files_in_db;
+ ASSERT_OK(GetAllSSTFiles(&files_in_db));
+ ASSERT_EQ(sfm->GetTrackedFiles(), files_in_db);
}
ASSERT_OK(db_->CompactRange(CompactRangeOptions(), nullptr, nullptr));
- auto files_in_db = GetAllSSTFiles();
+ std::unordered_map<std::string, uint64_t> files_in_db;
+ ASSERT_OK(GetAllSSTFiles(&files_in_db));
// Verify that we are tracking all sst files in dbname_
ASSERT_EQ(sfm->GetTrackedFiles(), files_in_db);
// Verify the total files size
"InstrumentedCondVar::TimedWaitInternal", [&](void* arg) {
// Turn timed wait into a simulated sleep
uint64_t* abs_time_us = static_cast<uint64_t*>(arg);
- int64_t cur_time = 0;
- env_->GetCurrentTime(&cur_time);
- if (*abs_time_us > static_cast<uint64_t>(cur_time)) {
- env_->addon_time_.fetch_add(*abs_time_us -
- static_cast<uint64_t>(cur_time));
+ uint64_t cur_time = env_->NowMicros();
+ if (*abs_time_us > cur_time) {
+ env_->MockSleepForMicroseconds(*abs_time_us - cur_time);
}
- // Randomly sleep shortly
- env_->addon_time_.fetch_add(
- static_cast<uint64_t>(Random::GetTLSInstance()->Uniform(10)));
+ // Plus an additional short, random amount
+ env_->MockSleepForMicroseconds(Random::GetTLSInstance()->Uniform(10));
- // Set wait until time to before current to force not to sleep.
- int64_t real_cur_time = 0;
- Env::Default()->GetCurrentTime(&real_cur_time);
- *abs_time_us = static_cast<uint64_t>(real_cur_time);
+ // Set wait until time to before (actual) current time to force not
+ // to sleep
+ *abs_time_us = Env::Default()->NowMicros();
});
ROCKSDB_NAMESPACE::SyncPoint::GetInstance()->EnableProcessing();
- env_->no_slowdown_ = true;
- env_->time_elapse_only_sleep_ = true;
Options options = CurrentOptions();
+ SetTimeElapseOnlySleepOnReopen(&options);
options.disable_auto_compactions = true;
- // Need to disable stats dumping and persisting which also use
- // RepeatableThread, one of whose member variables is of type
- // InstrumentedCondVar. The callback for
- // InstrumentedCondVar::TimedWaitInternal can be triggered by stats dumping
- // and persisting threads and cause time_spent_deleting measurement to become
- // incorrect.
- options.stats_dump_period_sec = 0;
- options.stats_persist_period_sec = 0;
options.env = env_;
+ options.statistics = CreateDBStatistics();
int64_t rate_bytes_per_sec = 1024 * 10; // 10 Kbs / Sec
Status s;
}
ASSERT_GT(time_spent_deleting, expected_penlty * 0.9);
ASSERT_LT(time_spent_deleting, expected_penlty * 1.1);
+ ASSERT_EQ(4, options.statistics->getAndResetTickerCount(FILES_MARKED_TRASH));
+ ASSERT_EQ(
+ 0, options.statistics->getAndResetTickerCount(FILES_DELETED_IMMEDIATELY));
ROCKSDB_NAMESPACE::SyncPoint::GetInstance()->DisableProcessing();
}
"DeleteScheduler::BackgroundEmptyTrash:Wait",
[&](void* arg) { penalties.push_back(*(static_cast<uint64_t*>(arg))); });
- env_->no_slowdown_ = true;
- env_->time_elapse_only_sleep_ = true;
Options options = CurrentOptions();
options.disable_auto_compactions = true;
options.compression = kNoCompression;
options.sst_file_manager->SetDeleteRateBytesPerSecond(rate_bytes_per_sec);
auto sfm = static_cast<SstFileManagerImpl*>(options.sst_file_manager.get());
sfm->delete_scheduler()->SetMaxTrashDBRatio(3.1);
+ SetTimeElapseOnlySleepOnReopen(&options);
ASSERT_OK(TryReopen(options));
ROCKSDB_NAMESPACE::SyncPoint::GetInstance()->EnableProcessing();
bool fake_log_delete;
};
- std::unique_ptr<MyEnv> env(new MyEnv(Env::Default()));
+ std::unique_ptr<MyEnv> env(new MyEnv(env_));
Destroy(last_options_);
env->set_fake_log_delete(true);
// Generate a file containing 100 keys.
for (int i = 0; i < 100; i++) {
- ASSERT_OK(Put(Key(i), RandomString(&rnd, 50)));
+ ASSERT_OK(Put(Key(i), rnd.RandomString(50)));
}
ASSERT_OK(Flush());
uint64_t first_file_size = 0;
- auto files_in_db = GetAllSSTFiles(&first_file_size);
+ std::unordered_map<std::string, uint64_t> files_in_db;
+ ASSERT_OK(GetAllSSTFiles(&files_in_db, &first_file_size));
ASSERT_EQ(sfm->GetTotalSize(), first_file_size);
// Set the maximum allowed space usage to the current total size
// Generate a file containing 10 keys.
for (int i = 0; i < 10; i++) {
- ASSERT_OK(Put(Key(i), RandomString(&rnd, 50)));
+ ASSERT_OK(Put(Key(i), rnd.RandomString(50)));
}
ASSERT_OK(Flush());
uint64_t total_file_size = 0;
- auto files_in_db = GetAllSSTFiles(&total_file_size);
+ std::unordered_map<std::string, uint64_t> files_in_db;
+ ASSERT_OK(GetAllSSTFiles(&files_in_db, &total_file_size));
// Set the maximum allowed space usage to the current total size
sfm->SetMaxAllowedSpaceUsage(2 * total_file_size + 1);
// Generate another file to trigger compaction.
for (int i = 0; i < 10; i++) {
- ASSERT_OK(Put(Key(i), RandomString(&rnd, 50)));
+ ASSERT_OK(Put(Key(i), rnd.RandomString(50)));
}
ASSERT_OK(Flush());
dbfull()->TEST_WaitForCompact(true);
ASSERT_EQ(sfm->GetCompactionsReservedSize(), 0);
// Make sure the stat is bumped
ASSERT_GT(dbfull()->immutable_db_options().statistics.get()->getTickerCount(COMPACTION_CANCELLED), 0);
+ ASSERT_EQ(0,
+ dbfull()->immutable_db_options().statistics.get()->getTickerCount(
+ FILES_MARKED_TRASH));
+ ASSERT_EQ(4,
+ dbfull()->immutable_db_options().statistics.get()->getTickerCount(
+ FILES_DELETED_IMMEDIATELY));
ROCKSDB_NAMESPACE::SyncPoint::GetInstance()->DisableProcessing();
}
// Generate a file containing 10 keys.
for (int i = 0; i < 10; i++) {
- ASSERT_OK(Put(Key(i), RandomString(&rnd, 50)));
+ ASSERT_OK(Put(Key(i), rnd.RandomString(50)));
}
ASSERT_OK(Flush());
uint64_t total_file_size = 0;
- auto files_in_db = GetAllSSTFiles(&total_file_size);
+ std::unordered_map<std::string, uint64_t> files_in_db;
+ ASSERT_OK(GetAllSSTFiles(&files_in_db, &total_file_size));
// Set the maximum allowed space usage to the current total size
sfm->SetMaxAllowedSpaceUsage(2 * total_file_size + 1);
// Generate another file to trigger compaction.
for (int i = 0; i < 10; i++) {
- ASSERT_OK(Put(Key(i), RandomString(&rnd, 50)));
+ ASSERT_OK(Put(Key(i), rnd.RandomString(50)));
}
ASSERT_OK(Flush());
// It is easy to detect if the test is stuck in a loop. No need for
// complex termination logic.
while (true) {
- auto s = Put(RandomString(&rnd, 10), RandomString(&rnd, 50));
+ auto s = Put(rnd.RandomString(10), rnd.RandomString(50));
if (!s.ok()) {
break;
}
}
ASSERT_TRUE(bg_error_set);
uint64_t total_sst_files_size = 0;
- GetAllSSTFiles(&total_sst_files_size);
+ std::unordered_map<std::string, uint64_t> files_in_db;
+ ASSERT_OK(GetAllSSTFiles(&files_in_db, &total_sst_files_size));
ASSERT_GE(total_sst_files_size, limit_mb * 1024 * 1024);
ROCKSDB_NAMESPACE::SyncPoint::GetInstance()->DisableProcessing();
}
// we encode table properties as varint64. Force time to be 0 to work around
// it. Should remove the workaround after we propagate the property on
// compaction.
- std::unique_ptr<MockTimeEnv> mock_env(new MockTimeEnv(Env::Default()));
- mock_env->set_current_time(0);
+ ROCKSDB_NAMESPACE::SyncPoint::GetInstance()->SetCallBack(
+ "FlushJob::WriteLevel0Table:oldest_ancester_time", [&](void* arg) {
+ uint64_t* current_time = static_cast<uint64_t*>(arg);
+ *current_time = 0;
+ });
+ ROCKSDB_NAMESPACE::SyncPoint::GetInstance()->EnableProcessing();
Options options = CurrentOptions();
options.disable_auto_compactions = true;
options.compression = kNoCompression;
- options.env = mock_env.get();
DestroyAndReopen(options);
// Generate 5 files in L0
for (int i = 0; i < 5; i++) {
// Total SST files = 0
ASSERT_EQ(total_sst_files_size, 0);
- // Close db before mock_env destruct.
- Close();
+ ROCKSDB_NAMESPACE::SyncPoint::GetInstance()->DisableProcessing();
}
TEST_F(DBSSTTest, GetTotalSstFilesSizeVersionsFilesShared) {