1 // Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
2 // This source code is licensed under both the GPLv2 (found in the
3 // COPYING file in the root directory) and Apache 2.0 License
4 // (found in the LICENSE.Apache file in the root directory).
6 // Copyright (c) 2011 The LevelDB Authors. All rights reserved.
7 // Use of this source code is governed by a BSD-style license that can be
8 // found in the LICENSE file. See the AUTHORS file for names of contributors.
11 #ifndef __STDC_FORMAT_MACROS
12 #define __STDC_FORMAT_MACROS
23 #include <unordered_set>
27 #include "db/db_impl.h"
28 #include "db/dbformat.h"
29 #include "env/mock_env.h"
30 #include "memtable/hash_linklist_rep.h"
31 #include "rocksdb/cache.h"
32 #include "rocksdb/compaction_filter.h"
33 #include "rocksdb/convenience.h"
34 #include "rocksdb/db.h"
35 #include "rocksdb/env.h"
36 #include "rocksdb/filter_policy.h"
37 #include "rocksdb/options.h"
38 #include "rocksdb/slice.h"
39 #include "rocksdb/sst_file_writer.h"
40 #include "rocksdb/statistics.h"
41 #include "rocksdb/table.h"
42 #include "rocksdb/utilities/checkpoint.h"
43 #include "table/block_based_table_factory.h"
44 #include "table/mock_table.h"
45 #include "table/plain_table_factory.h"
46 #include "table/scoped_arena_iterator.h"
47 #include "util/compression.h"
48 #include "util/filename.h"
49 #include "util/mock_time_env.h"
50 #include "util/mutexlock.h"
52 #include "util/string_util.h"
53 #include "util/sync_point.h"
54 #include "util/testharness.h"
55 #include "util/testutil.h"
56 #include "utilities/merge_operators.h"
63 explicit AtomicCounter(Env
* env
= NULL
)
64 : env_(env
), cond_count_(&mu_
), count_(0) {}
69 cond_count_
.SignalAll();
77 bool WaitFor(int count
) {
80 uint64_t start
= env_
->NowMicros();
81 while (count_
< count
) {
82 uint64_t now
= env_
->NowMicros();
83 cond_count_
.TimedWait(now
+ /*1s*/ 1 * 1000 * 1000);
84 if (env_
->NowMicros() - start
> /*10s*/ 10 * 1000 * 1000) {
88 GTEST_LOG_(WARNING
) << "WaitFor is taking more time than usual";
98 cond_count_
.SignalAll();
104 port::CondVar cond_count_
;
108 struct OptionsOverride
{
109 std::shared_ptr
<const FilterPolicy
> filter_policy
= nullptr;
110 // These will be used only if filter_policy is set
111 bool partition_filters
= false;
112 uint64_t metadata_block_size
= 1024;
// Used as a bit mask of individual enums in which to skip an XF test point.
// kSkipNoSnapshot and kSkipNoPrefix occupy distinct bits so they can be
// OR-ed together into one mask; kSkipNone (0) skips nothing.
enum SkipPolicy { kSkipNone = 0, kSkipNoSnapshot = 1, kSkipNoPrefix = 2 };
122 // A hacky skip list mem table that triggers flush after number of entries.
123 class SpecialMemTableRep
: public MemTableRep
{
125 explicit SpecialMemTableRep(Allocator
* allocator
, MemTableRep
* memtable
,
126 int num_entries_flush
)
127 : MemTableRep(allocator
),
129 num_entries_flush_(num_entries_flush
),
132 virtual KeyHandle
Allocate(const size_t len
, char** buf
) override
{
133 return memtable_
->Allocate(len
, buf
);
136 // Insert key into the list.
137 // REQUIRES: nothing that compares equal to key is currently in the list.
138 virtual void Insert(KeyHandle handle
) override
{
140 memtable_
->Insert(handle
);
143 // Returns true iff an entry that compares equal to key is in the list.
144 virtual bool Contains(const char* key
) const override
{
145 return memtable_
->Contains(key
);
148 virtual size_t ApproximateMemoryUsage() override
{
149 // Return a high memory usage when number of entries exceeds the threshold
150 // to trigger a flush.
151 return (num_entries_
< num_entries_flush_
) ? 0 : 1024 * 1024 * 1024;
154 virtual void Get(const LookupKey
& k
, void* callback_args
,
155 bool (*callback_func
)(void* arg
,
156 const char* entry
)) override
{
157 memtable_
->Get(k
, callback_args
, callback_func
);
160 uint64_t ApproximateNumEntries(const Slice
& start_ikey
,
161 const Slice
& end_ikey
) override
{
162 return memtable_
->ApproximateNumEntries(start_ikey
, end_ikey
);
165 virtual MemTableRep::Iterator
* GetIterator(Arena
* arena
= nullptr) override
{
166 return memtable_
->GetIterator(arena
);
169 virtual ~SpecialMemTableRep() override
{}
172 std::unique_ptr
<MemTableRep
> memtable_
;
173 int num_entries_flush_
;
177 // The factory for the hacky skip list mem table that triggers flush after
178 // number of entries exceeds a threshold.
179 class SpecialSkipListFactory
: public MemTableRepFactory
{
181 // After number of inserts exceeds `num_entries_flush` in a mem table, trigger
183 explicit SpecialSkipListFactory(int num_entries_flush
)
184 : num_entries_flush_(num_entries_flush
) {}
186 using MemTableRepFactory::CreateMemTableRep
;
187 virtual MemTableRep
* CreateMemTableRep(
188 const MemTableRep::KeyComparator
& compare
, Allocator
* allocator
,
189 const SliceTransform
* transform
, Logger
* /*logger*/) override
{
190 return new SpecialMemTableRep(
191 allocator
, factory_
.CreateMemTableRep(compare
, allocator
, transform
, 0),
194 virtual const char* Name() const override
{ return "SkipListFactory"; }
196 bool IsInsertConcurrentlySupported() const override
{
197 return factory_
.IsInsertConcurrentlySupported();
201 SkipListFactory factory_
;
202 int num_entries_flush_
;
205 // Special Env used to delay background operations
206 class SpecialEnv
: public EnvWrapper
{
208 explicit SpecialEnv(Env
* base
);
210 Status
NewWritableFile(const std::string
& f
, std::unique_ptr
<WritableFile
>* r
,
211 const EnvOptions
& soptions
) override
{
212 class SSTableFile
: public WritableFile
{
215 std::unique_ptr
<WritableFile
> base_
;
218 SSTableFile(SpecialEnv
* env
, std::unique_ptr
<WritableFile
>&& base
)
219 : env_(env
), base_(std::move(base
)) {}
220 Status
Append(const Slice
& data
) override
{
221 if (env_
->table_write_callback_
) {
222 (*env_
->table_write_callback_
)();
224 if (env_
->drop_writes_
.load(std::memory_order_acquire
)) {
225 // Drop writes on the floor
227 } else if (env_
->no_space_
.load(std::memory_order_acquire
)) {
228 return Status::NoSpace("No space left on device");
230 env_
->bytes_written_
+= data
.size();
231 return base_
->Append(data
);
234 Status
PositionedAppend(const Slice
& data
, uint64_t offset
) override
{
235 if (env_
->table_write_callback_
) {
236 (*env_
->table_write_callback_
)();
238 if (env_
->drop_writes_
.load(std::memory_order_acquire
)) {
239 // Drop writes on the floor
241 } else if (env_
->no_space_
.load(std::memory_order_acquire
)) {
242 return Status::NoSpace("No space left on device");
244 env_
->bytes_written_
+= data
.size();
245 return base_
->PositionedAppend(data
, offset
);
248 Status
Truncate(uint64_t size
) override
{ return base_
->Truncate(size
); }
249 Status
RangeSync(uint64_t offset
, uint64_t nbytes
) override
{
250 Status s
= base_
->RangeSync(offset
, nbytes
);
251 #if !(defined NDEBUG) || !defined(OS_WIN)
252 TEST_SYNC_POINT_CALLBACK("SpecialEnv::SStableFile::RangeSync", &s
);
253 #endif // !(defined NDEBUG) || !defined(OS_WIN)
256 Status
Close() override
{
257 // SyncPoint is not supported in Released Windows Mode.
258 #if !(defined NDEBUG) || !defined(OS_WIN)
259 // Check preallocation size
260 // preallocation size is never passed to base file.
261 size_t preallocation_size
= preallocation_block_size();
262 TEST_SYNC_POINT_CALLBACK("DBTestWritableFile.GetPreallocationStatus",
263 &preallocation_size
);
264 #endif // !(defined NDEBUG) || !defined(OS_WIN)
265 Status s
= base_
->Close();
266 #if !(defined NDEBUG) || !defined(OS_WIN)
267 TEST_SYNC_POINT_CALLBACK("SpecialEnv::SStableFile::Close", &s
);
268 #endif // !(defined NDEBUG) || !defined(OS_WIN)
271 Status
Flush() override
{ return base_
->Flush(); }
272 Status
Sync() override
{
273 ++env_
->sync_counter_
;
274 while (env_
->delay_sstable_sync_
.load(std::memory_order_acquire
)) {
275 env_
->SleepForMicroseconds(100000);
277 Status s
= base_
->Sync();
278 #if !(defined NDEBUG) || !defined(OS_WIN)
279 TEST_SYNC_POINT_CALLBACK("SpecialEnv::SStableFile::Sync", &s
);
280 #endif // !(defined NDEBUG) || !defined(OS_WIN)
283 void SetIOPriority(Env::IOPriority pri
) override
{
284 base_
->SetIOPriority(pri
);
286 Env::IOPriority
GetIOPriority() override
{
287 return base_
->GetIOPriority();
289 bool use_direct_io() const override
{
290 return base_
->use_direct_io();
292 Status
Allocate(uint64_t offset
, uint64_t len
) override
{
293 return base_
->Allocate(offset
, len
);
296 class ManifestFile
: public WritableFile
{
298 ManifestFile(SpecialEnv
* env
, std::unique_ptr
<WritableFile
>&& b
)
299 : env_(env
), base_(std::move(b
)) {}
300 Status
Append(const Slice
& data
) override
{
301 if (env_
->manifest_write_error_
.load(std::memory_order_acquire
)) {
302 return Status::IOError("simulated writer error");
304 return base_
->Append(data
);
307 Status
Truncate(uint64_t size
) override
{ return base_
->Truncate(size
); }
308 Status
Close() override
{ return base_
->Close(); }
309 Status
Flush() override
{ return base_
->Flush(); }
310 Status
Sync() override
{
311 ++env_
->sync_counter_
;
312 if (env_
->manifest_sync_error_
.load(std::memory_order_acquire
)) {
313 return Status::IOError("simulated sync error");
315 return base_
->Sync();
318 uint64_t GetFileSize() override
{ return base_
->GetFileSize(); }
319 Status
Allocate(uint64_t offset
, uint64_t len
) override
{
320 return base_
->Allocate(offset
, len
);
325 std::unique_ptr
<WritableFile
> base_
;
327 class WalFile
: public WritableFile
{
329 WalFile(SpecialEnv
* env
, std::unique_ptr
<WritableFile
>&& b
)
330 : env_(env
), base_(std::move(b
)) {
331 env_
->num_open_wal_file_
.fetch_add(1);
333 virtual ~WalFile() { env_
->num_open_wal_file_
.fetch_add(-1); }
334 Status
Append(const Slice
& data
) override
{
335 #if !(defined NDEBUG) || !defined(OS_WIN)
336 TEST_SYNC_POINT("SpecialEnv::WalFile::Append:1");
339 if (env_
->log_write_error_
.load(std::memory_order_acquire
)) {
340 s
= Status::IOError("simulated writer error");
343 env_
->log_write_slowdown_
.load(std::memory_order_acquire
);
345 env_
->SleepForMicroseconds(slowdown
);
347 s
= base_
->Append(data
);
349 #if !(defined NDEBUG) || !defined(OS_WIN)
350 TEST_SYNC_POINT("SpecialEnv::WalFile::Append:2");
354 Status
Truncate(uint64_t size
) override
{ return base_
->Truncate(size
); }
355 Status
Close() override
{
356 // SyncPoint is not supported in Released Windows Mode.
357 #if !(defined NDEBUG) || !defined(OS_WIN)
358 // Check preallocation size
359 // preallocation size is never passed to base file.
360 size_t preallocation_size
= preallocation_block_size();
361 TEST_SYNC_POINT_CALLBACK("DBTestWalFile.GetPreallocationStatus",
362 &preallocation_size
);
363 #endif // !(defined NDEBUG) || !defined(OS_WIN)
365 return base_
->Close();
367 Status
Flush() override
{ return base_
->Flush(); }
368 Status
Sync() override
{
369 ++env_
->sync_counter_
;
370 return base_
->Sync();
372 bool IsSyncThreadSafe() const override
{
373 return env_
->is_wal_sync_thread_safe_
.load();
375 Status
Allocate(uint64_t offset
, uint64_t len
) override
{
376 return base_
->Allocate(offset
, len
);
381 std::unique_ptr
<WritableFile
> base_
;
384 if (non_writeable_rate_
.load(std::memory_order_acquire
) > 0) {
385 uint32_t random_number
;
387 MutexLock
l(&rnd_mutex_
);
388 random_number
= rnd_
.Uniform(100);
390 if (random_number
< non_writeable_rate_
.load()) {
391 return Status::IOError("simulated random write error");
395 new_writable_count_
++;
397 if (non_writable_count_
.load() > 0) {
398 non_writable_count_
--;
399 return Status::IOError("simulated write error");
402 EnvOptions optimized
= soptions
;
403 if (strstr(f
.c_str(), "MANIFEST") != nullptr ||
404 strstr(f
.c_str(), "log") != nullptr) {
405 optimized
.use_mmap_writes
= false;
406 optimized
.use_direct_writes
= false;
409 Status s
= target()->NewWritableFile(f
, r
, optimized
);
411 if (strstr(f
.c_str(), ".sst") != nullptr) {
412 r
->reset(new SSTableFile(this, std::move(*r
)));
413 } else if (strstr(f
.c_str(), "MANIFEST") != nullptr) {
414 r
->reset(new ManifestFile(this, std::move(*r
)));
415 } else if (strstr(f
.c_str(), "log") != nullptr) {
416 r
->reset(new WalFile(this, std::move(*r
)));
422 Status
NewRandomAccessFile(const std::string
& f
,
423 std::unique_ptr
<RandomAccessFile
>* r
,
424 const EnvOptions
& soptions
) override
{
425 class CountingFile
: public RandomAccessFile
{
427 CountingFile(std::unique_ptr
<RandomAccessFile
>&& target
,
428 anon::AtomicCounter
* counter
,
429 std::atomic
<size_t>* bytes_read
)
430 : target_(std::move(target
)),
432 bytes_read_(bytes_read
) {}
433 virtual Status
Read(uint64_t offset
, size_t n
, Slice
* result
,
434 char* scratch
) const override
{
435 counter_
->Increment();
436 Status s
= target_
->Read(offset
, n
, result
, scratch
);
437 *bytes_read_
+= result
->size();
442 std::unique_ptr
<RandomAccessFile
> target_
;
443 anon::AtomicCounter
* counter_
;
444 std::atomic
<size_t>* bytes_read_
;
447 Status s
= target()->NewRandomAccessFile(f
, r
, soptions
);
448 random_file_open_counter_
++;
449 if (s
.ok() && count_random_reads_
) {
450 r
->reset(new CountingFile(std::move(*r
), &random_read_counter_
,
451 &random_read_bytes_counter_
));
453 if (s
.ok() && soptions
.compaction_readahead_size
> 0) {
454 compaction_readahead_size_
= soptions
.compaction_readahead_size
;
459 virtual Status
NewSequentialFile(const std::string
& f
,
460 std::unique_ptr
<SequentialFile
>* r
,
461 const EnvOptions
& soptions
) override
{
462 class CountingFile
: public SequentialFile
{
464 CountingFile(std::unique_ptr
<SequentialFile
>&& target
,
465 anon::AtomicCounter
* counter
)
466 : target_(std::move(target
)), counter_(counter
) {}
467 virtual Status
Read(size_t n
, Slice
* result
, char* scratch
) override
{
468 counter_
->Increment();
469 return target_
->Read(n
, result
, scratch
);
471 virtual Status
Skip(uint64_t n
) override
{ return target_
->Skip(n
); }
474 std::unique_ptr
<SequentialFile
> target_
;
475 anon::AtomicCounter
* counter_
;
478 Status s
= target()->NewSequentialFile(f
, r
, soptions
);
479 if (s
.ok() && count_sequential_reads_
) {
480 r
->reset(new CountingFile(std::move(*r
), &sequential_read_counter_
));
485 virtual void SleepForMicroseconds(int micros
) override
{
486 sleep_counter_
.Increment();
487 if (no_slowdown_
|| time_elapse_only_sleep_
) {
488 addon_time_
.fetch_add(micros
);
491 target()->SleepForMicroseconds(micros
);
495 virtual Status
GetCurrentTime(int64_t* unix_time
) override
{
497 if (!time_elapse_only_sleep_
) {
498 s
= target()->GetCurrentTime(unix_time
);
501 *unix_time
+= addon_time_
.load();
506 virtual uint64_t NowCPUNanos() override
{
507 now_cpu_count_
.fetch_add(1);
508 return target()->NowCPUNanos();
511 virtual uint64_t NowNanos() override
{
512 return (time_elapse_only_sleep_
? 0 : target()->NowNanos()) +
513 addon_time_
.load() * 1000;
516 virtual uint64_t NowMicros() override
{
517 return (time_elapse_only_sleep_
? 0 : target()->NowMicros()) +
521 virtual Status
DeleteFile(const std::string
& fname
) override
{
522 delete_count_
.fetch_add(1);
523 return target()->DeleteFile(fname
);
527 port::Mutex rnd_mutex_
; // Lock to pretect rnd_
529 // sstable Sync() calls are blocked while this pointer is non-nullptr.
530 std::atomic
<bool> delay_sstable_sync_
;
532 // Drop writes on the floor while this pointer is non-nullptr.
533 std::atomic
<bool> drop_writes_
;
535 // Simulate no-space errors while this pointer is non-nullptr.
536 std::atomic
<bool> no_space_
;
538 // Simulate non-writable file system while this pointer is non-nullptr
539 std::atomic
<bool> non_writable_
;
541 // Force sync of manifest files to fail while this pointer is non-nullptr
542 std::atomic
<bool> manifest_sync_error_
;
544 // Force write to manifest files to fail while this pointer is non-nullptr
545 std::atomic
<bool> manifest_write_error_
;
547 // Force write to log files to fail while this pointer is non-nullptr
548 std::atomic
<bool> log_write_error_
;
550 // Slow down every log write, in micro-seconds.
551 std::atomic
<int> log_write_slowdown_
;
553 // Number of WAL files that are still open for write.
554 std::atomic
<int> num_open_wal_file_
;
556 bool count_random_reads_
;
557 anon::AtomicCounter random_read_counter_
;
558 std::atomic
<size_t> random_read_bytes_counter_
;
559 std::atomic
<int> random_file_open_counter_
;
561 bool count_sequential_reads_
;
562 anon::AtomicCounter sequential_read_counter_
;
564 anon::AtomicCounter sleep_counter_
;
566 std::atomic
<int64_t> bytes_written_
;
568 std::atomic
<int> sync_counter_
;
570 std::atomic
<uint32_t> non_writeable_rate_
;
572 std::atomic
<uint32_t> new_writable_count_
;
574 std::atomic
<uint32_t> non_writable_count_
;
576 std::function
<void()>* table_write_callback_
;
578 std::atomic
<int64_t> addon_time_
;
580 std::atomic
<int> now_cpu_count_
;
582 std::atomic
<int> delete_count_
;
584 std::atomic
<bool> time_elapse_only_sleep_
;
588 std::atomic
<bool> is_wal_sync_thread_safe_
{true};
590 std::atomic
<size_t> compaction_readahead_size_
{};
594 class OnFileDeletionListener
: public EventListener
{
596 OnFileDeletionListener() : matched_count_(0), expected_file_name_("") {}
598 void SetExpectedFileName(const std::string file_name
) {
599 expected_file_name_
= file_name
;
602 void VerifyMatchedCount(size_t expected_value
) {
603 ASSERT_EQ(matched_count_
, expected_value
);
606 void OnTableFileDeleted(const TableFileDeletionInfo
& info
) override
{
607 if (expected_file_name_
!= "") {
608 ASSERT_EQ(expected_file_name_
, info
.file_path
);
609 expected_file_name_
= "";
615 size_t matched_count_
;
616 std::string expected_file_name_
;
620 // A test merge operator mimics put but also fails if one of merge operands is
622 class TestPutOperator
: public MergeOperator
{
624 virtual bool FullMergeV2(const MergeOperationInput
& merge_in
,
625 MergeOperationOutput
* merge_out
) const override
{
626 if (merge_in
.existing_value
!= nullptr &&
627 *(merge_in
.existing_value
) == "corrupted") {
630 for (auto value
: merge_in
.operand_list
) {
631 if (value
== "corrupted") {
635 merge_out
->existing_operand
= merge_in
.operand_list
.back();
639 virtual const char* Name() const override
{ return "TestPutOperator"; }
642 class DBTestBase
: public testing::Test
{
644 // Sequence of option configurations to try
645 enum OptionConfig
: int {
647 kBlockBasedTableWithPrefixHashIndex
= 1,
648 kBlockBasedTableWithWholeKeyHashIndex
= 2,
649 kPlainTableFirstBytePrefix
= 3,
650 kPlainTableCappedPrefix
= 4,
651 kPlainTableCappedPrefixNonMmap
= 5,
652 kPlainTableAllBytesPrefix
= 6,
657 kFullFilterWithNewTableReaderForCompactions
= 11,
661 kWalDirAndMmapReads
= 15,
662 kManifestFileSize
= 16,
665 kUniversalCompaction
= 19,
666 kUniversalCompactionMultiLevel
= 20,
667 kCompressedBlockCache
= 21,
668 kInfiniteMaxOpenFiles
= 22,
669 kxxHashChecksum
= 23,
670 kFIFOCompaction
= 24,
671 kOptimizeFiltersForHits
= 25,
673 kRecycleLogFiles
= 27,
674 kConcurrentSkipList
= 28,
675 kPipelinedWrite
= 29,
676 kConcurrentWALWrites
= 30,
678 kLevelSubcompactions
,
679 kBlockBasedTableWithIndexRestartInterval
,
680 kBlockBasedTableWithPartitionedIndex
,
681 kBlockBasedTableWithPartitionedIndexFormat4
,
682 kPartitionedFilterWithNewTableReaderForCompactions
,
683 kUniversalSubcompactions
,
685 // This must be the last line
691 std::string alternative_wal_dir_
;
692 std::string alternative_db_log_dir_
;
697 std::vector
<ColumnFamilyHandle
*> handles_
;
700 Options last_options_
;
702 // Skip some options, as they may not be applicable to a specific test.
703 // To add more skip constants, use values 4, 8, 16, etc.
706 kSkipDeletesFilterFirst
= 1,
707 kSkipUniversalCompaction
= 2,
711 kSkipNoSeekToLast
= 32,
712 kSkipFIFOCompaction
= 128,
713 kSkipMmapReads
= 256,
716 const int kRangeDelSkipConfigs
=
717 // Plain tables do not support range deletions.
719 // MmapReads disables the iterator pinning that RangeDelAggregator
723 explicit DBTestBase(const std::string path
);
727 static std::string
RandomString(Random
* rnd
, int len
) {
729 test::RandomString(rnd
, len
, &r
);
733 static std::string
Key(int i
) {
735 snprintf(buf
, sizeof(buf
), "key%06d", i
);
736 return std::string(buf
);
739 static bool ShouldSkipOptions(int option_config
, int skip_mask
= kNoSkip
);
741 // Switch to a fresh database with the next option configuration to
742 // test. Return false if there are no more configurations to test.
743 bool ChangeOptions(int skip_mask
= kNoSkip
);
745 // Switch between different compaction styles.
746 bool ChangeCompactOptions();
748 // Switch between different WAL-realted options.
749 bool ChangeWalOptions();
751 // Switch between different filter policy
752 // Jump from kDefault to kFilter to kFullFilter
753 bool ChangeFilterOptions();
755 // Switch between different DB options for file ingestion tests.
756 bool ChangeOptionsForFileIngestionTest();
758 // Return the current option configuration.
759 Options
CurrentOptions(const anon::OptionsOverride
& options_override
=
760 anon::OptionsOverride()) const;
762 Options
CurrentOptions(const Options
& default_options
,
763 const anon::OptionsOverride
& options_override
=
764 anon::OptionsOverride()) const;
766 static Options
GetDefaultOptions();
768 Options
GetOptions(int option_config
,
769 const Options
& default_options
= GetDefaultOptions(),
770 const anon::OptionsOverride
& options_override
=
771 anon::OptionsOverride()) const;
773 DBImpl
* dbfull() { return reinterpret_cast<DBImpl
*>(db_
); }
775 void CreateColumnFamilies(const std::vector
<std::string
>& cfs
,
776 const Options
& options
);
778 void CreateAndReopenWithCF(const std::vector
<std::string
>& cfs
,
779 const Options
& options
);
781 void ReopenWithColumnFamilies(const std::vector
<std::string
>& cfs
,
782 const std::vector
<Options
>& options
);
784 void ReopenWithColumnFamilies(const std::vector
<std::string
>& cfs
,
785 const Options
& options
);
787 Status
TryReopenWithColumnFamilies(const std::vector
<std::string
>& cfs
,
788 const std::vector
<Options
>& options
);
790 Status
TryReopenWithColumnFamilies(const std::vector
<std::string
>& cfs
,
791 const Options
& options
);
793 void Reopen(const Options
& options
);
797 void DestroyAndReopen(const Options
& options
);
799 void Destroy(const Options
& options
, bool delete_cf_paths
= false);
801 Status
ReadOnlyReopen(const Options
& options
);
803 Status
TryReopen(const Options
& options
);
805 bool IsDirectIOSupported();
807 bool IsMemoryMappedAccessSupported() const;
809 Status
Flush(int cf
= 0);
811 Status
Flush(const std::vector
<int>& cf_ids
);
813 Status
Put(const Slice
& k
, const Slice
& v
, WriteOptions wo
= WriteOptions());
815 Status
Put(int cf
, const Slice
& k
, const Slice
& v
,
816 WriteOptions wo
= WriteOptions());
818 Status
Merge(const Slice
& k
, const Slice
& v
,
819 WriteOptions wo
= WriteOptions());
821 Status
Merge(int cf
, const Slice
& k
, const Slice
& v
,
822 WriteOptions wo
= WriteOptions());
824 Status
Delete(const std::string
& k
);
826 Status
Delete(int cf
, const std::string
& k
);
828 Status
SingleDelete(const std::string
& k
);
830 Status
SingleDelete(int cf
, const std::string
& k
);
832 bool SetPreserveDeletesSequenceNumber(SequenceNumber sn
);
834 std::string
Get(const std::string
& k
, const Snapshot
* snapshot
= nullptr);
836 std::string
Get(int cf
, const std::string
& k
,
837 const Snapshot
* snapshot
= nullptr);
839 Status
Get(const std::string
& k
, PinnableSlice
* v
);
841 std::vector
<std::string
> MultiGet(std::vector
<int> cfs
,
842 const std::vector
<std::string
>& k
,
843 const Snapshot
* snapshot
= nullptr);
845 uint64_t GetNumSnapshots();
847 uint64_t GetTimeOldestSnapshots();
849 // Return a string that contains all key,value pairs in order,
850 // formatted like "(k1->v1)(k2->v2)".
851 std::string
Contents(int cf
= 0);
853 std::string
AllEntriesFor(const Slice
& user_key
, int cf
= 0);
856 int NumSortedRuns(int cf
= 0);
858 uint64_t TotalSize(int cf
= 0);
860 uint64_t SizeAtLevel(int level
);
862 size_t TotalLiveFiles(int cf
= 0);
864 size_t CountLiveFiles();
866 int NumTableFilesAtLevel(int level
, int cf
= 0);
868 double CompressionRatioAtLevel(int level
, int cf
= 0);
870 int TotalTableFiles(int cf
= 0, int levels
= -1);
871 #endif // ROCKSDB_LITE
873 // Return spread of files per level
874 std::string
FilesPerLevel(int cf
= 0);
878 uint64_t Size(const Slice
& start
, const Slice
& limit
, int cf
= 0);
880 void Compact(int cf
, const Slice
& start
, const Slice
& limit
,
881 uint32_t target_path_id
);
883 void Compact(int cf
, const Slice
& start
, const Slice
& limit
);
885 void Compact(const Slice
& start
, const Slice
& limit
);
887 // Do n memtable compactions, each of which produces an sstable
888 // covering the range [small,large].
889 void MakeTables(int n
, const std::string
& small
, const std::string
& large
,
892 // Prevent pushing of new sstables into deeper levels by adding
893 // tables that cover a specified range to all levels.
894 void FillLevels(const std::string
& smallest
, const std::string
& largest
,
897 void MoveFilesToLevel(int level
, int cf
= 0);
900 void DumpFileCounts(const char* label
);
901 #endif // ROCKSDB_LITE
903 std::string
DumpSSTableList();
905 static void GetSstFiles(Env
* env
, std::string path
,
906 std::vector
<std::string
>* files
);
908 int GetSstFileCount(std::string path
);
910 // this will generate non-overlapping files since it keeps increasing key_idx
911 void GenerateNewFile(Random
* rnd
, int* key_idx
, bool nowait
= false);
913 void GenerateNewFile(int fd
, Random
* rnd
, int* key_idx
, bool nowait
= false);
915 static const int kNumKeysByGenerateNewRandomFile
;
916 static const int KNumKeysByGenerateNewFile
= 100;
918 void GenerateNewRandomFile(Random
* rnd
, bool nowait
= false);
920 std::string
IterStatus(Iterator
* iter
);
922 Options
OptionsForLogIterTest();
924 std::string
DummyString(size_t len
, char c
= 'a');
926 void VerifyIterLast(std::string expected_key
, int cf
= 0);
928 // Used to test InplaceUpdate
930 // If previous value is nullptr or delta is > than previous value,
931 // sets newValue with delta
932 // If previous value is not empty,
933 // updates previous value with 'b' string of previous value size - 1.
934 static UpdateStatus
updateInPlaceSmallerSize(char* prevValue
,
935 uint32_t* prevSize
, Slice delta
,
936 std::string
* newValue
);
938 static UpdateStatus
updateInPlaceSmallerVarintSize(char* prevValue
,
941 std::string
* newValue
);
943 static UpdateStatus
updateInPlaceLargerSize(char* prevValue
,
944 uint32_t* prevSize
, Slice delta
,
945 std::string
* newValue
);
947 static UpdateStatus
updateInPlaceNoAction(char* prevValue
, uint32_t* prevSize
,
948 Slice delta
, std::string
* newValue
);
950 // Utility method to test InplaceUpdate
951 void validateNumberOfEntries(int numValues
, int cf
= 0);
953 void CopyFile(const std::string
& source
, const std::string
& destination
,
956 std::unordered_map
<std::string
, uint64_t> GetAllSSTFiles(
957 uint64_t* total_size
= nullptr);
959 std::vector
<std::uint64_t> ListTableFiles(Env
* env
, const std::string
& path
);
961 void VerifyDBFromMap(
962 std::map
<std::string
, std::string
> true_data
,
963 size_t* total_reads_res
= nullptr, bool tailing_iter
= false,
964 std::map
<std::string
, Status
> status
= std::map
<std::string
, Status
>());
966 void VerifyDBInternal(
967 std::vector
<std::pair
<std::string
, std::string
>> true_data
);
970 uint64_t GetNumberOfSstFilesForColumnFamily(DB
* db
,
971 std::string column_family_name
);
972 #endif // ROCKSDB_LITE
974 uint64_t TestGetTickerCount(const Options
& options
, Tickers ticker_type
) {
975 return options
.statistics
->getTickerCount(ticker_type
);
979 } // namespace rocksdb