// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
// This source code is licensed under both the GPLv2 (found in the
// COPYING file in the root directory) and Apache 2.0 License
// (found in the LICENSE.Apache file in the root directory).
//
// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file. See the AUTHORS file for names of contributors.

#pragma once
#ifndef __STDC_FORMAT_MACROS
#define __STDC_FORMAT_MACROS
#endif

#include <fcntl.h>
#include <inttypes.h>

#include <algorithm>
#include <map>
#include <set>
#include <string>
#include <thread>
#include <unordered_set>
#include <utility>
#include <vector>

#include "db/db_impl.h"
#include "db/dbformat.h"
#include "env/mock_env.h"
#include "memtable/hash_linklist_rep.h"
#include "rocksdb/cache.h"
#include "rocksdb/compaction_filter.h"
#include "rocksdb/convenience.h"
#include "rocksdb/db.h"
#include "rocksdb/env.h"
#include "rocksdb/filter_policy.h"
#include "rocksdb/options.h"
#include "rocksdb/slice.h"
#include "rocksdb/sst_file_writer.h"
#include "rocksdb/statistics.h"
#include "rocksdb/table.h"
#include "rocksdb/utilities/checkpoint.h"
#include "table/block_based_table_factory.h"
#include "table/mock_table.h"
#include "table/plain_table_factory.h"
#include "table/scoped_arena_iterator.h"
#include "util/compression.h"
#include "util/filename.h"
#include "util/mock_time_env.h"
#include "util/mutexlock.h"

#include "util/string_util.h"
#include "util/sync_point.h"
#include "util/testharness.h"
#include "util/testutil.h"
#include "utilities/merge_operators.h"

namespace rocksdb {

namespace anon {
class AtomicCounter {
 public:
  explicit AtomicCounter(Env* env = NULL)
      : env_(env), cond_count_(&mu_), count_(0) {}

  void Increment() {
    MutexLock l(&mu_);
    count_++;
    cond_count_.SignalAll();
  }

  int Read() {
    MutexLock l(&mu_);
    return count_;
  }

  bool WaitFor(int count) {
    MutexLock l(&mu_);

    uint64_t start = env_->NowMicros();
    while (count_ < count) {
      uint64_t now = env_->NowMicros();
      cond_count_.TimedWait(now + /*1s*/ 1 * 1000 * 1000);
      if (env_->NowMicros() - start > /*10s*/ 10 * 1000 * 1000) {
        return false;
      }
      if (count_ < count) {
        GTEST_LOG_(WARNING) << "WaitFor is taking more time than usual";
      }
    }

    return true;
  }

  void Reset() {
    MutexLock l(&mu_);
    count_ = 0;
    cond_count_.SignalAll();
  }

 private:
  Env* env_;
  port::Mutex mu_;
  port::CondVar cond_count_;
  int count_;
};
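
// Illustrative usage sketch (not part of the original header): a worker
// thread bumps the counter while the test thread blocks in WaitFor(). The
// helper name and the use of std::thread are assumptions made for this
// example; `env` must be a valid Env such as Env::Default() and must outlive
// the call.
inline bool ExampleAtomicCounterWait(Env* env) {
  AtomicCounter pending(env);
  std::thread worker([&pending] { pending.Increment(); });
  // WaitFor() polls about once a second and gives up after roughly 10s.
  bool reached = pending.WaitFor(1);
  worker.join();
  return reached && pending.Read() == 1;
}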

struct OptionsOverride {
  std::shared_ptr<const FilterPolicy> filter_policy = nullptr;
  // These will be used only if filter_policy is set
  bool partition_filters = false;
  uint64_t metadata_block_size = 1024;

  // Used as a bit mask of SkipPolicy values (defined below) indicating which
  // XF test points to skip.
  int skip_policy = 0;
};
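
// Illustrative sketch (not part of the original header): building an
// OptionsOverride that requests partitioned Bloom filters. The helper name
// and the concrete values (10 bits per key, 4K metadata blocks) are
// assumptions made for the example; tests pass the result to
// DBTestBase::CurrentOptions(), declared further down in this header.
inline OptionsOverride ExamplePartitionedFilterOverride() {
  OptionsOverride o;
  o.filter_policy.reset(NewBloomFilterPolicy(10, false));
  o.partition_filters = true;
  o.metadata_block_size = 4096;
  return o;
}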

}  // namespace anon

enum SkipPolicy { kSkipNone = 0, kSkipNoSnapshot = 1, kSkipNoPrefix = 2 };

// A hacky skip list mem table that triggers a flush after a given number of
// entries.
class SpecialMemTableRep : public MemTableRep {
 public:
  explicit SpecialMemTableRep(Allocator* allocator, MemTableRep* memtable,
                              int num_entries_flush)
      : MemTableRep(allocator),
        memtable_(memtable),
        num_entries_flush_(num_entries_flush),
        num_entries_(0) {}

  virtual KeyHandle Allocate(const size_t len, char** buf) override {
    return memtable_->Allocate(len, buf);
  }

  // Insert key into the list.
  // REQUIRES: nothing that compares equal to key is currently in the list.
  virtual void Insert(KeyHandle handle) override {
    num_entries_++;
    memtable_->Insert(handle);
  }

  // Returns true iff an entry that compares equal to key is in the list.
  virtual bool Contains(const char* key) const override {
    return memtable_->Contains(key);
  }

  virtual size_t ApproximateMemoryUsage() override {
    // Return a high memory usage when the number of entries exceeds the
    // threshold, to trigger a flush.
    return (num_entries_ < num_entries_flush_) ? 0 : 1024 * 1024 * 1024;
  }

  virtual void Get(const LookupKey& k, void* callback_args,
                   bool (*callback_func)(void* arg,
                                         const char* entry)) override {
    memtable_->Get(k, callback_args, callback_func);
  }

  uint64_t ApproximateNumEntries(const Slice& start_ikey,
                                 const Slice& end_ikey) override {
    return memtable_->ApproximateNumEntries(start_ikey, end_ikey);
  }

  virtual MemTableRep::Iterator* GetIterator(Arena* arena = nullptr) override {
    return memtable_->GetIterator(arena);
  }

  virtual ~SpecialMemTableRep() override {}

 private:
  std::unique_ptr<MemTableRep> memtable_;
  int num_entries_flush_;
  int num_entries_;
};

// The factory for the hacky skip list mem table that triggers a flush after
// the number of entries exceeds a threshold.
class SpecialSkipListFactory : public MemTableRepFactory {
 public:
  // After the number of inserts into a mem table exceeds `num_entries_flush`,
  // trigger a flush.
  explicit SpecialSkipListFactory(int num_entries_flush)
      : num_entries_flush_(num_entries_flush) {}

  using MemTableRepFactory::CreateMemTableRep;
  virtual MemTableRep* CreateMemTableRep(
      const MemTableRep::KeyComparator& compare, Allocator* allocator,
      const SliceTransform* transform, Logger* /*logger*/) override {
    return new SpecialMemTableRep(
        allocator, factory_.CreateMemTableRep(compare, allocator, transform, 0),
        num_entries_flush_);
  }
  virtual const char* Name() const override { return "SkipListFactory"; }

  bool IsInsertConcurrentlySupported() const override {
    return factory_.IsInsertConcurrentlySupported();
  }

 private:
  SkipListFactory factory_;
  int num_entries_flush_;
};
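
// Illustrative sketch (not part of the original header): installing
// SpecialSkipListFactory so that the memtable reports a huge memory usage
// (and is therefore flushed) after a fixed number of inserts, independent of
// write_buffer_size. The helper name and the threshold of 3 are assumptions
// made for the example.
inline void ExampleUseSpecialSkipListFactory(Options* options) {
  options->memtable_factory.reset(new SpecialSkipListFactory(3));
  // Make the size-based trigger irrelevant so the entry count decides.
  options->write_buffer_size = 64 << 20;
}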

// Special Env used to delay background operations
class SpecialEnv : public EnvWrapper {
 public:
  explicit SpecialEnv(Env* base);

  Status NewWritableFile(const std::string& f, std::unique_ptr<WritableFile>* r,
                         const EnvOptions& soptions) override {
    class SSTableFile : public WritableFile {
     private:
      SpecialEnv* env_;
      std::unique_ptr<WritableFile> base_;

     public:
      SSTableFile(SpecialEnv* env, std::unique_ptr<WritableFile>&& base)
          : env_(env), base_(std::move(base)) {}
      Status Append(const Slice& data) override {
        if (env_->table_write_callback_) {
          (*env_->table_write_callback_)();
        }
        if (env_->drop_writes_.load(std::memory_order_acquire)) {
          // Drop writes on the floor
          return Status::OK();
        } else if (env_->no_space_.load(std::memory_order_acquire)) {
          return Status::NoSpace("No space left on device");
        } else {
          env_->bytes_written_ += data.size();
          return base_->Append(data);
        }
      }
      Status PositionedAppend(const Slice& data, uint64_t offset) override {
        if (env_->table_write_callback_) {
          (*env_->table_write_callback_)();
        }
        if (env_->drop_writes_.load(std::memory_order_acquire)) {
          // Drop writes on the floor
          return Status::OK();
        } else if (env_->no_space_.load(std::memory_order_acquire)) {
          return Status::NoSpace("No space left on device");
        } else {
          env_->bytes_written_ += data.size();
          return base_->PositionedAppend(data, offset);
        }
      }
      Status Truncate(uint64_t size) override { return base_->Truncate(size); }
      Status RangeSync(uint64_t offset, uint64_t nbytes) override {
        Status s = base_->RangeSync(offset, nbytes);
#if !(defined NDEBUG) || !defined(OS_WIN)
        TEST_SYNC_POINT_CALLBACK("SpecialEnv::SStableFile::RangeSync", &s);
#endif  // !(defined NDEBUG) || !defined(OS_WIN)
        return s;
      }
      Status Close() override {
// SyncPoint is not supported in Released Windows Mode.
#if !(defined NDEBUG) || !defined(OS_WIN)
        // Check preallocation size
        // preallocation size is never passed to base file.
        size_t preallocation_size = preallocation_block_size();
        TEST_SYNC_POINT_CALLBACK("DBTestWritableFile.GetPreallocationStatus",
                                 &preallocation_size);
#endif  // !(defined NDEBUG) || !defined(OS_WIN)
        Status s = base_->Close();
#if !(defined NDEBUG) || !defined(OS_WIN)
        TEST_SYNC_POINT_CALLBACK("SpecialEnv::SStableFile::Close", &s);
#endif  // !(defined NDEBUG) || !defined(OS_WIN)
        return s;
      }
      Status Flush() override { return base_->Flush(); }
      Status Sync() override {
        ++env_->sync_counter_;
        while (env_->delay_sstable_sync_.load(std::memory_order_acquire)) {
          env_->SleepForMicroseconds(100000);
        }
        Status s = base_->Sync();
#if !(defined NDEBUG) || !defined(OS_WIN)
        TEST_SYNC_POINT_CALLBACK("SpecialEnv::SStableFile::Sync", &s);
#endif  // !(defined NDEBUG) || !defined(OS_WIN)
        return s;
      }
      void SetIOPriority(Env::IOPriority pri) override {
        base_->SetIOPriority(pri);
      }
      Env::IOPriority GetIOPriority() override {
        return base_->GetIOPriority();
      }
      bool use_direct_io() const override {
        return base_->use_direct_io();
      }
      Status Allocate(uint64_t offset, uint64_t len) override {
        return base_->Allocate(offset, len);
      }
    };
    class ManifestFile : public WritableFile {
     public:
      ManifestFile(SpecialEnv* env, std::unique_ptr<WritableFile>&& b)
          : env_(env), base_(std::move(b)) {}
      Status Append(const Slice& data) override {
        if (env_->manifest_write_error_.load(std::memory_order_acquire)) {
          return Status::IOError("simulated writer error");
        } else {
          return base_->Append(data);
        }
      }
      Status Truncate(uint64_t size) override { return base_->Truncate(size); }
      Status Close() override { return base_->Close(); }
      Status Flush() override { return base_->Flush(); }
      Status Sync() override {
        ++env_->sync_counter_;
        if (env_->manifest_sync_error_.load(std::memory_order_acquire)) {
          return Status::IOError("simulated sync error");
        } else {
          return base_->Sync();
        }
      }
      uint64_t GetFileSize() override { return base_->GetFileSize(); }
      Status Allocate(uint64_t offset, uint64_t len) override {
        return base_->Allocate(offset, len);
      }

     private:
      SpecialEnv* env_;
      std::unique_ptr<WritableFile> base_;
    };
    class WalFile : public WritableFile {
     public:
      WalFile(SpecialEnv* env, std::unique_ptr<WritableFile>&& b)
          : env_(env), base_(std::move(b)) {
        env_->num_open_wal_file_.fetch_add(1);
      }
      virtual ~WalFile() { env_->num_open_wal_file_.fetch_add(-1); }
      Status Append(const Slice& data) override {
#if !(defined NDEBUG) || !defined(OS_WIN)
        TEST_SYNC_POINT("SpecialEnv::WalFile::Append:1");
#endif
        Status s;
        if (env_->log_write_error_.load(std::memory_order_acquire)) {
          s = Status::IOError("simulated writer error");
        } else {
          int slowdown =
              env_->log_write_slowdown_.load(std::memory_order_acquire);
          if (slowdown > 0) {
            env_->SleepForMicroseconds(slowdown);
          }
          s = base_->Append(data);
        }
#if !(defined NDEBUG) || !defined(OS_WIN)
        TEST_SYNC_POINT("SpecialEnv::WalFile::Append:2");
#endif
        return s;
      }
      Status Truncate(uint64_t size) override { return base_->Truncate(size); }
      Status Close() override {
// SyncPoint is not supported in Released Windows Mode.
#if !(defined NDEBUG) || !defined(OS_WIN)
        // Check preallocation size
        // preallocation size is never passed to base file.
        size_t preallocation_size = preallocation_block_size();
        TEST_SYNC_POINT_CALLBACK("DBTestWalFile.GetPreallocationStatus",
                                 &preallocation_size);
#endif  // !(defined NDEBUG) || !defined(OS_WIN)

        return base_->Close();
      }
      Status Flush() override { return base_->Flush(); }
      Status Sync() override {
        ++env_->sync_counter_;
        return base_->Sync();
      }
      bool IsSyncThreadSafe() const override {
        return env_->is_wal_sync_thread_safe_.load();
      }
      Status Allocate(uint64_t offset, uint64_t len) override {
        return base_->Allocate(offset, len);
      }

     private:
      SpecialEnv* env_;
      std::unique_ptr<WritableFile> base_;
    };

    if (non_writeable_rate_.load(std::memory_order_acquire) > 0) {
      uint32_t random_number;
      {
        MutexLock l(&rnd_mutex_);
        random_number = rnd_.Uniform(100);
      }
      if (random_number < non_writeable_rate_.load()) {
        return Status::IOError("simulated random write error");
      }
    }

    new_writable_count_++;

    if (non_writable_count_.load() > 0) {
      non_writable_count_--;
      return Status::IOError("simulated write error");
    }

    EnvOptions optimized = soptions;
    if (strstr(f.c_str(), "MANIFEST") != nullptr ||
        strstr(f.c_str(), "log") != nullptr) {
      optimized.use_mmap_writes = false;
      optimized.use_direct_writes = false;
    }

    Status s = target()->NewWritableFile(f, r, optimized);
    if (s.ok()) {
      if (strstr(f.c_str(), ".sst") != nullptr) {
        r->reset(new SSTableFile(this, std::move(*r)));
      } else if (strstr(f.c_str(), "MANIFEST") != nullptr) {
        r->reset(new ManifestFile(this, std::move(*r)));
      } else if (strstr(f.c_str(), "log") != nullptr) {
        r->reset(new WalFile(this, std::move(*r)));
      }
    }
    return s;
  }

  Status NewRandomAccessFile(const std::string& f,
                             std::unique_ptr<RandomAccessFile>* r,
                             const EnvOptions& soptions) override {
    class CountingFile : public RandomAccessFile {
     public:
      CountingFile(std::unique_ptr<RandomAccessFile>&& target,
                   anon::AtomicCounter* counter,
                   std::atomic<size_t>* bytes_read)
          : target_(std::move(target)),
            counter_(counter),
            bytes_read_(bytes_read) {}
      virtual Status Read(uint64_t offset, size_t n, Slice* result,
                          char* scratch) const override {
        counter_->Increment();
        Status s = target_->Read(offset, n, result, scratch);
        *bytes_read_ += result->size();
        return s;
      }

     private:
      std::unique_ptr<RandomAccessFile> target_;
      anon::AtomicCounter* counter_;
      std::atomic<size_t>* bytes_read_;
    };

    Status s = target()->NewRandomAccessFile(f, r, soptions);
    random_file_open_counter_++;
    if (s.ok() && count_random_reads_) {
      r->reset(new CountingFile(std::move(*r), &random_read_counter_,
                                &random_read_bytes_counter_));
    }
    if (s.ok() && soptions.compaction_readahead_size > 0) {
      compaction_readahead_size_ = soptions.compaction_readahead_size;
    }
    return s;
  }

  virtual Status NewSequentialFile(const std::string& f,
                                   std::unique_ptr<SequentialFile>* r,
                                   const EnvOptions& soptions) override {
    class CountingFile : public SequentialFile {
     public:
      CountingFile(std::unique_ptr<SequentialFile>&& target,
                   anon::AtomicCounter* counter)
          : target_(std::move(target)), counter_(counter) {}
      virtual Status Read(size_t n, Slice* result, char* scratch) override {
        counter_->Increment();
        return target_->Read(n, result, scratch);
      }
      virtual Status Skip(uint64_t n) override { return target_->Skip(n); }

     private:
      std::unique_ptr<SequentialFile> target_;
      anon::AtomicCounter* counter_;
    };

    Status s = target()->NewSequentialFile(f, r, soptions);
    if (s.ok() && count_sequential_reads_) {
      r->reset(new CountingFile(std::move(*r), &sequential_read_counter_));
    }
    return s;
  }

  virtual void SleepForMicroseconds(int micros) override {
    sleep_counter_.Increment();
    if (no_slowdown_ || time_elapse_only_sleep_) {
      addon_time_.fetch_add(micros);
    }
    if (!no_slowdown_) {
      target()->SleepForMicroseconds(micros);
    }
  }

  virtual Status GetCurrentTime(int64_t* unix_time) override {
    Status s;
    if (!time_elapse_only_sleep_) {
      s = target()->GetCurrentTime(unix_time);
    }
    if (s.ok()) {
      *unix_time += addon_time_.load();
    }
    return s;
  }

  virtual uint64_t NowCPUNanos() override {
    now_cpu_count_.fetch_add(1);
    return target()->NowCPUNanos();
  }

  virtual uint64_t NowNanos() override {
    return (time_elapse_only_sleep_ ? 0 : target()->NowNanos()) +
           addon_time_.load() * 1000;
  }

  virtual uint64_t NowMicros() override {
    return (time_elapse_only_sleep_ ? 0 : target()->NowMicros()) +
           addon_time_.load();
  }

  virtual Status DeleteFile(const std::string& fname) override {
    delete_count_.fetch_add(1);
    return target()->DeleteFile(fname);
  }

  Random rnd_;
  port::Mutex rnd_mutex_;  // Lock to protect rnd_

  // sstable Sync() calls are blocked while this is true.
  std::atomic<bool> delay_sstable_sync_;

  // Drop writes on the floor while this is true.
  std::atomic<bool> drop_writes_;

  // Simulate no-space errors while this is true.
  std::atomic<bool> no_space_;

  // Simulate a non-writable file system while this is true.
  std::atomic<bool> non_writable_;

  // Force sync of manifest files to fail while this is true.
  std::atomic<bool> manifest_sync_error_;

  // Force writes to manifest files to fail while this is true.
  std::atomic<bool> manifest_write_error_;

  // Force writes to log files to fail while this is true.
  std::atomic<bool> log_write_error_;

  // Slow down every log write, in micro-seconds.
  std::atomic<int> log_write_slowdown_;

  // Number of WAL files that are still open for write.
  std::atomic<int> num_open_wal_file_;

  bool count_random_reads_;
  anon::AtomicCounter random_read_counter_;
  std::atomic<size_t> random_read_bytes_counter_;
  std::atomic<int> random_file_open_counter_;

  bool count_sequential_reads_;
  anon::AtomicCounter sequential_read_counter_;

  anon::AtomicCounter sleep_counter_;

  std::atomic<int64_t> bytes_written_;

  std::atomic<int> sync_counter_;

  std::atomic<uint32_t> non_writeable_rate_;

  std::atomic<uint32_t> new_writable_count_;

  std::atomic<uint32_t> non_writable_count_;

  std::function<void()>* table_write_callback_;

  std::atomic<int64_t> addon_time_;

  std::atomic<int> now_cpu_count_;

  std::atomic<int> delete_count_;

  std::atomic<bool> time_elapse_only_sleep_;

  bool no_slowdown_;

  std::atomic<bool> is_wal_sync_thread_safe_{true};

  std::atomic<size_t> compaction_readahead_size_{};
};
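
// Illustrative sketch (not part of the original header): how a test can wire
// a SpecialEnv into the DB it opens and then inject "no space" errors. The
// helper name, the wiring, and the use of a caller-provided dbname are
// assumptions made for the example; the SpecialEnv must outlive the DB.
inline Status ExampleOpenWithNoSpaceInjection(SpecialEnv* special_env,
                                              const std::string& dbname,
                                              DB** db) {
  Options options;
  options.create_if_missing = true;
  options.env = special_env;
  Status s = DB::Open(options, dbname, db);
  // From now on, every Append() to an SST file fails with Status::NoSpace().
  special_env->no_space_.store(true, std::memory_order_release);
  return s;
}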

#ifndef ROCKSDB_LITE
class OnFileDeletionListener : public EventListener {
 public:
  OnFileDeletionListener() : matched_count_(0), expected_file_name_("") {}

  void SetExpectedFileName(const std::string file_name) {
    expected_file_name_ = file_name;
  }

  void VerifyMatchedCount(size_t expected_value) {
    ASSERT_EQ(matched_count_, expected_value);
  }

  void OnTableFileDeleted(const TableFileDeletionInfo& info) override {
    if (expected_file_name_ != "") {
      ASSERT_EQ(expected_file_name_, info.file_path);
      expected_file_name_ = "";
      matched_count_++;
    }
  }

 private:
  size_t matched_count_;
  std::string expected_file_name_;
};
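
// Illustrative sketch (not part of the original header): registering the
// listener so a test can later assert that one specific SST file really was
// deleted. The helper name is an assumption made for the example; the caller
// keeps the returned shared_ptr so it can call VerifyMatchedCount(1) after
// the compaction or deletion under test.
inline std::shared_ptr<OnFileDeletionListener> ExampleInstallDeletionListener(
    Options* options, const std::string& expected_file_path) {
  auto listener = std::make_shared<OnFileDeletionListener>();
  listener->SetExpectedFileName(expected_file_path);
  options->listeners.push_back(listener);
  return listener;
}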
#endif

// A test merge operator that mimics Put, but fails if any merge operand (or
// the existing value) is "corrupted".
class TestPutOperator : public MergeOperator {
 public:
  virtual bool FullMergeV2(const MergeOperationInput& merge_in,
                           MergeOperationOutput* merge_out) const override {
    if (merge_in.existing_value != nullptr &&
        *(merge_in.existing_value) == "corrupted") {
      return false;
    }
    for (auto value : merge_in.operand_list) {
      if (value == "corrupted") {
        return false;
      }
    }
    merge_out->existing_operand = merge_in.operand_list.back();
    return true;
  }

  virtual const char* Name() const override { return "TestPutOperator"; }
};
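
// Illustrative sketch (not part of the original header): with TestPutOperator
// installed, Merge() behaves like Put() until a "corrupted" operand or base
// value is encountered, at which point the merge fails. The helper name is an
// assumption made for the example.
inline void ExampleUseTestPutOperator(Options* options) {
  options->merge_operator = std::make_shared<TestPutOperator>();
}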

class DBTestBase : public testing::Test {
 public:
  // Sequence of option configurations to try
  enum OptionConfig : int {
    kDefault = 0,
    kBlockBasedTableWithPrefixHashIndex = 1,
    kBlockBasedTableWithWholeKeyHashIndex = 2,
    kPlainTableFirstBytePrefix = 3,
    kPlainTableCappedPrefix = 4,
    kPlainTableCappedPrefixNonMmap = 5,
    kPlainTableAllBytesPrefix = 6,
    kVectorRep = 7,
    kHashLinkList = 8,
    kMergePut = 9,
    kFilter = 10,
    kFullFilterWithNewTableReaderForCompactions = 11,
    kUncompressed = 12,
    kNumLevel_3 = 13,
    kDBLogDir = 14,
    kWalDirAndMmapReads = 15,
    kManifestFileSize = 16,
    kPerfOptions = 17,
    kHashSkipList = 18,
    kUniversalCompaction = 19,
    kUniversalCompactionMultiLevel = 20,
    kCompressedBlockCache = 21,
    kInfiniteMaxOpenFiles = 22,
    kxxHashChecksum = 23,
    kFIFOCompaction = 24,
    kOptimizeFiltersForHits = 25,
    kRowCache = 26,
    kRecycleLogFiles = 27,
    kConcurrentSkipList = 28,
    kPipelinedWrite = 29,
    kConcurrentWALWrites = 30,
    kDirectIO,
    kLevelSubcompactions,
    kBlockBasedTableWithIndexRestartInterval,
    kBlockBasedTableWithPartitionedIndex,
    kBlockBasedTableWithPartitionedIndexFormat4,
    kPartitionedFilterWithNewTableReaderForCompactions,
    kUniversalSubcompactions,
    kxxHash64Checksum,
    // This must be the last line
    kEnd,
  };

 public:
  std::string dbname_;
  std::string alternative_wal_dir_;
  std::string alternative_db_log_dir_;
  MockEnv* mem_env_;
  Env* encrypted_env_;
  SpecialEnv* env_;
  DB* db_;
  std::vector<ColumnFamilyHandle*> handles_;

  int option_config_;
  Options last_options_;

  // Skip some options, as they may not be applicable to a specific test.
  // To add more skip constants, use values 4, 8, 16, etc.
  enum OptionSkip {
    kNoSkip = 0,
    kSkipDeletesFilterFirst = 1,
    kSkipUniversalCompaction = 2,
    kSkipMergePut = 4,
    kSkipPlainTable = 8,
    kSkipHashIndex = 16,
    kSkipNoSeekToLast = 32,
    kSkipFIFOCompaction = 128,
    kSkipMmapReads = 256,
  };

  const int kRangeDelSkipConfigs =
      // Plain tables do not support range deletions.
      kSkipPlainTable |
      // MmapReads disables the iterator pinning that RangeDelAggregator
      // requires.
      kSkipMmapReads;

  explicit DBTestBase(const std::string path);

  ~DBTestBase();

  static std::string RandomString(Random* rnd, int len) {
    std::string r;
    test::RandomString(rnd, len, &r);
    return r;
  }

  static std::string Key(int i) {
    char buf[100];
    snprintf(buf, sizeof(buf), "key%06d", i);
    return std::string(buf);
  }

  static bool ShouldSkipOptions(int option_config, int skip_mask = kNoSkip);

  // Switch to a fresh database with the next option configuration to
  // test. Return false if there are no more configurations to test.
  bool ChangeOptions(int skip_mask = kNoSkip);

  // Switch between different compaction styles.
  bool ChangeCompactOptions();

  // Switch between different WAL-related options.
  bool ChangeWalOptions();

  // Switch between different filter policies.
  // Jump from kDefault to kFilter to kFullFilter.
  bool ChangeFilterOptions();

  // Switch between different DB options for file ingestion tests.
  bool ChangeOptionsForFileIngestionTest();

  // Return the current option configuration.
  Options CurrentOptions(const anon::OptionsOverride& options_override =
                             anon::OptionsOverride()) const;

  Options CurrentOptions(const Options& default_options,
                         const anon::OptionsOverride& options_override =
                             anon::OptionsOverride()) const;

  static Options GetDefaultOptions();

  Options GetOptions(int option_config,
                     const Options& default_options = GetDefaultOptions(),
                     const anon::OptionsOverride& options_override =
                         anon::OptionsOverride()) const;

  DBImpl* dbfull() { return reinterpret_cast<DBImpl*>(db_); }

  void CreateColumnFamilies(const std::vector<std::string>& cfs,
                            const Options& options);

  void CreateAndReopenWithCF(const std::vector<std::string>& cfs,
                             const Options& options);

  void ReopenWithColumnFamilies(const std::vector<std::string>& cfs,
                                const std::vector<Options>& options);

  void ReopenWithColumnFamilies(const std::vector<std::string>& cfs,
                                const Options& options);

  Status TryReopenWithColumnFamilies(const std::vector<std::string>& cfs,
                                     const std::vector<Options>& options);

  Status TryReopenWithColumnFamilies(const std::vector<std::string>& cfs,
                                     const Options& options);

  void Reopen(const Options& options);

  void Close();

  void DestroyAndReopen(const Options& options);

  void Destroy(const Options& options, bool delete_cf_paths = false);

  Status ReadOnlyReopen(const Options& options);

  Status TryReopen(const Options& options);

  bool IsDirectIOSupported();

  bool IsMemoryMappedAccessSupported() const;

  Status Flush(int cf = 0);

  Status Flush(const std::vector<int>& cf_ids);

  Status Put(const Slice& k, const Slice& v, WriteOptions wo = WriteOptions());

  Status Put(int cf, const Slice& k, const Slice& v,
             WriteOptions wo = WriteOptions());

  Status Merge(const Slice& k, const Slice& v,
               WriteOptions wo = WriteOptions());

  Status Merge(int cf, const Slice& k, const Slice& v,
               WriteOptions wo = WriteOptions());

  Status Delete(const std::string& k);

  Status Delete(int cf, const std::string& k);

  Status SingleDelete(const std::string& k);

  Status SingleDelete(int cf, const std::string& k);

  bool SetPreserveDeletesSequenceNumber(SequenceNumber sn);

  std::string Get(const std::string& k, const Snapshot* snapshot = nullptr);

  std::string Get(int cf, const std::string& k,
                  const Snapshot* snapshot = nullptr);

  Status Get(const std::string& k, PinnableSlice* v);

  std::vector<std::string> MultiGet(std::vector<int> cfs,
                                    const std::vector<std::string>& k,
                                    const Snapshot* snapshot = nullptr);

  uint64_t GetNumSnapshots();

  uint64_t GetTimeOldestSnapshots();

  // Return a string that contains all key,value pairs in order,
  // formatted like "(k1->v1)(k2->v2)".
  std::string Contents(int cf = 0);

  std::string AllEntriesFor(const Slice& user_key, int cf = 0);

#ifndef ROCKSDB_LITE
  int NumSortedRuns(int cf = 0);

  uint64_t TotalSize(int cf = 0);

  uint64_t SizeAtLevel(int level);

  size_t TotalLiveFiles(int cf = 0);

  size_t CountLiveFiles();

  int NumTableFilesAtLevel(int level, int cf = 0);

  double CompressionRatioAtLevel(int level, int cf = 0);

  int TotalTableFiles(int cf = 0, int levels = -1);
#endif  // ROCKSDB_LITE

  // Return spread of files per level
  std::string FilesPerLevel(int cf = 0);

  size_t CountFiles();

  uint64_t Size(const Slice& start, const Slice& limit, int cf = 0);

  void Compact(int cf, const Slice& start, const Slice& limit,
               uint32_t target_path_id);

  void Compact(int cf, const Slice& start, const Slice& limit);

  void Compact(const Slice& start, const Slice& limit);

  // Do n memtable compactions, each of which produces an sstable
  // covering the range [small,large].
  void MakeTables(int n, const std::string& small, const std::string& large,
                  int cf = 0);

  // Prevent pushing of new sstables into deeper levels by adding
  // tables that cover a specified range to all levels.
  void FillLevels(const std::string& smallest, const std::string& largest,
                  int cf);

  void MoveFilesToLevel(int level, int cf = 0);

#ifndef ROCKSDB_LITE
  void DumpFileCounts(const char* label);
#endif  // ROCKSDB_LITE

  std::string DumpSSTableList();

  static void GetSstFiles(Env* env, std::string path,
                          std::vector<std::string>* files);

  int GetSstFileCount(std::string path);

  // this will generate non-overlapping files since it keeps increasing key_idx
  void GenerateNewFile(Random* rnd, int* key_idx, bool nowait = false);

  void GenerateNewFile(int fd, Random* rnd, int* key_idx, bool nowait = false);

  static const int kNumKeysByGenerateNewRandomFile;
  static const int KNumKeysByGenerateNewFile = 100;

  void GenerateNewRandomFile(Random* rnd, bool nowait = false);

  std::string IterStatus(Iterator* iter);

  Options OptionsForLogIterTest();

  std::string DummyString(size_t len, char c = 'a');

  void VerifyIterLast(std::string expected_key, int cf = 0);

  // Used to test InplaceUpdate.

  // If the previous value is nullptr or delta is larger than the previous
  // value, sets newValue to delta.
  // If the previous value is not empty, updates the previous value in place
  // with a string of 'b's whose size is the previous size minus one.
  static UpdateStatus updateInPlaceSmallerSize(char* prevValue,
                                               uint32_t* prevSize, Slice delta,
                                               std::string* newValue);

  static UpdateStatus updateInPlaceSmallerVarintSize(char* prevValue,
                                                     uint32_t* prevSize,
                                                     Slice delta,
                                                     std::string* newValue);

  static UpdateStatus updateInPlaceLargerSize(char* prevValue,
                                              uint32_t* prevSize, Slice delta,
                                              std::string* newValue);

  static UpdateStatus updateInPlaceNoAction(char* prevValue, uint32_t* prevSize,
                                            Slice delta, std::string* newValue);
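
  // A typical wiring in the tests (illustrative sketch, not part of the
  // original header):
  //
  //   Options options = CurrentOptions();
  //   options.inplace_update_support = true;
  //   options.inplace_callback = DBTestBase::updateInPlaceSmallerSize;
  //   Reopen(options);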

  // Utility method to test InplaceUpdate
  void validateNumberOfEntries(int numValues, int cf = 0);

  void CopyFile(const std::string& source, const std::string& destination,
                uint64_t size = 0);

  std::unordered_map<std::string, uint64_t> GetAllSSTFiles(
      uint64_t* total_size = nullptr);

  std::vector<std::uint64_t> ListTableFiles(Env* env, const std::string& path);

  void VerifyDBFromMap(
      std::map<std::string, std::string> true_data,
      size_t* total_reads_res = nullptr, bool tailing_iter = false,
      std::map<std::string, Status> status = std::map<std::string, Status>());

  void VerifyDBInternal(
      std::vector<std::pair<std::string, std::string>> true_data);

#ifndef ROCKSDB_LITE
  uint64_t GetNumberOfSstFilesForColumnFamily(DB* db,
                                              std::string column_family_name);
#endif  // ROCKSDB_LITE

  uint64_t TestGetTickerCount(const Options& options, Tickers ticker_type) {
    return options.statistics->getTickerCount(ticker_type);
  }
};
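
// Typical usage (illustrative sketch, not part of the original header): a
// test fixture derives from DBTestBase, names a scratch directory, and cycles
// through the option configurations above. The fixture name, the directory,
// and the key/value strings are assumptions made for the example.
//
//   class DBExampleTest : public DBTestBase {
//    public:
//     DBExampleTest() : DBTestBase("/db_example_test") {}
//   };
//
//   TEST_F(DBExampleTest, PutAndGet) {
//     do {
//       DestroyAndReopen(CurrentOptions());
//       ASSERT_OK(Put("foo", "v1"));
//       ASSERT_EQ("v1", Get("foo"));
//     } while (ChangeOptions());
//   }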

}  // namespace rocksdb