1 // Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
2 // This source code is licensed under both the GPLv2 (found in the
3 // COPYING file in the root directory) and Apache 2.0 License
4 // (found in the LICENSE.Apache file in the root directory).
5 //
6 // Copyright (c) 2011 The LevelDB Authors. All rights reserved.
7 // Use of this source code is governed by a BSD-style license that can be
8 // found in the LICENSE file. See the AUTHORS file for names of contributors.
9
10 #include "db/db_test_util.h"
11 #include "db/forward_iterator.h"
12 #include "rocksdb/env_encryption.h"
13
14 namespace rocksdb {
15
16 // Special Env used to delay background operations
17
18 SpecialEnv::SpecialEnv(Env* base)
19 : EnvWrapper(base),
20 rnd_(301),
21 sleep_counter_(this),
22 addon_time_(0),
23 time_elapse_only_sleep_(false),
24 no_slowdown_(false) {
25 delay_sstable_sync_.store(false, std::memory_order_release);
26 drop_writes_.store(false, std::memory_order_release);
27 no_space_.store(false, std::memory_order_release);
28 non_writable_.store(false, std::memory_order_release);
29 count_random_reads_ = false;
30 count_sequential_reads_ = false;
31 manifest_sync_error_.store(false, std::memory_order_release);
32 manifest_write_error_.store(false, std::memory_order_release);
33 log_write_error_.store(false, std::memory_order_release);
34 random_file_open_counter_.store(0, std::memory_order_relaxed);
35 delete_count_.store(0, std::memory_order_relaxed);
36 num_open_wal_file_.store(0);
37 log_write_slowdown_ = 0;
38 bytes_written_ = 0;
39 sync_counter_ = 0;
40 non_writeable_rate_ = 0;
41 new_writable_count_ = 0;
42 non_writable_count_ = 0;
43 table_write_callback_ = nullptr;
44 }
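
// Illustrative (assumed typical usage): tests inject faults or delays by
// toggling the flags initialized above at runtime, e.g.
//   env_->drop_writes_.store(true, std::memory_order_release);
//   ... issue writes that are expected to be dropped ...
//   env_->drop_writes_.store(false, std::memory_order_release);
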
45 #ifndef ROCKSDB_LITE
46 ROT13BlockCipher rot13Cipher_(16);
47 #endif // ROCKSDB_LITE
48
49 DBTestBase::DBTestBase(const std::string path)
50 : mem_env_(!getenv("MEM_ENV") ? nullptr : new MockEnv(Env::Default())),
51 #ifndef ROCKSDB_LITE
52 encrypted_env_(
53 !getenv("ENCRYPTED_ENV")
54 ? nullptr
55 : NewEncryptedEnv(mem_env_ ? mem_env_ : Env::Default(),
56 new CTREncryptionProvider(rot13Cipher_))),
57 #else
58 encrypted_env_(nullptr),
59 #endif // ROCKSDB_LITE
60 env_(new SpecialEnv(encrypted_env_
61 ? encrypted_env_
62 : (mem_env_ ? mem_env_ : Env::Default()))),
63 option_config_(kDefault) {
64 env_->SetBackgroundThreads(1, Env::LOW);
65 env_->SetBackgroundThreads(1, Env::HIGH);
66 dbname_ = test::PerThreadDBPath(env_, path);
67 alternative_wal_dir_ = dbname_ + "/wal";
68 alternative_db_log_dir_ = dbname_ + "/db_log_dir";
69 auto options = CurrentOptions();
70 options.env = env_;
71 auto delete_options = options;
72 delete_options.wal_dir = alternative_wal_dir_;
73 EXPECT_OK(DestroyDB(dbname_, delete_options));
  // Destroy it again, for the case where no alternative WAL dir was used.
75 EXPECT_OK(DestroyDB(dbname_, options));
76 db_ = nullptr;
77 Reopen(options);
78 Random::GetTLSInstance()->Reset(0xdeadbeef);
79 }
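
// Illustrative: the backing Env is selected by environment variables read in
// the initializer list above. For example, running the test binary as
//   MEM_ENV=1 ./db_test --gtest_filter=<test name>
// uses MockEnv, and ENCRYPTED_ENV=1 additionally wraps the base Env with an
// encrypted Env (non-LITE builds only).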
80
81 DBTestBase::~DBTestBase() {
82 rocksdb::SyncPoint::GetInstance()->DisableProcessing();
83 rocksdb::SyncPoint::GetInstance()->LoadDependency({});
84 rocksdb::SyncPoint::GetInstance()->ClearAllCallBacks();
85 Close();
86 Options options;
87 options.db_paths.emplace_back(dbname_, 0);
88 options.db_paths.emplace_back(dbname_ + "_2", 0);
89 options.db_paths.emplace_back(dbname_ + "_3", 0);
90 options.db_paths.emplace_back(dbname_ + "_4", 0);
91 options.env = env_;
92
93 if (getenv("KEEP_DB")) {
94 printf("DB is still at %s\n", dbname_.c_str());
95 } else {
96 EXPECT_OK(DestroyDB(dbname_, options));
97 }
98 delete env_;
99 }
100
101 bool DBTestBase::ShouldSkipOptions(int option_config, int skip_mask) {
102 #ifdef ROCKSDB_LITE
103 // These options are not supported in ROCKSDB_LITE
104 if (option_config == kHashSkipList ||
105 option_config == kPlainTableFirstBytePrefix ||
106 option_config == kPlainTableCappedPrefix ||
107 option_config == kPlainTableCappedPrefixNonMmap ||
108 option_config == kPlainTableAllBytesPrefix ||
109 option_config == kVectorRep || option_config == kHashLinkList ||
110 option_config == kUniversalCompaction ||
111 option_config == kUniversalCompactionMultiLevel ||
112 option_config == kUniversalSubcompactions ||
113 option_config == kFIFOCompaction ||
114 option_config == kConcurrentSkipList) {
115 return true;
116 }
117 #endif
118
119 if ((skip_mask & kSkipUniversalCompaction) &&
120 (option_config == kUniversalCompaction ||
121 option_config == kUniversalCompactionMultiLevel ||
122 option_config == kUniversalSubcompactions)) {
123 return true;
124 }
125 if ((skip_mask & kSkipMergePut) && option_config == kMergePut) {
126 return true;
127 }
128 if ((skip_mask & kSkipNoSeekToLast) &&
129 (option_config == kHashLinkList || option_config == kHashSkipList)) {
130 return true;
131 }
132 if ((skip_mask & kSkipPlainTable) &&
133 (option_config == kPlainTableAllBytesPrefix ||
134 option_config == kPlainTableFirstBytePrefix ||
135 option_config == kPlainTableCappedPrefix ||
136 option_config == kPlainTableCappedPrefixNonMmap)) {
137 return true;
138 }
139 if ((skip_mask & kSkipHashIndex) &&
140 (option_config == kBlockBasedTableWithPrefixHashIndex ||
141 option_config == kBlockBasedTableWithWholeKeyHashIndex)) {
142 return true;
143 }
144 if ((skip_mask & kSkipFIFOCompaction) && option_config == kFIFOCompaction) {
145 return true;
146 }
147 if ((skip_mask & kSkipMmapReads) && option_config == kWalDirAndMmapReads) {
148 return true;
149 }
150 return false;
151 }
152
153 // Switch to a fresh database with the next option configuration to
154 // test. Return false if there are no more configurations to test.
155 bool DBTestBase::ChangeOptions(int skip_mask) {
156 for (option_config_++; option_config_ < kEnd; option_config_++) {
157 if (ShouldSkipOptions(option_config_, skip_mask)) {
158 continue;
159 }
160 break;
161 }
162
163 if (option_config_ >= kEnd) {
164 Destroy(last_options_);
165 return false;
166 } else {
167 auto options = CurrentOptions();
168 options.create_if_missing = true;
169 DestroyAndReopen(options);
170 return true;
171 }
172 }
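
// Illustrative usage: tests typically wrap their body in a loop so that it
// runs once per remaining option configuration, e.g.
//   do {
//     ... test body using CurrentOptions() ...
//   } while (ChangeOptions());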
173
174 // Switch between different compaction styles.
175 bool DBTestBase::ChangeCompactOptions() {
176 if (option_config_ == kDefault) {
177 option_config_ = kUniversalCompaction;
178 Destroy(last_options_);
179 auto options = CurrentOptions();
180 options.create_if_missing = true;
181 TryReopen(options);
182 return true;
183 } else if (option_config_ == kUniversalCompaction) {
184 option_config_ = kUniversalCompactionMultiLevel;
185 Destroy(last_options_);
186 auto options = CurrentOptions();
187 options.create_if_missing = true;
188 TryReopen(options);
189 return true;
190 } else if (option_config_ == kUniversalCompactionMultiLevel) {
191 option_config_ = kLevelSubcompactions;
192 Destroy(last_options_);
193 auto options = CurrentOptions();
194 assert(options.max_subcompactions > 1);
195 TryReopen(options);
196 return true;
197 } else if (option_config_ == kLevelSubcompactions) {
198 option_config_ = kUniversalSubcompactions;
199 Destroy(last_options_);
200 auto options = CurrentOptions();
201 assert(options.max_subcompactions > 1);
202 TryReopen(options);
203 return true;
204 } else {
205 return false;
206 }
207 }
208
209 // Switch between different WAL settings
210 bool DBTestBase::ChangeWalOptions() {
211 if (option_config_ == kDefault) {
212 option_config_ = kDBLogDir;
213 Destroy(last_options_);
214 auto options = CurrentOptions();
215 Destroy(options);
216 options.create_if_missing = true;
217 TryReopen(options);
218 return true;
219 } else if (option_config_ == kDBLogDir) {
220 option_config_ = kWalDirAndMmapReads;
221 Destroy(last_options_);
222 auto options = CurrentOptions();
223 Destroy(options);
224 options.create_if_missing = true;
225 TryReopen(options);
226 return true;
227 } else if (option_config_ == kWalDirAndMmapReads) {
228 option_config_ = kRecycleLogFiles;
229 Destroy(last_options_);
230 auto options = CurrentOptions();
231 Destroy(options);
232 TryReopen(options);
233 return true;
234 } else {
235 return false;
236 }
237 }
238
// Switch between different filter policies.
// Jump from kDefault to kFilter to kFullFilterWithNewTableReaderForCompactions
// to kPartitionedFilterWithNewTableReaderForCompactions.
241 bool DBTestBase::ChangeFilterOptions() {
242 if (option_config_ == kDefault) {
243 option_config_ = kFilter;
244 } else if (option_config_ == kFilter) {
245 option_config_ = kFullFilterWithNewTableReaderForCompactions;
246 } else if (option_config_ == kFullFilterWithNewTableReaderForCompactions) {
247 option_config_ = kPartitionedFilterWithNewTableReaderForCompactions;
248 } else {
249 return false;
250 }
251 Destroy(last_options_);
252
253 auto options = CurrentOptions();
254 options.create_if_missing = true;
255 TryReopen(options);
256 return true;
257 }
258
259 // Switch between different DB options for file ingestion tests.
260 bool DBTestBase::ChangeOptionsForFileIngestionTest() {
261 if (option_config_ == kDefault) {
262 option_config_ = kUniversalCompaction;
263 Destroy(last_options_);
264 auto options = CurrentOptions();
265 options.create_if_missing = true;
266 TryReopen(options);
267 return true;
268 } else if (option_config_ == kUniversalCompaction) {
269 option_config_ = kUniversalCompactionMultiLevel;
270 Destroy(last_options_);
271 auto options = CurrentOptions();
272 options.create_if_missing = true;
273 TryReopen(options);
274 return true;
275 } else if (option_config_ == kUniversalCompactionMultiLevel) {
276 option_config_ = kLevelSubcompactions;
277 Destroy(last_options_);
278 auto options = CurrentOptions();
279 assert(options.max_subcompactions > 1);
280 TryReopen(options);
281 return true;
282 } else if (option_config_ == kLevelSubcompactions) {
283 option_config_ = kUniversalSubcompactions;
284 Destroy(last_options_);
285 auto options = CurrentOptions();
286 assert(options.max_subcompactions > 1);
287 TryReopen(options);
288 return true;
289 } else if (option_config_ == kUniversalSubcompactions) {
290 option_config_ = kDirectIO;
291 Destroy(last_options_);
292 auto options = CurrentOptions();
293 TryReopen(options);
294 return true;
295 } else {
296 return false;
297 }
298 }
299
300 // Return the current option configuration.
301 Options DBTestBase::CurrentOptions(
302 const anon::OptionsOverride& options_override) const {
303 return GetOptions(option_config_, GetDefaultOptions(), options_override);
304 }
305
306 Options DBTestBase::CurrentOptions(
307 const Options& default_options,
308 const anon::OptionsOverride& options_override) const {
309 return GetOptions(option_config_, default_options, options_override);
310 }
311
312 Options DBTestBase::GetDefaultOptions() {
313 Options options;
314 options.write_buffer_size = 4090 * 4096;
315 options.target_file_size_base = 2 * 1024 * 1024;
316 options.max_bytes_for_level_base = 10 * 1024 * 1024;
317 options.max_open_files = 5000;
318 options.wal_recovery_mode = WALRecoveryMode::kTolerateCorruptedTailRecords;
319 options.compaction_pri = CompactionPri::kByCompensatedSize;
320 return options;
321 }
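
// Illustrative: tests usually start from these defaults via CurrentOptions()
// and tweak individual fields before reopening, e.g.
//   Options options = CurrentOptions();
//   options.write_buffer_size = 100000;  // small buffer to force flushes
//   Reopen(options);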
322
323 Options DBTestBase::GetOptions(
324 int option_config, const Options& default_options,
325 const anon::OptionsOverride& options_override) const {
  // This redundant copy is to minimize the code change without triggering a
  // lint error.
327 Options options = default_options;
328 BlockBasedTableOptions table_options;
329 bool set_block_based_table_factory = true;
330 #if !defined(OS_MACOSX) && !defined(OS_WIN) && !defined(OS_SOLARIS) && \
331 !defined(OS_AIX)
332 rocksdb::SyncPoint::GetInstance()->ClearCallBack(
333 "NewRandomAccessFile:O_DIRECT");
334 rocksdb::SyncPoint::GetInstance()->ClearCallBack("NewWritableFile:O_DIRECT");
335 #endif
336
337 bool can_allow_mmap = IsMemoryMappedAccessSupported();
338 switch (option_config) {
339 #ifndef ROCKSDB_LITE
340 case kHashSkipList:
341 options.prefix_extractor.reset(NewFixedPrefixTransform(1));
342 options.memtable_factory.reset(NewHashSkipListRepFactory(16));
343 options.allow_concurrent_memtable_write = false;
344 break;
345 case kPlainTableFirstBytePrefix:
346 options.table_factory.reset(new PlainTableFactory());
347 options.prefix_extractor.reset(NewFixedPrefixTransform(1));
348 options.allow_mmap_reads = can_allow_mmap;
349 options.max_sequential_skip_in_iterations = 999999;
350 set_block_based_table_factory = false;
351 break;
352 case kPlainTableCappedPrefix:
353 options.table_factory.reset(new PlainTableFactory());
354 options.prefix_extractor.reset(NewCappedPrefixTransform(8));
355 options.allow_mmap_reads = can_allow_mmap;
356 options.max_sequential_skip_in_iterations = 999999;
357 set_block_based_table_factory = false;
358 break;
359 case kPlainTableCappedPrefixNonMmap:
360 options.table_factory.reset(new PlainTableFactory());
361 options.prefix_extractor.reset(NewCappedPrefixTransform(8));
362 options.allow_mmap_reads = false;
363 options.max_sequential_skip_in_iterations = 999999;
364 set_block_based_table_factory = false;
365 break;
366 case kPlainTableAllBytesPrefix:
367 options.table_factory.reset(new PlainTableFactory());
368 options.prefix_extractor.reset(NewNoopTransform());
369 options.allow_mmap_reads = can_allow_mmap;
370 options.max_sequential_skip_in_iterations = 999999;
371 set_block_based_table_factory = false;
372 break;
373 case kVectorRep:
374 options.memtable_factory.reset(new VectorRepFactory(100));
375 options.allow_concurrent_memtable_write = false;
376 break;
377 case kHashLinkList:
378 options.prefix_extractor.reset(NewFixedPrefixTransform(1));
379 options.memtable_factory.reset(
380 NewHashLinkListRepFactory(4, 0, 3, true, 4));
381 options.allow_concurrent_memtable_write = false;
382 break;
383 case kDirectIO: {
384 options.use_direct_reads = true;
385 options.use_direct_io_for_flush_and_compaction = true;
386 options.compaction_readahead_size = 2 * 1024 * 1024;
387 #if !defined(OS_MACOSX) && !defined(OS_WIN) && !defined(OS_SOLARIS) && \
388 !defined(OS_AIX) && !defined(OS_OPENBSD)
389 rocksdb::SyncPoint::GetInstance()->SetCallBack(
390 "NewWritableFile:O_DIRECT", [&](void* arg) {
391 int* val = static_cast<int*>(arg);
392 *val &= ~O_DIRECT;
393 });
394 rocksdb::SyncPoint::GetInstance()->SetCallBack(
395 "NewRandomAccessFile:O_DIRECT", [&](void* arg) {
396 int* val = static_cast<int*>(arg);
397 *val &= ~O_DIRECT;
398 });
399 rocksdb::SyncPoint::GetInstance()->EnableProcessing();
400 #endif
401 break;
402 }
403 #endif // ROCKSDB_LITE
404 case kMergePut:
405 options.merge_operator = MergeOperators::CreatePutOperator();
406 break;
407 case kFilter:
408 table_options.filter_policy.reset(NewBloomFilterPolicy(10, true));
409 break;
410 case kFullFilterWithNewTableReaderForCompactions:
411 table_options.filter_policy.reset(NewBloomFilterPolicy(10, false));
412 options.new_table_reader_for_compaction_inputs = true;
413 options.compaction_readahead_size = 10 * 1024 * 1024;
414 break;
415 case kPartitionedFilterWithNewTableReaderForCompactions:
416 table_options.filter_policy.reset(NewBloomFilterPolicy(10, false));
417 table_options.partition_filters = true;
418 table_options.index_type =
419 BlockBasedTableOptions::IndexType::kTwoLevelIndexSearch;
420 options.new_table_reader_for_compaction_inputs = true;
421 options.compaction_readahead_size = 10 * 1024 * 1024;
422 break;
423 case kUncompressed:
424 options.compression = kNoCompression;
425 break;
426 case kNumLevel_3:
427 options.num_levels = 3;
428 break;
429 case kDBLogDir:
430 options.db_log_dir = alternative_db_log_dir_;
431 break;
432 case kWalDirAndMmapReads:
433 options.wal_dir = alternative_wal_dir_;
      // mmap reads should be orthogonal to the WAL dir setting, so we
      // piggyback on this option config to test mmap reads as well
436 options.allow_mmap_reads = can_allow_mmap;
437 break;
438 case kManifestFileSize:
439 options.max_manifest_file_size = 50; // 50 bytes
440 break;
441 case kPerfOptions:
442 options.soft_rate_limit = 2.0;
443 options.delayed_write_rate = 8 * 1024 * 1024;
444 options.report_bg_io_stats = true;
445 // TODO(3.13) -- test more options
446 break;
447 case kUniversalCompaction:
448 options.compaction_style = kCompactionStyleUniversal;
449 options.num_levels = 1;
450 break;
451 case kUniversalCompactionMultiLevel:
452 options.compaction_style = kCompactionStyleUniversal;
453 options.num_levels = 8;
454 break;
455 case kCompressedBlockCache:
456 options.allow_mmap_writes = can_allow_mmap;
457 table_options.block_cache_compressed = NewLRUCache(8 * 1024 * 1024);
458 break;
459 case kInfiniteMaxOpenFiles:
460 options.max_open_files = -1;
461 break;
462 case kxxHashChecksum: {
463 table_options.checksum = kxxHash;
464 break;
465 }
466 case kxxHash64Checksum: {
467 table_options.checksum = kxxHash64;
468 break;
469 }
470 case kFIFOCompaction: {
471 options.compaction_style = kCompactionStyleFIFO;
472 break;
473 }
474 case kBlockBasedTableWithPrefixHashIndex: {
475 table_options.index_type = BlockBasedTableOptions::kHashSearch;
476 options.prefix_extractor.reset(NewFixedPrefixTransform(1));
477 break;
478 }
479 case kBlockBasedTableWithWholeKeyHashIndex: {
480 table_options.index_type = BlockBasedTableOptions::kHashSearch;
481 options.prefix_extractor.reset(NewNoopTransform());
482 break;
483 }
484 case kBlockBasedTableWithPartitionedIndex: {
485 table_options.index_type = BlockBasedTableOptions::kTwoLevelIndexSearch;
486 options.prefix_extractor.reset(NewNoopTransform());
487 break;
488 }
489 case kBlockBasedTableWithPartitionedIndexFormat4: {
490 table_options.format_version = 4;
491 // Format 4 changes the binary index format. Since partitioned index is a
492 // super-set of simple indexes, we are also using kTwoLevelIndexSearch to
493 // test this format.
494 table_options.index_type = BlockBasedTableOptions::kTwoLevelIndexSearch;
      // The top-level index of partitioned filters is also affected by
      // format 4.
496 table_options.filter_policy.reset(NewBloomFilterPolicy(10, false));
497 table_options.partition_filters = true;
498 table_options.index_block_restart_interval = 8;
499 break;
500 }
501 case kBlockBasedTableWithIndexRestartInterval: {
502 table_options.index_block_restart_interval = 8;
503 break;
504 }
505 case kOptimizeFiltersForHits: {
506 options.optimize_filters_for_hits = true;
507 set_block_based_table_factory = true;
508 break;
509 }
510 case kRowCache: {
511 options.row_cache = NewLRUCache(1024 * 1024);
512 break;
513 }
514 case kRecycleLogFiles: {
515 options.recycle_log_file_num = 2;
516 break;
517 }
518 case kLevelSubcompactions: {
519 options.max_subcompactions = 4;
520 break;
521 }
522 case kUniversalSubcompactions: {
523 options.compaction_style = kCompactionStyleUniversal;
524 options.num_levels = 8;
525 options.max_subcompactions = 4;
526 break;
527 }
528 case kConcurrentSkipList: {
529 options.allow_concurrent_memtable_write = true;
530 options.enable_write_thread_adaptive_yield = true;
531 break;
532 }
533 case kPipelinedWrite: {
534 options.enable_pipelined_write = true;
535 break;
536 }
537 case kConcurrentWALWrites: {
      // These options optimize the 2PC commit path.
539 options.two_write_queues = true;
540 options.manual_wal_flush = true;
541 break;
542 }
543
544 default:
545 break;
546 }
547
548 if (options_override.filter_policy) {
549 table_options.filter_policy = options_override.filter_policy;
550 table_options.partition_filters = options_override.partition_filters;
551 table_options.metadata_block_size = options_override.metadata_block_size;
552 }
553 if (set_block_based_table_factory) {
554 options.table_factory.reset(NewBlockBasedTableFactory(table_options));
555 }
556 options.env = env_;
557 options.create_if_missing = true;
558 options.fail_if_options_file_error = true;
559 return options;
560 }
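
// Illustrative sketch (assuming the anon::OptionsOverride fields referenced
// above): a test that only needs a specific filter setup can pass an override
// instead of adding a new option_config value, e.g.
//   anon::OptionsOverride options_override;
//   options_override.filter_policy.reset(NewBloomFilterPolicy(20));
//   Options options = CurrentOptions(options_override);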
561
562 void DBTestBase::CreateColumnFamilies(const std::vector<std::string>& cfs,
563 const Options& options) {
564 ColumnFamilyOptions cf_opts(options);
565 size_t cfi = handles_.size();
566 handles_.resize(cfi + cfs.size());
567 for (auto cf : cfs) {
568 ASSERT_OK(db_->CreateColumnFamily(cf_opts, cf, &handles_[cfi++]));
569 }
570 }
571
572 void DBTestBase::CreateAndReopenWithCF(const std::vector<std::string>& cfs,
573 const Options& options) {
574 CreateColumnFamilies(cfs, options);
575 std::vector<std::string> cfs_plus_default = cfs;
576 cfs_plus_default.insert(cfs_plus_default.begin(), kDefaultColumnFamilyName);
577 ReopenWithColumnFamilies(cfs_plus_default, options);
578 }
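
// Illustrative: a test that needs an extra column family typically does
//   CreateAndReopenWithCF({"pikachu"}, CurrentOptions());
//   ASSERT_OK(Put(1, "foo", "v1"));  // cf index 1 maps to handles_[1]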
579
580 void DBTestBase::ReopenWithColumnFamilies(const std::vector<std::string>& cfs,
581 const std::vector<Options>& options) {
582 ASSERT_OK(TryReopenWithColumnFamilies(cfs, options));
583 }
584
585 void DBTestBase::ReopenWithColumnFamilies(const std::vector<std::string>& cfs,
586 const Options& options) {
587 ASSERT_OK(TryReopenWithColumnFamilies(cfs, options));
588 }
589
590 Status DBTestBase::TryReopenWithColumnFamilies(
591 const std::vector<std::string>& cfs, const std::vector<Options>& options) {
592 Close();
593 EXPECT_EQ(cfs.size(), options.size());
594 std::vector<ColumnFamilyDescriptor> column_families;
595 for (size_t i = 0; i < cfs.size(); ++i) {
596 column_families.push_back(ColumnFamilyDescriptor(cfs[i], options[i]));
597 }
598 DBOptions db_opts = DBOptions(options[0]);
599 last_options_ = options[0];
600 return DB::Open(db_opts, dbname_, column_families, &handles_, &db_);
601 }
602
603 Status DBTestBase::TryReopenWithColumnFamilies(
604 const std::vector<std::string>& cfs, const Options& options) {
605 Close();
606 std::vector<Options> v_opts(cfs.size(), options);
607 return TryReopenWithColumnFamilies(cfs, v_opts);
608 }
609
610 void DBTestBase::Reopen(const Options& options) {
611 ASSERT_OK(TryReopen(options));
612 }
613
614 void DBTestBase::Close() {
615 for (auto h : handles_) {
616 db_->DestroyColumnFamilyHandle(h);
617 }
618 handles_.clear();
619 delete db_;
620 db_ = nullptr;
621 }
622
623 void DBTestBase::DestroyAndReopen(const Options& options) {
624 // Destroy using last options
625 Destroy(last_options_);
626 ASSERT_OK(TryReopen(options));
627 }
628
629 void DBTestBase::Destroy(const Options& options, bool delete_cf_paths) {
630 std::vector<ColumnFamilyDescriptor> column_families;
631 if (delete_cf_paths) {
632 for (size_t i = 0; i < handles_.size(); ++i) {
633 ColumnFamilyDescriptor cfdescriptor;
634 handles_[i]->GetDescriptor(&cfdescriptor);
635 column_families.push_back(cfdescriptor);
636 }
637 }
638 Close();
639 ASSERT_OK(DestroyDB(dbname_, options, column_families));
640 }
641
642 Status DBTestBase::ReadOnlyReopen(const Options& options) {
643 return DB::OpenForReadOnly(options, dbname_, &db_);
644 }
645
646 Status DBTestBase::TryReopen(const Options& options) {
647 Close();
648 last_options_.table_factory.reset();
  // Note: operator= is an unsafe approach here since it destructs
  // std::shared_ptr members in the order of their creation, in contrast to
  // destructors, which destruct them in the reverse order of creation. One
  // particular problem is that the cache destructor might invoke callback
  // functions that use Option members such as statistics. To work around
  // this problem, we manually reset table_factory, which eventually clears
  // the block cache.
656 last_options_ = options;
657 return DB::Open(options, dbname_, &db_);
658 }
659
660 bool DBTestBase::IsDirectIOSupported() {
661 return test::IsDirectIOSupported(env_, dbname_);
662 }
663
664 bool DBTestBase::IsMemoryMappedAccessSupported() const {
665 return (!encrypted_env_);
666 }
667
668 Status DBTestBase::Flush(int cf) {
669 if (cf == 0) {
670 return db_->Flush(FlushOptions());
671 } else {
672 return db_->Flush(FlushOptions(), handles_[cf]);
673 }
674 }
675
676 Status DBTestBase::Flush(const std::vector<int>& cf_ids) {
677 std::vector<ColumnFamilyHandle*> cfhs;
678 std::for_each(cf_ids.begin(), cf_ids.end(),
679 [&cfhs, this](int id) { cfhs.emplace_back(handles_[id]); });
680 return db_->Flush(FlushOptions(), cfhs);
681 }
682
683 Status DBTestBase::Put(const Slice& k, const Slice& v, WriteOptions wo) {
684 if (kMergePut == option_config_) {
685 return db_->Merge(wo, k, v);
686 } else {
687 return db_->Put(wo, k, v);
688 }
689 }
690
691 Status DBTestBase::Put(int cf, const Slice& k, const Slice& v,
692 WriteOptions wo) {
693 if (kMergePut == option_config_) {
694 return db_->Merge(wo, handles_[cf], k, v);
695 } else {
696 return db_->Put(wo, handles_[cf], k, v);
697 }
698 }
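
// Note: under the kMergePut configuration the Put() helpers above are routed
// through Merge() with MergeOperators::CreatePutOperator(), so the same test
// body also exercises the merge code path.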
699
700 Status DBTestBase::Merge(const Slice& k, const Slice& v, WriteOptions wo) {
701 return db_->Merge(wo, k, v);
702 }
703
704 Status DBTestBase::Merge(int cf, const Slice& k, const Slice& v,
705 WriteOptions wo) {
706 return db_->Merge(wo, handles_[cf], k, v);
707 }
708
709 Status DBTestBase::Delete(const std::string& k) {
710 return db_->Delete(WriteOptions(), k);
711 }
712
713 Status DBTestBase::Delete(int cf, const std::string& k) {
714 return db_->Delete(WriteOptions(), handles_[cf], k);
715 }
716
717 Status DBTestBase::SingleDelete(const std::string& k) {
718 return db_->SingleDelete(WriteOptions(), k);
719 }
720
721 Status DBTestBase::SingleDelete(int cf, const std::string& k) {
722 return db_->SingleDelete(WriteOptions(), handles_[cf], k);
723 }
724
725 bool DBTestBase::SetPreserveDeletesSequenceNumber(SequenceNumber sn) {
726 return db_->SetPreserveDeletesSequenceNumber(sn);
727 }
728
729 std::string DBTestBase::Get(const std::string& k, const Snapshot* snapshot) {
730 ReadOptions options;
731 options.verify_checksums = true;
732 options.snapshot = snapshot;
733 std::string result;
734 Status s = db_->Get(options, k, &result);
735 if (s.IsNotFound()) {
736 result = "NOT_FOUND";
737 } else if (!s.ok()) {
738 result = s.ToString();
739 }
740 return result;
741 }
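
// Illustrative: since Get() folds the status into the returned string, tests
// usually assert on it directly, e.g.
//   ASSERT_EQ("v1", Get("foo"));
//   ASSERT_EQ("NOT_FOUND", Get("missing"));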
742
743 std::string DBTestBase::Get(int cf, const std::string& k,
744 const Snapshot* snapshot) {
745 ReadOptions options;
746 options.verify_checksums = true;
747 options.snapshot = snapshot;
748 std::string result;
749 Status s = db_->Get(options, handles_[cf], k, &result);
750 if (s.IsNotFound()) {
751 result = "NOT_FOUND";
752 } else if (!s.ok()) {
753 result = s.ToString();
754 }
755 return result;
756 }
757
758 std::vector<std::string> DBTestBase::MultiGet(std::vector<int> cfs,
759 const std::vector<std::string>& k,
760 const Snapshot* snapshot) {
761 ReadOptions options;
762 options.verify_checksums = true;
763 options.snapshot = snapshot;
764 std::vector<ColumnFamilyHandle*> handles;
765 std::vector<Slice> keys;
766 std::vector<std::string> result;
767
768 for (unsigned int i = 0; i < cfs.size(); ++i) {
769 handles.push_back(handles_[cfs[i]]);
770 keys.push_back(k[i]);
771 }
772 std::vector<Status> s = db_->MultiGet(options, handles, keys, &result);
773 for (unsigned int i = 0; i < s.size(); ++i) {
774 if (s[i].IsNotFound()) {
775 result[i] = "NOT_FOUND";
776 } else if (!s[i].ok()) {
777 result[i] = s[i].ToString();
778 }
779 }
780 return result;
781 }
782
783 Status DBTestBase::Get(const std::string& k, PinnableSlice* v) {
784 ReadOptions options;
785 options.verify_checksums = true;
786 Status s = dbfull()->Get(options, dbfull()->DefaultColumnFamily(), k, v);
787 return s;
788 }
789
790 uint64_t DBTestBase::GetNumSnapshots() {
791 uint64_t int_num;
792 EXPECT_TRUE(dbfull()->GetIntProperty("rocksdb.num-snapshots", &int_num));
793 return int_num;
794 }
795
796 uint64_t DBTestBase::GetTimeOldestSnapshots() {
797 uint64_t int_num;
798 EXPECT_TRUE(
799 dbfull()->GetIntProperty("rocksdb.oldest-snapshot-time", &int_num));
800 return int_num;
801 }
802
803 // Return a string that contains all key,value pairs in order,
804 // formatted like "(k1->v1)(k2->v2)".
805 std::string DBTestBase::Contents(int cf) {
806 std::vector<std::string> forward;
807 std::string result;
808 Iterator* iter = (cf == 0) ? db_->NewIterator(ReadOptions())
809 : db_->NewIterator(ReadOptions(), handles_[cf]);
810 for (iter->SeekToFirst(); iter->Valid(); iter->Next()) {
811 std::string s = IterStatus(iter);
812 result.push_back('(');
813 result.append(s);
814 result.push_back(')');
815 forward.push_back(s);
816 }
817
818 // Check reverse iteration results are the reverse of forward results
819 unsigned int matched = 0;
820 for (iter->SeekToLast(); iter->Valid(); iter->Prev()) {
821 EXPECT_LT(matched, forward.size());
822 EXPECT_EQ(IterStatus(iter), forward[forward.size() - matched - 1]);
823 matched++;
824 }
825 EXPECT_EQ(matched, forward.size());
826
827 delete iter;
828 return result;
829 }
830
831 std::string DBTestBase::AllEntriesFor(const Slice& user_key, int cf) {
832 Arena arena;
833 auto options = CurrentOptions();
834 InternalKeyComparator icmp(options.comparator);
835 ReadRangeDelAggregator range_del_agg(&icmp,
836 kMaxSequenceNumber /* upper_bound */);
837 ScopedArenaIterator iter;
838 if (cf == 0) {
839 iter.set(dbfull()->NewInternalIterator(&arena, &range_del_agg,
840 kMaxSequenceNumber));
841 } else {
842 iter.set(dbfull()->NewInternalIterator(&arena, &range_del_agg,
843 kMaxSequenceNumber, handles_[cf]));
844 }
845 InternalKey target(user_key, kMaxSequenceNumber, kTypeValue);
846 iter->Seek(target.Encode());
847 std::string result;
848 if (!iter->status().ok()) {
849 result = iter->status().ToString();
850 } else {
851 result = "[ ";
852 bool first = true;
853 while (iter->Valid()) {
854 ParsedInternalKey ikey(Slice(), 0, kTypeValue);
855 if (!ParseInternalKey(iter->key(), &ikey)) {
856 result += "CORRUPTED";
857 } else {
858 if (!last_options_.comparator->Equal(ikey.user_key, user_key)) {
859 break;
860 }
861 if (!first) {
862 result += ", ";
863 }
864 first = false;
865 switch (ikey.type) {
866 case kTypeValue:
867 result += iter->value().ToString();
868 break;
869 case kTypeMerge:
870 // keep it the same as kTypeValue for testing kMergePut
871 result += iter->value().ToString();
872 break;
873 case kTypeDeletion:
874 result += "DEL";
875 break;
876 case kTypeSingleDeletion:
877 result += "SDEL";
878 break;
879 default:
880 assert(false);
881 break;
882 }
883 }
884 iter->Next();
885 }
886 if (!first) {
887 result += " ";
888 }
889 result += "]";
890 }
891 return result;
892 }
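
// Illustrative: AllEntriesFor("foo") returns a string such as
//   "[ v2, DEL, v1 ]"
// i.e. every internal entry for the user key, newest first, with deletions
// rendered as DEL and single deletions as SDEL.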
893
894 #ifndef ROCKSDB_LITE
895 int DBTestBase::NumSortedRuns(int cf) {
896 ColumnFamilyMetaData cf_meta;
897 if (cf == 0) {
898 db_->GetColumnFamilyMetaData(&cf_meta);
899 } else {
900 db_->GetColumnFamilyMetaData(handles_[cf], &cf_meta);
901 }
902 int num_sr = static_cast<int>(cf_meta.levels[0].files.size());
903 for (size_t i = 1U; i < cf_meta.levels.size(); i++) {
904 if (cf_meta.levels[i].files.size() > 0) {
905 num_sr++;
906 }
907 }
908 return num_sr;
909 }
910
911 uint64_t DBTestBase::TotalSize(int cf) {
912 ColumnFamilyMetaData cf_meta;
913 if (cf == 0) {
914 db_->GetColumnFamilyMetaData(&cf_meta);
915 } else {
916 db_->GetColumnFamilyMetaData(handles_[cf], &cf_meta);
917 }
918 return cf_meta.size;
919 }
920
921 uint64_t DBTestBase::SizeAtLevel(int level) {
922 std::vector<LiveFileMetaData> metadata;
923 db_->GetLiveFilesMetaData(&metadata);
924 uint64_t sum = 0;
925 for (const auto& m : metadata) {
926 if (m.level == level) {
927 sum += m.size;
928 }
929 }
930 return sum;
931 }
932
933 size_t DBTestBase::TotalLiveFiles(int cf) {
934 ColumnFamilyMetaData cf_meta;
935 if (cf == 0) {
936 db_->GetColumnFamilyMetaData(&cf_meta);
937 } else {
938 db_->GetColumnFamilyMetaData(handles_[cf], &cf_meta);
939 }
940 size_t num_files = 0;
941 for (auto& level : cf_meta.levels) {
942 num_files += level.files.size();
943 }
944 return num_files;
945 }
946
947 size_t DBTestBase::CountLiveFiles() {
948 std::vector<LiveFileMetaData> metadata;
949 db_->GetLiveFilesMetaData(&metadata);
950 return metadata.size();
951 }
952
953 int DBTestBase::NumTableFilesAtLevel(int level, int cf) {
954 std::string property;
955 if (cf == 0) {
956 // default cfd
957 EXPECT_TRUE(db_->GetProperty(
958 "rocksdb.num-files-at-level" + NumberToString(level), &property));
959 } else {
960 EXPECT_TRUE(db_->GetProperty(
961 handles_[cf], "rocksdb.num-files-at-level" + NumberToString(level),
962 &property));
963 }
964 return atoi(property.c_str());
965 }
966
967 double DBTestBase::CompressionRatioAtLevel(int level, int cf) {
968 std::string property;
969 if (cf == 0) {
970 // default cfd
971 EXPECT_TRUE(db_->GetProperty(
972 "rocksdb.compression-ratio-at-level" + NumberToString(level),
973 &property));
974 } else {
975 EXPECT_TRUE(db_->GetProperty(
976 handles_[cf],
977 "rocksdb.compression-ratio-at-level" + NumberToString(level),
978 &property));
979 }
980 return std::stod(property);
981 }
982
983 int DBTestBase::TotalTableFiles(int cf, int levels) {
984 if (levels == -1) {
985 levels = (cf == 0) ? db_->NumberLevels() : db_->NumberLevels(handles_[1]);
986 }
987 int result = 0;
988 for (int level = 0; level < levels; level++) {
989 result += NumTableFilesAtLevel(level, cf);
990 }
991 return result;
992 }
993
994 // Return spread of files per level
995 std::string DBTestBase::FilesPerLevel(int cf) {
996 int num_levels =
997 (cf == 0) ? db_->NumberLevels() : db_->NumberLevels(handles_[1]);
998 std::string result;
999 size_t last_non_zero_offset = 0;
1000 for (int level = 0; level < num_levels; level++) {
1001 int f = NumTableFilesAtLevel(level, cf);
1002 char buf[100];
1003 snprintf(buf, sizeof(buf), "%s%d", (level ? "," : ""), f);
1004 result += buf;
1005 if (f > 0) {
1006 last_non_zero_offset = result.size();
1007 }
1008 }
1009 result.resize(last_non_zero_offset);
1010 return result;
1011 }
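
// Illustrative: FilesPerLevel() returns a compact per-level file count string
// such as "0,2,4" (0 files in L0, 2 in L1, 4 in L2), with trailing levels
// that contain no files trimmed off.
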
1012 #endif // !ROCKSDB_LITE
1013
1014 size_t DBTestBase::CountFiles() {
1015 std::vector<std::string> files;
1016 env_->GetChildren(dbname_, &files);
1017
1018 std::vector<std::string> logfiles;
1019 if (dbname_ != last_options_.wal_dir) {
1020 env_->GetChildren(last_options_.wal_dir, &logfiles);
1021 }
1022
1023 return files.size() + logfiles.size();
1024 }
1025
1026 uint64_t DBTestBase::Size(const Slice& start, const Slice& limit, int cf) {
1027 Range r(start, limit);
1028 uint64_t size;
1029 if (cf == 0) {
1030 db_->GetApproximateSizes(&r, 1, &size);
1031 } else {
1032 db_->GetApproximateSizes(handles_[1], &r, 1, &size);
1033 }
1034 return size;
1035 }
1036
1037 void DBTestBase::Compact(int cf, const Slice& start, const Slice& limit,
1038 uint32_t target_path_id) {
1039 CompactRangeOptions compact_options;
1040 compact_options.target_path_id = target_path_id;
1041 ASSERT_OK(db_->CompactRange(compact_options, handles_[cf], &start, &limit));
1042 }
1043
1044 void DBTestBase::Compact(int cf, const Slice& start, const Slice& limit) {
1045 ASSERT_OK(
1046 db_->CompactRange(CompactRangeOptions(), handles_[cf], &start, &limit));
1047 }
1048
1049 void DBTestBase::Compact(const Slice& start, const Slice& limit) {
1050 ASSERT_OK(db_->CompactRange(CompactRangeOptions(), &start, &limit));
1051 }
1052
1053 // Do n memtable compactions, each of which produces an sstable
1054 // covering the range [small,large].
1055 void DBTestBase::MakeTables(int n, const std::string& small,
1056 const std::string& large, int cf) {
1057 for (int i = 0; i < n; i++) {
1058 ASSERT_OK(Put(cf, small, "begin"));
1059 ASSERT_OK(Put(cf, large, "end"));
1060 ASSERT_OK(Flush(cf));
1061 MoveFilesToLevel(n - i - 1, cf);
1062 }
1063 }
1064
1065 // Prevent pushing of new sstables into deeper levels by adding
1066 // tables that cover a specified range to all levels.
1067 void DBTestBase::FillLevels(const std::string& smallest,
1068 const std::string& largest, int cf) {
1069 MakeTables(db_->NumberLevels(handles_[cf]), smallest, largest, cf);
1070 }
1071
1072 void DBTestBase::MoveFilesToLevel(int level, int cf) {
1073 for (int l = 0; l < level; ++l) {
1074 if (cf > 0) {
1075 dbfull()->TEST_CompactRange(l, nullptr, nullptr, handles_[cf]);
1076 } else {
1077 dbfull()->TEST_CompactRange(l, nullptr, nullptr);
1078 }
1079 }
1080 }
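
// Illustrative: to place a freshly flushed file directly in a deeper level a
// test typically does
//   ASSERT_OK(Put("a", "begin"));
//   ASSERT_OK(Put("z", "end"));
//   ASSERT_OK(Flush());
//   MoveFilesToLevel(2);  // push the new L0 file down to L2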
1081
1082 #ifndef ROCKSDB_LITE
1083 void DBTestBase::DumpFileCounts(const char* label) {
1084 fprintf(stderr, "---\n%s:\n", label);
1085 fprintf(stderr, "maxoverlap: %" PRIu64 "\n",
1086 dbfull()->TEST_MaxNextLevelOverlappingBytes());
1087 for (int level = 0; level < db_->NumberLevels(); level++) {
1088 int num = NumTableFilesAtLevel(level);
1089 if (num > 0) {
1090 fprintf(stderr, " level %3d : %d files\n", level, num);
1091 }
1092 }
1093 }
1094 #endif // !ROCKSDB_LITE
1095
1096 std::string DBTestBase::DumpSSTableList() {
1097 std::string property;
1098 db_->GetProperty("rocksdb.sstables", &property);
1099 return property;
1100 }
1101
1102 void DBTestBase::GetSstFiles(Env* env, std::string path,
1103 std::vector<std::string>* files) {
1104 env->GetChildren(path, files);
1105
1106 files->erase(
1107 std::remove_if(files->begin(), files->end(), [](std::string name) {
1108 uint64_t number;
1109 FileType type;
1110 return !(ParseFileName(name, &number, &type) && type == kTableFile);
1111 }), files->end());
1112 }
1113
1114 int DBTestBase::GetSstFileCount(std::string path) {
1115 std::vector<std::string> files;
1116 DBTestBase::GetSstFiles(env_, path, &files);
1117 return static_cast<int>(files.size());
1118 }
1119
1120 // this will generate non-overlapping files since it keeps increasing key_idx
1121 void DBTestBase::GenerateNewFile(int cf, Random* rnd, int* key_idx,
1122 bool nowait) {
1123 for (int i = 0; i < KNumKeysByGenerateNewFile; i++) {
1124 ASSERT_OK(Put(cf, Key(*key_idx), RandomString(rnd, (i == 99) ? 1 : 990)));
1125 (*key_idx)++;
1126 }
1127 if (!nowait) {
1128 dbfull()->TEST_WaitForFlushMemTable();
1129 dbfull()->TEST_WaitForCompact();
1130 }
1131 }
1132
1133 // this will generate non-overlapping files since it keeps increasing key_idx
1134 void DBTestBase::GenerateNewFile(Random* rnd, int* key_idx, bool nowait) {
1135 for (int i = 0; i < KNumKeysByGenerateNewFile; i++) {
1136 ASSERT_OK(Put(Key(*key_idx), RandomString(rnd, (i == 99) ? 1 : 990)));
1137 (*key_idx)++;
1138 }
1139 if (!nowait) {
1140 dbfull()->TEST_WaitForFlushMemTable();
1141 dbfull()->TEST_WaitForCompact();
1142 }
1143 }
1144
1145 const int DBTestBase::kNumKeysByGenerateNewRandomFile = 51;
1146
1147 void DBTestBase::GenerateNewRandomFile(Random* rnd, bool nowait) {
1148 for (int i = 0; i < kNumKeysByGenerateNewRandomFile; i++) {
1149 ASSERT_OK(Put("key" + RandomString(rnd, 7), RandomString(rnd, 2000)));
1150 }
1151 ASSERT_OK(Put("key" + RandomString(rnd, 7), RandomString(rnd, 200)));
1152 if (!nowait) {
1153 dbfull()->TEST_WaitForFlushMemTable();
1154 dbfull()->TEST_WaitForCompact();
1155 }
1156 }
1157
1158 std::string DBTestBase::IterStatus(Iterator* iter) {
1159 std::string result;
1160 if (iter->Valid()) {
1161 result = iter->key().ToString() + "->" + iter->value().ToString();
1162 } else {
1163 result = "(invalid)";
1164 }
1165 return result;
1166 }
1167
1168 Options DBTestBase::OptionsForLogIterTest() {
1169 Options options = CurrentOptions();
1170 options.create_if_missing = true;
1171 options.WAL_ttl_seconds = 1000;
1172 return options;
1173 }
1174
1175 std::string DBTestBase::DummyString(size_t len, char c) {
1176 return std::string(len, c);
1177 }
1178
1179 void DBTestBase::VerifyIterLast(std::string expected_key, int cf) {
1180 Iterator* iter;
1181 ReadOptions ro;
1182 if (cf == 0) {
1183 iter = db_->NewIterator(ro);
1184 } else {
1185 iter = db_->NewIterator(ro, handles_[cf]);
1186 }
1187 iter->SeekToLast();
1188 ASSERT_EQ(IterStatus(iter), expected_key);
1189 delete iter;
1190 }
1191
1192 // Used to test InplaceUpdate
1193
// If the previous value is nullptr, sets newValue to a string of 'c'
// characters of delta's size and requests a full update.
// Otherwise, shrinks the previous value in place to a string of 'b'
// characters that is one byte shorter than the previous value.
1198 UpdateStatus DBTestBase::updateInPlaceSmallerSize(char* prevValue,
1199 uint32_t* prevSize,
1200 Slice delta,
1201 std::string* newValue) {
1202 if (prevValue == nullptr) {
1203 *newValue = std::string(delta.size(), 'c');
1204 return UpdateStatus::UPDATED;
1205 } else {
1206 *prevSize = *prevSize - 1;
1207 std::string str_b = std::string(*prevSize, 'b');
1208 memcpy(prevValue, str_b.c_str(), str_b.size());
1209 return UpdateStatus::UPDATED_INPLACE;
1210 }
1211 }
1212
1213 UpdateStatus DBTestBase::updateInPlaceSmallerVarintSize(char* prevValue,
1214 uint32_t* prevSize,
1215 Slice delta,
1216 std::string* newValue) {
1217 if (prevValue == nullptr) {
1218 *newValue = std::string(delta.size(), 'c');
1219 return UpdateStatus::UPDATED;
1220 } else {
1221 *prevSize = 1;
1222 std::string str_b = std::string(*prevSize, 'b');
1223 memcpy(prevValue, str_b.c_str(), str_b.size());
1224 return UpdateStatus::UPDATED_INPLACE;
1225 }
1226 }
1227
1228 UpdateStatus DBTestBase::updateInPlaceLargerSize(char* /*prevValue*/,
1229 uint32_t* /*prevSize*/,
1230 Slice delta,
1231 std::string* newValue) {
1232 *newValue = std::string(delta.size(), 'c');
1233 return UpdateStatus::UPDATED;
1234 }
1235
1236 UpdateStatus DBTestBase::updateInPlaceNoAction(char* /*prevValue*/,
1237 uint32_t* /*prevSize*/,
1238 Slice /*delta*/,
1239 std::string* /*newValue*/) {
1240 return UpdateStatus::UPDATE_FAILED;
1241 }
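
// Illustrative sketch (assuming the standard in-place-update fields in
// Options): tests plug one of the callbacks above in via
//   Options options = CurrentOptions();
//   options.inplace_update_support = true;
//   options.inplace_callback = DBTestBase::updateInPlaceSmallerSize;
//   Reopen(options);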
1242
1243 // Utility method to test InplaceUpdate
1244 void DBTestBase::validateNumberOfEntries(int numValues, int cf) {
1245 Arena arena;
1246 auto options = CurrentOptions();
1247 InternalKeyComparator icmp(options.comparator);
1248 ReadRangeDelAggregator range_del_agg(&icmp,
1249 kMaxSequenceNumber /* upper_bound */);
  // This should be defined after range_del_agg so that the assigned iterator
  // is destructed before range_del_agg is.
1252 ScopedArenaIterator iter;
1253 if (cf != 0) {
1254 iter.set(dbfull()->NewInternalIterator(&arena, &range_del_agg,
1255 kMaxSequenceNumber, handles_[cf]));
1256 } else {
1257 iter.set(dbfull()->NewInternalIterator(&arena, &range_del_agg,
1258 kMaxSequenceNumber));
1259 }
1260 iter->SeekToFirst();
1261 ASSERT_EQ(iter->status().ok(), true);
1262 int seq = numValues;
1263 while (iter->Valid()) {
1264 ParsedInternalKey ikey;
1265 ikey.clear();
1266 ASSERT_EQ(ParseInternalKey(iter->key(), &ikey), true);
1267
1268 // checks sequence number for updates
1269 ASSERT_EQ(ikey.sequence, (unsigned)seq--);
1270 iter->Next();
1271 }
1272 ASSERT_EQ(0, seq);
1273 }
1274
1275 void DBTestBase::CopyFile(const std::string& source,
1276 const std::string& destination, uint64_t size) {
1277 const EnvOptions soptions;
1278 std::unique_ptr<SequentialFile> srcfile;
1279 ASSERT_OK(env_->NewSequentialFile(source, &srcfile, soptions));
1280 std::unique_ptr<WritableFile> destfile;
1281 ASSERT_OK(env_->NewWritableFile(destination, &destfile, soptions));
1282
1283 if (size == 0) {
1284 // default argument means copy everything
1285 ASSERT_OK(env_->GetFileSize(source, &size));
1286 }
1287
1288 char buffer[4096];
1289 Slice slice;
1290 while (size > 0) {
1291 uint64_t one = std::min(uint64_t(sizeof(buffer)), size);
1292 ASSERT_OK(srcfile->Read(one, &slice, buffer));
1293 ASSERT_OK(destfile->Append(slice));
1294 size -= slice.size();
1295 }
1296 ASSERT_OK(destfile->Close());
1297 }
1298
1299 std::unordered_map<std::string, uint64_t> DBTestBase::GetAllSSTFiles(
1300 uint64_t* total_size) {
1301 std::unordered_map<std::string, uint64_t> res;
1302
1303 if (total_size) {
1304 *total_size = 0;
1305 }
1306 std::vector<std::string> files;
1307 env_->GetChildren(dbname_, &files);
1308 for (auto& file_name : files) {
1309 uint64_t number;
1310 FileType type;
1311 std::string file_path = dbname_ + "/" + file_name;
1312 if (ParseFileName(file_name, &number, &type) && type == kTableFile) {
1313 uint64_t file_size = 0;
1314 env_->GetFileSize(file_path, &file_size);
1315 res[file_path] = file_size;
1316 if (total_size) {
1317 *total_size += file_size;
1318 }
1319 }
1320 }
1321 return res;
1322 }
1323
1324 std::vector<std::uint64_t> DBTestBase::ListTableFiles(Env* env,
1325 const std::string& path) {
1326 std::vector<std::string> files;
1327 std::vector<uint64_t> file_numbers;
1328 env->GetChildren(path, &files);
1329 uint64_t number;
1330 FileType type;
1331 for (size_t i = 0; i < files.size(); ++i) {
1332 if (ParseFileName(files[i], &number, &type)) {
1333 if (type == kTableFile) {
1334 file_numbers.push_back(number);
1335 }
1336 }
1337 }
1338 return file_numbers;
1339 }
1340
1341 void DBTestBase::VerifyDBFromMap(std::map<std::string, std::string> true_data,
1342 size_t* total_reads_res, bool tailing_iter,
1343 std::map<std::string, Status> status) {
1344 size_t total_reads = 0;
1345
1346 for (auto& kv : true_data) {
1347 Status s = status[kv.first];
1348 if (s.ok()) {
1349 ASSERT_EQ(Get(kv.first), kv.second);
1350 } else {
1351 std::string value;
1352 ASSERT_EQ(s, db_->Get(ReadOptions(), kv.first, &value));
1353 }
1354 total_reads++;
1355 }
1356
1357 // Normal Iterator
1358 {
1359 int iter_cnt = 0;
1360 ReadOptions ro;
1361 ro.total_order_seek = true;
1362 Iterator* iter = db_->NewIterator(ro);
1363 // Verify Iterator::Next()
1364 iter_cnt = 0;
1365 auto data_iter = true_data.begin();
1366 Status s;
1367 for (iter->SeekToFirst(); iter->Valid(); iter->Next(), data_iter++) {
1368 ASSERT_EQ(iter->key().ToString(), data_iter->first);
1369 Status current_status = status[data_iter->first];
1370 if (!current_status.ok()) {
1371 s = current_status;
1372 }
1373 ASSERT_EQ(iter->status(), s);
1374 if (current_status.ok()) {
1375 ASSERT_EQ(iter->value().ToString(), data_iter->second);
1376 }
1377 iter_cnt++;
1378 total_reads++;
1379 }
1380 ASSERT_EQ(data_iter, true_data.end()) << iter_cnt << " / "
1381 << true_data.size();
1382 delete iter;
1383
1384 // Verify Iterator::Prev()
1385 // Use a new iterator to make sure its status is clean.
1386 iter = db_->NewIterator(ro);
1387 iter_cnt = 0;
1388 s = Status::OK();
1389 auto data_rev = true_data.rbegin();
1390 for (iter->SeekToLast(); iter->Valid(); iter->Prev(), data_rev++) {
1391 ASSERT_EQ(iter->key().ToString(), data_rev->first);
1392 Status current_status = status[data_rev->first];
1393 if (!current_status.ok()) {
1394 s = current_status;
1395 }
1396 ASSERT_EQ(iter->status(), s);
1397 if (current_status.ok()) {
1398 ASSERT_EQ(iter->value().ToString(), data_rev->second);
1399 }
1400 iter_cnt++;
1401 total_reads++;
1402 }
1403 ASSERT_EQ(data_rev, true_data.rend()) << iter_cnt << " / "
1404 << true_data.size();
1405
1406 // Verify Iterator::Seek()
1407 for (auto kv : true_data) {
1408 iter->Seek(kv.first);
1409 ASSERT_EQ(kv.first, iter->key().ToString());
1410 ASSERT_EQ(kv.second, iter->value().ToString());
1411 total_reads++;
1412 }
1413 delete iter;
1414 }
1415
1416 if (tailing_iter) {
1417 #ifndef ROCKSDB_LITE
1418 // Tailing iterator
1419 int iter_cnt = 0;
1420 ReadOptions ro;
1421 ro.tailing = true;
1422 ro.total_order_seek = true;
1423 Iterator* iter = db_->NewIterator(ro);
1424
1425 // Verify ForwardIterator::Next()
1426 iter_cnt = 0;
1427 auto data_iter = true_data.begin();
1428 for (iter->SeekToFirst(); iter->Valid(); iter->Next(), data_iter++) {
1429 ASSERT_EQ(iter->key().ToString(), data_iter->first);
1430 ASSERT_EQ(iter->value().ToString(), data_iter->second);
1431 iter_cnt++;
1432 total_reads++;
1433 }
1434 ASSERT_EQ(data_iter, true_data.end()) << iter_cnt << " / "
1435 << true_data.size();
1436
1437 // Verify ForwardIterator::Seek()
1438 for (auto kv : true_data) {
1439 iter->Seek(kv.first);
1440 ASSERT_EQ(kv.first, iter->key().ToString());
1441 ASSERT_EQ(kv.second, iter->value().ToString());
1442 total_reads++;
1443 }
1444
1445 delete iter;
1446 #endif // ROCKSDB_LITE
1447 }
1448
1449 if (total_reads_res) {
1450 *total_reads_res = total_reads;
1451 }
1452 }
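
// Illustrative: tests build the expected map as they write and then verify,
// e.g.
//   std::map<std::string, std::string> true_data;
//   // ... for every successful Put(k, v): true_data[k] = v; ...
//   VerifyDBFromMap(true_data);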
1453
1454 void DBTestBase::VerifyDBInternal(
1455 std::vector<std::pair<std::string, std::string>> true_data) {
1456 Arena arena;
1457 InternalKeyComparator icmp(last_options_.comparator);
1458 ReadRangeDelAggregator range_del_agg(&icmp,
1459 kMaxSequenceNumber /* upper_bound */);
1460 auto iter =
1461 dbfull()->NewInternalIterator(&arena, &range_del_agg, kMaxSequenceNumber);
1462 iter->SeekToFirst();
1463 for (auto p : true_data) {
1464 ASSERT_TRUE(iter->Valid());
1465 ParsedInternalKey ikey;
1466 ASSERT_TRUE(ParseInternalKey(iter->key(), &ikey));
1467 ASSERT_EQ(p.first, ikey.user_key);
1468 ASSERT_EQ(p.second, iter->value());
1469 iter->Next();
1470 };
1471 ASSERT_FALSE(iter->Valid());
1472 iter->~InternalIterator();
1473 }
1474
1475 #ifndef ROCKSDB_LITE
1476
1477 uint64_t DBTestBase::GetNumberOfSstFilesForColumnFamily(
1478 DB* db, std::string column_family_name) {
1479 std::vector<LiveFileMetaData> metadata;
1480 db->GetLiveFilesMetaData(&metadata);
1481 uint64_t result = 0;
1482 for (auto& fileMetadata : metadata) {
1483 result += (fileMetadata.column_family_name == column_family_name);
1484 }
1485 return result;
1486 }
1487 #endif // ROCKSDB_LITE
1488
1489 } // namespace rocksdb