1 // Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
2 // This source code is licensed under both the GPLv2 (found in the
3 // COPYING file in the root directory) and Apache 2.0 License
4 // (found in the LICENSE.Apache file in the root directory).
6 // Copyright (c) 2011 The LevelDB Authors. All rights reserved.
7 // Use of this source code is governed by a BSD-style license that can be
8 // found in the LICENSE file. See the AUTHORS file for names of contributors.
13 fprintf(stderr
, "Please install gflags to run rocksdb tools\n");
22 #include <type_traits>
25 #include "db/dbformat.h"
26 #include "db/memtable.h"
27 #include "memory/arena.h"
28 #include "port/port.h"
29 #include "port/stack_trace.h"
30 #include "rocksdb/comparator.h"
31 #include "rocksdb/convenience.h"
32 #include "rocksdb/memtablerep.h"
33 #include "rocksdb/options.h"
34 #include "rocksdb/slice_transform.h"
35 #include "rocksdb/system_clock.h"
36 #include "rocksdb/write_buffer_manager.h"
37 #include "test_util/testutil.h"
38 #include "util/gflags_compat.h"
39 #include "util/mutexlock.h"
40 #include "util/stop_watch.h"
42 using GFLAGS_NAMESPACE::ParseCommandLineFlags
;
43 using GFLAGS_NAMESPACE::RegisterFlagValidator
;
44 using GFLAGS_NAMESPACE::SetUsageMessage
;
46 DEFINE_string(benchmarks
, "fillrandom",
47 "Comma-separated list of benchmarks to run. Options:\n"
48 "\tfillrandom -- write N random values\n"
49 "\tfillseq -- write N values in sequential order\n"
50 "\treadrandom -- read N values in random order\n"
51 "\treadseq -- scan the DB\n"
52 "\treadwrite -- 1 thread writes while N - 1 threads "
55 "\tseqreadwrite -- 1 thread writes while N - 1 threads "
58 DEFINE_string(memtablerep
, "skiplist",
59 "Which implementation of memtablerep to use. See "
60 "include/memtablerep.h for\n"
61 " more details. Options:\n"
62 "\tskiplist -- backed by a skiplist\n"
63 "\tvector -- backed by an std::vector\n"
64 "\thashskiplist -- backed by a hash skip list\n"
65 "\thashlinklist -- backed by a hash linked list\n"
66 "\tcuckoo -- backed by a cuckoo hash table");
68 DEFINE_int64(bucket_count
, 1000000,
69 "bucket_count parameter to pass into NewHashSkiplistRepFactory or "
70 "NewHashLinkListRepFactory");
73 hashskiplist_height
, 4,
74 "skiplist_height parameter to pass into NewHashSkiplistRepFactory");
77 hashskiplist_branching_factor
, 4,
78 "branching_factor parameter to pass into NewHashSkiplistRepFactory");
81 huge_page_tlb_size
, 0,
82 "huge_page_tlb_size parameter to pass into NewHashLinkListRepFactory");
84 DEFINE_int32(bucket_entries_logging_threshold
, 4096,
85 "bucket_entries_logging_threshold parameter to pass into "
86 "NewHashLinkListRepFactory");
88 DEFINE_bool(if_log_bucket_dist_when_flash
, true,
89 "if_log_bucket_dist_when_flash parameter to pass into "
90 "NewHashLinkListRepFactory");
93 threshold_use_skiplist
, 256,
94 "threshold_use_skiplist parameter to pass into NewHashLinkListRepFactory");
96 DEFINE_int64(write_buffer_size
, 256,
97 "write_buffer_size parameter to pass into WriteBufferManager");
101 "Number of concurrent threads to run. If the benchmark includes writes,\n"
102 "then at most one thread will be a writer");
104 DEFINE_int32(num_operations
, 1000000,
105 "Number of operations to do for write and random read benchmarks");
107 DEFINE_int32(num_scans
, 10,
108 "Number of times for each thread to scan the memtablerep for "
112 DEFINE_int32(item_size
, 100, "Number of bytes each item should be");
114 DEFINE_int32(prefix_length
, 8,
115 "Prefix length to pass into NewFixedPrefixTransform");
117 /* VectorRep settings */
118 DEFINE_int64(vectorrep_count
, 0,
119 "Number of entries to reserve on VectorRep initialization");
121 DEFINE_int64(seed
, 0,
122 "Seed base for random number generators. "
123 "When 0 it is deterministic.");
125 namespace ROCKSDB_NAMESPACE
{
128 struct CallbackVerifyArgs
{
132 InternalKeyComparator
* comparator
;
136 // Helper for quickly generating random data.
137 class RandomGenerator
{
145 auto size
= (unsigned)std::max(1048576, FLAGS_item_size
);
146 data_
= rnd
.RandomString(size
);
150 Slice
Generate(unsigned int len
) {
151 assert(len
<= data_
.size());
152 if (pos_
+ len
> data_
.size()) {
156 return Slice(data_
.data() + pos_
- len
, len
);
160 enum WriteMode
{ SEQUENTIAL
, RANDOM
, UNIQUE_RANDOM
};
164 KeyGenerator(Random64
* rand
, WriteMode mode
, uint64_t num
)
165 : rand_(rand
), mode_(mode
), num_(num
), next_(0) {
166 if (mode_
== UNIQUE_RANDOM
) {
167 // NOTE: if memory consumption of this approach becomes a concern,
168 // we can either break it into pieces and only random shuffle a section
169 // each time. Alternatively, use a bit map implementation
170 // (https://reviews.facebook.net/differential/diff/54627/)
171 values_
.resize(num_
);
172 for (uint64_t i
= 0; i
< num_
; ++i
) {
175 RandomShuffle(values_
.begin(), values_
.end(),
176 static_cast<uint32_t>(FLAGS_seed
));
185 return rand_
->Next() % num_
;
187 return values_
[next_
++];
190 return std::numeric_limits
<uint64_t>::max();
198 std::vector
<uint64_t> values_
;
201 class BenchmarkThread
{
203 explicit BenchmarkThread(MemTableRep
* table
, KeyGenerator
* key_gen
,
204 uint64_t* bytes_written
, uint64_t* bytes_read
,
205 uint64_t* sequence
, uint64_t num_ops
,
209 bytes_written_(bytes_written
),
210 bytes_read_(bytes_read
),
213 read_hits_(read_hits
) {}
215 virtual void operator()() = 0;
216 virtual ~BenchmarkThread() {}
220 KeyGenerator
* key_gen_
;
221 uint64_t* bytes_written_
;
222 uint64_t* bytes_read_
;
225 uint64_t* read_hits_
;
226 RandomGenerator generator_
;
229 class FillBenchmarkThread
: public BenchmarkThread
{
231 FillBenchmarkThread(MemTableRep
* table
, KeyGenerator
* key_gen
,
232 uint64_t* bytes_written
, uint64_t* bytes_read
,
233 uint64_t* sequence
, uint64_t num_ops
, uint64_t* read_hits
)
234 : BenchmarkThread(table
, key_gen
, bytes_written
, bytes_read
, sequence
,
235 num_ops
, read_hits
) {}
239 auto internal_key_size
= 16;
241 FLAGS_item_size
+ VarintLength(internal_key_size
) + internal_key_size
;
242 KeyHandle handle
= table_
->Allocate(encoded_len
, &buf
);
243 assert(buf
!= nullptr);
244 char* p
= EncodeVarint32(buf
, internal_key_size
);
245 auto key
= key_gen_
->Next();
246 EncodeFixed64(p
, key
);
248 EncodeFixed64(p
, ++(*sequence_
));
250 Slice bytes
= generator_
.Generate(FLAGS_item_size
);
251 memcpy(p
, bytes
.data(), FLAGS_item_size
);
252 p
+= FLAGS_item_size
;
253 assert(p
== buf
+ encoded_len
);
254 table_
->Insert(handle
);
255 *bytes_written_
+= encoded_len
;
258 void operator()() override
{
259 for (unsigned int i
= 0; i
< num_ops_
; ++i
) {
265 class ConcurrentFillBenchmarkThread
: public FillBenchmarkThread
{
267 ConcurrentFillBenchmarkThread(MemTableRep
* table
, KeyGenerator
* key_gen
,
268 uint64_t* bytes_written
, uint64_t* bytes_read
,
269 uint64_t* sequence
, uint64_t num_ops
,
271 std::atomic_int
* threads_done
)
272 : FillBenchmarkThread(table
, key_gen
, bytes_written
, bytes_read
, sequence
,
273 num_ops
, read_hits
) {
274 threads_done_
= threads_done
;
277 void operator()() override
{
278 // # of read threads will be total threads - write threads (always 1). Loop
279 // while all reads complete.
280 while ((*threads_done_
).load() < (FLAGS_num_threads
- 1)) {
286 std::atomic_int
* threads_done_
;
289 class ReadBenchmarkThread
: public BenchmarkThread
{
291 ReadBenchmarkThread(MemTableRep
* table
, KeyGenerator
* key_gen
,
292 uint64_t* bytes_written
, uint64_t* bytes_read
,
293 uint64_t* sequence
, uint64_t num_ops
, uint64_t* read_hits
)
294 : BenchmarkThread(table
, key_gen
, bytes_written
, bytes_read
, sequence
,
295 num_ops
, read_hits
) {}
297 static bool callback(void* arg
, const char* entry
) {
298 CallbackVerifyArgs
* callback_args
= static_cast<CallbackVerifyArgs
*>(arg
);
299 assert(callback_args
!= nullptr);
301 const char* key_ptr
= GetVarint32Ptr(entry
, entry
+ 5, &key_length
);
302 if ((callback_args
->comparator
)
304 ->Equal(Slice(key_ptr
, key_length
- 8),
305 callback_args
->key
->user_key())) {
306 callback_args
->found
= true;
312 std::string user_key
;
313 auto key
= key_gen_
->Next();
314 PutFixed64(&user_key
, key
);
315 LookupKey
lookup_key(user_key
, *sequence_
);
316 InternalKeyComparator
internal_key_comp(BytewiseComparator());
317 CallbackVerifyArgs verify_args
;
318 verify_args
.found
= false;
319 verify_args
.key
= &lookup_key
;
320 verify_args
.table
= table_
;
321 verify_args
.comparator
= &internal_key_comp
;
322 table_
->Get(lookup_key
, &verify_args
, callback
);
323 if (verify_args
.found
) {
324 *bytes_read_
+= VarintLength(16) + 16 + FLAGS_item_size
;
328 void operator()() override
{
329 for (unsigned int i
= 0; i
< num_ops_
; ++i
) {
335 class SeqReadBenchmarkThread
: public BenchmarkThread
{
337 SeqReadBenchmarkThread(MemTableRep
* table
, KeyGenerator
* key_gen
,
338 uint64_t* bytes_written
, uint64_t* bytes_read
,
339 uint64_t* sequence
, uint64_t num_ops
,
341 : BenchmarkThread(table
, key_gen
, bytes_written
, bytes_read
, sequence
,
342 num_ops
, read_hits
) {}
345 std::unique_ptr
<MemTableRep::Iterator
> iter(table_
->GetIterator());
346 for (iter
->SeekToFirst(); iter
->Valid(); iter
->Next()) {
347 // pretend to read the value
348 *bytes_read_
+= VarintLength(16) + 16 + FLAGS_item_size
;
353 void operator()() override
{
354 for (unsigned int i
= 0; i
< num_ops_
; ++i
) {
360 class ConcurrentReadBenchmarkThread
: public ReadBenchmarkThread
{
362 ConcurrentReadBenchmarkThread(MemTableRep
* table
, KeyGenerator
* key_gen
,
363 uint64_t* bytes_written
, uint64_t* bytes_read
,
364 uint64_t* sequence
, uint64_t num_ops
,
366 std::atomic_int
* threads_done
)
367 : ReadBenchmarkThread(table
, key_gen
, bytes_written
, bytes_read
, sequence
,
368 num_ops
, read_hits
) {
369 threads_done_
= threads_done
;
372 void operator()() override
{
373 for (unsigned int i
= 0; i
< num_ops_
; ++i
) {
380 std::atomic_int
* threads_done_
;
383 class SeqConcurrentReadBenchmarkThread
: public SeqReadBenchmarkThread
{
385 SeqConcurrentReadBenchmarkThread(MemTableRep
* table
, KeyGenerator
* key_gen
,
386 uint64_t* bytes_written
,
387 uint64_t* bytes_read
, uint64_t* sequence
,
388 uint64_t num_ops
, uint64_t* read_hits
,
389 std::atomic_int
* threads_done
)
390 : SeqReadBenchmarkThread(table
, key_gen
, bytes_written
, bytes_read
,
391 sequence
, num_ops
, read_hits
) {
392 threads_done_
= threads_done
;
395 void operator()() override
{
396 for (unsigned int i
= 0; i
< num_ops_
; ++i
) {
403 std::atomic_int
* threads_done_
;
408 explicit Benchmark(MemTableRep
* table
, KeyGenerator
* key_gen
,
409 uint64_t* sequence
, uint32_t num_threads
)
413 num_threads_(num_threads
) {}
415 virtual ~Benchmark() {}
417 std::cout
<< "Number of threads: " << num_threads_
<< std::endl
;
418 std::vector
<port::Thread
> threads
;
419 uint64_t bytes_written
= 0;
420 uint64_t bytes_read
= 0;
421 uint64_t read_hits
= 0;
422 StopWatchNano
timer(SystemClock::Default().get(), true);
423 RunThreads(&threads
, &bytes_written
, &bytes_read
, true, &read_hits
);
424 auto elapsed_time
= static_cast<double>(timer
.ElapsedNanos() / 1000);
425 std::cout
<< "Elapsed time: " << static_cast<int>(elapsed_time
) << " us"
428 if (bytes_written
> 0) {
429 auto MiB_written
= static_cast<double>(bytes_written
) / (1 << 20);
430 auto write_throughput
= MiB_written
/ (elapsed_time
/ 1000000);
431 std::cout
<< "Total bytes written: " << MiB_written
<< " MiB"
433 std::cout
<< "Write throughput: " << write_throughput
<< " MiB/s"
435 auto us_per_op
= elapsed_time
/ num_write_ops_per_thread_
;
436 std::cout
<< "write us/op: " << us_per_op
<< std::endl
;
438 if (bytes_read
> 0) {
439 auto MiB_read
= static_cast<double>(bytes_read
) / (1 << 20);
440 auto read_throughput
= MiB_read
/ (elapsed_time
/ 1000000);
441 std::cout
<< "Total bytes read: " << MiB_read
<< " MiB" << std::endl
;
442 std::cout
<< "Read throughput: " << read_throughput
<< " MiB/s"
444 auto us_per_op
= elapsed_time
/ num_read_ops_per_thread_
;
445 std::cout
<< "read us/op: " << us_per_op
<< std::endl
;
449 virtual void RunThreads(std::vector
<port::Thread
>* threads
,
450 uint64_t* bytes_written
, uint64_t* bytes_read
,
451 bool write
, uint64_t* read_hits
) = 0;
455 KeyGenerator
* key_gen_
;
457 uint64_t num_write_ops_per_thread_
= 0;
458 uint64_t num_read_ops_per_thread_
= 0;
459 const uint32_t num_threads_
;
462 class FillBenchmark
: public Benchmark
{
464 explicit FillBenchmark(MemTableRep
* table
, KeyGenerator
* key_gen
,
466 : Benchmark(table
, key_gen
, sequence
, 1) {
467 num_write_ops_per_thread_
= FLAGS_num_operations
;
470 void RunThreads(std::vector
<port::Thread
>* /*threads*/,
471 uint64_t* bytes_written
, uint64_t* bytes_read
, bool /*write*/,
472 uint64_t* read_hits
) override
{
473 FillBenchmarkThread(table_
, key_gen_
, bytes_written
, bytes_read
, sequence_
,
474 num_write_ops_per_thread_
, read_hits
)();
478 class ReadBenchmark
: public Benchmark
{
480 explicit ReadBenchmark(MemTableRep
* table
, KeyGenerator
* key_gen
,
482 : Benchmark(table
, key_gen
, sequence
, FLAGS_num_threads
) {
483 num_read_ops_per_thread_
= FLAGS_num_operations
/ FLAGS_num_threads
;
486 void RunThreads(std::vector
<port::Thread
>* threads
, uint64_t* bytes_written
,
487 uint64_t* bytes_read
, bool /*write*/,
488 uint64_t* read_hits
) override
{
489 for (int i
= 0; i
< FLAGS_num_threads
; ++i
) {
490 threads
->emplace_back(
491 ReadBenchmarkThread(table_
, key_gen_
, bytes_written
, bytes_read
,
492 sequence_
, num_read_ops_per_thread_
, read_hits
));
494 for (auto& thread
: *threads
) {
497 std::cout
<< "read hit%: "
498 << (static_cast<double>(*read_hits
) / FLAGS_num_operations
) * 100
503 class SeqReadBenchmark
: public Benchmark
{
505 explicit SeqReadBenchmark(MemTableRep
* table
, uint64_t* sequence
)
506 : Benchmark(table
, nullptr, sequence
, FLAGS_num_threads
) {
507 num_read_ops_per_thread_
= FLAGS_num_scans
;
510 void RunThreads(std::vector
<port::Thread
>* threads
, uint64_t* bytes_written
,
511 uint64_t* bytes_read
, bool /*write*/,
512 uint64_t* read_hits
) override
{
513 for (int i
= 0; i
< FLAGS_num_threads
; ++i
) {
514 threads
->emplace_back(SeqReadBenchmarkThread(
515 table_
, key_gen_
, bytes_written
, bytes_read
, sequence_
,
516 num_read_ops_per_thread_
, read_hits
));
518 for (auto& thread
: *threads
) {
524 template <class ReadThreadType
>
525 class ReadWriteBenchmark
: public Benchmark
{
527 explicit ReadWriteBenchmark(MemTableRep
* table
, KeyGenerator
* key_gen
,
529 : Benchmark(table
, key_gen
, sequence
, FLAGS_num_threads
) {
530 num_read_ops_per_thread_
=
531 FLAGS_num_threads
<= 1
533 : (FLAGS_num_operations
/ (FLAGS_num_threads
- 1));
534 num_write_ops_per_thread_
= FLAGS_num_operations
;
537 void RunThreads(std::vector
<port::Thread
>* threads
, uint64_t* bytes_written
,
538 uint64_t* bytes_read
, bool /*write*/,
539 uint64_t* read_hits
) override
{
540 std::atomic_int threads_done
;
541 threads_done
.store(0);
542 threads
->emplace_back(ConcurrentFillBenchmarkThread(
543 table_
, key_gen_
, bytes_written
, bytes_read
, sequence_
,
544 num_write_ops_per_thread_
, read_hits
, &threads_done
));
545 for (int i
= 1; i
< FLAGS_num_threads
; ++i
) {
546 threads
->emplace_back(
547 ReadThreadType(table_
, key_gen_
, bytes_written
, bytes_read
, sequence_
,
548 num_read_ops_per_thread_
, read_hits
, &threads_done
));
550 for (auto& thread
: *threads
) {
556 } // namespace ROCKSDB_NAMESPACE
// Warn when the binary was built in a configuration that skews benchmark
// numbers (no optimization, or assertions enabled).
// NOTE(review): the fprintf calls and the NDEBUG guard were partially dropped
// in the paste; reconstructed — confirm against upstream.
void PrintWarnings() {
#if defined(__GNUC__) && !defined(__OPTIMIZE__)
  fprintf(stdout,
          "WARNING: Optimization is disabled: benchmarks unnecessarily slow\n");
#endif
#ifndef NDEBUG
  fprintf(stdout,
          "WARNING: Assertions are enabled; benchmarks unnecessarily slow\n");
#endif
}
569 int main(int argc
, char** argv
) {
570 ROCKSDB_NAMESPACE::port::InstallStackTraceHandler();
571 SetUsageMessage(std::string("\nUSAGE:\n") + std::string(argv
[0]) +
573 ParseCommandLineFlags(&argc
, &argv
, true);
577 ROCKSDB_NAMESPACE::Options options
;
579 std::unique_ptr
<ROCKSDB_NAMESPACE::MemTableRepFactory
> factory
;
580 if (FLAGS_memtablerep
== "skiplist") {
581 factory
.reset(new ROCKSDB_NAMESPACE::SkipListFactory
);
583 } else if (FLAGS_memtablerep
== "vector") {
584 factory
.reset(new ROCKSDB_NAMESPACE::VectorRepFactory
);
585 } else if (FLAGS_memtablerep
== "hashskiplist" ||
586 FLAGS_memtablerep
== "prefix_hash") {
587 factory
.reset(ROCKSDB_NAMESPACE::NewHashSkipListRepFactory(
588 FLAGS_bucket_count
, FLAGS_hashskiplist_height
,
589 FLAGS_hashskiplist_branching_factor
));
590 options
.prefix_extractor
.reset(
591 ROCKSDB_NAMESPACE::NewFixedPrefixTransform(FLAGS_prefix_length
));
592 } else if (FLAGS_memtablerep
== "hashlinklist" ||
593 FLAGS_memtablerep
== "hash_linkedlist") {
594 factory
.reset(ROCKSDB_NAMESPACE::NewHashLinkListRepFactory(
595 FLAGS_bucket_count
, FLAGS_huge_page_tlb_size
,
596 FLAGS_bucket_entries_logging_threshold
,
597 FLAGS_if_log_bucket_dist_when_flash
, FLAGS_threshold_use_skiplist
));
598 options
.prefix_extractor
.reset(
599 ROCKSDB_NAMESPACE::NewFixedPrefixTransform(FLAGS_prefix_length
));
600 #endif // ROCKSDB_LITE
602 ROCKSDB_NAMESPACE::ConfigOptions config_options
;
603 config_options
.ignore_unsupported_options
= false;
605 ROCKSDB_NAMESPACE::Status s
=
606 ROCKSDB_NAMESPACE::MemTableRepFactory::CreateFromString(
607 config_options
, FLAGS_memtablerep
, &factory
);
609 fprintf(stdout
, "Unknown memtablerep: %s\n", s
.ToString().c_str());
614 ROCKSDB_NAMESPACE::InternalKeyComparator
internal_key_comp(
615 ROCKSDB_NAMESPACE::BytewiseComparator());
616 ROCKSDB_NAMESPACE::MemTable::KeyComparator
key_comp(internal_key_comp
);
617 ROCKSDB_NAMESPACE::Arena arena
;
618 ROCKSDB_NAMESPACE::WriteBufferManager
wb(FLAGS_write_buffer_size
);
620 auto createMemtableRep
= [&] {
622 return factory
->CreateMemTableRep(key_comp
, &arena
,
623 options
.prefix_extractor
.get(),
624 options
.info_log
.get());
626 std::unique_ptr
<ROCKSDB_NAMESPACE::MemTableRep
> memtablerep
;
627 ROCKSDB_NAMESPACE::Random64
rng(FLAGS_seed
);
628 const char* benchmarks
= FLAGS_benchmarks
.c_str();
629 while (benchmarks
!= nullptr) {
630 std::unique_ptr
<ROCKSDB_NAMESPACE::KeyGenerator
> key_gen
;
631 const char* sep
= strchr(benchmarks
, ',');
632 ROCKSDB_NAMESPACE::Slice name
;
633 if (sep
== nullptr) {
635 benchmarks
= nullptr;
637 name
= ROCKSDB_NAMESPACE::Slice(benchmarks
, sep
- benchmarks
);
638 benchmarks
= sep
+ 1;
640 std::unique_ptr
<ROCKSDB_NAMESPACE::Benchmark
> benchmark
;
641 if (name
== ROCKSDB_NAMESPACE::Slice("fillseq")) {
642 memtablerep
.reset(createMemtableRep());
643 key_gen
.reset(new ROCKSDB_NAMESPACE::KeyGenerator(
644 &rng
, ROCKSDB_NAMESPACE::SEQUENTIAL
, FLAGS_num_operations
));
645 benchmark
.reset(new ROCKSDB_NAMESPACE::FillBenchmark(
646 memtablerep
.get(), key_gen
.get(), &sequence
));
647 } else if (name
== ROCKSDB_NAMESPACE::Slice("fillrandom")) {
648 memtablerep
.reset(createMemtableRep());
649 key_gen
.reset(new ROCKSDB_NAMESPACE::KeyGenerator(
650 &rng
, ROCKSDB_NAMESPACE::UNIQUE_RANDOM
, FLAGS_num_operations
));
651 benchmark
.reset(new ROCKSDB_NAMESPACE::FillBenchmark(
652 memtablerep
.get(), key_gen
.get(), &sequence
));
653 } else if (name
== ROCKSDB_NAMESPACE::Slice("readrandom")) {
654 key_gen
.reset(new ROCKSDB_NAMESPACE::KeyGenerator(
655 &rng
, ROCKSDB_NAMESPACE::RANDOM
, FLAGS_num_operations
));
656 benchmark
.reset(new ROCKSDB_NAMESPACE::ReadBenchmark(
657 memtablerep
.get(), key_gen
.get(), &sequence
));
658 } else if (name
== ROCKSDB_NAMESPACE::Slice("readseq")) {
659 key_gen
.reset(new ROCKSDB_NAMESPACE::KeyGenerator(
660 &rng
, ROCKSDB_NAMESPACE::SEQUENTIAL
, FLAGS_num_operations
));
661 benchmark
.reset(new ROCKSDB_NAMESPACE::SeqReadBenchmark(memtablerep
.get(),
663 } else if (name
== ROCKSDB_NAMESPACE::Slice("readwrite")) {
664 memtablerep
.reset(createMemtableRep());
665 key_gen
.reset(new ROCKSDB_NAMESPACE::KeyGenerator(
666 &rng
, ROCKSDB_NAMESPACE::RANDOM
, FLAGS_num_operations
));
667 benchmark
.reset(new ROCKSDB_NAMESPACE::ReadWriteBenchmark
<
668 ROCKSDB_NAMESPACE::ConcurrentReadBenchmarkThread
>(
669 memtablerep
.get(), key_gen
.get(), &sequence
));
670 } else if (name
== ROCKSDB_NAMESPACE::Slice("seqreadwrite")) {
671 memtablerep
.reset(createMemtableRep());
672 key_gen
.reset(new ROCKSDB_NAMESPACE::KeyGenerator(
673 &rng
, ROCKSDB_NAMESPACE::RANDOM
, FLAGS_num_operations
));
674 benchmark
.reset(new ROCKSDB_NAMESPACE::ReadWriteBenchmark
<
675 ROCKSDB_NAMESPACE::SeqConcurrentReadBenchmarkThread
>(
676 memtablerep
.get(), key_gen
.get(), &sequence
));
678 std::cout
<< "WARNING: skipping unknown benchmark '" << name
.ToString()
682 std::cout
<< "Running " << name
.ToString() << std::endl
;