1 // Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
2 // This source code is licensed under both the GPLv2 (found in the
3 // COPYING file in the root directory) and Apache 2.0 License
4 // (found in the LICENSE.Apache file in the root directory).
6 // Copyright (c) 2011 The LevelDB Authors. All rights reserved.
7 // Use of this source code is governed by a BSD-style license that can be
8 // found in the LICENSE file. See the AUTHORS file for names of contributors.
13 fprintf(stderr
, "Please install gflags to run rocksdb tools\n");
22 #include <type_traits>
25 #include "db/dbformat.h"
26 #include "db/memtable.h"
27 #include "memory/arena.h"
28 #include "port/port.h"
29 #include "port/stack_trace.h"
30 #include "rocksdb/comparator.h"
31 #include "rocksdb/memtablerep.h"
32 #include "rocksdb/options.h"
33 #include "rocksdb/slice_transform.h"
34 #include "rocksdb/write_buffer_manager.h"
35 #include "test_util/testutil.h"
36 #include "util/gflags_compat.h"
37 #include "util/mutexlock.h"
38 #include "util/stop_watch.h"
40 using GFLAGS_NAMESPACE::ParseCommandLineFlags
;
41 using GFLAGS_NAMESPACE::RegisterFlagValidator
;
42 using GFLAGS_NAMESPACE::SetUsageMessage
;
44 DEFINE_string(benchmarks
, "fillrandom",
45 "Comma-separated list of benchmarks to run. Options:\n"
46 "\tfillrandom -- write N random values\n"
47 "\tfillseq -- write N values in sequential order\n"
48 "\treadrandom -- read N values in random order\n"
49 "\treadseq -- scan the DB\n"
50 "\treadwrite -- 1 thread writes while N - 1 threads "
53 "\tseqreadwrite -- 1 thread writes while N - 1 threads "
56 DEFINE_string(memtablerep
, "skiplist",
57 "Which implementation of memtablerep to use. See "
58 "include/memtablerep.h for\n"
59 " more details. Options:\n"
60 "\tskiplist -- backed by a skiplist\n"
61 "\tvector -- backed by an std::vector\n"
62 "\thashskiplist -- backed by a hash skip list\n"
63 "\thashlinklist -- backed by a hash linked list\n"
64 "\tcuckoo -- backed by a cuckoo hash table");
66 DEFINE_int64(bucket_count
, 1000000,
67 "bucket_count parameter to pass into NewHashSkiplistRepFactory or "
68 "NewHashLinkListRepFactory");
71 hashskiplist_height
, 4,
72 "skiplist_height parameter to pass into NewHashSkiplistRepFactory");
75 hashskiplist_branching_factor
, 4,
76 "branching_factor parameter to pass into NewHashSkiplistRepFactory");
79 huge_page_tlb_size
, 0,
80 "huge_page_tlb_size parameter to pass into NewHashLinkListRepFactory");
82 DEFINE_int32(bucket_entries_logging_threshold
, 4096,
83 "bucket_entries_logging_threshold parameter to pass into "
84 "NewHashLinkListRepFactory");
86 DEFINE_bool(if_log_bucket_dist_when_flash
, true,
87 "if_log_bucket_dist_when_flash parameter to pass into "
88 "NewHashLinkListRepFactory");
91 threshold_use_skiplist
, 256,
92 "threshold_use_skiplist parameter to pass into NewHashLinkListRepFactory");
94 DEFINE_int64(write_buffer_size
, 256,
95 "write_buffer_size parameter to pass into WriteBufferManager");
99 "Number of concurrent threads to run. If the benchmark includes writes,\n"
100 "then at most one thread will be a writer");
102 DEFINE_int32(num_operations
, 1000000,
103 "Number of operations to do for write and random read benchmarks");
105 DEFINE_int32(num_scans
, 10,
106 "Number of times for each thread to scan the memtablerep for "
110 DEFINE_int32(item_size
, 100, "Number of bytes each item should be");
112 DEFINE_int32(prefix_length
, 8,
113 "Prefix length to pass into NewFixedPrefixTransform");
115 /* VectorRep settings */
116 DEFINE_int64(vectorrep_count
, 0,
117 "Number of entries to reserve on VectorRep initialization");
119 DEFINE_int64(seed
, 0,
120 "Seed base for random number generators. "
121 "When 0 it is deterministic.");
123 namespace ROCKSDB_NAMESPACE
{
126 struct CallbackVerifyArgs
{
130 InternalKeyComparator
* comparator
;
134 // Helper for quickly generating random data.
135 class RandomGenerator
{
143 auto size
= (unsigned)std::max(1048576, FLAGS_item_size
);
144 test::RandomString(&rnd
, size
, &data_
);
148 Slice
Generate(unsigned int len
) {
149 assert(len
<= data_
.size());
150 if (pos_
+ len
> data_
.size()) {
154 return Slice(data_
.data() + pos_
- len
, len
);
// Key-ordering modes used by KeyGenerator.
enum WriteMode { SEQUENTIAL, RANDOM, UNIQUE_RANDOM };
162 KeyGenerator(Random64
* rand
, WriteMode mode
, uint64_t num
)
163 : rand_(rand
), mode_(mode
), num_(num
), next_(0) {
164 if (mode_
== UNIQUE_RANDOM
) {
165 // NOTE: if memory consumption of this approach becomes a concern,
166 // we can either break it into pieces and only random shuffle a section
167 // each time. Alternatively, use a bit map implementation
168 // (https://reviews.facebook.net/differential/diff/54627/)
169 values_
.resize(num_
);
170 for (uint64_t i
= 0; i
< num_
; ++i
) {
174 values_
.begin(), values_
.end(),
175 std::default_random_engine(static_cast<unsigned int>(FLAGS_seed
)));
184 return rand_
->Next() % num_
;
186 return values_
[next_
++];
189 return std::numeric_limits
<uint64_t>::max();
197 std::vector
<uint64_t> values_
;
200 class BenchmarkThread
{
202 explicit BenchmarkThread(MemTableRep
* table
, KeyGenerator
* key_gen
,
203 uint64_t* bytes_written
, uint64_t* bytes_read
,
204 uint64_t* sequence
, uint64_t num_ops
,
208 bytes_written_(bytes_written
),
209 bytes_read_(bytes_read
),
212 read_hits_(read_hits
) {}
214 virtual void operator()() = 0;
215 virtual ~BenchmarkThread() {}
219 KeyGenerator
* key_gen_
;
220 uint64_t* bytes_written_
;
221 uint64_t* bytes_read_
;
224 uint64_t* read_hits_
;
225 RandomGenerator generator_
;
228 class FillBenchmarkThread
: public BenchmarkThread
{
230 FillBenchmarkThread(MemTableRep
* table
, KeyGenerator
* key_gen
,
231 uint64_t* bytes_written
, uint64_t* bytes_read
,
232 uint64_t* sequence
, uint64_t num_ops
, uint64_t* read_hits
)
233 : BenchmarkThread(table
, key_gen
, bytes_written
, bytes_read
, sequence
,
234 num_ops
, read_hits
) {}
238 auto internal_key_size
= 16;
240 FLAGS_item_size
+ VarintLength(internal_key_size
) + internal_key_size
;
241 KeyHandle handle
= table_
->Allocate(encoded_len
, &buf
);
242 assert(buf
!= nullptr);
243 char* p
= EncodeVarint32(buf
, internal_key_size
);
244 auto key
= key_gen_
->Next();
245 EncodeFixed64(p
, key
);
247 EncodeFixed64(p
, ++(*sequence_
));
249 Slice bytes
= generator_
.Generate(FLAGS_item_size
);
250 memcpy(p
, bytes
.data(), FLAGS_item_size
);
251 p
+= FLAGS_item_size
;
252 assert(p
== buf
+ encoded_len
);
253 table_
->Insert(handle
);
254 *bytes_written_
+= encoded_len
;
257 void operator()() override
{
258 for (unsigned int i
= 0; i
< num_ops_
; ++i
) {
264 class ConcurrentFillBenchmarkThread
: public FillBenchmarkThread
{
266 ConcurrentFillBenchmarkThread(MemTableRep
* table
, KeyGenerator
* key_gen
,
267 uint64_t* bytes_written
, uint64_t* bytes_read
,
268 uint64_t* sequence
, uint64_t num_ops
,
270 std::atomic_int
* threads_done
)
271 : FillBenchmarkThread(table
, key_gen
, bytes_written
, bytes_read
, sequence
,
272 num_ops
, read_hits
) {
273 threads_done_
= threads_done
;
276 void operator()() override
{
277 // # of read threads will be total threads - write threads (always 1). Loop
278 // while all reads complete.
279 while ((*threads_done_
).load() < (FLAGS_num_threads
- 1)) {
285 std::atomic_int
* threads_done_
;
288 class ReadBenchmarkThread
: public BenchmarkThread
{
290 ReadBenchmarkThread(MemTableRep
* table
, KeyGenerator
* key_gen
,
291 uint64_t* bytes_written
, uint64_t* bytes_read
,
292 uint64_t* sequence
, uint64_t num_ops
, uint64_t* read_hits
)
293 : BenchmarkThread(table
, key_gen
, bytes_written
, bytes_read
, sequence
,
294 num_ops
, read_hits
) {}
296 static bool callback(void* arg
, const char* entry
) {
297 CallbackVerifyArgs
* callback_args
= static_cast<CallbackVerifyArgs
*>(arg
);
298 assert(callback_args
!= nullptr);
300 const char* key_ptr
= GetVarint32Ptr(entry
, entry
+ 5, &key_length
);
301 if ((callback_args
->comparator
)
303 ->Equal(Slice(key_ptr
, key_length
- 8),
304 callback_args
->key
->user_key())) {
305 callback_args
->found
= true;
311 std::string user_key
;
312 auto key
= key_gen_
->Next();
313 PutFixed64(&user_key
, key
);
314 LookupKey
lookup_key(user_key
, *sequence_
);
315 InternalKeyComparator
internal_key_comp(BytewiseComparator());
316 CallbackVerifyArgs verify_args
;
317 verify_args
.found
= false;
318 verify_args
.key
= &lookup_key
;
319 verify_args
.table
= table_
;
320 verify_args
.comparator
= &internal_key_comp
;
321 table_
->Get(lookup_key
, &verify_args
, callback
);
322 if (verify_args
.found
) {
323 *bytes_read_
+= VarintLength(16) + 16 + FLAGS_item_size
;
327 void operator()() override
{
328 for (unsigned int i
= 0; i
< num_ops_
; ++i
) {
334 class SeqReadBenchmarkThread
: public BenchmarkThread
{
336 SeqReadBenchmarkThread(MemTableRep
* table
, KeyGenerator
* key_gen
,
337 uint64_t* bytes_written
, uint64_t* bytes_read
,
338 uint64_t* sequence
, uint64_t num_ops
,
340 : BenchmarkThread(table
, key_gen
, bytes_written
, bytes_read
, sequence
,
341 num_ops
, read_hits
) {}
344 std::unique_ptr
<MemTableRep::Iterator
> iter(table_
->GetIterator());
345 for (iter
->SeekToFirst(); iter
->Valid(); iter
->Next()) {
346 // pretend to read the value
347 *bytes_read_
+= VarintLength(16) + 16 + FLAGS_item_size
;
352 void operator()() override
{
353 for (unsigned int i
= 0; i
< num_ops_
; ++i
) {
359 class ConcurrentReadBenchmarkThread
: public ReadBenchmarkThread
{
361 ConcurrentReadBenchmarkThread(MemTableRep
* table
, KeyGenerator
* key_gen
,
362 uint64_t* bytes_written
, uint64_t* bytes_read
,
363 uint64_t* sequence
, uint64_t num_ops
,
365 std::atomic_int
* threads_done
)
366 : ReadBenchmarkThread(table
, key_gen
, bytes_written
, bytes_read
, sequence
,
367 num_ops
, read_hits
) {
368 threads_done_
= threads_done
;
371 void operator()() override
{
372 for (unsigned int i
= 0; i
< num_ops_
; ++i
) {
379 std::atomic_int
* threads_done_
;
382 class SeqConcurrentReadBenchmarkThread
: public SeqReadBenchmarkThread
{
384 SeqConcurrentReadBenchmarkThread(MemTableRep
* table
, KeyGenerator
* key_gen
,
385 uint64_t* bytes_written
,
386 uint64_t* bytes_read
, uint64_t* sequence
,
387 uint64_t num_ops
, uint64_t* read_hits
,
388 std::atomic_int
* threads_done
)
389 : SeqReadBenchmarkThread(table
, key_gen
, bytes_written
, bytes_read
,
390 sequence
, num_ops
, read_hits
) {
391 threads_done_
= threads_done
;
394 void operator()() override
{
395 for (unsigned int i
= 0; i
< num_ops_
; ++i
) {
402 std::atomic_int
* threads_done_
;
407 explicit Benchmark(MemTableRep
* table
, KeyGenerator
* key_gen
,
408 uint64_t* sequence
, uint32_t num_threads
)
412 num_threads_(num_threads
) {}
414 virtual ~Benchmark() {}
416 std::cout
<< "Number of threads: " << num_threads_
<< std::endl
;
417 std::vector
<port::Thread
> threads
;
418 uint64_t bytes_written
= 0;
419 uint64_t bytes_read
= 0;
420 uint64_t read_hits
= 0;
421 StopWatchNano
timer(Env::Default(), true);
422 RunThreads(&threads
, &bytes_written
, &bytes_read
, true, &read_hits
);
423 auto elapsed_time
= static_cast<double>(timer
.ElapsedNanos() / 1000);
424 std::cout
<< "Elapsed time: " << static_cast<int>(elapsed_time
) << " us"
427 if (bytes_written
> 0) {
428 auto MiB_written
= static_cast<double>(bytes_written
) / (1 << 20);
429 auto write_throughput
= MiB_written
/ (elapsed_time
/ 1000000);
430 std::cout
<< "Total bytes written: " << MiB_written
<< " MiB"
432 std::cout
<< "Write throughput: " << write_throughput
<< " MiB/s"
434 auto us_per_op
= elapsed_time
/ num_write_ops_per_thread_
;
435 std::cout
<< "write us/op: " << us_per_op
<< std::endl
;
437 if (bytes_read
> 0) {
438 auto MiB_read
= static_cast<double>(bytes_read
) / (1 << 20);
439 auto read_throughput
= MiB_read
/ (elapsed_time
/ 1000000);
440 std::cout
<< "Total bytes read: " << MiB_read
<< " MiB" << std::endl
;
441 std::cout
<< "Read throughput: " << read_throughput
<< " MiB/s"
443 auto us_per_op
= elapsed_time
/ num_read_ops_per_thread_
;
444 std::cout
<< "read us/op: " << us_per_op
<< std::endl
;
448 virtual void RunThreads(std::vector
<port::Thread
>* threads
,
449 uint64_t* bytes_written
, uint64_t* bytes_read
,
450 bool write
, uint64_t* read_hits
) = 0;
454 KeyGenerator
* key_gen_
;
456 uint64_t num_write_ops_per_thread_
;
457 uint64_t num_read_ops_per_thread_
;
458 const uint32_t num_threads_
;
461 class FillBenchmark
: public Benchmark
{
463 explicit FillBenchmark(MemTableRep
* table
, KeyGenerator
* key_gen
,
465 : Benchmark(table
, key_gen
, sequence
, 1) {
466 num_write_ops_per_thread_
= FLAGS_num_operations
;
469 void RunThreads(std::vector
<port::Thread
>* /*threads*/, uint64_t* bytes_written
,
470 uint64_t* bytes_read
, bool /*write*/,
471 uint64_t* read_hits
) override
{
472 FillBenchmarkThread(table_
, key_gen_
, bytes_written
, bytes_read
, sequence_
,
473 num_write_ops_per_thread_
, read_hits
)();
477 class ReadBenchmark
: public Benchmark
{
479 explicit ReadBenchmark(MemTableRep
* table
, KeyGenerator
* key_gen
,
481 : Benchmark(table
, key_gen
, sequence
, FLAGS_num_threads
) {
482 num_read_ops_per_thread_
= FLAGS_num_operations
/ FLAGS_num_threads
;
485 void RunThreads(std::vector
<port::Thread
>* threads
, uint64_t* bytes_written
,
486 uint64_t* bytes_read
, bool /*write*/,
487 uint64_t* read_hits
) override
{
488 for (int i
= 0; i
< FLAGS_num_threads
; ++i
) {
489 threads
->emplace_back(
490 ReadBenchmarkThread(table_
, key_gen_
, bytes_written
, bytes_read
,
491 sequence_
, num_read_ops_per_thread_
, read_hits
));
493 for (auto& thread
: *threads
) {
496 std::cout
<< "read hit%: "
497 << (static_cast<double>(*read_hits
) / FLAGS_num_operations
) * 100
502 class SeqReadBenchmark
: public Benchmark
{
504 explicit SeqReadBenchmark(MemTableRep
* table
, uint64_t* sequence
)
505 : Benchmark(table
, nullptr, sequence
, FLAGS_num_threads
) {
506 num_read_ops_per_thread_
= FLAGS_num_scans
;
509 void RunThreads(std::vector
<port::Thread
>* threads
, uint64_t* bytes_written
,
510 uint64_t* bytes_read
, bool /*write*/,
511 uint64_t* read_hits
) override
{
512 for (int i
= 0; i
< FLAGS_num_threads
; ++i
) {
513 threads
->emplace_back(SeqReadBenchmarkThread(
514 table_
, key_gen_
, bytes_written
, bytes_read
, sequence_
,
515 num_read_ops_per_thread_
, read_hits
));
517 for (auto& thread
: *threads
) {
523 template <class ReadThreadType
>
524 class ReadWriteBenchmark
: public Benchmark
{
526 explicit ReadWriteBenchmark(MemTableRep
* table
, KeyGenerator
* key_gen
,
528 : Benchmark(table
, key_gen
, sequence
, FLAGS_num_threads
) {
529 num_read_ops_per_thread_
=
530 FLAGS_num_threads
<= 1
532 : (FLAGS_num_operations
/ (FLAGS_num_threads
- 1));
533 num_write_ops_per_thread_
= FLAGS_num_operations
;
536 void RunThreads(std::vector
<port::Thread
>* threads
, uint64_t* bytes_written
,
537 uint64_t* bytes_read
, bool /*write*/,
538 uint64_t* read_hits
) override
{
539 std::atomic_int threads_done
;
540 threads_done
.store(0);
541 threads
->emplace_back(ConcurrentFillBenchmarkThread(
542 table_
, key_gen_
, bytes_written
, bytes_read
, sequence_
,
543 num_write_ops_per_thread_
, read_hits
, &threads_done
));
544 for (int i
= 1; i
< FLAGS_num_threads
; ++i
) {
545 threads
->emplace_back(
546 ReadThreadType(table_
, key_gen_
, bytes_written
, bytes_read
, sequence_
,
547 num_read_ops_per_thread_
, read_hits
, &threads_done
));
549 for (auto& thread
: *threads
) {
555 } // namespace ROCKSDB_NAMESPACE
// Warns when the binary was built in a configuration (no optimization,
// assertions enabled) that makes benchmark numbers misleading.
void PrintWarnings() {
#if defined(__GNUC__) && !defined(__OPTIMIZE__)
  fprintf(stdout,
          "WARNING: Optimization is disabled: benchmarks unnecessarily slow\n");
#endif
#ifndef NDEBUG
  fprintf(stdout,
          "WARNING: Assertions are enabled; benchmarks unnecessarily slow\n");
#endif
}
568 int main(int argc
, char** argv
) {
569 ROCKSDB_NAMESPACE::port::InstallStackTraceHandler();
570 SetUsageMessage(std::string("\nUSAGE:\n") + std::string(argv
[0]) +
572 ParseCommandLineFlags(&argc
, &argv
, true);
576 ROCKSDB_NAMESPACE::Options options
;
578 std::unique_ptr
<ROCKSDB_NAMESPACE::MemTableRepFactory
> factory
;
579 if (FLAGS_memtablerep
== "skiplist") {
580 factory
.reset(new ROCKSDB_NAMESPACE::SkipListFactory
);
582 } else if (FLAGS_memtablerep
== "vector") {
583 factory
.reset(new ROCKSDB_NAMESPACE::VectorRepFactory
);
584 } else if (FLAGS_memtablerep
== "hashskiplist") {
585 factory
.reset(ROCKSDB_NAMESPACE::NewHashSkipListRepFactory(
586 FLAGS_bucket_count
, FLAGS_hashskiplist_height
,
587 FLAGS_hashskiplist_branching_factor
));
588 options
.prefix_extractor
.reset(
589 ROCKSDB_NAMESPACE::NewFixedPrefixTransform(FLAGS_prefix_length
));
590 } else if (FLAGS_memtablerep
== "hashlinklist") {
591 factory
.reset(ROCKSDB_NAMESPACE::NewHashLinkListRepFactory(
592 FLAGS_bucket_count
, FLAGS_huge_page_tlb_size
,
593 FLAGS_bucket_entries_logging_threshold
,
594 FLAGS_if_log_bucket_dist_when_flash
, FLAGS_threshold_use_skiplist
));
595 options
.prefix_extractor
.reset(
596 ROCKSDB_NAMESPACE::NewFixedPrefixTransform(FLAGS_prefix_length
));
597 #endif // ROCKSDB_LITE
599 fprintf(stdout
, "Unknown memtablerep: %s\n", FLAGS_memtablerep
.c_str());
603 ROCKSDB_NAMESPACE::InternalKeyComparator
internal_key_comp(
604 ROCKSDB_NAMESPACE::BytewiseComparator());
605 ROCKSDB_NAMESPACE::MemTable::KeyComparator
key_comp(internal_key_comp
);
606 ROCKSDB_NAMESPACE::Arena arena
;
607 ROCKSDB_NAMESPACE::WriteBufferManager
wb(FLAGS_write_buffer_size
);
609 auto createMemtableRep
= [&] {
611 return factory
->CreateMemTableRep(key_comp
, &arena
,
612 options
.prefix_extractor
.get(),
613 options
.info_log
.get());
615 std::unique_ptr
<ROCKSDB_NAMESPACE::MemTableRep
> memtablerep
;
616 ROCKSDB_NAMESPACE::Random64
rng(FLAGS_seed
);
617 const char* benchmarks
= FLAGS_benchmarks
.c_str();
618 while (benchmarks
!= nullptr) {
619 std::unique_ptr
<ROCKSDB_NAMESPACE::KeyGenerator
> key_gen
;
620 const char* sep
= strchr(benchmarks
, ',');
621 ROCKSDB_NAMESPACE::Slice name
;
622 if (sep
== nullptr) {
624 benchmarks
= nullptr;
626 name
= ROCKSDB_NAMESPACE::Slice(benchmarks
, sep
- benchmarks
);
627 benchmarks
= sep
+ 1;
629 std::unique_ptr
<ROCKSDB_NAMESPACE::Benchmark
> benchmark
;
630 if (name
== ROCKSDB_NAMESPACE::Slice("fillseq")) {
631 memtablerep
.reset(createMemtableRep());
632 key_gen
.reset(new ROCKSDB_NAMESPACE::KeyGenerator(
633 &rng
, ROCKSDB_NAMESPACE::SEQUENTIAL
, FLAGS_num_operations
));
634 benchmark
.reset(new ROCKSDB_NAMESPACE::FillBenchmark(
635 memtablerep
.get(), key_gen
.get(), &sequence
));
636 } else if (name
== ROCKSDB_NAMESPACE::Slice("fillrandom")) {
637 memtablerep
.reset(createMemtableRep());
638 key_gen
.reset(new ROCKSDB_NAMESPACE::KeyGenerator(
639 &rng
, ROCKSDB_NAMESPACE::UNIQUE_RANDOM
, FLAGS_num_operations
));
640 benchmark
.reset(new ROCKSDB_NAMESPACE::FillBenchmark(
641 memtablerep
.get(), key_gen
.get(), &sequence
));
642 } else if (name
== ROCKSDB_NAMESPACE::Slice("readrandom")) {
643 key_gen
.reset(new ROCKSDB_NAMESPACE::KeyGenerator(
644 &rng
, ROCKSDB_NAMESPACE::RANDOM
, FLAGS_num_operations
));
645 benchmark
.reset(new ROCKSDB_NAMESPACE::ReadBenchmark(
646 memtablerep
.get(), key_gen
.get(), &sequence
));
647 } else if (name
== ROCKSDB_NAMESPACE::Slice("readseq")) {
648 key_gen
.reset(new ROCKSDB_NAMESPACE::KeyGenerator(
649 &rng
, ROCKSDB_NAMESPACE::SEQUENTIAL
, FLAGS_num_operations
));
650 benchmark
.reset(new ROCKSDB_NAMESPACE::SeqReadBenchmark(memtablerep
.get(),
652 } else if (name
== ROCKSDB_NAMESPACE::Slice("readwrite")) {
653 memtablerep
.reset(createMemtableRep());
654 key_gen
.reset(new ROCKSDB_NAMESPACE::KeyGenerator(
655 &rng
, ROCKSDB_NAMESPACE::RANDOM
, FLAGS_num_operations
));
656 benchmark
.reset(new ROCKSDB_NAMESPACE::ReadWriteBenchmark
<
657 ROCKSDB_NAMESPACE::ConcurrentReadBenchmarkThread
>(
658 memtablerep
.get(), key_gen
.get(), &sequence
));
659 } else if (name
== ROCKSDB_NAMESPACE::Slice("seqreadwrite")) {
660 memtablerep
.reset(createMemtableRep());
661 key_gen
.reset(new ROCKSDB_NAMESPACE::KeyGenerator(
662 &rng
, ROCKSDB_NAMESPACE::RANDOM
, FLAGS_num_operations
));
663 benchmark
.reset(new ROCKSDB_NAMESPACE::ReadWriteBenchmark
<
664 ROCKSDB_NAMESPACE::SeqConcurrentReadBenchmarkThread
>(
665 memtablerep
.get(), key_gen
.get(), &sequence
));
667 std::cout
<< "WARNING: skipping unknown benchmark '" << name
.ToString()
671 std::cout
<< "Running " << name
.ToString() << std::endl
;