// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
// This source code is licensed under both the GPLv2 (found in the
// COPYING file in the root directory) and Apache 2.0 License
// (found in the LICENSE.Apache file in the root directory).
//
// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file. See the AUTHORS file for names of contributors.

#ifndef __STDC_FORMAT_MACROS
#define __STDC_FORMAT_MACROS
#endif

#ifndef GFLAGS
#include <cstdio>
int main() {
  fprintf(stderr, "Please install gflags to run rocksdb tools\n");
  return 1;
}
#else

#include <algorithm>  // std::max, std::shuffle
#include <atomic>
#include <cstring>  // memcpy, strchr
#include <iostream>
#include <limits>  // std::numeric_limits
#include <memory>
#include <random>  // std::default_random_engine
#include <thread>
#include <type_traits>
#include <vector>

#include "db/dbformat.h"
#include "db/memtable.h"
#include "port/port.h"
#include "port/stack_trace.h"
#include "rocksdb/comparator.h"
#include "rocksdb/memtablerep.h"
#include "rocksdb/options.h"
#include "rocksdb/slice_transform.h"
#include "rocksdb/write_buffer_manager.h"
#include "util/arena.h"
#include "util/gflags_compat.h"
#include "util/mutexlock.h"
#include "util/stop_watch.h"
#include "util/testutil.h"

using GFLAGS_NAMESPACE::ParseCommandLineFlags;
using GFLAGS_NAMESPACE::RegisterFlagValidator;
using GFLAGS_NAMESPACE::SetUsageMessage;

DEFINE_string(benchmarks, "fillrandom",
              "Comma-separated list of benchmarks to run. Options:\n"
              "\tfillrandom -- write N random values\n"
              "\tfillseq -- write N values in sequential order\n"
              "\treadrandom -- read N values in random order\n"
              "\treadseq -- scan the DB\n"
              "\treadwrite -- 1 thread writes while N - 1 threads "
              "do random reads\n"
              "\tseqreadwrite -- 1 thread writes while N - 1 threads "
              "do scans\n");

DEFINE_string(memtablerep, "skiplist",
              "Which implementation of memtablerep to use. See "
              "include/memtablerep.h for\n"
              "  more details. Options:\n"
              "\tskiplist -- backed by a skiplist\n"
              "\tvector -- backed by an std::vector\n"
              "\thashskiplist -- backed by a hash skip list\n"
              "\thashlinklist -- backed by a hash linked list\n"
              "\tcuckoo -- backed by a cuckoo hash table");

DEFINE_int64(bucket_count, 1000000,
             "bucket_count parameter to pass into NewHashSkiplistRepFactory or "
             "NewHashLinkListRepFactory");

DEFINE_int32(
    hashskiplist_height, 4,
    "skiplist_height parameter to pass into NewHashSkiplistRepFactory");

DEFINE_int32(
    hashskiplist_branching_factor, 4,
    "branching_factor parameter to pass into NewHashSkiplistRepFactory");

DEFINE_int32(
    huge_page_tlb_size, 0,
    "huge_page_tlb_size parameter to pass into NewHashLinkListRepFactory");

DEFINE_int32(bucket_entries_logging_threshold, 4096,
             "bucket_entries_logging_threshold parameter to pass into "
             "NewHashLinkListRepFactory");

DEFINE_bool(if_log_bucket_dist_when_flash, true,
            "if_log_bucket_dist_when_flash parameter to pass into "
            "NewHashLinkListRepFactory");

DEFINE_int32(
    threshold_use_skiplist, 256,
    "threshold_use_skiplist parameter to pass into NewHashLinkListRepFactory");

DEFINE_int64(write_buffer_size, 256,
             "write_buffer_size parameter to pass into WriteBufferManager");

DEFINE_int32(
    num_threads, 1,
    "Number of concurrent threads to run. If the benchmark includes writes,\n"
    "then at most one thread will be a writer");

DEFINE_int32(num_operations, 1000000,
             "Number of operations to do for write and random read benchmarks");

DEFINE_int32(num_scans, 10,
             "Number of times for each thread to scan the memtablerep for "
             "sequential read benchmarks");

DEFINE_int32(item_size, 100, "Number of bytes each item should be");

DEFINE_int32(prefix_length, 8,
             "Prefix length to pass into NewFixedPrefixTransform");

/* VectorRep settings */
DEFINE_int64(vectorrep_count, 0,
             "Number of entries to reserve on VectorRep initialization");

DEFINE_int64(seed, 0,
             "Seed base for random number generators. "
             "When 0 it is deterministic.");

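// Example invocation (a sketch; the binary name depends on the build target,
// typically memtablerep_bench):
//   ./memtablerep_bench --benchmarks=fillrandom,readrandom \
//       --memtablerep=skiplist --num_operations=1000000 --num_threads=4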
namespace rocksdb {

namespace {
struct CallbackVerifyArgs {
  bool found;
  LookupKey* key;
  MemTableRep* table;
  InternalKeyComparator* comparator;
};
}  // namespace

// Helper for quickly generating random data.
class RandomGenerator {
 private:
  std::string data_;
  unsigned int pos_;

 public:
  RandomGenerator() {
    Random rnd(301);
    auto size = (unsigned)std::max(1048576, FLAGS_item_size);
    test::RandomString(&rnd, size, &data_);
    pos_ = 0;
  }

  Slice Generate(unsigned int len) {
    assert(len <= data_.size());
    if (pos_ + len > data_.size()) {
      pos_ = 0;
    }
    pos_ += len;
    return Slice(data_.data() + pos_ - len, len);
  }
};

enum WriteMode { SEQUENTIAL, RANDOM, UNIQUE_RANDOM };

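// Produces user keys according to WriteMode: SEQUENTIAL hands out 0, 1, 2,
// ...; RANDOM draws uniformly (with repeats) from [0, num); UNIQUE_RANDOM
// pre-shuffles [0, num) so each key is returned exactly once.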
class KeyGenerator {
 public:
  KeyGenerator(Random64* rand, WriteMode mode, uint64_t num)
      : rand_(rand), mode_(mode), num_(num), next_(0) {
    if (mode_ == UNIQUE_RANDOM) {
      // NOTE: if memory consumption of this approach becomes a concern,
      // we can either break it into pieces and only random shuffle a section
      // each time. Alternatively, use a bit map implementation
      // (https://reviews.facebook.net/differential/diff/54627/)
      values_.resize(num_);
      for (uint64_t i = 0; i < num_; ++i) {
        values_[i] = i;
      }
      std::shuffle(
          values_.begin(), values_.end(),
          std::default_random_engine(static_cast<unsigned int>(FLAGS_seed)));
    }
  }

  uint64_t Next() {
    switch (mode_) {
      case SEQUENTIAL:
        return next_++;
      case RANDOM:
        return rand_->Next() % num_;
      case UNIQUE_RANDOM:
        return values_[next_++];
    }
    assert(false);
    return std::numeric_limits<uint64_t>::max();
  }

 private:
  Random64* rand_;
  WriteMode mode_;
  const uint64_t num_;
  uint64_t next_;
  std::vector<uint64_t> values_;
};

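// Base class for one benchmark thread: holds the table under test, a key
// source, and caller-owned counters (bytes written/read, sequence number,
// read hits). operator() runs the thread's whole workload.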
class BenchmarkThread {
 public:
  explicit BenchmarkThread(MemTableRep* table, KeyGenerator* key_gen,
                           uint64_t* bytes_written, uint64_t* bytes_read,
                           uint64_t* sequence, uint64_t num_ops,
                           uint64_t* read_hits)
      : table_(table),
        key_gen_(key_gen),
        bytes_written_(bytes_written),
        bytes_read_(bytes_read),
        sequence_(sequence),
        num_ops_(num_ops),
        read_hits_(read_hits) {}

  virtual void operator()() = 0;
  virtual ~BenchmarkThread() {}

 protected:
  MemTableRep* table_;
  KeyGenerator* key_gen_;
  uint64_t* bytes_written_;
  uint64_t* bytes_read_;
  uint64_t* sequence_;
  uint64_t num_ops_;
  uint64_t* read_hits_;
  RandomGenerator generator_;
};

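// Writer thread. FillOne() encodes one entry into memory allocated from the
// MemTableRep as [varint32 internal key size (16)][8-byte user key]
// [8-byte sequence number][FLAGS_item_size value bytes] and inserts it.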
class FillBenchmarkThread : public BenchmarkThread {
 public:
  FillBenchmarkThread(MemTableRep* table, KeyGenerator* key_gen,
                      uint64_t* bytes_written, uint64_t* bytes_read,
                      uint64_t* sequence, uint64_t num_ops, uint64_t* read_hits)
      : BenchmarkThread(table, key_gen, bytes_written, bytes_read, sequence,
                        num_ops, read_hits) {}

  void FillOne() {
    char* buf = nullptr;
    auto internal_key_size = 16;
    auto encoded_len =
        FLAGS_item_size + VarintLength(internal_key_size) + internal_key_size;
    KeyHandle handle = table_->Allocate(encoded_len, &buf);
    assert(buf != nullptr);
    char* p = EncodeVarint32(buf, internal_key_size);
    auto key = key_gen_->Next();
    EncodeFixed64(p, key);
    p += 8;
    EncodeFixed64(p, ++(*sequence_));
    p += 8;
    Slice bytes = generator_.Generate(FLAGS_item_size);
    memcpy(p, bytes.data(), FLAGS_item_size);
    p += FLAGS_item_size;
    assert(p == buf + encoded_len);
    table_->Insert(handle);
    *bytes_written_ += encoded_len;
  }

  void operator()() override {
    for (unsigned int i = 0; i < num_ops_; ++i) {
      FillOne();
    }
  }
};

class ConcurrentFillBenchmarkThread : public FillBenchmarkThread {
 public:
  ConcurrentFillBenchmarkThread(MemTableRep* table, KeyGenerator* key_gen,
                                uint64_t* bytes_written, uint64_t* bytes_read,
                                uint64_t* sequence, uint64_t num_ops,
                                uint64_t* read_hits,
                                std::atomic_int* threads_done)
      : FillBenchmarkThread(table, key_gen, bytes_written, bytes_read, sequence,
                            num_ops, read_hits) {
    threads_done_ = threads_done;
  }

  void operator()() override {
    // The reader count is the total thread count minus the single writer.
    // Keep writing until every reader has finished.
    while ((*threads_done_).load() < (FLAGS_num_threads - 1)) {
      FillOne();
    }
  }

 private:
  std::atomic_int* threads_done_;
};

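// Reader thread. The callback handed to MemTableRep::Get() is invoked per
// candidate entry; it decodes the varint-prefixed internal key and compares
// the user key (internal key minus the trailing 8 sequence bytes) with the
// lookup key. It always returns false, so the scan stops after the first
// candidate.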
class ReadBenchmarkThread : public BenchmarkThread {
 public:
  ReadBenchmarkThread(MemTableRep* table, KeyGenerator* key_gen,
                      uint64_t* bytes_written, uint64_t* bytes_read,
                      uint64_t* sequence, uint64_t num_ops, uint64_t* read_hits)
      : BenchmarkThread(table, key_gen, bytes_written, bytes_read, sequence,
                        num_ops, read_hits) {}

  static bool callback(void* arg, const char* entry) {
    CallbackVerifyArgs* callback_args = static_cast<CallbackVerifyArgs*>(arg);
    assert(callback_args != nullptr);
    uint32_t key_length;
    const char* key_ptr = GetVarint32Ptr(entry, entry + 5, &key_length);
    if ((callback_args->comparator)
            ->user_comparator()
            ->Equal(Slice(key_ptr, key_length - 8),
                    callback_args->key->user_key())) {
      callback_args->found = true;
    }
    return false;
  }

  void ReadOne() {
    std::string user_key;
    auto key = key_gen_->Next();
    PutFixed64(&user_key, key);
    LookupKey lookup_key(user_key, *sequence_);
    InternalKeyComparator internal_key_comp(BytewiseComparator());
    CallbackVerifyArgs verify_args;
    verify_args.found = false;
    verify_args.key = &lookup_key;
    verify_args.table = table_;
    verify_args.comparator = &internal_key_comp;
    table_->Get(lookup_key, &verify_args, callback);
    if (verify_args.found) {
      *bytes_read_ += VarintLength(16) + 16 + FLAGS_item_size;
      ++*read_hits_;
    }
  }

  void operator()() override {
    for (unsigned int i = 0; i < num_ops_; ++i) {
      ReadOne();
    }
  }
};

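// Sequential reader: each ReadOneSeq() walks the entire table with an
// iterator from SeekToFirst(), charging VarintLength(16) + 16 +
// FLAGS_item_size bytes per entry as if the value were consumed.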
class SeqReadBenchmarkThread : public BenchmarkThread {
 public:
  SeqReadBenchmarkThread(MemTableRep* table, KeyGenerator* key_gen,
                         uint64_t* bytes_written, uint64_t* bytes_read,
                         uint64_t* sequence, uint64_t num_ops,
                         uint64_t* read_hits)
      : BenchmarkThread(table, key_gen, bytes_written, bytes_read, sequence,
                        num_ops, read_hits) {}

  void ReadOneSeq() {
    std::unique_ptr<MemTableRep::Iterator> iter(table_->GetIterator());
    for (iter->SeekToFirst(); iter->Valid(); iter->Next()) {
      // pretend to read the value
      *bytes_read_ += VarintLength(16) + 16 + FLAGS_item_size;
    }
    ++*read_hits_;
  }

  void operator()() override {
    for (unsigned int i = 0; i < num_ops_; ++i) {
      ReadOneSeq();
    }
  }
};

class ConcurrentReadBenchmarkThread : public ReadBenchmarkThread {
 public:
  ConcurrentReadBenchmarkThread(MemTableRep* table, KeyGenerator* key_gen,
                                uint64_t* bytes_written, uint64_t* bytes_read,
                                uint64_t* sequence, uint64_t num_ops,
                                uint64_t* read_hits,
                                std::atomic_int* threads_done)
      : ReadBenchmarkThread(table, key_gen, bytes_written, bytes_read, sequence,
                            num_ops, read_hits) {
    threads_done_ = threads_done;
  }

  void operator()() override {
    for (unsigned int i = 0; i < num_ops_; ++i) {
      ReadOne();
    }
    ++*threads_done_;
  }

 private:
  std::atomic_int* threads_done_;
};

class SeqConcurrentReadBenchmarkThread : public SeqReadBenchmarkThread {
 public:
  SeqConcurrentReadBenchmarkThread(MemTableRep* table, KeyGenerator* key_gen,
                                   uint64_t* bytes_written,
                                   uint64_t* bytes_read, uint64_t* sequence,
                                   uint64_t num_ops, uint64_t* read_hits,
                                   std::atomic_int* threads_done)
      : SeqReadBenchmarkThread(table, key_gen, bytes_written, bytes_read,
                               sequence, num_ops, read_hits) {
    threads_done_ = threads_done;
  }

  void operator()() override {
    for (unsigned int i = 0; i < num_ops_; ++i) {
      ReadOneSeq();
    }
    ++*threads_done_;
  }

 private:
  std::atomic_int* threads_done_;
};

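// Drives a benchmark end to end: RunThreads() spawns the workload threads,
// StopWatchNano times the run, and Run() reports MiB/s throughput plus
// microseconds per operation for reads and writes.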
class Benchmark {
 public:
  explicit Benchmark(MemTableRep* table, KeyGenerator* key_gen,
                     uint64_t* sequence, uint32_t num_threads)
      : table_(table),
        key_gen_(key_gen),
        sequence_(sequence),
        num_threads_(num_threads) {}

  virtual ~Benchmark() {}

  virtual void Run() {
    std::cout << "Number of threads: " << num_threads_ << std::endl;
    std::vector<port::Thread> threads;
    uint64_t bytes_written = 0;
    uint64_t bytes_read = 0;
    uint64_t read_hits = 0;
    StopWatchNano timer(Env::Default(), true);
    RunThreads(&threads, &bytes_written, &bytes_read, true, &read_hits);
    auto elapsed_time = static_cast<double>(timer.ElapsedNanos() / 1000);
    std::cout << "Elapsed time: " << static_cast<int>(elapsed_time) << " us"
              << std::endl;

    if (bytes_written > 0) {
      auto MiB_written = static_cast<double>(bytes_written) / (1 << 20);
      auto write_throughput = MiB_written / (elapsed_time / 1000000);
      std::cout << "Total bytes written: " << MiB_written << " MiB"
                << std::endl;
      std::cout << "Write throughput: " << write_throughput << " MiB/s"
                << std::endl;
      auto us_per_op = elapsed_time / num_write_ops_per_thread_;
      std::cout << "write us/op: " << us_per_op << std::endl;
    }
    if (bytes_read > 0) {
      auto MiB_read = static_cast<double>(bytes_read) / (1 << 20);
      auto read_throughput = MiB_read / (elapsed_time / 1000000);
      std::cout << "Total bytes read: " << MiB_read << " MiB" << std::endl;
      std::cout << "Read throughput: " << read_throughput << " MiB/s"
                << std::endl;
      auto us_per_op = elapsed_time / num_read_ops_per_thread_;
      std::cout << "read us/op: " << us_per_op << std::endl;
    }
  }

  virtual void RunThreads(std::vector<port::Thread>* threads,
                          uint64_t* bytes_written, uint64_t* bytes_read,
                          bool write, uint64_t* read_hits) = 0;

 protected:
  MemTableRep* table_;
  KeyGenerator* key_gen_;
  uint64_t* sequence_;
  uint64_t num_write_ops_per_thread_;
  uint64_t num_read_ops_per_thread_;
  const uint32_t num_threads_;
};

class FillBenchmark : public Benchmark {
 public:
  explicit FillBenchmark(MemTableRep* table, KeyGenerator* key_gen,
                         uint64_t* sequence)
      : Benchmark(table, key_gen, sequence, 1) {
    num_write_ops_per_thread_ = FLAGS_num_operations;
  }

  void RunThreads(std::vector<port::Thread>* /*threads*/,
                  uint64_t* bytes_written, uint64_t* bytes_read,
                  bool /*write*/, uint64_t* read_hits) override {
    FillBenchmarkThread(table_, key_gen_, bytes_written, bytes_read, sequence_,
                        num_write_ops_per_thread_, read_hits)();
  }
};

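// Point-lookup benchmark: FLAGS_num_threads readers split
// FLAGS_num_operations lookups between them; the hit rate is printed once
// all threads join.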
class ReadBenchmark : public Benchmark {
 public:
  explicit ReadBenchmark(MemTableRep* table, KeyGenerator* key_gen,
                         uint64_t* sequence)
      : Benchmark(table, key_gen, sequence, FLAGS_num_threads) {
    num_read_ops_per_thread_ = FLAGS_num_operations / FLAGS_num_threads;
  }

  void RunThreads(std::vector<port::Thread>* threads, uint64_t* bytes_written,
                  uint64_t* bytes_read, bool /*write*/,
                  uint64_t* read_hits) override {
    for (int i = 0; i < FLAGS_num_threads; ++i) {
      threads->emplace_back(
          ReadBenchmarkThread(table_, key_gen_, bytes_written, bytes_read,
                              sequence_, num_read_ops_per_thread_, read_hits));
    }
    for (auto& thread : *threads) {
      thread.join();
    }
    std::cout << "read hit%: "
              << (static_cast<double>(*read_hits) / FLAGS_num_operations) * 100
              << std::endl;
  }
};

class SeqReadBenchmark : public Benchmark {
 public:
  explicit SeqReadBenchmark(MemTableRep* table, uint64_t* sequence)
      : Benchmark(table, nullptr, sequence, FLAGS_num_threads) {
    num_read_ops_per_thread_ = FLAGS_num_scans;
  }

  void RunThreads(std::vector<port::Thread>* threads, uint64_t* bytes_written,
                  uint64_t* bytes_read, bool /*write*/,
                  uint64_t* read_hits) override {
    for (int i = 0; i < FLAGS_num_threads; ++i) {
      threads->emplace_back(SeqReadBenchmarkThread(
          table_, key_gen_, bytes_written, bytes_read, sequence_,
          num_read_ops_per_thread_, read_hits));
    }
    for (auto& thread : *threads) {
      thread.join();
    }
  }
};

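// Mixed workload: one ConcurrentFillBenchmarkThread writes continuously while
// FLAGS_num_threads - 1 ReadThreadType threads read; the writer stops once
// every reader has bumped threads_done.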
template <class ReadThreadType>
class ReadWriteBenchmark : public Benchmark {
 public:
  explicit ReadWriteBenchmark(MemTableRep* table, KeyGenerator* key_gen,
                              uint64_t* sequence)
      : Benchmark(table, key_gen, sequence, FLAGS_num_threads) {
    num_read_ops_per_thread_ =
        FLAGS_num_threads <= 1
            ? 0
            : (FLAGS_num_operations / (FLAGS_num_threads - 1));
    num_write_ops_per_thread_ = FLAGS_num_operations;
  }

  void RunThreads(std::vector<port::Thread>* threads, uint64_t* bytes_written,
                  uint64_t* bytes_read, bool /*write*/,
                  uint64_t* read_hits) override {
    std::atomic_int threads_done;
    threads_done.store(0);
    threads->emplace_back(ConcurrentFillBenchmarkThread(
        table_, key_gen_, bytes_written, bytes_read, sequence_,
        num_write_ops_per_thread_, read_hits, &threads_done));
    for (int i = 1; i < FLAGS_num_threads; ++i) {
      threads->emplace_back(
          ReadThreadType(table_, key_gen_, bytes_written, bytes_read, sequence_,
                         num_read_ops_per_thread_, read_hits, &threads_done));
    }
    for (auto& thread : *threads) {
      thread.join();
    }
  }
};

}  // namespace rocksdb

void PrintWarnings() {
#if defined(__GNUC__) && !defined(__OPTIMIZE__)
  fprintf(stdout,
          "WARNING: Optimization is disabled: benchmarks unnecessarily slow\n");
#endif
#ifndef NDEBUG
  fprintf(stdout,
          "WARNING: Assertions are enabled; benchmarks unnecessarily slow\n");
#endif
}

int main(int argc, char** argv) {
  rocksdb::port::InstallStackTraceHandler();
  SetUsageMessage(std::string("\nUSAGE:\n") + std::string(argv[0]) +
                  " [OPTIONS]...");
  ParseCommandLineFlags(&argc, &argv, true);

  PrintWarnings();

  rocksdb::Options options;

  std::unique_ptr<rocksdb::MemTableRepFactory> factory;
  if (FLAGS_memtablerep == "skiplist") {
    factory.reset(new rocksdb::SkipListFactory);
#ifndef ROCKSDB_LITE
  } else if (FLAGS_memtablerep == "vector") {
    factory.reset(new rocksdb::VectorRepFactory);
  } else if (FLAGS_memtablerep == "hashskiplist") {
    factory.reset(rocksdb::NewHashSkipListRepFactory(
        FLAGS_bucket_count, FLAGS_hashskiplist_height,
        FLAGS_hashskiplist_branching_factor));
    options.prefix_extractor.reset(
        rocksdb::NewFixedPrefixTransform(FLAGS_prefix_length));
  } else if (FLAGS_memtablerep == "hashlinklist") {
    factory.reset(rocksdb::NewHashLinkListRepFactory(
        FLAGS_bucket_count, FLAGS_huge_page_tlb_size,
        FLAGS_bucket_entries_logging_threshold,
        FLAGS_if_log_bucket_dist_when_flash, FLAGS_threshold_use_skiplist));
    options.prefix_extractor.reset(
        rocksdb::NewFixedPrefixTransform(FLAGS_prefix_length));
#endif  // ROCKSDB_LITE
  } else {
    fprintf(stdout, "Unknown memtablerep: %s\n", FLAGS_memtablerep.c_str());
    exit(1);
  }

  rocksdb::InternalKeyComparator internal_key_comp(
      rocksdb::BytewiseComparator());
  rocksdb::MemTable::KeyComparator key_comp(internal_key_comp);
  rocksdb::Arena arena;
  rocksdb::WriteBufferManager wb(FLAGS_write_buffer_size);
  uint64_t sequence;
  auto createMemtableRep = [&] {
    sequence = 0;
    return factory->CreateMemTableRep(key_comp, &arena,
                                      options.prefix_extractor.get(),
                                      options.info_log.get());
  };
  std::unique_ptr<rocksdb::MemTableRep> memtablerep;
  rocksdb::Random64 rng(FLAGS_seed);
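  // FLAGS_benchmarks is a comma-separated list; each name runs in order. Fill
  // benchmarks create a fresh MemTableRep (resetting the sequence counter);
  // read benchmarks reuse the table left behind by the preceding fill.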
  const char* benchmarks = FLAGS_benchmarks.c_str();
  while (benchmarks != nullptr) {
    std::unique_ptr<rocksdb::KeyGenerator> key_gen;
    const char* sep = strchr(benchmarks, ',');
    rocksdb::Slice name;
    if (sep == nullptr) {
      name = benchmarks;
      benchmarks = nullptr;
    } else {
      name = rocksdb::Slice(benchmarks, sep - benchmarks);
      benchmarks = sep + 1;
    }
    std::unique_ptr<rocksdb::Benchmark> benchmark;
    if (name == rocksdb::Slice("fillseq")) {
      memtablerep.reset(createMemtableRep());
      key_gen.reset(new rocksdb::KeyGenerator(&rng, rocksdb::SEQUENTIAL,
                                              FLAGS_num_operations));
      benchmark.reset(new rocksdb::FillBenchmark(memtablerep.get(),
                                                 key_gen.get(), &sequence));
    } else if (name == rocksdb::Slice("fillrandom")) {
      memtablerep.reset(createMemtableRep());
      key_gen.reset(new rocksdb::KeyGenerator(&rng, rocksdb::UNIQUE_RANDOM,
                                              FLAGS_num_operations));
      benchmark.reset(new rocksdb::FillBenchmark(memtablerep.get(),
                                                 key_gen.get(), &sequence));
    } else if (name == rocksdb::Slice("readrandom")) {
      key_gen.reset(new rocksdb::KeyGenerator(&rng, rocksdb::RANDOM,
                                              FLAGS_num_operations));
      benchmark.reset(new rocksdb::ReadBenchmark(memtablerep.get(),
                                                 key_gen.get(), &sequence));
    } else if (name == rocksdb::Slice("readseq")) {
      key_gen.reset(new rocksdb::KeyGenerator(&rng, rocksdb::SEQUENTIAL,
                                              FLAGS_num_operations));
      benchmark.reset(
          new rocksdb::SeqReadBenchmark(memtablerep.get(), &sequence));
    } else if (name == rocksdb::Slice("readwrite")) {
      memtablerep.reset(createMemtableRep());
      key_gen.reset(new rocksdb::KeyGenerator(&rng, rocksdb::RANDOM,
                                              FLAGS_num_operations));
      benchmark.reset(new rocksdb::ReadWriteBenchmark<
                      rocksdb::ConcurrentReadBenchmarkThread>(
          memtablerep.get(), key_gen.get(), &sequence));
    } else if (name == rocksdb::Slice("seqreadwrite")) {
      memtablerep.reset(createMemtableRep());
      key_gen.reset(new rocksdb::KeyGenerator(&rng, rocksdb::RANDOM,
                                              FLAGS_num_operations));
      benchmark.reset(new rocksdb::ReadWriteBenchmark<
                      rocksdb::SeqConcurrentReadBenchmarkThread>(
          memtablerep.get(), key_gen.get(), &sequence));
    } else {
      std::cout << "WARNING: skipping unknown benchmark '" << name.ToString()
                << "'" << std::endl;
      continue;
    }
    std::cout << "Running " << name.ToString() << std::endl;
    benchmark->Run();
  }

  return 0;
}

#endif  // GFLAGS