// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
// This source code is licensed under both the GPLv2 (found in the
// COPYING file in the root directory) and Apache 2.0 License
// (found in the LICENSE.Apache file in the root directory).
//
// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file. See the AUTHORS file for names of contributors.

#ifndef GFLAGS
#include <cstdio>
int main() {
  fprintf(stderr, "Please install gflags to run rocksdb tools\n");
  return 1;
}
#else

#include <algorithm>
#include <atomic>
#include <cstring>
#include <iostream>
#include <limits>
#include <memory>
#include <random>
#include <thread>
#include <type_traits>
#include <vector>

#include "db/dbformat.h"
#include "db/memtable.h"
#include "memory/arena.h"
#include "port/port.h"
#include "port/stack_trace.h"
#include "rocksdb/comparator.h"
#include "rocksdb/memtablerep.h"
#include "rocksdb/options.h"
#include "rocksdb/slice_transform.h"
#include "rocksdb/write_buffer_manager.h"
#include "test_util/testutil.h"
#include "util/gflags_compat.h"
#include "util/mutexlock.h"
#include "util/stop_watch.h"

using GFLAGS_NAMESPACE::ParseCommandLineFlags;
using GFLAGS_NAMESPACE::RegisterFlagValidator;
using GFLAGS_NAMESPACE::SetUsageMessage;

DEFINE_string(benchmarks, "fillrandom",
              "Comma-separated list of benchmarks to run. Options:\n"
              "\tfillrandom -- write N random values\n"
              "\tfillseq -- write N values in sequential order\n"
              "\treadrandom -- read N values in random order\n"
              "\treadseq -- scan the DB\n"
              "\treadwrite -- 1 thread writes while N - 1 threads "
              "do random reads\n"
              "\tseqreadwrite -- 1 thread writes while N - 1 threads "
              "do scans\n");

DEFINE_string(memtablerep, "skiplist",
              "Which implementation of memtablerep to use. See "
              "include/memtablerep.h for more details. Options:\n"
              "\tskiplist -- backed by a skiplist\n"
              "\tvector -- backed by an std::vector\n"
              "\thashskiplist -- backed by a hash skip list\n"
              "\thashlinklist -- backed by a hash linked list");

DEFINE_int64(bucket_count, 1000000,
             "bucket_count parameter to pass into NewHashSkiplistRepFactory or "
             "NewHashLinkListRepFactory");

DEFINE_int32(
    hashskiplist_height, 4,
    "skiplist_height parameter to pass into NewHashSkiplistRepFactory");

DEFINE_int32(
    hashskiplist_branching_factor, 4,
    "branching_factor parameter to pass into NewHashSkiplistRepFactory");

DEFINE_int32(
    huge_page_tlb_size, 0,
    "huge_page_tlb_size parameter to pass into NewHashLinkListRepFactory");

DEFINE_int32(bucket_entries_logging_threshold, 4096,
             "bucket_entries_logging_threshold parameter to pass into "
             "NewHashLinkListRepFactory");

DEFINE_bool(if_log_bucket_dist_when_flash, true,
            "if_log_bucket_dist_when_flash parameter to pass into "
            "NewHashLinkListRepFactory");

DEFINE_int32(
    threshold_use_skiplist, 256,
    "threshold_use_skiplist parameter to pass into NewHashLinkListRepFactory");

DEFINE_int64(write_buffer_size, 256,
             "write_buffer_size parameter to pass into WriteBufferManager");

DEFINE_int32(
    num_threads, 1,
    "Number of concurrent threads to run. If the benchmark includes writes,\n"
    "then at most one thread will be a writer");

DEFINE_int32(num_operations, 1000000,
             "Number of operations to do for write and random read benchmarks");

DEFINE_int32(num_scans, 10,
             "Number of times for each thread to scan the memtablerep for "
             "sequential read benchmarks");

DEFINE_int32(item_size, 100, "Number of bytes each item should be");

DEFINE_int32(prefix_length, 8,
             "Prefix length to pass into NewFixedPrefixTransform");

/* VectorRep settings */
DEFINE_int64(vectorrep_count, 0,
             "Number of entries to reserve on VectorRep initialization");

DEFINE_int64(seed, 0,
             "Seed base for random number generators. "
             "When 0 it is deterministic.");

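// Example invocation (illustrative only; the binary path and flag values are
// arbitrary):
//   ./memtablerep_bench --benchmarks=fillrandom,readrandom \
//       --memtablerep=skiplist --num_operations=1000000 --num_threads=4
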
namespace ROCKSDB_NAMESPACE {

namespace {
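// Bundles everything the read benchmark's Get() callback needs: the lookup
// key, the rep and comparator to check it against, and a `found` flag the
// callback sets on a user-key match.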
struct CallbackVerifyArgs {
  bool found;
  LookupKey* key;
  MemTableRep* table;
  InternalKeyComparator* comparator;
};
}  // namespace

// Helper for quickly generating random data.
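// It fills a ~1 MiB pool of random bytes once up front, then hands out values
// by sliding a window over that pool, so Generate() does no RNG work per call.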
class RandomGenerator {
 private:
  std::string data_;
  unsigned int pos_;

 public:
  RandomGenerator() {
    Random rnd(301);
    auto size = (unsigned)std::max(1048576, FLAGS_item_size);
    test::RandomString(&rnd, size, &data_);
    pos_ = 0;
  }

  Slice Generate(unsigned int len) {
    assert(len <= data_.size());
    if (pos_ + len > data_.size()) {
      pos_ = 0;
    }
    pos_ += len;
    return Slice(data_.data() + pos_ - len, len);
  }
};

enum WriteMode { SEQUENTIAL, RANDOM, UNIQUE_RANDOM };

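// Produces keys according to WriteMode: SEQUENTIAL counts up from zero, RANDOM
// draws uniformly from [0, num) with repeats, and UNIQUE_RANDOM emits every
// key in [0, num) exactly once in shuffled order. For example (illustrative;
// the actual order depends on FLAGS_seed), UNIQUE_RANDOM with num = 4 might
// yield 2, 0, 3, 1.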
class KeyGenerator {
 public:
  KeyGenerator(Random64* rand, WriteMode mode, uint64_t num)
      : rand_(rand), mode_(mode), num_(num), next_(0) {
    if (mode_ == UNIQUE_RANDOM) {
      // NOTE: if memory consumption of this approach becomes a concern,
      // we can either break it into pieces and only random shuffle a section
      // each time. Alternatively, use a bit map implementation
      // (https://reviews.facebook.net/differential/diff/54627/)
      values_.resize(num_);
      for (uint64_t i = 0; i < num_; ++i) {
        values_[i] = i;
      }
      std::shuffle(
          values_.begin(), values_.end(),
          std::default_random_engine(static_cast<unsigned int>(FLAGS_seed)));
    }
  }

  uint64_t Next() {
    switch (mode_) {
      case SEQUENTIAL:
        return next_++;
      case RANDOM:
        return rand_->Next() % num_;
      case UNIQUE_RANDOM:
        return values_[next_++];
    }
    assert(false);
    return std::numeric_limits<uint64_t>::max();
  }

 private:
  Random64* rand_;
  WriteMode mode_;
  const uint64_t num_;
  uint64_t next_;
  std::vector<uint64_t> values_;
};

class BenchmarkThread {
 public:
  explicit BenchmarkThread(MemTableRep* table, KeyGenerator* key_gen,
                           uint64_t* bytes_written, uint64_t* bytes_read,
                           uint64_t* sequence, uint64_t num_ops,
                           uint64_t* read_hits)
      : table_(table),
        key_gen_(key_gen),
        bytes_written_(bytes_written),
        bytes_read_(bytes_read),
        sequence_(sequence),
        num_ops_(num_ops),
        read_hits_(read_hits) {}

  virtual void operator()() = 0;
  virtual ~BenchmarkThread() {}

 protected:
  MemTableRep* table_;
  KeyGenerator* key_gen_;
  uint64_t* bytes_written_;
  uint64_t* bytes_read_;
  uint64_t* sequence_;
  uint64_t num_ops_;
  uint64_t* read_hits_;
  RandomGenerator generator_;
};
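// Note: the counter pointers above (bytes_written_, bytes_read_, read_hits_)
// are plain uint64_t*, not atomics; in the concurrent benchmarks multiple
// threads update them racily, which this tool tolerates since they only feed
// approximate throughput reporting.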

class FillBenchmarkThread : public BenchmarkThread {
 public:
  FillBenchmarkThread(MemTableRep* table, KeyGenerator* key_gen,
                      uint64_t* bytes_written, uint64_t* bytes_read,
                      uint64_t* sequence, uint64_t num_ops, uint64_t* read_hits)
      : BenchmarkThread(table, key_gen, bytes_written, bytes_read, sequence,
                        num_ops, read_hits) {}

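  // Writes one entry with the layout the code below produces:
  //   varint32(internal_key_size = 16) | 8-byte user key | 8-byte sequence
  //   | FLAGS_item_size value bytes
  // Unlike a real MemTable entry, the 8-byte tag carries a bare sequence
  // number (no packed ValueType) and the value is not length-prefixed.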
  void FillOne() {
    char* buf = nullptr;
    auto internal_key_size = 16;
    auto encoded_len =
        FLAGS_item_size + VarintLength(internal_key_size) + internal_key_size;
    KeyHandle handle = table_->Allocate(encoded_len, &buf);
    assert(buf != nullptr);
    char* p = EncodeVarint32(buf, internal_key_size);
    auto key = key_gen_->Next();
    EncodeFixed64(p, key);
    p += 8;
    EncodeFixed64(p, ++(*sequence_));
    p += 8;
    Slice bytes = generator_.Generate(FLAGS_item_size);
    memcpy(p, bytes.data(), FLAGS_item_size);
    p += FLAGS_item_size;
    assert(p == buf + encoded_len);
    table_->Insert(handle);
    *bytes_written_ += encoded_len;
  }

  void operator()() override {
    for (unsigned int i = 0; i < num_ops_; ++i) {
      FillOne();
    }
  }
};

class ConcurrentFillBenchmarkThread : public FillBenchmarkThread {
 public:
  ConcurrentFillBenchmarkThread(MemTableRep* table, KeyGenerator* key_gen,
                                uint64_t* bytes_written, uint64_t* bytes_read,
                                uint64_t* sequence, uint64_t num_ops,
                                uint64_t* read_hits,
                                std::atomic_int* threads_done)
      : FillBenchmarkThread(table, key_gen, bytes_written, bytes_read, sequence,
                            num_ops, read_hits) {
    threads_done_ = threads_done;
  }

  void operator()() override {
    // # of read threads will be total threads - write threads (always 1).
    // Loop until all reads complete.
    while ((*threads_done_).load() < (FLAGS_num_threads - 1)) {
      FillOne();
    }
  }

 private:
  std::atomic_int* threads_done_;
};
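// With num_threads == 1 there are no readers, so the writer's loop condition
// above is false immediately and a readwrite run performs no work.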

class ReadBenchmarkThread : public BenchmarkThread {
 public:
  ReadBenchmarkThread(MemTableRep* table, KeyGenerator* key_gen,
                      uint64_t* bytes_written, uint64_t* bytes_read,
                      uint64_t* sequence, uint64_t num_ops, uint64_t* read_hits)
      : BenchmarkThread(table, key_gen, bytes_written, bytes_read, sequence,
                        num_ops, read_hits) {}

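  // Invoked by MemTableRep::Get() for each entry found for the key; returning
  // false stops the iteration after the first entry. The trailing 8-byte tag
  // is excluded (key_length - 8) to recover the user key for the comparison.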
  static bool callback(void* arg, const char* entry) {
    CallbackVerifyArgs* callback_args = static_cast<CallbackVerifyArgs*>(arg);
    assert(callback_args != nullptr);
    uint32_t key_length;
    const char* key_ptr = GetVarint32Ptr(entry, entry + 5, &key_length);
    if ((callback_args->comparator)
            ->user_comparator()
            ->Equal(Slice(key_ptr, key_length - 8),
                    callback_args->key->user_key())) {
      callback_args->found = true;
    }
    return false;
  }

  void ReadOne() {
    std::string user_key;
    auto key = key_gen_->Next();
    PutFixed64(&user_key, key);
    LookupKey lookup_key(user_key, *sequence_);
    InternalKeyComparator internal_key_comp(BytewiseComparator());
    CallbackVerifyArgs verify_args;
    verify_args.found = false;
    verify_args.key = &lookup_key;
    verify_args.table = table_;
    verify_args.comparator = &internal_key_comp;
    table_->Get(lookup_key, &verify_args, callback);
    if (verify_args.found) {
      *bytes_read_ += VarintLength(16) + 16 + FLAGS_item_size;
      ++*read_hits_;
    }
  }

  void operator()() override {
    for (unsigned int i = 0; i < num_ops_; ++i) {
      ReadOne();
    }
  }
};

class SeqReadBenchmarkThread : public BenchmarkThread {
 public:
  SeqReadBenchmarkThread(MemTableRep* table, KeyGenerator* key_gen,
                         uint64_t* bytes_written, uint64_t* bytes_read,
                         uint64_t* sequence, uint64_t num_ops,
                         uint64_t* read_hits)
      : BenchmarkThread(table, key_gen, bytes_written, bytes_read, sequence,
                        num_ops, read_hits) {}

  void ReadOneSeq() {
    std::unique_ptr<MemTableRep::Iterator> iter(table_->GetIterator());
    for (iter->SeekToFirst(); iter->Valid(); iter->Next()) {
      // pretend to read the value
      *bytes_read_ += VarintLength(16) + 16 + FLAGS_item_size;
    }
    ++*read_hits_;
  }

  void operator()() override {
    for (unsigned int i = 0; i < num_ops_; ++i) {
      ReadOneSeq();
    }
  }
};

class ConcurrentReadBenchmarkThread : public ReadBenchmarkThread {
 public:
  ConcurrentReadBenchmarkThread(MemTableRep* table, KeyGenerator* key_gen,
                                uint64_t* bytes_written, uint64_t* bytes_read,
                                uint64_t* sequence, uint64_t num_ops,
                                uint64_t* read_hits,
                                std::atomic_int* threads_done)
      : ReadBenchmarkThread(table, key_gen, bytes_written, bytes_read, sequence,
                            num_ops, read_hits) {
    threads_done_ = threads_done;
  }

  void operator()() override {
    for (unsigned int i = 0; i < num_ops_; ++i) {
      ReadOne();
    }
    ++*threads_done_;
  }

 private:
  std::atomic_int* threads_done_;
};

class SeqConcurrentReadBenchmarkThread : public SeqReadBenchmarkThread {
 public:
  SeqConcurrentReadBenchmarkThread(MemTableRep* table, KeyGenerator* key_gen,
                                   uint64_t* bytes_written,
                                   uint64_t* bytes_read, uint64_t* sequence,
                                   uint64_t num_ops, uint64_t* read_hits,
                                   std::atomic_int* threads_done)
      : SeqReadBenchmarkThread(table, key_gen, bytes_written, bytes_read,
                               sequence, num_ops, read_hits) {
    threads_done_ = threads_done;
  }

  void operator()() override {
    for (unsigned int i = 0; i < num_ops_; ++i) {
      ReadOneSeq();
    }
    ++*threads_done_;
  }

 private:
  std::atomic_int* threads_done_;
};

class Benchmark {
 public:
  explicit Benchmark(MemTableRep* table, KeyGenerator* key_gen,
                     uint64_t* sequence, uint32_t num_threads)
      : table_(table),
        key_gen_(key_gen),
        sequence_(sequence),
        num_threads_(num_threads) {}

  virtual ~Benchmark() {}

  virtual void Run() {
    std::cout << "Number of threads: " << num_threads_ << std::endl;
    std::vector<port::Thread> threads;
    uint64_t bytes_written = 0;
    uint64_t bytes_read = 0;
    uint64_t read_hits = 0;
    StopWatchNano timer(Env::Default(), true);
    RunThreads(&threads, &bytes_written, &bytes_read, true, &read_hits);
    // Convert nanoseconds to microseconds; cast before dividing so that
    // sub-microsecond precision is not truncated away.
    auto elapsed_time = static_cast<double>(timer.ElapsedNanos()) / 1000;
    std::cout << "Elapsed time: " << static_cast<int>(elapsed_time) << " us"
              << std::endl;

    if (bytes_written > 0) {
      auto MiB_written = static_cast<double>(bytes_written) / (1 << 20);
      auto write_throughput = MiB_written / (elapsed_time / 1000000);
      std::cout << "Total bytes written: " << MiB_written << " MiB"
                << std::endl;
      std::cout << "Write throughput: " << write_throughput << " MiB/s"
                << std::endl;
      auto us_per_op = elapsed_time / num_write_ops_per_thread_;
      std::cout << "write us/op: " << us_per_op << std::endl;
    }
    if (bytes_read > 0) {
      auto MiB_read = static_cast<double>(bytes_read) / (1 << 20);
      auto read_throughput = MiB_read / (elapsed_time / 1000000);
      std::cout << "Total bytes read: " << MiB_read << " MiB" << std::endl;
      std::cout << "Read throughput: " << read_throughput << " MiB/s"
                << std::endl;
      auto us_per_op = elapsed_time / num_read_ops_per_thread_;
      std::cout << "read us/op: " << us_per_op << std::endl;
    }
  }

  virtual void RunThreads(std::vector<port::Thread>* threads,
                          uint64_t* bytes_written, uint64_t* bytes_read,
                          bool write, uint64_t* read_hits) = 0;

 protected:
  MemTableRep* table_;
  KeyGenerator* key_gen_;
  uint64_t* sequence_;
  uint64_t num_write_ops_per_thread_;
  uint64_t num_read_ops_per_thread_;
  const uint32_t num_threads_;
};

class FillBenchmark : public Benchmark {
 public:
  explicit FillBenchmark(MemTableRep* table, KeyGenerator* key_gen,
                         uint64_t* sequence)
      : Benchmark(table, key_gen, sequence, 1) {
    num_write_ops_per_thread_ = FLAGS_num_operations;
  }

  void RunThreads(std::vector<port::Thread>* /*threads*/,
                  uint64_t* bytes_written, uint64_t* bytes_read,
                  bool /*write*/, uint64_t* read_hits) override {
    FillBenchmarkThread(table_, key_gen_, bytes_written, bytes_read, sequence_,
                        num_write_ops_per_thread_, read_hits)();
  }
};

class ReadBenchmark : public Benchmark {
 public:
  explicit ReadBenchmark(MemTableRep* table, KeyGenerator* key_gen,
                         uint64_t* sequence)
      : Benchmark(table, key_gen, sequence, FLAGS_num_threads) {
    num_read_ops_per_thread_ = FLAGS_num_operations / FLAGS_num_threads;
  }

  void RunThreads(std::vector<port::Thread>* threads, uint64_t* bytes_written,
                  uint64_t* bytes_read, bool /*write*/,
                  uint64_t* read_hits) override {
    for (int i = 0; i < FLAGS_num_threads; ++i) {
      threads->emplace_back(
          ReadBenchmarkThread(table_, key_gen_, bytes_written, bytes_read,
                              sequence_, num_read_ops_per_thread_, read_hits));
    }
    for (auto& thread : *threads) {
      thread.join();
    }
    std::cout << "read hit%: "
              << (static_cast<double>(*read_hits) / FLAGS_num_operations) * 100
              << std::endl;
  }
};

class SeqReadBenchmark : public Benchmark {
 public:
  explicit SeqReadBenchmark(MemTableRep* table, uint64_t* sequence)
      : Benchmark(table, nullptr, sequence, FLAGS_num_threads) {
    num_read_ops_per_thread_ = FLAGS_num_scans;
  }

  void RunThreads(std::vector<port::Thread>* threads, uint64_t* bytes_written,
                  uint64_t* bytes_read, bool /*write*/,
                  uint64_t* read_hits) override {
    for (int i = 0; i < FLAGS_num_threads; ++i) {
      threads->emplace_back(SeqReadBenchmarkThread(
          table_, key_gen_, bytes_written, bytes_read, sequence_,
          num_read_ops_per_thread_, read_hits));
    }
    for (auto& thread : *threads) {
      thread.join();
    }
  }
};

template <class ReadThreadType>
class ReadWriteBenchmark : public Benchmark {
 public:
  explicit ReadWriteBenchmark(MemTableRep* table, KeyGenerator* key_gen,
                              uint64_t* sequence)
      : Benchmark(table, key_gen, sequence, FLAGS_num_threads) {
    num_read_ops_per_thread_ =
        FLAGS_num_threads <= 1
            ? 0
            : (FLAGS_num_operations / (FLAGS_num_threads - 1));
    num_write_ops_per_thread_ = FLAGS_num_operations;
  }

  void RunThreads(std::vector<port::Thread>* threads, uint64_t* bytes_written,
                  uint64_t* bytes_read, bool /*write*/,
                  uint64_t* read_hits) override {
    std::atomic_int threads_done;
    threads_done.store(0);
    threads->emplace_back(ConcurrentFillBenchmarkThread(
        table_, key_gen_, bytes_written, bytes_read, sequence_,
        num_write_ops_per_thread_, read_hits, &threads_done));
    for (int i = 1; i < FLAGS_num_threads; ++i) {
      threads->emplace_back(
          ReadThreadType(table_, key_gen_, bytes_written, bytes_read, sequence_,
                         num_read_ops_per_thread_, read_hits, &threads_done));
    }
    for (auto& thread : *threads) {
      thread.join();
    }
  }
};
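// The writer above runs until every reader has bumped threads_done, so the
// actual number of writes varies from run to run and the reported write us/op
// (which divides by num_write_ops_per_thread_) is only approximate.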

}  // namespace ROCKSDB_NAMESPACE

void PrintWarnings() {
#if defined(__GNUC__) && !defined(__OPTIMIZE__)
  fprintf(stdout,
          "WARNING: Optimization is disabled: benchmarks unnecessarily slow\n");
#endif
#ifndef NDEBUG
  fprintf(stdout,
          "WARNING: Assertions are enabled; benchmarks unnecessarily slow\n");
#endif
}

int main(int argc, char** argv) {
  ROCKSDB_NAMESPACE::port::InstallStackTraceHandler();
  SetUsageMessage(std::string("\nUSAGE:\n") + std::string(argv[0]) +
                  " [OPTIONS]...");
  ParseCommandLineFlags(&argc, &argv, true);

  PrintWarnings();

  ROCKSDB_NAMESPACE::Options options;

  std::unique_ptr<ROCKSDB_NAMESPACE::MemTableRepFactory> factory;
  if (FLAGS_memtablerep == "skiplist") {
    factory.reset(new ROCKSDB_NAMESPACE::SkipListFactory);
#ifndef ROCKSDB_LITE
  } else if (FLAGS_memtablerep == "vector") {
    factory.reset(new ROCKSDB_NAMESPACE::VectorRepFactory);
  } else if (FLAGS_memtablerep == "hashskiplist") {
    factory.reset(ROCKSDB_NAMESPACE::NewHashSkipListRepFactory(
        FLAGS_bucket_count, FLAGS_hashskiplist_height,
        FLAGS_hashskiplist_branching_factor));
    options.prefix_extractor.reset(
        ROCKSDB_NAMESPACE::NewFixedPrefixTransform(FLAGS_prefix_length));
  } else if (FLAGS_memtablerep == "hashlinklist") {
    factory.reset(ROCKSDB_NAMESPACE::NewHashLinkListRepFactory(
        FLAGS_bucket_count, FLAGS_huge_page_tlb_size,
        FLAGS_bucket_entries_logging_threshold,
        FLAGS_if_log_bucket_dist_when_flash, FLAGS_threshold_use_skiplist));
    options.prefix_extractor.reset(
        ROCKSDB_NAMESPACE::NewFixedPrefixTransform(FLAGS_prefix_length));
#endif  // ROCKSDB_LITE
  } else {
    fprintf(stderr, "Unknown memtablerep: %s\n", FLAGS_memtablerep.c_str());
    exit(1);
  }

  ROCKSDB_NAMESPACE::InternalKeyComparator internal_key_comp(
      ROCKSDB_NAMESPACE::BytewiseComparator());
  ROCKSDB_NAMESPACE::MemTable::KeyComparator key_comp(internal_key_comp);
  ROCKSDB_NAMESPACE::Arena arena;
  ROCKSDB_NAMESPACE::WriteBufferManager wb(FLAGS_write_buffer_size);
  uint64_t sequence;
  auto createMemtableRep = [&] {
    sequence = 0;
    return factory->CreateMemTableRep(key_comp, &arena,
                                      options.prefix_extractor.get(),
                                      options.info_log.get());
  };
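  // Each fill benchmark below creates a fresh rep and resets the sequence
  // counter; the read benchmarks reuse whatever rep (and sequence) the
  // preceding fill left behind, so they should follow a fill in --benchmarks.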
  std::unique_ptr<ROCKSDB_NAMESPACE::MemTableRep> memtablerep;
  ROCKSDB_NAMESPACE::Random64 rng(FLAGS_seed);
  const char* benchmarks = FLAGS_benchmarks.c_str();
  while (benchmarks != nullptr) {
    std::unique_ptr<ROCKSDB_NAMESPACE::KeyGenerator> key_gen;
    const char* sep = strchr(benchmarks, ',');
    ROCKSDB_NAMESPACE::Slice name;
    if (sep == nullptr) {
      name = benchmarks;
      benchmarks = nullptr;
    } else {
      name = ROCKSDB_NAMESPACE::Slice(benchmarks, sep - benchmarks);
      benchmarks = sep + 1;
    }
    std::unique_ptr<ROCKSDB_NAMESPACE::Benchmark> benchmark;
    if (name == ROCKSDB_NAMESPACE::Slice("fillseq")) {
      memtablerep.reset(createMemtableRep());
      key_gen.reset(new ROCKSDB_NAMESPACE::KeyGenerator(
          &rng, ROCKSDB_NAMESPACE::SEQUENTIAL, FLAGS_num_operations));
      benchmark.reset(new ROCKSDB_NAMESPACE::FillBenchmark(
          memtablerep.get(), key_gen.get(), &sequence));
    } else if (name == ROCKSDB_NAMESPACE::Slice("fillrandom")) {
      memtablerep.reset(createMemtableRep());
      key_gen.reset(new ROCKSDB_NAMESPACE::KeyGenerator(
          &rng, ROCKSDB_NAMESPACE::UNIQUE_RANDOM, FLAGS_num_operations));
      benchmark.reset(new ROCKSDB_NAMESPACE::FillBenchmark(
          memtablerep.get(), key_gen.get(), &sequence));
    } else if (name == ROCKSDB_NAMESPACE::Slice("readrandom")) {
      key_gen.reset(new ROCKSDB_NAMESPACE::KeyGenerator(
          &rng, ROCKSDB_NAMESPACE::RANDOM, FLAGS_num_operations));
      benchmark.reset(new ROCKSDB_NAMESPACE::ReadBenchmark(
          memtablerep.get(), key_gen.get(), &sequence));
    } else if (name == ROCKSDB_NAMESPACE::Slice("readseq")) {
      key_gen.reset(new ROCKSDB_NAMESPACE::KeyGenerator(
          &rng, ROCKSDB_NAMESPACE::SEQUENTIAL, FLAGS_num_operations));
      benchmark.reset(new ROCKSDB_NAMESPACE::SeqReadBenchmark(memtablerep.get(),
                                                              &sequence));
    } else if (name == ROCKSDB_NAMESPACE::Slice("readwrite")) {
      memtablerep.reset(createMemtableRep());
      key_gen.reset(new ROCKSDB_NAMESPACE::KeyGenerator(
          &rng, ROCKSDB_NAMESPACE::RANDOM, FLAGS_num_operations));
      benchmark.reset(new ROCKSDB_NAMESPACE::ReadWriteBenchmark<
                      ROCKSDB_NAMESPACE::ConcurrentReadBenchmarkThread>(
          memtablerep.get(), key_gen.get(), &sequence));
    } else if (name == ROCKSDB_NAMESPACE::Slice("seqreadwrite")) {
      memtablerep.reset(createMemtableRep());
      key_gen.reset(new ROCKSDB_NAMESPACE::KeyGenerator(
          &rng, ROCKSDB_NAMESPACE::RANDOM, FLAGS_num_operations));
      benchmark.reset(new ROCKSDB_NAMESPACE::ReadWriteBenchmark<
                      ROCKSDB_NAMESPACE::SeqConcurrentReadBenchmarkThread>(
          memtablerep.get(), key_gen.get(), &sequence));
    } else {
      std::cout << "WARNING: skipping unknown benchmark '" << name.ToString()
                << "'" << std::endl;
      continue;
    }
    std::cout << "Running " << name.ToString() << std::endl;
    benchmark->Run();
  }

  return 0;
}

#endif  // GFLAGS