// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
// This source code is licensed under both the GPLv2 (found in the
// COPYING file in the root directory) and Apache 2.0 License
// (found in the LICENSE.Apache file in the root directory).
//
// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file. See the AUTHORS file for names of contributors.

#ifndef GFLAGS
#include <cstdio>
int main() {
  fprintf(stderr, "Please install gflags to run rocksdb tools\n");
  return 1;
}
#else

#include <atomic>
#include <iostream>
#include <memory>
#include <thread>
#include <type_traits>
#include <vector>

#include "db/dbformat.h"
#include "db/memtable.h"
#include "memory/arena.h"
#include "port/port.h"
#include "port/stack_trace.h"
#include "rocksdb/comparator.h"
#include "rocksdb/memtablerep.h"
#include "rocksdb/options.h"
#include "rocksdb/slice_transform.h"
#include "rocksdb/write_buffer_manager.h"
#include "test_util/testutil.h"
#include "util/gflags_compat.h"
#include "util/mutexlock.h"
#include "util/stop_watch.h"

using GFLAGS_NAMESPACE::ParseCommandLineFlags;
using GFLAGS_NAMESPACE::RegisterFlagValidator;
using GFLAGS_NAMESPACE::SetUsageMessage;

DEFINE_string(benchmarks, "fillrandom",
              "Comma-separated list of benchmarks to run. Options:\n"
              "\tfillrandom -- write N random values\n"
              "\tfillseq -- write N values in sequential order\n"
              "\treadrandom -- read N values in random order\n"
              "\treadseq -- scan the DB\n"
              "\treadwrite -- 1 thread writes while N - 1 threads "
              "do random\n"
              "\t reads\n"
              "\tseqreadwrite -- 1 thread writes while N - 1 threads "
              "do scans\n");

DEFINE_string(memtablerep, "skiplist",
              "Which implementation of memtablerep to use. See "
              "include/memtablerep.h for\n"
              "  more details. Options:\n"
              "\tskiplist -- backed by a skiplist\n"
              "\tvector -- backed by an std::vector\n"
              "\thashskiplist -- backed by a hash skip list\n"
              "\thashlinklist -- backed by a hash linked list");

DEFINE_int64(bucket_count, 1000000,
             "bucket_count parameter to pass into NewHashSkipListRepFactory "
             "or NewHashLinkListRepFactory");

DEFINE_int32(
    hashskiplist_height, 4,
    "skiplist_height parameter to pass into NewHashSkipListRepFactory");

DEFINE_int32(
    hashskiplist_branching_factor, 4,
    "branching_factor parameter to pass into NewHashSkipListRepFactory");

DEFINE_int32(
    huge_page_tlb_size, 0,
    "huge_page_tlb_size parameter to pass into NewHashLinkListRepFactory");

DEFINE_int32(bucket_entries_logging_threshold, 4096,
             "bucket_entries_logging_threshold parameter to pass into "
             "NewHashLinkListRepFactory");

DEFINE_bool(if_log_bucket_dist_when_flash, true,
            "if_log_bucket_dist_when_flash parameter to pass into "
            "NewHashLinkListRepFactory");

DEFINE_int32(
    threshold_use_skiplist, 256,
    "threshold_use_skiplist parameter to pass into NewHashLinkListRepFactory");

DEFINE_int64(write_buffer_size, 256,
             "write_buffer_size parameter to pass into WriteBufferManager");

DEFINE_int32(
    num_threads, 1,
    "Number of concurrent threads to run. If the benchmark includes writes,\n"
    "then at most one thread will be a writer");

DEFINE_int32(num_operations, 1000000,
             "Number of operations to do for write and random read benchmarks");

DEFINE_int32(num_scans, 10,
             "Number of times for each thread to scan the memtablerep for "
             "sequential read benchmarks");

DEFINE_int32(item_size, 100, "Number of bytes each item should be");

DEFINE_int32(prefix_length, 8,
             "Prefix length to pass into NewFixedPrefixTransform");

/* VectorRep settings */
DEFINE_int64(vectorrep_count, 0,
             "Number of entries to reserve on VectorRep initialization");

DEFINE_int64(seed, 0,
             "Seed base for random number generators. "
             "When 0 it is deterministic.");

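// Example invocation (flag values here are purely illustrative):
//   ./memtablerep_bench --benchmarks=fillrandom,readrandom \
//       --memtablerep=skiplist --num_operations=1000000 --num_threads=4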
namespace ROCKSDB_NAMESPACE {

namespace {
struct CallbackVerifyArgs {
  bool found;
  LookupKey* key;
  MemTableRep* table;
  InternalKeyComparator* comparator;
};
}  // namespace

// Helper for quickly generating random data.
class RandomGenerator {
 private:
  std::string data_;
  unsigned int pos_;

 public:
  RandomGenerator() {
    Random rnd(301);
    auto size = (unsigned)std::max(1048576, FLAGS_item_size);
    data_ = rnd.RandomString(size);
    pos_ = 0;
  }

  Slice Generate(unsigned int len) {
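    // Serve slices out of the pre-filled random buffer (at least 1 MiB),
    // wrapping back to the start when fewer than len bytes remain;
    // overlapping slices across calls are fine for benchmarking purposes.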
    assert(len <= data_.size());
    if (pos_ + len > data_.size()) {
      pos_ = 0;
    }
    pos_ += len;
    return Slice(data_.data() + pos_ - len, len);
  }
};

enum WriteMode { SEQUENTIAL, RANDOM, UNIQUE_RANDOM };

class KeyGenerator {
 public:
  KeyGenerator(Random64* rand, WriteMode mode, uint64_t num)
      : rand_(rand), mode_(mode), num_(num), next_(0) {
    if (mode_ == UNIQUE_RANDOM) {
      // NOTE: if memory consumption of this approach becomes a concern,
      // we can either break it into pieces and only random shuffle a section
      // each time. Alternatively, use a bit map implementation
      // (https://reviews.facebook.net/differential/diff/54627/)
      values_.resize(num_);
      for (uint64_t i = 0; i < num_; ++i) {
        values_[i] = i;
      }
      RandomShuffle(values_.begin(), values_.end(),
                    static_cast<uint32_t>(FLAGS_seed));
    }
  }

  uint64_t Next() {
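    // SEQUENTIAL hands out 0, 1, 2, ...; RANDOM samples uniformly with
    // replacement; UNIQUE_RANDOM walks the pre-shuffled permutation so each
    // key in [0, num_) is produced exactly once.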
    switch (mode_) {
      case SEQUENTIAL:
        return next_++;
      case RANDOM:
        return rand_->Next() % num_;
      case UNIQUE_RANDOM:
        return values_[next_++];
    }
    assert(false);
    return std::numeric_limits<uint64_t>::max();
  }

 private:
  Random64* rand_;
  WriteMode mode_;
  const uint64_t num_;
  uint64_t next_;
  std::vector<uint64_t> values_;
};

class BenchmarkThread {
 public:
  explicit BenchmarkThread(MemTableRep* table, KeyGenerator* key_gen,
                           uint64_t* bytes_written, uint64_t* bytes_read,
                           uint64_t* sequence, uint64_t num_ops,
                           uint64_t* read_hits)
      : table_(table),
        key_gen_(key_gen),
        bytes_written_(bytes_written),
        bytes_read_(bytes_read),
        sequence_(sequence),
        num_ops_(num_ops),
        read_hits_(read_hits) {}

  virtual void operator()() = 0;
  virtual ~BenchmarkThread() {}

 protected:
  MemTableRep* table_;
  KeyGenerator* key_gen_;
  uint64_t* bytes_written_;
  uint64_t* bytes_read_;
  uint64_t* sequence_;
  uint64_t num_ops_;
  uint64_t* read_hits_;
  RandomGenerator generator_;
};

class FillBenchmarkThread : public BenchmarkThread {
 public:
  FillBenchmarkThread(MemTableRep* table, KeyGenerator* key_gen,
                      uint64_t* bytes_written, uint64_t* bytes_read,
                      uint64_t* sequence, uint64_t num_ops, uint64_t* read_hits)
      : BenchmarkThread(table, key_gen, bytes_written, bytes_read, sequence,
                        num_ops, read_hits) {}

  void FillOne() {
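    // Entry layout written into the rep: varint32 internal-key length (16),
    // then an 8-byte user key, an 8-byte sequence number, and
    // FLAGS_item_size bytes of value.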
    char* buf = nullptr;
    auto internal_key_size = 16;
    auto encoded_len =
        FLAGS_item_size + VarintLength(internal_key_size) + internal_key_size;
    KeyHandle handle = table_->Allocate(encoded_len, &buf);
    assert(buf != nullptr);
    char* p = EncodeVarint32(buf, internal_key_size);
    auto key = key_gen_->Next();
    EncodeFixed64(p, key);
    p += 8;
    EncodeFixed64(p, ++(*sequence_));
    p += 8;
    Slice bytes = generator_.Generate(FLAGS_item_size);
    memcpy(p, bytes.data(), FLAGS_item_size);
    p += FLAGS_item_size;
    assert(p == buf + encoded_len);
    table_->Insert(handle);
    *bytes_written_ += encoded_len;
  }

  void operator()() override {
    for (unsigned int i = 0; i < num_ops_; ++i) {
      FillOne();
    }
  }
};

class ConcurrentFillBenchmarkThread : public FillBenchmarkThread {
 public:
  ConcurrentFillBenchmarkThread(MemTableRep* table, KeyGenerator* key_gen,
                                uint64_t* bytes_written, uint64_t* bytes_read,
                                uint64_t* sequence, uint64_t num_ops,
                                uint64_t* read_hits,
                                std::atomic_int* threads_done)
      : FillBenchmarkThread(table, key_gen, bytes_written, bytes_read, sequence,
                            num_ops, read_hits) {
    threads_done_ = threads_done;
  }

  void operator()() override {
    // # of read threads will be total threads - write threads (always 1).
    // Loop until all reads complete.
    while ((*threads_done_).load() < (FLAGS_num_threads - 1)) {
      FillOne();
    }
  }

 private:
  std::atomic_int* threads_done_;
};

class ReadBenchmarkThread : public BenchmarkThread {
 public:
  ReadBenchmarkThread(MemTableRep* table, KeyGenerator* key_gen,
                      uint64_t* bytes_written, uint64_t* bytes_read,
                      uint64_t* sequence, uint64_t num_ops, uint64_t* read_hits)
      : BenchmarkThread(table, key_gen, bytes_written, bytes_read, sequence,
                        num_ops, read_hits) {}

  static bool callback(void* arg, const char* entry) {
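    // Invoked by MemTableRep::Get() for each candidate entry. `entry` holds
    // a varint32 key length followed by the internal key, whose final 8
    // bytes carry the sequence number; the user key is the first
    // key_length - 8 bytes. Returning false tells Get() to stop, so only
    // the first candidate is checked.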
    CallbackVerifyArgs* callback_args = static_cast<CallbackVerifyArgs*>(arg);
    assert(callback_args != nullptr);
    uint32_t key_length;
    const char* key_ptr = GetVarint32Ptr(entry, entry + 5, &key_length);
    if ((callback_args->comparator)
            ->user_comparator()
            ->Equal(Slice(key_ptr, key_length - 8),
                    callback_args->key->user_key())) {
      callback_args->found = true;
    }
    return false;
  }

  void ReadOne() {
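    // Build a LookupKey at the current sequence number and probe the rep;
    // the callback above records whether the user key matched.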
    std::string user_key;
    auto key = key_gen_->Next();
    PutFixed64(&user_key, key);
    LookupKey lookup_key(user_key, *sequence_);
    InternalKeyComparator internal_key_comp(BytewiseComparator());
    CallbackVerifyArgs verify_args;
    verify_args.found = false;
    verify_args.key = &lookup_key;
    verify_args.table = table_;
    verify_args.comparator = &internal_key_comp;
    table_->Get(lookup_key, &verify_args, callback);
    if (verify_args.found) {
      *bytes_read_ += VarintLength(16) + 16 + FLAGS_item_size;
      ++*read_hits_;
    }
  }

  void operator()() override {
    for (unsigned int i = 0; i < num_ops_; ++i) {
      ReadOne();
    }
  }
};

class SeqReadBenchmarkThread : public BenchmarkThread {
 public:
  SeqReadBenchmarkThread(MemTableRep* table, KeyGenerator* key_gen,
                         uint64_t* bytes_written, uint64_t* bytes_read,
                         uint64_t* sequence, uint64_t num_ops,
                         uint64_t* read_hits)
      : BenchmarkThread(table, key_gen, bytes_written, bytes_read, sequence,
                        num_ops, read_hits) {}

  void ReadOneSeq() {
    std::unique_ptr<MemTableRep::Iterator> iter(table_->GetIterator());
    for (iter->SeekToFirst(); iter->Valid(); iter->Next()) {
      // pretend to read the value
      *bytes_read_ += VarintLength(16) + 16 + FLAGS_item_size;
    }
    ++*read_hits_;
  }

  void operator()() override {
    for (unsigned int i = 0; i < num_ops_; ++i) {
      ReadOneSeq();
    }
  }
};

class ConcurrentReadBenchmarkThread : public ReadBenchmarkThread {
 public:
  ConcurrentReadBenchmarkThread(MemTableRep* table, KeyGenerator* key_gen,
                                uint64_t* bytes_written, uint64_t* bytes_read,
                                uint64_t* sequence, uint64_t num_ops,
                                uint64_t* read_hits,
                                std::atomic_int* threads_done)
      : ReadBenchmarkThread(table, key_gen, bytes_written, bytes_read, sequence,
                            num_ops, read_hits) {
    threads_done_ = threads_done;
  }

  void operator()() override {
    for (unsigned int i = 0; i < num_ops_; ++i) {
      ReadOne();
    }
    ++*threads_done_;
  }

 private:
  std::atomic_int* threads_done_;
};

class SeqConcurrentReadBenchmarkThread : public SeqReadBenchmarkThread {
 public:
  SeqConcurrentReadBenchmarkThread(MemTableRep* table, KeyGenerator* key_gen,
                                   uint64_t* bytes_written,
                                   uint64_t* bytes_read, uint64_t* sequence,
                                   uint64_t num_ops, uint64_t* read_hits,
                                   std::atomic_int* threads_done)
      : SeqReadBenchmarkThread(table, key_gen, bytes_written, bytes_read,
                               sequence, num_ops, read_hits) {
    threads_done_ = threads_done;
  }

  void operator()() override {
    for (unsigned int i = 0; i < num_ops_; ++i) {
      ReadOneSeq();
    }
    ++*threads_done_;
  }

 private:
  std::atomic_int* threads_done_;
};

class Benchmark {
 public:
  explicit Benchmark(MemTableRep* table, KeyGenerator* key_gen,
                     uint64_t* sequence, uint32_t num_threads)
      : table_(table),
        key_gen_(key_gen),
        sequence_(sequence),
        num_threads_(num_threads) {}

  virtual ~Benchmark() {}
  virtual void Run() {
    std::cout << "Number of threads: " << num_threads_ << std::endl;
    std::vector<port::Thread> threads;
    uint64_t bytes_written = 0;
    uint64_t bytes_read = 0;
    uint64_t read_hits = 0;
    StopWatchNano timer(Env::Default(), true);
    RunThreads(&threads, &bytes_written, &bytes_read, true, &read_hits);
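    // ElapsedNanos() / 1000 yields elapsed microseconds; the throughput
    // figures below divide MiB by (elapsed_time / 1e6) seconds.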
    auto elapsed_time = static_cast<double>(timer.ElapsedNanos() / 1000);
    std::cout << "Elapsed time: " << static_cast<int>(elapsed_time) << " us"
              << std::endl;

    if (bytes_written > 0) {
      auto MiB_written = static_cast<double>(bytes_written) / (1 << 20);
      auto write_throughput = MiB_written / (elapsed_time / 1000000);
      std::cout << "Total bytes written: " << MiB_written << " MiB"
                << std::endl;
      std::cout << "Write throughput: " << write_throughput << " MiB/s"
                << std::endl;
      auto us_per_op = elapsed_time / num_write_ops_per_thread_;
      std::cout << "write us/op: " << us_per_op << std::endl;
    }
    if (bytes_read > 0) {
      auto MiB_read = static_cast<double>(bytes_read) / (1 << 20);
      auto read_throughput = MiB_read / (elapsed_time / 1000000);
      std::cout << "Total bytes read: " << MiB_read << " MiB" << std::endl;
      std::cout << "Read throughput: " << read_throughput << " MiB/s"
                << std::endl;
      auto us_per_op = elapsed_time / num_read_ops_per_thread_;
      std::cout << "read us/op: " << us_per_op << std::endl;
    }
  }

  virtual void RunThreads(std::vector<port::Thread>* threads,
                          uint64_t* bytes_written, uint64_t* bytes_read,
                          bool write, uint64_t* read_hits) = 0;

 protected:
  MemTableRep* table_;
  KeyGenerator* key_gen_;
  uint64_t* sequence_;
  uint64_t num_write_ops_per_thread_ = 0;
  uint64_t num_read_ops_per_thread_ = 0;
  const uint32_t num_threads_;
};

class FillBenchmark : public Benchmark {
 public:
  explicit FillBenchmark(MemTableRep* table, KeyGenerator* key_gen,
                         uint64_t* sequence)
      : Benchmark(table, key_gen, sequence, 1) {
    num_write_ops_per_thread_ = FLAGS_num_operations;
  }

  void RunThreads(std::vector<port::Thread>* /*threads*/,
                  uint64_t* bytes_written, uint64_t* bytes_read,
                  bool /*write*/, uint64_t* read_hits) override {
    FillBenchmarkThread(table_, key_gen_, bytes_written, bytes_read, sequence_,
                        num_write_ops_per_thread_, read_hits)();
  }
};

class ReadBenchmark : public Benchmark {
 public:
  explicit ReadBenchmark(MemTableRep* table, KeyGenerator* key_gen,
                         uint64_t* sequence)
      : Benchmark(table, key_gen, sequence, FLAGS_num_threads) {
    num_read_ops_per_thread_ = FLAGS_num_operations / FLAGS_num_threads;
  }

  void RunThreads(std::vector<port::Thread>* threads, uint64_t* bytes_written,
                  uint64_t* bytes_read, bool /*write*/,
                  uint64_t* read_hits) override {
    for (int i = 0; i < FLAGS_num_threads; ++i) {
      threads->emplace_back(
          ReadBenchmarkThread(table_, key_gen_, bytes_written, bytes_read,
                              sequence_, num_read_ops_per_thread_, read_hits));
    }
    for (auto& thread : *threads) {
      thread.join();
    }
    std::cout << "read hit%: "
              << (static_cast<double>(*read_hits) / FLAGS_num_operations) * 100
              << std::endl;
  }
};

class SeqReadBenchmark : public Benchmark {
 public:
  explicit SeqReadBenchmark(MemTableRep* table, uint64_t* sequence)
      : Benchmark(table, nullptr, sequence, FLAGS_num_threads) {
    num_read_ops_per_thread_ = FLAGS_num_scans;
  }

  void RunThreads(std::vector<port::Thread>* threads, uint64_t* bytes_written,
                  uint64_t* bytes_read, bool /*write*/,
                  uint64_t* read_hits) override {
    for (int i = 0; i < FLAGS_num_threads; ++i) {
      threads->emplace_back(SeqReadBenchmarkThread(
          table_, key_gen_, bytes_written, bytes_read, sequence_,
          num_read_ops_per_thread_, read_hits));
    }
    for (auto& thread : *threads) {
      thread.join();
    }
  }
};

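// One ConcurrentFillBenchmarkThread writes until every reader has finished;
// the remaining FLAGS_num_threads - 1 threads run ReadThreadType
// concurrently, signalling completion through the shared threads_done
// counter.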
template <class ReadThreadType>
class ReadWriteBenchmark : public Benchmark {
 public:
  explicit ReadWriteBenchmark(MemTableRep* table, KeyGenerator* key_gen,
                              uint64_t* sequence)
      : Benchmark(table, key_gen, sequence, FLAGS_num_threads) {
    num_read_ops_per_thread_ =
        FLAGS_num_threads <= 1
            ? 0
            : (FLAGS_num_operations / (FLAGS_num_threads - 1));
    num_write_ops_per_thread_ = FLAGS_num_operations;
  }

  void RunThreads(std::vector<port::Thread>* threads, uint64_t* bytes_written,
                  uint64_t* bytes_read, bool /*write*/,
                  uint64_t* read_hits) override {
    std::atomic_int threads_done;
    threads_done.store(0);
    threads->emplace_back(ConcurrentFillBenchmarkThread(
        table_, key_gen_, bytes_written, bytes_read, sequence_,
        num_write_ops_per_thread_, read_hits, &threads_done));
    for (int i = 1; i < FLAGS_num_threads; ++i) {
      threads->emplace_back(
          ReadThreadType(table_, key_gen_, bytes_written, bytes_read, sequence_,
                         num_read_ops_per_thread_, read_hits, &threads_done));
    }
    for (auto& thread : *threads) {
      thread.join();
    }
  }
};

}  // namespace ROCKSDB_NAMESPACE

void PrintWarnings() {
#if defined(__GNUC__) && !defined(__OPTIMIZE__)
  fprintf(stdout,
          "WARNING: Optimization is disabled: benchmarks unnecessarily slow\n");
#endif
#ifndef NDEBUG
  fprintf(stdout,
          "WARNING: Assertions are enabled; benchmarks unnecessarily slow\n");
#endif
}

int main(int argc, char** argv) {
  ROCKSDB_NAMESPACE::port::InstallStackTraceHandler();
  SetUsageMessage(std::string("\nUSAGE:\n") + std::string(argv[0]) +
                  " [OPTIONS]...");
  ParseCommandLineFlags(&argc, &argv, true);

  PrintWarnings();

  ROCKSDB_NAMESPACE::Options options;

  std::unique_ptr<ROCKSDB_NAMESPACE::MemTableRepFactory> factory;
  if (FLAGS_memtablerep == "skiplist") {
    factory.reset(new ROCKSDB_NAMESPACE::SkipListFactory);
#ifndef ROCKSDB_LITE
  } else if (FLAGS_memtablerep == "vector") {
    factory.reset(new ROCKSDB_NAMESPACE::VectorRepFactory);
  } else if (FLAGS_memtablerep == "hashskiplist") {
    factory.reset(ROCKSDB_NAMESPACE::NewHashSkipListRepFactory(
        FLAGS_bucket_count, FLAGS_hashskiplist_height,
        FLAGS_hashskiplist_branching_factor));
    options.prefix_extractor.reset(
        ROCKSDB_NAMESPACE::NewFixedPrefixTransform(FLAGS_prefix_length));
  } else if (FLAGS_memtablerep == "hashlinklist") {
    factory.reset(ROCKSDB_NAMESPACE::NewHashLinkListRepFactory(
        FLAGS_bucket_count, FLAGS_huge_page_tlb_size,
        FLAGS_bucket_entries_logging_threshold,
        FLAGS_if_log_bucket_dist_when_flash, FLAGS_threshold_use_skiplist));
    options.prefix_extractor.reset(
        ROCKSDB_NAMESPACE::NewFixedPrefixTransform(FLAGS_prefix_length));
#endif  // ROCKSDB_LITE
  } else {
    fprintf(stdout, "Unknown memtablerep: %s\n", FLAGS_memtablerep.c_str());
    exit(1);
  }

  ROCKSDB_NAMESPACE::InternalKeyComparator internal_key_comp(
      ROCKSDB_NAMESPACE::BytewiseComparator());
  ROCKSDB_NAMESPACE::MemTable::KeyComparator key_comp(internal_key_comp);
  ROCKSDB_NAMESPACE::Arena arena;
  ROCKSDB_NAMESPACE::WriteBufferManager wb(FLAGS_write_buffer_size);
  uint64_t sequence;
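  // Each benchmark run gets a fresh memtable rep from the chosen factory,
  // with the shared sequence counter reset to zero.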
  auto createMemtableRep = [&] {
    sequence = 0;
    return factory->CreateMemTableRep(key_comp, &arena,
                                      options.prefix_extractor.get(),
                                      options.info_log.get());
  };
  std::unique_ptr<ROCKSDB_NAMESPACE::MemTableRep> memtablerep;
  ROCKSDB_NAMESPACE::Random64 rng(FLAGS_seed);
  const char* benchmarks = FLAGS_benchmarks.c_str();
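  // Walk the comma-separated --benchmarks list; each iteration peels off one
  // name and runs the matching benchmark. Read-only benchmarks reuse the rep
  // left behind by the preceding fill benchmark.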
  while (benchmarks != nullptr) {
    std::unique_ptr<ROCKSDB_NAMESPACE::KeyGenerator> key_gen;
    const char* sep = strchr(benchmarks, ',');
    ROCKSDB_NAMESPACE::Slice name;
    if (sep == nullptr) {
      name = benchmarks;
      benchmarks = nullptr;
    } else {
      name = ROCKSDB_NAMESPACE::Slice(benchmarks, sep - benchmarks);
      benchmarks = sep + 1;
    }
    std::unique_ptr<ROCKSDB_NAMESPACE::Benchmark> benchmark;
    if (name == ROCKSDB_NAMESPACE::Slice("fillseq")) {
      memtablerep.reset(createMemtableRep());
      key_gen.reset(new ROCKSDB_NAMESPACE::KeyGenerator(
          &rng, ROCKSDB_NAMESPACE::SEQUENTIAL, FLAGS_num_operations));
      benchmark.reset(new ROCKSDB_NAMESPACE::FillBenchmark(
          memtablerep.get(), key_gen.get(), &sequence));
    } else if (name == ROCKSDB_NAMESPACE::Slice("fillrandom")) {
      memtablerep.reset(createMemtableRep());
      key_gen.reset(new ROCKSDB_NAMESPACE::KeyGenerator(
          &rng, ROCKSDB_NAMESPACE::UNIQUE_RANDOM, FLAGS_num_operations));
      benchmark.reset(new ROCKSDB_NAMESPACE::FillBenchmark(
          memtablerep.get(), key_gen.get(), &sequence));
    } else if (name == ROCKSDB_NAMESPACE::Slice("readrandom")) {
      key_gen.reset(new ROCKSDB_NAMESPACE::KeyGenerator(
          &rng, ROCKSDB_NAMESPACE::RANDOM, FLAGS_num_operations));
      benchmark.reset(new ROCKSDB_NAMESPACE::ReadBenchmark(
          memtablerep.get(), key_gen.get(), &sequence));
    } else if (name == ROCKSDB_NAMESPACE::Slice("readseq")) {
      key_gen.reset(new ROCKSDB_NAMESPACE::KeyGenerator(
          &rng, ROCKSDB_NAMESPACE::SEQUENTIAL, FLAGS_num_operations));
      benchmark.reset(new ROCKSDB_NAMESPACE::SeqReadBenchmark(memtablerep.get(),
                                                              &sequence));
    } else if (name == ROCKSDB_NAMESPACE::Slice("readwrite")) {
      memtablerep.reset(createMemtableRep());
      key_gen.reset(new ROCKSDB_NAMESPACE::KeyGenerator(
          &rng, ROCKSDB_NAMESPACE::RANDOM, FLAGS_num_operations));
      benchmark.reset(new ROCKSDB_NAMESPACE::ReadWriteBenchmark<
                      ROCKSDB_NAMESPACE::ConcurrentReadBenchmarkThread>(
          memtablerep.get(), key_gen.get(), &sequence));
    } else if (name == ROCKSDB_NAMESPACE::Slice("seqreadwrite")) {
      memtablerep.reset(createMemtableRep());
      key_gen.reset(new ROCKSDB_NAMESPACE::KeyGenerator(
          &rng, ROCKSDB_NAMESPACE::RANDOM, FLAGS_num_operations));
      benchmark.reset(new ROCKSDB_NAMESPACE::ReadWriteBenchmark<
                      ROCKSDB_NAMESPACE::SeqConcurrentReadBenchmarkThread>(
          memtablerep.get(), key_gen.get(), &sequence));
    } else {
      std::cout << "WARNING: skipping unknown benchmark '" << name.ToString()
                << "'" << std::endl;
      continue;
    }
    std::cout << "Running " << name.ToString() << std::endl;
    benchmark->Run();
  }

  return 0;
}

#endif  // GFLAGS