1 // Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
2 // This source code is licensed under both the GPLv2 (found in the
3 // COPYING file in the root directory) and Apache 2.0 License
4 // (found in the LICENSE.Apache file in the root directory).
10 #include "benchmark/benchmark.h"
11 #include "db/db_impl/db_impl.h"
12 #include "rocksdb/db.h"
13 #include "rocksdb/filter_policy.h"
14 #include "rocksdb/options.h"
15 #include "table/block_based/block.h"
16 #include "table/block_based/block_builder.h"
17 #include "util/random.h"
18 #include "utilities/merge_operators.h"
20 namespace ROCKSDB_NAMESPACE
{
25 // buff: the caller needs to make sure there's enough space for generated key
26 // offset: to control the group of the key, 0 means normal key, 1 means
27 // non-existing key, 2 is reserved prefix_only: only return a prefix
28 Slice
Next(char* buff
, int8_t offset
= 0, bool prefix_only
= false) {
29 assert(max_key_
< std::numeric_limits
<uint32_t>::max() /
30 MULTIPLIER
); // TODO: add large key support
34 assert(next_sequential_key_
< max_key_
);
35 k
= (next_sequential_key_
% max_key_
) * MULTIPLIER
+ offset
;
36 if (next_sequential_key_
+ 1 == max_key_
) {
37 next_sequential_key_
= 0;
39 next_sequential_key_
++;
42 k
= (rnd_
->Next() % max_key_
) * MULTIPLIER
+ offset
;
44 // TODO: make sure the buff is large enough
45 memset(buff
, 0, key_size_
);
46 if (prefix_num_
> 0) {
47 uint32_t prefix
= (k
% prefix_num_
) * MULTIPLIER
+ offset
;
50 return {buff
, prefix_size_
};
53 Encode(buff
+ prefix_size_
, k
);
54 return {buff
, key_size_
};
57 // use internal buffer for generated key, make sure there's only one caller in
59 Slice
Next() { return Next(buff_
); }
61 // user internal buffer for generated prefix
63 assert(prefix_num_
> 0);
64 return Next(buff_
, 0, true);
67 // helper function to get non exist key
68 Slice
NextNonExist() { return Next(buff_
, 1); }
70 Slice
MaxKey(char* buff
) const {
71 memset(buff
, 0xff, key_size_
);
72 return {buff
, key_size_
};
75 Slice
MinKey(char* buff
) const {
76 memset(buff
, 0, key_size_
);
77 return {buff
, key_size_
};
80 // max_key: the max key that it could generate
81 // prefix_num: the max prefix number
83 explicit KeyGenerator(Random
* rnd
, uint64_t max_key
= 100 * 1024 * 1024,
84 size_t prefix_num
= 0, size_t key_size
= 10) {
85 prefix_num_
= prefix_num
;
90 prefix_size_
= 4; // TODO: support different prefix_size
94 // generate sequential keys
95 explicit KeyGenerator(uint64_t max_key
= 100 * 1024 * 1024,
96 size_t key_size
= 10) {
100 is_sequential_
= true;
size_t prefix_num_ = 0;   // number of distinct prefixes (0 = no prefix mode)
size_t prefix_size_ = 0;  // bytes reserved at the front of each key
size_t key_size_;         // total generated key size in bytes
uint64_t max_key_;        // number of distinct keys that can be generated
bool is_sequential_ = false;
uint32_t next_sequential_key_ = 0;
char buff_[256] = {0};  // shared scratch buffer for Next()/NextNonExist()
// Keys are spaced MULTIPLIER apart so offset groups (e.g. non-existing keys
// at offset 1) never collide with normal keys at offset 0.
const int MULTIPLIER = 3;
114 void static Encode(char* buf
, uint32_t value
) {
115 if (port::kLittleEndian
) {
116 buf
[0] = static_cast<char>((value
>> 24) & 0xff);
117 buf
[1] = static_cast<char>((value
>> 16) & 0xff);
118 buf
[2] = static_cast<char>((value
>> 8) & 0xff);
119 buf
[3] = static_cast<char>(value
& 0xff);
121 memcpy(buf
, &value
, sizeof(value
));
126 static void SetupDB(benchmark::State
& state
, Options
& options
,
127 std::unique_ptr
<DB
>* db
,
128 const std::string
& test_name
= "") {
129 options
.create_if_missing
= true;
130 auto env
= Env::Default();
132 Status s
= env
->GetTestDirectory(&db_path
);
134 state
.SkipWithError(s
.ToString().c_str());
137 std::string db_name
=
138 db_path
+ kFilePathSeparator
+ test_name
+ std::to_string(getpid());
139 DestroyDB(db_name
, options
);
141 DB
* db_ptr
= nullptr;
142 s
= DB::Open(options
, db_name
, &db_ptr
);
144 state
.SkipWithError(s
.ToString().c_str());
150 static void TeardownDB(benchmark::State
& state
, const std::unique_ptr
<DB
>& db
,
151 const Options
& options
, KeyGenerator
& kg
) {
152 char min_buff
[256], max_buff
[256];
153 const Range
r(kg
.MinKey(min_buff
), kg
.MaxKey(max_buff
));
155 Status s
= db
->GetApproximateSizes(&r
, 1, &size
);
157 state
.SkipWithError(s
.ToString().c_str());
159 state
.counters
["db_size"] = static_cast<double>(size
);
161 std::string db_name
= db
->GetName();
164 state
.SkipWithError(s
.ToString().c_str());
166 DestroyDB(db_name
, options
);
169 static void DBOpen(benchmark::State
& state
) {
171 std::unique_ptr
<DB
> db
;
173 SetupDB(state
, options
, &db
, "DBOpen");
175 std::string db_name
= db
->GetName();
178 options
.create_if_missing
= false;
180 auto rnd
= Random(123);
182 for (auto _
: state
) {
184 DB
* db_ptr
= nullptr;
185 Status s
= DB::Open(options
, db_name
, &db_ptr
);
187 state
.SkipWithError(s
.ToString().c_str());
192 auto wo
= WriteOptions();
194 for (int i
= 0; i
< 2; i
++) {
195 for (int j
= 0; j
< 100; j
++) {
196 s
= db
->Put(wo
, rnd
.RandomString(10), rnd
.RandomString(100));
198 state
.SkipWithError(s
.ToString().c_str());
201 s
= db
->Flush(FlushOptions());
204 state
.SkipWithError(s
.ToString().c_str());
208 state
.SkipWithError(s
.ToString().c_str());
210 state
.ResumeTiming();
212 DestroyDB(db_name
, options
);
215 BENCHMARK(DBOpen
)->Iterations(200); // specify iteration number as the db size
216 // is impacted by iteration number
218 static void DBClose(benchmark::State
& state
) {
220 std::unique_ptr
<DB
> db
;
222 SetupDB(state
, options
, &db
, "DBClose");
224 std::string db_name
= db
->GetName();
227 options
.create_if_missing
= false;
229 auto rnd
= Random(12345);
231 for (auto _
: state
) {
234 DB
* db_ptr
= nullptr;
235 Status s
= DB::Open(options
, db_name
, &db_ptr
);
237 state
.SkipWithError(s
.ToString().c_str());
241 auto wo
= WriteOptions();
243 for (int i
= 0; i
< 2; i
++) {
244 for (int j
= 0; j
< 100; j
++) {
245 s
= db
->Put(wo
, rnd
.RandomString(10), rnd
.RandomString(100));
247 state
.SkipWithError(s
.ToString().c_str());
250 s
= db
->Flush(FlushOptions());
253 state
.SkipWithError(s
.ToString().c_str());
255 state
.ResumeTiming();
258 state
.SkipWithError(s
.ToString().c_str());
261 DestroyDB(db_name
, options
);
264 BENCHMARK(DBClose
)->Iterations(200); // specify iteration number as the db size
265 // is impacted by iteration number
267 static void DBPut(benchmark::State
& state
) {
268 auto compaction_style
= static_cast<CompactionStyle
>(state
.range(0));
269 uint64_t max_data
= state
.range(1);
270 uint64_t per_key_size
= state
.range(2);
271 bool enable_statistics
= state
.range(3);
272 bool enable_wal
= state
.range(4);
273 uint64_t key_num
= max_data
/ per_key_size
;
276 static std::unique_ptr
<DB
> db
= nullptr;
278 if (enable_statistics
) {
279 options
.statistics
= CreateDBStatistics();
281 options
.compaction_style
= compaction_style
;
283 auto rnd
= Random(301 + state
.thread_index());
284 KeyGenerator
kg(&rnd
, key_num
);
286 if (state
.thread_index() == 0) {
287 SetupDB(state
, options
, &db
, "DBPut");
290 auto wo
= WriteOptions();
291 wo
.disableWAL
= !enable_wal
;
293 for (auto _
: state
) {
295 Slice key
= kg
.Next();
296 std::string val
= rnd
.RandomString(static_cast<int>(per_key_size
));
297 state
.ResumeTiming();
298 Status s
= db
->Put(wo
, key
, val
);
300 state
.SkipWithError(s
.ToString().c_str());
304 if (state
.thread_index() == 0) {
305 auto db_full
= static_cast_with_check
<DBImpl
>(db
.get());
306 Status s
= db_full
->WaitForCompact(true);
308 state
.SkipWithError(s
.ToString().c_str());
311 if (enable_statistics
) {
312 HistogramData histogram_data
;
313 options
.statistics
->histogramData(DB_WRITE
, &histogram_data
);
314 state
.counters
["put_mean"] = histogram_data
.average
* std::milli::den
;
315 state
.counters
["put_p95"] = histogram_data
.percentile95
* std::milli::den
;
316 state
.counters
["put_p99"] = histogram_data
.percentile99
* std::milli::den
;
319 TeardownDB(state
, db
, options
, kg
);
323 static void DBPutArguments(benchmark::internal::Benchmark
* b
) {
324 for (int comp_style
: {kCompactionStyleLevel
, kCompactionStyleUniversal
,
325 kCompactionStyleFIFO
}) {
326 for (int64_t max_data
: {100l << 30}) {
327 for (int64_t per_key_size
: {256, 1024}) {
328 for (bool enable_statistics
: {false, true}) {
329 for (bool wal
: {false, true}) {
331 {comp_style
, max_data
, per_key_size
, enable_statistics
, wal
});
338 {"comp_style", "max_data", "per_key_size", "enable_statistics", "wal"});
341 static const uint64_t DBPutNum
= 409600l;
342 BENCHMARK(DBPut
)->Threads(1)->Iterations(DBPutNum
)->Apply(DBPutArguments
);
343 BENCHMARK(DBPut
)->Threads(8)->Iterations(DBPutNum
/ 8)->Apply(DBPutArguments
);
345 static void ManualCompaction(benchmark::State
& state
) {
346 auto compaction_style
= static_cast<CompactionStyle
>(state
.range(0));
347 uint64_t max_data
= state
.range(1);
348 uint64_t per_key_size
= state
.range(2);
349 bool enable_statistics
= state
.range(3);
350 uint64_t key_num
= max_data
/ per_key_size
;
353 static std::unique_ptr
<DB
> db
;
355 if (enable_statistics
) {
356 options
.statistics
= CreateDBStatistics();
358 options
.compaction_style
= compaction_style
;
359 // No auto compaction
360 options
.disable_auto_compactions
= true;
361 options
.level0_file_num_compaction_trigger
= (1 << 30);
362 options
.level0_slowdown_writes_trigger
= (1 << 30);
363 options
.level0_stop_writes_trigger
= (1 << 30);
364 options
.soft_pending_compaction_bytes_limit
= 0;
365 options
.hard_pending_compaction_bytes_limit
= 0;
367 auto rnd
= Random(301 + state
.thread_index());
368 KeyGenerator
kg(&rnd
, key_num
);
370 if (state
.thread_index() == 0) {
371 SetupDB(state
, options
, &db
, "ManualCompaction");
374 auto wo
= WriteOptions();
375 wo
.disableWAL
= true;
376 uint64_t flush_mod
= key_num
/ 4; // at least generate 4 files for compaction
377 for (uint64_t i
= 0; i
< key_num
; i
++) {
378 Status s
= db
->Put(wo
, kg
.Next(),
379 rnd
.RandomString(static_cast<int>(per_key_size
)));
381 state
.SkipWithError(s
.ToString().c_str());
383 if (i
+ 1 % flush_mod
== 0) {
384 s
= db
->Flush(FlushOptions());
388 Status s
= db
->Flush(fo
);
390 state
.SkipWithError(s
.ToString().c_str());
392 std::vector
<LiveFileMetaData
> files_meta
;
393 db
->GetLiveFilesMetaData(&files_meta
);
394 std::vector
<std::string
> files_before_compact
;
395 files_before_compact
.reserve(files_meta
.size());
396 for (const LiveFileMetaData
& file
: files_meta
) {
397 files_before_compact
.emplace_back(file
.name
);
400 SetPerfLevel(kEnableTime
);
401 get_perf_context()->EnablePerLevelPerfContext();
402 get_perf_context()->Reset();
403 CompactionOptions co
;
404 for (auto _
: state
) {
405 s
= db
->CompactFiles(co
, files_before_compact
, 1);
407 state
.SkipWithError(s
.ToString().c_str());
411 if (state
.thread_index() == 0) {
412 auto db_full
= static_cast_with_check
<DBImpl
>(db
.get());
413 s
= db_full
->WaitForCompact(true);
415 state
.SkipWithError(s
.ToString().c_str());
418 if (enable_statistics
) {
419 HistogramData histogram_data
;
420 options
.statistics
->histogramData(COMPACTION_TIME
, &histogram_data
);
421 state
.counters
["comp_time"] = histogram_data
.average
;
422 options
.statistics
->histogramData(COMPACTION_CPU_TIME
, &histogram_data
);
423 state
.counters
["comp_cpu_time"] = histogram_data
.average
;
424 options
.statistics
->histogramData(COMPACTION_OUTFILE_SYNC_MICROS
,
426 state
.counters
["comp_outfile_sync"] = histogram_data
.average
;
428 state
.counters
["comp_read"] = static_cast<double>(
429 options
.statistics
->getTickerCount(COMPACT_READ_BYTES
));
430 state
.counters
["comp_write"] = static_cast<double>(
431 options
.statistics
->getTickerCount(COMPACT_WRITE_BYTES
));
433 state
.counters
["user_key_comparison_count"] =
434 static_cast<double>(get_perf_context()->user_key_comparison_count
);
435 state
.counters
["block_read_count"] =
436 static_cast<double>(get_perf_context()->block_read_count
);
437 state
.counters
["block_read_time"] =
438 static_cast<double>(get_perf_context()->block_read_time
);
439 state
.counters
["block_checksum_time"] =
440 static_cast<double>(get_perf_context()->block_checksum_time
);
441 state
.counters
["new_table_block_iter_nanos"] =
442 static_cast<double>(get_perf_context()->new_table_block_iter_nanos
);
443 state
.counters
["new_table_iterator_nanos"] =
444 static_cast<double>(get_perf_context()->new_table_iterator_nanos
);
445 state
.counters
["find_table_nanos"] =
446 static_cast<double>(get_perf_context()->find_table_nanos
);
449 TeardownDB(state
, db
, options
, kg
);
453 static void ManualCompactionArguments(benchmark::internal::Benchmark
* b
) {
454 for (int comp_style
: {kCompactionStyleLevel
, kCompactionStyleUniversal
}) {
455 for (int64_t max_data
: {32l << 20, 128l << 20}) {
456 for (int64_t per_key_size
: {256, 1024}) {
457 for (bool enable_statistics
: {false, true}) {
458 b
->Args({comp_style
, max_data
, per_key_size
, enable_statistics
});
463 b
->ArgNames({"comp_style", "max_data", "per_key_size", "enable_statistics"});
466 BENCHMARK(ManualCompaction
)->Iterations(1)->Apply(ManualCompactionArguments
);
468 static void ManualFlush(benchmark::State
& state
) {
469 uint64_t key_num
= state
.range(0);
470 uint64_t per_key_size
= state
.range(1);
471 bool enable_statistics
= true;
474 static std::unique_ptr
<DB
> db
;
476 if (enable_statistics
) {
477 options
.statistics
= CreateDBStatistics();
479 options
.disable_auto_compactions
= true;
480 options
.level0_file_num_compaction_trigger
= (1 << 30);
481 options
.level0_slowdown_writes_trigger
= (1 << 30);
482 options
.level0_stop_writes_trigger
= (1 << 30);
483 options
.soft_pending_compaction_bytes_limit
= 0;
484 options
.hard_pending_compaction_bytes_limit
= 0;
485 options
.write_buffer_size
= 2l << 30; // 2G to avoid auto flush
487 auto rnd
= Random(301 + state
.thread_index());
488 KeyGenerator
kg(&rnd
, key_num
);
490 if (state
.thread_index() == 0) {
491 SetupDB(state
, options
, &db
, "ManualFlush");
494 auto wo
= WriteOptions();
495 for (auto _
: state
) {
497 for (uint64_t i
= 0; i
< key_num
; i
++) {
498 Status s
= db
->Put(wo
, kg
.Next(),
499 rnd
.RandomString(static_cast<int>(per_key_size
)));
502 state
.ResumeTiming();
503 Status s
= db
->Flush(fo
);
505 state
.SkipWithError(s
.ToString().c_str());
509 if (state
.thread_index() == 0) {
510 auto db_full
= static_cast_with_check
<DBImpl
>(db
.get());
511 Status s
= db_full
->WaitForCompact(true);
513 state
.SkipWithError(s
.ToString().c_str());
516 if (enable_statistics
) {
517 HistogramData histogram_data
;
518 options
.statistics
->histogramData(FLUSH_TIME
, &histogram_data
);
519 state
.counters
["flush_time"] = histogram_data
.average
;
520 state
.counters
["flush_write_bytes"] = static_cast<double>(
521 options
.statistics
->getTickerCount(FLUSH_WRITE_BYTES
));
524 TeardownDB(state
, db
, options
, kg
);
528 static void ManualFlushArguments(benchmark::internal::Benchmark
* b
) {
529 for (int64_t key_num
: {1l << 10, 8l << 10, 64l << 10}) {
530 for (int64_t per_key_size
: {256, 1024}) {
531 b
->Args({key_num
, per_key_size
});
534 b
->ArgNames({"key_num", "per_key_size"});
537 BENCHMARK(ManualFlush
)->Iterations(1)->Apply(ManualFlushArguments
);
539 static void DBGet(benchmark::State
& state
) {
540 auto compaction_style
= static_cast<CompactionStyle
>(state
.range(0));
541 uint64_t max_data
= state
.range(1);
542 uint64_t per_key_size
= state
.range(2);
543 bool enable_statistics
= state
.range(3);
544 bool negative_query
= state
.range(4);
545 bool enable_filter
= state
.range(5);
546 bool mmap
= state
.range(6);
547 uint64_t key_num
= max_data
/ per_key_size
;
550 static std::unique_ptr
<DB
> db
;
552 if (enable_statistics
) {
553 options
.statistics
= CreateDBStatistics();
556 options
.allow_mmap_reads
= true;
557 options
.compression
= kNoCompression
;
559 options
.compaction_style
= compaction_style
;
561 BlockBasedTableOptions table_options
;
563 table_options
.filter_policy
.reset(NewBloomFilterPolicy(10, false));
566 table_options
.no_block_cache
= true;
567 table_options
.block_restart_interval
= 1;
569 options
.table_factory
.reset(NewBlockBasedTableFactory(table_options
));
571 auto rnd
= Random(301 + state
.thread_index());
572 KeyGenerator
kg(&rnd
, key_num
);
574 if (state
.thread_index() == 0) {
575 SetupDB(state
, options
, &db
, "DBGet");
578 auto wo
= WriteOptions();
579 wo
.disableWAL
= true;
580 for (uint64_t i
= 0; i
< key_num
; i
++) {
581 Status s
= db
->Put(wo
, kg
.Next(),
582 rnd
.RandomString(static_cast<int>(per_key_size
)));
584 state
.SkipWithError(s
.ToString().c_str());
589 Status s
= db
->Flush(fo
);
591 state
.SkipWithError(s
.ToString().c_str());
594 auto db_full
= static_cast_with_check
<DBImpl
>(db
.get());
595 s
= db_full
->WaitForCompact(true);
597 state
.SkipWithError(s
.ToString().c_str());
602 auto ro
= ReadOptions();
604 ro
.verify_checksums
= false;
606 size_t not_found
= 0;
607 if (negative_query
) {
608 for (auto _
: state
) {
610 Status s
= db
->Get(ro
, kg
.NextNonExist(), &val
);
611 if (s
.IsNotFound()) {
616 for (auto _
: state
) {
618 Status s
= db
->Get(ro
, kg
.Next(), &val
);
619 if (s
.IsNotFound()) {
625 state
.counters
["neg_qu_pct"] = benchmark::Counter(
626 static_cast<double>(not_found
* 100), benchmark::Counter::kAvgIterations
);
628 if (state
.thread_index() == 0) {
629 if (enable_statistics
) {
630 HistogramData histogram_data
;
631 options
.statistics
->histogramData(DB_GET
, &histogram_data
);
632 state
.counters
["get_mean"] = histogram_data
.average
* std::milli::den
;
633 state
.counters
["get_p95"] = histogram_data
.percentile95
* std::milli::den
;
634 state
.counters
["get_p99"] = histogram_data
.percentile99
* std::milli::den
;
637 TeardownDB(state
, db
, options
, kg
);
641 static void DBGetArguments(benchmark::internal::Benchmark
* b
) {
642 for (int comp_style
: {kCompactionStyleLevel
, kCompactionStyleUniversal
,
643 kCompactionStyleFIFO
}) {
644 for (int64_t max_data
: {128l << 20, 512l << 20}) {
645 for (int64_t per_key_size
: {256, 1024}) {
646 for (bool enable_statistics
: {false, true}) {
647 for (bool negative_query
: {false, true}) {
648 for (bool enable_filter
: {false, true}) {
649 for (bool mmap
: {false, true}) {
650 b
->Args({comp_style
, max_data
, per_key_size
, enable_statistics
,
651 negative_query
, enable_filter
, mmap
});
659 b
->ArgNames({"comp_style", "max_data", "per_key_size", "enable_statistics",
660 "negative_query", "enable_filter", "mmap"});
663 static constexpr uint64_t kDBGetNum
= 1l << 20;
664 BENCHMARK(DBGet
)->Threads(1)->Iterations(kDBGetNum
)->Apply(DBGetArguments
);
665 BENCHMARK(DBGet
)->Threads(8)->Iterations(kDBGetNum
/ 8)->Apply(DBGetArguments
);
667 static void SimpleGetWithPerfContext(benchmark::State
& state
) {
669 static std::unique_ptr
<DB
> db
;
672 options
.create_if_missing
= true;
673 options
.arena_block_size
= 8 << 20;
675 auto rnd
= Random(301 + state
.thread_index());
676 KeyGenerator
kg(&rnd
, 1024);
678 if (state
.thread_index() == 0) {
679 auto env
= Env::Default();
681 Status s
= env
->GetTestDirectory(&db_path
);
683 state
.SkipWithError(s
.ToString().c_str());
686 db_name
= db_path
+ "/simple_get_" + std::to_string(getpid());
687 DestroyDB(db_name
, options
);
690 DB
* db_ptr
= nullptr;
691 s
= DB::Open(options
, db_name
, &db_ptr
);
693 state
.SkipWithError(s
.ToString().c_str());
699 auto wo
= WriteOptions();
700 wo
.disableWAL
= true;
701 for (uint64_t i
= 0; i
< 1024; i
++) {
702 s
= db
->Put(wo
, kg
.Next(), rnd
.RandomString(1024));
704 state
.SkipWithError(s
.ToString().c_str());
707 auto db_full
= static_cast_with_check
<DBImpl
>(db
.get());
708 s
= db_full
->WaitForCompact(true);
710 state
.SkipWithError(s
.ToString().c_str());
716 state
.SkipWithError(s
.ToString().c_str());
720 auto ro
= ReadOptions();
721 size_t not_found
= 0;
722 uint64_t user_key_comparison_count
= 0;
723 uint64_t block_read_time
= 0;
724 uint64_t block_checksum_time
= 0;
725 uint64_t get_snapshot_time
= 0;
726 uint64_t get_post_process_time
= 0;
727 uint64_t get_from_output_files_time
= 0;
728 uint64_t new_table_block_iter_nanos
= 0;
729 uint64_t block_seek_nanos
= 0;
730 uint64_t get_cpu_nanos
= 0;
731 uint64_t get_from_table_nanos
= 0;
732 SetPerfLevel(kEnableTime
);
733 get_perf_context()->EnablePerLevelPerfContext();
734 for (auto _
: state
) {
736 get_perf_context()->Reset();
737 Status s
= db
->Get(ro
, kg
.NextNonExist(), &val
);
738 if (s
.IsNotFound()) {
741 user_key_comparison_count
+= get_perf_context()->user_key_comparison_count
;
742 block_read_time
+= get_perf_context()->block_read_time
;
743 block_checksum_time
+= get_perf_context()->block_checksum_time
;
744 get_snapshot_time
+= get_perf_context()->get_snapshot_time
;
745 get_post_process_time
+= get_perf_context()->get_post_process_time
;
746 get_from_output_files_time
+=
747 get_perf_context()->get_from_output_files_time
;
748 new_table_block_iter_nanos
+=
749 get_perf_context()->new_table_block_iter_nanos
;
750 block_seek_nanos
+= get_perf_context()->block_seek_nanos
;
751 get_cpu_nanos
+= get_perf_context()->get_cpu_nanos
;
752 get_from_table_nanos
+=
753 (*(get_perf_context()->level_to_perf_context
))[0].get_from_table_nanos
;
756 state
.counters
["neg_qu_pct"] = benchmark::Counter(
757 static_cast<double>(not_found
* 100), benchmark::Counter::kAvgIterations
);
758 state
.counters
["user_key_comparison_count"] =
759 benchmark::Counter(static_cast<double>(user_key_comparison_count
),
760 benchmark::Counter::kAvgIterations
);
761 state
.counters
["block_read_time"] = benchmark::Counter(
762 static_cast<double>(block_read_time
), benchmark::Counter::kAvgIterations
);
763 state
.counters
["block_checksum_time"] =
764 benchmark::Counter(static_cast<double>(block_checksum_time
),
765 benchmark::Counter::kAvgIterations
);
766 state
.counters
["get_snapshot_time"] =
767 benchmark::Counter(static_cast<double>(get_snapshot_time
),
768 benchmark::Counter::kAvgIterations
);
769 state
.counters
["get_post_process_time"] =
770 benchmark::Counter(static_cast<double>(get_post_process_time
),
771 benchmark::Counter::kAvgIterations
);
772 state
.counters
["get_from_output_files_time"] =
773 benchmark::Counter(static_cast<double>(get_from_output_files_time
),
774 benchmark::Counter::kAvgIterations
);
775 state
.counters
["new_table_block_iter_nanos"] =
776 benchmark::Counter(static_cast<double>(new_table_block_iter_nanos
),
777 benchmark::Counter::kAvgIterations
);
778 state
.counters
["block_seek_nanos"] =
779 benchmark::Counter(static_cast<double>(block_seek_nanos
),
780 benchmark::Counter::kAvgIterations
);
781 state
.counters
["get_cpu_nanos"] = benchmark::Counter(
782 static_cast<double>(get_cpu_nanos
), benchmark::Counter::kAvgIterations
);
783 state
.counters
["get_from_table_nanos"] =
784 benchmark::Counter(static_cast<double>(get_from_table_nanos
),
785 benchmark::Counter::kAvgIterations
);
787 if (state
.thread_index() == 0) {
788 TeardownDB(state
, db
, options
, kg
);
792 BENCHMARK(SimpleGetWithPerfContext
)->Iterations(1000000);
794 static void DBGetMergeOperandsInMemtable(benchmark::State
& state
) {
795 const uint64_t kDataLen
= 16 << 20; // 16MB
796 const uint64_t kValueLen
= 64;
797 const uint64_t kNumEntries
= kDataLen
/ kValueLen
;
798 const uint64_t kNumEntriesPerKey
= state
.range(0);
799 const uint64_t kNumKeys
= kNumEntries
/ kNumEntriesPerKey
;
802 static std::unique_ptr
<DB
> db
;
805 options
.merge_operator
= MergeOperators::CreateStringAppendOperator();
806 // Make memtable large enough that automatic flush will not be triggered.
807 options
.write_buffer_size
= 2 * kDataLen
;
809 KeyGenerator
sequential_key_gen(kNumKeys
);
810 auto rnd
= Random(301 + state
.thread_index());
812 if (state
.thread_index() == 0) {
813 SetupDB(state
, options
, &db
, "DBGetMergeOperandsInMemtable");
816 auto write_opts
= WriteOptions();
817 write_opts
.disableWAL
= true;
818 for (uint64_t i
= 0; i
< kNumEntries
; i
++) {
819 Status s
= db
->Merge(write_opts
, sequential_key_gen
.Next(),
820 rnd
.RandomString(static_cast<int>(kValueLen
)));
822 state
.SkipWithError(s
.ToString().c_str());
827 KeyGenerator
random_key_gen(kNumKeys
);
828 std::vector
<PinnableSlice
> value_operands
;
829 value_operands
.resize(kNumEntriesPerKey
);
830 GetMergeOperandsOptions get_merge_ops_opts
;
831 get_merge_ops_opts
.expected_max_number_of_operands
=
832 static_cast<int>(kNumEntriesPerKey
);
833 for (auto _
: state
) {
834 int num_value_operands
= 0;
835 Status s
= db
->GetMergeOperands(
836 ReadOptions(), db
->DefaultColumnFamily(), random_key_gen
.Next(),
837 value_operands
.data(), &get_merge_ops_opts
, &num_value_operands
);
839 state
.SkipWithError(s
.ToString().c_str());
841 if (num_value_operands
!= static_cast<int>(kNumEntriesPerKey
)) {
842 state
.SkipWithError("Unexpected number of merge operands found for key");
844 for (auto& value_operand
: value_operands
) {
845 value_operand
.Reset();
849 if (state
.thread_index() == 0) {
850 TeardownDB(state
, db
, options
, random_key_gen
);
854 static void DBGetMergeOperandsInSstFile(benchmark::State
& state
) {
855 const uint64_t kDataLen
= 16 << 20; // 16MB
856 const uint64_t kValueLen
= 64;
857 const uint64_t kNumEntries
= kDataLen
/ kValueLen
;
858 const uint64_t kNumEntriesPerKey
= state
.range(0);
859 const uint64_t kNumKeys
= kNumEntries
/ kNumEntriesPerKey
;
860 const bool kMmap
= state
.range(1);
863 static std::unique_ptr
<DB
> db
;
865 BlockBasedTableOptions table_options
;
867 table_options
.no_block_cache
= true;
869 // Make block cache large enough that eviction will not be triggered.
870 table_options
.block_cache
= NewLRUCache(2 * kDataLen
);
875 options
.allow_mmap_reads
= true;
877 options
.compression
= kNoCompression
;
878 options
.merge_operator
= MergeOperators::CreateStringAppendOperator();
879 options
.table_factory
.reset(NewBlockBasedTableFactory(table_options
));
880 // Make memtable large enough that automatic flush will not be triggered.
881 options
.write_buffer_size
= 2 * kDataLen
;
883 KeyGenerator
sequential_key_gen(kNumKeys
);
884 auto rnd
= Random(301 + state
.thread_index());
886 if (state
.thread_index() == 0) {
887 SetupDB(state
, options
, &db
, "DBGetMergeOperandsInBlockCache");
891 // Take a snapshot after each cycle of merges to ensure flush cannot
892 // merge any entries.
893 std::vector
<const Snapshot
*> snapshots
;
894 snapshots
.resize(kNumEntriesPerKey
);
895 auto write_opts
= WriteOptions();
896 write_opts
.disableWAL
= true;
897 for (uint64_t i
= 0; i
< kNumEntriesPerKey
; i
++) {
898 for (uint64_t j
= 0; j
< kNumKeys
; j
++) {
899 Status s
= db
->Merge(write_opts
, sequential_key_gen
.Next(),
900 rnd
.RandomString(static_cast<int>(kValueLen
)));
902 state
.SkipWithError(s
.ToString().c_str());
905 snapshots
[i
] = db
->GetSnapshot();
908 // Flush to an L0 file; read back to prime the cache/mapped memory.
909 db
->Flush(FlushOptions());
910 for (uint64_t i
= 0; i
< kNumKeys
; ++i
) {
912 Status s
= db
->Get(ReadOptions(), sequential_key_gen
.Next(), &value
);
914 state
.SkipWithError(s
.ToString().c_str());
918 if (state
.thread_index() == 0) {
919 for (uint64_t i
= 0; i
< kNumEntriesPerKey
; ++i
) {
920 db
->ReleaseSnapshot(snapshots
[i
]);
925 KeyGenerator
random_key_gen(kNumKeys
);
926 std::vector
<PinnableSlice
> value_operands
;
927 value_operands
.resize(kNumEntriesPerKey
);
928 GetMergeOperandsOptions get_merge_ops_opts
;
929 get_merge_ops_opts
.expected_max_number_of_operands
=
930 static_cast<int>(kNumEntriesPerKey
);
931 for (auto _
: state
) {
932 int num_value_operands
= 0;
933 ReadOptions read_opts
;
934 read_opts
.verify_checksums
= false;
935 Status s
= db
->GetMergeOperands(
936 read_opts
, db
->DefaultColumnFamily(), random_key_gen
.Next(),
937 value_operands
.data(), &get_merge_ops_opts
, &num_value_operands
);
939 state
.SkipWithError(s
.ToString().c_str());
941 if (num_value_operands
!= static_cast<int>(kNumEntriesPerKey
)) {
942 state
.SkipWithError("Unexpected number of merge operands found for key");
944 for (auto& value_operand
: value_operands
) {
945 value_operand
.Reset();
949 if (state
.thread_index() == 0) {
950 TeardownDB(state
, db
, options
, random_key_gen
);
954 static void DBGetMergeOperandsInMemtableArguments(
955 benchmark::internal::Benchmark
* b
) {
956 for (int entries_per_key
: {1, 32, 1024}) {
957 b
->Args({entries_per_key
});
959 b
->ArgNames({"entries_per_key"});
962 static void DBGetMergeOperandsInSstFileArguments(
963 benchmark::internal::Benchmark
* b
) {
964 for (int entries_per_key
: {1, 32, 1024}) {
965 for (bool mmap
: {false, true}) {
966 b
->Args({entries_per_key
, mmap
});
969 b
->ArgNames({"entries_per_key", "mmap"});
972 BENCHMARK(DBGetMergeOperandsInMemtable
)
974 ->Apply(DBGetMergeOperandsInMemtableArguments
);
975 BENCHMARK(DBGetMergeOperandsInMemtable
)
977 ->Apply(DBGetMergeOperandsInMemtableArguments
);
978 BENCHMARK(DBGetMergeOperandsInSstFile
)
980 ->Apply(DBGetMergeOperandsInSstFileArguments
);
981 BENCHMARK(DBGetMergeOperandsInSstFile
)
983 ->Apply(DBGetMergeOperandsInSstFileArguments
);
985 std::string
GenerateKey(int primary_key
, int secondary_key
, int padding_size
,
989 snprintf(buf
, sizeof(buf
), "%6d%4d", primary_key
, secondary_key
);
992 k
+= rnd
->RandomString(padding_size
);
998 void GenerateRandomKVs(std::vector
<std::string
>* keys
,
999 std::vector
<std::string
>* values
, const int from
,
1000 const int len
, const int step
= 1,
1001 const int padding_size
= 0,
1002 const int keys_share_prefix
= 1) {
1005 // generate different prefix
1006 for (int i
= from
; i
< from
+ len
; i
+= step
) {
1007 // generating keys that share the prefix
1008 for (int j
= 0; j
< keys_share_prefix
; ++j
) {
1009 keys
->emplace_back(GenerateKey(i
, j
, padding_size
, &rnd
));
1011 values
->emplace_back(rnd
.RandomString(100));
1016 // TODO: move it to different files, as it's testing an internal API
1017 static void DataBlockSeek(benchmark::State
& state
) {
1019 Options options
= Options();
1021 BlockBuilder
builder(16, true, false,
1022 BlockBasedTableOptions::kDataBlockBinarySearch
);
1024 int num_records
= 500;
1025 std::vector
<std::string
> keys
;
1026 std::vector
<std::string
> values
;
1028 GenerateRandomKVs(&keys
, &values
, 0, num_records
);
1030 for (int i
= 0; i
< num_records
; i
++) {
1031 std::string
ukey(keys
[i
] + "1");
1032 InternalKey
ikey(ukey
, 0, kTypeValue
);
1033 builder
.Add(ikey
.Encode().ToString(), values
[i
]);
1036 Slice rawblock
= builder
.Finish();
1038 BlockContents contents
;
1039 contents
.data
= rawblock
;
1040 Block
reader(std::move(contents
));
1042 SetPerfLevel(kEnableTime
);
1044 for (auto _
: state
) {
1045 DataBlockIter
* iter
= reader
.NewDataIterator(options
.comparator
,
1046 kDisableGlobalSequenceNumber
);
1047 uint32_t index
= rnd
.Uniform(static_cast<int>(num_records
));
1048 std::string
ukey(keys
[index
] + "1");
1049 InternalKey
ikey(ukey
, 0, kTypeValue
);
1050 get_perf_context()->Reset();
1051 bool may_exist
= iter
->SeekForGet(ikey
.Encode().ToString());
1053 state
.SkipWithError("key not found");
1055 total
+= get_perf_context()->block_seek_nanos
;
1058 state
.counters
["seek_ns"] = benchmark::Counter(
1059 static_cast<double>(total
), benchmark::Counter::kAvgIterations
);
1062 BENCHMARK(DataBlockSeek
)->Iterations(1000000);
1064 static void IteratorSeek(benchmark::State
& state
) {
1065 auto compaction_style
= static_cast<CompactionStyle
>(state
.range(0));
1066 uint64_t max_data
= state
.range(1);
1067 uint64_t per_key_size
= state
.range(2);
1068 bool enable_statistics
= state
.range(3);
1069 bool negative_query
= state
.range(4);
1070 bool enable_filter
= state
.range(5);
1071 uint64_t key_num
= max_data
/ per_key_size
;
1074 static std::unique_ptr
<DB
> db
;
1076 if (enable_statistics
) {
1077 options
.statistics
= CreateDBStatistics();
1079 options
.compaction_style
= compaction_style
;
1081 if (enable_filter
) {
1082 BlockBasedTableOptions table_options
;
1083 table_options
.filter_policy
.reset(NewBloomFilterPolicy(10, false));
1084 options
.table_factory
.reset(NewBlockBasedTableFactory(table_options
));
1087 auto rnd
= Random(301 + state
.thread_index());
1088 KeyGenerator
kg(&rnd
, key_num
);
1090 if (state
.thread_index() == 0) {
1091 SetupDB(state
, options
, &db
, "IteratorSeek");
1094 auto wo
= WriteOptions();
1095 wo
.disableWAL
= true;
1096 for (uint64_t i
= 0; i
< key_num
; i
++) {
1097 Status s
= db
->Put(wo
, kg
.Next(),
1098 rnd
.RandomString(static_cast<int>(per_key_size
)));
1100 state
.SkipWithError(s
.ToString().c_str());
1105 Status s
= db
->Flush(fo
);
1107 state
.SkipWithError(s
.ToString().c_str());
1110 auto db_full
= static_cast_with_check
<DBImpl
>(db
.get());
1111 s
= db_full
->WaitForCompact(true);
1113 state
.SkipWithError(s
.ToString().c_str());
1118 for (auto _
: state
) {
1119 std::unique_ptr
<Iterator
> iter
{nullptr};
1120 state
.PauseTiming();
1122 iter
.reset(db
->NewIterator(ReadOptions()));
1124 Slice key
= negative_query
? kg
.NextNonExist() : kg
.Next();
1125 if (!iter
->status().ok()) {
1126 state
.SkipWithError(iter
->status().ToString().c_str());
1129 state
.ResumeTiming();
1133 if (state
.thread_index() == 0) {
1134 TeardownDB(state
, db
, options
, kg
);
1138 static void IteratorSeekArguments(benchmark::internal::Benchmark
* b
) {
1139 for (int comp_style
: {kCompactionStyleLevel
, kCompactionStyleUniversal
,
1140 kCompactionStyleFIFO
}) {
1141 for (int64_t max_data
: {128l << 20, 512l << 20}) {
1142 for (int64_t per_key_size
: {256, 1024}) {
1143 for (bool enable_statistics
: {false, true}) {
1144 for (bool negative_query
: {false, true}) {
1145 for (bool enable_filter
: {false, true}) {
1146 b
->Args({comp_style
, max_data
, per_key_size
, enable_statistics
,
1147 negative_query
, enable_filter
});
1154 b
->ArgNames({"comp_style", "max_data", "per_key_size", "enable_statistics",
1155 "negative_query", "enable_filter"});
1158 static constexpr uint64_t kDBSeekNum
= 10l << 10;
1159 BENCHMARK(IteratorSeek
)
1161 ->Iterations(kDBSeekNum
)
1162 ->Apply(IteratorSeekArguments
);
1163 BENCHMARK(IteratorSeek
)
1165 ->Iterations(kDBSeekNum
/ 8)
1166 ->Apply(IteratorSeekArguments
);
1168 static void IteratorNext(benchmark::State
& state
) {
1169 auto compaction_style
= static_cast<CompactionStyle
>(state
.range(0));
1170 uint64_t max_data
= state
.range(1);
1171 uint64_t per_key_size
= state
.range(2);
1172 uint64_t key_num
= max_data
/ per_key_size
;
1175 static std::unique_ptr
<DB
> db
;
1177 options
.compaction_style
= compaction_style
;
1179 auto rnd
= Random(301 + state
.thread_index());
1180 KeyGenerator
kg(&rnd
, key_num
);
1182 if (state
.thread_index() == 0) {
1183 SetupDB(state
, options
, &db
, "IteratorNext");
1185 auto wo
= WriteOptions();
1186 wo
.disableWAL
= true;
1187 for (uint64_t i
= 0; i
< key_num
; i
++) {
1188 Status s
= db
->Put(wo
, kg
.Next(),
1189 rnd
.RandomString(static_cast<int>(per_key_size
)));
1191 state
.SkipWithError(s
.ToString().c_str());
1196 Status s
= db
->Flush(fo
);
1198 state
.SkipWithError(s
.ToString().c_str());
1201 auto db_full
= static_cast_with_check
<DBImpl
>(db
.get());
1202 s
= db_full
->WaitForCompact(true);
1204 state
.SkipWithError(s
.ToString().c_str());
1209 for (auto _
: state
) {
1210 std::unique_ptr
<Iterator
> iter
{nullptr};
1211 state
.PauseTiming();
1213 iter
.reset(db
->NewIterator(ReadOptions()));
1215 while (!iter
->Valid()) {
1216 iter
->Seek(kg
.Next());
1217 if (!iter
->status().ok()) {
1218 state
.SkipWithError(iter
->status().ToString().c_str());
1221 state
.ResumeTiming();
1225 if (state
.thread_index() == 0) {
1226 TeardownDB(state
, db
, options
, kg
);
1230 static void IteratorNextArguments(benchmark::internal::Benchmark
* b
) {
1231 for (int comp_style
: {kCompactionStyleLevel
, kCompactionStyleUniversal
,
1232 kCompactionStyleFIFO
}) {
1233 for (int64_t max_data
: {128l << 20, 512l << 20}) {
1234 for (int64_t per_key_size
: {256, 1024}) {
1235 b
->Args({comp_style
, max_data
, per_key_size
});
1239 b
->ArgNames({"comp_style", "max_data", "per_key_size"});
1241 static constexpr uint64_t kIteratorNextNum
= 10l << 10;
1242 BENCHMARK(IteratorNext
)
1243 ->Iterations(kIteratorNextNum
)
1244 ->Apply(IteratorNextArguments
);
1246 static void IteratorNextWithPerfContext(benchmark::State
& state
) {
1248 static std::unique_ptr
<DB
> db
;
1251 auto rnd
= Random(301 + state
.thread_index());
1252 KeyGenerator
kg(&rnd
, 1024);
1254 if (state
.thread_index() == 0) {
1255 SetupDB(state
, options
, &db
, "IteratorNextWithPerfContext");
1257 auto wo
= WriteOptions();
1258 wo
.disableWAL
= true;
1259 for (uint64_t i
= 0; i
< 1024; i
++) {
1260 Status s
= db
->Put(wo
, kg
.Next(), rnd
.RandomString(1024));
1262 state
.SkipWithError(s
.ToString().c_str());
1265 auto db_full
= static_cast_with_check
<DBImpl
>(db
.get());
1266 Status s
= db_full
->WaitForCompact(true);
1268 state
.SkipWithError(s
.ToString().c_str());
1274 state
.SkipWithError(s
.ToString().c_str());
1278 uint64_t user_key_comparison_count
= 0;
1279 uint64_t internal_key_skipped_count
= 0;
1280 uint64_t find_next_user_entry_time
= 0;
1281 uint64_t iter_next_cpu_nanos
= 0;
1283 SetPerfLevel(kEnableTime
);
1284 get_perf_context()->EnablePerLevelPerfContext();
1286 for (auto _
: state
) {
1287 std::unique_ptr
<Iterator
> iter
{nullptr};
1288 state
.PauseTiming();
1290 iter
.reset(db
->NewIterator(ReadOptions()));
1292 while (!iter
->Valid()) {
1293 iter
->Seek(kg
.Next());
1294 if (!iter
->status().ok()) {
1295 state
.SkipWithError(iter
->status().ToString().c_str());
1298 get_perf_context()->Reset();
1299 state
.ResumeTiming();
1302 user_key_comparison_count
+= get_perf_context()->user_key_comparison_count
;
1303 internal_key_skipped_count
+=
1304 get_perf_context()->internal_key_skipped_count
;
1305 find_next_user_entry_time
+= get_perf_context()->find_next_user_entry_time
;
1306 iter_next_cpu_nanos
+= get_perf_context()->iter_next_cpu_nanos
;
1309 state
.counters
["user_key_comparison_count"] =
1310 benchmark::Counter(static_cast<double>(user_key_comparison_count
),
1311 benchmark::Counter::kAvgIterations
);
1312 state
.counters
["internal_key_skipped_count"] =
1313 benchmark::Counter(static_cast<double>(internal_key_skipped_count
),
1314 benchmark::Counter::kAvgIterations
);
1315 state
.counters
["find_next_user_entry_time"] =
1316 benchmark::Counter(static_cast<double>(find_next_user_entry_time
),
1317 benchmark::Counter::kAvgIterations
);
1318 state
.counters
["iter_next_cpu_nanos"] =
1319 benchmark::Counter(static_cast<double>(iter_next_cpu_nanos
),
1320 benchmark::Counter::kAvgIterations
);
1322 if (state
.thread_index() == 0) {
1323 TeardownDB(state
, db
, options
, kg
);
1327 BENCHMARK(IteratorNextWithPerfContext
)->Iterations(100000);
1329 static void IteratorPrev(benchmark::State
& state
) {
1330 auto compaction_style
= static_cast<CompactionStyle
>(state
.range(0));
1331 uint64_t max_data
= state
.range(1);
1332 uint64_t per_key_size
= state
.range(2);
1333 uint64_t key_num
= max_data
/ per_key_size
;
1336 static std::unique_ptr
<DB
> db
;
1337 std::string db_name
;
1339 options
.compaction_style
= compaction_style
;
1341 auto rnd
= Random(301 + state
.thread_index());
1342 KeyGenerator
kg(&rnd
, key_num
);
1344 if (state
.thread_index() == 0) {
1345 SetupDB(state
, options
, &db
, "IteratorPrev");
1347 auto wo
= WriteOptions();
1348 wo
.disableWAL
= true;
1349 for (uint64_t i
= 0; i
< key_num
; i
++) {
1350 Status s
= db
->Put(wo
, kg
.Next(),
1351 rnd
.RandomString(static_cast<int>(per_key_size
)));
1353 state
.SkipWithError(s
.ToString().c_str());
1358 Status s
= db
->Flush(fo
);
1360 state
.SkipWithError(s
.ToString().c_str());
1363 auto db_full
= static_cast_with_check
<DBImpl
>(db
.get());
1364 s
= db_full
->WaitForCompact(true);
1366 state
.SkipWithError(s
.ToString().c_str());
1371 for (auto _
: state
) {
1372 std::unique_ptr
<Iterator
> iter
{nullptr};
1373 state
.PauseTiming();
1375 iter
.reset(db
->NewIterator(ReadOptions()));
1377 while (!iter
->Valid()) {
1378 iter
->Seek(kg
.Next());
1379 if (!iter
->status().ok()) {
1380 state
.SkipWithError(iter
->status().ToString().c_str());
1383 state
.ResumeTiming();
1387 if (state
.thread_index() == 0) {
1388 TeardownDB(state
, db
, options
, kg
);
1392 static void IteratorPrevArguments(benchmark::internal::Benchmark
* b
) {
1393 for (int comp_style
: {kCompactionStyleLevel
, kCompactionStyleUniversal
,
1394 kCompactionStyleFIFO
}) {
1395 for (int64_t max_data
: {128l << 20, 512l << 20}) {
1396 for (int64_t per_key_size
: {256, 1024}) {
1397 b
->Args({comp_style
, max_data
, per_key_size
});
1401 b
->ArgNames({"comp_style", "max_data", "per_key_size"});
1404 static constexpr uint64_t kIteratorPrevNum
= 10l << 10;
1405 BENCHMARK(IteratorPrev
)
1406 ->Iterations(kIteratorPrevNum
)
1407 ->Apply(IteratorPrevArguments
);
1409 static void PrefixSeek(benchmark::State
& state
) {
1410 auto compaction_style
= static_cast<CompactionStyle
>(state
.range(0));
1411 uint64_t max_data
= state
.range(1);
1412 uint64_t per_key_size
= state
.range(2);
1413 bool enable_statistics
= state
.range(3);
1414 bool enable_filter
= state
.range(4);
1415 uint64_t key_num
= max_data
/ per_key_size
;
1418 static std::unique_ptr
<DB
> db
;
1420 if (enable_statistics
) {
1421 options
.statistics
= CreateDBStatistics();
1423 options
.compaction_style
= compaction_style
;
1424 options
.prefix_extractor
.reset(NewFixedPrefixTransform(4));
1426 if (enable_filter
) {
1427 BlockBasedTableOptions table_options
;
1428 table_options
.filter_policy
.reset(NewBloomFilterPolicy(10, false));
1429 options
.table_factory
.reset(NewBlockBasedTableFactory(table_options
));
1432 auto rnd
= Random(301 + state
.thread_index());
1433 KeyGenerator
kg(&rnd
, key_num
, key_num
/ 100);
1435 if (state
.thread_index() == 0) {
1436 SetupDB(state
, options
, &db
, "PrefixSeek");
1439 auto wo
= WriteOptions();
1440 wo
.disableWAL
= true;
1441 for (uint64_t i
= 0; i
< key_num
; i
++) {
1442 Status s
= db
->Put(wo
, kg
.Next(),
1443 rnd
.RandomString(static_cast<int>(per_key_size
)));
1445 state
.SkipWithError(s
.ToString().c_str());
1450 Status s
= db
->Flush(fo
);
1452 state
.SkipWithError(s
.ToString().c_str());
1455 auto db_full
= static_cast_with_check
<DBImpl
>(db
.get());
1456 s
= db_full
->WaitForCompact(true);
1458 state
.SkipWithError(s
.ToString().c_str());
1463 for (auto _
: state
) {
1464 std::unique_ptr
<Iterator
> iter
{nullptr};
1465 state
.PauseTiming();
1467 iter
.reset(db
->NewIterator(ReadOptions()));
1469 state
.ResumeTiming();
1470 iter
->Seek(kg
.NextPrefix());
1471 if (!iter
->status().ok()) {
1472 state
.SkipWithError(iter
->status().ToString().c_str());
1477 if (state
.thread_index() == 0) {
1478 TeardownDB(state
, db
, options
, kg
);
1482 static void PrefixSeekArguments(benchmark::internal::Benchmark
* b
) {
1483 for (int comp_style
: {kCompactionStyleLevel
, kCompactionStyleUniversal
,
1484 kCompactionStyleFIFO
}) {
1485 for (int64_t max_data
: {128l << 20, 512l << 20}) {
1486 for (int64_t per_key_size
: {256, 1024}) {
1487 for (bool enable_statistics
: {false, true}) {
1488 for (bool enable_filter
: {false, true}) {
1489 b
->Args({comp_style
, max_data
, per_key_size
, enable_statistics
,
1496 b
->ArgNames({"comp_style", "max_data", "per_key_size", "enable_statistics",
1500 static constexpr uint64_t kPrefixSeekNum
= 10l << 10;
1501 BENCHMARK(PrefixSeek
)->Iterations(kPrefixSeekNum
)->Apply(PrefixSeekArguments
);
1502 BENCHMARK(PrefixSeek
)
1504 ->Iterations(kPrefixSeekNum
/ 8)
1505 ->Apply(PrefixSeekArguments
);
1507 // TODO: move it to different files, as it's testing an internal API
1508 static void RandomAccessFileReaderRead(benchmark::State
& state
) {
1509 bool enable_statistics
= state
.range(0);
1510 constexpr int kFileNum
= 10;
1511 auto env
= Env::Default();
1512 auto fs
= env
->GetFileSystem();
1513 std::string db_path
;
1514 Status s
= env
->GetTestDirectory(&db_path
);
1516 state
.SkipWithError(s
.ToString().c_str());
1520 // Setup multiple `RandomAccessFileReader`s with different parameters to be
1523 std::string fname_base
=
1524 db_path
+ kFilePathSeparator
+ "random-access-file-reader-read";
1525 std::vector
<std::unique_ptr
<RandomAccessFileReader
>> readers
;
1526 auto statistics_share
= CreateDBStatistics();
1527 Statistics
* statistics
= enable_statistics
? statistics_share
.get() : nullptr;
1528 for (int i
= 0; i
< kFileNum
; i
++) {
1529 std::string fname
= fname_base
+ std::to_string(i
);
1530 std::string content
= rand
.RandomString(kDefaultPageSize
);
1531 std::unique_ptr
<WritableFile
> tgt_file
;
1532 env
->NewWritableFile(fname
, &tgt_file
, EnvOptions());
1533 tgt_file
->Append(content
);
1536 std::unique_ptr
<FSRandomAccessFile
> f
;
1537 fs
->NewRandomAccessFile(fname
, FileOptions(), &f
, nullptr);
1538 int rand_num
= rand
.Next() % 3;
1539 auto temperature
= rand_num
== 0 ? Temperature::kUnknown
1540 : rand_num
== 1 ? Temperature::kWarm
1541 : Temperature::kCold
;
1542 readers
.emplace_back(new RandomAccessFileReader(
1543 std::move(f
), fname
, env
->GetSystemClock().get(), nullptr, statistics
,
1544 0, nullptr, nullptr, {}, temperature
, rand_num
== 1));
1547 IOOptions io_options
;
1548 std::unique_ptr
<char[]> scratch(new char[2048]);
1551 for (auto _
: state
) {
1552 s
= readers
[idx
++ % kFileNum
]->Read(io_options
, 0, kDefaultPageSize
/ 3,
1553 &result
, scratch
.get(), nullptr,
1556 state
.SkipWithError(s
.ToString().c_str());
1561 for (int i
= 0; i
< kFileNum
; i
++) {
1562 std::string fname
= fname_base
+ std::to_string(i
);
1563 env
->DeleteFile(fname
); // ignore return, okay to fail cleanup
1567 BENCHMARK(RandomAccessFileReaderRead
)
1568 ->Iterations(1000000)
1571 ->ArgName("enable_statistics");
1573 } // namespace ROCKSDB_NAMESPACE