// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree. An additional grant
// of patent rights can be found in the PATENTS file in the same directory.

#ifndef GFLAGS
#include <cstdio>
int main() {
  fprintf(stderr, "Please install gflags to run rocksdb tools\n");
  return 1;
}
#else

#include <gflags/gflags.h>

#include "db/db_impl.h"
#include "db/dbformat.h"
#include "monitoring/histogram.h"
#include "rocksdb/db.h"
#include "rocksdb/slice_transform.h"
#include "rocksdb/table.h"
#include "table/block_based_table_factory.h"
#include "table/get_context.h"
#include "table/internal_iterator.h"
#include "table/plain_table_factory.h"
#include "table/table_builder.h"
#include "util/file_reader_writer.h"
#include "util/testharness.h"
#include "util/testutil.h"

using GFLAGS::ParseCommandLineFlags;
using GFLAGS::SetUsageMessage;

namespace rocksdb {

namespace {
// Make a key where i determines the first 4 characters and j determines the
// last 4 characters.
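// For example (values are illustrative), MakeKey(3, 7, true) returns the
// plain user key "0003__key___0007".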
static std::string MakeKey(int i, int j, bool through_db) {
  char buf[100];
  snprintf(buf, sizeof(buf), "%04d__key___%04d", i, j);
  if (through_db) {
    return std::string(buf);
  }
  // If we query the table directly, it operates on internal keys rather than
  // user keys, so we need to append 8 bytes of internal information (sequence
  // number and value type) to the user key to form an internal key.
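  // For example, with sequence number 0 and type kTypeValue, the appended
  // 8 bytes are the fixed64 (little-endian) encoding of (0 << 8) | kTypeValue.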
  InternalKey key(std::string(buf), 0, ValueType::kTypeValue);
  return key.Encode().ToString();
}

uint64_t Now(Env* env, bool measured_by_nanosecond) {
  return measured_by_nanosecond ? env->NowNanos() : env->NowMicros();
}
}  // namespace

// A very simple benchmark.
// It creates a table with roughly num_keys1 * num_keys2 keys, where there are
// num_keys1 distinct key prefixes and each prefix has num_keys2 distinct keys
// differing only in the suffix part.
// If if_query_empty_keys = false, query the existing keys num_keys1 * num_keys2
// times at random.
// If if_query_empty_keys = true, query num_keys1 * num_keys2 random
// non-existing keys.
// Print out the total time.
// If through_db = true, a full DB is created and queries run against it.
// Otherwise, operations go directly through the table reader.
//
// If for_iterator = true, instead of querying one key at a time, it iterates
// over a range of keys sharing the same prefix.
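//
// Example invocation (a sketch; the binary name and flag values are
// illustrative, and the flags are defined near the bottom of this file):
//   ./table_reader_bench --table_factory=block_based --num_keys1=4096 \
//       --num_keys2=512 --iter=3 --through_db=false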
namespace {
void TableReaderBenchmark(Options& opts, EnvOptions& env_options,
                          ReadOptions& read_options, int num_keys1,
                          int num_keys2, int num_iter, int prefix_len,
                          bool if_query_empty_keys, bool for_iterator,
                          bool through_db, bool measured_by_nanosecond) {
  rocksdb::InternalKeyComparator ikc(opts.comparator);

  std::string file_name = test::TmpDir()
      + "/rocksdb_table_reader_benchmark";
  std::string dbname = test::TmpDir() + "/rocksdb_table_reader_bench_db";
  WriteOptions wo;
  Env* env = Env::Default();
  TableBuilder* tb = nullptr;
  DB* db = nullptr;
  Status s;
  const ImmutableCFOptions ioptions(opts);
  unique_ptr<WritableFileWriter> file_writer;
  if (!through_db) {
    unique_ptr<WritableFile> file;
    env->NewWritableFile(file_name, &file, env_options);

    std::vector<std::unique_ptr<IntTblPropCollectorFactory> >
        int_tbl_prop_collector_factories;

    file_writer.reset(new WritableFileWriter(std::move(file), env_options));
    int unknown_level = -1;
    tb = opts.table_factory->NewTableBuilder(
        TableBuilderOptions(ioptions, ikc, &int_tbl_prop_collector_factories,
                            CompressionType::kNoCompression,
                            CompressionOptions(),
                            nullptr /* compression_dict */,
                            false /* skip_filters */, kDefaultColumnFamilyName,
                            unknown_level),
        0 /* column_family_id */, file_writer.get());
  } else {
    s = DB::Open(opts, dbname, &db);
    ASSERT_OK(s);
    ASSERT_TRUE(db != nullptr);
  }
  // Populate the table / DB with num_keys1 * num_keys2 keys.
  for (int i = 0; i < num_keys1; i++) {
    for (int j = 0; j < num_keys2; j++) {
      std::string key = MakeKey(i * 2, j, through_db);
      if (!through_db) {
        tb->Add(key, key);
      } else {
        db->Put(wo, key, key);
      }
    }
  }
  if (!through_db) {
    tb->Finish();
    file_writer->Close();
  } else {
    db->Flush(FlushOptions());
  }

  unique_ptr<TableReader> table_reader;
  if (!through_db) {
    unique_ptr<RandomAccessFile> raf;
    s = env->NewRandomAccessFile(file_name, &raf, env_options);
    if (!s.ok()) {
      fprintf(stderr, "Create File Error: %s\n", s.ToString().c_str());
      exit(1);
    }
    uint64_t file_size;
    env->GetFileSize(file_name, &file_size);
    unique_ptr<RandomAccessFileReader> file_reader(
        new RandomAccessFileReader(std::move(raf)));
    s = opts.table_factory->NewTableReader(
        TableReaderOptions(ioptions, env_options, ikc), std::move(file_reader),
        file_size, &table_reader);
    if (!s.ok()) {
      fprintf(stderr, "Open Table Error: %s\n", s.ToString().c_str());
      exit(1);
    }
  }

  Random rnd(301);
  std::string result;
  HistogramImpl hist;

  for (int it = 0; it < num_iter; it++) {
    for (int i = 0; i < num_keys1; i++) {
      for (int j = 0; j < num_keys2; j++) {
        int r1 = rnd.Uniform(num_keys1) * 2;
        int r2 = rnd.Uniform(num_keys2);
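        // For empty-key queries, shift to an odd prefix and a suffix beyond
        // num_keys2; only even prefixes and suffixes below num_keys2 are ever
        // inserted above, so such a lookup is guaranteed to miss.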
        if (if_query_empty_keys) {
          r1++;
          r2 = num_keys2 * 2 - r2;
        }

        if (!for_iterator) {
          // Query one existing key;
          std::string key = MakeKey(r1, r2, through_db);
          uint64_t start_time = Now(env, measured_by_nanosecond);
          if (!through_db) {
            PinnableSlice value;
            MergeContext merge_context;
            RangeDelAggregator range_del_agg(ikc, {} /* snapshots */);
            GetContext get_context(ioptions.user_comparator,
                                   ioptions.merge_operator, ioptions.info_log,
                                   ioptions.statistics, GetContext::kNotFound,
                                   Slice(key), &value, nullptr, &merge_context,
                                   &range_del_agg, env);
            s = table_reader->Get(read_options, key, &get_context);
          } else {
            s = db->Get(read_options, key, &result);
          }
          hist.Add(Now(env, measured_by_nanosecond) - start_time);
        } else {
          int r2_len;
          if (if_query_empty_keys) {
            r2_len = 0;
          } else {
            r2_len = rnd.Uniform(num_keys2) + 1;
            if (r2_len + r2 > num_keys2) {
              r2_len = num_keys2 - r2;
            }
          }
          std::string start_key = MakeKey(r1, r2, through_db);
          std::string end_key = MakeKey(r1, r2 + r2_len, through_db);
          uint64_t total_time = 0;
          uint64_t start_time = Now(env, measured_by_nanosecond);
          Iterator* iter = nullptr;
          InternalIterator* iiter = nullptr;
          if (!through_db) {
            iiter = table_reader->NewIterator(read_options);
          } else {
            iter = db->NewIterator(read_options);
          }
          int count = 0;
          for (through_db ? iter->Seek(start_key) : iiter->Seek(start_key);
               through_db ? iter->Valid() : iiter->Valid();
               through_db ? iter->Next() : iiter->Next()) {
            if (if_query_empty_keys) {
              break;
            }
            // verify key;
            total_time += Now(env, measured_by_nanosecond) - start_time;
            assert(Slice(MakeKey(r1, r2 + count, through_db)) ==
                   (through_db ? iter->key() : iiter->key()));
            start_time = Now(env, measured_by_nanosecond);
            if (++count >= r2_len) {
              break;
            }
          }
          if (count != r2_len) {
            fprintf(
                stderr, "Iterator cannot iterate expected number of entries. "
                "Expected %d but got %d\n", r2_len, count);
            assert(false);
          }
          // Release whichever iterator was created; deleting only `iter`
          // would leak the table-level iterator in the !through_db case.
          delete iter;
          delete iiter;
          total_time += Now(env, measured_by_nanosecond) - start_time;
          hist.Add(total_time);
        }
      }
    }
  }

  fprintf(
      stderr,
      "==================================================="
      "====================================================\n"
      "InMemoryTableSimpleBenchmark: %20s num_key1: %5d "
      "num_key2: %5d %10s\n"
      "==================================================="
      "===================================================="
      "\nHistogram (unit: %s): \n%s",
      opts.table_factory->Name(), num_keys1, num_keys2,
      for_iterator ? "iterator" : (if_query_empty_keys ? "empty" : "non_empty"),
      measured_by_nanosecond ? "nanosecond" : "microsecond",
      hist.ToString().c_str());
  if (!through_db) {
    env->DeleteFile(file_name);
  } else {
    delete db;
    db = nullptr;
    DestroyDB(dbname, opts);
  }
}
}  // namespace
}  // namespace rocksdb

DEFINE_bool(query_empty, false, "Query non-existing keys instead of existing "
            "ones.");
DEFINE_int32(num_keys1, 4096, "Number of distinct key prefixes");
DEFINE_int32(num_keys2, 512, "Number of distinct keys for each prefix");
DEFINE_int32(iter, 3, "Number of times to repeat the whole query workload");
DEFINE_int32(prefix_len, 16, "Prefix length used for iterators and indexes");
DEFINE_bool(iterator, false, "Test iterators instead of point lookups");
DEFINE_bool(through_db, false, "If enabled, a DB instance will be created and "
            "queries will run against the DB. Otherwise, they will run "
            "directly against a table reader.");
DEFINE_bool(mmap_read, true, "Whether to use mmap reads");
DEFINE_string(table_factory, "block_based",
              "Table factory to use: `block_based` (default), `plain_table` or "
              "`cuckoo_hash`.");
DEFINE_string(time_unit, "microsecond",
              "The time unit used for measuring performance. User can specify "
              "`microsecond` (default) or `nanosecond`");
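
// Another illustrative run (values are examples only): prefix-seek iteration
// against PlainTable with a shorter prefix extractor:
//   ./table_reader_bench --table_factory=plain_table --prefix_len=8 --iterator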
int main(int argc, char** argv) {
  SetUsageMessage(std::string("\nUSAGE:\n") + std::string(argv[0]) +
                  " [OPTIONS]...");
  ParseCommandLineFlags(&argc, &argv, true);

  std::shared_ptr<rocksdb::TableFactory> tf;
  rocksdb::Options options;
  if (FLAGS_prefix_len < 16) {
    options.prefix_extractor.reset(rocksdb::NewFixedPrefixTransform(
        FLAGS_prefix_len));
  }
  rocksdb::ReadOptions ro;
  rocksdb::EnvOptions env_options;
  options.create_if_missing = true;
  options.compression = rocksdb::CompressionType::kNoCompression;

  if (FLAGS_table_factory == "cuckoo_hash") {
#ifndef ROCKSDB_LITE
    options.allow_mmap_reads = FLAGS_mmap_read;
    env_options.use_mmap_reads = FLAGS_mmap_read;
    rocksdb::CuckooTableOptions table_options;
    table_options.hash_table_ratio = 0.75;
    tf.reset(rocksdb::NewCuckooTableFactory(table_options));
#else
    fprintf(stderr, "Cuckoo table is not supported in lite mode\n");
    exit(1);
#endif  // ROCKSDB_LITE
  } else if (FLAGS_table_factory == "plain_table") {
#ifndef ROCKSDB_LITE
    options.allow_mmap_reads = FLAGS_mmap_read;
    env_options.use_mmap_reads = FLAGS_mmap_read;

    rocksdb::PlainTableOptions plain_table_options;
    plain_table_options.user_key_len = 16;
    plain_table_options.bloom_bits_per_key = (FLAGS_prefix_len == 16) ? 0 : 8;
    plain_table_options.hash_table_ratio = 0.75;

    tf.reset(new rocksdb::PlainTableFactory(plain_table_options));
    options.prefix_extractor.reset(rocksdb::NewFixedPrefixTransform(
        FLAGS_prefix_len));
#else
    fprintf(stderr, "Plain table is not supported in lite mode\n");
    exit(1);
#endif  // ROCKSDB_LITE
  } else if (FLAGS_table_factory == "block_based") {
    tf.reset(new rocksdb::BlockBasedTableFactory());
  } else {
    fprintf(stderr, "Invalid table type %s\n", FLAGS_table_factory.c_str());
  }

  if (tf) {
    // If the user provides an unrecognized time unit, just fall back to
    // microseconds.
    bool measured_by_nanosecond = FLAGS_time_unit == "nanosecond";

    options.table_factory = tf;
    rocksdb::TableReaderBenchmark(options, env_options, ro, FLAGS_num_keys1,
                                  FLAGS_num_keys2, FLAGS_iter, FLAGS_prefix_len,
                                  FLAGS_query_empty, FLAGS_iterator,
                                  FLAGS_through_db, measured_by_nanosecond);
  } else {
    return 1;
  }

  return 0;
}

#endif  // GFLAGS