// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree. An additional grant
// of patent rights can be found in the PATENTS file in the same directory.
//
// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file. See the AUTHORS file for names of contributors.

#pragma once

#include <stddef.h>

namespace rocksdb {

class Cache;
class Comparator;
class Env;
class FilterPolicy;
class Logger;
struct Options;
class Snapshot;

enum CompressionType : unsigned char;

// Options to control the behavior of a database (passed to
// DB::Open). A LevelDBOptions object can be initialized as though
// it were a LevelDB Options object, and then it can be converted into
// a RocksDB Options object.
struct LevelDBOptions {
  // -------------------
  // Parameters that affect behavior

  // Comparator used to define the order of keys in the table.
  // Default: a comparator that uses lexicographic byte-wise ordering
  //
  // REQUIRES: The client must ensure that the comparator supplied
  // here has the same name and orders keys *exactly* the same as the
  // comparator provided to previous open calls on the same DB.
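  //
  // (An illustrative comparator sketch appears in a comment after this
  // struct.)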
  const Comparator* comparator;

  // If true, the database will be created if it is missing.
  // Default: false
  bool create_if_missing;

  // If true, an error is raised if the database already exists.
  // Default: false
  bool error_if_exists;

  // If true, the implementation will do aggressive checking of the
  // data it is processing and will stop early if it detects any
  // errors. This may have unforeseen ramifications: for example, a
  // corruption of one DB entry may cause a large number of entries to
  // become unreadable or for the entire DB to become unopenable.
  // Default: false
  bool paranoid_checks;

  // Use the specified object to interact with the environment,
  // e.g. to read/write files, schedule background work, etc.
  // Default: Env::Default()
  Env* env;

  // Any internal progress/error information generated by the db will
  // be written to info_log if it is non-NULL, or to a file stored
  // in the same directory as the DB contents if info_log is NULL.
  // Default: NULL
  Logger* info_log;

  // -------------------
  // Parameters that affect performance

  // Amount of data to build up in memory (backed by an unsorted log
  // on disk) before converting to a sorted on-disk file.
  //
  // Larger values increase performance, especially during bulk loads.
  // Up to two write buffers may be held in memory at the same time,
  // so you may wish to adjust this parameter to control memory usage.
  // Also, a larger write buffer will result in a longer recovery time
  // the next time the database is opened.
  //
  // Default: 4MB
  size_t write_buffer_size;

  // Number of open files that can be used by the DB. You may need to
  // increase this if your database has a large working set (budget
  // one open file per 2MB of working set).
  //
  // Default: 1000
  int max_open_files;

  // Control over blocks (user data is stored in a set of blocks, and
  // a block is the unit of reading from disk).

  // If non-NULL, use the specified cache for blocks.
  // If NULL, leveldb will automatically create and use an 8MB internal cache.
  // Default: NULL
  Cache* block_cache;

  // Approximate size of user data packed per block. Note that the
  // block size specified here corresponds to uncompressed data. The
  // actual size of the unit read from disk may be smaller if
  // compression is enabled. This parameter can be changed dynamically.
  //
  // Default: 4K
  size_t block_size;

  // Number of keys between restart points for delta encoding of keys.
  // This parameter can be changed dynamically. Most clients should
  // leave this parameter alone.
  //
  // Default: 16
  int block_restart_interval;

  // Compress blocks using the specified compression algorithm. This
  // parameter can be changed dynamically.
  //
  // Default: kSnappyCompression, which gives lightweight but fast
  // compression.
  //
  // Typical speeds of kSnappyCompression on an Intel(R) Core(TM)2 2.4GHz:
  //    ~200-500MB/s compression
  //    ~400-800MB/s decompression
  // Note that these speeds are significantly faster than most
  // persistent storage speeds, and therefore it is typically never
  // worth switching to kNoCompression. Even if the input data is
  // incompressible, the kSnappyCompression implementation will
  // efficiently detect that and will switch to uncompressed mode.
  CompressionType compression;

  // If non-NULL, use the specified filter policy to reduce disk reads.
  // Many applications will benefit from passing the result of
  // NewBloomFilterPolicy() here.
  //
  // Default: NULL
  const FilterPolicy* filter_policy;

  // Create a LevelDBOptions object with default values for all fields.
  LevelDBOptions();
};
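
// Example (illustrative sketch, not part of the original header): a custom
// comparator ordering keys in reverse byte-wise order, suitable for the
// `comparator` field above. The class name ReverseBytewiseExampleComparator
// and its Name() string are hypothetical; what matters is that Name() stays
// identical across every open of the same DB, per the REQUIRES clause above.
//
//   #include <string>
//
//   #include "rocksdb/comparator.h"
//   #include "rocksdb/slice.h"
//
//   class ReverseBytewiseExampleComparator : public rocksdb::Comparator {
//    public:
//     const char* Name() const override {
//       return "example.ReverseBytewiseExampleComparator";
//     }
//     int Compare(const rocksdb::Slice& a,
//                 const rocksdb::Slice& b) const override {
//       return b.compare(a);  // reverse of the default byte-wise order
//     }
//     // Conservative no-ops: leaving keys unshortened is always correct.
//     void FindShortestSeparator(std::string* /*start*/,
//                                const rocksdb::Slice& /*limit*/)
//         const override {}
//     void FindShortSuccessor(std::string* /*key*/) const override {}
//   };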

// Converts a LevelDBOptions object into a RocksDB Options object.
Options ConvertOptions(const LevelDBOptions& leveldb_options);
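
// Example (illustrative sketch, not part of the original header): initialize
// a LevelDBOptions as though it were a LevelDB Options object, convert it
// with ConvertOptions(), and open a RocksDB database with the result. The
// path "/tmp/leveldb_options_example" is a placeholder.
//
//   #include "rocksdb/db.h"
//   #include "rocksdb/utilities/leveldb_options.h"
//
//   int main() {
//     rocksdb::LevelDBOptions leveldb_opts;
//     leveldb_opts.create_if_missing = true;      // default is false
//     leveldb_opts.write_buffer_size = 8 << 20;   // 8MB; default is 4MB
//
//     rocksdb::Options options = rocksdb::ConvertOptions(leveldb_opts);
//
//     rocksdb::DB* db = nullptr;
//     rocksdb::Status s =
//         rocksdb::DB::Open(options, "/tmp/leveldb_options_example", &db);
//     if (!s.ok()) return 1;
//
//     // ... use db ...
//     delete db;
//     return 0;
//   }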

}  // namespace rocksdb