// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree. An additional grant
// of patent rights can be found in the PATENTS file in the same directory.
//
// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file. See the AUTHORS file for names of contributors.
// Opaque forward declaration of RocksDB's compression algorithm enum.
// The fixed underlying type (unsigned char) makes this a complete type,
// so it can be used for by-value members without the full definition.
enum CompressionType : unsigned char;
26 // Options to control the behavior of a database (passed to
27 // DB::Open). A LevelDBOptions object can be initialized as though
28 // it were a LevelDB Options object, and then it can be converted into
29 // a RocksDB Options object.
30 struct LevelDBOptions
{
31 // -------------------
32 // Parameters that affect behavior
34 // Comparator used to define the order of keys in the table.
35 // Default: a comparator that uses lexicographic byte-wise ordering
37 // REQUIRES: The client must ensure that the comparator supplied
38 // here has the same name and orders keys *exactly* the same as the
39 // comparator provided to previous open calls on the same DB.
40 const Comparator
* comparator
;
42 // If true, the database will be created if it is missing.
44 bool create_if_missing
;
46 // If true, an error is raised if the database already exists.
50 // If true, the implementation will do aggressive checking of the
51 // data it is processing and will stop early if it detects any
52 // errors. This may have unforeseen ramifications: for example, a
53 // corruption of one DB entry may cause a large number of entries to
54 // become unreadable or for the entire DB to become unopenable.
58 // Use the specified object to interact with the environment,
59 // e.g. to read/write files, schedule background work, etc.
60 // Default: Env::Default()
63 // Any internal progress/error information generated by the db will
64 // be written to info_log if it is non-NULL, or to a file stored
65 // in the same directory as the DB contents if info_log is NULL.
69 // -------------------
70 // Parameters that affect performance
72 // Amount of data to build up in memory (backed by an unsorted log
73 // on disk) before converting to a sorted on-disk file.
75 // Larger values increase performance, especially during bulk loads.
76 // Up to two write buffers may be held in memory at the same time,
77 // so you may wish to adjust this parameter to control memory usage.
78 // Also, a larger write buffer will result in a longer recovery time
79 // the next time the database is opened.
82 size_t write_buffer_size
;
84 // Number of open files that can be used by the DB. You may need to
85 // increase this if your database has a large working set (budget
86 // one open file per 2MB of working set).
91 // Control over blocks (user data is stored in a set of blocks, and
92 // a block is the unit of reading from disk).
94 // If non-NULL, use the specified cache for blocks.
95 // If NULL, leveldb will automatically create and use an 8MB internal cache.
99 // Approximate size of user data packed per block. Note that the
100 // block size specified here corresponds to uncompressed data. The
101 // actual size of the unit read from disk may be smaller if
102 // compression is enabled. This parameter can be changed dynamically.
107 // Number of keys between restart points for delta encoding of keys.
108 // This parameter can be changed dynamically. Most clients should
109 // leave this parameter alone.
112 int block_restart_interval
;
114 // Compress blocks using the specified compression algorithm. This
115 // parameter can be changed dynamically.
117 // Default: kSnappyCompression, which gives lightweight but fast
120 // Typical speeds of kSnappyCompression on an Intel(R) Core(TM)2 2.4GHz:
121 // ~200-500MB/s compression
122 // ~400-800MB/s decompression
123 // Note that these speeds are significantly faster than most
124 // persistent storage speeds, and therefore it is typically never
125 // worth switching to kNoCompression. Even if the input data is
126 // incompressible, the kSnappyCompression implementation will
127 // efficiently detect that and will switch to uncompressed mode.
128 CompressionType compression
;
130 // If non-NULL, use the specified filter policy to reduce disk reads.
131 // Many applications will benefit from passing the result of
132 // NewBloomFilterPolicy() here.
135 const FilterPolicy
* filter_policy
;
137 // Create a LevelDBOptions object with default values for all fields.
141 // Converts a LevelDBOptions object into a RocksDB Options object.
142 Options
ConvertOptions(const LevelDBOptions
& leveldb_options
);
144 } // namespace rocksdb