// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
// This source code is licensed under both the GPLv2 (found in the
// COPYING file in the root directory) and Apache 2.0 License
// (found in the LICENSE.Apache file in the root directory).

package org.rocksdb;

public interface ColumnFamilyOptionsInterface
    <T extends ColumnFamilyOptionsInterface>
        extends AdvancedColumnFamilyOptionsInterface<T> {

  /**
   * Use this if your DB is very small (like under 1GB) and you don't want to
   * spend lots of memory for memtables.
   *
   * @return the instance of the current object.
   */
  T optimizeForSmallDb();

  /**
   * Use this if you don't need to keep the data sorted, i.e. you'll never use
   * an iterator, only Put() and Get() API calls.
   *
   * @param blockCacheSizeMb Block cache size in MB
   * @return the instance of the current object.
   */
  T optimizeForPointLookup(long blockCacheSizeMb);
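
  /*
   * Usage sketch (illustrative; the variable name "cfOpts" is hypothetical):
   * tuning a column family for pure point lookups with a 64 MB block cache.
   *
   *   try (final ColumnFamilyOptions cfOpts = new ColumnFamilyOptions()) {
   *     cfOpts.optimizeForPointLookup(64);  // Put()/Get() only, 64 MB cache
   *     // or, for a DB well under 1GB:
   *     // cfOpts.optimizeForSmallDb();
   *   }
   */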

  /**
   * <p>Default values for some parameters in ColumnFamilyOptions are not
   * optimized for heavy workloads and big datasets, which means you might
   * observe write stalls under some conditions. As a starting point for tuning
   * RocksDB options, use the following for level style compaction.</p>
   *
   * <p>Make sure to also call IncreaseParallelism(), which will provide the
   * biggest performance gains.</p>
   * <p>Note: we might use more memory than memtable_memory_budget during high
   * write-rate periods.</p>
   *
   * @return the instance of the current object.
   */
  T optimizeLevelStyleCompaction();

  /**
   * <p>Default values for some parameters in ColumnFamilyOptions are not
   * optimized for heavy workloads and big datasets, which means you might
   * observe write stalls under some conditions. As a starting point for tuning
   * RocksDB options, use the following for level style compaction.</p>
   *
   * <p>Make sure to also call IncreaseParallelism(), which will provide the
   * biggest performance gains.</p>
   * <p>Note: we might use more memory than memtable_memory_budget during high
   * write-rate periods.</p>
   *
   * @param memtableMemoryBudget memory budget in bytes
   * @return the instance of the current object.
   */
  T optimizeLevelStyleCompaction(
      long memtableMemoryBudget);
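
  /*
   * Usage sketch (illustrative; "cfOpts" is hypothetical): seeding level-style
   * compaction tuning with a 512 MB memtable budget, matching
   * DEFAULT_COMPACTION_MEMTABLE_MEMORY_BUDGET below.
   *
   *   try (final ColumnFamilyOptions cfOpts = new ColumnFamilyOptions()) {
   *     cfOpts.optimizeLevelStyleCompaction(512 * 1024 * 1024L);
   *   }
   */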

  /**
   * <p>Default values for some parameters in ColumnFamilyOptions are not
   * optimized for heavy workloads and big datasets, which means you might
   * observe write stalls under some conditions. As a starting point for tuning
   * RocksDB options, use the following for universal style compaction.</p>
   *
   * <p>Universal style compaction is focused on reducing Write Amplification
   * Factor for big data sets, but increases Space Amplification.</p>
   *
   * <p>Make sure to also call IncreaseParallelism(), which will provide the
   * biggest performance gains.</p>
   *
   * <p>Note: we might use more memory than memtable_memory_budget during high
   * write-rate periods.</p>
   *
   * @return the instance of the current object.
   */
  T optimizeUniversalStyleCompaction();

  /**
   * <p>Default values for some parameters in ColumnFamilyOptions are not
   * optimized for heavy workloads and big datasets, which means you might
   * observe write stalls under some conditions. As a starting point for tuning
   * RocksDB options, use the following for universal style compaction.</p>
   *
   * <p>Universal style compaction is focused on reducing Write Amplification
   * Factor for big data sets, but increases Space Amplification.</p>
   *
   * <p>Make sure to also call IncreaseParallelism(), which will provide the
   * biggest performance gains.</p>
   *
   * <p>Note: we might use more memory than memtable_memory_budget during high
   * write-rate periods.</p>
   *
   * @param memtableMemoryBudget memory budget in bytes
   * @return the instance of the current object.
   */
  T optimizeUniversalStyleCompaction(
      long memtableMemoryBudget);
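
  /*
   * Usage sketch (illustrative; "cfOpts" is hypothetical): the same memtable
   * budget applied to a universal-compaction workload, trading extra space
   * amplification for lower write amplification.
   *
   *   try (final ColumnFamilyOptions cfOpts = new ColumnFamilyOptions()) {
   *     cfOpts.optimizeUniversalStyleCompaction(512 * 1024 * 1024L);
   *   }
   */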

  /**
   * Set {@link BuiltinComparator} to be used with RocksDB.
   *
   * Note: the comparator can only be set once, upon database creation.
   *
   * Default: BytewiseComparator.
   * @param builtinComparator a {@link BuiltinComparator} type.
   * @return the instance of the current object.
   */
  T setComparator(
      BuiltinComparator builtinComparator);

  /**
   * Use the specified comparator for key ordering.
   *
   * The comparator should not be disposed before the options instances using
   * it are disposed. If the dispose() function is not called, the comparator
   * object will be GC'd automatically.
   *
   * A comparator instance can be re-used in multiple options instances.
   *
   * @param comparator java instance.
   * @return the instance of the current object.
   */
  T setComparator(
      AbstractComparator<? extends AbstractSlice<?>> comparator);
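
  /*
   * Usage sketch (illustrative; "cfOpts" is hypothetical): selecting the
   * built-in reverse-bytewise key ordering. Remember the comparator is fixed
   * at database creation.
   *
   *   try (final ColumnFamilyOptions cfOpts = new ColumnFamilyOptions()) {
   *     cfOpts.setComparator(BuiltinComparator.REVERSE_BYTEWISE_COMPARATOR);
   *   }
   */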

  /**
   * <p>Set the merge operator to be used for merging two merge operands
   * of the same key. The merge function is invoked during
   * compaction and at lookup time, if multiple key/value pairs belonging
   * to the same key are found in the database.</p>
   *
   * @param name the name of the merge function, as defined by
   *    the MergeOperators factory (see utilities/MergeOperators.h).
   *    The merge function is specified by name and must be one of the
   *    standard merge operators provided by RocksDB. The available
   *    operators are "put", "uint64add", "stringappend" and "stringappendtest".
   * @return the instance of the current object.
   */
  T setMergeOperatorName(String name);

  /**
   * <p>Set the merge operator to be used for merging two different key/value
   * pairs that share the same key. The merge function is invoked during
   * compaction and at lookup time, if multiple key/value pairs belonging
   * to the same key are found in the database.</p>
   *
   * @param mergeOperator {@link MergeOperator} instance.
   * @return the instance of the current object.
   */
  T setMergeOperator(MergeOperator mergeOperator);
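
  /*
   * Usage sketch (illustrative; "cfOpts" is hypothetical): enabling operand
   * merging by operator name, or equivalently by instance.
   *
   *   try (final ColumnFamilyOptions cfOpts = new ColumnFamilyOptions()) {
   *     cfOpts.setMergeOperatorName("uint64add");
   *     // or by instance, e.g.:
   *     // cfOpts.setMergeOperator(new StringAppendOperator());
   *   }
   */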

  /**
   * This prefix-extractor uses the first n bytes of a key as its prefix.
   *
   * In some hash-based memtable representations such as HashLinkedList
   * and HashSkipList, prefixes are used to partition the keys into
   * several buckets. The prefix extractor is used to specify how to
   * extract the prefix given a key.
   *
   * @param n use the first n bytes of a key as its prefix.
   * @return the reference to the current option.
   */
  T useFixedLengthPrefixExtractor(int n);

  /**
   * Same as the fixed-length prefix extractor, except that when the slice is
   * shorter than the fixed length, it will use the full key.
   *
   * @param n use the first n bytes of a key as its prefix.
   * @return the reference to the current option.
   */
  T useCappedPrefixExtractor(int n);
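
  /*
   * Usage sketch (illustrative; "cfOpts" is hypothetical): treating the first
   * 8 bytes of each key as its prefix, falling back to the capped variant if
   * keys may be shorter than 8 bytes.
   *
   *   try (final ColumnFamilyOptions cfOpts = new ColumnFamilyOptions()) {
   *     cfOpts.useFixedLengthPrefixExtractor(8);
   *     // or: cfOpts.useCappedPrefixExtractor(8);
   *   }
   */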

  /**
   * Number of files to trigger level-0 compaction. A value &lt; 0 means that
   * level-0 compaction will not be triggered by number of files at all.
   * Default: 4
   *
   * @param numFiles the number of files in level-0 to trigger compaction.
   * @return the reference to the current option.
   */
  T setLevelZeroFileNumCompactionTrigger(
      int numFiles);

  /**
   * The number of files in level 0 to trigger compaction from level-0 to
   * level-1. A value &lt; 0 means that level-0 compaction will not be
   * triggered by number of files at all.
   * Default: 4
   *
   * @return the number of files in level 0 to trigger compaction.
   */
  int levelZeroFileNumCompactionTrigger();

  /**
   * Soft limit on number of level-0 files. We start slowing down writes at this
   * point. A value &lt; 0 means that no write slowdown will be triggered by
   * the number of files in level-0.
   *
   * @param numFiles soft limit on number of level-0 files.
   * @return the reference to the current option.
   */
  T setLevelZeroSlowdownWritesTrigger(
      int numFiles);

  /**
   * Soft limit on the number of level-0 files. We start slowing down writes
   * at this point. A value &lt; 0 means that no write slowdown will be
   * triggered by the number of files in level-0.
   *
   * @return the soft limit on the number of level-0 files.
   */
  int levelZeroSlowdownWritesTrigger();

  /**
   * Maximum number of level-0 files. We stop writes at this point.
   *
   * @param numFiles the hard limit of the number of level-0 files.
   * @return the reference to the current option.
   */
  T setLevelZeroStopWritesTrigger(int numFiles);

  /**
   * Maximum number of level-0 files. We stop writes at this point.
   *
   * @return the hard limit of the number of level-0 files.
   */
  int levelZeroStopWritesTrigger();
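
  /*
   * Usage sketch (illustrative; "cfOpts" is hypothetical): the three level-0
   * thresholds escalate from triggering compaction, to slowing writes, to
   * stopping them, so they should be set in increasing order (the values
   * below are the usual defaults).
   *
   *   try (final ColumnFamilyOptions cfOpts = new ColumnFamilyOptions()) {
   *     cfOpts.setLevelZeroFileNumCompactionTrigger(4)
   *           .setLevelZeroSlowdownWritesTrigger(20)
   *           .setLevelZeroStopWritesTrigger(36);
   *   }
   */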

  /**
   * The ratio between the total size of level-(L+1) files and the total
   * size of level-L files for all L.
   * DEFAULT: 10
   *
   * @param multiplier the ratio between the total size of level-(L+1)
   *     files and the total size of level-L files for all L.
   * @return the reference to the current option.
   */
  T setMaxBytesForLevelMultiplier(
      double multiplier);

  /**
   * The ratio between the total size of level-(L+1) files and the total
   * size of level-L files for all L.
   * DEFAULT: 10
   *
   * @return the ratio between the total size of level-(L+1) files and
   *     the total size of level-L files for all L.
   */
  double maxBytesForLevelMultiplier();
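
  /*
   * Usage sketch (illustrative; "cfOpts" is hypothetical): with a multiplier
   * of 10 and a 256 MB level-1 target, levels grow as roughly L1 = 256 MB,
   * L2 = 2.5 GB, L3 = 25 GB, and so on.
   *
   *   try (final ColumnFamilyOptions cfOpts = new ColumnFamilyOptions()) {
   *     cfOpts.setMaxBytesForLevelMultiplier(10);
   *   }
   */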

  /**
   * FIFO compaction option.
   * The oldest table file will be deleted
   * once the total size of table files reaches this limit.
   * The default value is 1GB (1 * 1024 * 1024 * 1024).
   *
   * @param maxTableFilesSize the size limit of the total sum of table files.
   * @return the instance of the current object.
   */
  T setMaxTableFilesSizeFIFO(
      long maxTableFilesSize);

  /**
   * FIFO compaction option.
   * The oldest table file will be deleted
   * once the total size of table files reaches this limit.
   * The default value is 1GB (1 * 1024 * 1024 * 1024).
   *
   * @return the size limit of the total sum of table files.
   */
  long maxTableFilesSizeFIFO();
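
  /*
   * Usage sketch (illustrative; "cfOpts" is hypothetical): capping a
   * FIFO-compacted column family at 2 GB of table files; the limit only
   * applies when the compaction style is FIFO.
   *
   *   try (final ColumnFamilyOptions cfOpts = new ColumnFamilyOptions()) {
   *     cfOpts.setCompactionStyle(CompactionStyle.FIFO)
   *           .setMaxTableFilesSizeFIFO(2L * 1024 * 1024 * 1024);
   *   }
   */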

  /**
   * Get the config for mem-table.
   *
   * @return the mem-table config.
   */
  MemTableConfig memTableConfig();

  /**
   * Set the config for mem-table.
   *
   * @param memTableConfig the mem-table config.
   * @return the instance of the current object.
   * @throws java.lang.IllegalArgumentException thrown on 32-bit platforms
   *     when a value overflows the underlying platform-specific type.
   */
  T setMemTableConfig(MemTableConfig memTableConfig);
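
  /*
   * Usage sketch (illustrative; "cfOpts" is hypothetical): switching to a
   * hash-skiplist memtable, which requires a prefix extractor to bucket keys.
   *
   *   try (final ColumnFamilyOptions cfOpts = new ColumnFamilyOptions()) {
   *     cfOpts.useFixedLengthPrefixExtractor(8)
   *           .setMemTableConfig(new HashSkipListMemTableConfig());
   *   }
   */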

  /**
   * Returns the name of the current mem table representation.
   * The memtable format can be set using setMemTableConfig.
   *
   * @return the name of the currently-used memtable factory.
   * @see #setMemTableConfig(org.rocksdb.MemTableConfig)
   */
  String memTableFactoryName();

  /**
   * Get the config for table format.
   *
   * @return the table format config.
   */
  TableFormatConfig tableFormatConfig();

  /**
   * Set the config for table format.
   *
   * @param config the table format config.
   * @return the reference of the current options.
   */
  T setTableFormatConfig(TableFormatConfig config);

  /**
   * @return the name of the currently used table factory.
   */
  String tableFactoryName();
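
  /*
   * Usage sketch (illustrative; "cfOpts" and "tableConfig" are hypothetical):
   * configuring the block-based table format with 16 KB data blocks.
   *
   *   final BlockBasedTableConfig tableConfig = new BlockBasedTableConfig();
   *   tableConfig.setBlockSize(16 * 1024);
   *   try (final ColumnFamilyOptions cfOpts = new ColumnFamilyOptions()) {
   *     cfOpts.setTableFormatConfig(tableConfig);
   *   }
   */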

  /**
   * Compression algorithm that will be used for the bottommost level that
   * contains files. If level-compaction is used, this option will only affect
   * levels after the base level.
   *
   * Default: {@link CompressionType#DISABLE_COMPRESSION_OPTION}
   *
   * @param bottommostCompressionType The compression type to use for the
   *     bottommost level
   *
   * @return the reference of the current options.
   */
  T setBottommostCompressionType(
      final CompressionType bottommostCompressionType);

  /**
   * Compression algorithm that will be used for the bottommost level that
   * contains files. If level-compaction is used, this option will only affect
   * levels after the base level.
   *
   * Default: {@link CompressionType#DISABLE_COMPRESSION_OPTION}
   *
   * @return The compression type used for the bottommost level
   */
  CompressionType bottommostCompressionType();

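  /*
   * Usage sketch (illustrative; "cfOpts" is hypothetical): fast compression
   * on upper levels, stronger compression on the large, cold bottommost
   * level.
   *
   *   try (final ColumnFamilyOptions cfOpts = new ColumnFamilyOptions()) {
   *     cfOpts.setCompressionType(CompressionType.LZ4_COMPRESSION)
   *           .setBottommostCompressionType(CompressionType.ZSTD_COMPRESSION);
   *   }
   */
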
  /**
   * Set the different options for compression algorithms.
   *
   * @param compressionOptions The compression options
   *
   * @return the reference of the current options.
   */
  T setCompressionOptions(
      CompressionOptions compressionOptions);

  /**
   * Get the different options for compression algorithms.
   *
   * @return The compression options
   */
  CompressionOptions compressionOptions();
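
  /*
   * Usage sketch (illustrative; "cfOpts" and "compressionOpts" are
   * hypothetical): raising the codec-specific compression level and allowing
   * a 16 KB compression dictionary.
   *
   *   final CompressionOptions compressionOpts = new CompressionOptions()
   *       .setLevel(6)
   *       .setMaxDictBytes(16 * 1024);
   *   try (final ColumnFamilyOptions cfOpts = new ColumnFamilyOptions()) {
   *     cfOpts.setCompressionOptions(compressionOpts);
   *   }
   */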

  /**
   * Default memtable memory budget used with the following methods:
   *
   * <ol>
   *   <li>{@link #optimizeLevelStyleCompaction()}</li>
   *   <li>{@link #optimizeUniversalStyleCompaction()}</li>
   * </ol>
   */
  long DEFAULT_COMPACTION_MEMTABLE_MEMORY_BUDGET = 512 * 1024 * 1024;
}