// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
// This source code is licensed under both the GPLv2 (found in the
// COPYING file in the root directory) and Apache 2.0 License
// (found in the LICENSE.Apache file in the root directory).
package org.rocksdb;

import java.util.Collection;
import java.util.List;
11 public interface ColumnFamilyOptionsInterface
<T
extends ColumnFamilyOptionsInterface
<T
>>
12 extends AdvancedColumnFamilyOptionsInterface
<T
> {
14 * The function recovers options to a previous version. Only 4.6 or later
15 * versions are supported.
17 * @return the instance of the current object.
19 T
oldDefaults(int majorVersion
, int minorVersion
);
22 * Use this if your DB is very small (like under 1GB) and you don't want to
23 * spend lots of memory for memtables.
25 * @return the instance of the current object.
27 T
optimizeForSmallDb();
30 * Some functions that make it easier to optimize RocksDB
31 * Use this if your DB is very small (like under 1GB) and you don't want to
32 * spend lots of memory for memtables.
33 * An optional cache object is passed in to be used as the block cache
35 * @return the instance of the current object.
37 T
optimizeForSmallDb(Cache cache
);
40 * Use this if you don't need to keep the data sorted, i.e. you'll never use
41 * an iterator, only Put() and Get() API calls
43 * @param blockCacheSizeMb Block cache size in MB
44 * @return the instance of the current object.
46 T
optimizeForPointLookup(long blockCacheSizeMb
);
49 * <p>Default values for some parameters in ColumnFamilyOptions are not
50 * optimized for heavy workloads and big datasets, which means you might
51 * observe write stalls under some conditions. As a starting point for tuning
52 * RocksDB options, use the following for level style compaction.</p>
54 * <p>Make sure to also call IncreaseParallelism(), which will provide the
55 * biggest performance gains.</p>
56 * <p>Note: we might use more memory than memtable_memory_budget during high
57 * write rate period</p>
59 * @return the instance of the current object.
61 T
optimizeLevelStyleCompaction();
64 * <p>Default values for some parameters in ColumnFamilyOptions are not
65 * optimized for heavy workloads and big datasets, which means you might
66 * observe write stalls under some conditions. As a starting point for tuning
67 * RocksDB options, use the following for level style compaction.</p>
69 * <p>Make sure to also call IncreaseParallelism(), which will provide the
70 * biggest performance gains.</p>
71 * <p>Note: we might use more memory than memtable_memory_budget during high
72 * write rate period</p>
74 * @param memtableMemoryBudget memory budget in bytes
75 * @return the instance of the current object.
77 T
optimizeLevelStyleCompaction(
78 long memtableMemoryBudget
);
81 * <p>Default values for some parameters in ColumnFamilyOptions are not
82 * optimized for heavy workloads and big datasets, which means you might
83 * observe write stalls under some conditions. As a starting point for tuning
84 * RocksDB options, use the following for universal style compaction.</p>
86 * <p>Universal style compaction is focused on reducing Write Amplification
87 * Factor for big data sets, but increases Space Amplification.</p>
89 * <p>Make sure to also call IncreaseParallelism(), which will provide the
90 * biggest performance gains.</p>
92 * <p>Note: we might use more memory than memtable_memory_budget during high
93 * write rate period</p>
95 * @return the instance of the current object.
97 T
optimizeUniversalStyleCompaction();
100 * <p>Default values for some parameters in ColumnFamilyOptions are not
101 * optimized for heavy workloads and big datasets, which means you might
102 * observe write stalls under some conditions. As a starting point for tuning
103 * RocksDB options, use the following for universal style compaction.</p>
105 * <p>Universal style compaction is focused on reducing Write Amplification
106 * Factor for big data sets, but increases Space Amplification.</p>
108 * <p>Make sure to also call IncreaseParallelism(), which will provide the
109 * biggest performance gains.</p>
111 * <p>Note: we might use more memory than memtable_memory_budget during high
112 * write rate period</p>
114 * @param memtableMemoryBudget memory budget in bytes
115 * @return the instance of the current object.
117 T
optimizeUniversalStyleCompaction(
118 long memtableMemoryBudget
);
121 * Set {@link BuiltinComparator} to be used with RocksDB.
123 * Note: Comparator can be set once upon database creation.
125 * Default: BytewiseComparator.
126 * @param builtinComparator a {@link BuiltinComparator} type.
127 * @return the instance of the current object.
130 BuiltinComparator builtinComparator
);
133 * Use the specified comparator for key ordering.
135 * Comparator should not be disposed before options instances using this comparator is
136 * disposed. If dispose() function is not called, then comparator object will be
137 * GC'd automatically.
139 * Comparator instance can be re-used in multiple options instances.
141 * @param comparator java instance.
142 * @return the instance of the current object.
145 AbstractComparator comparator
);
148 * <p>Set the merge operator to be used for merging two merge operands
149 * of the same key. The merge function is invoked during
150 * compaction and at lookup time, if multiple key/value pairs belonging
151 * to the same key are found in the database.</p>
153 * @param name the name of the merge function, as defined by
154 * the MergeOperators factory (see utilities/MergeOperators.h)
155 * The merge function is specified by name and must be one of the
156 * standard merge operators provided by RocksDB. The available
157 * operators are "put", "uint64add", "stringappend" and "stringappendtest".
158 * @return the instance of the current object.
160 T
setMergeOperatorName(String name
);
163 * <p>Set the merge operator to be used for merging two different key/value
164 * pairs that share the same key. The merge function is invoked during
165 * compaction and at lookup time, if multiple key/value pairs belonging
166 * to the same key are found in the database.</p>
168 * @param mergeOperator {@link MergeOperator} instance.
169 * @return the instance of the current object.
171 T
setMergeOperator(MergeOperator mergeOperator
);
174 * A single CompactionFilter instance to call into during compaction.
175 * Allows an application to modify/delete a key-value during background
178 * If the client requires a new compaction filter to be used for different
179 * compaction runs, it can specify call
180 * {@link #setCompactionFilterFactory(AbstractCompactionFilterFactory)}
183 * The client should specify only set one of the two.
184 * {@link #setCompactionFilter(AbstractCompactionFilter)} takes precedence
185 * over {@link #setCompactionFilterFactory(AbstractCompactionFilterFactory)}
186 * if the client specifies both.
188 * If multithreaded compaction is being used, the supplied CompactionFilter
189 * instance may be used from different threads concurrently and so should be thread-safe.
191 * @param compactionFilter {@link AbstractCompactionFilter} instance.
192 * @return the instance of the current object.
194 T
setCompactionFilter(
195 final AbstractCompactionFilter
<?
extends AbstractSlice
<?
>> compactionFilter
);
198 * Accessor for the CompactionFilter instance in use.
200 * @return Reference to the CompactionFilter, or null if one hasn't been set.
202 AbstractCompactionFilter
<?
extends AbstractSlice
<?
>> compactionFilter();
205 * This is a factory that provides {@link AbstractCompactionFilter} objects
206 * which allow an application to modify/delete a key-value during background
209 * A new filter will be created on each compaction run. If multithreaded
210 * compaction is being used, each created CompactionFilter will only be used
211 * from a single thread and so does not need to be thread-safe.
213 * @param compactionFilterFactory {@link AbstractCompactionFilterFactory} instance.
214 * @return the instance of the current object.
216 T
setCompactionFilterFactory(
217 final AbstractCompactionFilterFactory
<?
extends AbstractCompactionFilter
<?
>>
218 compactionFilterFactory
);
221 * Accessor for the CompactionFilterFactory instance in use.
223 * @return Reference to the CompactionFilterFactory, or null if one hasn't been set.
225 AbstractCompactionFilterFactory
<?
extends AbstractCompactionFilter
<?
>> compactionFilterFactory();
228 * This prefix-extractor uses the first n bytes of a key as its prefix.
230 * In some hash-based memtable representation such as HashLinkedList
231 * and HashSkipList, prefixes are used to partition the keys into
232 * several buckets. Prefix extractor is used to specify how to
233 * extract the prefix given a key.
235 * @param n use the first n bytes of a key as its prefix.
236 * @return the reference to the current option.
238 T
useFixedLengthPrefixExtractor(int n
);
241 * Same as fixed length prefix extractor, except that when slice is
242 * shorter than the fixed length, it will use the full key.
244 * @param n use the first n bytes of a key as its prefix.
245 * @return the reference to the current option.
247 T
useCappedPrefixExtractor(int n
);
250 * Number of files to trigger level-0 compaction. A value < 0 means that
251 * level-0 compaction will not be triggered by number of files at all.
254 * @param numFiles the number of files in level-0 to trigger compaction.
255 * @return the reference to the current option.
257 T
setLevelZeroFileNumCompactionTrigger(
261 * The number of files in level 0 to trigger compaction from level-0 to
262 * level-1. A value < 0 means that level-0 compaction will not be
263 * triggered by number of files at all.
266 * @return the number of files in level 0 to trigger compaction.
268 int levelZeroFileNumCompactionTrigger();
271 * Soft limit on number of level-0 files. We start slowing down writes at this
272 * point. A value < 0 means that no writing slow down will be triggered by
273 * number of files in level-0.
275 * @param numFiles soft limit on number of level-0 files.
276 * @return the reference to the current option.
278 T
setLevelZeroSlowdownWritesTrigger(
282 * Soft limit on the number of level-0 files. We start slowing down writes
283 * at this point. A value < 0 means that no writing slow down will be
284 * triggered by number of files in level-0.
286 * @return the soft limit on the number of level-0 files.
288 int levelZeroSlowdownWritesTrigger();
291 * Maximum number of level-0 files. We stop writes at this point.
293 * @param numFiles the hard limit of the number of level-0 files.
294 * @return the reference to the current option.
296 T
setLevelZeroStopWritesTrigger(int numFiles
);
299 * Maximum number of level-0 files. We stop writes at this point.
301 * @return the hard limit of the number of level-0 file.
303 int levelZeroStopWritesTrigger();
306 * The ratio between the total size of level-(L+1) files and the total
307 * size of level-L files for all L.
310 * @param multiplier the ratio between the total size of level-(L+1)
311 * files and the total size of level-L files for all L.
312 * @return the reference to the current option.
314 T
setMaxBytesForLevelMultiplier(
318 * The ratio between the total size of level-(L+1) files and the total
319 * size of level-L files for all L.
322 * @return the ratio between the total size of level-(L+1) files and
323 * the total size of level-L files for all L.
325 double maxBytesForLevelMultiplier();
328 * FIFO compaction option.
329 * The oldest table file will be deleted
330 * once the sum of table files reaches this size.
331 * The default value is 1GB (1 * 1024 * 1024 * 1024).
333 * @param maxTableFilesSize the size limit of the total sum of table files.
334 * @return the instance of the current object.
336 T
setMaxTableFilesSizeFIFO(
337 long maxTableFilesSize
);
340 * FIFO compaction option.
341 * The oldest table file will be deleted
342 * once the sum of table files reaches this size.
343 * The default value is 1GB (1 * 1024 * 1024 * 1024).
345 * @return the size limit of the total sum of table files.
347 long maxTableFilesSizeFIFO();
350 * Get the config for mem-table.
352 * @return the mem-table config.
354 MemTableConfig
memTableConfig();
357 * Set the config for mem-table.
359 * @param memTableConfig the mem-table config.
360 * @return the instance of the current object.
361 * @throws java.lang.IllegalArgumentException thrown on 32-Bit platforms
362 * while overflowing the underlying platform specific value.
364 T
setMemTableConfig(MemTableConfig memTableConfig
);
367 * Returns the name of the current mem table representation.
368 * Memtable format can be set using setTableFormatConfig.
370 * @return the name of the currently-used memtable factory.
371 * @see #setTableFormatConfig(org.rocksdb.TableFormatConfig)
373 String
memTableFactoryName();
376 * Get the config for table format.
378 * @return the table format config.
380 TableFormatConfig
tableFormatConfig();
383 * Set the config for table format.
385 * @param config the table format config.
386 * @return the reference of the current options.
388 T
setTableFormatConfig(TableFormatConfig config
);
391 * @return the name of the currently used table factory.
393 String
tableFactoryName();
396 * A list of paths where SST files for this column family
397 * can be put into, with its target size. Similar to db_paths,
398 * newer data is placed into paths specified earlier in the
399 * vector while older data gradually moves to paths specified
400 * later in the vector.
401 * Note that, if a path is supplied to multiple column
402 * families, it would have files and total size from all
403 * the column families combined. User should provision for the
404 * total size(from all the column families) in such cases.
406 * If left empty, db_paths will be used.
409 * @param paths collection of paths for SST files.
410 * @return the reference of the current options.
412 T
setCfPaths(final Collection
<DbPath
> paths
);
415 * @return collection of paths for SST files.
417 List
<DbPath
> cfPaths();
420 * Compression algorithm that will be used for the bottommost level that
421 * contain files. If level-compaction is used, this option will only affect
422 * levels after base level.
424 * Default: {@link CompressionType#DISABLE_COMPRESSION_OPTION}
426 * @param bottommostCompressionType The compression type to use for the
429 * @return the reference of the current options.
431 T
setBottommostCompressionType(
432 final CompressionType bottommostCompressionType
);
435 * Compression algorithm that will be used for the bottommost level that
436 * contain files. If level-compaction is used, this option will only affect
437 * levels after base level.
439 * Default: {@link CompressionType#DISABLE_COMPRESSION_OPTION}
441 * @return The compression type used for the bottommost level
443 CompressionType
bottommostCompressionType();
446 * Set the options for compression algorithms used by
447 * {@link #bottommostCompressionType()} if it is enabled.
449 * To enable it, please see the definition of
450 * {@link CompressionOptions}.
452 * @param compressionOptions the bottom most compression options.
454 * @return the reference of the current options.
456 T
setBottommostCompressionOptions(
457 final CompressionOptions compressionOptions
);
460 * Get the bottom most compression options.
462 * See {@link #setBottommostCompressionOptions(CompressionOptions)}.
464 * @return the bottom most compression options.
466 CompressionOptions
bottommostCompressionOptions();
469 * Set the different options for compression algorithms
471 * @param compressionOptions The compression options
473 * @return the reference of the current options.
475 T
setCompressionOptions(
476 CompressionOptions compressionOptions
);
479 * Get the different options for compression algorithms
481 * @return The compression options
483 CompressionOptions
compressionOptions();
486 * If non-nullptr, use the specified factory for a function to determine the
487 * partitioning of sst files. This helps compaction to split the files
488 * on interesting boundaries (key prefixes) to make propagation of sst
489 * files less write amplifying (covering the whole key space).
493 * @param factory The factory reference
494 * @return the reference of the current options.
496 @Experimental("Caution: this option is experimental")
497 T
setSstPartitionerFactory(SstPartitionerFactory factory
);
500 * Get SST partitioner factory
502 * @return SST partitioner factory
504 @Experimental("Caution: this option is experimental")
505 SstPartitionerFactory
sstPartitionerFactory();
508 * Compaction concurrent thread limiter for the column family.
509 * If non-nullptr, use given concurrent thread limiter to control
510 * the max outstanding compaction tasks. Limiter can be shared with
511 * multiple column families across db instances.
513 * @param concurrentTaskLimiter The compaction thread limiter.
514 * @return the reference of the current options.
516 T
setCompactionThreadLimiter(ConcurrentTaskLimiter concurrentTaskLimiter
);
519 * Get compaction thread limiter
521 * @return Compaction thread limiter
523 ConcurrentTaskLimiter
compactionThreadLimiter();
526 * Default memtable memory budget used with the following methods:
529 * <li>{@link #optimizeLevelStyleCompaction()}</li>
530 * <li>{@link #optimizeUniversalStyleCompaction()}</li>
533 long DEFAULT_COMPACTION_MEMTABLE_MEMORY_BUDGET
= 512 * 1024 * 1024;