// Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
4 public interface MutableDBOptionsInterface
<T
extends MutableDBOptionsInterface
<T
>> {
6 * Specifies the maximum number of concurrent background jobs (both flushes
7 * and compactions combined).
10 * @param maxBackgroundJobs number of max concurrent background jobs
11 * @return the instance of the current object.
13 T
setMaxBackgroundJobs(int maxBackgroundJobs
);
16 * Returns the maximum number of concurrent background jobs (both flushes
17 * and compactions combined).
20 * @return the maximum number of concurrent background jobs.
22 int maxBackgroundJobs();
25 * NOT SUPPORTED ANYMORE: RocksDB automatically decides this based on the
26 * value of max_background_jobs. For backwards compatibility we will set
27 * `max_background_jobs = max_background_compactions + max_background_flushes`
28 * in the case where user sets at least one of `max_background_compactions` or
29 * `max_background_flushes` (we replace -1 by 1 in case one option is unset).
31 * Specifies the maximum number of concurrent background compaction jobs,
32 * submitted to the default LOW priority thread pool.
33 * If you're increasing this, also consider increasing number of threads in
34 * LOW priority thread pool. For more information, see
37 * @param maxBackgroundCompactions the maximum number of background
39 * @return the instance of the current object.
41 * @see RocksEnv#setBackgroundThreads(int)
42 * @see RocksEnv#setBackgroundThreads(int, Priority)
43 * @see DBOptionsInterface#maxBackgroundFlushes()
44 * @deprecated Use {@link #setMaxBackgroundJobs(int)}
47 T
setMaxBackgroundCompactions(int maxBackgroundCompactions
);
50 * NOT SUPPORTED ANYMORE: RocksDB automatically decides this based on the
51 * value of max_background_jobs. For backwards compatibility we will set
52 * `max_background_jobs = max_background_compactions + max_background_flushes`
53 * in the case where user sets at least one of `max_background_compactions` or
54 * `max_background_flushes` (we replace -1 by 1 in case one option is unset).
56 * Returns the maximum number of concurrent background compaction jobs,
57 * submitted to the default LOW priority thread pool.
58 * When increasing this number, we may also want to consider increasing
59 * number of threads in LOW priority thread pool.
62 * @return the maximum number of concurrent background compaction jobs.
63 * @see RocksEnv#setBackgroundThreads(int)
64 * @see RocksEnv#setBackgroundThreads(int, Priority)
66 * @deprecated Use {@link #setMaxBackgroundJobs(int)}
69 int maxBackgroundCompactions();
72 * By default RocksDB will flush all memtables on DB close if there are
73 * unpersisted data (i.e. with WAL disabled) The flush can be skip to speedup
74 * DB close. Unpersisted data WILL BE LOST.
78 * Dynamically changeable through
79 * {@link RocksDB#setOptions(ColumnFamilyHandle, MutableColumnFamilyOptions)}
82 * @param avoidFlushDuringShutdown true if we should avoid flush during
85 * @return the reference to the current options.
87 T
setAvoidFlushDuringShutdown(boolean avoidFlushDuringShutdown
);
90 * By default RocksDB will flush all memtables on DB close if there are
91 * unpersisted data (i.e. with WAL disabled) The flush can be skip to speedup
92 * DB close. Unpersisted data WILL BE LOST.
96 * Dynamically changeable through
97 * {@link RocksDB#setOptions(ColumnFamilyHandle, MutableColumnFamilyOptions)}
100 * @return true if we should avoid flush during shutdown
102 boolean avoidFlushDuringShutdown();
105 * This is the maximum buffer size that is used by WritableFileWriter.
106 * On Windows, we need to maintain an aligned buffer for writes.
107 * We allow the buffer to grow until it's size hits the limit.
109 * Default: 1024 * 1024 (1 MB)
111 * @param writableFileMaxBufferSize the maximum buffer size
113 * @return the reference to the current options.
115 T
setWritableFileMaxBufferSize(long writableFileMaxBufferSize
);
118 * This is the maximum buffer size that is used by WritableFileWriter.
119 * On Windows, we need to maintain an aligned buffer for writes.
120 * We allow the buffer to grow until it's size hits the limit.
122 * Default: 1024 * 1024 (1 MB)
124 * @return the maximum buffer size
126 long writableFileMaxBufferSize();
129 * The limited write rate to DB if
130 * {@link ColumnFamilyOptions#softPendingCompactionBytesLimit()} or
131 * {@link ColumnFamilyOptions#level0SlowdownWritesTrigger()} is triggered,
132 * or we are writing to the last mem table allowed and we allow more than 3
133 * mem tables. It is calculated using size of user write requests before
134 * compression. RocksDB may decide to slow down more if the compaction still
135 * gets behind further.
136 * If the value is 0, we will infer a value from `rater_limiter` value
137 * if it is not empty, or 16MB if `rater_limiter` is empty. Note that
138 * if users change the rate in `rate_limiter` after DB is opened,
139 * `delayed_write_rate` won't be adjusted.
141 * Unit: bytes per second.
145 * Dynamically changeable through {@link RocksDB#setDBOptions(MutableDBOptions)}.
147 * @param delayedWriteRate the rate in bytes per second
149 * @return the reference to the current options.
151 T
setDelayedWriteRate(long delayedWriteRate
);
154 * The limited write rate to DB if
155 * {@link ColumnFamilyOptions#softPendingCompactionBytesLimit()} or
156 * {@link ColumnFamilyOptions#level0SlowdownWritesTrigger()} is triggered,
157 * or we are writing to the last mem table allowed and we allow more than 3
158 * mem tables. It is calculated using size of user write requests before
159 * compression. RocksDB may decide to slow down more if the compaction still
160 * gets behind further.
161 * If the value is 0, we will infer a value from `rater_limiter` value
162 * if it is not empty, or 16MB if `rater_limiter` is empty. Note that
163 * if users change the rate in `rate_limiter` after DB is opened,
164 * `delayed_write_rate` won't be adjusted.
166 * Unit: bytes per second.
170 * Dynamically changeable through {@link RocksDB#setDBOptions(MutableDBOptions)}.
172 * @return the rate in bytes per second
174 long delayedWriteRate();
177 * <p>Set the max total write-ahead log size. Once write-ahead logs exceed this size, we will
178 * start forcing the flush of column families whose memtables are backed by the oldest live WAL
181 * <p>The oldest WAL files are the ones that are causing all the space amplification.
183 * For example, with 15 column families, each with
184 * <code>write_buffer_size = 128 MB</code>
185 * <code>max_write_buffer_number = 6</code>
186 * <code>max_total_wal_size</code> will be calculated to be <code>[15 * 128MB * 6] * 4 =
189 * The RocksDB wiki has some discussion about how the WAL interacts
190 * with memtables and flushing of column families, at
191 * <a href="https://github.com/facebook/rocksdb/wiki/Column-Families">...</a>
193 * <p>If set to 0 (default), we will dynamically choose the WAL size limit to
194 * be [sum of all write_buffer_size * max_write_buffer_number] * 4</p>
195 * <p>This option takes effect only when there are more than one column family as
196 * otherwise the wal size is dictated by the write_buffer_size.</p>
199 * @param maxTotalWalSize max total wal size.
200 * @return the instance of the current object.
202 T
setMaxTotalWalSize(long maxTotalWalSize
);
205 * <p>Returns the max total write-ahead log size. Once write-ahead logs exceed this size,
206 * we will start forcing the flush of column families whose memtables are
207 * backed by the oldest live WAL file.</p>
208 * <p>The oldest WAL files are the ones that are causing all the space amplification.
210 * For example, with 15 column families, each with
211 * <code>write_buffer_size = 128 MB</code>
212 * <code>max_write_buffer_number = 6</code>
213 * <code>max_total_wal_size</code> will be calculated to be <code>[15 * 128MB * 6] * 4 =
216 * The RocksDB wiki has some discussion about how the WAL interacts
217 * with memtables and flushing of column families, at
218 * <a href="https://github.com/facebook/rocksdb/wiki/Column-Families">...</a>
220 * <p>If set to 0 (default), we will dynamically choose the WAL size limit to
221 * be [sum of all write_buffer_size * max_write_buffer_number] * 4</p>
222 * <p>This option takes effect only when there are more than one column family as
223 * otherwise the wal size is dictated by the write_buffer_size.</p>
227 * <p>If set to 0 (default), we will dynamically choose the WAL size limit
228 * to be [sum of all write_buffer_size * max_write_buffer_number] * 4
231 * @return max total wal size
233 long maxTotalWalSize();
236 * The periodicity when obsolete files get deleted. The default
237 * value is 6 hours. The files that get out of scope by compaction
238 * process will still get automatically delete on every compaction,
239 * regardless of this setting
241 * @param micros the time interval in micros
242 * @return the instance of the current object.
244 T
setDeleteObsoleteFilesPeriodMicros(long micros
);
247 * The periodicity when obsolete files get deleted. The default
248 * value is 6 hours. The files that get out of scope by compaction
249 * process will still get automatically delete on every compaction,
250 * regardless of this setting
252 * @return the time interval in micros when obsolete files will be deleted.
254 long deleteObsoleteFilesPeriodMicros();
257 * if not zero, dump rocksdb.stats to LOG every stats_dump_period_sec
258 * Default: 600 (10 minutes)
260 * @param statsDumpPeriodSec time interval in seconds.
261 * @return the instance of the current object.
263 T
setStatsDumpPeriodSec(int statsDumpPeriodSec
);
266 * If not zero, dump rocksdb.stats to LOG every stats_dump_period_sec
267 * Default: 600 (10 minutes)
269 * @return time interval in seconds.
271 int statsDumpPeriodSec();
274 * If not zero, dump rocksdb.stats to RocksDB every
275 * {@code statsPersistPeriodSec}
279 * @param statsPersistPeriodSec time interval in seconds.
280 * @return the instance of the current object.
282 T
setStatsPersistPeriodSec(int statsPersistPeriodSec
);
285 * If not zero, dump rocksdb.stats to RocksDB every
286 * {@code statsPersistPeriodSec}
288 * @return time interval in seconds.
290 int statsPersistPeriodSec();
293 * If not zero, periodically take stats snapshots and store in memory, the
294 * memory size for stats snapshots is capped at {@code statsHistoryBufferSize}
298 * @param statsHistoryBufferSize the size of the buffer.
299 * @return the instance of the current object.
301 T
setStatsHistoryBufferSize(long statsHistoryBufferSize
);
304 * If not zero, periodically take stats snapshots and store in memory, the
305 * memory size for stats snapshots is capped at {@code statsHistoryBufferSize}
307 * @return the size of the buffer.
309 long statsHistoryBufferSize();
312 * Number of open files that can be used by the DB. You may need to
313 * increase this if your database has a large working set. Value -1 means
314 * files opened are always kept open. You can estimate number of files based
315 * on {@code target_file_size_base} and {@code target_file_size_multiplier}
316 * for level-based compaction. For universal-style compaction, you can usually
320 * @param maxOpenFiles the maximum number of open files.
321 * @return the instance of the current object.
323 T
setMaxOpenFiles(int maxOpenFiles
);
326 * Number of open files that can be used by the DB. You may need to
327 * increase this if your database has a large working set. Value -1 means
328 * files opened are always kept open. You can estimate number of files based
329 * on {@code target_file_size_base} and {@code target_file_size_multiplier}
330 * for level-based compaction. For universal-style compaction, you can usually
334 * @return the maximum number of open files.
339 * Allows OS to incrementally sync files to disk while they are being
340 * written, asynchronously, in the background.
341 * Issue one request for every bytes_per_sync written. 0 turns it off.
344 * @param bytesPerSync size in bytes
345 * @return the instance of the current object.
347 T
setBytesPerSync(long bytesPerSync
);
350 * Allows OS to incrementally sync files to disk while they are being
351 * written, asynchronously, in the background.
352 * Issue one request for every bytes_per_sync written. 0 turns it off.
355 * @return size in bytes
360 * Same as {@link #setBytesPerSync(long)} , but applies to WAL files
362 * Default: 0, turned off
364 * @param walBytesPerSync size in bytes
365 * @return the instance of the current object.
367 T
setWalBytesPerSync(long walBytesPerSync
);
370 * Same as {@link #bytesPerSync()} , but applies to WAL files
372 * Default: 0, turned off
374 * @return size in bytes
376 long walBytesPerSync();
379 * When true, guarantees WAL files have at most {@link #walBytesPerSync()}
380 * bytes submitted for writeback at any given time, and SST files have at most
381 * {@link #bytesPerSync()} bytes pending writeback at any given time. This
382 * can be used to handle cases where processing speed exceeds I/O speed
383 * during file generation, which can lead to a huge sync when the file is
384 * finished, even with {@link #bytesPerSync()} / {@link #walBytesPerSync()}
385 * properly configured.
387 * - If `sync_file_range` is supported it achieves this by waiting for any
388 * prior `sync_file_range`s to finish before proceeding. In this way,
389 * processing (compression, etc.) can proceed uninhibited in the gap
390 * between `sync_file_range`s, and we block only when I/O falls
392 * - Otherwise the `WritableFile::Sync` method is used. Note this mechanism
393 * always blocks, thus preventing the interleaving of I/O and processing.
395 * Note: Enabling this option does not provide any additional persistence
396 * guarantees, as it may use `sync_file_range`, which does not write out
401 * @param strictBytesPerSync the bytes per sync
402 * @return the instance of the current object.
404 T
setStrictBytesPerSync(boolean strictBytesPerSync
);
407 * Return the strict byte limit per sync.
409 * See {@link #setStrictBytesPerSync(boolean)}
411 * @return the limit in bytes.
413 boolean strictBytesPerSync();
416 * If non-zero, we perform bigger reads when doing compaction. If you're
417 * running RocksDB on spinning disks, you should set this to at least 2MB.
419 * That way RocksDB's compaction is doing sequential instead of random reads.
423 * @param compactionReadaheadSize The compaction read-ahead size
425 * @return the reference to the current options.
427 T
setCompactionReadaheadSize(final long compactionReadaheadSize
);
430 * If non-zero, we perform bigger reads when doing compaction. If you're
431 * running RocksDB on spinning disks, you should set this to at least 2MB.
433 * That way RocksDB's compaction is doing sequential instead of random reads.
437 * @return The compaction read-ahead size
439 long compactionReadaheadSize();