// Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.

package org.rocksdb;
/**
 * Interface for DB-wide options that may be changed on a live database via
 * {@link RocksDB#setDBOptions(MutableDBOptions)}.
 *
 * @param <T> the concrete options type, returned from the fluent setters so
 *     that calls can be chained (self-referential generic / CRTP pattern).
 */
public interface MutableDBOptionsInterface<T extends MutableDBOptionsInterface<T>> {
  /**
   * Specifies the maximum number of concurrent background jobs (both flushes
   * and compactions combined).
   * Default: 2
   *
   * @param maxBackgroundJobs number of max concurrent background jobs
   * @return the instance of the current object.
   */
  T setMaxBackgroundJobs(int maxBackgroundJobs);

  /**
   * Returns the maximum number of concurrent background jobs (both flushes
   * and compactions combined).
   * Default: 2
   *
   * @return the maximum number of concurrent background jobs.
   */
  int maxBackgroundJobs();

  /**
   * NOT SUPPORTED ANYMORE: RocksDB automatically decides this based on the
   * value of max_background_jobs. This option is ignored.
   *
   * Suggested number of concurrent background compaction jobs, submitted to
   * the default LOW priority thread pool.
   * Default: -1
   *
   * @param baseBackgroundCompactions Suggested number of background compaction
   *     jobs
   *
   * @deprecated Use {@link #setMaxBackgroundJobs(int)}
   */
  // NOTE: returns void rather than T; kept as-is for source/binary
  // compatibility with existing implementers of this interface.
  @Deprecated
  void setBaseBackgroundCompactions(int baseBackgroundCompactions);

  /**
   * NOT SUPPORTED ANYMORE: RocksDB automatically decides this based on the
   * value of max_background_jobs. This option is ignored.
   *
   * Suggested number of concurrent background compaction jobs, submitted to
   * the default LOW priority thread pool.
   * Default: -1
   *
   * @return Suggested number of background compaction jobs
   */
  int baseBackgroundCompactions();

  /**
   * NOT SUPPORTED ANYMORE: RocksDB automatically decides this based on the
   * value of max_background_jobs. For backwards compatibility we will set
   * `max_background_jobs = max_background_compactions + max_background_flushes`
   * in the case where user sets at least one of `max_background_compactions` or
   * `max_background_flushes` (we replace -1 by 1 in case one option is unset).
   *
   * Specifies the maximum number of concurrent background compaction jobs,
   * submitted to the default LOW priority thread pool.
   * If you're increasing this, also consider increasing number of threads in
   * LOW priority thread pool.
   * Default: -1
   *
   * @param maxBackgroundCompactions the maximum number of background
   *     compaction jobs.
   * @return the instance of the current object.
   *
   * @see RocksEnv#setBackgroundThreads(int)
   * @see RocksEnv#setBackgroundThreads(int, Priority)
   * @see DBOptionsInterface#maxBackgroundFlushes()
   * @deprecated Use {@link #setMaxBackgroundJobs(int)}
   */
  @Deprecated
  T setMaxBackgroundCompactions(int maxBackgroundCompactions);

  /**
   * NOT SUPPORTED ANYMORE: RocksDB automatically decides this based on the
   * value of max_background_jobs. For backwards compatibility we will set
   * `max_background_jobs = max_background_compactions + max_background_flushes`
   * in the case where user sets at least one of `max_background_compactions` or
   * `max_background_flushes` (we replace -1 by 1 in case one option is unset).
   *
   * Returns the maximum number of concurrent background compaction jobs,
   * submitted to the default LOW priority thread pool.
   * When increasing this number, we may also want to consider increasing
   * number of threads in LOW priority thread pool.
   * Default: -1
   *
   * @return the maximum number of concurrent background compaction jobs.
   * @see RocksEnv#setBackgroundThreads(int)
   * @see RocksEnv#setBackgroundThreads(int, Priority)
   *
   * @deprecated Use {@link #setMaxBackgroundJobs(int)}
   */
  @Deprecated
  int maxBackgroundCompactions();

  /**
   * By default RocksDB will flush all memtables on DB close if there are
   * unpersisted data (i.e. with WAL disabled). The flush can be skipped to
   * speed up DB close. Unpersisted data WILL BE LOST.
   *
   * DEFAULT: false
   *
   * Dynamically changeable through
   * {@link RocksDB#setDBOptions(MutableDBOptions)} API.
   *
   * @param avoidFlushDuringShutdown true if we should avoid flush during
   *     shutdown
   *
   * @return the reference to the current options.
   */
  T setAvoidFlushDuringShutdown(boolean avoidFlushDuringShutdown);

  /**
   * By default RocksDB will flush all memtables on DB close if there are
   * unpersisted data (i.e. with WAL disabled). The flush can be skipped to
   * speed up DB close. Unpersisted data WILL BE LOST.
   *
   * DEFAULT: false
   *
   * Dynamically changeable through
   * {@link RocksDB#setDBOptions(MutableDBOptions)} API.
   *
   * @return true if we should avoid flush during shutdown
   */
  boolean avoidFlushDuringShutdown();

  /**
   * This is the maximum buffer size that is used by WritableFileWriter.
   * On Windows, we need to maintain an aligned buffer for writes.
   * We allow the buffer to grow until its size hits the limit.
   *
   * Default: 1024 * 1024 (1 MB)
   *
   * @param writableFileMaxBufferSize the maximum buffer size
   *
   * @return the reference to the current options.
   */
  T setWritableFileMaxBufferSize(long writableFileMaxBufferSize);

  /**
   * This is the maximum buffer size that is used by WritableFileWriter.
   * On Windows, we need to maintain an aligned buffer for writes.
   * We allow the buffer to grow until its size hits the limit.
   *
   * Default: 1024 * 1024 (1 MB)
   *
   * @return the maximum buffer size
   */
  long writableFileMaxBufferSize();

  /**
   * The limited write rate to DB if
   * {@link ColumnFamilyOptions#softPendingCompactionBytesLimit()} or
   * {@link ColumnFamilyOptions#level0SlowdownWritesTrigger()} is triggered,
   * or we are writing to the last mem table allowed and we allow more than 3
   * mem tables. It is calculated using size of user write requests before
   * compression. RocksDB may decide to slow down more if the compaction still
   * gets behind further.
   * If the value is 0, we will infer a value from the `rate_limiter` value
   * if it is not empty, or 16MB if `rate_limiter` is empty. Note that
   * if users change the rate in `rate_limiter` after DB is opened,
   * `delayed_write_rate` won't be adjusted.
   *
   * Unit: bytes per second.
   *
   * Default: 0
   *
   * Dynamically changeable through {@link RocksDB#setDBOptions(MutableDBOptions)}.
   *
   * @param delayedWriteRate the rate in bytes per second
   *
   * @return the reference to the current options.
   */
  T setDelayedWriteRate(long delayedWriteRate);

  /**
   * The limited write rate to DB if
   * {@link ColumnFamilyOptions#softPendingCompactionBytesLimit()} or
   * {@link ColumnFamilyOptions#level0SlowdownWritesTrigger()} is triggered,
   * or we are writing to the last mem table allowed and we allow more than 3
   * mem tables. It is calculated using size of user write requests before
   * compression. RocksDB may decide to slow down more if the compaction still
   * gets behind further.
   * If the value is 0, we will infer a value from the `rate_limiter` value
   * if it is not empty, or 16MB if `rate_limiter` is empty. Note that
   * if users change the rate in `rate_limiter` after DB is opened,
   * `delayed_write_rate` won't be adjusted.
   *
   * Unit: bytes per second.
   *
   * Default: 0
   *
   * Dynamically changeable through {@link RocksDB#setDBOptions(MutableDBOptions)}.
   *
   * @return the rate in bytes per second
   */
  long delayedWriteRate();

  /**
   * <p>Once write-ahead logs exceed this size, we will start forcing the
   * flush of column families whose memtables are backed by the oldest live
   * WAL file (i.e. the ones that are causing all the space amplification).
   * </p>
   * <p>If set to 0 (default), we will dynamically choose the WAL size limit to
   * be [sum of all write_buffer_size * max_write_buffer_number] * 2</p>
   * <p>This option takes effect only when there are more than one column family as
   * otherwise the wal size is dictated by the write_buffer_size.</p>
   * <p>Default: 0</p>
   *
   * @param maxTotalWalSize max total wal size.
   * @return the instance of the current object.
   */
  T setMaxTotalWalSize(long maxTotalWalSize);

  /**
   * <p>Returns the max total wal size. Once write-ahead logs exceed this size,
   * we will start forcing the flush of column families whose memtables are
   * backed by the oldest live WAL file (i.e. the ones that are causing all
   * the space amplification).</p>
   *
   * <p>If set to 0 (default), we will dynamically choose the WAL size limit
   * to be [sum of all write_buffer_size * max_write_buffer_number] * 2
   * </p>
   *
   * @return max total wal size
   */
  long maxTotalWalSize();

  /**
   * The periodicity when obsolete files get deleted. The default
   * value is 6 hours. The files that get out of scope by compaction
   * process will still get automatically deleted on every compaction,
   * regardless of this setting.
   *
   * @param micros the time interval in micros
   * @return the instance of the current object.
   */
  T setDeleteObsoleteFilesPeriodMicros(long micros);

  /**
   * The periodicity when obsolete files get deleted. The default
   * value is 6 hours. The files that get out of scope by compaction
   * process will still get automatically deleted on every compaction,
   * regardless of this setting.
   *
   * @return the time interval in micros when obsolete files will be deleted.
   */
  long deleteObsoleteFilesPeriodMicros();

  /**
   * If not zero, dump rocksdb.stats to LOG every stats_dump_period_sec.
   * Default: 600 (10 minutes)
   *
   * @param statsDumpPeriodSec time interval in seconds.
   * @return the instance of the current object.
   */
  T setStatsDumpPeriodSec(int statsDumpPeriodSec);

  /**
   * If not zero, dump rocksdb.stats to LOG every stats_dump_period_sec.
   * Default: 600 (10 minutes)
   *
   * @return time interval in seconds.
   */
  int statsDumpPeriodSec();

  /**
   * If not zero, dump rocksdb.stats to RocksDB every
   * {@code statsPersistPeriodSec}
   *
   * Default: 600
   *
   * @param statsPersistPeriodSec time interval in seconds.
   * @return the instance of the current object.
   */
  T setStatsPersistPeriodSec(int statsPersistPeriodSec);

  /**
   * If not zero, dump rocksdb.stats to RocksDB every
   * {@code statsPersistPeriodSec}
   *
   * @return time interval in seconds.
   */
  int statsPersistPeriodSec();

  /**
   * If not zero, periodically take stats snapshots and store in memory, the
   * memory size for stats snapshots is capped at {@code statsHistoryBufferSize}
   *
   * Default: 1MB
   *
   * @param statsHistoryBufferSize the size of the buffer.
   * @return the instance of the current object.
   */
  T setStatsHistoryBufferSize(long statsHistoryBufferSize);

  /**
   * If not zero, periodically take stats snapshots and store in memory, the
   * memory size for stats snapshots is capped at {@code statsHistoryBufferSize}
   *
   * @return the size of the buffer.
   */
  long statsHistoryBufferSize();

  /**
   * Number of open files that can be used by the DB. You may need to
   * increase this if your database has a large working set. Value -1 means
   * files opened are always kept open. You can estimate number of files based
   * on {@code target_file_size_base} and {@code target_file_size_multiplier}
   * for level-based compaction. For universal-style compaction, you can usually
   * set it to -1.
   * Default: -1
   *
   * @param maxOpenFiles the maximum number of open files.
   * @return the instance of the current object.
   */
  T setMaxOpenFiles(int maxOpenFiles);

  /**
   * Number of open files that can be used by the DB. You may need to
   * increase this if your database has a large working set. Value -1 means
   * files opened are always kept open. You can estimate number of files based
   * on {@code target_file_size_base} and {@code target_file_size_multiplier}
   * for level-based compaction. For universal-style compaction, you can usually
   * set it to -1.
   * Default: -1
   *
   * @return the maximum number of open files.
   */
  int maxOpenFiles();

  /**
   * Allows OS to incrementally sync files to disk while they are being
   * written, asynchronously, in the background.
   * Issue one request for every bytes_per_sync written. 0 turns it off.
   * Default: 0
   *
   * @param bytesPerSync size in bytes
   * @return the instance of the current object.
   */
  T setBytesPerSync(long bytesPerSync);

  /**
   * Allows OS to incrementally sync files to disk while they are being
   * written, asynchronously, in the background.
   * Issue one request for every bytes_per_sync written. 0 turns it off.
   * Default: 0
   *
   * @return size in bytes
   */
  long bytesPerSync();

  /**
   * Same as {@link #setBytesPerSync(long)}, but applies to WAL files.
   *
   * Default: 0, turned off
   *
   * @param walBytesPerSync size in bytes
   * @return the instance of the current object.
   */
  T setWalBytesPerSync(long walBytesPerSync);

  /**
   * Same as {@link #bytesPerSync()}, but applies to WAL files.
   *
   * Default: 0, turned off
   *
   * @return size in bytes
   */
  long walBytesPerSync();

  /**
   * When true, guarantees WAL files have at most {@link #walBytesPerSync()}
   * bytes submitted for writeback at any given time, and SST files have at most
   * {@link #bytesPerSync()} bytes pending writeback at any given time. This
   * can be used to handle cases where processing speed exceeds I/O speed
   * during file generation, which can lead to a huge sync when the file is
   * finished, even with {@link #bytesPerSync()} / {@link #walBytesPerSync()}
   * properly configured.
   *
   * - If `sync_file_range` is supported it achieves this by waiting for any
   *   prior `sync_file_range`s to finish before proceeding. In this way,
   *   processing (compression, etc.) can proceed uninhibited in the gap
   *   between `sync_file_range`s, and we block only when I/O falls
   *   behind.
   * - Otherwise the `WritableFile::Sync` method is used. Note this mechanism
   *   always blocks, thus preventing the interleaving of I/O and processing.
   *
   * Note: Enabling this option does not provide any additional persistence
   * guarantees, as it may use `sync_file_range`, which does not write out
   * metadata.
   *
   * Default: false
   *
   * @param strictBytesPerSync true to strictly enforce the per-sync byte
   *     limits described above
   * @return the instance of the current object.
   */
  T setStrictBytesPerSync(boolean strictBytesPerSync);

  /**
   * Return the strict byte limit per sync.
   *
   * See {@link #setStrictBytesPerSync(boolean)}
   *
   * @return true if the per-sync byte limits are strictly enforced.
   */
  boolean strictBytesPerSync();

  /**
   * If non-zero, we perform bigger reads when doing compaction. If you're
   * running RocksDB on spinning disks, you should set this to at least 2MB.
   *
   * That way RocksDB's compaction is doing sequential instead of random reads.
   * When non-zero, we also force
   * {@link DBOptionsInterface#newTableReaderForCompactionInputs()} to true.
   *
   * Default: 0
   *
   * @param compactionReadaheadSize The compaction read-ahead size
   *
   * @return the reference to the current options.
   */
  T setCompactionReadaheadSize(final long compactionReadaheadSize);

  /**
   * If non-zero, we perform bigger reads when doing compaction. If you're
   * running RocksDB on spinning disks, you should set this to at least 2MB.
   *
   * That way RocksDB's compaction is doing sequential instead of random reads.
   * When non-zero, we also force
   * {@link DBOptionsInterface#newTableReaderForCompactionInputs()} to true.
   *
   * Default: 0
   *
   * @return The compaction read-ahead size
   */
  long compactionReadaheadSize();
}