// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
// This source code is licensed under both the GPLv2 (found in the
// COPYING file in the root directory) and Apache 2.0 License
// (found in the LICENSE.Apache file in the root directory).

package org.rocksdb;

/**
 * Advanced Column Family Options which are mutable
 *
 * Taken from include/rocksdb/advanced_options.h
 * and MutableCFOptions in util/cf_options.h
 */
public interface AdvancedMutableColumnFamilyOptionsInterface
    <T extends AdvancedMutableColumnFamilyOptionsInterface> {

  /**
   * The maximum number of write buffers that are built up in memory.
   * The default is 2, so that when 1 write buffer is being flushed to
   * storage, new writes can continue to the other write buffer.
   * Default: 2
   *
   * @param maxWriteBufferNumber maximum number of write buffers.
   * @return the instance of the current options.
   */
  T setMaxWriteBufferNumber(
      int maxWriteBufferNumber);

  /**
   * Returns maximum number of write buffers.
   *
   * @return maximum number of write buffers.
   * @see #setMaxWriteBufferNumber(int)
   */
  int maxWriteBufferNumber();

  /**
   * Number of locks used for inplace update
   * Default: 10000, if inplace_update_support = true, else 0.
   *
   * @param inplaceUpdateNumLocks the number of locks used for
   *     inplace updates.
   * @return the reference to the current options.
   * @throws java.lang.IllegalArgumentException thrown on 32-Bit platforms
   *     while overflowing the underlying platform specific value.
   */
  T setInplaceUpdateNumLocks(
      long inplaceUpdateNumLocks);

  /**
   * Number of locks used for inplace update
   * Default: 10000, if inplace_update_support = true, else 0.
   *
   * @return the number of locks used for inplace update.
   */
  long inplaceUpdateNumLocks();

  /**
   * If prefix_extractor is set and memtable_prefix_bloom_size_ratio is not 0,
   * create a prefix bloom filter for the memtable with the size of
   * write_buffer_size * memtable_prefix_bloom_size_ratio.
   * If it is larger than 0.25, it is sanitized to 0.25.
   *
   * Default: 0 (disable)
   *
   * @param memtablePrefixBloomSizeRatio The ratio
   * @return the reference to the current options.
   */
  T setMemtablePrefixBloomSizeRatio(
      double memtablePrefixBloomSizeRatio);

  /**
   * If prefix_extractor is set and memtable_prefix_bloom_size_ratio is not 0,
   * create a prefix bloom filter for the memtable with the size of
   * write_buffer_size * memtable_prefix_bloom_size_ratio.
   * If it is larger than 0.25, it is sanitized to 0.25.
   *
   * Default: 0 (disable)
   *
   * @return the ratio
   */
  double memtablePrefixBloomSizeRatio();

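  /*
   * A worked sketch (not part of the interface): with a prefix extractor set
   * and a 64 MB write buffer, a ratio of 0.1 sizes the memtable prefix bloom
   * filter at about 64 MB * 0.1 = 6.4 MB; a ratio of 0.5 would be capped to
   * 0.25. A hypothetical configuration, assuming a ColumnFamilyOptions
   * instance "cfOpts":
   *
   *   cfOpts.useFixedLengthPrefixExtractor(8)         // prefix_extractor
   *         .setWriteBufferSize(64 * 1024 * 1024)     // 64 MB memtable
   *         .setMemtablePrefixBloomSizeRatio(0.1);    // ~6.4 MB bloom filter
   */
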
  /**
   * Page size for huge page TLB for bloom in memtable. If &le; 0, the memory
   * is not allocated from the huge page TLB but from malloc.
   * Huge pages need to be reserved for it to be allocated. For example:
   * sysctl -w vm.nr_hugepages=20
   * See linux doc Documentation/vm/hugetlbpage.txt
   *
   * @param memtableHugePageSize The page size of the huge
   *     page tlb
   * @return the reference to the current options.
   */
  T setMemtableHugePageSize(
      long memtableHugePageSize);

  /**
   * Page size for huge page TLB for bloom in memtable. If &le; 0, the memory
   * is not allocated from the huge page TLB but from malloc.
   * Huge pages need to be reserved for it to be allocated. For example:
   * sysctl -w vm.nr_hugepages=20
   * See linux doc Documentation/vm/hugetlbpage.txt
   *
   * @return The page size of the huge page tlb
   */
  long memtableHugePageSize();

  /**
   * The size of one block in arena memory allocation.
   * If &le; 0, a proper value is automatically calculated (usually 1/10 of
   * write_buffer_size).
   *
   * There are two additional restrictions on the specified size:
   * (1) size should be in the range of [4096, 2 &lt;&lt; 30] and
   * (2) be the multiple of the CPU word (which helps with the memory
   * alignment).
   *
   * We'll automatically check and adjust the size number to make sure it
   * conforms to the restrictions.
   * Default: 0
   *
   * @param arenaBlockSize the size of an arena block
   * @return the reference to the current options.
   * @throws java.lang.IllegalArgumentException thrown on 32-Bit platforms
   *     while overflowing the underlying platform specific value.
   */
  T setArenaBlockSize(long arenaBlockSize);

  /**
   * The size of one block in arena memory allocation.
   * If &le; 0, a proper value is automatically calculated (usually 1/10 of
   * write_buffer_size).
   *
   * There are two additional restrictions on the specified size:
   * (1) size should be in the range of [4096, 2 &lt;&lt; 30] and
   * (2) be the multiple of the CPU word (which helps with the memory
   * alignment).
   *
   * We'll automatically check and adjust the size number to make sure it
   * conforms to the restrictions.
   * Default: 0
   *
   * @return the size of an arena block
   */
  long arenaBlockSize();

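  /*
   * A rough sketch (not part of the interface) of how an explicitly requested
   * size could be kept within the documented restrictions; the real adjustment
   * happens inside RocksDB, this is only an illustration of the rules above,
   * using a hypothetical variable "requested":
   *
   *   long size = Math.max(4096L, Math.min(requested, 2L << 30)); // clamp
   *   size -= size % 8; // round down to a multiple of the (64-bit) CPU word
   */
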
  /**
   * Soft limit on number of level-0 files. We start slowing down writes at this
   * point. A value &lt; 0 means that no write slowdown will be triggered by
   * the number of files in level-0.
   *
   * @param level0SlowdownWritesTrigger The soft limit on the number of
   *     level-0 files
   * @return the reference to the current options.
   */
  T setLevel0SlowdownWritesTrigger(
      int level0SlowdownWritesTrigger);

  /**
   * Soft limit on number of level-0 files. We start slowing down writes at this
   * point. A value &lt; 0 means that no write slowdown will be triggered by
   * the number of files in level-0.
   *
   * @return The soft limit on the number of
   *     level-0 files
   */
  int level0SlowdownWritesTrigger();

  /**
   * Maximum number of level-0 files. We stop writes at this point.
   *
   * @param level0StopWritesTrigger The maximum number of level-0 files
   * @return the reference to the current options.
   */
  T setLevel0StopWritesTrigger(
      int level0StopWritesTrigger);

  /**
   * Maximum number of level-0 files. We stop writes at this point.
   *
   * @return The maximum number of level-0 files
   */
  int level0StopWritesTrigger();

  /**
   * The target file size for compaction.
   * This targetFileSizeBase determines a level-1 file size.
   * Target file size for level L can be calculated by
   * targetFileSizeBase * (targetFileSizeMultiplier ^ (L-1))
   * For example, if targetFileSizeBase is 2MB and
   * target_file_size_multiplier is 10, then each file on level-1 will
   * be 2MB, and each file on level-2 will be 20MB,
   * and each file on level-3 will be 200MB.
   * By default targetFileSizeBase is 2MB.
   *
   * @param targetFileSizeBase the target size of a level-1 file.
   * @return the reference to the current options.
   *
   * @see #setTargetFileSizeMultiplier(int)
   */
  T setTargetFileSizeBase(
      long targetFileSizeBase);

  /**
   * The target file size for compaction.
   * This targetFileSizeBase determines a level-1 file size.
   * Target file size for level L can be calculated by
   * targetFileSizeBase * (targetFileSizeMultiplier ^ (L-1))
   * For example, if targetFileSizeBase is 2MB and
   * target_file_size_multiplier is 10, then each file on level-1 will
   * be 2MB, and each file on level-2 will be 20MB,
   * and each file on level-3 will be 200MB.
   * By default targetFileSizeBase is 2MB.
   *
   * @return the target size of a level-1 file.
   *
   * @see #targetFileSizeMultiplier()
   */
  long targetFileSizeBase();

  /**
   * targetFileSizeMultiplier defines the size ratio between a
   * level-(L+1) file and a level-L file.
   * By default targetFileSizeMultiplier is 1, meaning
   * files in different levels have the same target.
   *
   * @param multiplier the size ratio between a level-(L+1) file
   *     and a level-L file.
   * @return the reference to the current options.
   */
  T setTargetFileSizeMultiplier(
      int multiplier);

  /**
   * targetFileSizeMultiplier defines the size ratio between a
   * level-(L+1) file and a level-L file.
   * By default targetFileSizeMultiplier is 1, meaning
   * files in different levels have the same target.
   *
   * @return the size ratio between a level-(L+1) file and a level-L file.
   */
  int targetFileSizeMultiplier();

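  /*
   * A worked illustration (not part of the interface), assuming a
   * ColumnFamilyOptions instance "cfOpts": with the values below, level-1
   * files target 64 MB, level-2 files 128 MB, level-3 files 256 MB, and so
   * on, following targetFileSizeBase * (targetFileSizeMultiplier ^ (L-1)).
   *
   *   cfOpts.setTargetFileSizeBase(64 * 1024 * 1024)  // 64 MB at level-1
   *         .setTargetFileSizeMultiplier(2);          // double per level
   */
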
  /**
   * The ratio between the total size of level-(L+1) files and the total
   * size of level-L files for all L.
   * DEFAULT: 10
   *
   * @param multiplier the ratio between the total size of level-(L+1)
   *     files and the total size of level-L files for all L.
   * @return the reference to the current options.
   *
   * See {@link MutableColumnFamilyOptionsInterface#setMaxBytesForLevelBase(long)}
   */
  T setMaxBytesForLevelMultiplier(double multiplier);

  /**
   * The ratio between the total size of level-(L+1) files and the total
   * size of level-L files for all L.
   * DEFAULT: 10
   *
   * @return the ratio between the total size of level-(L+1) files and
   *     the total size of level-L files for all L.
   *
   * See {@link MutableColumnFamilyOptionsInterface#maxBytesForLevelBase()}
   */
  double maxBytesForLevelMultiplier();

  /**
   * Different max-size multipliers for different levels.
   * These are multiplied by max_bytes_for_level_multiplier to arrive
   * at the max-size of each level.
   *
   * Default: 1
   *
   * @param maxBytesForLevelMultiplierAdditional The max-size multipliers
   *     for each level
   * @return the reference to the current options.
   */
  T setMaxBytesForLevelMultiplierAdditional(
      int[] maxBytesForLevelMultiplierAdditional);

  /**
   * Different max-size multipliers for different levels.
   * These are multiplied by max_bytes_for_level_multiplier to arrive
   * at the max-size of each level.
   *
   * Default: 1
   *
   * @return The max-size multipliers for each level
   */
  int[] maxBytesForLevelMultiplierAdditional();

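  /*
   * A worked illustration (not part of the interface), assuming a
   * ColumnFamilyOptions instance "cfOpts": with maxBytesForLevelBase at
   * 256 MB and a multiplier of 10, level-1 holds up to about 256 MB,
   * level-2 about 2.5 GB, level-3 about 25 GB, and so on.
   *
   *   cfOpts.setMaxBytesForLevelBase(256 * 1024 * 1024)
   *         .setMaxBytesForLevelMultiplier(10);
   */
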
  /**
   * All writes will be slowed down to at least delayed_write_rate if the
   * estimated bytes needed to be compacted exceed this threshold.
   *
   * Default: 64GB
   *
   * @param softPendingCompactionBytesLimit The soft limit to impose on
   *     compaction
   * @return the reference to the current options.
   */
  T setSoftPendingCompactionBytesLimit(
      long softPendingCompactionBytesLimit);

  /**
   * All writes will be slowed down to at least delayed_write_rate if the
   * estimated bytes needed to be compacted exceed this threshold.
   *
   * Default: 64GB
   *
   * @return The soft limit to impose on compaction
   */
  long softPendingCompactionBytesLimit();

  /**
   * All writes are stopped if the estimated bytes needed to be compacted
   * exceed this threshold.
   *
   * Default: 256GB
   *
   * @param hardPendingCompactionBytesLimit The hard limit to impose on
   *     compaction
   * @return the reference to the current options.
   */
  T setHardPendingCompactionBytesLimit(
      long hardPendingCompactionBytesLimit);

  /**
   * All writes are stopped if the estimated bytes needed to be compacted
   * exceed this threshold.
   *
   * Default: 256GB
   *
   * @return The hard limit to impose on compaction
   */
  long hardPendingCompactionBytesLimit();

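  /*
   * A hypothetical configuration (not part of the interface), assuming a
   * ColumnFamilyOptions instance "cfOpts": slow writes down once roughly
   * 64 GB of compaction debt is estimated, and stop them entirely at 256 GB.
   *
   *   cfOpts.setSoftPendingCompactionBytesLimit(64L * 1024 * 1024 * 1024)
   *         .setHardPendingCompactionBytesLimit(256L * 1024 * 1024 * 1024);
   */
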
  /**
   * An iteration-&gt;Next() sequentially skips over keys with the same
   * user-key unless this option is set. This number specifies the number
   * of keys (with the same userkey) that will be sequentially
   * skipped before a reseek is issued.
   * Default: 8
   *
   * @param maxSequentialSkipInIterations the number of keys that can
   *     be skipped in an iteration.
   * @return the reference to the current options.
   */
  T setMaxSequentialSkipInIterations(
      long maxSequentialSkipInIterations);

  /**
   * An iteration-&gt;Next() sequentially skips over keys with the same
   * user-key unless this option is set. This number specifies the number
   * of keys (with the same userkey) that will be sequentially
   * skipped before a reseek is issued.
   * Default: 8
   *
   * @return the number of keys that can be skipped in an iteration.
   */
  long maxSequentialSkipInIterations();

  /**
   * Maximum number of successive merge operations on a key in the memtable.
   *
   * When a merge operation is added to the memtable and the maximum number of
   * successive merges is reached, the value of the key will be calculated and
   * inserted into the memtable instead of the merge operation. This will
   * ensure that there are never more than max_successive_merges merge
   * operations in the memtable.
   *
   * Default: 0 (disabled)
   *
   * @param maxSuccessiveMerges the maximum number of successive merges.
   * @return the reference to the current options.
   * @throws java.lang.IllegalArgumentException thrown on 32-Bit platforms
   *     while overflowing the underlying platform specific value.
   */
  T setMaxSuccessiveMerges(
      long maxSuccessiveMerges);

  /**
   * Maximum number of successive merge operations on a key in the memtable.
   *
   * When a merge operation is added to the memtable and the maximum number of
   * successive merges is reached, the value of the key will be calculated and
   * inserted into the memtable instead of the merge operation. This will
   * ensure that there are never more than max_successive_merges merge
   * operations in the memtable.
   *
   * Default: 0 (disabled)
   *
   * @return the maximum number of successive merges.
   */
  long maxSuccessiveMerges();

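  /*
   * A hypothetical configuration (not part of the interface), assuming a
   * ColumnFamilyOptions instance "cfOpts" with a merge operator set: once
   * 100 successive merge operands accumulate for a key in the memtable,
   * they are folded into a plain value instead of storing another operand.
   *
   *   cfOpts.setMergeOperator(new StringAppendOperator())
   *         .setMaxSuccessiveMerges(100);
   */
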
  /**
   * After writing every SST file, reopen it and read all the keys.
   *
   * Default: false
   *
   * @param paranoidFileChecks true to enable paranoid file checks
   * @return the reference to the current options.
   */
  T setParanoidFileChecks(
      boolean paranoidFileChecks);

  /**
   * After writing every SST file, reopen it and read all the keys.
   *
   * Default: false
   *
   * @return true if paranoid file checks are enabled
   */
  boolean paranoidFileChecks();

  /**
   * Measure IO stats in compactions and flushes, if true.
   *
   * Default: false
   *
   * @param reportBgIoStats true to enable reporting
   * @return the reference to the current options.
   */
  T setReportBgIoStats(
      boolean reportBgIoStats);

  /**
   * Determine whether IO stats in compactions and flushes are being measured.
   *
   * @return true if reporting is enabled
   */
  boolean reportBgIoStats();

  /**
   * Non-bottom-level files older than TTL will go through the compaction
   * process. This needs {@link MutableDBOptionsInterface#maxOpenFiles()} to be
   * set to -1.
   *
   * Enabled only for level compaction for now.
   *
   * Default: 0 (disabled)
   *
   * Dynamically changeable through
   * {@link RocksDB#setOptions(ColumnFamilyHandle, MutableColumnFamilyOptions)}.
   *
   * @param ttl the time-to-live.
   *
   * @return the reference to the current options.
   */
  T setTtl(final long ttl);

  /**
   * Get the TTL for non-bottom-level files that will go through the compaction
   * process.
   *
   * See {@link #setTtl(long)}.
   *
   * @return the time-to-live.
   */
  long ttl();
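
  /*
   * A hypothetical usage sketch (not part of the interface): the TTL can be
   * changed on a live database via RocksDB#setOptions, e.g. for an already
   * open column family handle "cfHandle" on a RocksDB instance "db":
   *
   *   db.setOptions(cfHandle,
   *       MutableColumnFamilyOptions.builder()
   *           .setTtl(30L * 24 * 60 * 60)   // 30 days, in seconds
   *           .build());
   */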
}