]> git.proxmox.com Git - ceph.git/blob - ceph/src/rocksdb/java/src/main/java/org/rocksdb/ReadOptions.java
update ceph source to reef 18.1.2
[ceph.git] / ceph / src / rocksdb / java / src / main / java / org / rocksdb / ReadOptions.java
1 // Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
2 // This source code is licensed under both the GPLv2 (found in the
3 // COPYING file in the root directory) and Apache 2.0 License
4 // (found in the LICENSE.Apache file in the root directory).
5
6 package org.rocksdb;
7
/**
 * The class that controls the get behavior.
 *
 * Note that dispose() must be called before an Options instance
 * becomes out-of-scope to release the allocated memory in c++.
 */
public class ReadOptions extends RocksObject {
  /**
   * Constructs a ReadOptions instance backed by a newly allocated
   * native ReadOptions object with default values.
   */
  public ReadOptions() {
    super(newReadOptions());
  }

  /**
   * Constructs a ReadOptions instance with explicit checksum-verification
   * and cache-fill behavior.
   *
   * @param verifyChecksums verification will be performed on every read
   *     when set to true
   * @param fillCache if true, then fill-cache behavior will be performed.
   */
  public ReadOptions(final boolean verifyChecksums, final boolean fillCache) {
    super(newReadOptions(verifyChecksums, fillCache));
  }
27
28 /**
29 * Copy constructor.
30 *
31 * NOTE: This does a shallow copy, which means snapshot, iterate_upper_bound
32 * and other pointers will be cloned!
33 *
34 * @param other The ReadOptions to copy.
35 */
36 public ReadOptions(ReadOptions other) {
37 super(copyReadOptions(other.nativeHandle_));
38 this.iterateLowerBoundSlice_ = other.iterateLowerBoundSlice_;
39 this.iterateUpperBoundSlice_ = other.iterateUpperBoundSlice_;
40 this.timestampSlice_ = other.timestampSlice_;
41 this.iterStartTs_ = other.iterStartTs_;
42 }
43
44 /**
45 * If true, all data read from underlying storage will be
46 * verified against corresponding checksums.
47 * Default: true
48 *
49 * @return true if checksum verification is on.
50 */
51 public boolean verifyChecksums() {
52 assert(isOwningHandle());
53 return verifyChecksums(nativeHandle_);
54 }
55
56 /**
57 * If true, all data read from underlying storage will be
58 * verified against corresponding checksums.
59 * Default: true
60 *
61 * @param verifyChecksums if true, then checksum verification
62 * will be performed on every read.
63 * @return the reference to the current ReadOptions.
64 */
65 public ReadOptions setVerifyChecksums(
66 final boolean verifyChecksums) {
67 assert(isOwningHandle());
68 setVerifyChecksums(nativeHandle_, verifyChecksums);
69 return this;
70 }
71
72 // TODO(yhchiang): this option seems to be block-based table only.
73 // move this to a better place?
74 /**
75 * Fill the cache when loading the block-based sst formated db.
76 * Callers may wish to set this field to false for bulk scans.
77 * Default: true
78 *
79 * @return true if the fill-cache behavior is on.
80 */
81 public boolean fillCache() {
82 assert(isOwningHandle());
83 return fillCache(nativeHandle_);
84 }
85
86 /**
87 * Fill the cache when loading the block-based sst formatted db.
88 * Callers may wish to set this field to false for bulk scans.
89 * Default: true
90 *
91 * @param fillCache if true, then fill-cache behavior will be
92 * performed.
93 * @return the reference to the current ReadOptions.
94 */
95 public ReadOptions setFillCache(final boolean fillCache) {
96 assert(isOwningHandle());
97 setFillCache(nativeHandle_, fillCache);
98 return this;
99 }
100
101 /**
102 * Returns the currently assigned Snapshot instance.
103 *
104 * @return the Snapshot assigned to this instance. If no Snapshot
105 * is assigned null.
106 */
107 public Snapshot snapshot() {
108 assert(isOwningHandle());
109 long snapshotHandle = snapshot(nativeHandle_);
110 if (snapshotHandle != 0) {
111 return new Snapshot(snapshotHandle);
112 }
113 return null;
114 }
115
116 /**
117 * <p>If "snapshot" is non-nullptr, read as of the supplied snapshot
118 * (which must belong to the DB that is being read and which must
119 * not have been released). If "snapshot" is nullptr, use an implicit
120 * snapshot of the state at the beginning of this read operation.</p>
121 * <p>Default: null</p>
122 *
123 * @param snapshot {@link Snapshot} instance
124 * @return the reference to the current ReadOptions.
125 */
126 public ReadOptions setSnapshot(final Snapshot snapshot) {
127 assert(isOwningHandle());
128 if (snapshot != null) {
129 setSnapshot(nativeHandle_, snapshot.nativeHandle_);
130 } else {
131 setSnapshot(nativeHandle_, 0l);
132 }
133 return this;
134 }
135
136 /**
137 * Returns the current read tier.
138 *
139 * @return the read tier in use, by default {@link ReadTier#READ_ALL_TIER}
140 */
141 public ReadTier readTier() {
142 assert(isOwningHandle());
143 return ReadTier.getReadTier(readTier(nativeHandle_));
144 }
145
146 /**
147 * Specify if this read request should process data that ALREADY
148 * resides on a particular cache. If the required data is not
149 * found at the specified cache, then {@link RocksDBException} is thrown.
150 *
151 * @param readTier {@link ReadTier} instance
152 * @return the reference to the current ReadOptions.
153 */
154 public ReadOptions setReadTier(final ReadTier readTier) {
155 assert(isOwningHandle());
156 setReadTier(nativeHandle_, readTier.getValue());
157 return this;
158 }
159
160 /**
161 * Specify to create a tailing iterator -- a special iterator that has a
162 * view of the complete database (i.e. it can also be used to read newly
163 * added data) and is optimized for sequential reads. It will return records
164 * that were inserted into the database after the creation of the iterator.
165 * Default: false
166 *
167 * Not supported in {@code ROCKSDB_LITE} mode!
168 *
169 * @return true if tailing iterator is enabled.
170 */
171 public boolean tailing() {
172 assert(isOwningHandle());
173 return tailing(nativeHandle_);
174 }
175
176 /**
177 * Specify to create a tailing iterator -- a special iterator that has a
178 * view of the complete database (i.e. it can also be used to read newly
179 * added data) and is optimized for sequential reads. It will return records
180 * that were inserted into the database after the creation of the iterator.
181 * Default: false
182 * Not supported in ROCKSDB_LITE mode!
183 *
184 * @param tailing if true, then tailing iterator will be enabled.
185 * @return the reference to the current ReadOptions.
186 */
187 public ReadOptions setTailing(final boolean tailing) {
188 assert(isOwningHandle());
189 setTailing(nativeHandle_, tailing);
190 return this;
191 }
192
193 /**
194 * Returns whether managed iterators will be used.
195 *
196 * @return the setting of whether managed iterators will be used,
197 * by default false
198 *
199 * @deprecated This options is not used anymore.
200 */
201 @Deprecated
202 public boolean managed() {
203 assert(isOwningHandle());
204 return managed(nativeHandle_);
205 }
206
207 /**
208 * Specify to create a managed iterator -- a special iterator that
209 * uses less resources by having the ability to free its underlying
210 * resources on request.
211 *
212 * @param managed if true, then managed iterators will be enabled.
213 * @return the reference to the current ReadOptions.
214 *
215 * @deprecated This options is not used anymore.
216 */
217 @Deprecated
218 public ReadOptions setManaged(final boolean managed) {
219 assert(isOwningHandle());
220 setManaged(nativeHandle_, managed);
221 return this;
222 }
223
224 /**
225 * Returns whether a total seek order will be used
226 *
227 * @return the setting of whether a total seek order will be used
228 */
229 public boolean totalOrderSeek() {
230 assert(isOwningHandle());
231 return totalOrderSeek(nativeHandle_);
232 }
233
234 /**
235 * Enable a total order seek regardless of index format (e.g. hash index)
236 * used in the table. Some table format (e.g. plain table) may not support
237 * this option.
238 *
239 * @param totalOrderSeek if true, then total order seek will be enabled.
240 * @return the reference to the current ReadOptions.
241 */
242 public ReadOptions setTotalOrderSeek(final boolean totalOrderSeek) {
243 assert(isOwningHandle());
244 setTotalOrderSeek(nativeHandle_, totalOrderSeek);
245 return this;
246 }
247
248 /**
249 * Returns whether the iterator only iterates over the same prefix as the seek
250 *
251 * @return the setting of whether the iterator only iterates over the same
252 * prefix as the seek, default is false
253 */
254 public boolean prefixSameAsStart() {
255 assert(isOwningHandle());
256 return prefixSameAsStart(nativeHandle_);
257 }
258
259 /**
260 * Enforce that the iterator only iterates over the same prefix as the seek.
261 * This option is effective only for prefix seeks, i.e. prefix_extractor is
262 * non-null for the column family and {@link #totalOrderSeek()} is false.
263 * Unlike iterate_upper_bound, {@link #setPrefixSameAsStart(boolean)} only
264 * works within a prefix but in both directions.
265 *
266 * @param prefixSameAsStart if true, then the iterator only iterates over the
267 * same prefix as the seek
268 * @return the reference to the current ReadOptions.
269 */
270 public ReadOptions setPrefixSameAsStart(final boolean prefixSameAsStart) {
271 assert(isOwningHandle());
272 setPrefixSameAsStart(nativeHandle_, prefixSameAsStart);
273 return this;
274 }
275
276 /**
277 * Returns whether the blocks loaded by the iterator will be pinned in memory
278 *
279 * @return the setting of whether the blocks loaded by the iterator will be
280 * pinned in memory
281 */
282 public boolean pinData() {
283 assert(isOwningHandle());
284 return pinData(nativeHandle_);
285 }
286
287 /**
288 * Keep the blocks loaded by the iterator pinned in memory as long as the
289 * iterator is not deleted, If used when reading from tables created with
290 * BlockBasedTableOptions::use_delta_encoding = false,
291 * Iterator's property "rocksdb.iterator.is-key-pinned" is guaranteed to
292 * return 1.
293 *
294 * @param pinData if true, the blocks loaded by the iterator will be pinned
295 * @return the reference to the current ReadOptions.
296 */
297 public ReadOptions setPinData(final boolean pinData) {
298 assert(isOwningHandle());
299 setPinData(nativeHandle_, pinData);
300 return this;
301 }
302
303 /**
304 * If true, when PurgeObsoleteFile is called in CleanupIteratorState, we
305 * schedule a background job in the flush job queue and delete obsolete files
306 * in background.
307 *
308 * Default: false
309 *
310 * @return true when PurgeObsoleteFile is called in CleanupIteratorState
311 */
312 public boolean backgroundPurgeOnIteratorCleanup() {
313 assert(isOwningHandle());
314 return backgroundPurgeOnIteratorCleanup(nativeHandle_);
315 }
316
317 /**
318 * If true, when PurgeObsoleteFile is called in CleanupIteratorState, we
319 * schedule a background job in the flush job queue and delete obsolete files
320 * in background.
321 *
322 * Default: false
323 *
324 * @param backgroundPurgeOnIteratorCleanup true when PurgeObsoleteFile is
325 * called in CleanupIteratorState
326 * @return the reference to the current ReadOptions.
327 */
328 public ReadOptions setBackgroundPurgeOnIteratorCleanup(
329 final boolean backgroundPurgeOnIteratorCleanup) {
330 assert(isOwningHandle());
331 setBackgroundPurgeOnIteratorCleanup(nativeHandle_,
332 backgroundPurgeOnIteratorCleanup);
333 return this;
334 }
335
336 /**
337 * If non-zero, NewIterator will create a new table reader which
338 * performs reads of the given size. Using a large size (&gt; 2MB) can
339 * improve the performance of forward iteration on spinning disks.
340 *
341 * Default: 0
342 *
343 * @return The readahead size is bytes
344 */
345 public long readaheadSize() {
346 assert(isOwningHandle());
347 return readaheadSize(nativeHandle_);
348 }
349
350 /**
351 * If non-zero, NewIterator will create a new table reader which
352 * performs reads of the given size. Using a large size (&gt; 2MB) can
353 * improve the performance of forward iteration on spinning disks.
354 *
355 * Default: 0
356 *
357 * @param readaheadSize The readahead size is bytes
358 * @return the reference to the current ReadOptions.
359 */
360 public ReadOptions setReadaheadSize(final long readaheadSize) {
361 assert(isOwningHandle());
362 setReadaheadSize(nativeHandle_, readaheadSize);
363 return this;
364 }
365
366 /**
367 * A threshold for the number of keys that can be skipped before failing an
368 * iterator seek as incomplete.
369 *
370 * @return the number of keys that can be skipped
371 * before failing an iterator seek as incomplete.
372 */
373 public long maxSkippableInternalKeys() {
374 assert(isOwningHandle());
375 return maxSkippableInternalKeys(nativeHandle_);
376 }
377
378 /**
379 * A threshold for the number of keys that can be skipped before failing an
380 * iterator seek as incomplete. The default value of 0 should be used to
381 * never fail a request as incomplete, even on skipping too many keys.
382 *
383 * Default: 0
384 *
385 * @param maxSkippableInternalKeys the number of keys that can be skipped
386 * before failing an iterator seek as incomplete.
387 *
388 * @return the reference to the current ReadOptions.
389 */
390 public ReadOptions setMaxSkippableInternalKeys(
391 final long maxSkippableInternalKeys) {
392 assert(isOwningHandle());
393 setMaxSkippableInternalKeys(nativeHandle_, maxSkippableInternalKeys);
394 return this;
395 }
396
397 /**
398 * If true, keys deleted using the DeleteRange() API will be visible to
399 * readers until they are naturally deleted during compaction. This improves
400 * read performance in DBs with many range deletions.
401 *
402 * Default: false
403 *
404 * @return true if keys deleted using the DeleteRange() API will be visible
405 */
406 public boolean ignoreRangeDeletions() {
407 assert(isOwningHandle());
408 return ignoreRangeDeletions(nativeHandle_);
409 }
410
411 /**
412 * If true, keys deleted using the DeleteRange() API will be visible to
413 * readers until they are naturally deleted during compaction. This improves
414 * read performance in DBs with many range deletions.
415 *
416 * Default: false
417 *
418 * @param ignoreRangeDeletions true if keys deleted using the DeleteRange()
419 * API should be visible
420 * @return the reference to the current ReadOptions.
421 */
422 public ReadOptions setIgnoreRangeDeletions(final boolean ignoreRangeDeletions) {
423 assert(isOwningHandle());
424 setIgnoreRangeDeletions(nativeHandle_, ignoreRangeDeletions);
425 return this;
426 }
427
428 /**
429 * Defines the smallest key at which the backward
430 * iterator can return an entry. Once the bound is passed,
431 * {@link RocksIterator#isValid()} will be false.
432 *
433 * The lower bound is inclusive i.e. the bound value is a valid
434 * entry.
435 *
436 * If prefix_extractor is not null, the Seek target and `iterate_lower_bound`
437 * need to have the same prefix. This is because ordering is not guaranteed
438 * outside of prefix domain.
439 *
440 * Default: null
441 *
442 * @param iterateLowerBound Slice representing the lower bound
443 * @return the reference to the current ReadOptions.
444 */
445 public ReadOptions setIterateLowerBound(final AbstractSlice<?> iterateLowerBound) {
446 assert(isOwningHandle());
447 setIterateLowerBound(
448 nativeHandle_, iterateLowerBound == null ? 0 : iterateLowerBound.getNativeHandle());
449 // Hold onto a reference so it doesn't get garbage collected out from under us.
450 iterateLowerBoundSlice_ = iterateLowerBound;
451 return this;
452 }
453
454 /**
455 * Returns the smallest key at which the backward
456 * iterator can return an entry.
457 *
458 * The lower bound is inclusive i.e. the bound value is a valid entry.
459 *
460 * @return the smallest key, or null if there is no lower bound defined.
461 */
462 public Slice iterateLowerBound() {
463 assert(isOwningHandle());
464 final long lowerBoundSliceHandle = iterateLowerBound(nativeHandle_);
465 if (lowerBoundSliceHandle != 0) {
466 // Disown the new slice - it's owned by the C++ side of the JNI boundary
467 // from the perspective of this method.
468 return new Slice(lowerBoundSliceHandle, false);
469 }
470 return null;
471 }
472
473 /**
474 * Defines the extent up to which the forward iterator
475 * can returns entries. Once the bound is reached,
476 * {@link RocksIterator#isValid()} will be false.
477 *
478 * The upper bound is exclusive i.e. the bound value is not a valid entry.
479 *
480 * If prefix_extractor is not null, the Seek target and iterate_upper_bound
481 * need to have the same prefix. This is because ordering is not guaranteed
482 * outside of prefix domain.
483 *
484 * Default: null
485 *
486 * @param iterateUpperBound Slice representing the upper bound
487 * @return the reference to the current ReadOptions.
488 */
489 public ReadOptions setIterateUpperBound(final AbstractSlice<?> iterateUpperBound) {
490 assert(isOwningHandle());
491 setIterateUpperBound(
492 nativeHandle_, iterateUpperBound == null ? 0 : iterateUpperBound.getNativeHandle());
493 // Hold onto a reference so it doesn't get garbage collected out from under us.
494 iterateUpperBoundSlice_ = iterateUpperBound;
495 return this;
496 }
497
498 /**
499 * Returns the largest key at which the forward
500 * iterator can return an entry.
501 *
502 * The upper bound is exclusive i.e. the bound value is not a valid entry.
503 *
504 * @return the largest key, or null if there is no upper bound defined.
505 */
506 public Slice iterateUpperBound() {
507 assert(isOwningHandle());
508 final long upperBoundSliceHandle = iterateUpperBound(nativeHandle_);
509 if (upperBoundSliceHandle != 0) {
510 // Disown the new slice - it's owned by the C++ side of the JNI boundary
511 // from the perspective of this method.
512 return new Slice(upperBoundSliceHandle, false);
513 }
514 return null;
515 }
516
517 /**
518 * A callback to determine whether relevant keys for this scan exist in a
519 * given table based on the table's properties. The callback is passed the
520 * properties of each table during iteration. If the callback returns false,
521 * the table will not be scanned. This option only affects Iterators and has
522 * no impact on point lookups.
523 *
524 * Default: null (every table will be scanned)
525 *
526 * @param tableFilter the table filter for the callback.
527 *
528 * @return the reference to the current ReadOptions.
529 */
530 public ReadOptions setTableFilter(final AbstractTableFilter tableFilter) {
531 assert(isOwningHandle());
532 setTableFilter(nativeHandle_, tableFilter.nativeHandle_);
533 return this;
534 }
535
536 /**
537 * When true, by default use total_order_seek = true, and RocksDB can
538 * selectively enable prefix seek mode if won't generate a different result
539 * from total_order_seek, based on seek key, and iterator upper bound.
540 * Not supported in ROCKSDB_LITE mode, in the way that even with value true
541 * prefix mode is not used.
542 * Default: false
543 *
544 * @return true if auto prefix mode is set.
545 *
546 */
547 public boolean autoPrefixMode() {
548 assert (isOwningHandle());
549 return autoPrefixMode(nativeHandle_);
550 }
551
552 /**
553 * When true, by default use total_order_seek = true, and RocksDB can
554 * selectively enable prefix seek mode if won't generate a different result
555 * from total_order_seek, based on seek key, and iterator upper bound.
556 * Not supported in ROCKSDB_LITE mode, in the way that even with value true
557 * prefix mode is not used.
558 * Default: false
559 * @param mode auto prefix mode
560 * @return the reference to the current ReadOptions.
561 */
562 public ReadOptions setAutoPrefixMode(final boolean mode) {
563 assert (isOwningHandle());
564 setAutoPrefixMode(nativeHandle_, mode);
565 return this;
566 }
567
568 /**
569 * Timestamp of operation. Read should return the latest data visible to the
570 * specified timestamp. All timestamps of the same database must be of the
571 * same length and format. The user is responsible for providing a customized
572 * compare function via Comparator to order &gt;key, timestamp&gt; tuples.
573 * For iterator, iter_start_ts is the lower bound (older) and timestamp
574 * serves as the upper bound. Versions of the same record that fall in
575 * the timestamp range will be returned. If iter_start_ts is nullptr,
576 * only the most recent version visible to timestamp is returned.
577 * The user-specified timestamp feature is still under active development,
578 * and the API is subject to change.
579 *
580 * Default: null
581 * @see #iterStartTs()
582 * @return Reference to timestamp or null if there is no timestamp defined.
583 */
584 public Slice timestamp() {
585 assert (isOwningHandle());
586 final long timestampSliceHandle = timestamp(nativeHandle_);
587 if (timestampSliceHandle != 0) {
588 return new Slice(timestampSliceHandle);
589 } else {
590 return null;
591 }
592 }
593
594 /**
595 * Timestamp of operation. Read should return the latest data visible to the
596 * specified timestamp. All timestamps of the same database must be of the
597 * same length and format. The user is responsible for providing a customized
598 * compare function via Comparator to order {@code <key, timestamp>} tuples.
599 * For iterator, {@code iter_start_ts} is the lower bound (older) and timestamp
600 * serves as the upper bound. Versions of the same record that fall in
601 * the timestamp range will be returned. If iter_start_ts is nullptr,
602 * only the most recent version visible to timestamp is returned.
603 * The user-specified timestamp feature is still under active development,
604 * and the API is subject to change.
605 *
606 * Default: null
607 * @see #setIterStartTs(AbstractSlice)
608 * @param timestamp Slice representing the timestamp
609 * @return the reference to the current ReadOptions.
610 */
611 public ReadOptions setTimestamp(final AbstractSlice<?> timestamp) {
612 assert (isOwningHandle());
613 setTimestamp(nativeHandle_, timestamp == null ? 0 : timestamp.getNativeHandle());
614 timestampSlice_ = timestamp;
615 return this;
616 }
617
618 /**
619 * Timestamp of operation. Read should return the latest data visible to the
620 * specified timestamp. All timestamps of the same database must be of the
621 * same length and format. The user is responsible for providing a customized
622 * compare function via Comparator to order {@code <key, timestamp>} tuples.
623 * For iterator, {@code iter_start_ts} is the lower bound (older) and timestamp
624 * serves as the upper bound. Versions of the same record that fall in
625 * the timestamp range will be returned. If iter_start_ts is nullptr,
626 * only the most recent version visible to timestamp is returned.
627 * The user-specified timestamp feature is still under active development,
628 * and the API is subject to change.
629 *
630 * Default: null
631 * @return Reference to lower bound timestamp or null if there is no lower bound timestamp
632 * defined.
633 */
634 public Slice iterStartTs() {
635 assert (isOwningHandle());
636 final long iterStartTsHandle = iterStartTs(nativeHandle_);
637 if (iterStartTsHandle != 0) {
638 return new Slice(iterStartTsHandle);
639 } else {
640 return null;
641 }
642 }
643
644 /**
645 * Timestamp of operation. Read should return the latest data visible to the
646 * specified timestamp. All timestamps of the same database must be of the
647 * same length and format. The user is responsible for providing a customized
648 * compare function via Comparator to order {@code <key, timestamp>} tuples.
649 * For iterator, {@code iter_start_ts} is the lower bound (older) and timestamp
650 * serves as the upper bound. Versions of the same record that fall in
651 * the timestamp range will be returned. If iter_start_ts is nullptr,
652 * only the most recent version visible to timestamp is returned.
653 * The user-specified timestamp feature is still under active development,
654 * and the API is subject to change.
655 *
656 * Default: null
657 *
658 * @param iterStartTs Reference to lower bound timestamp or null if there is no lower bound
659 * timestamp defined
660 * @return the reference to the current ReadOptions.
661 */
662 public ReadOptions setIterStartTs(final AbstractSlice<?> iterStartTs) {
663 assert (isOwningHandle());
664 setIterStartTs(nativeHandle_, iterStartTs == null ? 0 : iterStartTs.getNativeHandle());
665 iterStartTs_ = iterStartTs;
666 return this;
667 }
668
669 /**
670 * Deadline for completing an API call (Get/MultiGet/Seek/Next for now)
671 * in microseconds.
672 * It should be set to microseconds since epoch, i.e, {@code gettimeofday} or
673 * equivalent plus allowed duration in microseconds. The best way is to use
674 * {@code env->NowMicros() + some timeout}.
675 * This is best efforts. The call may exceed the deadline if there is IO
676 * involved and the file system doesn't support deadlines, or due to
677 * checking for deadline periodically rather than for every key if
678 * processing a batch
679 *
680 * @return deadline time in microseconds
681 */
682 public long deadline() {
683 assert (isOwningHandle());
684 return deadline(nativeHandle_);
685 }
686
687 /**
688 * Deadline for completing an API call (Get/MultiGet/Seek/Next for now)
689 * in microseconds.
690 * It should be set to microseconds since epoch, i.e, {@code gettimeofday} or
691 * equivalent plus allowed duration in microseconds. The best way is to use
692 * {@code env->NowMicros() + some timeout}.
693 * This is best efforts. The call may exceed the deadline if there is IO
694 * involved and the file system doesn't support deadlines, or due to
695 * checking for deadline periodically rather than for every key if
696 * processing a batch
697 *
698 * @param deadlineTime deadline time in microseconds.
699 * @return the reference to the current ReadOptions.
700 */
701 public ReadOptions setDeadline(final long deadlineTime) {
702 assert (isOwningHandle());
703 setDeadline(nativeHandle_, deadlineTime);
704 return this;
705 }
706
707 /**
708 * A timeout in microseconds to be passed to the underlying FileSystem for
709 * reads. As opposed to deadline, this determines the timeout for each
710 * individual file read request. If a MultiGet/Get/Seek/Next etc call
711 * results in multiple reads, each read can last up to io_timeout us.
712 * @return ioTimeout time in microseconds
713 */
714 public long ioTimeout() {
715 assert (isOwningHandle());
716 return ioTimeout(nativeHandle_);
717 }
718
719 /**
720 * A timeout in microseconds to be passed to the underlying FileSystem for
721 * reads. As opposed to deadline, this determines the timeout for each
722 * individual file read request. If a MultiGet/Get/Seek/Next etc call
723 * results in multiple reads, each read can last up to io_timeout us.
724 *
725 * @param ioTimeout time in microseconds.
726 * @return the reference to the current ReadOptions.
727 */
728 public ReadOptions setIoTimeout(final long ioTimeout) {
729 assert (isOwningHandle());
730 setIoTimeout(nativeHandle_, ioTimeout);
731 return this;
732 }
733
734 /**
735 * It limits the maximum cumulative value size of the keys in batch while
736 * reading through MultiGet. Once the cumulative value size exceeds this
737 * soft limit then all the remaining keys are returned with status Aborted.
738 *
739 * Default: {@code std::numeric_limits<uint64_t>::max()}
740 * @return actual valueSizeSofLimit
741 */
742 public long valueSizeSoftLimit() {
743 assert (isOwningHandle());
744 return valueSizeSoftLimit(nativeHandle_);
745 }
746
747 /**
748 * It limits the maximum cumulative value size of the keys in batch while
749 * reading through MultiGet. Once the cumulative value size exceeds this
750 * soft limit then all the remaining keys are returned with status Aborted.
751 *
752 * Default: {@code std::numeric_limits<uint64_t>::max()}
753 *
754 * @param valueSizeSoftLimit the maximum cumulative value size of the keys
755 * @return the reference to the current ReadOptions
756 */
757 public ReadOptions setValueSizeSoftLimit(final long valueSizeSoftLimit) {
758 assert (isOwningHandle());
759 setValueSizeSoftLimit(nativeHandle_, valueSizeSoftLimit);
760 return this;
761 }
762
  // instance variables
  // NOTE: If you add new member variables, please update the copy constructor above!
  //
  // Hold a reference to any iterate lower or upper bound that was set on this
  // object until we're destroyed or it's overwritten. That way the caller can
  // freely leave scope without us losing the Java Slice object, which during
  // close() would also reap its associated rocksdb::Slice native object since
  // it's possibly (likely) to be an owning handle.
  private AbstractSlice<?> iterateLowerBoundSlice_;
  private AbstractSlice<?> iterateUpperBoundSlice_;
  // Same GC-protection rationale for the timestamp bounds set via
  // setTimestamp() / setIterStartTs().
  private AbstractSlice<?> timestampSlice_;
  private AbstractSlice<?> iterStartTs_;

  // JNI entry points. The first 'long handle' argument of each instance
  // native is the address of the underlying native ReadOptions object
  // (nativeHandle_); the factory/copy natives return such an address.
  private native static long newReadOptions();
  private native static long newReadOptions(final boolean verifyChecksums,
    final boolean fillCache);
  private native static long copyReadOptions(long handle);
  @Override protected final native void disposeInternal(final long handle);

  private native boolean verifyChecksums(long handle);
  private native void setVerifyChecksums(long handle, boolean verifyChecksums);
  private native boolean fillCache(long handle);
  private native void setFillCache(long handle, boolean fillCache);
  private native long snapshot(long handle);
  private native void setSnapshot(long handle, long snapshotHandle);
  private native byte readTier(long handle);
  private native void setReadTier(long handle, byte readTierValue);
  private native boolean tailing(long handle);
  private native void setTailing(long handle, boolean tailing);
  private native boolean managed(long handle);
  private native void setManaged(long handle, boolean managed);
  private native boolean totalOrderSeek(long handle);
  private native void setTotalOrderSeek(long handle, boolean totalOrderSeek);
  private native boolean prefixSameAsStart(long handle);
  private native void setPrefixSameAsStart(long handle, boolean prefixSameAsStart);
  private native boolean pinData(long handle);
  private native void setPinData(long handle, boolean pinData);
  private native boolean backgroundPurgeOnIteratorCleanup(final long handle);
  private native void setBackgroundPurgeOnIteratorCleanup(final long handle,
      final boolean backgroundPurgeOnIteratorCleanup);
  private native long readaheadSize(final long handle);
  private native void setReadaheadSize(final long handle,
      final long readaheadSize);
  private native long maxSkippableInternalKeys(final long handle);
  private native void setMaxSkippableInternalKeys(final long handle,
      final long maxSkippableInternalKeys);
  private native boolean ignoreRangeDeletions(final long handle);
  private native void setIgnoreRangeDeletions(final long handle,
      final boolean ignoreRangeDeletions);
  // Slice-valued options are passed/returned as native slice handles;
  // 0 stands for "no bound set" (nullptr).
  private native void setIterateUpperBound(final long handle,
      final long upperBoundSliceHandle);
  private native long iterateUpperBound(final long handle);
  private native void setIterateLowerBound(final long handle,
      final long lowerBoundSliceHandle);
  private native long iterateLowerBound(final long handle);
  private native void setTableFilter(final long handle, final long tableFilterHandle);
  private native boolean autoPrefixMode(final long handle);
  private native void setAutoPrefixMode(final long handle, final boolean autoPrefixMode);
  private native long timestamp(final long handle);
  private native void setTimestamp(final long handle, final long timestampSliceHandle);
  private native long iterStartTs(final long handle);
  private native void setIterStartTs(final long handle, final long iterStartTsHandle);
  private native long deadline(final long handle);
  private native void setDeadline(final long handle, final long deadlineTime);
  private native long ioTimeout(final long handle);
  private native void setIoTimeout(final long handle, final long ioTimeout);
  private native long valueSizeSoftLimit(final long handle);
  private native void setValueSizeSoftLimit(final long handle, final long softLimit);
}