// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
// This source code is licensed under both the GPLv2 (found in the
// COPYING file in the root directory) and Apache 2.0 License
// (found in the LICENSE.Apache file in the root directory).
8 import java
.nio
.ByteBuffer
;
/**
 * Similar to {@link org.rocksdb.WriteBatch} but with a binary searchable
 * index built for all the keys inserted.
 *
 * Calling put, merge, remove or putLogData calls the same function
 * as with {@link org.rocksdb.WriteBatch} whilst also building an index.
 *
 * A user can call {@link org.rocksdb.WriteBatchWithIndex#newIterator()} to
 * create an iterator over the write batch or
 * {@link org.rocksdb.WriteBatchWithIndex#newIteratorWithBase(org.rocksdb.RocksIterator)}
 * to get an iterator for the database with Read-Your-Own-Writes like capability
 */
22 public class WriteBatchWithIndex
extends AbstractWriteBatch
{
/**
 * Creates a WriteBatchWithIndex where no bytes
 * are reserved up-front, bytewise comparison is
 * used for fallback key comparisons,
 * and duplicate keys operations are retained
 */
public WriteBatchWithIndex() {
  super(newWriteBatchWithIndex());
}
/**
 * Creates a WriteBatchWithIndex where no bytes
 * are reserved up-front, bytewise comparison is
 * used for fallback key comparisons, and duplicate key
 * assignment is determined by the constructor argument
 *
 * @param overwriteKey if true, overwrite the key in the index when
 *   inserting a duplicate key, in this way an iterator will never
 *   show two entries with the same key.
 */
public WriteBatchWithIndex(final boolean overwriteKey) {
  super(newWriteBatchWithIndex(overwriteKey));
}
/**
 * Creates a WriteBatchWithIndex
 *
 * @param fallbackIndexComparator We fallback to this comparator
 *   to compare keys within a column family if we cannot determine
 *   the column family and so look up its comparator.
 * @param reservedBytes reserved bytes in underlying WriteBatch
 * @param overwriteKey if true, overwrite the key in the index when
 *   inserting a duplicate key, in this way an iterator will never
 *   show two entries with the same key.
 */
public WriteBatchWithIndex(final AbstractComparator fallbackIndexComparator,
    final int reservedBytes, final boolean overwriteKey) {
  super(newWriteBatchWithIndex(fallbackIndexComparator.nativeHandle_,
      fallbackIndexComparator.getComparatorType().getValue(), reservedBytes,
      overwriteKey));
}
/**
 * <p>Private WriteBatchWithIndex constructor which is used to construct
 * WriteBatchWithIndex instances from C++ side. As the reference to this
 * object is also managed from C++ side the handle will be disowned.</p>
 *
 * @param nativeHandle address of native instance.
 */
WriteBatchWithIndex(final long nativeHandle) {
  super(nativeHandle);
  // Native ownership stays with C++ (see class comment above), so this
  // Java wrapper must not dispose the handle.
  disOwnNativeHandle();
}
83 * Create an iterator of a column family. User can call
84 * {@link org.rocksdb.RocksIteratorInterface#seek(byte[])} to
85 * search to the next entry of or after a key. Keys will be iterated in the
86 * order given by index_comparator. For multiple updates on the same key,
87 * each update will be returned as a separate entry, in the order of update
90 * @param columnFamilyHandle The column family to iterate over
91 * @return An iterator for the Write Batch contents, restricted to the column
94 public WBWIRocksIterator
newIterator(
95 final ColumnFamilyHandle columnFamilyHandle
) {
96 return new WBWIRocksIterator(this, iterator1(nativeHandle_
,
97 columnFamilyHandle
.nativeHandle_
));
101 * Create an iterator of the default column family. User can call
102 * {@link org.rocksdb.RocksIteratorInterface#seek(byte[])} to
103 * search to the next entry of or after a key. Keys will be iterated in the
104 * order given by index_comparator. For multiple updates on the same key,
105 * each update will be returned as a separate entry, in the order of update
108 * @return An iterator for the Write Batch contents
110 public WBWIRocksIterator
newIterator() {
111 return new WBWIRocksIterator(this, iterator0(nativeHandle_
));
115 * Provides Read-Your-Own-Writes like functionality by
116 * creating a new Iterator that will use {@link org.rocksdb.WBWIRocksIterator}
117 * as a delta and baseIterator as a base
119 * Updating write batch with the current key of the iterator is not safe.
120 * We strongly recommand users not to do it. It will invalidate the current
121 * key() and value() of the iterator. This invalidation happens even before
122 * the write batch update finishes. The state may recover after Next() is
125 * @param columnFamilyHandle The column family to iterate over
126 * @param baseIterator The base iterator,
127 * e.g. {@link org.rocksdb.RocksDB#newIterator()}
128 * @return An iterator which shows a view comprised of both the database
129 * point-in-time from baseIterator and modifications made in this write batch.
131 public RocksIterator
newIteratorWithBase(
132 final ColumnFamilyHandle columnFamilyHandle
,
133 final RocksIterator baseIterator
) {
134 return newIteratorWithBase(columnFamilyHandle
, baseIterator
, null);
138 * Provides Read-Your-Own-Writes like functionality by
139 * creating a new Iterator that will use {@link org.rocksdb.WBWIRocksIterator}
140 * as a delta and baseIterator as a base
142 * Updating write batch with the current key of the iterator is not safe.
143 * We strongly recommand users not to do it. It will invalidate the current
144 * key() and value() of the iterator. This invalidation happens even before
145 * the write batch update finishes. The state may recover after Next() is
148 * @param columnFamilyHandle The column family to iterate over
149 * @param baseIterator The base iterator,
150 * e.g. {@link org.rocksdb.RocksDB#newIterator()}
151 * @param readOptions the read options, or null
152 * @return An iterator which shows a view comprised of both the database
153 * point-in-time from baseIterator and modifications made in this write batch.
155 public RocksIterator
newIteratorWithBase(final ColumnFamilyHandle columnFamilyHandle
,
156 final RocksIterator baseIterator
, /* @Nullable */ final ReadOptions readOptions
) {
157 final RocksIterator iterator
= new RocksIterator(baseIterator
.parent_
,
158 iteratorWithBase(nativeHandle_
, columnFamilyHandle
.nativeHandle_
,
159 baseIterator
.nativeHandle_
, readOptions
== null ?
0 : readOptions
.nativeHandle_
));
161 // when the iterator is deleted it will also delete the baseIterator
162 baseIterator
.disOwnNativeHandle();
168 * Provides Read-Your-Own-Writes like functionality by
169 * creating a new Iterator that will use {@link org.rocksdb.WBWIRocksIterator}
170 * as a delta and baseIterator as a base. Operates on the default column
173 * @param baseIterator The base iterator,
174 * e.g. {@link org.rocksdb.RocksDB#newIterator()}
175 * @return An iterator which shows a view comprised of both the database
176 * point-in-timefrom baseIterator and modifications made in this write batch.
178 public RocksIterator
newIteratorWithBase(final RocksIterator baseIterator
) {
179 return newIteratorWithBase(baseIterator
.parent_
.getDefaultColumnFamily(), baseIterator
, null);
183 * Provides Read-Your-Own-Writes like functionality by
184 * creating a new Iterator that will use {@link org.rocksdb.WBWIRocksIterator}
185 * as a delta and baseIterator as a base. Operates on the default column
188 * @param baseIterator The base iterator,
189 * e.g. {@link org.rocksdb.RocksDB#newIterator()}
190 * @param readOptions the read options, or null
191 * @return An iterator which shows a view comprised of both the database
192 * point-in-timefrom baseIterator and modifications made in this write batch.
194 public RocksIterator
newIteratorWithBase(final RocksIterator baseIterator
,
195 /* @Nullable */ final ReadOptions readOptions
) {
196 return newIteratorWithBase(
197 baseIterator
.parent_
.getDefaultColumnFamily(), baseIterator
, readOptions
);
201 * Similar to {@link RocksDB#get(ColumnFamilyHandle, byte[])} but will only
202 * read the key from this batch.
204 * @param columnFamilyHandle The column family to retrieve the value from
205 * @param options The database options to use
206 * @param key The key to read the value for
208 * @return a byte array storing the value associated with the input key if
209 * any. null if it does not find the specified key.
211 * @throws RocksDBException if the batch does not have enough data to resolve
212 * Merge operations, MergeInProgress status may be returned.
214 public byte[] getFromBatch(final ColumnFamilyHandle columnFamilyHandle
,
215 final DBOptions options
, final byte[] key
) throws RocksDBException
{
216 return getFromBatch(nativeHandle_
, options
.nativeHandle_
,
217 key
, key
.length
, columnFamilyHandle
.nativeHandle_
);
221 * Similar to {@link RocksDB#get(byte[])} but will only
222 * read the key from this batch.
224 * @param options The database options to use
225 * @param key The key to read the value for
227 * @return a byte array storing the value associated with the input key if
228 * any. null if it does not find the specified key.
230 * @throws RocksDBException if the batch does not have enough data to resolve
231 * Merge operations, MergeInProgress status may be returned.
233 public byte[] getFromBatch(final DBOptions options
, final byte[] key
)
234 throws RocksDBException
{
235 return getFromBatch(nativeHandle_
, options
.nativeHandle_
, key
, key
.length
);
239 * Similar to {@link RocksDB#get(ColumnFamilyHandle, byte[])} but will also
240 * read writes from this batch.
242 * This function will query both this batch and the DB and then merge
243 * the results using the DB's merge operator (if the batch contains any
246 * Setting {@link ReadOptions#setSnapshot(Snapshot)} will affect what is
247 * read from the DB but will NOT change which keys are read from the batch
248 * (the keys in this batch do not yet belong to any snapshot and will be
249 * fetched regardless).
251 * @param db The Rocks database
252 * @param columnFamilyHandle The column family to retrieve the value from
253 * @param options The read options to use
254 * @param key The key to read the value for
256 * @return a byte array storing the value associated with the input key if
257 * any. null if it does not find the specified key.
259 * @throws RocksDBException if the value for the key cannot be read
261 public byte[] getFromBatchAndDB(final RocksDB db
, final ColumnFamilyHandle columnFamilyHandle
,
262 final ReadOptions options
, final byte[] key
) throws RocksDBException
{
263 return getFromBatchAndDB(nativeHandle_
, db
.nativeHandle_
,
264 options
.nativeHandle_
, key
, key
.length
,
265 columnFamilyHandle
.nativeHandle_
);
269 * Similar to {@link RocksDB#get(byte[])} but will also
270 * read writes from this batch.
272 * This function will query both this batch and the DB and then merge
273 * the results using the DB's merge operator (if the batch contains any
276 * Setting {@link ReadOptions#setSnapshot(Snapshot)} will affect what is
277 * read from the DB but will NOT change which keys are read from the batch
278 * (the keys in this batch do not yet belong to any snapshot and will be
279 * fetched regardless).
281 * @param db The Rocks database
282 * @param options The read options to use
283 * @param key The key to read the value for
285 * @return a byte array storing the value associated with the input key if
286 * any. null if it does not find the specified key.
288 * @throws RocksDBException if the value for the key cannot be read
290 public byte[] getFromBatchAndDB(final RocksDB db
, final ReadOptions options
,
291 final byte[] key
) throws RocksDBException
{
292 return getFromBatchAndDB(nativeHandle_
, db
.nativeHandle_
,
293 options
.nativeHandle_
, key
, key
.length
);
296 @Override protected final native void disposeInternal(final long handle
);
297 @Override final native int count0(final long handle
);
298 @Override final native void put(final long handle
, final byte[] key
,
299 final int keyLen
, final byte[] value
, final int valueLen
);
300 @Override final native void put(final long handle
, final byte[] key
,
301 final int keyLen
, final byte[] value
, final int valueLen
,
302 final long cfHandle
);
304 final native void putDirect(final long handle
, final ByteBuffer key
, final int keyOffset
,
305 final int keyLength
, final ByteBuffer value
, final int valueOffset
, final int valueLength
,
306 final long cfHandle
);
307 @Override final native void merge(final long handle
, final byte[] key
,
308 final int keyLen
, final byte[] value
, final int valueLen
);
309 @Override final native void merge(final long handle
, final byte[] key
,
310 final int keyLen
, final byte[] value
, final int valueLen
,
311 final long cfHandle
);
312 @Override final native void delete(final long handle
, final byte[] key
,
313 final int keyLen
) throws RocksDBException
;
314 @Override final native void delete(final long handle
, final byte[] key
,
315 final int keyLen
, final long cfHandle
) throws RocksDBException
;
316 @Override final native void singleDelete(final long handle
, final byte[] key
,
317 final int keyLen
) throws RocksDBException
;
318 @Override final native void singleDelete(final long handle
, final byte[] key
,
319 final int keyLen
, final long cfHandle
) throws RocksDBException
;
321 final native void removeDirect(final long handle
, final ByteBuffer key
, final int keyOffset
,
322 final int keyLength
, final long cfHandle
) throws RocksDBException
;
323 // DO NOT USE - `WriteBatchWithIndex::deleteRange` is not yet supported
325 final native void deleteRange(final long handle
, final byte[] beginKey
, final int beginKeyLen
,
326 final byte[] endKey
, final int endKeyLen
);
327 // DO NOT USE - `WriteBatchWithIndex::deleteRange` is not yet supported
329 final native void deleteRange(final long handle
, final byte[] beginKey
, final int beginKeyLen
,
330 final byte[] endKey
, final int endKeyLen
, final long cfHandle
);
331 @Override final native void putLogData(final long handle
, final byte[] blob
,
332 final int blobLen
) throws RocksDBException
;
333 @Override final native void clear0(final long handle
);
334 @Override final native void setSavePoint0(final long handle
);
335 @Override final native void rollbackToSavePoint0(final long handle
);
336 @Override final native void popSavePoint(final long handle
) throws RocksDBException
;
337 @Override final native void setMaxBytes(final long nativeHandle
,
338 final long maxBytes
);
339 @Override final native WriteBatch
getWriteBatch(final long handle
);
341 private native static long newWriteBatchWithIndex();
342 private native static long newWriteBatchWithIndex(final boolean overwriteKey
);
343 private native static long newWriteBatchWithIndex(
344 final long fallbackIndexComparatorHandle
,
345 final byte comparatorType
, final int reservedBytes
,
346 final boolean overwriteKey
);
347 private native long iterator0(final long handle
);
348 private native long iterator1(final long handle
, final long cfHandle
);
349 private native long iteratorWithBase(final long handle
, final long baseIteratorHandle
,
350 final long cfHandle
, final long readOptionsHandle
);
351 private native byte[] getFromBatch(final long handle
, final long optHandle
,
352 final byte[] key
, final int keyLen
);
353 private native byte[] getFromBatch(final long handle
, final long optHandle
,
354 final byte[] key
, final int keyLen
, final long cfHandle
);
355 private native byte[] getFromBatchAndDB(final long handle
,
356 final long dbHandle
, final long readOptHandle
, final byte[] key
,
358 private native byte[] getFromBatchAndDB(final long handle
,
359 final long dbHandle
, final long readOptHandle
, final byte[] key
,
360 final int keyLen
, final long cfHandle
);