1 // Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
2 // This source code is licensed under the BSD-style license found in the
3 // LICENSE file in the root directory of this source tree. An additional grant
4 // of patent rights can be found in the PATENTS file in the same directory.
/**
 * Similar to {@link org.rocksdb.WriteBatch} but with a binary searchable
 * index built for all the keys inserted.
 *
 * Calling put, merge, remove or putLogData calls the same function
 * as with {@link org.rocksdb.WriteBatch} whilst also building an index.
 *
 * A user can call {@link org.rocksdb.WriteBatchWithIndex#newIterator()} to
 * create an iterator over the write batch or
 * {@link org.rocksdb.WriteBatchWithIndex#newIteratorWithBase(org.rocksdb.RocksIterator)}
 * to get an iterator for the database with Read-Your-Own-Writes like capability.
 */
20 public class WriteBatchWithIndex
extends AbstractWriteBatch
{
22 * Creates a WriteBatchWithIndex where no bytes
23 * are reserved up-front, bytewise comparison is
24 * used for fallback key comparisons,
25 * and duplicate keys operations are retained
27 public WriteBatchWithIndex() {
28 super(newWriteBatchWithIndex());
33 * Creates a WriteBatchWithIndex where no bytes
34 * are reserved up-front, bytewise comparison is
35 * used for fallback key comparisons, and duplicate key
36 * assignment is determined by the constructor argument
38 * @param overwriteKey if true, overwrite the key in the index when
39 * inserting a duplicate key, in this way an iterator will never
40 * show two entries with the same key.
42 public WriteBatchWithIndex(final boolean overwriteKey
) {
43 super(newWriteBatchWithIndex(overwriteKey
));
47 * Creates a WriteBatchWithIndex
49 * @param fallbackIndexComparator We fallback to this comparator
50 * to compare keys within a column family if we cannot determine
51 * the column family and so look up it's comparator.
53 * @param reservedBytes reserved bytes in underlying WriteBatch
55 * @param overwriteKey if true, overwrite the key in the index when
56 * inserting a duplicate key, in this way an iterator will never
57 * show two entries with the same key.
59 public WriteBatchWithIndex(
60 final AbstractComparator
<?
extends AbstractSlice
<?
>>
61 fallbackIndexComparator
, final int reservedBytes
,
62 final boolean overwriteKey
) {
63 super(newWriteBatchWithIndex(fallbackIndexComparator
.getNativeHandle(),
64 reservedBytes
, overwriteKey
));
68 * Create an iterator of a column family. User can call
69 * {@link org.rocksdb.RocksIteratorInterface#seek(byte[])} to
70 * search to the next entry of or after a key. Keys will be iterated in the
71 * order given by index_comparator. For multiple updates on the same key,
72 * each update will be returned as a separate entry, in the order of update
75 * @param columnFamilyHandle The column family to iterate over
76 * @return An iterator for the Write Batch contents, restricted to the column
79 public WBWIRocksIterator
newIterator(
80 final ColumnFamilyHandle columnFamilyHandle
) {
81 return new WBWIRocksIterator(this, iterator1(nativeHandle_
,
82 columnFamilyHandle
.nativeHandle_
));
86 * Create an iterator of the default column family. User can call
87 * {@link org.rocksdb.RocksIteratorInterface#seek(byte[])} to
88 * search to the next entry of or after a key. Keys will be iterated in the
89 * order given by index_comparator. For multiple updates on the same key,
90 * each update will be returned as a separate entry, in the order of update
93 * @return An iterator for the Write Batch contents
95 public WBWIRocksIterator
newIterator() {
96 return new WBWIRocksIterator(this, iterator0(nativeHandle_
));
100 * Provides Read-Your-Own-Writes like functionality by
101 * creating a new Iterator that will use {@link org.rocksdb.WBWIRocksIterator}
102 * as a delta and baseIterator as a base
104 * @param columnFamilyHandle The column family to iterate over
105 * @param baseIterator The base iterator,
106 * e.g. {@link org.rocksdb.RocksDB#newIterator()}
107 * @return An iterator which shows a view comprised of both the database
108 * point-in-time from baseIterator and modifications made in this write batch.
110 public RocksIterator
newIteratorWithBase(
111 final ColumnFamilyHandle columnFamilyHandle
,
112 final RocksIterator baseIterator
) {
113 RocksIterator iterator
= new RocksIterator(
114 baseIterator
.parent_
,
115 iteratorWithBase(nativeHandle_
,
116 columnFamilyHandle
.nativeHandle_
,
117 baseIterator
.nativeHandle_
));
118 //when the iterator is deleted it will also delete the baseIterator
119 baseIterator
.disOwnNativeHandle();
124 * Provides Read-Your-Own-Writes like functionality by
125 * creating a new Iterator that will use {@link org.rocksdb.WBWIRocksIterator}
126 * as a delta and baseIterator as a base. Operates on the default column
129 * @param baseIterator The base iterator,
130 * e.g. {@link org.rocksdb.RocksDB#newIterator()}
131 * @return An iterator which shows a view comprised of both the database
132 * point-in-timefrom baseIterator and modifications made in this write batch.
134 public RocksIterator
newIteratorWithBase(final RocksIterator baseIterator
) {
135 return newIteratorWithBase(baseIterator
.parent_
.getDefaultColumnFamily(),
140 * Similar to {@link RocksDB#get(ColumnFamilyHandle, byte[])} but will only
141 * read the key from this batch.
143 * @param columnFamilyHandle The column family to retrieve the value from
144 * @param options The database options to use
145 * @param key The key to read the value for
147 * @return a byte array storing the value associated with the input key if
148 * any. null if it does not find the specified key.
150 * @throws RocksDBException if the batch does not have enough data to resolve
151 * Merge operations, MergeInProgress status may be returned.
153 public byte[] getFromBatch(final ColumnFamilyHandle columnFamilyHandle
,
154 final DBOptions options
, final byte[] key
) throws RocksDBException
{
155 return getFromBatch(nativeHandle_
, options
.nativeHandle_
,
156 key
, key
.length
, columnFamilyHandle
.nativeHandle_
);
160 * Similar to {@link RocksDB#get(byte[])} but will only
161 * read the key from this batch.
163 * @param options The database options to use
164 * @param key The key to read the value for
166 * @return a byte array storing the value associated with the input key if
167 * any. null if it does not find the specified key.
169 * @throws RocksDBException if the batch does not have enough data to resolve
170 * Merge operations, MergeInProgress status may be returned.
172 public byte[] getFromBatch(final DBOptions options
, final byte[] key
)
173 throws RocksDBException
{
174 return getFromBatch(nativeHandle_
, options
.nativeHandle_
, key
, key
.length
);
178 * Similar to {@link RocksDB#get(ColumnFamilyHandle, byte[])} but will also
179 * read writes from this batch.
181 * This function will query both this batch and the DB and then merge
182 * the results using the DB's merge operator (if the batch contains any
185 * Setting {@link ReadOptions#setSnapshot(long, long)} will affect what is
186 * read from the DB but will NOT change which keys are read from the batch
187 * (the keys in this batch do not yet belong to any snapshot and will be
188 * fetched regardless).
190 * @param db The Rocks database
191 * @param columnFamilyHandle The column family to retrieve the value from
192 * @param options The read options to use
193 * @param key The key to read the value for
195 * @return a byte array storing the value associated with the input key if
196 * any. null if it does not find the specified key.
198 * @throws RocksDBException if the value for the key cannot be read
200 public byte[] getFromBatchAndDB(final RocksDB db
, final ColumnFamilyHandle columnFamilyHandle
,
201 final ReadOptions options
, final byte[] key
) throws RocksDBException
{
202 return getFromBatchAndDB(nativeHandle_
, db
.nativeHandle_
,
203 options
.nativeHandle_
, key
, key
.length
,
204 columnFamilyHandle
.nativeHandle_
);
208 * Similar to {@link RocksDB#get(byte[])} but will also
209 * read writes from this batch.
211 * This function will query both this batch and the DB and then merge
212 * the results using the DB's merge operator (if the batch contains any
215 * Setting {@link ReadOptions#setSnapshot(long, long)} will affect what is
216 * read from the DB but will NOT change which keys are read from the batch
217 * (the keys in this batch do not yet belong to any snapshot and will be
218 * fetched regardless).
220 * @param db The Rocks database
221 * @param options The read options to use
222 * @param key The key to read the value for
224 * @return a byte array storing the value associated with the input key if
225 * any. null if it does not find the specified key.
227 * @throws RocksDBException if the value for the key cannot be read
229 public byte[] getFromBatchAndDB(final RocksDB db
, final ReadOptions options
,
230 final byte[] key
) throws RocksDBException
{
231 return getFromBatchAndDB(nativeHandle_
, db
.nativeHandle_
,
232 options
.nativeHandle_
, key
, key
.length
);
235 @Override protected final native void disposeInternal(final long handle
);
236 @Override final native int count0(final long handle
);
237 @Override final native void put(final long handle
, final byte[] key
,
238 final int keyLen
, final byte[] value
, final int valueLen
);
239 @Override final native void put(final long handle
, final byte[] key
,
240 final int keyLen
, final byte[] value
, final int valueLen
,
241 final long cfHandle
);
242 @Override final native void merge(final long handle
, final byte[] key
,
243 final int keyLen
, final byte[] value
, final int valueLen
);
244 @Override final native void merge(final long handle
, final byte[] key
,
245 final int keyLen
, final byte[] value
, final int valueLen
,
246 final long cfHandle
);
247 @Override final native void remove(final long handle
, final byte[] key
,
249 @Override final native void remove(final long handle
, final byte[] key
,
250 final int keyLen
, final long cfHandle
);
252 final native void deleteRange(final long handle
, final byte[] beginKey
, final int beginKeyLen
,
253 final byte[] endKey
, final int endKeyLen
);
255 final native void deleteRange(final long handle
, final byte[] beginKey
, final int beginKeyLen
,
256 final byte[] endKey
, final int endKeyLen
, final long cfHandle
);
257 @Override final native void putLogData(final long handle
, final byte[] blob
,
259 @Override final native void clear0(final long handle
);
260 @Override final native void setSavePoint0(final long handle
);
261 @Override final native void rollbackToSavePoint0(final long handle
);
263 private native static long newWriteBatchWithIndex();
264 private native static long newWriteBatchWithIndex(final boolean overwriteKey
);
265 private native static long newWriteBatchWithIndex(
266 final long fallbackIndexComparatorHandle
, final int reservedBytes
,
267 final boolean overwriteKey
);
268 private native long iterator0(final long handle
);
269 private native long iterator1(final long handle
, final long cfHandle
);
270 private native long iteratorWithBase(final long handle
,
271 final long baseIteratorHandle
, final long cfHandle
);
272 private native byte[] getFromBatch(final long handle
, final long optHandle
,
273 final byte[] key
, final int keyLen
);
274 private native byte[] getFromBatch(final long handle
, final long optHandle
,
275 final byte[] key
, final int keyLen
, final long cfHandle
);
276 private native byte[] getFromBatchAndDB(final long handle
,
277 final long dbHandle
, final long readOptHandle
, final byte[] key
,
279 private native byte[] getFromBatchAndDB(final long handle
,
280 final long dbHandle
, final long readOptHandle
, final byte[] key
,
281 final int keyLen
, final long cfHandle
);