1 // Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
2 // This source code is licensed under the BSD-style license found in the
3 // LICENSE file in the root directory of this source tree. An additional grant
4 // of patent rights can be found in the PATENTS file in the same directory.
#include "db/memtable_list.h"

#include <algorithm>
#include <atomic>
#include <memory>
#include <string>
#include <vector>

#include "db/merge_context.h"
#include "db/range_del_aggregator.h"
#include "db/version_set.h"
#include "db/write_controller.h"
#include "rocksdb/db.h"
#include "rocksdb/status.h"
#include "rocksdb/write_buffer_manager.h"
#include "util/string_util.h"
#include "util/testharness.h"
#include "util/testutil.h"

namespace rocksdb {
23 class MemTableListTest
: public testing::Test
{
29 MemTableListTest() : db(nullptr) {
30 dbname
= test::TmpDir() + "/memtable_list_test";
33 // Create a test db if not yet created
36 options
.create_if_missing
= true;
37 DestroyDB(dbname
, options
);
38 Status s
= DB::Open(options
, dbname
, &db
);
46 DestroyDB(dbname
, options
);
50 // Calls MemTableList::InstallMemtableFlushResults() and sets up all
51 // structures needed to call this function.
52 Status
Mock_InstallMemtableFlushResults(
53 MemTableList
* list
, const MutableCFOptions
& mutable_cf_options
,
54 const autovector
<MemTable
*>& m
, autovector
<MemTable
*>* to_delete
) {
55 // Create a mock Logger
56 test::NullLogger logger
;
57 LogBuffer
log_buffer(DEBUG_LEVEL
, &logger
);
59 // Create a mock VersionSet
61 ImmutableDBOptions
immutable_db_options(db_options
);
62 EnvOptions env_options
;
63 shared_ptr
<Cache
> table_cache(NewLRUCache(50000, 16));
64 WriteBufferManager
write_buffer_manager(db_options
.db_write_buffer_size
);
65 WriteController
write_controller(10000000u);
68 VersionSet
versions(dbname
, &immutable_db_options
, env_options
,
69 table_cache
.get(), &write_buffer_manager
,
72 // Create mock default ColumnFamilyData
73 ColumnFamilyOptions cf_options
;
74 std::vector
<ColumnFamilyDescriptor
> column_families
;
75 column_families
.emplace_back(kDefaultColumnFamilyName
, cf_options
);
76 EXPECT_OK(versions
.Recover(column_families
, false));
78 auto column_family_set
= versions
.GetColumnFamilySet();
79 auto cfd
= column_family_set
->GetColumnFamily(0);
80 EXPECT_TRUE(cfd
!= nullptr);
82 // Create dummy mutex.
83 InstrumentedMutex mutex
;
84 InstrumentedMutexLock
l(&mutex
);
86 return list
->InstallMemtableFlushResults(cfd
, mutable_cf_options
, m
,
87 &versions
, &mutex
, 1, to_delete
,
88 nullptr, &log_buffer
);
92 TEST_F(MemTableListTest
, Empty
) {
93 // Create an empty MemTableList and validate basic functions.
94 MemTableList
list(1, 0);
96 ASSERT_EQ(0, list
.NumNotFlushed());
97 ASSERT_FALSE(list
.imm_flush_needed
.load(std::memory_order_acquire
));
98 ASSERT_FALSE(list
.IsFlushPending());
100 autovector
<MemTable
*> mems
;
101 list
.PickMemtablesToFlush(&mems
);
102 ASSERT_EQ(0, mems
.size());
104 autovector
<MemTable
*> to_delete
;
105 list
.current()->Unref(&to_delete
);
106 ASSERT_EQ(0, to_delete
.size());
109 TEST_F(MemTableListTest
, GetTest
) {
110 // Create MemTableList
111 int min_write_buffer_number_to_merge
= 2;
112 int max_write_buffer_number_to_maintain
= 0;
113 MemTableList
list(min_write_buffer_number_to_merge
,
114 max_write_buffer_number_to_maintain
);
116 SequenceNumber seq
= 1;
119 MergeContext merge_context
;
120 InternalKeyComparator
ikey_cmp(options
.comparator
);
121 RangeDelAggregator
range_del_agg(ikey_cmp
, {} /* snapshots */);
122 autovector
<MemTable
*> to_delete
;
124 LookupKey
lkey("key1", seq
);
125 bool found
= list
.current()->Get(lkey
, &value
, &s
, &merge_context
,
126 &range_del_agg
, ReadOptions());
130 InternalKeyComparator
cmp(BytewiseComparator());
131 auto factory
= std::make_shared
<SkipListFactory
>();
132 options
.memtable_factory
= factory
;
133 ImmutableCFOptions
ioptions(options
);
135 WriteBufferManager
wb(options
.db_write_buffer_size
);
136 MemTable
* mem
= new MemTable(cmp
, ioptions
, MutableCFOptions(options
), &wb
,
140 // Write some keys to this memtable.
141 mem
->Add(++seq
, kTypeDeletion
, "key1", "");
142 mem
->Add(++seq
, kTypeValue
, "key2", "value2");
143 mem
->Add(++seq
, kTypeValue
, "key1", "value1");
144 mem
->Add(++seq
, kTypeValue
, "key2", "value2.2");
146 // Fetch the newly written keys
147 merge_context
.Clear();
148 found
= mem
->Get(LookupKey("key1", seq
), &value
, &s
, &merge_context
,
149 &range_del_agg
, ReadOptions());
150 ASSERT_TRUE(s
.ok() && found
);
151 ASSERT_EQ(value
, "value1");
153 merge_context
.Clear();
154 found
= mem
->Get(LookupKey("key1", 2), &value
, &s
, &merge_context
,
155 &range_del_agg
, ReadOptions());
156 // MemTable found out that this key is *not* found (at this sequence#)
157 ASSERT_TRUE(found
&& s
.IsNotFound());
159 merge_context
.Clear();
160 found
= mem
->Get(LookupKey("key2", seq
), &value
, &s
, &merge_context
,
161 &range_del_agg
, ReadOptions());
162 ASSERT_TRUE(s
.ok() && found
);
163 ASSERT_EQ(value
, "value2.2");
165 ASSERT_EQ(4, mem
->num_entries());
166 ASSERT_EQ(1, mem
->num_deletes());
168 // Add memtable to list
169 list
.Add(mem
, &to_delete
);
171 SequenceNumber saved_seq
= seq
;
173 // Create another memtable and write some keys to it
174 WriteBufferManager
wb2(options
.db_write_buffer_size
);
175 MemTable
* mem2
= new MemTable(cmp
, ioptions
, MutableCFOptions(options
), &wb2
,
179 mem2
->Add(++seq
, kTypeDeletion
, "key1", "");
180 mem2
->Add(++seq
, kTypeValue
, "key2", "value2.3");
182 // Add second memtable to list
183 list
.Add(mem2
, &to_delete
);
185 // Fetch keys via MemTableList
186 merge_context
.Clear();
187 found
= list
.current()->Get(LookupKey("key1", seq
), &value
, &s
,
188 &merge_context
, &range_del_agg
, ReadOptions());
189 ASSERT_TRUE(found
&& s
.IsNotFound());
191 merge_context
.Clear();
192 found
= list
.current()->Get(LookupKey("key1", saved_seq
), &value
, &s
,
193 &merge_context
, &range_del_agg
, ReadOptions());
194 ASSERT_TRUE(s
.ok() && found
);
195 ASSERT_EQ("value1", value
);
197 merge_context
.Clear();
198 found
= list
.current()->Get(LookupKey("key2", seq
), &value
, &s
,
199 &merge_context
, &range_del_agg
, ReadOptions());
200 ASSERT_TRUE(s
.ok() && found
);
201 ASSERT_EQ(value
, "value2.3");
203 merge_context
.Clear();
204 found
= list
.current()->Get(LookupKey("key2", 1), &value
, &s
, &merge_context
,
205 &range_del_agg
, ReadOptions());
208 ASSERT_EQ(2, list
.NumNotFlushed());
210 list
.current()->Unref(&to_delete
);
211 for (MemTable
* m
: to_delete
) {
216 TEST_F(MemTableListTest
, GetFromHistoryTest
) {
217 // Create MemTableList
218 int min_write_buffer_number_to_merge
= 2;
219 int max_write_buffer_number_to_maintain
= 2;
220 MemTableList
list(min_write_buffer_number_to_merge
,
221 max_write_buffer_number_to_maintain
);
223 SequenceNumber seq
= 1;
226 MergeContext merge_context
;
227 InternalKeyComparator
ikey_cmp(options
.comparator
);
228 RangeDelAggregator
range_del_agg(ikey_cmp
, {} /* snapshots */);
229 autovector
<MemTable
*> to_delete
;
231 LookupKey
lkey("key1", seq
);
232 bool found
= list
.current()->Get(lkey
, &value
, &s
, &merge_context
,
233 &range_del_agg
, ReadOptions());
237 InternalKeyComparator
cmp(BytewiseComparator());
238 auto factory
= std::make_shared
<SkipListFactory
>();
239 options
.memtable_factory
= factory
;
240 ImmutableCFOptions
ioptions(options
);
242 WriteBufferManager
wb(options
.db_write_buffer_size
);
243 MemTable
* mem
= new MemTable(cmp
, ioptions
, MutableCFOptions(options
), &wb
,
247 // Write some keys to this memtable.
248 mem
->Add(++seq
, kTypeDeletion
, "key1", "");
249 mem
->Add(++seq
, kTypeValue
, "key2", "value2");
250 mem
->Add(++seq
, kTypeValue
, "key2", "value2.2");
252 // Fetch the newly written keys
253 merge_context
.Clear();
254 found
= mem
->Get(LookupKey("key1", seq
), &value
, &s
, &merge_context
,
255 &range_del_agg
, ReadOptions());
256 // MemTable found out that this key is *not* found (at this sequence#)
257 ASSERT_TRUE(found
&& s
.IsNotFound());
259 merge_context
.Clear();
260 found
= mem
->Get(LookupKey("key2", seq
), &value
, &s
, &merge_context
,
261 &range_del_agg
, ReadOptions());
262 ASSERT_TRUE(s
.ok() && found
);
263 ASSERT_EQ(value
, "value2.2");
265 // Add memtable to list
266 list
.Add(mem
, &to_delete
);
267 ASSERT_EQ(0, to_delete
.size());
269 // Fetch keys via MemTableList
270 merge_context
.Clear();
271 found
= list
.current()->Get(LookupKey("key1", seq
), &value
, &s
,
272 &merge_context
, &range_del_agg
, ReadOptions());
273 ASSERT_TRUE(found
&& s
.IsNotFound());
275 merge_context
.Clear();
276 found
= list
.current()->Get(LookupKey("key2", seq
), &value
, &s
,
277 &merge_context
, &range_del_agg
, ReadOptions());
278 ASSERT_TRUE(s
.ok() && found
);
279 ASSERT_EQ("value2.2", value
);
281 // Flush this memtable from the list.
282 // (It will then be a part of the memtable history).
283 autovector
<MemTable
*> to_flush
;
284 list
.PickMemtablesToFlush(&to_flush
);
285 ASSERT_EQ(1, to_flush
.size());
287 s
= Mock_InstallMemtableFlushResults(&list
, MutableCFOptions(options
),
288 to_flush
, &to_delete
);
290 ASSERT_EQ(0, list
.NumNotFlushed());
291 ASSERT_EQ(1, list
.NumFlushed());
292 ASSERT_EQ(0, to_delete
.size());
294 // Verify keys are no longer in MemTableList
295 merge_context
.Clear();
296 found
= list
.current()->Get(LookupKey("key1", seq
), &value
, &s
,
297 &merge_context
, &range_del_agg
, ReadOptions());
300 merge_context
.Clear();
301 found
= list
.current()->Get(LookupKey("key2", seq
), &value
, &s
,
302 &merge_context
, &range_del_agg
, ReadOptions());
305 // Verify keys are present in history
306 merge_context
.Clear();
307 found
= list
.current()->GetFromHistory(LookupKey("key1", seq
), &value
, &s
,
308 &merge_context
, &range_del_agg
,
310 ASSERT_TRUE(found
&& s
.IsNotFound());
312 merge_context
.Clear();
313 found
= list
.current()->GetFromHistory(LookupKey("key2", seq
), &value
, &s
,
314 &merge_context
, &range_del_agg
,
317 ASSERT_EQ("value2.2", value
);
319 // Create another memtable and write some keys to it
320 WriteBufferManager
wb2(options
.db_write_buffer_size
);
321 MemTable
* mem2
= new MemTable(cmp
, ioptions
, MutableCFOptions(options
), &wb2
,
325 mem2
->Add(++seq
, kTypeDeletion
, "key1", "");
326 mem2
->Add(++seq
, kTypeValue
, "key3", "value3");
328 // Add second memtable to list
329 list
.Add(mem2
, &to_delete
);
330 ASSERT_EQ(0, to_delete
.size());
333 list
.PickMemtablesToFlush(&to_flush
);
334 ASSERT_EQ(1, to_flush
.size());
336 // Flush second memtable
337 s
= Mock_InstallMemtableFlushResults(&list
, MutableCFOptions(options
),
338 to_flush
, &to_delete
);
340 ASSERT_EQ(0, list
.NumNotFlushed());
341 ASSERT_EQ(2, list
.NumFlushed());
342 ASSERT_EQ(0, to_delete
.size());
344 // Add a third memtable to push the first memtable out of the history
345 WriteBufferManager
wb3(options
.db_write_buffer_size
);
346 MemTable
* mem3
= new MemTable(cmp
, ioptions
, MutableCFOptions(options
), &wb3
,
349 list
.Add(mem3
, &to_delete
);
350 ASSERT_EQ(1, list
.NumNotFlushed());
351 ASSERT_EQ(1, list
.NumFlushed());
352 ASSERT_EQ(1, to_delete
.size());
354 // Verify keys are no longer in MemTableList
355 merge_context
.Clear();
356 found
= list
.current()->Get(LookupKey("key1", seq
), &value
, &s
,
357 &merge_context
, &range_del_agg
, ReadOptions());
360 merge_context
.Clear();
361 found
= list
.current()->Get(LookupKey("key2", seq
), &value
, &s
,
362 &merge_context
, &range_del_agg
, ReadOptions());
365 merge_context
.Clear();
366 found
= list
.current()->Get(LookupKey("key3", seq
), &value
, &s
,
367 &merge_context
, &range_del_agg
, ReadOptions());
370 // Verify that the second memtable's keys are in the history
371 merge_context
.Clear();
372 found
= list
.current()->GetFromHistory(LookupKey("key1", seq
), &value
, &s
,
373 &merge_context
, &range_del_agg
,
375 ASSERT_TRUE(found
&& s
.IsNotFound());
377 merge_context
.Clear();
378 found
= list
.current()->GetFromHistory(LookupKey("key3", seq
), &value
, &s
,
379 &merge_context
, &range_del_agg
,
382 ASSERT_EQ("value3", value
);
384 // Verify that key2 from the first memtable is no longer in the history
385 merge_context
.Clear();
386 found
= list
.current()->Get(LookupKey("key2", seq
), &value
, &s
,
387 &merge_context
, &range_del_agg
, ReadOptions());
391 list
.current()->Unref(&to_delete
);
392 ASSERT_EQ(3, to_delete
.size());
393 for (MemTable
* m
: to_delete
) {
398 TEST_F(MemTableListTest
, FlushPendingTest
) {
399 const int num_tables
= 5;
400 SequenceNumber seq
= 1;
403 auto factory
= std::make_shared
<SkipListFactory
>();
404 options
.memtable_factory
= factory
;
405 ImmutableCFOptions
ioptions(options
);
406 InternalKeyComparator
cmp(BytewiseComparator());
407 WriteBufferManager
wb(options
.db_write_buffer_size
);
408 autovector
<MemTable
*> to_delete
;
410 // Create MemTableList
411 int min_write_buffer_number_to_merge
= 3;
412 int max_write_buffer_number_to_maintain
= 7;
413 MemTableList
list(min_write_buffer_number_to_merge
,
414 max_write_buffer_number_to_maintain
);
416 // Create some MemTables
417 std::vector
<MemTable
*> tables
;
418 MutableCFOptions
mutable_cf_options(options
);
419 for (int i
= 0; i
< num_tables
; i
++) {
420 MemTable
* mem
= new MemTable(cmp
, ioptions
, mutable_cf_options
, &wb
,
425 MergeContext merge_context
;
427 mem
->Add(++seq
, kTypeValue
, "key1", ToString(i
));
428 mem
->Add(++seq
, kTypeValue
, "keyN" + ToString(i
), "valueN");
429 mem
->Add(++seq
, kTypeValue
, "keyX" + ToString(i
), "value");
430 mem
->Add(++seq
, kTypeValue
, "keyM" + ToString(i
), "valueM");
431 mem
->Add(++seq
, kTypeDeletion
, "keyX" + ToString(i
), "");
433 tables
.push_back(mem
);
437 ASSERT_FALSE(list
.IsFlushPending());
438 ASSERT_FALSE(list
.imm_flush_needed
.load(std::memory_order_acquire
));
439 autovector
<MemTable
*> to_flush
;
440 list
.PickMemtablesToFlush(&to_flush
);
441 ASSERT_EQ(0, to_flush
.size());
443 // Request a flush even though there is nothing to flush
444 list
.FlushRequested();
445 ASSERT_FALSE(list
.IsFlushPending());
446 ASSERT_FALSE(list
.imm_flush_needed
.load(std::memory_order_acquire
));
448 // Attempt to 'flush' to clear request for flush
449 list
.PickMemtablesToFlush(&to_flush
);
450 ASSERT_EQ(0, to_flush
.size());
451 ASSERT_FALSE(list
.IsFlushPending());
452 ASSERT_FALSE(list
.imm_flush_needed
.load(std::memory_order_acquire
));
454 // Request a flush again
455 list
.FlushRequested();
456 // No flush pending since the list is empty.
457 ASSERT_FALSE(list
.IsFlushPending());
458 ASSERT_FALSE(list
.imm_flush_needed
.load(std::memory_order_acquire
));
461 list
.Add(tables
[0], &to_delete
);
462 list
.Add(tables
[1], &to_delete
);
463 ASSERT_EQ(2, list
.NumNotFlushed());
464 ASSERT_EQ(0, to_delete
.size());
466 // Even though we have less than the minimum to flush, a flush is
467 // pending since we had previously requested a flush and never called
468 // PickMemtablesToFlush() to clear the flush.
469 ASSERT_TRUE(list
.IsFlushPending());
470 ASSERT_TRUE(list
.imm_flush_needed
.load(std::memory_order_acquire
));
472 // Pick tables to flush
473 list
.PickMemtablesToFlush(&to_flush
);
474 ASSERT_EQ(2, to_flush
.size());
475 ASSERT_EQ(2, list
.NumNotFlushed());
476 ASSERT_FALSE(list
.IsFlushPending());
477 ASSERT_FALSE(list
.imm_flush_needed
.load(std::memory_order_acquire
));
480 list
.RollbackMemtableFlush(to_flush
, 0);
481 ASSERT_FALSE(list
.IsFlushPending());
482 ASSERT_TRUE(list
.imm_flush_needed
.load(std::memory_order_acquire
));
486 list
.Add(tables
[2], &to_delete
);
487 // We now have the minimum to flush regardles of whether FlushRequested()
489 ASSERT_TRUE(list
.IsFlushPending());
490 ASSERT_TRUE(list
.imm_flush_needed
.load(std::memory_order_acquire
));
491 ASSERT_EQ(0, to_delete
.size());
493 // Pick tables to flush
494 list
.PickMemtablesToFlush(&to_flush
);
495 ASSERT_EQ(3, to_flush
.size());
496 ASSERT_EQ(3, list
.NumNotFlushed());
497 ASSERT_FALSE(list
.IsFlushPending());
498 ASSERT_FALSE(list
.imm_flush_needed
.load(std::memory_order_acquire
));
500 // Pick tables to flush again
501 autovector
<MemTable
*> to_flush2
;
502 list
.PickMemtablesToFlush(&to_flush2
);
503 ASSERT_EQ(0, to_flush2
.size());
504 ASSERT_EQ(3, list
.NumNotFlushed());
505 ASSERT_FALSE(list
.IsFlushPending());
506 ASSERT_FALSE(list
.imm_flush_needed
.load(std::memory_order_acquire
));
509 list
.Add(tables
[3], &to_delete
);
510 ASSERT_FALSE(list
.IsFlushPending());
511 ASSERT_TRUE(list
.imm_flush_needed
.load(std::memory_order_acquire
));
512 ASSERT_EQ(0, to_delete
.size());
514 // Request a flush again
515 list
.FlushRequested();
516 ASSERT_TRUE(list
.IsFlushPending());
517 ASSERT_TRUE(list
.imm_flush_needed
.load(std::memory_order_acquire
));
519 // Pick tables to flush again
520 list
.PickMemtablesToFlush(&to_flush2
);
521 ASSERT_EQ(1, to_flush2
.size());
522 ASSERT_EQ(4, list
.NumNotFlushed());
523 ASSERT_FALSE(list
.IsFlushPending());
524 ASSERT_FALSE(list
.imm_flush_needed
.load(std::memory_order_acquire
));
526 // Rollback first pick of tables
527 list
.RollbackMemtableFlush(to_flush
, 0);
528 ASSERT_TRUE(list
.IsFlushPending());
529 ASSERT_TRUE(list
.imm_flush_needed
.load(std::memory_order_acquire
));
532 // Add another tables
533 list
.Add(tables
[4], &to_delete
);
534 ASSERT_EQ(5, list
.NumNotFlushed());
535 // We now have the minimum to flush regardles of whether FlushRequested()
536 ASSERT_TRUE(list
.IsFlushPending());
537 ASSERT_TRUE(list
.imm_flush_needed
.load(std::memory_order_acquire
));
538 ASSERT_EQ(0, to_delete
.size());
540 // Pick tables to flush
541 list
.PickMemtablesToFlush(&to_flush
);
542 // Should pick 4 of 5 since 1 table has been picked in to_flush2
543 ASSERT_EQ(4, to_flush
.size());
544 ASSERT_EQ(5, list
.NumNotFlushed());
545 ASSERT_FALSE(list
.IsFlushPending());
546 ASSERT_FALSE(list
.imm_flush_needed
.load(std::memory_order_acquire
));
548 // Pick tables to flush again
549 autovector
<MemTable
*> to_flush3
;
550 ASSERT_EQ(0, to_flush3
.size()); // nothing not in progress of being flushed
551 ASSERT_EQ(5, list
.NumNotFlushed());
552 ASSERT_FALSE(list
.IsFlushPending());
553 ASSERT_FALSE(list
.imm_flush_needed
.load(std::memory_order_acquire
));
555 // Flush the 4 memtables that were picked in to_flush
556 s
= Mock_InstallMemtableFlushResults(&list
, MutableCFOptions(options
),
557 to_flush
, &to_delete
);
560 // Note: now to_flush contains tables[0,1,2,4]. to_flush2 contains
562 // Current implementation will only commit memtables in the order they were
563 // created. So InstallMemtableFlushResults will install the first 3 tables
564 // in to_flush and stop when it encounters a table not yet flushed.
565 ASSERT_EQ(2, list
.NumNotFlushed());
566 int num_in_history
= std::min(3, max_write_buffer_number_to_maintain
);
567 ASSERT_EQ(num_in_history
, list
.NumFlushed());
568 ASSERT_EQ(5 - list
.NumNotFlushed() - num_in_history
, to_delete
.size());
570 // Request a flush again. Should be nothing to flush
571 list
.FlushRequested();
572 ASSERT_FALSE(list
.IsFlushPending());
573 ASSERT_FALSE(list
.imm_flush_needed
.load(std::memory_order_acquire
));
575 // Flush the 1 memtable that was picked in to_flush2
576 s
= MemTableListTest::Mock_InstallMemtableFlushResults(
577 &list
, MutableCFOptions(options
), to_flush2
, &to_delete
);
580 // This will actually install 2 tables. The 1 we told it to flush, and also
581 // tables[4] which has been waiting for tables[3] to commit.
582 ASSERT_EQ(0, list
.NumNotFlushed());
583 num_in_history
= std::min(5, max_write_buffer_number_to_maintain
);
584 ASSERT_EQ(num_in_history
, list
.NumFlushed());
585 ASSERT_EQ(5 - list
.NumNotFlushed() - num_in_history
, to_delete
.size());
587 for (const auto& m
: to_delete
) {
588 // Refcount should be 0 after calling InstallMemtableFlushResults.
589 // Verify this, by Ref'ing then UnRef'ing:
591 ASSERT_EQ(m
, m
->Unref());
596 list
.current()->Unref(&to_delete
);
597 int to_delete_size
= std::min(5, max_write_buffer_number_to_maintain
);
598 ASSERT_EQ(to_delete_size
, to_delete
.size());
600 for (const auto& m
: to_delete
) {
601 // Refcount should be 0 after calling InstallMemtableFlushResults.
602 // Verify this, by Ref'ing then UnRef'ing:
604 ASSERT_EQ(m
, m
->Unref());
610 } // namespace rocksdb
612 int main(int argc
, char** argv
) {
613 ::testing::InitGoogleTest(&argc
, argv
);
614 return RUN_ALL_TESTS();