// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
// This source code is licensed under both the GPLv2 (found in the
// COPYING file in the root directory) and Apache 2.0 License
// (found in the LICENSE.Apache file in the root directory).

// Test for issue 178: a manual compaction causes deleted data to reappear.
10 #include "rocksdb/compaction_filter.h"
11 #include "rocksdb/db.h"
12 #include "rocksdb/slice.h"
13 #include "rocksdb/write_batch.h"
14 #include "test_util/testharness.h"
16 using ROCKSDB_NAMESPACE::CompactionFilter
;
17 using ROCKSDB_NAMESPACE::CompactionStyle
;
18 using ROCKSDB_NAMESPACE::CompactRangeOptions
;
19 using ROCKSDB_NAMESPACE::CompressionType
;
20 using ROCKSDB_NAMESPACE::DB
;
21 using ROCKSDB_NAMESPACE::DestroyDB
;
22 using ROCKSDB_NAMESPACE::FlushOptions
;
23 using ROCKSDB_NAMESPACE::Iterator
;
24 using ROCKSDB_NAMESPACE::Options
;
25 using ROCKSDB_NAMESPACE::ReadOptions
;
26 using ROCKSDB_NAMESPACE::Slice
;
27 using ROCKSDB_NAMESPACE::WriteBatch
;
28 using ROCKSDB_NAMESPACE::WriteOptions
;
// Number of keys written per range in the issue-178 regression test below.
// Reasoning: previously the number was 1100000. Since the keys are written to
// the batch in one write, each write will result in one SST file. We reduced
// the write_buffer_size to 1K to basically have the same effect with a much
// smaller number of keys, which results in less test runtime.
const int kNumKeys = 1100;
39 std::string
Key1(int i
) {
41 snprintf(buf
, sizeof(buf
), "my_key_%d", i
);
45 std::string
Key2(int i
) { return Key1(i
) + "_xxx"; }
47 class ManualCompactionTest
: public testing::Test
{
49 ManualCompactionTest() {
50 // Get rid of any state from an old run.
51 dbname_
= ROCKSDB_NAMESPACE::test::PerThreadDBPath(
52 "rocksdb_manual_compaction_test");
53 EXPECT_OK(DestroyDB(dbname_
, Options()));
59 class DestroyAllCompactionFilter
: public CompactionFilter
{
61 DestroyAllCompactionFilter() {}
63 bool Filter(int /*level*/, const Slice
& /*key*/, const Slice
& existing_value
,
64 std::string
* /*new_value*/,
65 bool* /*value_changed*/) const override
{
66 return existing_value
.ToString() == "destroy";
69 const char* Name() const override
{ return "DestroyAllCompactionFilter"; }
72 class LogCompactionFilter
: public CompactionFilter
{
74 const char* Name() const override
{ return "LogCompactionFilter"; }
76 bool Filter(int level
, const Slice
& key
, const Slice
& /*existing_value*/,
77 std::string
* /*new_value*/,
78 bool* /*value_changed*/) const override
{
79 key_level_
[key
.ToString()] = level
;
83 void Reset() { key_level_
.clear(); }
85 size_t NumKeys() const { return key_level_
.size(); }
87 int KeyLevel(const Slice
& key
) {
88 auto it
= key_level_
.find(key
.ToString());
89 if (it
== key_level_
.end()) {
96 mutable std::map
<std::string
, int> key_level_
;
99 TEST_F(ManualCompactionTest
, CompactTouchesAllKeys
) {
100 for (int iter
= 0; iter
< 2; ++iter
) {
103 if (iter
== 0) { // level compaction
104 options
.num_levels
= 3;
105 options
.compaction_style
= CompactionStyle::kCompactionStyleLevel
;
106 } else { // universal compaction
107 options
.compaction_style
= CompactionStyle::kCompactionStyleUniversal
;
109 options
.create_if_missing
= true;
110 options
.compression
= CompressionType::kNoCompression
;
111 options
.compaction_filter
= new DestroyAllCompactionFilter();
112 ASSERT_OK(DB::Open(options
, dbname_
, &db
));
114 ASSERT_OK(db
->Put(WriteOptions(), Slice("key1"), Slice("destroy")));
115 ASSERT_OK(db
->Put(WriteOptions(), Slice("key2"), Slice("destroy")));
116 ASSERT_OK(db
->Put(WriteOptions(), Slice("key3"), Slice("value3")));
117 ASSERT_OK(db
->Put(WriteOptions(), Slice("key4"), Slice("destroy")));
120 ASSERT_OK(db
->CompactRange(CompactRangeOptions(), nullptr, &key4
));
121 Iterator
* itr
= db
->NewIterator(ReadOptions());
123 ASSERT_TRUE(itr
->Valid());
124 ASSERT_EQ("key3", itr
->key().ToString());
126 ASSERT_TRUE(!itr
->Valid());
129 delete options
.compaction_filter
;
131 ASSERT_OK(DestroyDB(dbname_
, options
));
135 TEST_F(ManualCompactionTest
, Test
) {
136 // Open database. Disable compression since it affects the creation
137 // of layers and the code below is trying to test against a very
138 // specific scenario.
141 db_options
.write_buffer_size
= 1024;
142 db_options
.create_if_missing
= true;
143 db_options
.compression
= CompressionType::kNoCompression
;
144 ASSERT_OK(DB::Open(db_options
, dbname_
, &db
));
146 // create first key range
148 for (int i
= 0; i
< kNumKeys
; i
++) {
149 ASSERT_OK(batch
.Put(Key1(i
), "value for range 1 key"));
151 ASSERT_OK(db
->Write(WriteOptions(), &batch
));
153 // create second key range
155 for (int i
= 0; i
< kNumKeys
; i
++) {
156 ASSERT_OK(batch
.Put(Key2(i
), "value for range 2 key"));
158 ASSERT_OK(db
->Write(WriteOptions(), &batch
));
160 // delete second key range
162 for (int i
= 0; i
< kNumKeys
; i
++) {
163 ASSERT_OK(batch
.Delete(Key2(i
)));
165 ASSERT_OK(db
->Write(WriteOptions(), &batch
));
168 std::string start_key
= Key1(0);
169 std::string end_key
= Key1(kNumKeys
- 1);
170 Slice
least(start_key
.data(), start_key
.size());
171 Slice
greatest(end_key
.data(), end_key
.size());
173 // commenting out the line below causes the example to work correctly
174 ASSERT_OK(db
->CompactRange(CompactRangeOptions(), &least
, &greatest
));
177 Iterator
* iter
= db
->NewIterator(ReadOptions());
179 for (iter
->SeekToFirst(); iter
->Valid(); iter
->Next()) {
183 ASSERT_EQ(kNumKeys
, num_keys
) << "Bad number of keys";
187 ASSERT_OK(DestroyDB(dbname_
, Options()));
190 TEST_F(ManualCompactionTest
, SkipLevel
) {
193 options
.num_levels
= 3;
194 // Initially, flushed L0 files won't exceed 100.
195 options
.level0_file_num_compaction_trigger
= 100;
196 options
.compaction_style
= CompactionStyle::kCompactionStyleLevel
;
197 options
.create_if_missing
= true;
198 options
.compression
= CompressionType::kNoCompression
;
199 LogCompactionFilter
* filter
= new LogCompactionFilter();
200 options
.compaction_filter
= filter
;
201 ASSERT_OK(DB::Open(options
, dbname_
, &db
));
205 ASSERT_OK(db
->Put(wo
, "1", ""));
206 ASSERT_OK(db
->Flush(fo
));
207 ASSERT_OK(db
->Put(wo
, "2", ""));
208 ASSERT_OK(db
->Flush(fo
));
209 ASSERT_OK(db
->Put(wo
, "4", ""));
210 ASSERT_OK(db
->Put(wo
, "8", ""));
211 ASSERT_OK(db
->Flush(fo
));
215 // no file has keys in range [5, 7]
219 ASSERT_OK(db
->CompactRange(CompactRangeOptions(), &start
, &end
));
220 ASSERT_EQ(0, filter
->NumKeys());
225 // [3, 7] overlaps with 4 in L0
229 ASSERT_OK(db
->CompactRange(CompactRangeOptions(), &start
, &end
));
230 ASSERT_EQ(2, filter
->NumKeys());
231 ASSERT_EQ(0, filter
->KeyLevel("4"));
232 ASSERT_EQ(0, filter
->KeyLevel("8"));
238 // no file has keys in range (-inf, 0]
241 ASSERT_OK(db
->CompactRange(CompactRangeOptions(), nullptr, &end
));
242 ASSERT_EQ(0, filter
->NumKeys());
248 // no file has keys in range [9, inf)
251 ASSERT_OK(db
->CompactRange(CompactRangeOptions(), &start
, nullptr));
252 ASSERT_EQ(0, filter
->NumKeys());
258 // [2, 2] overlaps with 2 in L0
262 ASSERT_OK(db
->CompactRange(CompactRangeOptions(), &start
, &end
));
263 ASSERT_EQ(1, filter
->NumKeys());
264 ASSERT_EQ(0, filter
->KeyLevel("2"));
270 // [2, 5] overlaps with 2 and [4, 8) in L1, skip L0
274 ASSERT_OK(db
->CompactRange(CompactRangeOptions(), &start
, &end
));
275 ASSERT_EQ(3, filter
->NumKeys());
276 ASSERT_EQ(1, filter
->KeyLevel("2"));
277 ASSERT_EQ(1, filter
->KeyLevel("4"));
278 ASSERT_EQ(1, filter
->KeyLevel("8"));
284 // [0, inf) overlaps all files
287 ASSERT_OK(db
->CompactRange(CompactRangeOptions(), &start
, nullptr));
288 ASSERT_EQ(4, filter
->NumKeys());
289 // 1 is first compacted to L1 and then further compacted into [2, 4, 8],
290 // so finally the logged level for 1 is L1.
291 ASSERT_EQ(1, filter
->KeyLevel("1"));
292 ASSERT_EQ(1, filter
->KeyLevel("2"));
293 ASSERT_EQ(1, filter
->KeyLevel("4"));
294 ASSERT_EQ(1, filter
->KeyLevel("8"));
299 ASSERT_OK(DestroyDB(dbname_
, options
));
302 } // anonymous namespace
304 int main(int argc
, char** argv
) {
305 ROCKSDB_NAMESPACE::port::InstallStackTraceHandler();
306 ::testing::InitGoogleTest(&argc
, argv
);
307 return RUN_ALL_TESTS();