]> git.proxmox.com Git - ceph.git/blob - ceph/src/rocksdb/db/manual_compaction_test.cc
update ceph source to reef 18.1.2
[ceph.git] / ceph / src / rocksdb / db / manual_compaction_test.cc
1 // Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
2 // This source code is licensed under both the GPLv2 (found in the
3 // COPYING file in the root directory) and Apache 2.0 License
4 // (found in the LICENSE.Apache file in the root directory).
5 //
6 // Test for issue 178: a manual compaction causes deleted data to reappear.
7 #include <cstdlib>
8
9 #include "port/port.h"
10 #include "rocksdb/compaction_filter.h"
11 #include "rocksdb/db.h"
12 #include "rocksdb/slice.h"
13 #include "rocksdb/write_batch.h"
14 #include "test_util/testharness.h"
15
16 using ROCKSDB_NAMESPACE::CompactionFilter;
17 using ROCKSDB_NAMESPACE::CompactionStyle;
18 using ROCKSDB_NAMESPACE::CompactRangeOptions;
19 using ROCKSDB_NAMESPACE::CompressionType;
20 using ROCKSDB_NAMESPACE::DB;
21 using ROCKSDB_NAMESPACE::DestroyDB;
22 using ROCKSDB_NAMESPACE::FlushOptions;
23 using ROCKSDB_NAMESPACE::Iterator;
24 using ROCKSDB_NAMESPACE::Options;
25 using ROCKSDB_NAMESPACE::ReadOptions;
26 using ROCKSDB_NAMESPACE::Slice;
27 using ROCKSDB_NAMESPACE::WriteBatch;
28 using ROCKSDB_NAMESPACE::WriteOptions;
29
30 namespace {
31
// Reasoning: previously the number was 1100000. Since the keys are written
// to the batch one write each, every write results in one SST file. We
// reduced the write_buffer_size to 1K to get basically the same effect
// with a much smaller number of keys, which results in less test runtime.
const int kNumKeys = 1100;
38
// Key in the first range: "my_key_<i>".
std::string Key1(int i) { return "my_key_" + std::to_string(i); }
44
45 std::string Key2(int i) { return Key1(i) + "_xxx"; }
46
47 class ManualCompactionTest : public testing::Test {
48 public:
49 ManualCompactionTest() {
50 // Get rid of any state from an old run.
51 dbname_ = ROCKSDB_NAMESPACE::test::PerThreadDBPath(
52 "rocksdb_manual_compaction_test");
53 EXPECT_OK(DestroyDB(dbname_, Options()));
54 }
55
56 std::string dbname_;
57 };
58
59 class DestroyAllCompactionFilter : public CompactionFilter {
60 public:
61 DestroyAllCompactionFilter() {}
62
63 bool Filter(int /*level*/, const Slice& /*key*/, const Slice& existing_value,
64 std::string* /*new_value*/,
65 bool* /*value_changed*/) const override {
66 return existing_value.ToString() == "destroy";
67 }
68
69 const char* Name() const override { return "DestroyAllCompactionFilter"; }
70 };
71
72 class LogCompactionFilter : public CompactionFilter {
73 public:
74 const char* Name() const override { return "LogCompactionFilter"; }
75
76 bool Filter(int level, const Slice& key, const Slice& /*existing_value*/,
77 std::string* /*new_value*/,
78 bool* /*value_changed*/) const override {
79 key_level_[key.ToString()] = level;
80 return false;
81 }
82
83 void Reset() { key_level_.clear(); }
84
85 size_t NumKeys() const { return key_level_.size(); }
86
87 int KeyLevel(const Slice& key) {
88 auto it = key_level_.find(key.ToString());
89 if (it == key_level_.end()) {
90 return -1;
91 }
92 return it->second;
93 }
94
95 private:
96 mutable std::map<std::string, int> key_level_;
97 };
98
// Issue 178 regression: a manual compaction must run the compaction filter
// on every key in the requested range — for both level and universal
// compaction styles — so data the filter drops cannot reappear afterwards.
TEST_F(ManualCompactionTest, CompactTouchesAllKeys) {
  // iter 0 exercises level compaction, iter 1 universal compaction.
  for (int iter = 0; iter < 2; ++iter) {
    DB* db;
    Options options;
    if (iter == 0) {  // level compaction
      options.num_levels = 3;
      options.compaction_style = CompactionStyle::kCompactionStyleLevel;
    } else {  // universal compaction
      options.compaction_style = CompactionStyle::kCompactionStyleUniversal;
    }
    options.create_if_missing = true;
    // No compression, so the file layout depends only on the data written.
    options.compression = CompressionType::kNoCompression;
    // The filter removes every entry whose value is exactly "destroy".
    options.compaction_filter = new DestroyAllCompactionFilter();
    ASSERT_OK(DB::Open(options, dbname_, &db));

    ASSERT_OK(db->Put(WriteOptions(), Slice("key1"), Slice("destroy")));
    ASSERT_OK(db->Put(WriteOptions(), Slice("key2"), Slice("destroy")));
    ASSERT_OK(db->Put(WriteOptions(), Slice("key3"), Slice("value3")));
    ASSERT_OK(db->Put(WriteOptions(), Slice("key4"), Slice("destroy")));

    // Manually compact everything up to "key4".
    Slice key4("key4");
    ASSERT_OK(db->CompactRange(CompactRangeOptions(), nullptr, &key4));
    // Only key3 should survive: every other value was "destroy".
    Iterator* itr = db->NewIterator(ReadOptions());
    itr->SeekToFirst();
    ASSERT_TRUE(itr->Valid());
    ASSERT_EQ("key3", itr->key().ToString());
    itr->Next();
    ASSERT_TRUE(!itr->Valid());
    delete itr;

    delete options.compaction_filter;
    delete db;
    ASSERT_OK(DestroyDB(dbname_, options));
  }
}
134
// Issue 178 scenario: write key range 1, write key range 2, delete range 2,
// then manually compact range 1. The deleted range-2 keys must not
// reappear, and only the kNumKeys range-1 keys may remain.
TEST_F(ManualCompactionTest, Test) {
  // Open database.  Disable compression since it affects the creation
  // of layers and the code below is trying to test against a very
  // specific scenario.
  DB* db;
  Options db_options;
  // Tiny (1K) write buffer => frequent flushes => many small SST files
  // (see the comment above kNumKeys).
  db_options.write_buffer_size = 1024;
  db_options.create_if_missing = true;
  db_options.compression = CompressionType::kNoCompression;
  ASSERT_OK(DB::Open(db_options, dbname_, &db));

  // create first key range
  WriteBatch batch;
  for (int i = 0; i < kNumKeys; i++) {
    ASSERT_OK(batch.Put(Key1(i), "value for range 1 key"));
  }
  ASSERT_OK(db->Write(WriteOptions(), &batch));

  // create second key range
  batch.Clear();
  for (int i = 0; i < kNumKeys; i++) {
    ASSERT_OK(batch.Put(Key2(i), "value for range 2 key"));
  }
  ASSERT_OK(db->Write(WriteOptions(), &batch));

  // delete second key range
  batch.Clear();
  for (int i = 0; i < kNumKeys; i++) {
    ASSERT_OK(batch.Delete(Key2(i)));
  }
  ASSERT_OK(db->Write(WriteOptions(), &batch));

  // compact database over the first key range only
  std::string start_key = Key1(0);
  std::string end_key = Key1(kNumKeys - 1);
  Slice least(start_key.data(), start_key.size());
  Slice greatest(end_key.data(), end_key.size());

  // commenting out the line below causes the example to work correctly
  // (i.e. the original issue 178 bug only manifested with this compaction)
  ASSERT_OK(db->CompactRange(CompactRangeOptions(), &least, &greatest));

  // count the keys: exactly the kNumKeys range-1 keys should remain
  Iterator* iter = db->NewIterator(ReadOptions());
  int num_keys = 0;
  for (iter->SeekToFirst(); iter->Valid(); iter->Next()) {
    num_keys++;
  }
  delete iter;
  ASSERT_EQ(kNumKeys, num_keys) << "Bad number of keys";

  // close database
  delete db;
  ASSERT_OK(DestroyDB(dbname_, Options()));
}
189
// Verifies that a manual CompactRange() skips files — and whole levels —
// whose key ranges do not overlap the requested [start, end] bounds.
// LogCompactionFilter records exactly which keys a compaction touched and
// at what level, so each sub-case below asserts the touched set.
TEST_F(ManualCompactionTest, SkipLevel) {
  DB* db;
  Options options;
  options.num_levels = 3;
  // Initially, flushed L0 files won't exceed 100, so automatic compactions
  // don't kick in and the file layout stays under the test's control.
  options.level0_file_num_compaction_trigger = 100;
  options.compaction_style = CompactionStyle::kCompactionStyleLevel;
  options.create_if_missing = true;
  options.compression = CompressionType::kNoCompression;
  LogCompactionFilter* filter = new LogCompactionFilter();
  options.compaction_filter = filter;
  ASSERT_OK(DB::Open(options, dbname_, &db));

  // Build three L0 files: {1}, {2}, {4, 8}.
  WriteOptions wo;
  FlushOptions fo;
  ASSERT_OK(db->Put(wo, "1", ""));
  ASSERT_OK(db->Flush(fo));
  ASSERT_OK(db->Put(wo, "2", ""));
  ASSERT_OK(db->Flush(fo));
  ASSERT_OK(db->Put(wo, "4", ""));
  ASSERT_OK(db->Put(wo, "8", ""));
  ASSERT_OK(db->Flush(fo));

  {
    // L0: 1, 2, [4, 8]
    // no file has keys in range [5, 7]
    Slice start("5");
    Slice end("7");
    filter->Reset();
    ASSERT_OK(db->CompactRange(CompactRangeOptions(), &start, &end));
    ASSERT_EQ(0, filter->NumKeys());
  }

  {
    // L0: 1, 2, [4, 8]
    // [3, 7] overlaps with 4 in L0
    Slice start("3");
    Slice end("7");
    filter->Reset();
    ASSERT_OK(db->CompactRange(CompactRangeOptions(), &start, &end));
    ASSERT_EQ(2, filter->NumKeys());
    ASSERT_EQ(0, filter->KeyLevel("4"));
    ASSERT_EQ(0, filter->KeyLevel("8"));
  }

  {
    // L0: 1, 2
    // L1: [4, 8]
    // no file has keys in range (-inf, 0]
    Slice end("0");
    filter->Reset();
    ASSERT_OK(db->CompactRange(CompactRangeOptions(), nullptr, &end));
    ASSERT_EQ(0, filter->NumKeys());
  }

  {
    // L0: 1, 2
    // L1: [4, 8]
    // no file has keys in range [9, inf)
    Slice start("9");
    filter->Reset();
    ASSERT_OK(db->CompactRange(CompactRangeOptions(), &start, nullptr));
    ASSERT_EQ(0, filter->NumKeys());
  }

  {
    // L0: 1, 2
    // L1: [4, 8]
    // [2, 2] overlaps with 2 in L0
    Slice start("2");
    Slice end("2");
    filter->Reset();
    ASSERT_OK(db->CompactRange(CompactRangeOptions(), &start, &end));
    ASSERT_EQ(1, filter->NumKeys());
    ASSERT_EQ(0, filter->KeyLevel("2"));
  }

  {
    // L0: 1
    // L1: 2, [4, 8]
    // [2, 5] overlaps with 2 and [4, 8) in L1, skip L0
    Slice start("2");
    Slice end("5");
    filter->Reset();
    ASSERT_OK(db->CompactRange(CompactRangeOptions(), &start, &end));
    ASSERT_EQ(3, filter->NumKeys());
    ASSERT_EQ(1, filter->KeyLevel("2"));
    ASSERT_EQ(1, filter->KeyLevel("4"));
    ASSERT_EQ(1, filter->KeyLevel("8"));
  }

  {
    // L0: 1
    // L1: [2, 4, 8]
    // [0, inf) overlaps all files
    Slice start("0");
    filter->Reset();
    ASSERT_OK(db->CompactRange(CompactRangeOptions(), &start, nullptr));
    ASSERT_EQ(4, filter->NumKeys());
    // 1 is first compacted to L1 and then further compacted into [2, 4, 8],
    // so finally the logged level for 1 is L1.
    ASSERT_EQ(1, filter->KeyLevel("1"));
    ASSERT_EQ(1, filter->KeyLevel("2"));
    ASSERT_EQ(1, filter->KeyLevel("4"));
    ASSERT_EQ(1, filter->KeyLevel("8"));
  }

  delete filter;
  delete db;
  ASSERT_OK(DestroyDB(dbname_, options));
}
301
302 } // anonymous namespace
303
int main(int argc, char** argv) {
  // Print a native stack trace if a test crashes, to ease debugging.
  ROCKSDB_NAMESPACE::port::InstallStackTraceHandler();
  ::testing::InitGoogleTest(&argc, argv);
  return RUN_ALL_TESTS();
}