// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
// This source code is licensed under both the GPLv2 (found in the
// COPYING file in the root directory) and Apache 2.0 License
// (found in the LICENSE.Apache file in the root directory).
//
// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file. See the AUTHORS file for names of contributors.

#ifndef ROCKSDB_LITE

#include <stdlib.h>
#include <map>
#include <string>
#include <vector>
#include "db/db_impl.h"
#include "db/version_set.h"
#include "db/write_batch_internal.h"
#include "rocksdb/db.h"
#include "rocksdb/env.h"
#include "rocksdb/transaction_log.h"
#include "util/filename.h"
#include "util/string_util.h"
#include "util/sync_point.h"
#include "util/testharness.h"
#include "util/testutil.h"

namespace rocksdb {

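// Test fixture for DBImpl::DeleteFile(). It opens a DB with a dedicated WAL
// directory, very large write buffers and level sizes (so nothing is flushed
// or compacted unless a test asks for it), and an obsolete-file purge period
// of zero so that every purge is a full purge.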
class DeleteFileTest : public testing::Test {
 public:
  std::string dbname_;
  Options options_;
  DB* db_;
  Env* env_;
  int numlevels_;

  DeleteFileTest() {
    db_ = nullptr;
    env_ = Env::Default();
    options_.delete_obsolete_files_period_micros = 0;  // always do full purge
    options_.enable_thread_tracking = true;
    options_.write_buffer_size = 1024 * 1024 * 1000;
    options_.target_file_size_base = 1024 * 1024 * 1000;
    options_.max_bytes_for_level_base = 1024 * 1024 * 1000;
    options_.WAL_ttl_seconds = 300;     // Used to test log files
    options_.WAL_size_limit_MB = 1024;  // Used to test log files
    dbname_ = test::PerThreadDBPath("deletefile_test");
    options_.wal_dir = dbname_ + "/wal_files";

    // clean up all the files that might have been there before
    std::vector<std::string> old_files;
    env_->GetChildren(dbname_, &old_files);
    for (auto file : old_files) {
      env_->DeleteFile(dbname_ + "/" + file);
    }
    env_->GetChildren(options_.wal_dir, &old_files);
    for (auto file : old_files) {
      env_->DeleteFile(options_.wal_dir + "/" + file);
    }

    DestroyDB(dbname_, options_);
    numlevels_ = 7;
    EXPECT_OK(ReopenDB(true));
  }

  Status ReopenDB(bool create) {
    delete db_;
    if (create) {
      DestroyDB(dbname_, options_);
    }
    db_ = nullptr;
    options_.create_if_missing = create;
    return DB::Open(options_, dbname_, &db_);
  }

  void CloseDB() {
    delete db_;
    db_ = nullptr;
  }

  void AddKeys(int numkeys, int startkey = 0) {
    WriteOptions options;
    options.sync = false;
    ReadOptions roptions;
    for (int i = startkey; i < (numkeys + startkey); i++) {
      std::string temp = ToString(i);
      Slice key(temp);
      Slice value(temp);
      ASSERT_OK(db_->Put(options, key, value));
    }
  }

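  // Returns the total number of keys spanned by the files in `metadata`,
  // computed from each file's smallest/largest key (AddKeys() writes dense
  // integer keys, so the span equals the key count). Optionally accumulates
  // per-level counts into `keysperlevel`.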
  int numKeysInLevels(std::vector<LiveFileMetaData>& metadata,
                      std::vector<int>* keysperlevel = nullptr) {
    if (keysperlevel != nullptr) {
      keysperlevel->resize(numlevels_);
    }

    int numKeys = 0;
    for (size_t i = 0; i < metadata.size(); i++) {
      int startkey = atoi(metadata[i].smallestkey.c_str());
      int endkey = atoi(metadata[i].largestkey.c_str());
      int numkeysinfile = (endkey - startkey + 1);
      numKeys += numkeysinfile;
      if (keysperlevel != nullptr) {
        (*keysperlevel)[(int)metadata[i].level] += numkeysinfile;
      }
      fprintf(stderr, "level %d name %s smallest %s largest %s\n",
              metadata[i].level, metadata[i].name.c_str(),
              metadata[i].smallestkey.c_str(), metadata[i].largestkey.c_str());
    }
    return numKeys;
  }

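  // Writes keys [10000, 59999] twice, flushing and compacting in between, so
  // that the DB ends up with one ~50000-key file in level 2 and one file
  // covering the same key range in level 1. Several tests below rely on this
  // two-level layout.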
  void CreateTwoLevels() {
    AddKeys(50000, 10000);
    DBImpl* dbi = reinterpret_cast<DBImpl*>(db_);
    ASSERT_OK(dbi->TEST_FlushMemTable());
    ASSERT_OK(dbi->TEST_WaitForFlushMemTable());
    for (int i = 0; i < 2; ++i) {
      ASSERT_OK(dbi->TEST_CompactRange(i, nullptr, nullptr));
    }

    AddKeys(50000, 10000);
    ASSERT_OK(dbi->TEST_FlushMemTable());
    ASSERT_OK(dbi->TEST_WaitForFlushMemTable());
    ASSERT_OK(dbi->TEST_CompactRange(0, nullptr, nullptr));
  }

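  // Scans `dir` and asserts that it contains exactly the expected number of
  // WAL (.log), table (.sst) and MANIFEST files.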
  void CheckFileTypeCounts(const std::string& dir, int required_log,
                           int required_sst, int required_manifest) {
    std::vector<std::string> filenames;
    env_->GetChildren(dir, &filenames);

    int log_cnt = 0, sst_cnt = 0, manifest_cnt = 0;
    for (auto file : filenames) {
      uint64_t number;
      FileType type;
      if (ParseFileName(file, &number, &type)) {
        log_cnt += (type == kLogFile);
        sst_cnt += (type == kTableFile);
        manifest_cnt += (type == kDescriptorFile);
      }
    }
    ASSERT_EQ(required_log, log_cnt);
    ASSERT_EQ(required_sst, sst_cnt);
    ASSERT_EQ(required_manifest, manifest_cnt);
  }

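  // Background job that occupies a HIGH-priority background thread for two
  // seconds; used to delay purge jobs queued behind it.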
  static void DoSleep(void* arg) {
    auto test = reinterpret_cast<DeleteFileTest*>(arg);
    test->env_->SleepForMicroseconds(2 * 1000 * 1000);
  }

  // An empty job scheduled after the others to guarantee that all previously
  // queued jobs have been processed.
  static void GuardFinish(void* /*arg*/) {
    TEST_SYNC_POINT("DeleteFileTest::GuardFinish");
  }
};

TEST_F(DeleteFileTest, AddKeysAndQueryLevels) {
  CreateTwoLevels();
  std::vector<LiveFileMetaData> metadata;
  db_->GetLiveFilesMetaData(&metadata);

  std::string level1file = "";
  int level1keycount = 0;
  std::string level2file = "";
  int level2keycount = 0;
  int level1index = 0;
  int level2index = 1;

  ASSERT_EQ((int)metadata.size(), 2);
  if (metadata[0].level == 2) {
    level1index = 1;
    level2index = 0;
  }

  level1file = metadata[level1index].name;
  int startkey = atoi(metadata[level1index].smallestkey.c_str());
  int endkey = atoi(metadata[level1index].largestkey.c_str());
  level1keycount = (endkey - startkey + 1);
  level2file = metadata[level2index].name;
  startkey = atoi(metadata[level2index].smallestkey.c_str());
  endkey = atoi(metadata[level2index].largestkey.c_str());
  level2keycount = (endkey - startkey + 1);

  // Controlled setup. Levels 1 and 2 should each hold a file with 50K keys.
  // This is a little fragile as it depends on the current
  // compaction heuristics.
  ASSERT_EQ(level1keycount, 50000);
  ASSERT_EQ(level2keycount, 50000);

  Status status = db_->DeleteFile("0.sst");
  ASSERT_TRUE(status.IsInvalidArgument());

  // Intermediate-level files cannot be deleted.
  status = db_->DeleteFile(level1file);
  ASSERT_TRUE(status.IsInvalidArgument());

  // Lowest level file deletion should succeed.
  ASSERT_OK(db_->DeleteFile(level2file));

  CloseDB();
}

TEST_F(DeleteFileTest, PurgeObsoleteFilesTest) {
  CreateTwoLevels();
  // there should be only one (empty) log file because CreateTwoLevels()
  // flushes the memtables to disk
  CheckFileTypeCounts(options_.wal_dir, 1, 0, 0);
  // 2 ssts, 1 manifest
  CheckFileTypeCounts(dbname_, 0, 2, 1);
  std::string first("0"), last("999999");
  CompactRangeOptions compact_options;
  compact_options.change_level = true;
  compact_options.target_level = 2;
  Slice first_slice(first), last_slice(last);
  ASSERT_OK(db_->CompactRange(compact_options, &first_slice, &last_slice));
  // 1 sst after compaction
  CheckFileTypeCounts(dbname_, 0, 1, 1);

  // this time, we keep an iterator alive
  ASSERT_OK(ReopenDB(true));
  Iterator* itr = nullptr;
  CreateTwoLevels();
  itr = db_->NewIterator(ReadOptions());
  ASSERT_OK(db_->CompactRange(compact_options, &first_slice, &last_slice));
  // 3 sst after compaction with live iterator
  CheckFileTypeCounts(dbname_, 0, 3, 1);
  delete itr;
  // 1 sst after iterator deletion
  CheckFileTypeCounts(dbname_, 0, 1, 1);

  CloseDB();
}

TEST_F(DeleteFileTest, BackgroundPurgeTest) {
  std::string first("0"), last("999999");
  CompactRangeOptions compact_options;
  compact_options.change_level = true;
  compact_options.target_level = 2;
  Slice first_slice(first), last_slice(last);

  // We keep an iterator alive
  Iterator* itr = nullptr;
  CreateTwoLevels();
  ReadOptions options;
  options.background_purge_on_iterator_cleanup = true;
  itr = db_->NewIterator(options);
  ASSERT_OK(db_->CompactRange(compact_options, &first_slice, &last_slice));
  // 3 sst after compaction with live iterator
  CheckFileTypeCounts(dbname_, 0, 3, 1);
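  // Occupy the HIGH-priority background thread so the purge scheduled by the
  // iterator deletion stays queued (showing that no purge runs in the
  // foreground); a second sleeping task queued after the purge lets the test
  // wait until the purge has completed.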
  test::SleepingBackgroundTask sleeping_task_before;
  env_->Schedule(&test::SleepingBackgroundTask::DoSleepTask,
                 &sleeping_task_before, Env::Priority::HIGH);
  delete itr;
  test::SleepingBackgroundTask sleeping_task_after;
  env_->Schedule(&test::SleepingBackgroundTask::DoSleepTask,
                 &sleeping_task_after, Env::Priority::HIGH);

  // Make sure no purges are executed in the foreground
  CheckFileTypeCounts(dbname_, 0, 3, 1);
  sleeping_task_before.WakeUp();
  sleeping_task_before.WaitUntilDone();

  // Make sure all background purges are executed
  sleeping_task_after.WakeUp();
  sleeping_task_after.WaitUntilDone();
  // 1 sst after iterator deletion
  CheckFileTypeCounts(dbname_, 0, 1, 1);

  CloseDB();
}

// This test reproduces a bug where the iterator cleanup function read an
// invalid (already deleted) ReadOptions object.
TEST_F(DeleteFileTest, BackgroundPurgeCopyOptions) {
  std::string first("0"), last("999999");
  CompactRangeOptions compact_options;
  compact_options.change_level = true;
  compact_options.target_level = 2;
  Slice first_slice(first), last_slice(last);

  // We keep an iterator alive
  Iterator* itr = nullptr;
  CreateTwoLevels();
  ReadOptions* options = new ReadOptions();
  options->background_purge_on_iterator_cleanup = true;
  itr = db_->NewIterator(*options);
  // ReadOptions is deleted, but the iterator cleanup function should not be
  // affected
  delete options;

  ASSERT_OK(db_->CompactRange(compact_options, &first_slice, &last_slice));
  // 3 sst after compaction with live iterator
  CheckFileTypeCounts(dbname_, 0, 3, 1);
  delete itr;

  test::SleepingBackgroundTask sleeping_task_after;
  env_->Schedule(&test::SleepingBackgroundTask::DoSleepTask,
                 &sleeping_task_after, Env::Priority::HIGH);

  // Make sure all background purges are executed
  sleeping_task_after.WakeUp();
  sleeping_task_after.WaitUntilDone();
  // 1 sst after iterator deletion
  CheckFileTypeCounts(dbname_, 0, 1, 1);

  CloseDB();
}

TEST_F(DeleteFileTest, BackgroundPurgeTestMultipleJobs) {
  std::string first("0"), last("999999");
  CompactRangeOptions compact_options;
  compact_options.change_level = true;
  compact_options.target_level = 2;
  Slice first_slice(first), last_slice(last);

  // We keep two iterators alive
  CreateTwoLevels();
  ReadOptions options;
  options.background_purge_on_iterator_cleanup = true;
  Iterator* itr1 = db_->NewIterator(options);
  CreateTwoLevels();
  Iterator* itr2 = db_->NewIterator(options);
  ASSERT_OK(db_->CompactRange(compact_options, &first_slice, &last_slice));
  // 5 sst files after 2 compactions with 2 live iterators
  CheckFileTypeCounts(dbname_, 0, 5, 1);

  // ~DBImpl should wait until all BGWorkPurge jobs are finished
  rocksdb::SyncPoint::GetInstance()->LoadDependency(
      {{"DBImpl::~DBImpl:WaitJob", "DBImpl::BGWorkPurge"},
       {"DeleteFileTest::GuardFinish",
        "DeleteFileTest::BackgroundPurgeTestMultipleJobs:DBClose"}});
  rocksdb::SyncPoint::GetInstance()->EnableProcessing();

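  // Each iterator deletion schedules a background purge on the HIGH-priority
  // pool. The sync-point dependency above keeps BGWorkPurge from proceeding
  // until ~DBImpl is waiting for its jobs, DoSleep adds delay so CloseDB()
  // runs while work is still queued, and GuardFinish (queued last) signals
  // once every queued job, including the purges, has completed.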
  delete itr1;
  env_->Schedule(&DeleteFileTest::DoSleep, this, Env::Priority::HIGH);
  delete itr2;
  env_->Schedule(&DeleteFileTest::GuardFinish, nullptr, Env::Priority::HIGH);
  CloseDB();

  TEST_SYNC_POINT("DeleteFileTest::BackgroundPurgeTestMultipleJobs:DBClose");
  // 1 sst after iterator deletion
  CheckFileTypeCounts(dbname_, 0, 1, 1);
  rocksdb::SyncPoint::GetInstance()->DisableProcessing();
}

TEST_F(DeleteFileTest, DeleteFileWithIterator) {
  CreateTwoLevels();
  ReadOptions options;
  Iterator* it = db_->NewIterator(options);
  std::vector<LiveFileMetaData> metadata;
  db_->GetLiveFilesMetaData(&metadata);

  std::string level2file = "";

  ASSERT_EQ((int)metadata.size(), 2);
  if (metadata[0].level == 1) {
    level2file = metadata[1].name;
  } else {
    level2file = metadata[0].name;
  }

  Status status = db_->DeleteFile(level2file);
  fprintf(stdout, "Deletion status %s: %s\n", level2file.c_str(),
          status.ToString().c_str());
  ASSERT_TRUE(status.ok());
  it->SeekToFirst();
  int numKeysIterated = 0;
  while (it->Valid()) {
    numKeysIterated++;
    it->Next();
  }
  ASSERT_EQ(numKeysIterated, 50000);
  delete it;
  CloseDB();
}

TEST_F(DeleteFileTest, DeleteLogFiles) {
  AddKeys(10, 0);
  VectorLogPtr logfiles;
  ASSERT_OK(db_->GetSortedWalFiles(logfiles));
  ASSERT_GT(logfiles.size(), 0UL);
  // Take the last log file, which is expected to be alive, and try to delete
  // it. This should not succeed because live logs are not allowed to be
  // deleted.
  std::unique_ptr<LogFile> alive_log = std::move(logfiles.back());
  ASSERT_EQ(alive_log->Type(), kAliveLogFile);
  ASSERT_OK(env_->FileExists(options_.wal_dir + "/" + alive_log->PathName()));
  fprintf(stdout, "Deleting alive log file %s\n",
          alive_log->PathName().c_str());
  ASSERT_TRUE(!db_->DeleteFile(alive_log->PathName()).ok());
  ASSERT_OK(env_->FileExists(options_.wal_dir + "/" + alive_log->PathName()));
  logfiles.clear();

  // Call Flush to create a new working log file and add more keys. Call Flush
  // again to flush out the memtable and move the alive log to the archive,
  // then try to delete the archived log file.
  FlushOptions fopts;
  ASSERT_OK(db_->Flush(fopts));
  AddKeys(10, 0);
  ASSERT_OK(db_->Flush(fopts));
  ASSERT_OK(db_->GetSortedWalFiles(logfiles));
  ASSERT_GT(logfiles.size(), 0UL);
  std::unique_ptr<LogFile> archived_log = std::move(logfiles.front());
  ASSERT_EQ(archived_log->Type(), kArchivedLogFile);
  ASSERT_OK(
      env_->FileExists(options_.wal_dir + "/" + archived_log->PathName()));
  fprintf(stdout, "Deleting archived log file %s\n",
          archived_log->PathName().c_str());
  ASSERT_OK(db_->DeleteFile(archived_log->PathName()));
  ASSERT_EQ(Status::NotFound(), env_->FileExists(options_.wal_dir + "/" +
                                                 archived_log->PathName()));
  CloseDB();
}

TEST_F(DeleteFileTest, DeleteNonDefaultColumnFamily) {
  CloseDB();
  DBOptions db_options;
  db_options.create_if_missing = true;
  db_options.create_missing_column_families = true;
  std::vector<ColumnFamilyDescriptor> column_families;
  column_families.emplace_back();
  column_families.emplace_back("new_cf", ColumnFamilyOptions());

  std::vector<rocksdb::ColumnFamilyHandle*> handles;
  rocksdb::DB* db;
  ASSERT_OK(DB::Open(db_options, dbname_, column_families, &handles, &db));

  Random rnd(5);
  for (int i = 0; i < 1000; ++i) {
    ASSERT_OK(db->Put(WriteOptions(), handles[1], test::RandomKey(&rnd, 10),
                      test::RandomKey(&rnd, 10)));
  }
  ASSERT_OK(db->Flush(FlushOptions(), handles[1]));
  for (int i = 0; i < 1000; ++i) {
    ASSERT_OK(db->Put(WriteOptions(), handles[1], test::RandomKey(&rnd, 10),
                      test::RandomKey(&rnd, 10)));
  }
  ASSERT_OK(db->Flush(FlushOptions(), handles[1]));

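  // Each flush produced one SST for "new_cf". Identify the older and newer
  // file by smallest sequence number; only the older one may be deleted.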
  std::vector<LiveFileMetaData> metadata;
  db->GetLiveFilesMetaData(&metadata);
  ASSERT_EQ(2U, metadata.size());
  ASSERT_EQ("new_cf", metadata[0].column_family_name);
  ASSERT_EQ("new_cf", metadata[1].column_family_name);
  auto old_file = metadata[0].smallest_seqno < metadata[1].smallest_seqno
                      ? metadata[0].name
                      : metadata[1].name;
  auto new_file = metadata[0].smallest_seqno > metadata[1].smallest_seqno
                      ? metadata[0].name
                      : metadata[1].name;
  ASSERT_TRUE(db->DeleteFile(new_file).IsInvalidArgument());
  ASSERT_OK(db->DeleteFile(old_file));

  {
    std::unique_ptr<Iterator> itr(db->NewIterator(ReadOptions(), handles[1]));
    int count = 0;
    for (itr->SeekToFirst(); itr->Valid(); itr->Next()) {
      ASSERT_OK(itr->status());
      ++count;
    }
    ASSERT_EQ(count, 1000);
  }

  delete handles[0];
  delete handles[1];
  delete db;

  ASSERT_OK(DB::Open(db_options, dbname_, column_families, &handles, &db));
  {
    std::unique_ptr<Iterator> itr(db->NewIterator(ReadOptions(), handles[1]));
    int count = 0;
    for (itr->SeekToFirst(); itr->Valid(); itr->Next()) {
      ASSERT_OK(itr->status());
      ++count;
    }
    ASSERT_EQ(count, 1000);
  }

  delete handles[0];
  delete handles[1];
  delete db;
}

}  // namespace rocksdb

int main(int argc, char** argv) {
  ::testing::InitGoogleTest(&argc, argv);
  return RUN_ALL_TESTS();
}

#else
#include <stdio.h>

int main(int /*argc*/, char** /*argv*/) {
  fprintf(stderr,
          "SKIPPED as DBImpl::DeleteFile is not supported in ROCKSDB_LITE\n");
  return 0;
}

#endif  // !ROCKSDB_LITE