// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
// This source code is licensed under both the GPLv2 (found in the
// COPYING file in the root directory) and Apache 2.0 License
// (found in the LICENSE.Apache file in the root directory).
//
// Test for issue 178: a manual compaction causes deleted data to reappear.
#include <iostream>
#include <sstream>
#include <cstdlib>

#include "rocksdb/db.h"
#include "rocksdb/compaction_filter.h"
#include "rocksdb/slice.h"
#include "rocksdb/write_batch.h"
#include "util/testharness.h"
#include "port/port.h"

using namespace rocksdb;

namespace {
// Reasoning: previously the number was 1100000. Since the keys are written
// in one batch, each flush of the write buffer produces one SST file. We
// reduced write_buffer_size to 1K to get essentially the same effect (many
// small SST files) with far fewer keys, which shortens the test runtime.
const int kNumKeys = 1100;
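// Rough sizing (an illustrative estimate, not from the original source):
// 1100 keys at roughly 30 bytes of key+value each is ~33KB, i.e. dozens of
// 1K write-buffer flushes, so the batch still spreads across many SST files.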

std::string Key1(int i) {
  char buf[100];
  snprintf(buf, sizeof(buf), "my_key_%d", i);
  return buf;
}

std::string Key2(int i) {
  return Key1(i) + "_xxx";
}

class ManualCompactionTest : public testing::Test {
 public:
  ManualCompactionTest() {
    // Get rid of any state from an old run.
    dbname_ = rocksdb::test::PerThreadDBPath("rocksdb_cbug_test");
    DestroyDB(dbname_, rocksdb::Options());
  }

  std::string dbname_;
};

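// Returning true from Filter() asks compaction to drop the entry, so any
// key whose value is exactly "destroy" is removed once compaction sees it.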
class DestroyAllCompactionFilter : public CompactionFilter {
 public:
  DestroyAllCompactionFilter() {}

  bool Filter(int /*level*/, const Slice& /*key*/, const Slice& existing_value,
              std::string* /*new_value*/,
              bool* /*value_changed*/) const override {
    return existing_value.ToString() == "destroy";
  }

  const char* Name() const override { return "DestroyAllCompactionFilter"; }
};

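// Runs once with level compaction and once with universal compaction. Only
// "key3" should survive: every key whose value is "destroy" must be handed
// to the filter, including the end key of the compacted range.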
TEST_F(ManualCompactionTest, CompactTouchesAllKeys) {
  for (int iter = 0; iter < 2; ++iter) {
    DB* db;
    Options options;
    if (iter == 0) {  // level compaction
      options.num_levels = 3;
      options.compaction_style = kCompactionStyleLevel;
    } else {  // universal compaction
      options.compaction_style = kCompactionStyleUniversal;
    }
    options.create_if_missing = true;
    options.compression = rocksdb::kNoCompression;
    options.compaction_filter = new DestroyAllCompactionFilter();
    ASSERT_OK(DB::Open(options, dbname_, &db));

    ASSERT_OK(db->Put(WriteOptions(), Slice("key1"), Slice("destroy")));
    ASSERT_OK(db->Put(WriteOptions(), Slice("key2"), Slice("destroy")));
    ASSERT_OK(db->Put(WriteOptions(), Slice("key3"), Slice("value3")));
    ASSERT_OK(db->Put(WriteOptions(), Slice("key4"), Slice("destroy")));

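    // Compact from the start of the keyspace (begin == nullptr) up to "key4".
    // The test expects the end key itself to be fed through the filter too,
    // hence the name CompactTouchesAllKeys.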
    Slice key4("key4");
    ASSERT_OK(db->CompactRange(CompactRangeOptions(), nullptr, &key4));
    Iterator* itr = db->NewIterator(ReadOptions());
    itr->SeekToFirst();
    ASSERT_TRUE(itr->Valid());
    ASSERT_EQ("key3", itr->key().ToString());
    itr->Next();
    ASSERT_FALSE(itr->Valid());
    delete itr;

    delete options.compaction_filter;
    delete db;
    DestroyDB(dbname_, options);
  }
}

TEST_F(ManualCompactionTest, Test) {
  // Open database. Disable compression since it affects the creation
  // of layers and the code below is trying to test against a very
  // specific scenario.
  rocksdb::DB* db;
  rocksdb::Options db_options;
  db_options.write_buffer_size = 1024;
  db_options.create_if_missing = true;
  db_options.compression = rocksdb::kNoCompression;
  ASSERT_OK(rocksdb::DB::Open(db_options, dbname_, &db));

  // create first key range
  rocksdb::WriteBatch batch;
  for (int i = 0; i < kNumKeys; i++) {
    batch.Put(Key1(i), "value for range 1 key");
  }
  ASSERT_OK(db->Write(rocksdb::WriteOptions(), &batch));

  // create second key range
  batch.Clear();
  for (int i = 0; i < kNumKeys; i++) {
    batch.Put(Key2(i), "value for range 2 key");
  }
  ASSERT_OK(db->Write(rocksdb::WriteOptions(), &batch));

  // delete second key range
  batch.Clear();
  for (int i = 0; i < kNumKeys; i++) {
    batch.Delete(Key2(i));
  }
  ASSERT_OK(db->Write(rocksdb::WriteOptions(), &batch));

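  // At this point the DB holds live Key1(...) entries plus Key2(...) values
  // shadowed by their tombstones, spread over many small SST files thanks to
  // the 1K write buffer.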
  // compact database
  std::string start_key = Key1(0);
  std::string end_key = Key1(kNumKeys - 1);
  rocksdb::Slice least(start_key.data(), start_key.size());
  rocksdb::Slice greatest(end_key.data(), end_key.size());

  // commenting out the line below causes the example to work correctly
  ASSERT_OK(db->CompactRange(CompactRangeOptions(), &least, &greatest));

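  // With the original issue-178 bug, this compaction could resurrect the
  // deleted Key2(...) entries, so the scan below would count more than
  // kNumKeys keys.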
  // count the keys
  rocksdb::Iterator* iter = db->NewIterator(rocksdb::ReadOptions());
  int num_keys = 0;
  for (iter->SeekToFirst(); iter->Valid(); iter->Next()) {
    num_keys++;
  }
  delete iter;
  ASSERT_EQ(kNumKeys, num_keys) << "Bad number of keys";

  // close database
  delete db;
  DestroyDB(dbname_, rocksdb::Options());
}

}  // anonymous namespace

int main(int argc, char** argv) {
  ::testing::InitGoogleTest(&argc, argv);
  return RUN_ALL_TESTS();
}