]> git.proxmox.com Git - ceph.git/blob - ceph/src/rocksdb/db/manual_compaction_test.cc
update sources to ceph Nautilus 14.2.1
[ceph.git] / ceph / src / rocksdb / db / manual_compaction_test.cc
1 // Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
2 // This source code is licensed under both the GPLv2 (found in the
3 // COPYING file in the root directory) and Apache 2.0 License
4 // (found in the LICENSE.Apache file in the root directory).
5 //
6 // Test for issue 178: a manual compaction causes deleted data to reappear.
7 #include <iostream>
8 #include <sstream>
9 #include <cstdlib>
10
11 #include "rocksdb/db.h"
12 #include "rocksdb/compaction_filter.h"
13 #include "rocksdb/slice.h"
14 #include "rocksdb/write_batch.h"
15 #include "util/testharness.h"
16 #include "port/port.h"
17
18 using namespace rocksdb;
19
20 namespace {
21
22 // Reasoning: previously the number was 1100000. Since the keys are written
23 // to the batch in one write each, every write will result in one SST file.
24 // We reduced the write_buffer_size to 1K to get basically the same effect
25 // with far fewer keys, which results in a shorter test runtime.
27 const int kNumKeys = 1100;
28
// Builds the primary test key for index `i`, e.g. Key1(7) == "my_key_7".
std::string Key1(int i) {
  std::ostringstream key;
  key << "my_key_" << i;
  return key.str();
}
34
35 std::string Key2(int i) {
36 return Key1(i) + "_xxx";
37 }
38
// Test fixture: computes a per-thread database path and wipes any state a
// previous run may have left behind before each test body executes.
class ManualCompactionTest : public testing::Test {
 public:
  ManualCompactionTest() {
    // Get rid of any state from an old run.
    dbname_ = rocksdb::test::PerThreadDBPath("rocksdb_cbug_test");
    DestroyDB(dbname_, rocksdb::Options());
  }

  // Path of the database each test opens; cleared in the constructor above.
  std::string dbname_;
};
49
50 class DestroyAllCompactionFilter : public CompactionFilter {
51 public:
52 DestroyAllCompactionFilter() {}
53
54 virtual bool Filter(int /*level*/, const Slice& /*key*/,
55 const Slice& existing_value, std::string* /*new_value*/,
56 bool* /*value_changed*/) const override {
57 return existing_value.ToString() == "destroy";
58 }
59
60 virtual const char* Name() const override {
61 return "DestroyAllCompactionFilter";
62 }
63 };
64
65 TEST_F(ManualCompactionTest, CompactTouchesAllKeys) {
66 for (int iter = 0; iter < 2; ++iter) {
67 DB* db;
68 Options options;
69 if (iter == 0) { // level compaction
70 options.num_levels = 3;
71 options.compaction_style = kCompactionStyleLevel;
72 } else { // universal compaction
73 options.compaction_style = kCompactionStyleUniversal;
74 }
75 options.create_if_missing = true;
76 options.compression = rocksdb::kNoCompression;
77 options.compaction_filter = new DestroyAllCompactionFilter();
78 ASSERT_OK(DB::Open(options, dbname_, &db));
79
80 db->Put(WriteOptions(), Slice("key1"), Slice("destroy"));
81 db->Put(WriteOptions(), Slice("key2"), Slice("destroy"));
82 db->Put(WriteOptions(), Slice("key3"), Slice("value3"));
83 db->Put(WriteOptions(), Slice("key4"), Slice("destroy"));
84
85 Slice key4("key4");
86 db->CompactRange(CompactRangeOptions(), nullptr, &key4);
87 Iterator* itr = db->NewIterator(ReadOptions());
88 itr->SeekToFirst();
89 ASSERT_TRUE(itr->Valid());
90 ASSERT_EQ("key3", itr->key().ToString());
91 itr->Next();
92 ASSERT_TRUE(!itr->Valid());
93 delete itr;
94
95 delete options.compaction_filter;
96 delete db;
97 DestroyDB(dbname_, options);
98 }
99 }
100
101 TEST_F(ManualCompactionTest, Test) {
102 // Open database. Disable compression since it affects the creation
103 // of layers and the code below is trying to test against a very
104 // specific scenario.
105 rocksdb::DB* db;
106 rocksdb::Options db_options;
107 db_options.write_buffer_size = 1024;
108 db_options.create_if_missing = true;
109 db_options.compression = rocksdb::kNoCompression;
110 ASSERT_OK(rocksdb::DB::Open(db_options, dbname_, &db));
111
112 // create first key range
113 rocksdb::WriteBatch batch;
114 for (int i = 0; i < kNumKeys; i++) {
115 batch.Put(Key1(i), "value for range 1 key");
116 }
117 ASSERT_OK(db->Write(rocksdb::WriteOptions(), &batch));
118
119 // create second key range
120 batch.Clear();
121 for (int i = 0; i < kNumKeys; i++) {
122 batch.Put(Key2(i), "value for range 2 key");
123 }
124 ASSERT_OK(db->Write(rocksdb::WriteOptions(), &batch));
125
126 // delete second key range
127 batch.Clear();
128 for (int i = 0; i < kNumKeys; i++) {
129 batch.Delete(Key2(i));
130 }
131 ASSERT_OK(db->Write(rocksdb::WriteOptions(), &batch));
132
133 // compact database
134 std::string start_key = Key1(0);
135 std::string end_key = Key1(kNumKeys - 1);
136 rocksdb::Slice least(start_key.data(), start_key.size());
137 rocksdb::Slice greatest(end_key.data(), end_key.size());
138
139 // commenting out the line below causes the example to work correctly
140 db->CompactRange(CompactRangeOptions(), &least, &greatest);
141
142 // count the keys
143 rocksdb::Iterator* iter = db->NewIterator(rocksdb::ReadOptions());
144 int num_keys = 0;
145 for (iter->SeekToFirst(); iter->Valid(); iter->Next()) {
146 num_keys++;
147 }
148 delete iter;
149 ASSERT_EQ(kNumKeys, num_keys) << "Bad number of keys";
150
151 // close database
152 delete db;
153 DestroyDB(dbname_, rocksdb::Options());
154 }
155
156 } // anonymous namespace
157
// Standalone gtest entry point: runs every test registered in this file.
int main(int argc, char** argv) {
  ::testing::InitGoogleTest(&argc, argv);
  return RUN_ALL_TESTS();
}