]> git.proxmox.com Git - ceph.git/blob - ceph/src/rocksdb/db/db_log_iter_test.cc
add subtree-ish sources for 12.0.3
[ceph.git] / ceph / src / rocksdb / db / db_log_iter_test.cc
1 // Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
2 // This source code is licensed under the BSD-style license found in the
3 // LICENSE file in the root directory of this source tree. An additional grant
4 // of patent rights can be found in the PATENTS file in the same directory.
5 //
6 // Copyright (c) 2011 The LevelDB Authors. All rights reserved.
7 // Use of this source code is governed by a BSD-style license that can be
8 // found in the LICENSE file. See the AUTHORS file for names of contributors.
9
10 // Introduction of SyncPoint effectively disabled building and running this test
11 // in Release build.
12 // which is a pity, it is a good test
13 #if !defined(ROCKSDB_LITE)
14
15 #include "db/db_test_util.h"
16 #include "port/stack_trace.h"
17
18 namespace rocksdb {
19
20 class DBTestXactLogIterator : public DBTestBase {
21 public:
22 DBTestXactLogIterator() : DBTestBase("/db_log_iter_test") {}
23
24 std::unique_ptr<TransactionLogIterator> OpenTransactionLogIter(
25 const SequenceNumber seq) {
26 unique_ptr<TransactionLogIterator> iter;
27 Status status = dbfull()->GetUpdatesSince(seq, &iter);
28 EXPECT_OK(status);
29 EXPECT_TRUE(iter->Valid());
30 return iter;
31 }
32 };
33
34 namespace {
35 SequenceNumber ReadRecords(
36 std::unique_ptr<TransactionLogIterator>& iter,
37 int& count) {
38 count = 0;
39 SequenceNumber lastSequence = 0;
40 BatchResult res;
41 while (iter->Valid()) {
42 res = iter->GetBatch();
43 EXPECT_TRUE(res.sequence > lastSequence);
44 ++count;
45 lastSequence = res.sequence;
46 EXPECT_OK(iter->status());
47 iter->Next();
48 }
49 return res.sequence;
50 }
51
52 void ExpectRecords(
53 const int expected_no_records,
54 std::unique_ptr<TransactionLogIterator>& iter) {
55 int num_records;
56 ReadRecords(iter, num_records);
57 ASSERT_EQ(num_records, expected_no_records);
58 }
59 } // namespace
60
61 TEST_F(DBTestXactLogIterator, TransactionLogIterator) {
62 do {
63 Options options = OptionsForLogIterTest();
64 DestroyAndReopen(options);
65 CreateAndReopenWithCF({"pikachu"}, options);
66 Put(0, "key1", DummyString(1024));
67 Put(1, "key2", DummyString(1024));
68 Put(1, "key2", DummyString(1024));
69 ASSERT_EQ(dbfull()->GetLatestSequenceNumber(), 3U);
70 {
71 auto iter = OpenTransactionLogIter(0);
72 ExpectRecords(3, iter);
73 }
74 ReopenWithColumnFamilies({"default", "pikachu"}, options);
75 env_->SleepForMicroseconds(2 * 1000 * 1000);
76 {
77 Put(0, "key4", DummyString(1024));
78 Put(1, "key5", DummyString(1024));
79 Put(0, "key6", DummyString(1024));
80 }
81 {
82 auto iter = OpenTransactionLogIter(0);
83 ExpectRecords(6, iter);
84 }
85 } while (ChangeCompactOptions());
86 }
87
88 #ifndef NDEBUG // sync point is not included with DNDEBUG build
// Regression test: GetUpdatesSince must not miss records when a WAL file is
// moved to the archive directory concurrently with the WAL listing. Sync
// points force that interleaving deterministically. Code left untouched;
// the statement order here is exactly what reproduces the race.
TEST_F(DBTestXactLogIterator, TransactionLogIteratorRace) {
  static const int LOG_ITERATOR_RACE_TEST_COUNT = 2;
  // Each row defines two happens-before edges (see LoadDependency below):
  // listing-part-1 -> purge-part-1, then purge-part-2 -> listing-part-2,
  // i.e. the purge runs entirely in the middle of the WAL listing. Two rows
  // cover the two listing entry points in WalManager.
  static const char* sync_points[LOG_ITERATOR_RACE_TEST_COUNT][4] = {
      {"WalManager::GetSortedWalFiles:1", "WalManager::PurgeObsoleteFiles:1",
       "WalManager::PurgeObsoleteFiles:2", "WalManager::GetSortedWalFiles:2"},
      {"WalManager::GetSortedWalsOfType:1",
       "WalManager::PurgeObsoleteFiles:1",
       "WalManager::PurgeObsoleteFiles:2",
       "WalManager::GetSortedWalsOfType:2"}};
  for (int test = 0; test < LOG_ITERATOR_RACE_TEST_COUNT; ++test) {
    // Setup sync point dependency to reproduce the race condition of
    // a log file moved to archived dir, in the middle of GetSortedWalFiles
    rocksdb::SyncPoint::GetInstance()->LoadDependency(
      { { sync_points[test][0], sync_points[test][1] },
        { sync_points[test][2], sync_points[test][3] },
      });

    do {
      // Reset sync-point state from the previous iteration; processing is
      // re-enabled only after the baseline writes below.
      rocksdb::SyncPoint::GetInstance()->ClearTrace();
      rocksdb::SyncPoint::GetInstance()->DisableProcessing();
      Options options = OptionsForLogIterTest();
      DestroyAndReopen(options);
      // Each Put+Flush pair leaves one sealed WAL file behind, so several
      // log files exist for the listing/purge to race over.
      Put("key1", DummyString(1024));
      dbfull()->Flush(FlushOptions());
      Put("key2", DummyString(1024));
      dbfull()->Flush(FlushOptions());
      Put("key3", DummyString(1024));
      dbfull()->Flush(FlushOptions());
      Put("key4", DummyString(1024));
      ASSERT_EQ(dbfull()->GetLatestSequenceNumber(), 4U);

      {
        // Baseline: all four records readable before the race is staged.
        auto iter = OpenTransactionLogIter(0);
        ExpectRecords(4, iter);
      }

      rocksdb::SyncPoint::GetInstance()->EnableProcessing();
      // trigger async flush, and log move. Well, log move will
      // wait until the GetSortedWalFiles:1 to reproduce the race
      // condition
      FlushOptions flush_options;
      flush_options.wait = false;
      dbfull()->Flush(flush_options);

      // "key5" would be written in a new memtable and log
      Put("key5", DummyString(1024));
      {
        // this iter would miss "key4" if not fixed
        auto iter = OpenTransactionLogIter(0);
        ExpectRecords(5, iter);
      }
    } while (ChangeCompactOptions());
  }
}
143 #endif
144
145 TEST_F(DBTestXactLogIterator, TransactionLogIteratorStallAtLastRecord) {
146 do {
147 Options options = OptionsForLogIterTest();
148 DestroyAndReopen(options);
149 Put("key1", DummyString(1024));
150 auto iter = OpenTransactionLogIter(0);
151 ASSERT_OK(iter->status());
152 ASSERT_TRUE(iter->Valid());
153 iter->Next();
154 ASSERT_TRUE(!iter->Valid());
155 ASSERT_OK(iter->status());
156 Put("key2", DummyString(1024));
157 iter->Next();
158 ASSERT_OK(iter->status());
159 ASSERT_TRUE(iter->Valid());
160 } while (ChangeCompactOptions());
161 }
162
163 TEST_F(DBTestXactLogIterator, TransactionLogIteratorCheckAfterRestart) {
164 do {
165 Options options = OptionsForLogIterTest();
166 DestroyAndReopen(options);
167 Put("key1", DummyString(1024));
168 Put("key2", DummyString(1023));
169 dbfull()->Flush(FlushOptions());
170 Reopen(options);
171 auto iter = OpenTransactionLogIter(0);
172 ExpectRecords(2, iter);
173 } while (ChangeCompactOptions());
174 }
175
176 TEST_F(DBTestXactLogIterator, TransactionLogIteratorCorruptedLog) {
177 do {
178 Options options = OptionsForLogIterTest();
179 DestroyAndReopen(options);
180 for (int i = 0; i < 1024; i++) {
181 Put("key"+ToString(i), DummyString(10));
182 }
183 dbfull()->Flush(FlushOptions());
184 // Corrupt this log to create a gap
185 rocksdb::VectorLogPtr wal_files;
186 ASSERT_OK(dbfull()->GetSortedWalFiles(wal_files));
187 const auto logfile_path = dbname_ + "/" + wal_files.front()->PathName();
188 if (mem_env_) {
189 mem_env_->Truncate(logfile_path, wal_files.front()->SizeFileBytes() / 2);
190 } else {
191 ASSERT_EQ(0, truncate(logfile_path.c_str(),
192 wal_files.front()->SizeFileBytes() / 2));
193 }
194
195 // Insert a new entry to a new log file
196 Put("key1025", DummyString(10));
197 // Try to read from the beginning. Should stop before the gap and read less
198 // than 1025 entries
199 auto iter = OpenTransactionLogIter(0);
200 int count;
201 SequenceNumber last_sequence_read = ReadRecords(iter, count);
202 ASSERT_LT(last_sequence_read, 1025U);
203 // Try to read past the gap, should be able to seek to key1025
204 auto iter2 = OpenTransactionLogIter(last_sequence_read + 1);
205 ExpectRecords(1, iter2);
206 } while (ChangeCompactOptions());
207 }
208
209 TEST_F(DBTestXactLogIterator, TransactionLogIteratorBatchOperations) {
210 do {
211 Options options = OptionsForLogIterTest();
212 DestroyAndReopen(options);
213 CreateAndReopenWithCF({"pikachu"}, options);
214 WriteBatch batch;
215 batch.Put(handles_[1], "key1", DummyString(1024));
216 batch.Put(handles_[0], "key2", DummyString(1024));
217 batch.Put(handles_[1], "key3", DummyString(1024));
218 batch.Delete(handles_[0], "key2");
219 dbfull()->Write(WriteOptions(), &batch);
220 Flush(1);
221 Flush(0);
222 ReopenWithColumnFamilies({"default", "pikachu"}, options);
223 Put(1, "key4", DummyString(1024));
224 auto iter = OpenTransactionLogIter(3);
225 ExpectRecords(2, iter);
226 } while (ChangeCompactOptions());
227 }
228
229 TEST_F(DBTestXactLogIterator, TransactionLogIteratorBlobs) {
230 Options options = OptionsForLogIterTest();
231 DestroyAndReopen(options);
232 CreateAndReopenWithCF({"pikachu"}, options);
233 {
234 WriteBatch batch;
235 batch.Put(handles_[1], "key1", DummyString(1024));
236 batch.Put(handles_[0], "key2", DummyString(1024));
237 batch.PutLogData(Slice("blob1"));
238 batch.Put(handles_[1], "key3", DummyString(1024));
239 batch.PutLogData(Slice("blob2"));
240 batch.Delete(handles_[0], "key2");
241 dbfull()->Write(WriteOptions(), &batch);
242 ReopenWithColumnFamilies({"default", "pikachu"}, options);
243 }
244
245 auto res = OpenTransactionLogIter(0)->GetBatch();
246 struct Handler : public WriteBatch::Handler {
247 std::string seen;
248 virtual Status PutCF(uint32_t cf, const Slice& key,
249 const Slice& value) override {
250 seen += "Put(" + ToString(cf) + ", " + key.ToString() + ", " +
251 ToString(value.size()) + ")";
252 return Status::OK();
253 }
254 virtual Status MergeCF(uint32_t cf, const Slice& key,
255 const Slice& value) override {
256 seen += "Merge(" + ToString(cf) + ", " + key.ToString() + ", " +
257 ToString(value.size()) + ")";
258 return Status::OK();
259 }
260 virtual void LogData(const Slice& blob) override {
261 seen += "LogData(" + blob.ToString() + ")";
262 }
263 virtual Status DeleteCF(uint32_t cf, const Slice& key) override {
264 seen += "Delete(" + ToString(cf) + ", " + key.ToString() + ")";
265 return Status::OK();
266 }
267 } handler;
268 res.writeBatchPtr->Iterate(&handler);
269 ASSERT_EQ(
270 "Put(1, key1, 1024)"
271 "Put(0, key2, 1024)"
272 "LogData(blob1)"
273 "Put(1, key3, 1024)"
274 "LogData(blob2)"
275 "Delete(0, key2)",
276 handler.seen);
277 }
278 } // namespace rocksdb
279
280 #endif // !defined(ROCKSDB_LITE)
281
282 int main(int argc, char** argv) {
283 #if !defined(ROCKSDB_LITE)
284 rocksdb::port::InstallStackTraceHandler();
285 ::testing::InitGoogleTest(&argc, argv);
286 return RUN_ALL_TESTS();
287 #else
288 return 0;
289 #endif
290 }