//  Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
//  This source code is licensed under both the GPLv2 (found in the
//  COPYING file in the root directory) and Apache 2.0 License
//  (found in the LICENSE.Apache file in the root directory).
//
// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file. See the AUTHORS file for names of contributors.
#include "memtable/skiplist.h"

#include <atomic>
#include <set>

#include "rocksdb/env.h"
#include "util/arena.h"
#include "util/hash.h"
#include "util/random.h"
#include "util/testharness.h"

namespace rocksdb {

typedef uint64_t Key;

// Three-way comparator over raw 64-bit keys, in the -1 / 0 / +1
// convention that SkipList expects from its Comparator parameter.
struct TestComparator {
  int operator()(const Key& lhs, const Key& rhs) const {
    if (lhs == rhs) {
      return 0;
    }
    return (lhs < rhs) ? -1 : +1;
  }
};

// Empty gtest fixture: the tests share no state, the fixture only names
// the test suite.
class SkipTest : public testing::Test {};

36 | TEST_F(SkipTest, Empty) { | |
37 | Arena arena; | |
38 | TestComparator cmp; | |
39 | SkipList<Key, TestComparator> list(cmp, &arena); | |
40 | ASSERT_TRUE(!list.Contains(10)); | |
41 | ||
42 | SkipList<Key, TestComparator>::Iterator iter(&list); | |
43 | ASSERT_TRUE(!iter.Valid()); | |
44 | iter.SeekToFirst(); | |
45 | ASSERT_TRUE(!iter.Valid()); | |
46 | iter.Seek(100); | |
47 | ASSERT_TRUE(!iter.Valid()); | |
48 | iter.SeekForPrev(100); | |
49 | ASSERT_TRUE(!iter.Valid()); | |
50 | iter.SeekToLast(); | |
51 | ASSERT_TRUE(!iter.Valid()); | |
52 | } | |
53 | ||
54 | TEST_F(SkipTest, InsertAndLookup) { | |
55 | const int N = 2000; | |
56 | const int R = 5000; | |
57 | Random rnd(1000); | |
58 | std::set<Key> keys; | |
59 | Arena arena; | |
60 | TestComparator cmp; | |
61 | SkipList<Key, TestComparator> list(cmp, &arena); | |
62 | for (int i = 0; i < N; i++) { | |
63 | Key key = rnd.Next() % R; | |
64 | if (keys.insert(key).second) { | |
65 | list.Insert(key); | |
66 | } | |
67 | } | |
68 | ||
69 | for (int i = 0; i < R; i++) { | |
70 | if (list.Contains(i)) { | |
71 | ASSERT_EQ(keys.count(i), 1U); | |
72 | } else { | |
73 | ASSERT_EQ(keys.count(i), 0U); | |
74 | } | |
75 | } | |
76 | ||
77 | // Simple iterator tests | |
78 | { | |
79 | SkipList<Key, TestComparator>::Iterator iter(&list); | |
80 | ASSERT_TRUE(!iter.Valid()); | |
81 | ||
82 | iter.Seek(0); | |
83 | ASSERT_TRUE(iter.Valid()); | |
84 | ASSERT_EQ(*(keys.begin()), iter.key()); | |
85 | ||
86 | iter.SeekForPrev(R - 1); | |
87 | ASSERT_TRUE(iter.Valid()); | |
88 | ASSERT_EQ(*(keys.rbegin()), iter.key()); | |
89 | ||
90 | iter.SeekToFirst(); | |
91 | ASSERT_TRUE(iter.Valid()); | |
92 | ASSERT_EQ(*(keys.begin()), iter.key()); | |
93 | ||
94 | iter.SeekToLast(); | |
95 | ASSERT_TRUE(iter.Valid()); | |
96 | ASSERT_EQ(*(keys.rbegin()), iter.key()); | |
97 | } | |
98 | ||
99 | // Forward iteration test | |
100 | for (int i = 0; i < R; i++) { | |
101 | SkipList<Key, TestComparator>::Iterator iter(&list); | |
102 | iter.Seek(i); | |
103 | ||
104 | // Compare against model iterator | |
105 | std::set<Key>::iterator model_iter = keys.lower_bound(i); | |
106 | for (int j = 0; j < 3; j++) { | |
107 | if (model_iter == keys.end()) { | |
108 | ASSERT_TRUE(!iter.Valid()); | |
109 | break; | |
110 | } else { | |
111 | ASSERT_TRUE(iter.Valid()); | |
112 | ASSERT_EQ(*model_iter, iter.key()); | |
113 | ++model_iter; | |
114 | iter.Next(); | |
115 | } | |
116 | } | |
117 | } | |
118 | ||
119 | // Backward iteration test | |
120 | for (int i = 0; i < R; i++) { | |
121 | SkipList<Key, TestComparator>::Iterator iter(&list); | |
122 | iter.SeekForPrev(i); | |
123 | ||
124 | // Compare against model iterator | |
125 | std::set<Key>::iterator model_iter = keys.upper_bound(i); | |
126 | for (int j = 0; j < 3; j++) { | |
127 | if (model_iter == keys.begin()) { | |
128 | ASSERT_TRUE(!iter.Valid()); | |
129 | break; | |
130 | } else { | |
131 | ASSERT_TRUE(iter.Valid()); | |
132 | ASSERT_EQ(*--model_iter, iter.key()); | |
133 | iter.Prev(); | |
134 | } | |
135 | } | |
136 | } | |
137 | } | |
138 | ||
// We want to make sure that with a single writer and multiple
// concurrent readers (with no synchronization other than when a
// reader's iterator is created), the reader always observes all the
// data that was present in the skip list when the iterator was
// constructed.  Because insertions are happening concurrently, we may
// also observe new values that were inserted since the iterator was
// constructed, but we should never miss any values that were present
// at iterator construction time.
//
// We generate multi-part keys:
//     <key,gen,hash>
// where:
//     key is in range [0..K-1]
//     gen is a generation number for key
//     hash is hash(key,gen)
//
// The insertion code picks a random key, sets gen to be 1 + the last
// generation number inserted for that key, and sets hash to Hash(key,gen).
//
// At the beginning of a read, we snapshot the last inserted
// generation number for each key.  We then iterate, including random
// calls to Next() and Seek().  For every key we encounter, we
// check that it is either expected given the initial snapshot or has
// been concurrently added since the iterator started.
// Scaffolding for the single-writer / multi-reader stress test described
// in the comment above.  Keys are packed as <key:24 | gen:32 | hash:8>
// inside a 64-bit value; the hash byte lets a reader self-validate any
// key it observes without extra synchronization.
class ConcurrentTest {
 private:
  // Number of distinct logical keys in the test key space.
  static const uint32_t K = 4;

  // Accessors for the three packed fields of a 64-bit test key.
  static uint64_t key(Key key) { return (key >> 40); }
  static uint64_t gen(Key key) { return (key >> 8) & 0xffffffffu; }
  static uint64_t hash(Key key) { return key & 0xff; }

  // Deterministic hash of the (key, generation) pair; only the low
  // 8 bits are stored in the packed key.
  static uint64_t HashNumbers(uint64_t k, uint64_t g) {
    uint64_t data[2] = { k, g };
    return Hash(reinterpret_cast<char*>(data), sizeof(data), 0);
  }

  // Packs (k, g) plus their hash byte into a single Key.
  static Key MakeKey(uint64_t k, uint64_t g) {
    assert(sizeof(Key) == sizeof(uint64_t));
    assert(k <= K);  // We sometimes pass K to seek to the end of the skiplist
    assert(g <= 0xffffffffu);
    return ((k << 40) | (g << 8) | (HashNumbers(k, g) & 0xff));
  }

  // A key is valid iff its stored hash byte matches a recomputed hash
  // of its (key, gen) fields — detects torn/corrupt reads.
  static bool IsValidKey(Key k) {
    return hash(k) == (HashNumbers(key(k), gen(k)) & 0xff);
  }

  // Picks a seek target: 10% beginning, 10% one-past-the-end (key == K),
  // 80% the start of a random key's generation range.
  static Key RandomTarget(Random* rnd) {
    switch (rnd->Next() % 10) {
      case 0:
        // Seek to beginning
        return MakeKey(0, 0);
      case 1:
        // Seek to end
        return MakeKey(K, 0);
      default:
        // Seek to middle
        return MakeKey(rnd->Next() % K, 0);
    }
  }

  // Per-key generation
  struct State {
    std::atomic<int> generation[K];
    void Set(int k, int v) {
      generation[k].store(v, std::memory_order_release);
    }
    int Get(int k) { return generation[k].load(std::memory_order_acquire); }

    State() {
      for (unsigned int k = 0; k < K; k++) {
        Set(k, 0);
      }
    }
  };

  // Current state of the test
  State current_;

  Arena arena_;

  // SkipList is not protected by mu_.  We just use a single writer
  // thread to modify it.
  SkipList<Key, TestComparator> list_;

 public:
  ConcurrentTest() : list_(TestComparator(), &arena_) {}

  // REQUIRES: External synchronization
  void WriteStep(Random* rnd) {
    const uint32_t k = rnd->Next() % K;
    const int g = current_.Get(k) + 1;
    const Key new_key = MakeKey(k, g);
    // Insert into the list BEFORE publishing the generation via the
    // release store in Set(): a reader that observes generation g must
    // then be able to find key (k, g) in the list.
    list_.Insert(new_key);
    current_.Set(k, g);
  }

  // One reader pass: walks forward from a random target, checking that
  // every key skipped over was not committed at iterator-construction
  // time, and that every key observed self-validates.  Safe to run
  // concurrently with a single writer.
  void ReadStep(Random* rnd) {
    // Remember the initial committed state of the skiplist.
    State initial_state;
    for (unsigned int k = 0; k < K; k++) {
      initial_state.Set(k, current_.Get(k));
    }

    Key pos = RandomTarget(rnd);
    SkipList<Key, TestComparator>::Iterator iter(&list_);
    iter.Seek(pos);
    while (true) {
      Key current;
      if (!iter.Valid()) {
        // Treat end-of-list as the sentinel key (K, 0).
        current = MakeKey(K, 0);
      } else {
        current = iter.key();
        ASSERT_TRUE(IsValidKey(current)) << current;
      }
      ASSERT_LE(pos, current) << "should not go backwards";

      // Verify that everything in [pos,current) was not present in
      // initial_state.
      while (pos < current) {
        ASSERT_LT(key(pos), K) << pos;

        // Note that generation 0 is never inserted, so it is ok if
        // <*,0,*> is missing.
        ASSERT_TRUE((gen(pos) == 0U) ||
                    (gen(pos) > static_cast<uint64_t>(initial_state.Get(
                        static_cast<int>(key(pos))))))
            << "key: " << key(pos) << "; gen: " << gen(pos)
            << "; initgen: " << initial_state.Get(static_cast<int>(key(pos)));

        // Advance to next key in the valid key space
        if (key(pos) < key(current)) {
          pos = MakeKey(key(pos) + 1, 0);
        } else {
          pos = MakeKey(key(pos), gen(pos) + 1);
        }
      }

      if (!iter.Valid()) {
        break;
      }

      // Randomly alternate between stepping and re-seeking forward.
      if (rnd->Next() % 2) {
        iter.Next();
        pos = MakeKey(key(pos), gen(pos) + 1);
      } else {
        Key new_target = RandomTarget(rnd);
        if (new_target > pos) {
          pos = new_target;
          iter.Seek(new_target);
        }
      }
    }
  }
};
// Out-of-class definition for the odr-used static member.
const uint32_t ConcurrentTest::K;

297 | // Simple test that does single-threaded testing of the ConcurrentTest | |
298 | // scaffolding. | |
299 | TEST_F(SkipTest, ConcurrentWithoutThreads) { | |
300 | ConcurrentTest test; | |
301 | Random rnd(test::RandomSeed()); | |
302 | for (int i = 0; i < 10000; i++) { | |
303 | test.ReadStep(&rnd); | |
304 | test.WriteStep(&rnd); | |
305 | } | |
306 | } | |
307 | ||
// Coordination state shared between the writer (the test's main thread)
// and the background reader thread scheduled via Env.
class TestState {
 public:
  ConcurrentTest t_;             // shared skiplist scaffolding under test
  int seed_;                     // RNG seed handed to the reader thread
  std::atomic<bool> quit_flag_;  // writer sets it; reader polls it

  // Lifecycle phases of the background reader.
  enum ReaderState {
    STARTING,
    RUNNING,
    DONE
  };

  explicit TestState(int s)
      : seed_(s), quit_flag_(false), state_(STARTING), state_cv_(&mu_) {}

  // Blocks the caller until the reader has published state s.
  void Wait(ReaderState s) {
    mu_.Lock();
    while (state_ != s) {
      state_cv_.Wait();
    }
    mu_.Unlock();
  }

  // Called by the reader to publish its new state and wake any waiter.
  void Change(ReaderState s) {
    mu_.Lock();
    state_ = s;
    state_cv_.Signal();
    mu_.Unlock();
  }

 private:
  port::Mutex mu_;          // guards state_
  ReaderState state_;
  port::CondVar state_cv_;  // signaled on every state_ change
};

344 | static void ConcurrentReader(void* arg) { | |
345 | TestState* state = reinterpret_cast<TestState*>(arg); | |
346 | Random rnd(state->seed_); | |
347 | int64_t reads = 0; | |
348 | state->Change(TestState::RUNNING); | |
349 | while (!state->quit_flag_.load(std::memory_order_acquire)) { | |
350 | state->t_.ReadStep(&rnd); | |
351 | ++reads; | |
352 | } | |
353 | state->Change(TestState::DONE); | |
354 | } | |
355 | ||
// One round of the concurrent stress test: N times, spawn a background
// reader, perform kSize writes concurrently with it, then signal it to
// quit and wait for it to finish.  `run` only perturbs the base random
// seed so the ConcurrentN tests exercise different sequences.
static void RunConcurrent(int run) {
  const int seed = test::RandomSeed() + (run * 100);
  Random rnd(seed);
  const int N = 1000;
  const int kSize = 1000;
  for (int i = 0; i < N; i++) {
    // Progress marker so long runs are visibly alive.
    if ((i % 100) == 0) {
      fprintf(stderr, "Run %d of %d\n", i, N);
    }
    TestState state(seed + 1);
    Env::Default()->SetBackgroundThreads(1);
    Env::Default()->Schedule(ConcurrentReader, &state);
    // Do not start writing until the reader is actually running.
    state.Wait(TestState::RUNNING);
    for (int k = 0; k < kSize; k++) {
      state.t_.WriteStep(&rnd);
    }
    // Release store pairs with the reader's acquire load of quit_flag_.
    state.quit_flag_.store(true, std::memory_order_release);
    state.Wait(TestState::DONE);
  }
}

// Run the single-writer / background-reader stress test five times with
// different seed offsets.
TEST_F(SkipTest, Concurrent1) { RunConcurrent(1); }
TEST_F(SkipTest, Concurrent2) { RunConcurrent(2); }
TEST_F(SkipTest, Concurrent3) { RunConcurrent(3); }
TEST_F(SkipTest, Concurrent4) { RunConcurrent(4); }
TEST_F(SkipTest, Concurrent5) { RunConcurrent(5); }

}  // namespace rocksdb

// Standard gtest entry point: parse gtest flags, then run all suites.
int main(int argc, char** argv) {
  ::testing::InitGoogleTest(&argc, argv);
  return RUN_ALL_TESTS();
}