// ceph/src/rocksdb/cache/cache_test.cc
// (from ceph's bundled rocksdb, nautilus 14.2.4 import; mirrored at git.proxmox.com)
1 // Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
2 // This source code is licensed under both the GPLv2 (found in the
3 // COPYING file in the root directory) and Apache 2.0 License
4 // (found in the LICENSE.Apache file in the root directory).
5 //
6 // Copyright (c) 2011 The LevelDB Authors. All rights reserved.
7 // Use of this source code is governed by a BSD-style license that can be
8 // found in the LICENSE file. See the AUTHORS file for names of contributors.
9
#include "rocksdb/cache.h"

#include <algorithm>
#include <forward_list>
#include <functional>
#include <iostream>
#include <string>
#include <vector>
#include "cache/clock_cache.h"
#include "cache/lru_cache.h"
#include "util/coding.h"
#include "util/string_util.h"
#include "util/testharness.h"
22
23 namespace rocksdb {
24
25 // Conversions between numeric keys/values and the types expected by Cache.
26 static std::string EncodeKey(int k) {
27 std::string result;
28 PutFixed32(&result, k);
29 return result;
30 }
31 static int DecodeKey(const Slice& k) {
32 assert(k.size() == 4);
33 return DecodeFixed32(k.data());
34 }
35 static void* EncodeValue(uintptr_t v) { return reinterpret_cast<void*>(v); }
// Recover the integer that EncodeValue() packed into a void* value slot.
static int DecodeValue(void* v) {
  const uintptr_t raw = reinterpret_cast<uintptr_t>(v);
  return static_cast<int>(raw);
}
39
// Names of the cache implementations exercised by the parameterized tests.
const std::string kLRU = "lru";
const std::string kClock = "clock";
42
43 void dumbDeleter(const Slice& /*key*/, void* /*value*/) {}
44
// Deleter that re-enters the cache: `value` is the Cache itself, and the
// deleter erases key "foo" from it. Used to verify that a deleter may safely
// call back into the cache that invoked it.
void eraseDeleter(const Slice& /*key*/, void* value) {
  Cache* cache = reinterpret_cast<Cache*>(value);
  cache->Erase("foo");
}
49
// Parameterized fixture: the parameter ("lru" or "clock") selects the cache
// implementation under test. Provides two caches of different sizes and
// int-typed convenience wrappers around the Cache API.
class CacheTest : public testing::TestWithParam<std::string> {
 public:
  // Most recently constructed fixture. The static Deleter below has no access
  // to the instance, so it records deletions through this pointer.
  static CacheTest* current_;

  // Cache deleter that logs every deleted (key, value) pair into the current
  // fixture's parallel vectors.
  static void Deleter(const Slice& key, void* v) {
    current_->deleted_keys_.push_back(DecodeKey(key));
    current_->deleted_values_.push_back(DecodeValue(v));
  }

  static const int kCacheSize = 1000;
  static const int kNumShardBits = 4;

  static const int kCacheSize2 = 100;
  static const int kNumShardBits2 = 2;

  // Deletion log appended to by Deleter (parallel vectors: key i was deleted
  // together with value i).
  std::vector<int> deleted_keys_;
  std::vector<int> deleted_values_;
  std::shared_ptr<Cache> cache_;   // primary cache (capacity kCacheSize)
  std::shared_ptr<Cache> cache2_;  // smaller secondary cache

  CacheTest()
      : cache_(NewCache(kCacheSize, kNumShardBits, false)),
        cache2_(NewCache(kCacheSize2, kNumShardBits2, false)) {
    current_ = this;
  }

  ~CacheTest() override {}

  // Build a cache of the parameterized type with default sharding; returns
  // nullptr for an unrecognized type name.
  std::shared_ptr<Cache> NewCache(size_t capacity) {
    auto type = GetParam();
    if (type == kLRU) {
      return NewLRUCache(capacity);
    }
    if (type == kClock) {
      return NewClockCache(capacity);
    }
    return nullptr;
  }

  // As above, but with explicit shard count and strict-capacity behavior.
  std::shared_ptr<Cache> NewCache(size_t capacity, int num_shard_bits,
                                  bool strict_capacity_limit) {
    auto type = GetParam();
    if (type == kLRU) {
      return NewLRUCache(capacity, num_shard_bits, strict_capacity_limit);
    }
    if (type == kClock) {
      return NewClockCache(capacity, num_shard_bits, strict_capacity_limit);
    }
    return nullptr;
  }

  // Look up `key` in `cache`; returns the decoded value, or -1 on a miss.
  // Releases the handle before returning so the entry is left unpinned.
  int Lookup(std::shared_ptr<Cache> cache, int key) {
    Cache::Handle* handle = cache->Lookup(EncodeKey(key));
    const int r = (handle == nullptr) ? -1 : DecodeValue(cache->Value(handle));
    if (handle != nullptr) {
      cache->Release(handle);
    }
    return r;
  }

  // Insert (key, value) with the logging Deleter. The returned Status is
  // intentionally ignored; tests that care about failure call Insert on the
  // Cache directly.
  void Insert(std::shared_ptr<Cache> cache, int key, int value,
              int charge = 1) {
    cache->Insert(EncodeKey(key), EncodeValue(value), charge,
                  &CacheTest::Deleter);
  }

  void Erase(std::shared_ptr<Cache> cache, int key) {
    cache->Erase(EncodeKey(key));
  }

  // Convenience wrappers that operate on the primary cache.
  int Lookup(int key) {
    return Lookup(cache_, key);
  }

  void Insert(int key, int value, int charge = 1) {
    Insert(cache_, key, value, charge);
  }

  void Erase(int key) {
    Erase(cache_, key);
  }

  // Convenience wrappers that operate on the smaller secondary cache.
  int Lookup2(int key) {
    return Lookup(cache2_, key);
  }

  void Insert2(int key, int value, int charge = 1) {
    Insert(cache2_, key, value, charge);
  }

  void Erase2(int key) {
    Erase(cache2_, key);
  }
};
CacheTest* CacheTest::current_;
145
146 TEST_P(CacheTest, UsageTest) {
147 // cache is std::shared_ptr and will be automatically cleaned up.
148 const uint64_t kCapacity = 100000;
149 auto cache = NewCache(kCapacity, 8, false);
150
151 size_t usage = 0;
152 char value[10] = "abcdef";
153 // make sure everything will be cached
154 for (int i = 1; i < 100; ++i) {
155 std::string key(i, 'a');
156 auto kv_size = key.size() + 5;
157 cache->Insert(key, reinterpret_cast<void*>(value), kv_size, dumbDeleter);
158 usage += kv_size;
159 ASSERT_EQ(usage, cache->GetUsage());
160 }
161
162 // make sure the cache will be overloaded
163 for (uint64_t i = 1; i < kCapacity; ++i) {
164 auto key = ToString(i);
165 cache->Insert(key, reinterpret_cast<void*>(value), key.size() + 5,
166 dumbDeleter);
167 }
168
169 // the usage should be close to the capacity
170 ASSERT_GT(kCapacity, cache->GetUsage());
171 ASSERT_LT(kCapacity * 0.95, cache->GetUsage());
172 }
173
TEST_P(CacheTest, PinnedUsageTest) {
  // GetPinnedUsage() must track exactly the charges of entries with
  // outstanding handles, independent of total usage and of eviction.
  // cache is std::shared_ptr and will be automatically cleaned up.
  const uint64_t kCapacity = 100000;
  auto cache = NewCache(kCapacity, 8, false);

  size_t pinned_usage = 0;  // expected value of GetPinnedUsage()
  char value[10] = "abcdef";

  std::forward_list<Cache::Handle*> unreleased_handles;

  // Add entries. Unpin some of them after insertion. Then, pin some of them
  // again. Check GetPinnedUsage().
  for (int i = 1; i < 100; ++i) {
    std::string key(i, 'a');
    auto kv_size = key.size() + 5;
    Cache::Handle* handle;
    cache->Insert(key, reinterpret_cast<void*>(value), kv_size, dumbDeleter,
                  &handle);
    pinned_usage += kv_size;
    ASSERT_EQ(pinned_usage, cache->GetPinnedUsage());
    if (i % 2 == 0) {
      // Release even entries immediately: they become unpinned.
      cache->Release(handle);
      pinned_usage -= kv_size;
      ASSERT_EQ(pinned_usage, cache->GetPinnedUsage());
    } else {
      unreleased_handles.push_front(handle);
    }
    if (i % 3 == 0) {
      // Re-pin every third entry via an extra Lookup handle.
      unreleased_handles.push_front(cache->Lookup(key));
      // If i % 2 == 0, then the entry was unpinned before Lookup, so pinned
      // usage increased
      if (i % 2 == 0) {
        pinned_usage += kv_size;
      }
      ASSERT_EQ(pinned_usage, cache->GetPinnedUsage());
    }
  }

  // check that overloading the cache does not change the pinned usage
  for (uint64_t i = 1; i < 2 * kCapacity; ++i) {
    auto key = ToString(i);
    cache->Insert(key, reinterpret_cast<void*>(value), key.size() + 5,
                  dumbDeleter);
  }
  ASSERT_EQ(pinned_usage, cache->GetPinnedUsage());

  // release handles for pinned entries to prevent memory leaks
  for (auto handle : unreleased_handles) {
    cache->Release(handle);
  }
}
225
TEST_P(CacheTest, HitAndMiss) {
  // Basic lookup semantics: misses return -1, hits return the stored value,
  // and overwriting a key invokes the deleter for the replaced entry.
  ASSERT_EQ(-1, Lookup(100));

  Insert(100, 101);
  ASSERT_EQ(101, Lookup(100));
  ASSERT_EQ(-1, Lookup(200));
  ASSERT_EQ(-1, Lookup(300));

  Insert(200, 201);
  ASSERT_EQ(101, Lookup(100));
  ASSERT_EQ(201, Lookup(200));
  ASSERT_EQ(-1, Lookup(300));

  // Overwrite key 100: the old (100, 101) entry must be deleted.
  Insert(100, 102);
  ASSERT_EQ(102, Lookup(100));
  ASSERT_EQ(201, Lookup(200));
  ASSERT_EQ(-1, Lookup(300));

  ASSERT_EQ(1U, deleted_keys_.size());
  ASSERT_EQ(100, deleted_keys_[0]);
  ASSERT_EQ(101, deleted_values_[0]);
}
248
TEST_P(CacheTest, InsertSameKey) {
  // A second insert with the same key must replace the first value.
  Insert(1, 1);
  Insert(1, 2);
  ASSERT_EQ(2, Lookup(1));
}
254
TEST_P(CacheTest, Erase) {
  // Erasing a missing key is a no-op; erasing a present key deletes exactly
  // that entry and leaves others intact; a repeated erase is again a no-op.
  Erase(200);
  ASSERT_EQ(0U, deleted_keys_.size());

  Insert(100, 101);
  Insert(200, 201);
  Erase(100);
  ASSERT_EQ(-1, Lookup(100));
  ASSERT_EQ(201, Lookup(200));
  ASSERT_EQ(1U, deleted_keys_.size());
  ASSERT_EQ(100, deleted_keys_[0]);
  ASSERT_EQ(101, deleted_values_[0]);

  Erase(100);
  ASSERT_EQ(-1, Lookup(100));
  ASSERT_EQ(201, Lookup(200));
  ASSERT_EQ(1U, deleted_keys_.size());
}
273
TEST_P(CacheTest, EntriesArePinned) {
  // An entry with an outstanding handle stays charged to usage and is not
  // deleted — even when overwritten or erased — until the handle is released.
  Insert(100, 101);
  Cache::Handle* h1 = cache_->Lookup(EncodeKey(100));
  ASSERT_EQ(101, DecodeValue(cache_->Value(h1)));
  ASSERT_EQ(1U, cache_->GetUsage());

  // Overwrite: the old entry is detached but still pinned by h1, so its
  // deleter has not run and both entries are charged.
  Insert(100, 102);
  Cache::Handle* h2 = cache_->Lookup(EncodeKey(100));
  ASSERT_EQ(102, DecodeValue(cache_->Value(h2)));
  ASSERT_EQ(0U, deleted_keys_.size());
  ASSERT_EQ(2U, cache_->GetUsage());

  // Releasing h1 finally deletes the replaced (100, 101) entry.
  cache_->Release(h1);
  ASSERT_EQ(1U, deleted_keys_.size());
  ASSERT_EQ(100, deleted_keys_[0]);
  ASSERT_EQ(101, deleted_values_[0]);
  ASSERT_EQ(1U, cache_->GetUsage());

  // Erase removes the entry from the table, but h2 still pins its charge.
  Erase(100);
  ASSERT_EQ(-1, Lookup(100));
  ASSERT_EQ(1U, deleted_keys_.size());
  ASSERT_EQ(1U, cache_->GetUsage());

  cache_->Release(h2);
  ASSERT_EQ(2U, deleted_keys_.size());
  ASSERT_EQ(100, deleted_keys_[1]);
  ASSERT_EQ(102, deleted_values_[1]);
  ASSERT_EQ(0U, cache_->GetUsage());
}
303
TEST_P(CacheTest, EvictionPolicy) {
  Insert(100, 101);
  Insert(200, 201);

  // Frequently used entry must be kept around
  // (key 100 is touched on every iteration while the cache is flooded; the
  // never-touched key 200 must eventually be evicted instead).
  for (int i = 0; i < kCacheSize + 200; i++) {
    Insert(1000+i, 2000+i);
    ASSERT_EQ(101, Lookup(100));
  }
  ASSERT_EQ(101, Lookup(100));
  ASSERT_EQ(-1, Lookup(200));
}
316
TEST_P(CacheTest, ExternalRefPinsEntries) {
  // An explicit Ref() adds an external reference that pins the entry just
  // like an unreleased Lookup handle does; once all refs are gone, flooding
  // the cache evicts the entry.
  Insert(100, 101);
  Cache::Handle* h = cache_->Lookup(EncodeKey(100));
  ASSERT_TRUE(cache_->Ref(h));
  ASSERT_EQ(101, DecodeValue(cache_->Value(h)));
  ASSERT_EQ(1U, cache_->GetUsage());

  for (int i = 0; i < 3; ++i) {
    if (i > 0) {
      // First release (i == 1) corresponds to Ref(), second release (i == 2)
      // corresponds to Lookup(). Then, since all external refs are released,
      // the below insertions should push out the cache entry.
      cache_->Release(h);
    }
    // double cache size because the usage bit in block cache prevents 100 from
    // being evicted in the first kCacheSize iterations
    for (int j = 0; j < 2 * kCacheSize + 100; j++) {
      Insert(1000 + j, 2000 + j);
    }
    if (i < 2) {
      ASSERT_EQ(101, Lookup(100));
    }
  }
  ASSERT_EQ(-1, Lookup(100));
}
342
343 TEST_P(CacheTest, EvictionPolicyRef) {
344 Insert(100, 101);
345 Insert(101, 102);
346 Insert(102, 103);
347 Insert(103, 104);
348 Insert(200, 101);
349 Insert(201, 102);
350 Insert(202, 103);
351 Insert(203, 104);
352 Cache::Handle* h201 = cache_->Lookup(EncodeKey(200));
353 Cache::Handle* h202 = cache_->Lookup(EncodeKey(201));
354 Cache::Handle* h203 = cache_->Lookup(EncodeKey(202));
355 Cache::Handle* h204 = cache_->Lookup(EncodeKey(203));
356 Insert(300, 101);
357 Insert(301, 102);
358 Insert(302, 103);
359 Insert(303, 104);
360
361 // Insert entries much more than Cache capacity
362 for (int i = 0; i < kCacheSize + 200; i++) {
363 Insert(1000 + i, 2000 + i);
364 }
365
366 // Check whether the entries inserted in the beginning
367 // are evicted. Ones without extra ref are evicted and
368 // those with are not.
369 ASSERT_EQ(-1, Lookup(100));
370 ASSERT_EQ(-1, Lookup(101));
371 ASSERT_EQ(-1, Lookup(102));
372 ASSERT_EQ(-1, Lookup(103));
373
374 ASSERT_EQ(-1, Lookup(300));
375 ASSERT_EQ(-1, Lookup(301));
376 ASSERT_EQ(-1, Lookup(302));
377 ASSERT_EQ(-1, Lookup(303));
378
379 ASSERT_EQ(101, Lookup(200));
380 ASSERT_EQ(102, Lookup(201));
381 ASSERT_EQ(103, Lookup(202));
382 ASSERT_EQ(104, Lookup(203));
383
384 // Cleaning up all the handles
385 cache_->Release(h201);
386 cache_->Release(h202);
387 cache_->Release(h203);
388 cache_->Release(h204);
389 }
390
TEST_P(CacheTest, EvictEmptyCache) {
  // Insert an item larger than capacity to trigger eviction on an empty
  // cache; the insert itself must still succeed.
  auto cache = NewCache(1, 0, false);
  ASSERT_OK(cache->Insert("foo", nullptr, 10, dumbDeleter));
}
396
TEST_P(CacheTest, EraseFromDeleter) {
  // Have deleter which will erase item from cache, which will re-enter
  // the cache at that point.
  std::shared_ptr<Cache> cache = NewCache(10, 0, false);
  ASSERT_OK(cache->Insert("foo", nullptr, 1, dumbDeleter));
  // "bar"'s value is the cache itself; its deleter (eraseDeleter) erases
  // "foo", calling back into the cache while "bar" is being destroyed.
  ASSERT_OK(cache->Insert("bar", cache.get(), 1, eraseDeleter));
  cache->Erase("bar");
  ASSERT_EQ(nullptr, cache->Lookup("foo"));
  ASSERT_EQ(nullptr, cache->Lookup("bar"));
}
407
TEST_P(CacheTest, ErasedHandleState) {
  // Handles to an erased entry stay valid, but the erased entry must not be
  // resurrected when the handles are released.
  // insert a key and get two handles
  Insert(100, 1000);
  Cache::Handle* h1 = cache_->Lookup(EncodeKey(100));
  Cache::Handle* h2 = cache_->Lookup(EncodeKey(100));
  ASSERT_EQ(h1, h2);
  ASSERT_EQ(DecodeValue(cache_->Value(h1)), 1000);
  ASSERT_EQ(DecodeValue(cache_->Value(h2)), 1000);

  // delete the key from the cache
  Erase(100);
  // can no longer find in the cache
  ASSERT_EQ(-1, Lookup(100));

  // release one handle
  cache_->Release(h1);
  // still can't find in cache
  ASSERT_EQ(-1, Lookup(100));

  cache_->Release(h2);
}
429
430 TEST_P(CacheTest, HeavyEntries) {
431 // Add a bunch of light and heavy entries and then count the combined
432 // size of items still in the cache, which must be approximately the
433 // same as the total capacity.
434 const int kLight = 1;
435 const int kHeavy = 10;
436 int added = 0;
437 int index = 0;
438 while (added < 2*kCacheSize) {
439 const int weight = (index & 1) ? kLight : kHeavy;
440 Insert(index, 1000+index, weight);
441 added += weight;
442 index++;
443 }
444
445 int cached_weight = 0;
446 for (int i = 0; i < index; i++) {
447 const int weight = (i & 1 ? kLight : kHeavy);
448 int r = Lookup(i);
449 if (r >= 0) {
450 cached_weight += weight;
451 ASSERT_EQ(1000+i, r);
452 }
453 }
454 ASSERT_LE(cached_weight, kCacheSize + kCacheSize/10);
455 }
456
457 TEST_P(CacheTest, NewId) {
458 uint64_t a = cache_->NewId();
459 uint64_t b = cache_->NewId();
460 ASSERT_NE(a, b);
461 }
462
463
// Trivial heap-allocated value type used by the tests below.
class Value {
 public:
  explicit Value(size_t v) : v_(v) { }

  size_t v_;
};
470
namespace {
// Deleter matching entries whose values are `new Value(...)`.
void deleter(const Slice& /*key*/, void* value) {
  delete static_cast<Value *>(value);
}
}  // namespace
476
477 TEST_P(CacheTest, ReleaseAndErase) {
478 std::shared_ptr<Cache> cache = NewCache(5, 0, false);
479 Cache::Handle* handle;
480 Status s = cache->Insert(EncodeKey(100), EncodeValue(100), 1,
481 &CacheTest::Deleter, &handle);
482 ASSERT_TRUE(s.ok());
483 ASSERT_EQ(5U, cache->GetCapacity());
484 ASSERT_EQ(1U, cache->GetUsage());
485 ASSERT_EQ(0U, deleted_keys_.size());
486 auto erased = cache->Release(handle, true);
487 ASSERT_TRUE(erased);
488 // This tests that deleter has been called
489 ASSERT_EQ(1U, deleted_keys_.size());
490 }
491
492 TEST_P(CacheTest, ReleaseWithoutErase) {
493 std::shared_ptr<Cache> cache = NewCache(5, 0, false);
494 Cache::Handle* handle;
495 Status s = cache->Insert(EncodeKey(100), EncodeValue(100), 1,
496 &CacheTest::Deleter, &handle);
497 ASSERT_TRUE(s.ok());
498 ASSERT_EQ(5U, cache->GetCapacity());
499 ASSERT_EQ(1U, cache->GetUsage());
500 ASSERT_EQ(0U, deleted_keys_.size());
501 auto erased = cache->Release(handle);
502 ASSERT_FALSE(erased);
503 // This tests that deleter is not called. When cache has free capacity it is
504 // not expected to immediately erase the released items.
505 ASSERT_EQ(0U, deleted_keys_.size());
506 }
507
TEST_P(CacheTest, SetCapacity) {
  // test1: increase capacity
  // lets create a cache with capacity 5,
  // then, insert 5 elements, then increase capacity
  // to 10, returned capacity should be 10, usage=5
  std::shared_ptr<Cache> cache = NewCache(5, 0, false);
  std::vector<Cache::Handle*> handles(10);
  // Insert 5 entries, but not releasing.
  for (size_t i = 0; i < 5; i++) {
    std::string key = ToString(i+1);
    Status s = cache->Insert(key, new Value(i + 1), 1, &deleter, &handles[i]);
    ASSERT_TRUE(s.ok());
  }
  ASSERT_EQ(5U, cache->GetCapacity());
  ASSERT_EQ(5U, cache->GetUsage());
  cache->SetCapacity(10);
  ASSERT_EQ(10U, cache->GetCapacity());
  ASSERT_EQ(5U, cache->GetUsage());

  // test2: decrease capacity
  // insert 5 more elements to cache, then release 5,
  // then decrease capacity to 7, final capacity should be 7
  // and usage should be 7
  for (size_t i = 5; i < 10; i++) {
    std::string key = ToString(i+1);
    Status s = cache->Insert(key, new Value(i + 1), 1, &deleter, &handles[i]);
    ASSERT_TRUE(s.ok());
  }
  ASSERT_EQ(10U, cache->GetCapacity());
  ASSERT_EQ(10U, cache->GetUsage());
  for (size_t i = 0; i < 5; i++) {
    cache->Release(handles[i]);
  }
  // Releasing does not evict by itself while within capacity...
  ASSERT_EQ(10U, cache->GetCapacity());
  ASSERT_EQ(10U, cache->GetUsage());
  // ...but shrinking the capacity must evict the unpinned entries down to it.
  cache->SetCapacity(7);
  ASSERT_EQ(7, cache->GetCapacity());
  ASSERT_EQ(7, cache->GetUsage());

  // release remaining 5 to keep valgrind happy
  for (size_t i = 5; i < 10; i++) {
    cache->Release(handles[i]);
  }
}
552
553 TEST_P(CacheTest, SetStrictCapacityLimit) {
554 // test1: set the flag to false. Insert more keys than capacity. See if they
555 // all go through.
556 std::shared_ptr<Cache> cache = NewLRUCache(5, 0, false);
557 std::vector<Cache::Handle*> handles(10);
558 Status s;
559 for (size_t i = 0; i < 10; i++) {
560 std::string key = ToString(i + 1);
561 s = cache->Insert(key, new Value(i + 1), 1, &deleter, &handles[i]);
562 ASSERT_OK(s);
563 ASSERT_NE(nullptr, handles[i]);
564 }
565
566 // test2: set the flag to true. Insert and check if it fails.
567 std::string extra_key = "extra";
568 Value* extra_value = new Value(0);
569 cache->SetStrictCapacityLimit(true);
570 Cache::Handle* handle;
571 s = cache->Insert(extra_key, extra_value, 1, &deleter, &handle);
572 ASSERT_TRUE(s.IsIncomplete());
573 ASSERT_EQ(nullptr, handle);
574
575 for (size_t i = 0; i < 10; i++) {
576 cache->Release(handles[i]);
577 }
578
579 // test3: init with flag being true.
580 std::shared_ptr<Cache> cache2 = NewLRUCache(5, 0, true);
581 for (size_t i = 0; i < 5; i++) {
582 std::string key = ToString(i + 1);
583 s = cache2->Insert(key, new Value(i + 1), 1, &deleter, &handles[i]);
584 ASSERT_OK(s);
585 ASSERT_NE(nullptr, handles[i]);
586 }
587 s = cache2->Insert(extra_key, extra_value, 1, &deleter, &handle);
588 ASSERT_TRUE(s.IsIncomplete());
589 ASSERT_EQ(nullptr, handle);
590 // test insert without handle
591 s = cache2->Insert(extra_key, extra_value, 1, &deleter);
592 // AS if the key have been inserted into cache but get evicted immediately.
593 ASSERT_OK(s);
594 ASSERT_EQ(5, cache->GetUsage());
595 ASSERT_EQ(nullptr, cache2->Lookup(extra_key));
596
597 for (size_t i = 0; i < 5; i++) {
598 cache2->Release(handles[i]);
599 }
600 }
601
TEST_P(CacheTest, OverCapacity) {
  // A cache whose entries are all pinned can exceed its capacity; once the
  // pins are dropped and eviction is triggered, it must shrink back, evicting
  // in LRU (first-released-first) order.
  size_t n = 10;

  // a LRUCache with n entries and one shard only
  std::shared_ptr<Cache> cache = NewCache(n, 0, false);

  std::vector<Cache::Handle*> handles(n+1);

  // Insert n+1 entries, but not releasing.
  for (size_t i = 0; i < n + 1; i++) {
    std::string key = ToString(i+1);
    Status s = cache->Insert(key, new Value(i + 1), 1, &deleter, &handles[i]);
    ASSERT_TRUE(s.ok());
  }

  // Guess what's in the cache now?
  for (size_t i = 0; i < n + 1; i++) {
    std::string key = ToString(i+1);
    auto h = cache->Lookup(key);
    ASSERT_TRUE(h != nullptr);
    if (h) cache->Release(h);
  }

  // the cache is over capacity since nothing could be evicted
  ASSERT_EQ(n + 1U, cache->GetUsage());
  for (size_t i = 0; i < n + 1; i++) {
    cache->Release(handles[i]);
  }
  // Make sure eviction is triggered.
  cache->SetCapacity(n);

  // cache is under capacity now since elements were released
  ASSERT_EQ(n, cache->GetUsage());

  // element 0 is evicted and the rest is there
  // This is consistent with the LRU policy since the element 0
  // was released first
  for (size_t i = 0; i < n + 1; i++) {
    std::string key = ToString(i+1);
    auto h = cache->Lookup(key);
    if (h) {
      ASSERT_NE(i, 0U);
      cache->Release(h);
    } else {
      ASSERT_EQ(i, 0U);
    }
  }
}
650
651 namespace {
652 std::vector<std::pair<int, int>> callback_state;
653 void callback(void* entry, size_t charge) {
654 callback_state.push_back({DecodeValue(entry), static_cast<int>(charge)});
655 }
656 };
657
658 TEST_P(CacheTest, ApplyToAllCacheEntiresTest) {
659 std::vector<std::pair<int, int>> inserted;
660 callback_state.clear();
661
662 for (int i = 0; i < 10; ++i) {
663 Insert(i, i * 2, i + 1);
664 inserted.push_back({i * 2, i + 1});
665 }
666 cache_->ApplyToAllCacheEntries(callback, true);
667
668 std::sort(inserted.begin(), inserted.end());
669 std::sort(callback_state.begin(), callback_state.end());
670 ASSERT_TRUE(inserted == callback_state);
671 }
672
TEST_P(CacheTest, DefaultShardBits) {
  // When num_shard_bits is unspecified (or -1), the cache derives the shard
  // count from its capacity. The expectations below imply roughly one shard
  // per 512KB of capacity, capped at 6 shard bits — confirm against
  // ShardedCache's implementation if these constants change.
  std::shared_ptr<Cache> cache = NewCache(16 * 1024L * 1024L);
  ShardedCache* sc = dynamic_cast<ShardedCache*>(cache.get());
  ASSERT_EQ(5, sc->GetNumShardBits());

  cache = NewLRUCache(511 * 1024L, -1, true);
  sc = dynamic_cast<ShardedCache*>(cache.get());
  ASSERT_EQ(0, sc->GetNumShardBits());

  cache = NewLRUCache(1024L * 1024L * 1024L, -1, true);
  sc = dynamic_cast<ShardedCache*>(cache.get());
  ASSERT_EQ(6, sc->GetNumShardBits());
}
688
#ifdef SUPPORT_CLOCK_CACHE
// NOTE(review): this function pointer appears to exist only so this
// translation unit references NewClockCache — confirm before removing.
std::shared_ptr<Cache> (*new_clock_cache_func)(size_t, int,
                                               bool) = NewClockCache;
// Run every TEST_P above against both cache implementations.
INSTANTIATE_TEST_CASE_P(CacheTestInstance, CacheTest,
                        testing::Values(kLRU, kClock));
#else
// Clock cache not compiled in: run the tests against LRU only.
INSTANTIATE_TEST_CASE_P(CacheTestInstance, CacheTest, testing::Values(kLRU));
#endif  // SUPPORT_CLOCK_CACHE
697
698 } // namespace rocksdb
699
// Test entry point: runs all (parameterized) cache tests.
int main(int argc, char** argv) {
  ::testing::InitGoogleTest(&argc, argv);
  return RUN_ALL_TESTS();
}