1 // Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
2 // This source code is licensed under both the GPLv2 (found in the
3 // COPYING file in the root directory) and Apache 2.0 License
4 // (found in the LICENSE.Apache file in the root directory).
6 // Copyright (c) 2011 The LevelDB Authors. All rights reserved.
7 // Use of this source code is governed by a BSD-style license that can be
8 // found in the LICENSE file. See the AUTHORS file for names of contributors.
#include "rocksdb/cache.h"

#include <forward_list>
#include <functional>
#include <iostream>
#include <string>
#include <vector>

#include "cache/clock_cache.h"
#include "cache/lru_cache.h"
#include "test_util/testharness.h"
#include "util/coding.h"
#include "util/string_util.h"
23 namespace ROCKSDB_NAMESPACE
{
25 // Conversions between numeric keys/values and the types expected by Cache.
26 static std::string
EncodeKey(int k
) {
28 PutFixed32(&result
, k
);
31 static int DecodeKey(const Slice
& k
) {
32 assert(k
.size() == 4);
33 return DecodeFixed32(k
.data());
// Stores the integer inside the pointer payload itself (no allocation),
// so the test "values" need no real deleter.
static void* EncodeValue(uintptr_t v) {
  return reinterpret_cast<void*>(v);
}
// Inverse of EncodeValue: recovers the int packed into the pointer payload.
// Fix: the original span dropped the function's closing brace.
static int DecodeValue(void* v) {
  return static_cast<int>(reinterpret_cast<uintptr_t>(v));
}
// Names of the cache implementations exercised by the value-parameterized
// tests; GetParam() returns one of these strings.
const std::string kLRU = "lru";
const std::string kClock = "clock";
43 void dumbDeleter(const Slice
& /*key*/, void* /*value*/) {}
45 void eraseDeleter(const Slice
& /*key*/, void* value
) {
46 Cache
* cache
= reinterpret_cast<Cache
*>(value
);
50 class CacheTest
: public testing::TestWithParam
<std::string
> {
52 static CacheTest
* current_
;
54 static void Deleter(const Slice
& key
, void* v
) {
55 current_
->deleted_keys_
.push_back(DecodeKey(key
));
56 current_
->deleted_values_
.push_back(DecodeValue(v
));
59 static const int kCacheSize
= 1000;
60 static const int kNumShardBits
= 4;
62 static const int kCacheSize2
= 100;
63 static const int kNumShardBits2
= 2;
65 std::vector
<int> deleted_keys_
;
66 std::vector
<int> deleted_values_
;
67 std::shared_ptr
<Cache
> cache_
;
68 std::shared_ptr
<Cache
> cache2_
;
71 : cache_(NewCache(kCacheSize
, kNumShardBits
, false)),
72 cache2_(NewCache(kCacheSize2
, kNumShardBits2
, false)) {
76 ~CacheTest() override
{}
78 std::shared_ptr
<Cache
> NewCache(size_t capacity
) {
79 auto type
= GetParam();
81 return NewLRUCache(capacity
);
84 return NewClockCache(capacity
);
89 std::shared_ptr
<Cache
> NewCache(
90 size_t capacity
, int num_shard_bits
, bool strict_capacity_limit
,
91 CacheMetadataChargePolicy charge_policy
= kDontChargeCacheMetadata
) {
92 auto type
= GetParam();
95 co
.capacity
= capacity
;
96 co
.num_shard_bits
= num_shard_bits
;
97 co
.strict_capacity_limit
= strict_capacity_limit
;
98 co
.high_pri_pool_ratio
= 0;
99 co
.metadata_charge_policy
= charge_policy
;
100 return NewLRUCache(co
);
102 if (type
== kClock
) {
103 return NewClockCache(capacity
, num_shard_bits
, strict_capacity_limit
,
109 int Lookup(std::shared_ptr
<Cache
> cache
, int key
) {
110 Cache::Handle
* handle
= cache
->Lookup(EncodeKey(key
));
111 const int r
= (handle
== nullptr) ? -1 : DecodeValue(cache
->Value(handle
));
112 if (handle
!= nullptr) {
113 cache
->Release(handle
);
118 void Insert(std::shared_ptr
<Cache
> cache
, int key
, int value
,
120 cache
->Insert(EncodeKey(key
), EncodeValue(value
), charge
,
121 &CacheTest::Deleter
);
124 void Erase(std::shared_ptr
<Cache
> cache
, int key
) {
125 cache
->Erase(EncodeKey(key
));
128 int Lookup(int key
) {
129 return Lookup(cache_
, key
);
132 void Insert(int key
, int value
, int charge
= 1) {
133 Insert(cache_
, key
, value
, charge
);
136 void Erase(int key
) {
140 int Lookup2(int key
) {
141 return Lookup(cache2_
, key
);
144 void Insert2(int key
, int value
, int charge
= 1) {
145 Insert(cache2_
, key
, value
, charge
);
148 void Erase2(int key
) {
152 CacheTest
* CacheTest::current_
;
154 class LRUCacheTest
: public CacheTest
{};
156 TEST_P(CacheTest
, UsageTest
) {
157 // cache is std::shared_ptr and will be automatically cleaned up.
158 const uint64_t kCapacity
= 100000;
159 auto cache
= NewCache(kCapacity
, 8, false, kDontChargeCacheMetadata
);
160 auto precise_cache
= NewCache(kCapacity
, 0, false, kFullChargeCacheMetadata
);
161 ASSERT_EQ(0, cache
->GetUsage());
162 ASSERT_EQ(0, precise_cache
->GetUsage());
165 char value
[10] = "abcdef";
166 // make sure everything will be cached
167 for (int i
= 1; i
< 100; ++i
) {
168 std::string
key(i
, 'a');
169 auto kv_size
= key
.size() + 5;
170 cache
->Insert(key
, reinterpret_cast<void*>(value
), kv_size
, dumbDeleter
);
171 precise_cache
->Insert(key
, reinterpret_cast<void*>(value
), kv_size
,
174 ASSERT_EQ(usage
, cache
->GetUsage());
175 ASSERT_LT(usage
, precise_cache
->GetUsage());
178 cache
->EraseUnRefEntries();
179 precise_cache
->EraseUnRefEntries();
180 ASSERT_EQ(0, cache
->GetUsage());
181 ASSERT_EQ(0, precise_cache
->GetUsage());
183 // make sure the cache will be overloaded
184 for (uint64_t i
= 1; i
< kCapacity
; ++i
) {
185 auto key
= ToString(i
);
186 cache
->Insert(key
, reinterpret_cast<void*>(value
), key
.size() + 5,
188 precise_cache
->Insert(key
, reinterpret_cast<void*>(value
), key
.size() + 5,
192 // the usage should be close to the capacity
193 ASSERT_GT(kCapacity
, cache
->GetUsage());
194 ASSERT_GT(kCapacity
, precise_cache
->GetUsage());
195 ASSERT_LT(kCapacity
* 0.95, cache
->GetUsage());
196 ASSERT_LT(kCapacity
* 0.95, precise_cache
->GetUsage());
199 TEST_P(CacheTest
, PinnedUsageTest
) {
200 // cache is std::shared_ptr and will be automatically cleaned up.
201 const uint64_t kCapacity
= 200000;
202 auto cache
= NewCache(kCapacity
, 8, false, kDontChargeCacheMetadata
);
203 auto precise_cache
= NewCache(kCapacity
, 8, false, kFullChargeCacheMetadata
);
205 size_t pinned_usage
= 0;
206 char value
[10] = "abcdef";
208 std::forward_list
<Cache::Handle
*> unreleased_handles
;
209 std::forward_list
<Cache::Handle
*> unreleased_handles_in_precise_cache
;
211 // Add entries. Unpin some of them after insertion. Then, pin some of them
212 // again. Check GetPinnedUsage().
213 for (int i
= 1; i
< 100; ++i
) {
214 std::string
key(i
, 'a');
215 auto kv_size
= key
.size() + 5;
216 Cache::Handle
* handle
;
217 Cache::Handle
* handle_in_precise_cache
;
218 cache
->Insert(key
, reinterpret_cast<void*>(value
), kv_size
, dumbDeleter
,
221 precise_cache
->Insert(key
, reinterpret_cast<void*>(value
), kv_size
,
222 dumbDeleter
, &handle_in_precise_cache
);
223 assert(handle_in_precise_cache
);
224 pinned_usage
+= kv_size
;
225 ASSERT_EQ(pinned_usage
, cache
->GetPinnedUsage());
226 ASSERT_LT(pinned_usage
, precise_cache
->GetPinnedUsage());
228 cache
->Release(handle
);
229 precise_cache
->Release(handle_in_precise_cache
);
230 pinned_usage
-= kv_size
;
231 ASSERT_EQ(pinned_usage
, cache
->GetPinnedUsage());
232 ASSERT_LT(pinned_usage
, precise_cache
->GetPinnedUsage());
234 unreleased_handles
.push_front(handle
);
235 unreleased_handles_in_precise_cache
.push_front(handle_in_precise_cache
);
238 unreleased_handles
.push_front(cache
->Lookup(key
));
239 auto x
= precise_cache
->Lookup(key
);
241 unreleased_handles_in_precise_cache
.push_front(x
);
242 // If i % 2 == 0, then the entry was unpinned before Lookup, so pinned
245 pinned_usage
+= kv_size
;
247 ASSERT_EQ(pinned_usage
, cache
->GetPinnedUsage());
248 ASSERT_LT(pinned_usage
, precise_cache
->GetPinnedUsage());
251 auto precise_cache_pinned_usage
= precise_cache
->GetPinnedUsage();
252 ASSERT_LT(pinned_usage
, precise_cache_pinned_usage
);
254 // check that overloading the cache does not change the pinned usage
255 for (uint64_t i
= 1; i
< 2 * kCapacity
; ++i
) {
256 auto key
= ToString(i
);
257 cache
->Insert(key
, reinterpret_cast<void*>(value
), key
.size() + 5,
259 precise_cache
->Insert(key
, reinterpret_cast<void*>(value
), key
.size() + 5,
262 ASSERT_EQ(pinned_usage
, cache
->GetPinnedUsage());
263 ASSERT_EQ(precise_cache_pinned_usage
, precise_cache
->GetPinnedUsage());
265 cache
->EraseUnRefEntries();
266 precise_cache
->EraseUnRefEntries();
267 ASSERT_EQ(pinned_usage
, cache
->GetPinnedUsage());
268 ASSERT_EQ(precise_cache_pinned_usage
, precise_cache
->GetPinnedUsage());
270 // release handles for pinned entries to prevent memory leaks
271 for (auto handle
: unreleased_handles
) {
272 cache
->Release(handle
);
274 for (auto handle
: unreleased_handles_in_precise_cache
) {
275 precise_cache
->Release(handle
);
277 ASSERT_EQ(0, cache
->GetPinnedUsage());
278 ASSERT_EQ(0, precise_cache
->GetPinnedUsage());
279 cache
->EraseUnRefEntries();
280 precise_cache
->EraseUnRefEntries();
281 ASSERT_EQ(0, cache
->GetUsage());
282 ASSERT_EQ(0, precise_cache
->GetUsage());
285 TEST_P(CacheTest
, HitAndMiss
) {
286 ASSERT_EQ(-1, Lookup(100));
289 ASSERT_EQ(101, Lookup(100));
290 ASSERT_EQ(-1, Lookup(200));
291 ASSERT_EQ(-1, Lookup(300));
294 ASSERT_EQ(101, Lookup(100));
295 ASSERT_EQ(201, Lookup(200));
296 ASSERT_EQ(-1, Lookup(300));
299 ASSERT_EQ(102, Lookup(100));
300 ASSERT_EQ(201, Lookup(200));
301 ASSERT_EQ(-1, Lookup(300));
303 ASSERT_EQ(1U, deleted_keys_
.size());
304 ASSERT_EQ(100, deleted_keys_
[0]);
305 ASSERT_EQ(101, deleted_values_
[0]);
308 TEST_P(CacheTest
, InsertSameKey
) {
311 ASSERT_EQ(2, Lookup(1));
314 TEST_P(CacheTest
, Erase
) {
316 ASSERT_EQ(0U, deleted_keys_
.size());
321 ASSERT_EQ(-1, Lookup(100));
322 ASSERT_EQ(201, Lookup(200));
323 ASSERT_EQ(1U, deleted_keys_
.size());
324 ASSERT_EQ(100, deleted_keys_
[0]);
325 ASSERT_EQ(101, deleted_values_
[0]);
328 ASSERT_EQ(-1, Lookup(100));
329 ASSERT_EQ(201, Lookup(200));
330 ASSERT_EQ(1U, deleted_keys_
.size());
333 TEST_P(CacheTest
, EntriesArePinned
) {
335 Cache::Handle
* h1
= cache_
->Lookup(EncodeKey(100));
336 ASSERT_EQ(101, DecodeValue(cache_
->Value(h1
)));
337 ASSERT_EQ(1U, cache_
->GetUsage());
340 Cache::Handle
* h2
= cache_
->Lookup(EncodeKey(100));
341 ASSERT_EQ(102, DecodeValue(cache_
->Value(h2
)));
342 ASSERT_EQ(0U, deleted_keys_
.size());
343 ASSERT_EQ(2U, cache_
->GetUsage());
346 ASSERT_EQ(1U, deleted_keys_
.size());
347 ASSERT_EQ(100, deleted_keys_
[0]);
348 ASSERT_EQ(101, deleted_values_
[0]);
349 ASSERT_EQ(1U, cache_
->GetUsage());
352 ASSERT_EQ(-1, Lookup(100));
353 ASSERT_EQ(1U, deleted_keys_
.size());
354 ASSERT_EQ(1U, cache_
->GetUsage());
357 ASSERT_EQ(2U, deleted_keys_
.size());
358 ASSERT_EQ(100, deleted_keys_
[1]);
359 ASSERT_EQ(102, deleted_values_
[1]);
360 ASSERT_EQ(0U, cache_
->GetUsage());
363 TEST_P(CacheTest
, EvictionPolicy
) {
367 // Frequently used entry must be kept around
368 for (int i
= 0; i
< kCacheSize
* 2; i
++) {
369 Insert(1000+i
, 2000+i
);
370 ASSERT_EQ(101, Lookup(100));
372 ASSERT_EQ(101, Lookup(100));
373 ASSERT_EQ(-1, Lookup(200));
376 TEST_P(CacheTest
, ExternalRefPinsEntries
) {
378 Cache::Handle
* h
= cache_
->Lookup(EncodeKey(100));
379 ASSERT_TRUE(cache_
->Ref(h
));
380 ASSERT_EQ(101, DecodeValue(cache_
->Value(h
)));
381 ASSERT_EQ(1U, cache_
->GetUsage());
383 for (int i
= 0; i
< 3; ++i
) {
385 // First release (i == 1) corresponds to Ref(), second release (i == 2)
386 // corresponds to Lookup(). Then, since all external refs are released,
387 // the below insertions should push out the cache entry.
390 // double cache size because the usage bit in block cache prevents 100 from
391 // being evicted in the first kCacheSize iterations
392 for (int j
= 0; j
< 2 * kCacheSize
+ 100; j
++) {
393 Insert(1000 + j
, 2000 + j
);
396 ASSERT_EQ(101, Lookup(100));
399 ASSERT_EQ(-1, Lookup(100));
402 TEST_P(CacheTest
, EvictionPolicyRef
) {
411 Cache::Handle
* h201
= cache_
->Lookup(EncodeKey(200));
412 Cache::Handle
* h202
= cache_
->Lookup(EncodeKey(201));
413 Cache::Handle
* h203
= cache_
->Lookup(EncodeKey(202));
414 Cache::Handle
* h204
= cache_
->Lookup(EncodeKey(203));
420 // Insert entries much more than Cache capacity
421 for (int i
= 0; i
< kCacheSize
* 2; i
++) {
422 Insert(1000 + i
, 2000 + i
);
425 // Check whether the entries inserted in the beginning
426 // are evicted. Ones without extra ref are evicted and
427 // those with are not.
428 ASSERT_EQ(-1, Lookup(100));
429 ASSERT_EQ(-1, Lookup(101));
430 ASSERT_EQ(-1, Lookup(102));
431 ASSERT_EQ(-1, Lookup(103));
433 ASSERT_EQ(-1, Lookup(300));
434 ASSERT_EQ(-1, Lookup(301));
435 ASSERT_EQ(-1, Lookup(302));
436 ASSERT_EQ(-1, Lookup(303));
438 ASSERT_EQ(101, Lookup(200));
439 ASSERT_EQ(102, Lookup(201));
440 ASSERT_EQ(103, Lookup(202));
441 ASSERT_EQ(104, Lookup(203));
443 // Cleaning up all the handles
444 cache_
->Release(h201
);
445 cache_
->Release(h202
);
446 cache_
->Release(h203
);
447 cache_
->Release(h204
);
450 TEST_P(CacheTest
, EvictEmptyCache
) {
451 // Insert item large than capacity to trigger eviction on empty cache.
452 auto cache
= NewCache(1, 0, false);
453 ASSERT_OK(cache
->Insert("foo", nullptr, 10, dumbDeleter
));
456 TEST_P(CacheTest
, EraseFromDeleter
) {
457 // Have deleter which will erase item from cache, which will re-enter
458 // the cache at that point.
459 std::shared_ptr
<Cache
> cache
= NewCache(10, 0, false);
460 ASSERT_OK(cache
->Insert("foo", nullptr, 1, dumbDeleter
));
461 ASSERT_OK(cache
->Insert("bar", cache
.get(), 1, eraseDeleter
));
463 ASSERT_EQ(nullptr, cache
->Lookup("foo"));
464 ASSERT_EQ(nullptr, cache
->Lookup("bar"));
467 TEST_P(CacheTest
, ErasedHandleState
) {
468 // insert a key and get two handles
470 Cache::Handle
* h1
= cache_
->Lookup(EncodeKey(100));
471 Cache::Handle
* h2
= cache_
->Lookup(EncodeKey(100));
473 ASSERT_EQ(DecodeValue(cache_
->Value(h1
)), 1000);
474 ASSERT_EQ(DecodeValue(cache_
->Value(h2
)), 1000);
476 // delete the key from the cache
478 // can no longer find in the cache
479 ASSERT_EQ(-1, Lookup(100));
481 // release one handle
483 // still can't find in cache
484 ASSERT_EQ(-1, Lookup(100));
489 TEST_P(CacheTest
, HeavyEntries
) {
490 // Add a bunch of light and heavy entries and then count the combined
491 // size of items still in the cache, which must be approximately the
492 // same as the total capacity.
493 const int kLight
= 1;
494 const int kHeavy
= 10;
497 while (added
< 2*kCacheSize
) {
498 const int weight
= (index
& 1) ? kLight
: kHeavy
;
499 Insert(index
, 1000+index
, weight
);
504 int cached_weight
= 0;
505 for (int i
= 0; i
< index
; i
++) {
506 const int weight
= (i
& 1 ? kLight
: kHeavy
);
509 cached_weight
+= weight
;
510 ASSERT_EQ(1000+i
, r
);
513 ASSERT_LE(cached_weight
, kCacheSize
+ kCacheSize
/10);
516 TEST_P(CacheTest
, NewId
) {
517 uint64_t a
= cache_
->NewId();
518 uint64_t b
= cache_
->NewId();
// Simple heap-allocated value type for tests that use a real deleter.
// Fix: the original span dropped the class wrapper (`class Value {`,
// `public:`, the `size_t v_;` member, and `};`), leaving only the
// constructor line.
class Value {
 public:
  explicit Value(size_t v) : v_(v) {}

  size_t v_;
};
531 void deleter(const Slice
& /*key*/, void* value
) {
532 delete static_cast<Value
*>(value
);
536 TEST_P(CacheTest
, ReleaseAndErase
) {
537 std::shared_ptr
<Cache
> cache
= NewCache(5, 0, false);
538 Cache::Handle
* handle
;
539 Status s
= cache
->Insert(EncodeKey(100), EncodeValue(100), 1,
540 &CacheTest::Deleter
, &handle
);
542 ASSERT_EQ(5U, cache
->GetCapacity());
543 ASSERT_EQ(1U, cache
->GetUsage());
544 ASSERT_EQ(0U, deleted_keys_
.size());
545 auto erased
= cache
->Release(handle
, true);
547 // This tests that deleter has been called
548 ASSERT_EQ(1U, deleted_keys_
.size());
551 TEST_P(CacheTest
, ReleaseWithoutErase
) {
552 std::shared_ptr
<Cache
> cache
= NewCache(5, 0, false);
553 Cache::Handle
* handle
;
554 Status s
= cache
->Insert(EncodeKey(100), EncodeValue(100), 1,
555 &CacheTest::Deleter
, &handle
);
557 ASSERT_EQ(5U, cache
->GetCapacity());
558 ASSERT_EQ(1U, cache
->GetUsage());
559 ASSERT_EQ(0U, deleted_keys_
.size());
560 auto erased
= cache
->Release(handle
);
561 ASSERT_FALSE(erased
);
562 // This tests that deleter is not called. When cache has free capacity it is
563 // not expected to immediately erase the released items.
564 ASSERT_EQ(0U, deleted_keys_
.size());
567 TEST_P(CacheTest
, SetCapacity
) {
568 // test1: increase capacity
569 // lets create a cache with capacity 5,
570 // then, insert 5 elements, then increase capacity
571 // to 10, returned capacity should be 10, usage=5
572 std::shared_ptr
<Cache
> cache
= NewCache(5, 0, false);
573 std::vector
<Cache::Handle
*> handles(10);
574 // Insert 5 entries, but not releasing.
575 for (size_t i
= 0; i
< 5; i
++) {
576 std::string key
= ToString(i
+1);
577 Status s
= cache
->Insert(key
, new Value(i
+ 1), 1, &deleter
, &handles
[i
]);
580 ASSERT_EQ(5U, cache
->GetCapacity());
581 ASSERT_EQ(5U, cache
->GetUsage());
582 cache
->SetCapacity(10);
583 ASSERT_EQ(10U, cache
->GetCapacity());
584 ASSERT_EQ(5U, cache
->GetUsage());
586 // test2: decrease capacity
587 // insert 5 more elements to cache, then release 5,
588 // then decrease capacity to 7, final capacity should be 7
589 // and usage should be 7
590 for (size_t i
= 5; i
< 10; i
++) {
591 std::string key
= ToString(i
+1);
592 Status s
= cache
->Insert(key
, new Value(i
+ 1), 1, &deleter
, &handles
[i
]);
595 ASSERT_EQ(10U, cache
->GetCapacity());
596 ASSERT_EQ(10U, cache
->GetUsage());
597 for (size_t i
= 0; i
< 5; i
++) {
598 cache
->Release(handles
[i
]);
600 ASSERT_EQ(10U, cache
->GetCapacity());
601 ASSERT_EQ(10U, cache
->GetUsage());
602 cache
->SetCapacity(7);
603 ASSERT_EQ(7, cache
->GetCapacity());
604 ASSERT_EQ(7, cache
->GetUsage());
606 // release remaining 5 to keep valgrind happy
607 for (size_t i
= 5; i
< 10; i
++) {
608 cache
->Release(handles
[i
]);
612 TEST_P(LRUCacheTest
, SetStrictCapacityLimit
) {
613 // test1: set the flag to false. Insert more keys than capacity. See if they
615 std::shared_ptr
<Cache
> cache
= NewCache(5, 0, false);
616 std::vector
<Cache::Handle
*> handles(10);
618 for (size_t i
= 0; i
< 10; i
++) {
619 std::string key
= ToString(i
+ 1);
620 s
= cache
->Insert(key
, new Value(i
+ 1), 1, &deleter
, &handles
[i
]);
622 ASSERT_NE(nullptr, handles
[i
]);
624 ASSERT_EQ(10, cache
->GetUsage());
626 // test2: set the flag to true. Insert and check if it fails.
627 std::string extra_key
= "extra";
628 Value
* extra_value
= new Value(0);
629 cache
->SetStrictCapacityLimit(true);
630 Cache::Handle
* handle
;
631 s
= cache
->Insert(extra_key
, extra_value
, 1, &deleter
, &handle
);
632 ASSERT_TRUE(s
.IsIncomplete());
633 ASSERT_EQ(nullptr, handle
);
634 ASSERT_EQ(10, cache
->GetUsage());
636 for (size_t i
= 0; i
< 10; i
++) {
637 cache
->Release(handles
[i
]);
640 // test3: init with flag being true.
641 std::shared_ptr
<Cache
> cache2
= NewCache(5, 0, true);
642 for (size_t i
= 0; i
< 5; i
++) {
643 std::string key
= ToString(i
+ 1);
644 s
= cache2
->Insert(key
, new Value(i
+ 1), 1, &deleter
, &handles
[i
]);
646 ASSERT_NE(nullptr, handles
[i
]);
648 s
= cache2
->Insert(extra_key
, extra_value
, 1, &deleter
, &handle
);
649 ASSERT_TRUE(s
.IsIncomplete());
650 ASSERT_EQ(nullptr, handle
);
651 // test insert without handle
652 s
= cache2
->Insert(extra_key
, extra_value
, 1, &deleter
);
653 // AS if the key have been inserted into cache but get evicted immediately.
655 ASSERT_EQ(5, cache2
->GetUsage());
656 ASSERT_EQ(nullptr, cache2
->Lookup(extra_key
));
658 for (size_t i
= 0; i
< 5; i
++) {
659 cache2
->Release(handles
[i
]);
663 TEST_P(CacheTest
, OverCapacity
) {
666 // a LRUCache with n entries and one shard only
667 std::shared_ptr
<Cache
> cache
= NewCache(n
, 0, false);
669 std::vector
<Cache::Handle
*> handles(n
+1);
671 // Insert n+1 entries, but not releasing.
672 for (size_t i
= 0; i
< n
+ 1; i
++) {
673 std::string key
= ToString(i
+1);
674 Status s
= cache
->Insert(key
, new Value(i
+ 1), 1, &deleter
, &handles
[i
]);
678 // Guess what's in the cache now?
679 for (size_t i
= 0; i
< n
+ 1; i
++) {
680 std::string key
= ToString(i
+1);
681 auto h
= cache
->Lookup(key
);
682 ASSERT_TRUE(h
!= nullptr);
683 if (h
) cache
->Release(h
);
686 // the cache is over capacity since nothing could be evicted
687 ASSERT_EQ(n
+ 1U, cache
->GetUsage());
688 for (size_t i
= 0; i
< n
+ 1; i
++) {
689 cache
->Release(handles
[i
]);
691 // Make sure eviction is triggered.
692 cache
->SetCapacity(n
);
694 // cache is under capacity now since elements were released
695 ASSERT_EQ(n
, cache
->GetUsage());
697 // element 0 is evicted and the rest is there
698 // This is consistent with the LRU policy since the element 0
699 // was released first
700 for (size_t i
= 0; i
< n
+ 1; i
++) {
701 std::string key
= ToString(i
+1);
702 auto h
= cache
->Lookup(key
);
713 std::vector
<std::pair
<int, int>> callback_state
;
714 void callback(void* entry
, size_t charge
) {
715 callback_state
.push_back({DecodeValue(entry
), static_cast<int>(charge
)});
719 TEST_P(CacheTest
, ApplyToAllCacheEntiresTest
) {
720 std::vector
<std::pair
<int, int>> inserted
;
721 callback_state
.clear();
723 for (int i
= 0; i
< 10; ++i
) {
724 Insert(i
, i
* 2, i
+ 1);
725 inserted
.push_back({i
* 2, i
+ 1});
727 cache_
->ApplyToAllCacheEntries(callback
, true);
729 std::sort(inserted
.begin(), inserted
.end());
730 std::sort(callback_state
.begin(), callback_state
.end());
731 ASSERT_TRUE(inserted
== callback_state
);
734 TEST_P(CacheTest
, DefaultShardBits
) {
735 // test1: set the flag to false. Insert more keys than capacity. See if they
737 std::shared_ptr
<Cache
> cache
= NewCache(16 * 1024L * 1024L);
738 ShardedCache
* sc
= dynamic_cast<ShardedCache
*>(cache
.get());
739 ASSERT_EQ(5, sc
->GetNumShardBits());
741 cache
= NewLRUCache(511 * 1024L, -1, true);
742 sc
= dynamic_cast<ShardedCache
*>(cache
.get());
743 ASSERT_EQ(0, sc
->GetNumShardBits());
745 cache
= NewLRUCache(1024L * 1024L * 1024L, -1, true);
746 sc
= dynamic_cast<ShardedCache
*>(cache
.get());
747 ASSERT_EQ(6, sc
->GetNumShardBits());
750 TEST_P(CacheTest
, GetCharge
) {
752 Cache::Handle
* h1
= cache_
->Lookup(EncodeKey(1));
753 ASSERT_EQ(2, DecodeValue(cache_
->Value(h1
)));
754 ASSERT_EQ(1, cache_
->GetCharge(h1
));
758 #ifdef SUPPORT_CLOCK_CACHE
759 std::shared_ptr
<Cache
> (*new_clock_cache_func
)(
760 size_t, int, bool, CacheMetadataChargePolicy
) = NewClockCache
;
761 INSTANTIATE_TEST_CASE_P(CacheTestInstance
, CacheTest
,
762 testing::Values(kLRU
, kClock
));
764 INSTANTIATE_TEST_CASE_P(CacheTestInstance
, CacheTest
, testing::Values(kLRU
));
765 #endif // SUPPORT_CLOCK_CACHE
766 INSTANTIATE_TEST_CASE_P(CacheTestInstance
, LRUCacheTest
, testing::Values(kLRU
));
768 } // namespace ROCKSDB_NAMESPACE
770 int main(int argc
, char** argv
) {
771 ::testing::InitGoogleTest(&argc
, argv
);
772 return RUN_ALL_TESTS();