// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
// This source code is licensed under both the GPLv2 (found in the
// COPYING file in the root directory) and Apache 2.0 License
// (found in the LICENSE.Apache file in the root directory).
//
// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file. See the AUTHORS file for names of contributors.

#include "rocksdb/cache.h"

#include <algorithm>
#include <forward_list>
#include <functional>
#include <iostream>
#include <string>
#include <vector>
#include "cache/clock_cache.h"
#include "cache/lru_cache.h"
#include "test_util/testharness.h"
#include "util/coding.h"
#include "util/string_util.h"

namespace ROCKSDB_NAMESPACE {

// Conversions between numeric keys/values and the types expected by Cache.
static std::string EncodeKey(int k) {
  std::string result;
  PutFixed32(&result, k);
  return result;
}
static int DecodeKey(const Slice& k) {
  assert(k.size() == 4);
  return DecodeFixed32(k.data());
}
static void* EncodeValue(uintptr_t v) { return reinterpret_cast<void*>(v); }
static int DecodeValue(void* v) {
  return static_cast<int>(reinterpret_cast<uintptr_t>(v));
}

const std::string kLRU = "lru";
const std::string kClock = "clock";

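// Deleter that does nothing, for entries whose lifetime the test manages
// itself.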
void dumbDeleter(const Slice& /*key*/, void* /*value*/) {}

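// Deleter that re-enters the cache: the entry's value is the Cache itself,
// and deleting the entry erases the key "foo" from that cache.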
void eraseDeleter(const Slice& /*key*/, void* value) {
  Cache* cache = reinterpret_cast<Cache*>(value);
  cache->Erase("foo");
}

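// Test fixture parameterized by the cache implementation name (kLRU or
// kClock). Keys and values are ints, converted with the helpers above, and
// every deletion is recorded so tests can assert exactly when a deleter ran.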
class CacheTest : public testing::TestWithParam<std::string> {
 public:
  static CacheTest* current_;

  static void Deleter(const Slice& key, void* v) {
    current_->deleted_keys_.push_back(DecodeKey(key));
    current_->deleted_values_.push_back(DecodeValue(v));
  }

  static const int kCacheSize = 1000;
  static const int kNumShardBits = 4;

  static const int kCacheSize2 = 100;
  static const int kNumShardBits2 = 2;

  std::vector<int> deleted_keys_;
  std::vector<int> deleted_values_;
  std::shared_ptr<Cache> cache_;
  std::shared_ptr<Cache> cache2_;

  CacheTest()
      : cache_(NewCache(kCacheSize, kNumShardBits, false)),
        cache2_(NewCache(kCacheSize2, kNumShardBits2, false)) {
    current_ = this;
  }

  ~CacheTest() override {}

  std::shared_ptr<Cache> NewCache(size_t capacity) {
    auto type = GetParam();
    if (type == kLRU) {
      return NewLRUCache(capacity);
    }
    if (type == kClock) {
      return NewClockCache(capacity);
    }
    return nullptr;
  }

  std::shared_ptr<Cache> NewCache(
      size_t capacity, int num_shard_bits, bool strict_capacity_limit,
      CacheMetadataChargePolicy charge_policy = kDontChargeCacheMetadata) {
    auto type = GetParam();
    if (type == kLRU) {
      LRUCacheOptions co;
      co.capacity = capacity;
      co.num_shard_bits = num_shard_bits;
      co.strict_capacity_limit = strict_capacity_limit;
      co.high_pri_pool_ratio = 0;
      co.metadata_charge_policy = charge_policy;
      return NewLRUCache(co);
    }
    if (type == kClock) {
      return NewClockCache(capacity, num_shard_bits, strict_capacity_limit,
                           charge_policy);
    }
    return nullptr;
  }

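  // Helpers mapping int keys/values onto the Cache API. Lookup returns -1 on
  // a miss and releases the handle it acquires, so it leaves the entry's pin
  // count unchanged.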
  int Lookup(std::shared_ptr<Cache> cache, int key) {
    Cache::Handle* handle = cache->Lookup(EncodeKey(key));
    const int r = (handle == nullptr) ? -1 : DecodeValue(cache->Value(handle));
    if (handle != nullptr) {
      cache->Release(handle);
    }
    return r;
  }

  void Insert(std::shared_ptr<Cache> cache, int key, int value,
              int charge = 1) {
    cache->Insert(EncodeKey(key), EncodeValue(value), charge,
                  &CacheTest::Deleter);
  }

  void Erase(std::shared_ptr<Cache> cache, int key) {
    cache->Erase(EncodeKey(key));
  }

  int Lookup(int key) {
    return Lookup(cache_, key);
  }

  void Insert(int key, int value, int charge = 1) {
    Insert(cache_, key, value, charge);
  }

  void Erase(int key) {
    Erase(cache_, key);
  }

  int Lookup2(int key) {
    return Lookup(cache2_, key);
  }

  void Insert2(int key, int value, int charge = 1) {
    Insert(cache2_, key, value, charge);
  }

  void Erase2(int key) {
    Erase(cache2_, key);
  }
};
CacheTest* CacheTest::current_;

class LRUCacheTest : public CacheTest {};

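// GetUsage() should track the sum of inserted charges exactly when metadata
// is not charged, and exceed that sum when metadata is fully charged.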
TEST_P(CacheTest, UsageTest) {
  // cache is std::shared_ptr and will be automatically cleaned up.
  const uint64_t kCapacity = 100000;
  auto cache = NewCache(kCapacity, 8, false, kDontChargeCacheMetadata);
  auto precise_cache = NewCache(kCapacity, 0, false, kFullChargeCacheMetadata);
  ASSERT_EQ(0, cache->GetUsage());
  ASSERT_EQ(0, precise_cache->GetUsage());

  size_t usage = 0;
  char value[10] = "abcdef";
  // make sure everything will be cached
  for (int i = 1; i < 100; ++i) {
    std::string key(i, 'a');
    auto kv_size = key.size() + 5;
    cache->Insert(key, reinterpret_cast<void*>(value), kv_size, dumbDeleter);
    precise_cache->Insert(key, reinterpret_cast<void*>(value), kv_size,
                          dumbDeleter);
    usage += kv_size;
    ASSERT_EQ(usage, cache->GetUsage());
    ASSERT_LT(usage, precise_cache->GetUsage());
  }

  cache->EraseUnRefEntries();
  precise_cache->EraseUnRefEntries();
  ASSERT_EQ(0, cache->GetUsage());
  ASSERT_EQ(0, precise_cache->GetUsage());

  // make sure the cache will be overloaded
  for (uint64_t i = 1; i < kCapacity; ++i) {
    auto key = ToString(i);
    cache->Insert(key, reinterpret_cast<void*>(value), key.size() + 5,
                  dumbDeleter);
    precise_cache->Insert(key, reinterpret_cast<void*>(value), key.size() + 5,
                          dumbDeleter);
  }

  // the usage should be close to the capacity
  ASSERT_GT(kCapacity, cache->GetUsage());
  ASSERT_GT(kCapacity, precise_cache->GetUsage());
  ASSERT_LT(kCapacity * 0.95, cache->GetUsage());
  ASSERT_LT(kCapacity * 0.95, precise_cache->GetUsage());
}

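// GetPinnedUsage() should count only entries with outstanding handles, and
// must be unaffected by evicting or erasing unpinned entries.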
TEST_P(CacheTest, PinnedUsageTest) {
  // cache is std::shared_ptr and will be automatically cleaned up.
  const uint64_t kCapacity = 200000;
  auto cache = NewCache(kCapacity, 8, false, kDontChargeCacheMetadata);
  auto precise_cache = NewCache(kCapacity, 8, false, kFullChargeCacheMetadata);

  size_t pinned_usage = 0;
  char value[10] = "abcdef";

  std::forward_list<Cache::Handle*> unreleased_handles;
  std::forward_list<Cache::Handle*> unreleased_handles_in_precise_cache;

  // Add entries. Unpin some of them after insertion. Then, pin some of them
  // again. Check GetPinnedUsage().
  for (int i = 1; i < 100; ++i) {
    std::string key(i, 'a');
    auto kv_size = key.size() + 5;
    Cache::Handle* handle;
    Cache::Handle* handle_in_precise_cache;
    cache->Insert(key, reinterpret_cast<void*>(value), kv_size, dumbDeleter,
                  &handle);
    assert(handle);
    precise_cache->Insert(key, reinterpret_cast<void*>(value), kv_size,
                          dumbDeleter, &handle_in_precise_cache);
    assert(handle_in_precise_cache);
    pinned_usage += kv_size;
    ASSERT_EQ(pinned_usage, cache->GetPinnedUsage());
    ASSERT_LT(pinned_usage, precise_cache->GetPinnedUsage());
    if (i % 2 == 0) {
      cache->Release(handle);
      precise_cache->Release(handle_in_precise_cache);
      pinned_usage -= kv_size;
      ASSERT_EQ(pinned_usage, cache->GetPinnedUsage());
      ASSERT_LT(pinned_usage, precise_cache->GetPinnedUsage());
    } else {
      unreleased_handles.push_front(handle);
      unreleased_handles_in_precise_cache.push_front(handle_in_precise_cache);
    }
    if (i % 3 == 0) {
      unreleased_handles.push_front(cache->Lookup(key));
      auto x = precise_cache->Lookup(key);
      assert(x);
      unreleased_handles_in_precise_cache.push_front(x);
      // If i % 2 == 0, then the entry was unpinned before Lookup, so pinned
      // usage increased
      if (i % 2 == 0) {
        pinned_usage += kv_size;
      }
      ASSERT_EQ(pinned_usage, cache->GetPinnedUsage());
      ASSERT_LT(pinned_usage, precise_cache->GetPinnedUsage());
    }
  }
  auto precise_cache_pinned_usage = precise_cache->GetPinnedUsage();
  ASSERT_LT(pinned_usage, precise_cache_pinned_usage);

  // check that overloading the cache does not change the pinned usage
  for (uint64_t i = 1; i < 2 * kCapacity; ++i) {
    auto key = ToString(i);
    cache->Insert(key, reinterpret_cast<void*>(value), key.size() + 5,
                  dumbDeleter);
    precise_cache->Insert(key, reinterpret_cast<void*>(value), key.size() + 5,
                          dumbDeleter);
  }
  ASSERT_EQ(pinned_usage, cache->GetPinnedUsage());
  ASSERT_EQ(precise_cache_pinned_usage, precise_cache->GetPinnedUsage());

  cache->EraseUnRefEntries();
  precise_cache->EraseUnRefEntries();
  ASSERT_EQ(pinned_usage, cache->GetPinnedUsage());
  ASSERT_EQ(precise_cache_pinned_usage, precise_cache->GetPinnedUsage());

  // release handles for pinned entries to prevent memory leaks
  for (auto handle : unreleased_handles) {
    cache->Release(handle);
  }
  for (auto handle : unreleased_handles_in_precise_cache) {
    precise_cache->Release(handle);
  }
  ASSERT_EQ(0, cache->GetPinnedUsage());
  ASSERT_EQ(0, precise_cache->GetPinnedUsage());
  cache->EraseUnRefEntries();
  precise_cache->EraseUnRefEntries();
  ASSERT_EQ(0, cache->GetUsage());
  ASSERT_EQ(0, precise_cache->GetUsage());
}

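// Basic hit/miss behavior, plus deleter invocation when an existing key's
// value is replaced.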
TEST_P(CacheTest, HitAndMiss) {
  ASSERT_EQ(-1, Lookup(100));

  Insert(100, 101);
  ASSERT_EQ(101, Lookup(100));
  ASSERT_EQ(-1, Lookup(200));
  ASSERT_EQ(-1, Lookup(300));

  Insert(200, 201);
  ASSERT_EQ(101, Lookup(100));
  ASSERT_EQ(201, Lookup(200));
  ASSERT_EQ(-1, Lookup(300));

  Insert(100, 102);
  ASSERT_EQ(102, Lookup(100));
  ASSERT_EQ(201, Lookup(200));
  ASSERT_EQ(-1, Lookup(300));

  ASSERT_EQ(1U, deleted_keys_.size());
  ASSERT_EQ(100, deleted_keys_[0]);
  ASSERT_EQ(101, deleted_values_[0]);
}

TEST_P(CacheTest, InsertSameKey) {
  Insert(1, 1);
  Insert(1, 2);
  ASSERT_EQ(2, Lookup(1));
}

TEST_P(CacheTest, Erase) {
  Erase(200);
  ASSERT_EQ(0U, deleted_keys_.size());

  Insert(100, 101);
  Insert(200, 201);
  Erase(100);
  ASSERT_EQ(-1, Lookup(100));
  ASSERT_EQ(201, Lookup(200));
  ASSERT_EQ(1U, deleted_keys_.size());
  ASSERT_EQ(100, deleted_keys_[0]);
  ASSERT_EQ(101, deleted_values_[0]);

  Erase(100);
  ASSERT_EQ(-1, Lookup(100));
  ASSERT_EQ(201, Lookup(200));
  ASSERT_EQ(1U, deleted_keys_.size());
}

TEST_P(CacheTest, EntriesArePinned) {
  Insert(100, 101);
  Cache::Handle* h1 = cache_->Lookup(EncodeKey(100));
  ASSERT_EQ(101, DecodeValue(cache_->Value(h1)));
  ASSERT_EQ(1U, cache_->GetUsage());

  Insert(100, 102);
  Cache::Handle* h2 = cache_->Lookup(EncodeKey(100));
  ASSERT_EQ(102, DecodeValue(cache_->Value(h2)));
  ASSERT_EQ(0U, deleted_keys_.size());
  ASSERT_EQ(2U, cache_->GetUsage());

  cache_->Release(h1);
  ASSERT_EQ(1U, deleted_keys_.size());
  ASSERT_EQ(100, deleted_keys_[0]);
  ASSERT_EQ(101, deleted_values_[0]);
  ASSERT_EQ(1U, cache_->GetUsage());

  Erase(100);
  ASSERT_EQ(-1, Lookup(100));
  ASSERT_EQ(1U, deleted_keys_.size());
  ASSERT_EQ(1U, cache_->GetUsage());

  cache_->Release(h2);
  ASSERT_EQ(2U, deleted_keys_.size());
  ASSERT_EQ(100, deleted_keys_[1]);
  ASSERT_EQ(102, deleted_values_[1]);
  ASSERT_EQ(0U, cache_->GetUsage());
}

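// An entry that is touched on every insertion survives eviction pressure,
// while an idle entry is evicted.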
TEST_P(CacheTest, EvictionPolicy) {
  Insert(100, 101);
  Insert(200, 201);

  // Frequently used entry must be kept around
  for (int i = 0; i < kCacheSize * 2; i++) {
    Insert(1000 + i, 2000 + i);
    ASSERT_EQ(101, Lookup(100));
  }
  ASSERT_EQ(101, Lookup(100));
  ASSERT_EQ(-1, Lookup(200));
}

TEST_P(CacheTest, ExternalRefPinsEntries) {
  Insert(100, 101);
  Cache::Handle* h = cache_->Lookup(EncodeKey(100));
  ASSERT_TRUE(cache_->Ref(h));
  ASSERT_EQ(101, DecodeValue(cache_->Value(h)));
  ASSERT_EQ(1U, cache_->GetUsage());

  for (int i = 0; i < 3; ++i) {
    if (i > 0) {
      // First release (i == 1) corresponds to Ref(), second release (i == 2)
      // corresponds to Lookup(). Then, since all external refs are released,
      // the below insertions should push out the cache entry.
      cache_->Release(h);
    }
    // double cache size because the usage bit in block cache prevents 100 from
    // being evicted in the first kCacheSize iterations
    for (int j = 0; j < 2 * kCacheSize + 100; j++) {
      Insert(1000 + j, 2000 + j);
    }
    if (i < 2) {
      ASSERT_EQ(101, Lookup(100));
    }
  }
  ASSERT_EQ(-1, Lookup(100));
}

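// Entries pinned by an outstanding handle must survive eviction pressure;
// unpinned entries inserted at the same time are evicted.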
TEST_P(CacheTest, EvictionPolicyRef) {
  Insert(100, 101);
  Insert(101, 102);
  Insert(102, 103);
  Insert(103, 104);
  Insert(200, 101);
  Insert(201, 102);
  Insert(202, 103);
  Insert(203, 104);
  Cache::Handle* h201 = cache_->Lookup(EncodeKey(200));
  Cache::Handle* h202 = cache_->Lookup(EncodeKey(201));
  Cache::Handle* h203 = cache_->Lookup(EncodeKey(202));
  Cache::Handle* h204 = cache_->Lookup(EncodeKey(203));
  Insert(300, 101);
  Insert(301, 102);
  Insert(302, 103);
  Insert(303, 104);

  // Insert many more entries than the cache capacity
  for (int i = 0; i < kCacheSize * 2; i++) {
    Insert(1000 + i, 2000 + i);
  }

  // Check the entries inserted at the beginning: those without an extra ref
  // have been evicted, while those still pinned by a handle have not.
  ASSERT_EQ(-1, Lookup(100));
  ASSERT_EQ(-1, Lookup(101));
  ASSERT_EQ(-1, Lookup(102));
  ASSERT_EQ(-1, Lookup(103));

  ASSERT_EQ(-1, Lookup(300));
  ASSERT_EQ(-1, Lookup(301));
  ASSERT_EQ(-1, Lookup(302));
  ASSERT_EQ(-1, Lookup(303));

  ASSERT_EQ(101, Lookup(200));
  ASSERT_EQ(102, Lookup(201));
  ASSERT_EQ(103, Lookup(202));
  ASSERT_EQ(104, Lookup(203));

  // Clean up all the handles
  cache_->Release(h201);
  cache_->Release(h202);
  cache_->Release(h203);
  cache_->Release(h204);
}

TEST_P(CacheTest, EvictEmptyCache) {
  // Insert an item larger than the capacity to trigger eviction on an empty
  // cache.
  auto cache = NewCache(1, 0, false);
  ASSERT_OK(cache->Insert("foo", nullptr, 10, dumbDeleter));
}

TEST_P(CacheTest, EraseFromDeleter) {
  // Use a deleter that erases another item from the cache, re-entering the
  // cache from inside a deleter call.
  std::shared_ptr<Cache> cache = NewCache(10, 0, false);
  ASSERT_OK(cache->Insert("foo", nullptr, 1, dumbDeleter));
  ASSERT_OK(cache->Insert("bar", cache.get(), 1, eraseDeleter));
  cache->Erase("bar");
  ASSERT_EQ(nullptr, cache->Lookup("foo"));
  ASSERT_EQ(nullptr, cache->Lookup("bar"));
}

TEST_P(CacheTest, ErasedHandleState) {
  // insert a key and get two handles
  Insert(100, 1000);
  Cache::Handle* h1 = cache_->Lookup(EncodeKey(100));
  Cache::Handle* h2 = cache_->Lookup(EncodeKey(100));
  ASSERT_EQ(h1, h2);
  ASSERT_EQ(DecodeValue(cache_->Value(h1)), 1000);
  ASSERT_EQ(DecodeValue(cache_->Value(h2)), 1000);

  // delete the key from the cache
  Erase(100);
  // can no longer find in the cache
  ASSERT_EQ(-1, Lookup(100));

  // release one handle
  cache_->Release(h1);
  // still can't find in cache
  ASSERT_EQ(-1, Lookup(100));

  cache_->Release(h2);
}

TEST_P(CacheTest, HeavyEntries) {
  // Add a bunch of light and heavy entries and then count the combined
  // size of items still in the cache, which must be approximately the
  // same as the total capacity.
  const int kLight = 1;
  const int kHeavy = 10;
  int added = 0;
  int index = 0;
  while (added < 2 * kCacheSize) {
    const int weight = (index & 1) ? kLight : kHeavy;
    Insert(index, 1000 + index, weight);
    added += weight;
    index++;
  }

  int cached_weight = 0;
  for (int i = 0; i < index; i++) {
    const int weight = (i & 1 ? kLight : kHeavy);
    int r = Lookup(i);
    if (r >= 0) {
      cached_weight += weight;
      ASSERT_EQ(1000 + i, r);
    }
  }
  ASSERT_LE(cached_weight, kCacheSize + kCacheSize / 10);
}

TEST_P(CacheTest, NewId) {
  uint64_t a = cache_->NewId();
  uint64_t b = cache_->NewId();
  ASSERT_NE(a, b);
}

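// A heap-allocated test value paired with a deleter that actually frees it,
// for tests that insert real objects rather than encoded ints.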
class Value {
 public:
  explicit Value(size_t v) : v_(v) {}

  size_t v_;
};

namespace {
void deleter(const Slice& /*key*/, void* value) {
  delete static_cast<Value*>(value);
}
}  // namespace

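// Release(handle, true) should erase the entry and invoke its deleter
// immediately; here the released handle holds the only reference.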
TEST_P(CacheTest, ReleaseAndErase) {
  std::shared_ptr<Cache> cache = NewCache(5, 0, false);
  Cache::Handle* handle;
  Status s = cache->Insert(EncodeKey(100), EncodeValue(100), 1,
                           &CacheTest::Deleter, &handle);
  ASSERT_TRUE(s.ok());
  ASSERT_EQ(5U, cache->GetCapacity());
  ASSERT_EQ(1U, cache->GetUsage());
  ASSERT_EQ(0U, deleted_keys_.size());
  auto erased = cache->Release(handle, true);
  ASSERT_TRUE(erased);
  // This verifies that the deleter has been called
  ASSERT_EQ(1U, deleted_keys_.size());
}


TEST_P(CacheTest, ReleaseWithoutErase) {
  std::shared_ptr<Cache> cache = NewCache(5, 0, false);
  Cache::Handle* handle;
  Status s = cache->Insert(EncodeKey(100), EncodeValue(100), 1,
                           &CacheTest::Deleter, &handle);
  ASSERT_TRUE(s.ok());
  ASSERT_EQ(5U, cache->GetCapacity());
  ASSERT_EQ(1U, cache->GetUsage());
  ASSERT_EQ(0U, deleted_keys_.size());
  auto erased = cache->Release(handle);
  ASSERT_FALSE(erased);
  // This verifies that the deleter is not called. While the cache has free
  // capacity, it is not expected to erase released items immediately.
  ASSERT_EQ(0U, deleted_keys_.size());
}

TEST_P(CacheTest, SetCapacity) {
  // test1: increase capacity
  // Create a cache with capacity 5, insert 5 elements, then increase the
  // capacity to 10; the returned capacity should be 10 and usage should
  // stay at 5.
  std::shared_ptr<Cache> cache = NewCache(5, 0, false);
  std::vector<Cache::Handle*> handles(10);
  // Insert 5 entries, but not releasing.
  for (size_t i = 0; i < 5; i++) {
    std::string key = ToString(i + 1);
    Status s = cache->Insert(key, new Value(i + 1), 1, &deleter, &handles[i]);
    ASSERT_TRUE(s.ok());
  }
  ASSERT_EQ(5U, cache->GetCapacity());
  ASSERT_EQ(5U, cache->GetUsage());
  cache->SetCapacity(10);
  ASSERT_EQ(10U, cache->GetCapacity());
  ASSERT_EQ(5U, cache->GetUsage());

  // test2: decrease capacity
  // Insert 5 more elements into the cache, release the first 5, then decrease
  // the capacity to 7; the final capacity and usage should both be 7.
  for (size_t i = 5; i < 10; i++) {
    std::string key = ToString(i + 1);
    Status s = cache->Insert(key, new Value(i + 1), 1, &deleter, &handles[i]);
    ASSERT_TRUE(s.ok());
  }
  ASSERT_EQ(10U, cache->GetCapacity());
  ASSERT_EQ(10U, cache->GetUsage());
  for (size_t i = 0; i < 5; i++) {
    cache->Release(handles[i]);
  }
  ASSERT_EQ(10U, cache->GetCapacity());
  ASSERT_EQ(10U, cache->GetUsage());
  cache->SetCapacity(7);
  ASSERT_EQ(7, cache->GetCapacity());
  ASSERT_EQ(7, cache->GetUsage());

  // release the remaining 5 handles to keep valgrind happy
  for (size_t i = 5; i < 10; i++) {
    cache->Release(handles[i]);
  }
}

TEST_P(LRUCacheTest, SetStrictCapacityLimit) {
  // test1: set the flag to false. Insert more keys than capacity. See if they
  // all go through.
  std::shared_ptr<Cache> cache = NewCache(5, 0, false);
  std::vector<Cache::Handle*> handles(10);
  Status s;
  for (size_t i = 0; i < 10; i++) {
    std::string key = ToString(i + 1);
    s = cache->Insert(key, new Value(i + 1), 1, &deleter, &handles[i]);
    ASSERT_OK(s);
    ASSERT_NE(nullptr, handles[i]);
  }
  ASSERT_EQ(10, cache->GetUsage());

  // test2: set the flag to true. Insert and check if it fails.
  std::string extra_key = "extra";
  Value* extra_value = new Value(0);
  cache->SetStrictCapacityLimit(true);
  Cache::Handle* handle;
  s = cache->Insert(extra_key, extra_value, 1, &deleter, &handle);
  ASSERT_TRUE(s.IsIncomplete());
  ASSERT_EQ(nullptr, handle);
  ASSERT_EQ(10, cache->GetUsage());

  for (size_t i = 0; i < 10; i++) {
    cache->Release(handles[i]);
  }

  // test3: init with flag being true.
  std::shared_ptr<Cache> cache2 = NewCache(5, 0, true);
  for (size_t i = 0; i < 5; i++) {
    std::string key = ToString(i + 1);
    s = cache2->Insert(key, new Value(i + 1), 1, &deleter, &handles[i]);
    ASSERT_OK(s);
    ASSERT_NE(nullptr, handles[i]);
  }
  s = cache2->Insert(extra_key, extra_value, 1, &deleter, &handle);
  ASSERT_TRUE(s.IsIncomplete());
  ASSERT_EQ(nullptr, handle);
  // test insert without handle
  s = cache2->Insert(extra_key, extra_value, 1, &deleter);
  // As if the key had been inserted into the cache and then evicted
  // immediately.
  ASSERT_OK(s);
  ASSERT_EQ(5, cache2->GetUsage());
  ASSERT_EQ(nullptr, cache2->Lookup(extra_key));

  for (size_t i = 0; i < 5; i++) {
    cache2->Release(handles[i]);
  }
}

TEST_P(CacheTest, OverCapacity) {
  size_t n = 10;

  // a cache with capacity n and a single shard
  std::shared_ptr<Cache> cache = NewCache(n, 0, false);

  std::vector<Cache::Handle*> handles(n + 1);

  // Insert n+1 entries, but not releasing.
  for (size_t i = 0; i < n + 1; i++) {
    std::string key = ToString(i + 1);
    Status s = cache->Insert(key, new Value(i + 1), 1, &deleter, &handles[i]);
    ASSERT_TRUE(s.ok());
  }

  // All n+1 entries should still be present: every one is pinned by its
  // handle, so none could be evicted.
  for (size_t i = 0; i < n + 1; i++) {
    std::string key = ToString(i + 1);
    auto h = cache->Lookup(key);
    ASSERT_TRUE(h != nullptr);
    if (h) cache->Release(h);
  }

  // the cache is over capacity since nothing could be evicted
  ASSERT_EQ(n + 1U, cache->GetUsage());
  for (size_t i = 0; i < n + 1; i++) {
    cache->Release(handles[i]);
  }
  // Make sure eviction is triggered.
  cache->SetCapacity(n);

  // cache is back at capacity now that the elements were released
  ASSERT_EQ(n, cache->GetUsage());

  // element 0 is evicted and the rest are there
  // This is consistent with the LRU policy since element 0
  // was released first
  for (size_t i = 0; i < n + 1; i++) {
    std::string key = ToString(i + 1);
    auto h = cache->Lookup(key);
    if (h) {
      ASSERT_NE(i, 0U);
      cache->Release(h);
    } else {
      ASSERT_EQ(i, 0U);
    }
  }
}

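// Records every (value, charge) pair handed to ApplyToAllCacheEntries so the
// test below can compare it against what was inserted.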
namespace {
std::vector<std::pair<int, int>> callback_state;
void callback(void* entry, size_t charge) {
  callback_state.push_back({DecodeValue(entry), static_cast<int>(charge)});
}
}  // namespace

TEST_P(CacheTest, ApplyToAllCacheEntriesTest) {
  std::vector<std::pair<int, int>> inserted;
  callback_state.clear();

  for (int i = 0; i < 10; ++i) {
    Insert(i, i * 2, i + 1);
    inserted.push_back({i * 2, i + 1});
  }
  cache_->ApplyToAllCacheEntries(callback, true);

  std::sort(inserted.begin(), inserted.end());
  std::sort(callback_state.begin(), callback_state.end());
  ASSERT_TRUE(inserted == callback_state);
}

TEST_P(CacheTest, DefaultShardBits) {
  // The default number of shard bits should be derived from the capacity:
  // larger caches get more shards.
  std::shared_ptr<Cache> cache = NewCache(16 * 1024L * 1024L);
  ShardedCache* sc = dynamic_cast<ShardedCache*>(cache.get());
  ASSERT_EQ(5, sc->GetNumShardBits());

  cache = NewLRUCache(511 * 1024L, -1, true);
  sc = dynamic_cast<ShardedCache*>(cache.get());
  ASSERT_EQ(0, sc->GetNumShardBits());

  cache = NewLRUCache(1024L * 1024L * 1024L, -1, true);
  sc = dynamic_cast<ShardedCache*>(cache.get());
  ASSERT_EQ(6, sc->GetNumShardBits());
}

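// GetCharge() should return the charge that was supplied at insert time.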
TEST_P(CacheTest, GetCharge) {
  Insert(1, 2);
  Cache::Handle* h1 = cache_->Lookup(EncodeKey(1));
  ASSERT_EQ(2, DecodeValue(cache_->Value(h1)));
  ASSERT_EQ(1, cache_->GetCharge(h1));
  cache_->Release(h1);
}

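// Run every CacheTest against both implementations when the clock cache is
// compiled in; LRUCacheTest always runs against the LRU cache only.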
#ifdef SUPPORT_CLOCK_CACHE
std::shared_ptr<Cache> (*new_clock_cache_func)(
    size_t, int, bool, CacheMetadataChargePolicy) = NewClockCache;
INSTANTIATE_TEST_CASE_P(CacheTestInstance, CacheTest,
                        testing::Values(kLRU, kClock));
#else
INSTANTIATE_TEST_CASE_P(CacheTestInstance, CacheTest, testing::Values(kLRU));
#endif  // SUPPORT_CLOCK_CACHE
INSTANTIATE_TEST_CASE_P(CacheTestInstance, LRUCacheTest, testing::Values(kLRU));

}  // namespace ROCKSDB_NAMESPACE

int main(int argc, char** argv) {
  ::testing::InitGoogleTest(&argc, argv);
  return RUN_ALL_TESTS();
}