// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
// This source code is licensed under both the GPLv2 (found in the
// COPYING file in the root directory) and Apache 2.0 License
// (found in the LICENSE.Apache file in the root directory).
//
// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file. See the AUTHORS file for names of contributors.

#include "rocksdb/cache.h"

#include <algorithm>
#include <forward_list>
#include <functional>
#include <iostream>
#include <string>
#include <vector>
#include "cache/clock_cache.h"
#include "cache/lru_cache.h"
#include "util/coding.h"
#include "util/string_util.h"
#include "util/testharness.h"

namespace rocksdb {

// Conversions between numeric keys/values and the types expected by Cache.
static std::string EncodeKey(int k) {
  std::string result;
  PutFixed32(&result, k);
  return result;
}
static int DecodeKey(const Slice& k) {
  assert(k.size() == 4);
  return DecodeFixed32(k.data());
}
static void* EncodeValue(uintptr_t v) { return reinterpret_cast<void*>(v); }
static int DecodeValue(void* v) {
  return static_cast<int>(reinterpret_cast<uintptr_t>(v));
}

const std::string kLRU = "lru";
const std::string kClock = "clock";

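// Standalone deleters used by the tests below. dumbDeleter drops the entry
// without freeing anything (the tests pass stack buffers or encoded integers
// as "values"); eraseDeleter re-enters the cache from inside a deleter
// callback by erasing the entry "foo", which EraseFromDeleter uses to check
// that re-entrant Erase() calls are safe.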
void dumbDeleter(const Slice& /*key*/, void* /*value*/) {}

void eraseDeleter(const Slice& /*key*/, void* value) {
  Cache* cache = reinterpret_cast<Cache*>(value);
  cache->Erase("foo");
}

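// Parameterized fixture: the parameter selects the cache implementation under
// test (kLRU or kClock). Two caches with different capacities and shard
// counts are created per test; the static Deleter records the keys and values
// of deleted entries in the current test instance.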
class CacheTest : public testing::TestWithParam<std::string> {
 public:
  static CacheTest* current_;

  static void Deleter(const Slice& key, void* v) {
    current_->deleted_keys_.push_back(DecodeKey(key));
    current_->deleted_values_.push_back(DecodeValue(v));
  }

  static const int kCacheSize = 1000;
  static const int kNumShardBits = 4;

  static const int kCacheSize2 = 100;
  static const int kNumShardBits2 = 2;

  std::vector<int> deleted_keys_;
  std::vector<int> deleted_values_;
  std::shared_ptr<Cache> cache_;
  std::shared_ptr<Cache> cache2_;

  CacheTest()
      : cache_(NewCache(kCacheSize, kNumShardBits, false)),
        cache2_(NewCache(kCacheSize2, kNumShardBits2, false)) {
    current_ = this;
  }

  ~CacheTest() {
  }

  std::shared_ptr<Cache> NewCache(size_t capacity) {
    auto type = GetParam();
    if (type == kLRU) {
      return NewLRUCache(capacity);
    }
    if (type == kClock) {
      return NewClockCache(capacity);
    }
    return nullptr;
  }

  std::shared_ptr<Cache> NewCache(size_t capacity, int num_shard_bits,
                                  bool strict_capacity_limit) {
    auto type = GetParam();
    if (type == kLRU) {
      return NewLRUCache(capacity, num_shard_bits, strict_capacity_limit);
    }
    if (type == kClock) {
      return NewClockCache(capacity, num_shard_bits, strict_capacity_limit);
    }
    return nullptr;
  }

  int Lookup(std::shared_ptr<Cache> cache, int key) {
    Cache::Handle* handle = cache->Lookup(EncodeKey(key));
    const int r = (handle == nullptr) ? -1 : DecodeValue(cache->Value(handle));
    if (handle != nullptr) {
      cache->Release(handle);
    }
    return r;
  }

  void Insert(std::shared_ptr<Cache> cache, int key, int value,
              int charge = 1) {
    cache->Insert(EncodeKey(key), EncodeValue(value), charge,
                  &CacheTest::Deleter);
  }

  void Erase(std::shared_ptr<Cache> cache, int key) {
    cache->Erase(EncodeKey(key));
  }

  int Lookup(int key) {
    return Lookup(cache_, key);
  }

  void Insert(int key, int value, int charge = 1) {
    Insert(cache_, key, value, charge);
  }

  void Erase(int key) {
    Erase(cache_, key);
  }

  int Lookup2(int key) {
    return Lookup(cache2_, key);
  }

  void Insert2(int key, int value, int charge = 1) {
    Insert(cache2_, key, value, charge);
  }

  void Erase2(int key) {
    Erase(cache2_, key);
  }
};
CacheTest* CacheTest::current_;

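// GetUsage() should track the total charge of the entries currently in the
// cache, and stay close to (but below) capacity once the cache is overloaded.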
TEST_P(CacheTest, UsageTest) {
  // cache is shared_ptr and will be automatically cleaned up.
  const uint64_t kCapacity = 100000;
  auto cache = NewCache(kCapacity, 8, false);

  size_t usage = 0;
  char value[10] = "abcdef";
  // make sure everything will be cached
  for (int i = 1; i < 100; ++i) {
    std::string key(i, 'a');
    auto kv_size = key.size() + 5;
    cache->Insert(key, reinterpret_cast<void*>(value), kv_size, dumbDeleter);
    usage += kv_size;
    ASSERT_EQ(usage, cache->GetUsage());
  }

  // make sure the cache will be overloaded
  for (uint64_t i = 1; i < kCapacity; ++i) {
    auto key = ToString(i);
    cache->Insert(key, reinterpret_cast<void*>(value), key.size() + 5,
                  dumbDeleter);
  }

  // the usage should be close to the capacity
  ASSERT_GT(kCapacity, cache->GetUsage());
  ASSERT_LT(kCapacity * 0.95, cache->GetUsage());
}

TEST_P(CacheTest, PinnedUsageTest) {
  // cache is shared_ptr and will be automatically cleaned up.
  const uint64_t kCapacity = 100000;
  auto cache = NewCache(kCapacity, 8, false);

  size_t pinned_usage = 0;
  char value[10] = "abcdef";

  std::forward_list<Cache::Handle*> unreleased_handles;

  // Add entries. Unpin some of them after insertion. Then, pin some of them
  // again. Check GetPinnedUsage().
  for (int i = 1; i < 100; ++i) {
    std::string key(i, 'a');
    auto kv_size = key.size() + 5;
    Cache::Handle* handle;
    cache->Insert(key, reinterpret_cast<void*>(value), kv_size, dumbDeleter,
                  &handle);
    pinned_usage += kv_size;
    ASSERT_EQ(pinned_usage, cache->GetPinnedUsage());
    if (i % 2 == 0) {
      cache->Release(handle);
      pinned_usage -= kv_size;
      ASSERT_EQ(pinned_usage, cache->GetPinnedUsage());
    } else {
      unreleased_handles.push_front(handle);
    }
    if (i % 3 == 0) {
      unreleased_handles.push_front(cache->Lookup(key));
      // If i % 2 == 0, then the entry was unpinned before Lookup, so pinned
      // usage increased
      if (i % 2 == 0) {
        pinned_usage += kv_size;
      }
      ASSERT_EQ(pinned_usage, cache->GetPinnedUsage());
    }
  }

  // check that overloading the cache does not change the pinned usage
  for (uint64_t i = 1; i < 2 * kCapacity; ++i) {
    auto key = ToString(i);
    cache->Insert(key, reinterpret_cast<void*>(value), key.size() + 5,
                  dumbDeleter);
  }
  ASSERT_EQ(pinned_usage, cache->GetPinnedUsage());

  // release handles for pinned entries to prevent memory leaks
  for (auto handle : unreleased_handles) {
    cache->Release(handle);
  }
}

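// Basic hit/miss behavior: lookups of absent keys return -1, re-inserting an
// existing key replaces its value, and the replaced entry's deleter fires as
// soon as the old entry is dropped.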
TEST_P(CacheTest, HitAndMiss) {
  ASSERT_EQ(-1, Lookup(100));

  Insert(100, 101);
  ASSERT_EQ(101, Lookup(100));
  ASSERT_EQ(-1, Lookup(200));
  ASSERT_EQ(-1, Lookup(300));

  Insert(200, 201);
  ASSERT_EQ(101, Lookup(100));
  ASSERT_EQ(201, Lookup(200));
  ASSERT_EQ(-1, Lookup(300));

  Insert(100, 102);
  ASSERT_EQ(102, Lookup(100));
  ASSERT_EQ(201, Lookup(200));
  ASSERT_EQ(-1, Lookup(300));

  ASSERT_EQ(1U, deleted_keys_.size());
  ASSERT_EQ(100, deleted_keys_[0]);
  ASSERT_EQ(101, deleted_values_[0]);
}

TEST_P(CacheTest, InsertSameKey) {
  Insert(1, 1);
  Insert(1, 2);
  ASSERT_EQ(2, Lookup(1));
}

TEST_P(CacheTest, Erase) {
  Erase(200);
  ASSERT_EQ(0U, deleted_keys_.size());

  Insert(100, 101);
  Insert(200, 201);
  Erase(100);
  ASSERT_EQ(-1, Lookup(100));
  ASSERT_EQ(201, Lookup(200));
  ASSERT_EQ(1U, deleted_keys_.size());
  ASSERT_EQ(100, deleted_keys_[0]);
  ASSERT_EQ(101, deleted_values_[0]);

  Erase(100);
  ASSERT_EQ(-1, Lookup(100));
  ASSERT_EQ(201, Lookup(200));
  ASSERT_EQ(1U, deleted_keys_.size());
}

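// An entry pinned through an outstanding handle keeps contributing to
// GetUsage() even after it has been replaced or erased from the table; its
// deleter only runs once the last handle is released.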
TEST_P(CacheTest, EntriesArePinned) {
  Insert(100, 101);
  Cache::Handle* h1 = cache_->Lookup(EncodeKey(100));
  ASSERT_EQ(101, DecodeValue(cache_->Value(h1)));
  ASSERT_EQ(1U, cache_->GetUsage());

  Insert(100, 102);
  Cache::Handle* h2 = cache_->Lookup(EncodeKey(100));
  ASSERT_EQ(102, DecodeValue(cache_->Value(h2)));
  ASSERT_EQ(0U, deleted_keys_.size());
  ASSERT_EQ(2U, cache_->GetUsage());

  cache_->Release(h1);
  ASSERT_EQ(1U, deleted_keys_.size());
  ASSERT_EQ(100, deleted_keys_[0]);
  ASSERT_EQ(101, deleted_values_[0]);
  ASSERT_EQ(1U, cache_->GetUsage());

  Erase(100);
  ASSERT_EQ(-1, Lookup(100));
  ASSERT_EQ(1U, deleted_keys_.size());
  ASSERT_EQ(1U, cache_->GetUsage());

  cache_->Release(h2);
  ASSERT_EQ(2U, deleted_keys_.size());
  ASSERT_EQ(100, deleted_keys_[1]);
  ASSERT_EQ(102, deleted_values_[1]);
  ASSERT_EQ(0U, cache_->GetUsage());
}

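// An entry that is looked up on every iteration should survive a scan of more
// insertions than the cache can hold, while an untouched entry is evicted.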
TEST_P(CacheTest, EvictionPolicy) {
  Insert(100, 101);
  Insert(200, 201);

  // Frequently used entry must be kept around
  for (int i = 0; i < kCacheSize + 100; i++) {
    Insert(1000 + i, 2000 + i);
    ASSERT_EQ(101, Lookup(100));
  }
  ASSERT_EQ(101, Lookup(100));
  ASSERT_EQ(-1, Lookup(200));
}

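// Ref() takes an additional reference on an entry that is already held
// through a handle; the entry stays resident until every reference has been
// released, no matter how much churn the cache sees in the meantime.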
TEST_P(CacheTest, ExternalRefPinsEntries) {
  Insert(100, 101);
  Cache::Handle* h = cache_->Lookup(EncodeKey(100));
  ASSERT_TRUE(cache_->Ref(h));
  ASSERT_EQ(101, DecodeValue(cache_->Value(h)));
  ASSERT_EQ(1U, cache_->GetUsage());

  for (int i = 0; i < 3; ++i) {
    if (i > 0) {
      // First release (i == 1) corresponds to Ref(), second release (i == 2)
      // corresponds to Lookup(). Then, since all external refs are released,
      // the below insertions should push out the cache entry.
      cache_->Release(h);
    }
    // double cache size because the usage bit in block cache prevents 100 from
    // being evicted in the first kCacheSize iterations
    for (int j = 0; j < 2 * kCacheSize + 100; j++) {
      Insert(1000 + j, 2000 + j);
    }
    if (i < 2) {
      ASSERT_EQ(101, Lookup(100));
    }
  }
  ASSERT_EQ(-1, Lookup(100));
}

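// Same idea from the eviction side: of three batches of early insertions,
// only the batch whose handles are still held should survive an overload.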
TEST_P(CacheTest, EvictionPolicyRef) {
  Insert(100, 101);
  Insert(101, 102);
  Insert(102, 103);
  Insert(103, 104);
  Insert(200, 101);
  Insert(201, 102);
  Insert(202, 103);
  Insert(203, 104);
  Cache::Handle* h200 = cache_->Lookup(EncodeKey(200));
  Cache::Handle* h201 = cache_->Lookup(EncodeKey(201));
  Cache::Handle* h202 = cache_->Lookup(EncodeKey(202));
  Cache::Handle* h203 = cache_->Lookup(EncodeKey(203));
  Insert(300, 101);
  Insert(301, 102);
  Insert(302, 103);
  Insert(303, 104);

  // Insert many more entries than the cache capacity
  for (int i = 0; i < kCacheSize + 100; i++) {
    Insert(1000 + i, 2000 + i);
  }

  // Check which of the entries inserted at the beginning were evicted:
  // those without an extra reference are gone, those with one remain.
  ASSERT_EQ(-1, Lookup(100));
  ASSERT_EQ(-1, Lookup(101));
  ASSERT_EQ(-1, Lookup(102));
  ASSERT_EQ(-1, Lookup(103));

  ASSERT_EQ(-1, Lookup(300));
  ASSERT_EQ(-1, Lookup(301));
  ASSERT_EQ(-1, Lookup(302));
  ASSERT_EQ(-1, Lookup(303));

  ASSERT_EQ(101, Lookup(200));
  ASSERT_EQ(102, Lookup(201));
  ASSERT_EQ(103, Lookup(202));
  ASSERT_EQ(104, Lookup(203));

  // Clean up all the handles
  cache_->Release(h200);
  cache_->Release(h201);
  cache_->Release(h202);
  cache_->Release(h203);
}

TEST_P(CacheTest, EvictEmptyCache) {
  // Insert an item larger than the capacity to trigger eviction on an empty
  // cache.
  auto cache = NewCache(1, 0, false);
  ASSERT_OK(cache->Insert("foo", nullptr, 10, dumbDeleter));
}

TEST_P(CacheTest, EraseFromDeleter) {
  // Use a deleter that erases another item from the cache, so that the cache
  // is re-entered while it is already erasing an entry.
  std::shared_ptr<Cache> cache = NewCache(10, 0, false);
  ASSERT_OK(cache->Insert("foo", nullptr, 1, dumbDeleter));
  ASSERT_OK(cache->Insert("bar", cache.get(), 1, eraseDeleter));
  cache->Erase("bar");
  ASSERT_EQ(nullptr, cache->Lookup("foo"));
  ASSERT_EQ(nullptr, cache->Lookup("bar"));
}

TEST_P(CacheTest, ErasedHandleState) {
  // insert a key and get two handles
  Insert(100, 1000);
  Cache::Handle* h1 = cache_->Lookup(EncodeKey(100));
  Cache::Handle* h2 = cache_->Lookup(EncodeKey(100));
  ASSERT_EQ(h1, h2);
  ASSERT_EQ(DecodeValue(cache_->Value(h1)), 1000);
  ASSERT_EQ(DecodeValue(cache_->Value(h2)), 1000);

  // delete the key from the cache
  Erase(100);
  // can no longer find in the cache
  ASSERT_EQ(-1, Lookup(100));

  // release one handle
  cache_->Release(h1);
  // still can't find in cache
  ASSERT_EQ(-1, Lookup(100));

  cache_->Release(h2);
}

TEST_P(CacheTest, HeavyEntries) {
  // Add a bunch of light and heavy entries and then count the combined
  // size of items still in the cache, which must be approximately the
  // same as the total capacity.
  const int kLight = 1;
  const int kHeavy = 10;
  int added = 0;
  int index = 0;
  while (added < 2 * kCacheSize) {
    const int weight = (index & 1) ? kLight : kHeavy;
    Insert(index, 1000 + index, weight);
    added += weight;
    index++;
  }

  int cached_weight = 0;
  for (int i = 0; i < index; i++) {
    const int weight = (i & 1 ? kLight : kHeavy);
    int r = Lookup(i);
    if (r >= 0) {
      cached_weight += weight;
      ASSERT_EQ(1000 + i, r);
    }
  }
  ASSERT_LE(cached_weight, kCacheSize + kCacheSize / 10);
}

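// NewId() hands out numeric ids that clients sharing a cache can use to
// partition the key space; successive calls must return distinct values.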
TEST_P(CacheTest, NewId) {
  uint64_t a = cache_->NewId();
  uint64_t b = cache_->NewId();
  ASSERT_NE(a, b);
}

class Value {
 public:
  explicit Value(size_t v) : v_(v) {}

  size_t v_;
};

namespace {
void deleter(const Slice& /*key*/, void* value) {
  delete static_cast<Value*>(value);
}
}  // namespace

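// Release(handle, /*force_erase=*/true) should erase the entry and invoke its
// deleter when the released handle was the last reference; plain Release()
// leaves the unreferenced entry cached while there is free capacity.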
TEST_P(CacheTest, ReleaseAndErase) {
  std::shared_ptr<Cache> cache = NewCache(5, 0, false);
  Cache::Handle* handle;
  Status s = cache->Insert(EncodeKey(100), EncodeValue(100), 1,
                           &CacheTest::Deleter, &handle);
  ASSERT_TRUE(s.ok());
  ASSERT_EQ(5U, cache->GetCapacity());
  ASSERT_EQ(1U, cache->GetUsage());
  ASSERT_EQ(0U, deleted_keys_.size());
  auto erased = cache->Release(handle, true);
  ASSERT_TRUE(erased);
  // This tests that the deleter has been called
  ASSERT_EQ(1U, deleted_keys_.size());
}

TEST_P(CacheTest, ReleaseWithoutErase) {
  std::shared_ptr<Cache> cache = NewCache(5, 0, false);
  Cache::Handle* handle;
  Status s = cache->Insert(EncodeKey(100), EncodeValue(100), 1,
                           &CacheTest::Deleter, &handle);
  ASSERT_TRUE(s.ok());
  ASSERT_EQ(5U, cache->GetCapacity());
  ASSERT_EQ(1U, cache->GetUsage());
  ASSERT_EQ(0U, deleted_keys_.size());
  auto erased = cache->Release(handle);
  ASSERT_FALSE(erased);
  // This tests that the deleter is not called: while the cache has free
  // capacity, it is not expected to erase released items immediately.
  ASSERT_EQ(0U, deleted_keys_.size());
}

TEST_P(CacheTest, SetCapacity) {
  // test1: increase capacity
  // Create a cache with capacity 5, insert 5 elements without releasing the
  // handles, then increase the capacity to 10; the reported capacity should
  // be 10 and the usage should stay at 5.
  std::shared_ptr<Cache> cache = NewCache(5, 0, false);
  std::vector<Cache::Handle*> handles(10);
  // Insert 5 entries, but not releasing.
  for (size_t i = 0; i < 5; i++) {
    std::string key = ToString(i + 1);
    Status s = cache->Insert(key, new Value(i + 1), 1, &deleter, &handles[i]);
    ASSERT_TRUE(s.ok());
  }
  ASSERT_EQ(5U, cache->GetCapacity());
  ASSERT_EQ(5U, cache->GetUsage());
  cache->SetCapacity(10);
  ASSERT_EQ(10U, cache->GetCapacity());
  ASSERT_EQ(5U, cache->GetUsage());

  // test2: decrease capacity
  // Insert 5 more elements, release the first 5 handles, then decrease the
  // capacity to 7; the final capacity and usage should both be 7.
  for (size_t i = 5; i < 10; i++) {
    std::string key = ToString(i + 1);
    Status s = cache->Insert(key, new Value(i + 1), 1, &deleter, &handles[i]);
    ASSERT_TRUE(s.ok());
  }
  ASSERT_EQ(10U, cache->GetCapacity());
  ASSERT_EQ(10U, cache->GetUsage());
  for (size_t i = 0; i < 5; i++) {
    cache->Release(handles[i]);
  }
  ASSERT_EQ(10U, cache->GetCapacity());
  ASSERT_EQ(10U, cache->GetUsage());
  cache->SetCapacity(7);
  ASSERT_EQ(7U, cache->GetCapacity());
  ASSERT_EQ(7U, cache->GetUsage());

  // release remaining 5 to keep valgrind happy
  for (size_t i = 5; i < 10; i++) {
    cache->Release(handles[i]);
  }
}

TEST_P(CacheTest, SetStrictCapacityLimit) {
  // test1: set the flag to false. Insert more keys than capacity. See if they
  // all go through.
  std::shared_ptr<Cache> cache = NewLRUCache(5, 0, false);
  std::vector<Cache::Handle*> handles(10);
  Status s;
  for (size_t i = 0; i < 10; i++) {
    std::string key = ToString(i + 1);
    s = cache->Insert(key, new Value(i + 1), 1, &deleter, &handles[i]);
    ASSERT_OK(s);
    ASSERT_NE(nullptr, handles[i]);
  }

  // test2: set the flag to true. Insert and check if it fails.
  std::string extra_key = "extra";
  Value* extra_value = new Value(0);
  cache->SetStrictCapacityLimit(true);
  Cache::Handle* handle;
  s = cache->Insert(extra_key, extra_value, 1, &deleter, &handle);
  ASSERT_TRUE(s.IsIncomplete());
  ASSERT_EQ(nullptr, handle);

  for (size_t i = 0; i < 10; i++) {
    cache->Release(handles[i]);
  }

  // test3: init with flag being true.
  std::shared_ptr<Cache> cache2 = NewLRUCache(5, 0, true);
  for (size_t i = 0; i < 5; i++) {
    std::string key = ToString(i + 1);
    s = cache2->Insert(key, new Value(i + 1), 1, &deleter, &handles[i]);
    ASSERT_OK(s);
    ASSERT_NE(nullptr, handles[i]);
  }
  s = cache2->Insert(extra_key, extra_value, 1, &deleter, &handle);
  ASSERT_TRUE(s.IsIncomplete());
  ASSERT_EQ(nullptr, handle);
  // test insert without handle
  s = cache2->Insert(extra_key, extra_value, 1, &deleter);
  // The insert succeeds, as if the key had been inserted into the cache and
  // then evicted immediately.
  ASSERT_OK(s);
  ASSERT_EQ(5U, cache2->GetUsage());
  ASSERT_EQ(nullptr, cache2->Lookup(extra_key));

  for (size_t i = 0; i < 5; i++) {
    cache2->Release(handles[i]);
  }
}

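// Pinned entries cannot be evicted, so a cache whose entries are all held
// through handles can exceed its capacity; usage falls back under the limit
// once the handles are released and eviction runs again.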
TEST_P(CacheTest, OverCapacity) {
  size_t n = 10;

  // a cache with n entries and one shard only
  std::shared_ptr<Cache> cache = NewCache(n, 0, false);

  std::vector<Cache::Handle*> handles(n + 1);

  // Insert n+1 entries, but not releasing.
  for (size_t i = 0; i < n + 1; i++) {
    std::string key = ToString(i + 1);
    Status s = cache->Insert(key, new Value(i + 1), 1, &deleter, &handles[i]);
    ASSERT_TRUE(s.ok());
  }

  // Guess what's in the cache now?
  for (size_t i = 0; i < n + 1; i++) {
    std::string key = ToString(i + 1);
    auto h = cache->Lookup(key);
    ASSERT_TRUE(h != nullptr);
    if (h) cache->Release(h);
  }

  // the cache is over capacity since nothing could be evicted
  ASSERT_EQ(n + 1U, cache->GetUsage());
  for (size_t i = 0; i < n + 1; i++) {
    cache->Release(handles[i]);
  }
  // Make sure eviction is triggered.
  cache->SetCapacity(n);

  // cache is under capacity now since elements were released
  ASSERT_EQ(n, cache->GetUsage());

  // element 0 is evicted and the rest is there
  // This is consistent with the LRU policy since element 0
  // was released first
  for (size_t i = 0; i < n + 1; i++) {
    std::string key = ToString(i + 1);
    auto h = cache->Lookup(key);
    if (h) {
      ASSERT_NE(i, 0U);
      cache->Release(h);
    } else {
      ASSERT_EQ(i, 0U);
    }
  }
}

namespace {
std::vector<std::pair<int, int>> callback_state;
void callback(void* entry, size_t charge) {
  callback_state.push_back({DecodeValue(entry), static_cast<int>(charge)});
}
}  // namespace

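// ApplyToAllCacheEntries should invoke the callback exactly once per cached
// entry (here with thread_safe == true, so the shards are locked while
// iterating); the set of visited (value, charge) pairs must match what was
// inserted.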
TEST_P(CacheTest, ApplyToAllCacheEntriesTest) {
  std::vector<std::pair<int, int>> inserted;
  callback_state.clear();

  for (int i = 0; i < 10; ++i) {
    Insert(i, i * 2, i + 1);
    inserted.push_back({i * 2, i + 1});
  }
  cache_->ApplyToAllCacheEntries(callback, true);

  std::sort(inserted.begin(), inserted.end());
  std::sort(callback_state.begin(), callback_state.end());
  ASSERT_TRUE(inserted == callback_state);
}

TEST_P(CacheTest, DefaultShardBits) {
  // When no shard-bit count is given (or -1 is passed), the cache derives one
  // from the capacity: roughly one shard per 512KB, capped at 6 shard bits.
  std::shared_ptr<Cache> cache = NewCache(16 * 1024L * 1024L);
  ShardedCache* sc = dynamic_cast<ShardedCache*>(cache.get());
  ASSERT_EQ(5, sc->GetNumShardBits());

  cache = NewLRUCache(511 * 1024L, -1, true);
  sc = dynamic_cast<ShardedCache*>(cache.get());
  ASSERT_EQ(0, sc->GetNumShardBits());

  cache = NewLRUCache(1024L * 1024L * 1024L, -1, true);
  sc = dynamic_cast<ShardedCache*>(cache.get());
  ASSERT_EQ(6, sc->GetNumShardBits());
}

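// The clock cache is only built when SUPPORT_CLOCK_CACHE is defined; on other
// platforms the whole suite runs against the LRU implementation only.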
#ifdef SUPPORT_CLOCK_CACHE
std::shared_ptr<Cache> (*new_clock_cache_func)(size_t, int, bool) =
    NewClockCache;
INSTANTIATE_TEST_CASE_P(CacheTestInstance, CacheTest,
                        testing::Values(kLRU, kClock));
#else
INSTANTIATE_TEST_CASE_P(CacheTestInstance, CacheTest, testing::Values(kLRU));
#endif  // SUPPORT_CLOCK_CACHE

}  // namespace rocksdb

int main(int argc, char** argv) {
  ::testing::InitGoogleTest(&argc, argv);
  return RUN_ALL_TESTS();
}