// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
// This source code is licensed under both the GPLv2 (found in the
// COPYING file in the root directory) and Apache 2.0 License
// (found in the LICENSE.Apache file in the root directory).
//
// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file. See the AUTHORS file for names of contributors.

#include "rocksdb/cache.h"

#include <algorithm>
#include <forward_list>
#include <functional>
#include <iostream>
#include <string>
#include <vector>
#include "cache/clock_cache.h"
#include "cache/lru_cache.h"
#include "test_util/testharness.h"
#include "util/coding.h"
#include "util/string_util.h"

namespace ROCKSDB_NAMESPACE {

// Conversions between numeric keys/values and the types expected by Cache.
static std::string EncodeKey(int k) {
  std::string result;
  PutFixed32(&result, k);
  return result;
}
static int DecodeKey(const Slice& k) {
  assert(k.size() == 4);
  return DecodeFixed32(k.data());
}
static void* EncodeValue(uintptr_t v) { return reinterpret_cast<void*>(v); }
static int DecodeValue(void* v) {
  return static_cast<int>(reinterpret_cast<uintptr_t>(v));
}
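
// For illustration: DecodeKey(EncodeKey(k)) == k and
// DecodeValue(EncodeValue(v)) == v. Values are packed directly into the
// void* rather than heap-allocated, so these entries need no real cleanup
// from their deleters.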

const std::string kLRU = "lru";
const std::string kClock = "clock";

// A no-op deleter, for entries whose values are not heap-allocated.
void dumbDeleter(const Slice& /*key*/, void* /*value*/) {}

// A deleter that re-enters the cache and erases another entry ("foo"); used
// to check that the cache tolerates erasure from inside a deleter callback.
void eraseDeleter(const Slice& /*key*/, void* value) {
  Cache* cache = reinterpret_cast<Cache*>(value);
  cache->Erase("foo");
}

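// Parameterized fixture: GetParam() selects the cache implementation under
// test (kLRU or kClock). The numeric Lookup() helpers below return -1 on a
// miss, which is unambiguous because all test values are non-negative.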
class CacheTest : public testing::TestWithParam<std::string> {
 public:
  static CacheTest* current_;

  static void Deleter(const Slice& key, void* v) {
    current_->deleted_keys_.push_back(DecodeKey(key));
    current_->deleted_values_.push_back(DecodeValue(v));
  }

  static const int kCacheSize = 1000;
  static const int kNumShardBits = 4;

  static const int kCacheSize2 = 100;
  static const int kNumShardBits2 = 2;

  std::vector<int> deleted_keys_;
  std::vector<int> deleted_values_;
  std::shared_ptr<Cache> cache_;
  std::shared_ptr<Cache> cache2_;

  CacheTest()
      : cache_(NewCache(kCacheSize, kNumShardBits, false)),
        cache2_(NewCache(kCacheSize2, kNumShardBits2, false)) {
    current_ = this;
  }

  ~CacheTest() override {}

  std::shared_ptr<Cache> NewCache(size_t capacity) {
    auto type = GetParam();
    if (type == kLRU) {
      return NewLRUCache(capacity);
    }
    if (type == kClock) {
      return NewClockCache(capacity);
    }
    return nullptr;
  }

  std::shared_ptr<Cache> NewCache(
      size_t capacity, int num_shard_bits, bool strict_capacity_limit,
      CacheMetadataChargePolicy charge_policy = kDontChargeCacheMetadata) {
    auto type = GetParam();
    if (type == kLRU) {
      LRUCacheOptions co;
      co.capacity = capacity;
      co.num_shard_bits = num_shard_bits;
      co.strict_capacity_limit = strict_capacity_limit;
      co.high_pri_pool_ratio = 0;
      co.metadata_charge_policy = charge_policy;
      return NewLRUCache(co);
    }
    if (type == kClock) {
      return NewClockCache(capacity, num_shard_bits, strict_capacity_limit,
                           charge_policy);
    }
    return nullptr;
  }

  int Lookup(std::shared_ptr<Cache> cache, int key) {
    Cache::Handle* handle = cache->Lookup(EncodeKey(key));
    const int r = (handle == nullptr) ? -1 : DecodeValue(cache->Value(handle));
    if (handle != nullptr) {
      cache->Release(handle);
    }
    return r;
  }

  void Insert(std::shared_ptr<Cache> cache, int key, int value,
              int charge = 1) {
    EXPECT_OK(cache->Insert(EncodeKey(key), EncodeValue(value), charge,
                            &CacheTest::Deleter));
  }

  void Erase(std::shared_ptr<Cache> cache, int key) {
    cache->Erase(EncodeKey(key));
  }

  int Lookup(int key) {
    return Lookup(cache_, key);
  }

  void Insert(int key, int value, int charge = 1) {
    Insert(cache_, key, value, charge);
  }

  void Erase(int key) {
    Erase(cache_, key);
  }

  int Lookup2(int key) {
    return Lookup(cache2_, key);
  }

  void Insert2(int key, int value, int charge = 1) {
    Insert(cache2_, key, value, charge);
  }

  void Erase2(int key) {
    Erase(cache2_, key);
  }
};
CacheTest* CacheTest::current_;

class LRUCacheTest : public CacheTest {};

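// The tests below compare a cache created with kDontChargeCacheMetadata
// against a "precise" cache created with kFullChargeCacheMetadata. The
// latter additionally charges per-entry metadata overhead, so its reported
// usage is strictly larger for the same sequence of inserts.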
TEST_P(CacheTest, UsageTest) {
  // cache is std::shared_ptr and will be automatically cleaned up.
  const uint64_t kCapacity = 100000;
  auto cache = NewCache(kCapacity, 8, false, kDontChargeCacheMetadata);
  auto precise_cache = NewCache(kCapacity, 0, false, kFullChargeCacheMetadata);
  ASSERT_EQ(0, cache->GetUsage());
  ASSERT_EQ(0, precise_cache->GetUsage());

  size_t usage = 0;
  char value[10] = "abcdef";
  // make sure everything will be cached
  for (int i = 1; i < 100; ++i) {
    std::string key(i, 'a');
    auto kv_size = key.size() + 5;
    ASSERT_OK(cache->Insert(key, reinterpret_cast<void*>(value), kv_size,
                            dumbDeleter));
    ASSERT_OK(precise_cache->Insert(key, reinterpret_cast<void*>(value),
                                    kv_size, dumbDeleter));
    usage += kv_size;
    ASSERT_EQ(usage, cache->GetUsage());
    ASSERT_LT(usage, precise_cache->GetUsage());
  }

  cache->EraseUnRefEntries();
  precise_cache->EraseUnRefEntries();
  ASSERT_EQ(0, cache->GetUsage());
  ASSERT_EQ(0, precise_cache->GetUsage());

  // make sure the cache will be overloaded
  for (uint64_t i = 1; i < kCapacity; ++i) {
    auto key = ToString(i);
    ASSERT_OK(cache->Insert(key, reinterpret_cast<void*>(value), key.size() + 5,
                            dumbDeleter));
    ASSERT_OK(precise_cache->Insert(key, reinterpret_cast<void*>(value),
                                    key.size() + 5, dumbDeleter));
  }

  // the usage should be close to the capacity
  ASSERT_GT(kCapacity, cache->GetUsage());
  ASSERT_GT(kCapacity, precise_cache->GetUsage());
  ASSERT_LT(kCapacity * 0.95, cache->GetUsage());
  ASSERT_LT(kCapacity * 0.95, precise_cache->GetUsage());
}

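// GetPinnedUsage() tracks the charge of entries with at least one
// outstanding handle. As the assertions below imply, an entry's charge is
// counted once no matter how many handles reference it, and released
// entries stop counting even before they are actually evicted.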
TEST_P(CacheTest, PinnedUsageTest) {
  // cache is std::shared_ptr and will be automatically cleaned up.
  const uint64_t kCapacity = 200000;
  auto cache = NewCache(kCapacity, 8, false, kDontChargeCacheMetadata);
  auto precise_cache = NewCache(kCapacity, 8, false, kFullChargeCacheMetadata);

  size_t pinned_usage = 0;
  char value[10] = "abcdef";

  std::forward_list<Cache::Handle*> unreleased_handles;
  std::forward_list<Cache::Handle*> unreleased_handles_in_precise_cache;

  // Add entries. Unpin some of them after insertion. Then, pin some of them
  // again. Check GetPinnedUsage().
  for (int i = 1; i < 100; ++i) {
    std::string key(i, 'a');
    auto kv_size = key.size() + 5;
    Cache::Handle* handle;
    Cache::Handle* handle_in_precise_cache;
    ASSERT_OK(cache->Insert(key, reinterpret_cast<void*>(value), kv_size,
                            dumbDeleter, &handle));
    assert(handle);
    ASSERT_OK(precise_cache->Insert(key, reinterpret_cast<void*>(value),
                                    kv_size, dumbDeleter,
                                    &handle_in_precise_cache));
    assert(handle_in_precise_cache);
    pinned_usage += kv_size;
    ASSERT_EQ(pinned_usage, cache->GetPinnedUsage());
    ASSERT_LT(pinned_usage, precise_cache->GetPinnedUsage());
    if (i % 2 == 0) {
      cache->Release(handle);
      precise_cache->Release(handle_in_precise_cache);
      pinned_usage -= kv_size;
      ASSERT_EQ(pinned_usage, cache->GetPinnedUsage());
      ASSERT_LT(pinned_usage, precise_cache->GetPinnedUsage());
    } else {
      unreleased_handles.push_front(handle);
      unreleased_handles_in_precise_cache.push_front(handle_in_precise_cache);
    }
    if (i % 3 == 0) {
      unreleased_handles.push_front(cache->Lookup(key));
      auto x = precise_cache->Lookup(key);
      assert(x);
      unreleased_handles_in_precise_cache.push_front(x);
      // If i % 2 == 0, then the entry was unpinned before Lookup, so pinned
      // usage increased
      if (i % 2 == 0) {
        pinned_usage += kv_size;
      }
      ASSERT_EQ(pinned_usage, cache->GetPinnedUsage());
      ASSERT_LT(pinned_usage, precise_cache->GetPinnedUsage());
    }
  }
  auto precise_cache_pinned_usage = precise_cache->GetPinnedUsage();
  ASSERT_LT(pinned_usage, precise_cache_pinned_usage);

  // check that overloading the cache does not change the pinned usage
  for (uint64_t i = 1; i < 2 * kCapacity; ++i) {
    auto key = ToString(i);
    ASSERT_OK(cache->Insert(key, reinterpret_cast<void*>(value), key.size() + 5,
                            dumbDeleter));
    ASSERT_OK(precise_cache->Insert(key, reinterpret_cast<void*>(value),
                                    key.size() + 5, dumbDeleter));
  }
  ASSERT_EQ(pinned_usage, cache->GetPinnedUsage());
  ASSERT_EQ(precise_cache_pinned_usage, precise_cache->GetPinnedUsage());

  cache->EraseUnRefEntries();
  precise_cache->EraseUnRefEntries();
  ASSERT_EQ(pinned_usage, cache->GetPinnedUsage());
  ASSERT_EQ(precise_cache_pinned_usage, precise_cache->GetPinnedUsage());

  // release handles for pinned entries to prevent memory leaks
  for (auto handle : unreleased_handles) {
    cache->Release(handle);
  }
  for (auto handle : unreleased_handles_in_precise_cache) {
    precise_cache->Release(handle);
  }
  ASSERT_EQ(0, cache->GetPinnedUsage());
  ASSERT_EQ(0, precise_cache->GetPinnedUsage());
  cache->EraseUnRefEntries();
  precise_cache->EraseUnRefEntries();
  ASSERT_EQ(0, cache->GetUsage());
  ASSERT_EQ(0, precise_cache->GetUsage());
}

TEST_P(CacheTest, HitAndMiss) {
  ASSERT_EQ(-1, Lookup(100));

  Insert(100, 101);
  ASSERT_EQ(101, Lookup(100));
  ASSERT_EQ(-1, Lookup(200));
  ASSERT_EQ(-1, Lookup(300));

  Insert(200, 201);
  ASSERT_EQ(101, Lookup(100));
  ASSERT_EQ(201, Lookup(200));
  ASSERT_EQ(-1, Lookup(300));

  Insert(100, 102);
  ASSERT_EQ(102, Lookup(100));
  ASSERT_EQ(201, Lookup(200));
  ASSERT_EQ(-1, Lookup(300));

  ASSERT_EQ(1U, deleted_keys_.size());
  ASSERT_EQ(100, deleted_keys_[0]);
  ASSERT_EQ(101, deleted_values_[0]);
}

TEST_P(CacheTest, InsertSameKey) {
  Insert(1, 1);
  Insert(1, 2);
  ASSERT_EQ(2, Lookup(1));
}

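// A minimal usage sketch of the fixture helpers, added for illustration
// (not part of the upstream suite): insert, hit, erase, miss.
TEST_P(CacheTest, BasicUsageExample) {
  Insert(7, 70);
  ASSERT_EQ(70, Lookup(7));
  Erase(7);
  ASSERT_EQ(-1, Lookup(7));
}
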
TEST_P(CacheTest, Erase) {
  Erase(200);
  ASSERT_EQ(0U, deleted_keys_.size());

  Insert(100, 101);
  Insert(200, 201);
  Erase(100);
  ASSERT_EQ(-1, Lookup(100));
  ASSERT_EQ(201, Lookup(200));
  ASSERT_EQ(1U, deleted_keys_.size());
  ASSERT_EQ(100, deleted_keys_[0]);
  ASSERT_EQ(101, deleted_values_[0]);

  Erase(100);
  ASSERT_EQ(-1, Lookup(100));
  ASSERT_EQ(201, Lookup(200));
  ASSERT_EQ(1U, deleted_keys_.size());
}

TEST_P(CacheTest, EntriesArePinned) {
  Insert(100, 101);
  Cache::Handle* h1 = cache_->Lookup(EncodeKey(100));
  ASSERT_EQ(101, DecodeValue(cache_->Value(h1)));
  ASSERT_EQ(1U, cache_->GetUsage());

  Insert(100, 102);
  Cache::Handle* h2 = cache_->Lookup(EncodeKey(100));
  ASSERT_EQ(102, DecodeValue(cache_->Value(h2)));
  ASSERT_EQ(0U, deleted_keys_.size());
  ASSERT_EQ(2U, cache_->GetUsage());

  cache_->Release(h1);
  ASSERT_EQ(1U, deleted_keys_.size());
  ASSERT_EQ(100, deleted_keys_[0]);
  ASSERT_EQ(101, deleted_values_[0]);
  ASSERT_EQ(1U, cache_->GetUsage());

  Erase(100);
  ASSERT_EQ(-1, Lookup(100));
  ASSERT_EQ(1U, deleted_keys_.size());
  ASSERT_EQ(1U, cache_->GetUsage());

  cache_->Release(h2);
  ASSERT_EQ(2U, deleted_keys_.size());
  ASSERT_EQ(100, deleted_keys_[1]);
  ASSERT_EQ(102, deleted_values_[1]);
  ASSERT_EQ(0U, cache_->GetUsage());
}

TEST_P(CacheTest, EvictionPolicy) {
  Insert(100, 101);
  Insert(200, 201);

  // Frequently used entry must be kept around
  for (int i = 0; i < kCacheSize * 2; i++) {
    Insert(1000 + i, 2000 + i);
    ASSERT_EQ(101, Lookup(100));
  }
  ASSERT_EQ(101, Lookup(100));
  ASSERT_EQ(-1, Lookup(200));
}

TEST_P(CacheTest, ExternalRefPinsEntries) {
  Insert(100, 101);
  Cache::Handle* h = cache_->Lookup(EncodeKey(100));
  ASSERT_TRUE(cache_->Ref(h));
  ASSERT_EQ(101, DecodeValue(cache_->Value(h)));
  ASSERT_EQ(1U, cache_->GetUsage());

  for (int i = 0; i < 3; ++i) {
    if (i > 0) {
      // First release (i == 1) corresponds to Ref(), second release (i == 2)
      // corresponds to Lookup(). Then, since all external refs are released,
      // the below insertions should push out the cache entry.
      cache_->Release(h);
    }
    // double cache size because the usage bit in block cache prevents 100 from
    // being evicted in the first kCacheSize iterations
    for (int j = 0; j < 2 * kCacheSize + 100; j++) {
      Insert(1000 + j, 2000 + j);
    }
    if (i < 2) {
      ASSERT_EQ(101, Lookup(100));
    }
  }
  ASSERT_EQ(-1, Lookup(100));
}

TEST_P(CacheTest, EvictionPolicyRef) {
  Insert(100, 101);
  Insert(101, 102);
  Insert(102, 103);
  Insert(103, 104);
  Insert(200, 101);
  Insert(201, 102);
  Insert(202, 103);
  Insert(203, 104);
  Cache::Handle* h201 = cache_->Lookup(EncodeKey(200));
  Cache::Handle* h202 = cache_->Lookup(EncodeKey(201));
  Cache::Handle* h203 = cache_->Lookup(EncodeKey(202));
  Cache::Handle* h204 = cache_->Lookup(EncodeKey(203));
  Insert(300, 101);
  Insert(301, 102);
  Insert(302, 103);
  Insert(303, 104);

  // Insert many more entries than the cache capacity
  for (int i = 0; i < kCacheSize * 2; i++) {
    Insert(1000 + i, 2000 + i);
  }

  // Check whether the entries inserted at the beginning are evicted.
  // Entries without an extra ref are evicted; those with one are not.
  ASSERT_EQ(-1, Lookup(100));
  ASSERT_EQ(-1, Lookup(101));
  ASSERT_EQ(-1, Lookup(102));
  ASSERT_EQ(-1, Lookup(103));

  ASSERT_EQ(-1, Lookup(300));
  ASSERT_EQ(-1, Lookup(301));
  ASSERT_EQ(-1, Lookup(302));
  ASSERT_EQ(-1, Lookup(303));

  ASSERT_EQ(101, Lookup(200));
  ASSERT_EQ(102, Lookup(201));
  ASSERT_EQ(103, Lookup(202));
  ASSERT_EQ(104, Lookup(203));

  // Clean up all the handles
  cache_->Release(h201);
  cache_->Release(h202);
  cache_->Release(h203);
  cache_->Release(h204);
}

TEST_P(CacheTest, EvictEmptyCache) {
  // Insert an item larger than the capacity to trigger eviction on an empty
  // cache.
  auto cache = NewCache(1, 0, false);
  ASSERT_OK(cache->Insert("foo", nullptr, 10, dumbDeleter));
}

TEST_P(CacheTest, EraseFromDeleter) {
  // Use a deleter that erases another item ("foo") from the cache,
  // re-entering the cache from inside the deleter callback.
  std::shared_ptr<Cache> cache = NewCache(10, 0, false);
  ASSERT_OK(cache->Insert("foo", nullptr, 1, dumbDeleter));
  ASSERT_OK(cache->Insert("bar", cache.get(), 1, eraseDeleter));
  cache->Erase("bar");
  ASSERT_EQ(nullptr, cache->Lookup("foo"));
  ASSERT_EQ(nullptr, cache->Lookup("bar"));
}

TEST_P(CacheTest, ErasedHandleState) {
  // insert a key and get two handles
  Insert(100, 1000);
  Cache::Handle* h1 = cache_->Lookup(EncodeKey(100));
  Cache::Handle* h2 = cache_->Lookup(EncodeKey(100));
  ASSERT_EQ(h1, h2);
  ASSERT_EQ(DecodeValue(cache_->Value(h1)), 1000);
  ASSERT_EQ(DecodeValue(cache_->Value(h2)), 1000);

  // delete the key from the cache
  Erase(100);
  // can no longer find in the cache
  ASSERT_EQ(-1, Lookup(100));

  // release one handle
  cache_->Release(h1);
  // still can't find in cache
  ASSERT_EQ(-1, Lookup(100));

  cache_->Release(h2);
}

TEST_P(CacheTest, HeavyEntries) {
  // Add a bunch of light and heavy entries and then count the combined
  // size of items still in the cache, which must be approximately the
  // same as the total capacity.
  const int kLight = 1;
  const int kHeavy = 10;
  int added = 0;
  int index = 0;
  while (added < 2 * kCacheSize) {
    const int weight = (index & 1) ? kLight : kHeavy;
    Insert(index, 1000 + index, weight);
    added += weight;
    index++;
  }

  int cached_weight = 0;
  for (int i = 0; i < index; i++) {
    const int weight = (i & 1) ? kLight : kHeavy;
    int r = Lookup(i);
    if (r >= 0) {
      cached_weight += weight;
      ASSERT_EQ(1000 + i, r);
    }
  }
  ASSERT_LE(cached_weight, kCacheSize + kCacheSize / 10);
}

TEST_P(CacheTest, NewId) {
  uint64_t a = cache_->NewId();
  uint64_t b = cache_->NewId();
  ASSERT_NE(a, b);
}

class Value {
 public:
  explicit Value(size_t v) : v_(v) {}

  size_t v_;
};

namespace {
void deleter(const Slice& /*key*/, void* value) {
  delete static_cast<Value*>(value);
}
}  // namespace

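// Illustrative sketch, added for this edit (not in the upstream suite):
// heap-allocated values are paired with a deleter that the cache invokes
// once the entry is erased or evicted and unreferenced. Entries still
// resident at destruction are cleaned up by the cache itself, which the
// SetCapacity test below also relies on.
TEST_P(CacheTest, HeapValueExample) {
  std::shared_ptr<Cache> cache = NewCache(5, 0, false);
  ASSERT_OK(cache->Insert(EncodeKey(1), new Value(1), 1, &deleter));
  Cache::Handle* h = cache->Lookup(EncodeKey(1));
  ASSERT_NE(nullptr, h);
  ASSERT_EQ(1U, static_cast<Value*>(cache->Value(h))->v_);
  cache->Release(h);
}
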
TEST_P(CacheTest, ReleaseAndErase) {
  std::shared_ptr<Cache> cache = NewCache(5, 0, false);
  Cache::Handle* handle;
  Status s = cache->Insert(EncodeKey(100), EncodeValue(100), 1,
                           &CacheTest::Deleter, &handle);
  ASSERT_TRUE(s.ok());
  ASSERT_EQ(5U, cache->GetCapacity());
  ASSERT_EQ(1U, cache->GetUsage());
  ASSERT_EQ(0U, deleted_keys_.size());
  auto erased = cache->Release(handle, true);
  ASSERT_TRUE(erased);
  // This tests that the deleter has been called
  ASSERT_EQ(1U, deleted_keys_.size());
}

TEST_P(CacheTest, ReleaseWithoutErase) {
  std::shared_ptr<Cache> cache = NewCache(5, 0, false);
  Cache::Handle* handle;
  Status s = cache->Insert(EncodeKey(100), EncodeValue(100), 1,
                           &CacheTest::Deleter, &handle);
  ASSERT_TRUE(s.ok());
  ASSERT_EQ(5U, cache->GetCapacity());
  ASSERT_EQ(1U, cache->GetUsage());
  ASSERT_EQ(0U, deleted_keys_.size());
  auto erased = cache->Release(handle);
  ASSERT_FALSE(erased);
  // This tests that the deleter is not called. When the cache has free
  // capacity, it is not expected to erase released items immediately.
  ASSERT_EQ(0U, deleted_keys_.size());
}

TEST_P(CacheTest, SetCapacity) {
  // test1: increase capacity
  // Create a cache with capacity 5 and insert 5 entries, then increase the
  // capacity to 10; the returned capacity should be 10 and usage should
  // remain 5.
  std::shared_ptr<Cache> cache = NewCache(5, 0, false);
  std::vector<Cache::Handle*> handles(10);
  // Insert 5 entries, but not releasing.
  for (size_t i = 0; i < 5; i++) {
    std::string key = ToString(i + 1);
    Status s = cache->Insert(key, new Value(i + 1), 1, &deleter, &handles[i]);
    ASSERT_TRUE(s.ok());
  }
  ASSERT_EQ(5U, cache->GetCapacity());
  ASSERT_EQ(5U, cache->GetUsage());
  cache->SetCapacity(10);
  ASSERT_EQ(10U, cache->GetCapacity());
  ASSERT_EQ(5U, cache->GetUsage());

  // test2: decrease capacity
  // Insert 5 more entries, then release the first 5, then decrease the
  // capacity to 7; the final capacity and usage should both be 7.
  for (size_t i = 5; i < 10; i++) {
    std::string key = ToString(i + 1);
    Status s = cache->Insert(key, new Value(i + 1), 1, &deleter, &handles[i]);
    ASSERT_TRUE(s.ok());
  }
  ASSERT_EQ(10U, cache->GetCapacity());
  ASSERT_EQ(10U, cache->GetUsage());
  for (size_t i = 0; i < 5; i++) {
    cache->Release(handles[i]);
  }
  ASSERT_EQ(10U, cache->GetCapacity());
  ASSERT_EQ(10U, cache->GetUsage());
  cache->SetCapacity(7);
  ASSERT_EQ(7, cache->GetCapacity());
  ASSERT_EQ(7, cache->GetUsage());

  // release the remaining 5 handles to keep valgrind happy
  for (size_t i = 5; i < 10; i++) {
    cache->Release(handles[i]);
  }
}

TEST_P(LRUCacheTest, SetStrictCapacityLimit) {
  // test1: set the flag to false. Insert more keys than capacity. See if they
  // all go through.
  std::shared_ptr<Cache> cache = NewCache(5, 0, false);
  std::vector<Cache::Handle*> handles(10);
  Status s;
  for (size_t i = 0; i < 10; i++) {
    std::string key = ToString(i + 1);
    s = cache->Insert(key, new Value(i + 1), 1, &deleter, &handles[i]);
    ASSERT_OK(s);
    ASSERT_NE(nullptr, handles[i]);
  }
  ASSERT_EQ(10, cache->GetUsage());

  // test2: set the flag to true. Insert and check if it fails.
  std::string extra_key = "extra";
  Value* extra_value = new Value(0);
  cache->SetStrictCapacityLimit(true);
  Cache::Handle* handle;
  s = cache->Insert(extra_key, extra_value, 1, &deleter, &handle);
  ASSERT_TRUE(s.IsIncomplete());
  ASSERT_EQ(nullptr, handle);
  ASSERT_EQ(10, cache->GetUsage());

  for (size_t i = 0; i < 10; i++) {
    cache->Release(handles[i]);
  }

  // test3: init with flag being true.
  std::shared_ptr<Cache> cache2 = NewCache(5, 0, true);
  for (size_t i = 0; i < 5; i++) {
    std::string key = ToString(i + 1);
    s = cache2->Insert(key, new Value(i + 1), 1, &deleter, &handles[i]);
    ASSERT_OK(s);
    ASSERT_NE(nullptr, handles[i]);
  }
  s = cache2->Insert(extra_key, extra_value, 1, &deleter, &handle);
  ASSERT_TRUE(s.IsIncomplete());
  ASSERT_EQ(nullptr, handle);
  // test insert without handle
  s = cache2->Insert(extra_key, extra_value, 1, &deleter);
  // It is as if the key had been inserted into the cache and then
  // immediately evicted.
  ASSERT_OK(s);
  ASSERT_EQ(5, cache2->GetUsage());
  ASSERT_EQ(nullptr, cache2->Lookup(extra_key));

  for (size_t i = 0; i < 5; i++) {
    cache2->Release(handles[i]);
  }
}

TEST_P(CacheTest, OverCapacity) {
  size_t n = 10;

  // an LRUCache with n entries and one shard only
  std::shared_ptr<Cache> cache = NewCache(n, 0, false);

  std::vector<Cache::Handle*> handles(n + 1);

  // Insert n+1 entries, but not releasing.
  for (size_t i = 0; i < n + 1; i++) {
    std::string key = ToString(i + 1);
    Status s = cache->Insert(key, new Value(i + 1), 1, &deleter, &handles[i]);
    ASSERT_TRUE(s.ok());
  }

  // Guess what's in the cache now?
  for (size_t i = 0; i < n + 1; i++) {
    std::string key = ToString(i + 1);
    auto h = cache->Lookup(key);
    ASSERT_TRUE(h != nullptr);
    if (h) cache->Release(h);
  }

  // the cache is over capacity since nothing could be evicted
  ASSERT_EQ(n + 1U, cache->GetUsage());
  for (size_t i = 0; i < n + 1; i++) {
    cache->Release(handles[i]);
  }
  // Make sure eviction is triggered.
  cache->SetCapacity(n);

  // cache is under capacity now since elements were released
  ASSERT_EQ(n, cache->GetUsage());

  // element 0 is evicted and the rest are there.
  // This is consistent with the LRU policy, since element 0 was released
  // first.
  for (size_t i = 0; i < n + 1; i++) {
    std::string key = ToString(i + 1);
    auto h = cache->Lookup(key);
    if (h) {
      ASSERT_NE(i, 0U);
      cache->Release(h);
    } else {
      ASSERT_EQ(i, 0U);
    }
  }
}

namespace {
std::vector<std::pair<int, int>> callback_state;
void callback(void* entry, size_t charge) {
  callback_state.push_back({DecodeValue(entry), static_cast<int>(charge)});
}
}  // namespace

TEST_P(CacheTest, ApplyToAllCacheEntriesTest) {
  std::vector<std::pair<int, int>> inserted;
  callback_state.clear();

  for (int i = 0; i < 10; ++i) {
    Insert(i, i * 2, i + 1);
    inserted.push_back({i * 2, i + 1});
  }
  cache_->ApplyToAllCacheEntries(callback, true);

  std::sort(inserted.begin(), inserted.end());
  std::sort(callback_state.begin(), callback_state.end());
  ASSERT_TRUE(inserted == callback_state);
}

TEST_P(CacheTest, DefaultShardBits) {
  // The default number of shard bits should scale with the cache capacity.
  std::shared_ptr<Cache> cache = NewCache(16 * 1024L * 1024L);
  ShardedCache* sc = dynamic_cast<ShardedCache*>(cache.get());
  ASSERT_EQ(5, sc->GetNumShardBits());

  cache = NewLRUCache(511 * 1024L, -1, true);
  sc = dynamic_cast<ShardedCache*>(cache.get());
  ASSERT_EQ(0, sc->GetNumShardBits());

  cache = NewLRUCache(1024L * 1024L * 1024L, -1, true);
  sc = dynamic_cast<ShardedCache*>(cache.get());
  ASSERT_EQ(6, sc->GetNumShardBits());
}

TEST_P(CacheTest, GetCharge) {
  Insert(1, 2);
  Cache::Handle* h1 = cache_->Lookup(EncodeKey(1));
  ASSERT_EQ(2, DecodeValue(cache_->Value(h1)));
  ASSERT_EQ(1, cache_->GetCharge(h1));
  cache_->Release(h1);
}

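// Binding NewClockCache to a function pointer of this exact type also acts
// as a compile-time check that the factory still has the signature these
// tests expect.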
#ifdef SUPPORT_CLOCK_CACHE
std::shared_ptr<Cache> (*new_clock_cache_func)(
    size_t, int, bool, CacheMetadataChargePolicy) = NewClockCache;
INSTANTIATE_TEST_CASE_P(CacheTestInstance, CacheTest,
                        testing::Values(kLRU, kClock));
#else
INSTANTIATE_TEST_CASE_P(CacheTestInstance, CacheTest, testing::Values(kLRU));
#endif  // SUPPORT_CLOCK_CACHE
INSTANTIATE_TEST_CASE_P(CacheTestInstance, LRUCacheTest, testing::Values(kLRU));

}  // namespace ROCKSDB_NAMESPACE

int main(int argc, char** argv) {
  ::testing::InitGoogleTest(&argc, argv);
  return RUN_ALL_TESTS();
}