// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
// This source code is licensed under both the GPLv2 (found in the
// COPYING file in the root directory) and Apache 2.0 License
// (found in the LICENSE.Apache file in the root directory).
//
// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file. See the AUTHORS file for names of contributors.

#include "rocksdb/cache.h"

#include <algorithm>
#include <forward_list>
#include <functional>
#include <iostream>
#include <string>
#include <thread>
#include <vector>

#include "cache/lru_cache.h"
#include "port/stack_trace.h"
#include "test_util/testharness.h"
#include "util/coding.h"
#include "util/string_util.h"

// HyperClockCache only supports 16-byte keys, so some of the tests
// originally written for LRUCache do not work on the other caches.
// Those tests were adapted to use 16-byte keys. We kept the original ones.
// TODO: Remove the original tests if they ever become unused.

namespace ROCKSDB_NAMESPACE {

namespace {

// Conversions between numeric keys/values and the types expected by Cache.
std::string EncodeKey16Bytes(int k) {
  std::string result;
  PutFixed32(&result, k);
  result.append(std::string(12, 'a'));  // Because we need a 16B output, we
                                        // add a 12-byte padding.
  return result;
}
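
// Illustrative note (comment added): PutFixed32 writes k in little-endian
// fixed-width form, so EncodeKey16Bytes(1) produces the bytes
// 01 00 00 00 followed by twelve 'a' bytes, 16 bytes in total.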

int DecodeKey16Bytes(const Slice& k) {
  assert(k.size() == 16);
  return DecodeFixed32(k.data());  // Decodes only the first 4 bytes of k.
}

std::string EncodeKey32Bits(int k) {
  std::string result;
  PutFixed32(&result, k);
  return result;
}

int DecodeKey32Bits(const Slice& k) {
  assert(k.size() == 4);
  return DecodeFixed32(k.data());
}

void* EncodeValue(uintptr_t v) { return reinterpret_cast<void*>(v); }

int DecodeValue(void* v) {
  return static_cast<int>(reinterpret_cast<uintptr_t>(v));
}
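
// Note (comment added): test values are plain integers smuggled through the
// void* value slot, so no value memory is ever allocated; the deleters below
// only observe or react to evictions.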

void DumbDeleter(const Slice& /*key*/, void* /*value*/) {}

void EraseDeleter1(const Slice& /*key*/, void* value) {
  Cache* cache = reinterpret_cast<Cache*>(value);
  cache->Erase("foo");
}

void EraseDeleter2(const Slice& /*key*/, void* value) {
  Cache* cache = reinterpret_cast<Cache*>(value);
  cache->Erase(EncodeKey16Bytes(1234));
}
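
// Note (comment added): EraseDeleter1/2 deliberately call back into the cache
// that is invoking them; the EraseFromDeleter test below relies on this to
// check that a re-entrant Erase from inside a deleter is safe.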

const std::string kLRU = "lru";
const std::string kHyperClock = "hyper_clock";

}  // anonymous namespace

class CacheTest : public testing::TestWithParam<std::string> {
 public:
  static CacheTest* current_;
  static std::string type_;

  static void Deleter(const Slice& key, void* v) {
    if (type_ == kHyperClock) {
      current_->deleted_keys_.push_back(DecodeKey16Bytes(key));
    } else {
      current_->deleted_keys_.push_back(DecodeKey32Bits(key));
    }
    current_->deleted_values_.push_back(DecodeValue(v));
  }

  static const int kCacheSize = 1000;
  static const int kNumShardBits = 4;

  static const int kCacheSize2 = 100;
  static const int kNumShardBits2 = 2;

  std::vector<int> deleted_keys_;
  std::vector<int> deleted_values_;
  std::shared_ptr<Cache> cache_;
  std::shared_ptr<Cache> cache2_;

  size_t estimated_value_size_ = 1;

  CacheTest()
      : cache_(NewCache(kCacheSize, kNumShardBits, false)),
        cache2_(NewCache(kCacheSize2, kNumShardBits2, false)) {
    current_ = this;
    type_ = GetParam();
  }

  ~CacheTest() override {}

  std::shared_ptr<Cache> NewCache(size_t capacity) {
    auto type = GetParam();
    if (type == kLRU) {
      return NewLRUCache(capacity);
    }
    if (type == kHyperClock) {
      return HyperClockCacheOptions(
                 capacity, estimated_value_size_ /*estimated_value_size*/)
          .MakeSharedCache();
    }
    return nullptr;
  }

  std::shared_ptr<Cache> NewCache(
      size_t capacity, int num_shard_bits, bool strict_capacity_limit,
      CacheMetadataChargePolicy charge_policy = kDontChargeCacheMetadata) {
    auto type = GetParam();
    if (type == kLRU) {
      LRUCacheOptions co;
      co.capacity = capacity;
      co.num_shard_bits = num_shard_bits;
      co.strict_capacity_limit = strict_capacity_limit;
      co.high_pri_pool_ratio = 0;
      co.metadata_charge_policy = charge_policy;
      return NewLRUCache(co);
    }
    if (type == kHyperClock) {
      return HyperClockCacheOptions(capacity, 1 /*estimated_value_size*/,
                                    num_shard_bits, strict_capacity_limit,
                                    nullptr /*allocator*/, charge_policy)
          .MakeSharedCache();
    }
    return nullptr;
  }

  // These functions encode/decode keys in test cases that use int keys.
  // Currently, HyperClockCache requires keys to be 16B long, whereas
  // LRUCache doesn't, so the encoding depends on the cache type.
  std::string EncodeKey(int k) {
    auto type = GetParam();
    if (type == kHyperClock) {
      return EncodeKey16Bytes(k);
    } else {
      return EncodeKey32Bits(k);
    }
  }

  int DecodeKey(const Slice& k) {
    auto type = GetParam();
    if (type == kHyperClock) {
      return DecodeKey16Bytes(k);
    } else {
      return DecodeKey32Bits(k);
    }
  }

  int Lookup(std::shared_ptr<Cache> cache, int key) {
    Cache::Handle* handle = cache->Lookup(EncodeKey(key));
    const int r = (handle == nullptr) ? -1 : DecodeValue(cache->Value(handle));
    if (handle != nullptr) {
      cache->Release(handle);
    }
    return r;
  }

  void Insert(std::shared_ptr<Cache> cache, int key, int value,
              int charge = 1) {
    EXPECT_OK(cache->Insert(EncodeKey(key), EncodeValue(value), charge,
                            &CacheTest::Deleter));
  }

  void Erase(std::shared_ptr<Cache> cache, int key) {
    cache->Erase(EncodeKey(key));
  }

  int Lookup(int key) { return Lookup(cache_, key); }

  void Insert(int key, int value, int charge = 1) {
    Insert(cache_, key, value, charge);
  }

  void Erase(int key) { Erase(cache_, key); }

  int Lookup2(int key) { return Lookup(cache2_, key); }

  void Insert2(int key, int value, int charge = 1) {
    Insert(cache2_, key, value, charge);
  }

  void Erase2(int key) { Erase(cache2_, key); }
};

CacheTest* CacheTest::current_;
std::string CacheTest::type_;

class LRUCacheTest : public CacheTest {};

TEST_P(CacheTest, UsageTest) {
  auto type = GetParam();

  // cache is std::shared_ptr and will be automatically cleaned up.
  const size_t kCapacity = 100000;
  auto cache = NewCache(kCapacity, 8, false, kDontChargeCacheMetadata);
  auto precise_cache = NewCache(kCapacity, 0, false, kFullChargeCacheMetadata);
  ASSERT_EQ(0, cache->GetUsage());
  size_t baseline_meta_usage = precise_cache->GetUsage();
  if (type != kHyperClock) {
    ASSERT_EQ(0, baseline_meta_usage);
  }
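
  // Note (comment added): the skipped zero-baseline assertion above reflects
  // that HyperClockCache appears to allocate its table up front, so under
  // kFullChargeCacheMetadata its usage is nonzero even when empty.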

  size_t usage = 0;
  char value[10] = "abcdef";
  // make sure everything will be cached
  for (int i = 1; i < 100; ++i) {
    std::string key;
    if (type == kLRU) {
      key = std::string(i, 'a');
    } else {
      key = EncodeKey(i);
    }
    auto kv_size = key.size() + 5;
    ASSERT_OK(cache->Insert(key, reinterpret_cast<void*>(value), kv_size,
                            DumbDeleter));
    ASSERT_OK(precise_cache->Insert(key, reinterpret_cast<void*>(value),
                                    kv_size, DumbDeleter));
    usage += kv_size;
    ASSERT_EQ(usage, cache->GetUsage());
    if (type == kHyperClock) {
      ASSERT_EQ(baseline_meta_usage + usage, precise_cache->GetUsage());
    } else {
      ASSERT_LT(usage, precise_cache->GetUsage());
    }
  }

  cache->EraseUnRefEntries();
  precise_cache->EraseUnRefEntries();
  ASSERT_EQ(0, cache->GetUsage());
  ASSERT_EQ(baseline_meta_usage, precise_cache->GetUsage());

  // make sure the cache will be overloaded
  for (size_t i = 1; i < kCapacity; ++i) {
    std::string key;
    if (type == kLRU) {
      key = std::to_string(i);
    } else {
      key = EncodeKey(static_cast<int>(1000 + i));
    }
    ASSERT_OK(cache->Insert(key, reinterpret_cast<void*>(value), key.size() + 5,
                            DumbDeleter));
    ASSERT_OK(precise_cache->Insert(key, reinterpret_cast<void*>(value),
                                    key.size() + 5, DumbDeleter));
  }

  // the usage should be close to the capacity
  ASSERT_GT(kCapacity, cache->GetUsage());
  ASSERT_GT(kCapacity, precise_cache->GetUsage());
  ASSERT_LT(kCapacity * 0.95, cache->GetUsage());
  if (type != kHyperClock) {
    ASSERT_LT(kCapacity * 0.95, precise_cache->GetUsage());
  } else {
    // estimated value size of 1 is weird for clock cache, because
    // almost all of the capacity will be used for metadata, and due to only
    // using power of 2 table sizes, we might hit strict occupancy limit
    // before hitting capacity limit.
    ASSERT_LT(kCapacity * 0.80, precise_cache->GetUsage());
  }
}

// TODO: This test takes longer than expected on ClockCache. This is
// because the values size estimate at construction is too sloppy.
// Fix this.
// Why is it so slow? The cache is constructed with an estimate of 1, but
// then the charge is claimed to be 21. This will cause the hash table
// to be extremely sparse, which in turn means clock needs to scan too
// many slots to find victims.
TEST_P(CacheTest, PinnedUsageTest) {
  auto type = GetParam();

  // cache is std::shared_ptr and will be automatically cleaned up.
  const size_t kCapacity = 200000;
  auto cache = NewCache(kCapacity, 8, false, kDontChargeCacheMetadata);
  auto precise_cache = NewCache(kCapacity, 8, false, kFullChargeCacheMetadata);
  size_t baseline_meta_usage = precise_cache->GetUsage();
  if (type != kHyperClock) {
    ASSERT_EQ(0, baseline_meta_usage);
  }

  size_t pinned_usage = 0;
  char value[10] = "abcdef";

  std::forward_list<Cache::Handle*> unreleased_handles;
  std::forward_list<Cache::Handle*> unreleased_handles_in_precise_cache;

  // Add entries. Unpin some of them after insertion. Then, pin some of them
  // again. Check GetPinnedUsage().
  for (int i = 1; i < 100; ++i) {
    std::string key;
    if (type == kLRU) {
      key = std::string(i, 'a');
    } else {
      key = EncodeKey(i);
    }
    auto kv_size = key.size() + 5;
    Cache::Handle* handle;
    Cache::Handle* handle_in_precise_cache;
    ASSERT_OK(cache->Insert(key, reinterpret_cast<void*>(value), kv_size,
                            DumbDeleter, &handle));
    assert(handle);
    ASSERT_OK(precise_cache->Insert(key, reinterpret_cast<void*>(value),
                                    kv_size, DumbDeleter,
                                    &handle_in_precise_cache));
    assert(handle_in_precise_cache);
    pinned_usage += kv_size;
    ASSERT_EQ(pinned_usage, cache->GetPinnedUsage());
    ASSERT_LT(pinned_usage, precise_cache->GetPinnedUsage());
    if (i % 2 == 0) {
      cache->Release(handle);
      precise_cache->Release(handle_in_precise_cache);
      pinned_usage -= kv_size;
      ASSERT_EQ(pinned_usage, cache->GetPinnedUsage());
      ASSERT_LT(pinned_usage, precise_cache->GetPinnedUsage());
    } else {
      unreleased_handles.push_front(handle);
      unreleased_handles_in_precise_cache.push_front(handle_in_precise_cache);
    }
    if (i % 3 == 0) {
      unreleased_handles.push_front(cache->Lookup(key));
      auto x = precise_cache->Lookup(key);
      assert(x);
      unreleased_handles_in_precise_cache.push_front(x);
      // If i % 2 == 0, then the entry was unpinned before Lookup, so pinned
      // usage increased
      if (i % 2 == 0) {
        pinned_usage += kv_size;
      }
      ASSERT_EQ(pinned_usage, cache->GetPinnedUsage());
      ASSERT_LT(pinned_usage, precise_cache->GetPinnedUsage());
    }
  }
  auto precise_cache_pinned_usage = precise_cache->GetPinnedUsage();
  ASSERT_LT(pinned_usage, precise_cache_pinned_usage);

  // check that overloading the cache does not change the pinned usage
  for (size_t i = 1; i < 2 * kCapacity; ++i) {
    std::string key;
    if (type == kLRU) {
      key = std::to_string(i);
    } else {
      key = EncodeKey(static_cast<int>(1000 + i));
    }
    ASSERT_OK(cache->Insert(key, reinterpret_cast<void*>(value), key.size() + 5,
                            DumbDeleter));
    ASSERT_OK(precise_cache->Insert(key, reinterpret_cast<void*>(value),
                                    key.size() + 5, DumbDeleter));
  }
  ASSERT_EQ(pinned_usage, cache->GetPinnedUsage());
  ASSERT_EQ(precise_cache_pinned_usage, precise_cache->GetPinnedUsage());

  cache->EraseUnRefEntries();
  precise_cache->EraseUnRefEntries();
  ASSERT_EQ(pinned_usage, cache->GetPinnedUsage());
  ASSERT_EQ(precise_cache_pinned_usage, precise_cache->GetPinnedUsage());

  // release handles for pinned entries to prevent memory leaks
  for (auto handle : unreleased_handles) {
    cache->Release(handle);
  }
  for (auto handle : unreleased_handles_in_precise_cache) {
    precise_cache->Release(handle);
  }
  ASSERT_EQ(0, cache->GetPinnedUsage());
  ASSERT_EQ(0, precise_cache->GetPinnedUsage());
  cache->EraseUnRefEntries();
  precise_cache->EraseUnRefEntries();
  ASSERT_EQ(0, cache->GetUsage());
  ASSERT_EQ(baseline_meta_usage, precise_cache->GetUsage());
}
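
// Note (comment added): GetPinnedUsage counts only entries with outstanding
// handles, while GetUsage counts everything resident, which is why the
// overload loop above leaves GetPinnedUsage unchanged.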

TEST_P(CacheTest, HitAndMiss) {
  ASSERT_EQ(-1, Lookup(100));

  Insert(100, 101);
  ASSERT_EQ(101, Lookup(100));
  ASSERT_EQ(-1, Lookup(200));
  ASSERT_EQ(-1, Lookup(300));

  Insert(200, 201);
  ASSERT_EQ(101, Lookup(100));
  ASSERT_EQ(201, Lookup(200));
  ASSERT_EQ(-1, Lookup(300));

  Insert(100, 102);
  if (GetParam() == kHyperClock) {
    // ClockCache usually doesn't overwrite on Insert
    ASSERT_EQ(101, Lookup(100));
  } else {
    ASSERT_EQ(102, Lookup(100));
  }
  ASSERT_EQ(201, Lookup(200));
  ASSERT_EQ(-1, Lookup(300));

  ASSERT_EQ(1U, deleted_keys_.size());
  ASSERT_EQ(100, deleted_keys_[0]);
  if (GetParam() == kHyperClock) {
    ASSERT_EQ(102, deleted_values_[0]);
  } else {
    ASSERT_EQ(101, deleted_values_[0]);
  }
}

TEST_P(CacheTest, InsertSameKey) {
  if (GetParam() == kHyperClock) {
    ROCKSDB_GTEST_BYPASS(
        "ClockCache doesn't guarantee Insert overwrite same key.");
    return;
  }
  Insert(1, 1);
  Insert(1, 2);
  ASSERT_EQ(2, Lookup(1));
}

TEST_P(CacheTest, Erase) {
  Erase(200);
  ASSERT_EQ(0U, deleted_keys_.size());

  Insert(100, 101);
  Insert(200, 201);
  Erase(100);
  ASSERT_EQ(-1, Lookup(100));
  ASSERT_EQ(201, Lookup(200));
  ASSERT_EQ(1U, deleted_keys_.size());
  ASSERT_EQ(100, deleted_keys_[0]);
  ASSERT_EQ(101, deleted_values_[0]);

  Erase(100);
  ASSERT_EQ(-1, Lookup(100));
  ASSERT_EQ(201, Lookup(200));
  ASSERT_EQ(1U, deleted_keys_.size());
}

TEST_P(CacheTest, EntriesArePinned) {
  if (GetParam() == kHyperClock) {
    ROCKSDB_GTEST_BYPASS(
        "ClockCache doesn't guarantee Insert overwrite same key.");
    return;
  }
  Insert(100, 101);
  Cache::Handle* h1 = cache_->Lookup(EncodeKey(100));
  ASSERT_EQ(101, DecodeValue(cache_->Value(h1)));
  ASSERT_EQ(1U, cache_->GetUsage());

  Insert(100, 102);
  Cache::Handle* h2 = cache_->Lookup(EncodeKey(100));
  ASSERT_EQ(102, DecodeValue(cache_->Value(h2)));
  ASSERT_EQ(0U, deleted_keys_.size());
  ASSERT_EQ(2U, cache_->GetUsage());

  cache_->Release(h1);
  ASSERT_EQ(1U, deleted_keys_.size());
  ASSERT_EQ(100, deleted_keys_[0]);
  ASSERT_EQ(101, deleted_values_[0]);
  ASSERT_EQ(1U, cache_->GetUsage());

  Erase(100);
  ASSERT_EQ(-1, Lookup(100));
  ASSERT_EQ(1U, deleted_keys_.size());
  ASSERT_EQ(1U, cache_->GetUsage());

  cache_->Release(h2);
  ASSERT_EQ(2U, deleted_keys_.size());
  ASSERT_EQ(100, deleted_keys_[1]);
  ASSERT_EQ(102, deleted_values_[1]);
  ASSERT_EQ(0U, cache_->GetUsage());
}
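
// Note (comment added): the test above shows that an overwritten entry stays
// charged against usage until its last outstanding handle is released; only
// then does its deleter run.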

TEST_P(CacheTest, EvictionPolicy) {
  Insert(100, 101);
  Insert(200, 201);
  // Frequently used entry must be kept around
  for (int i = 0; i < 2 * kCacheSize; i++) {
    Insert(1000 + i, 2000 + i);
    ASSERT_EQ(101, Lookup(100));
  }
  ASSERT_EQ(101, Lookup(100));
  ASSERT_EQ(-1, Lookup(200));
}

TEST_P(CacheTest, ExternalRefPinsEntries) {
  Insert(100, 101);
  Cache::Handle* h = cache_->Lookup(EncodeKey(100));
  ASSERT_TRUE(cache_->Ref(h));
  ASSERT_EQ(101, DecodeValue(cache_->Value(h)));
  ASSERT_EQ(1U, cache_->GetUsage());

  for (int i = 0; i < 3; ++i) {
    if (i > 0) {
      // First release (i == 1) corresponds to Ref(), second release (i == 2)
      // corresponds to Lookup(). Then, since all external refs are released,
      // the below insertions should push out the cache entry.
      cache_->Release(h);
    }
    // double cache size because the usage bit in block cache prevents 100 from
    // being evicted in the first kCacheSize iterations
    for (int j = 0; j < 2 * kCacheSize + 100; j++) {
      Insert(1000 + j, 2000 + j);
    }
    // Clock cache is even more stateful and needs more churn to evict
    if (GetParam() == kHyperClock) {
      for (int j = 0; j < kCacheSize; j++) {
        Insert(11000 + j, 11000 + j);
      }
    }
    if (i < 2) {
      ASSERT_EQ(101, Lookup(100));
    }
  }
  ASSERT_EQ(-1, Lookup(100));
}

TEST_P(CacheTest, EvictionPolicyRef) {
  Insert(100, 101);
  Insert(101, 102);
  Insert(102, 103);
  Insert(103, 104);
  Insert(200, 101);
  Insert(201, 102);
  Insert(202, 103);
  Insert(203, 104);
  Cache::Handle* h201 = cache_->Lookup(EncodeKey(200));
  Cache::Handle* h202 = cache_->Lookup(EncodeKey(201));
  Cache::Handle* h203 = cache_->Lookup(EncodeKey(202));
  Cache::Handle* h204 = cache_->Lookup(EncodeKey(203));
  Insert(300, 101);
  Insert(301, 102);
  Insert(302, 103);
  Insert(303, 104);

  // Insert entries much more than cache capacity.
  for (int i = 0; i < 100 * kCacheSize; i++) {
    Insert(1000 + i, 2000 + i);
  }

  // Check whether the entries inserted in the beginning
  // are evicted. Ones without extra ref are evicted and
  // those with are not.
  ASSERT_EQ(-1, Lookup(100));
  ASSERT_EQ(-1, Lookup(101));
  ASSERT_EQ(-1, Lookup(102));
  ASSERT_EQ(-1, Lookup(103));

  ASSERT_EQ(-1, Lookup(300));
  ASSERT_EQ(-1, Lookup(301));
  ASSERT_EQ(-1, Lookup(302));
  ASSERT_EQ(-1, Lookup(303));

  ASSERT_EQ(101, Lookup(200));
  ASSERT_EQ(102, Lookup(201));
  ASSERT_EQ(103, Lookup(202));
  ASSERT_EQ(104, Lookup(203));

  // Cleaning up all the handles
  cache_->Release(h201);
  cache_->Release(h202);
  cache_->Release(h203);
  cache_->Release(h204);
}

TEST_P(CacheTest, EvictEmptyCache) {
  auto type = GetParam();

  // Insert an item larger than capacity to trigger eviction on empty cache.
  auto cache = NewCache(1, 0, false);
  if (type == kLRU) {
    ASSERT_OK(cache->Insert("foo", nullptr, 10, DumbDeleter));
  } else {
    ASSERT_OK(cache->Insert(EncodeKey(1000), nullptr, 10, DumbDeleter));
  }
}

TEST_P(CacheTest, EraseFromDeleter) {
  auto type = GetParam();

  // Use a deleter that erases another item from the cache, re-entering
  // the cache at that point.
  std::shared_ptr<Cache> cache = NewCache(10, 0, false);
  std::string foo, bar;
  Cache::DeleterFn erase_deleter;
  if (type == kLRU) {
    foo = "foo";
    bar = "bar";
    erase_deleter = EraseDeleter1;
  } else {
    foo = EncodeKey(1234);
    bar = EncodeKey(5678);
    erase_deleter = EraseDeleter2;
  }

  ASSERT_OK(cache->Insert(foo, nullptr, 1, DumbDeleter));
  ASSERT_OK(cache->Insert(bar, cache.get(), 1, erase_deleter));

  cache->Erase(bar);
  ASSERT_EQ(nullptr, cache->Lookup(foo));
  ASSERT_EQ(nullptr, cache->Lookup(bar));
}

TEST_P(CacheTest, ErasedHandleState) {
  // insert a key and get two handles
  Insert(100, 1000);
  Cache::Handle* h1 = cache_->Lookup(EncodeKey(100));
  Cache::Handle* h2 = cache_->Lookup(EncodeKey(100));
  ASSERT_EQ(h1, h2);
  ASSERT_EQ(DecodeValue(cache_->Value(h1)), 1000);
  ASSERT_EQ(DecodeValue(cache_->Value(h2)), 1000);

  // delete the key from the cache
  Erase(100);
  // can no longer find in the cache
  ASSERT_EQ(-1, Lookup(100));

  // release one handle
  cache_->Release(h1);
  // still can't find in cache
  ASSERT_EQ(-1, Lookup(100));

  cache_->Release(h2);
}

TEST_P(CacheTest, HeavyEntries) {
  // Add a bunch of light and heavy entries and then count the combined
  // size of items still in the cache, which must be approximately the
  // same as the total capacity.
  const int kLight = 1;
  const int kHeavy = 10;
  int added = 0;
  int index = 0;
  while (added < 2 * kCacheSize) {
    const int weight = (index & 1) ? kLight : kHeavy;
    Insert(index, 1000 + index, weight);
    added += weight;
    index++;
  }

  int cached_weight = 0;
  for (int i = 0; i < index; i++) {
    const int weight = (i & 1 ? kLight : kHeavy);
    int r = Lookup(i);
    if (r >= 0) {
      cached_weight += weight;
      ASSERT_EQ(1000 + i, r);
    }
  }
  ASSERT_LE(cached_weight, kCacheSize + kCacheSize / 10);
}

TEST_P(CacheTest, NewId) {
  uint64_t a = cache_->NewId();
  uint64_t b = cache_->NewId();
  ASSERT_NE(a, b);
}

class Value {
 public:
  explicit Value(int v) : v_(v) {}

  int v_;
};

namespace {
void deleter(const Slice& /*key*/, void* value) {
  delete static_cast<Value*>(value);
}
}  // namespace
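
// Note (comment added): unlike CacheTest::Deleter, which only records what
// was evicted, this deleter actually frees the heap-allocated Value objects
// used by the capacity tests below.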

TEST_P(CacheTest, ReleaseAndErase) {
  std::shared_ptr<Cache> cache = NewCache(5, 0, false);
  Cache::Handle* handle;
  Status s = cache->Insert(EncodeKey(100), EncodeValue(100), 1,
                           &CacheTest::Deleter, &handle);
  ASSERT_TRUE(s.ok());
  ASSERT_EQ(5U, cache->GetCapacity());
  ASSERT_EQ(1U, cache->GetUsage());
  ASSERT_EQ(0U, deleted_keys_.size());
  auto erased = cache->Release(handle, true);
  ASSERT_TRUE(erased);
  // This tests that deleter has been called
  ASSERT_EQ(1U, deleted_keys_.size());
}

TEST_P(CacheTest, ReleaseWithoutErase) {
  std::shared_ptr<Cache> cache = NewCache(5, 0, false);
  Cache::Handle* handle;
  Status s = cache->Insert(EncodeKey(100), EncodeValue(100), 1,
                           &CacheTest::Deleter, &handle);
  ASSERT_TRUE(s.ok());
  ASSERT_EQ(5U, cache->GetCapacity());
  ASSERT_EQ(1U, cache->GetUsage());
  ASSERT_EQ(0U, deleted_keys_.size());
  auto erased = cache->Release(handle);
  ASSERT_FALSE(erased);
  // This tests that deleter is not called. When cache has free capacity it is
  // not expected to immediately erase the released items.
  ASSERT_EQ(0U, deleted_keys_.size());
}

TEST_P(CacheTest, SetCapacity) {
  auto type = GetParam();
  if (type == kHyperClock) {
    ROCKSDB_GTEST_BYPASS(
        "FastLRUCache and HyperClockCache don't support arbitrary capacity "
        "adjustments.");
    return;
  }
  // test1: increase capacity
  // Create a cache with capacity 5, insert 5 elements, then increase the
  // capacity to 10; the returned capacity should be 10 and usage should be 5.
  std::shared_ptr<Cache> cache = NewCache(5, 0, false);
  std::vector<Cache::Handle*> handles(10);
  // Insert 5 entries, but not releasing.
  for (int i = 0; i < 5; i++) {
    std::string key = EncodeKey(i + 1);
    Status s = cache->Insert(key, new Value(i + 1), 1, &deleter, &handles[i]);
    ASSERT_TRUE(s.ok());
  }
  ASSERT_EQ(5U, cache->GetCapacity());
  ASSERT_EQ(5U, cache->GetUsage());
  cache->SetCapacity(10);
  ASSERT_EQ(10U, cache->GetCapacity());
  ASSERT_EQ(5U, cache->GetUsage());

  // test2: decrease capacity
  // insert 5 more elements to cache, then release 5,
  // then decrease capacity to 7, final capacity should be 7
  // and usage should be 7
  for (int i = 5; i < 10; i++) {
    std::string key = EncodeKey(i + 1);
    Status s = cache->Insert(key, new Value(i + 1), 1, &deleter, &handles[i]);
    ASSERT_TRUE(s.ok());
  }
  ASSERT_EQ(10U, cache->GetCapacity());
  ASSERT_EQ(10U, cache->GetUsage());
  for (int i = 0; i < 5; i++) {
    cache->Release(handles[i]);
  }
  ASSERT_EQ(10U, cache->GetCapacity());
  ASSERT_EQ(10U, cache->GetUsage());
  cache->SetCapacity(7);
  ASSERT_EQ(7, cache->GetCapacity());
  ASSERT_EQ(7, cache->GetUsage());

  // release remaining 5 to keep valgrind happy
  for (int i = 5; i < 10; i++) {
    cache->Release(handles[i]);
  }

  // Make sure this doesn't crash or upset ASAN/valgrind
  cache->DisownData();
}

TEST_P(LRUCacheTest, SetStrictCapacityLimit) {
  // test1: set the flag to false. Insert more keys than capacity. See if they
  // all go through.
  std::shared_ptr<Cache> cache = NewCache(5, 0, false);
  std::vector<Cache::Handle*> handles(10);
  Status s;
  for (int i = 0; i < 10; i++) {
    std::string key = EncodeKey(i + 1);
    s = cache->Insert(key, new Value(i + 1), 1, &deleter, &handles[i]);
    ASSERT_OK(s);
    ASSERT_NE(nullptr, handles[i]);
  }
  ASSERT_EQ(10, cache->GetUsage());

  // test2: set the flag to true. Insert and check if it fails.
  std::string extra_key = EncodeKey(100);
  Value* extra_value = new Value(0);
  cache->SetStrictCapacityLimit(true);
  Cache::Handle* handle;
  s = cache->Insert(extra_key, extra_value, 1, &deleter, &handle);
  ASSERT_TRUE(s.IsMemoryLimit());
  ASSERT_EQ(nullptr, handle);
  ASSERT_EQ(10, cache->GetUsage());

  for (int i = 0; i < 10; i++) {
    cache->Release(handles[i]);
  }

  // test3: init with flag being true.
  std::shared_ptr<Cache> cache2 = NewCache(5, 0, true);
  for (int i = 0; i < 5; i++) {
    std::string key = EncodeKey(i + 1);
    s = cache2->Insert(key, new Value(i + 1), 1, &deleter, &handles[i]);
    ASSERT_OK(s);
    ASSERT_NE(nullptr, handles[i]);
  }
  s = cache2->Insert(extra_key, extra_value, 1, &deleter, &handle);
  ASSERT_TRUE(s.IsMemoryLimit());
  ASSERT_EQ(nullptr, handle);
  // test insert without handle
  s = cache2->Insert(extra_key, extra_value, 1, &deleter);
  // As if the key had been inserted into the cache but evicted immediately.
  ASSERT_OK(s);
  ASSERT_EQ(5, cache2->GetUsage());
  ASSERT_EQ(nullptr, cache2->Lookup(extra_key));

  for (int i = 0; i < 5; i++) {
    cache2->Release(handles[i]);
  }
}

TEST_P(CacheTest, OverCapacity) {
  size_t n = 10;

  // a LRUCache with n entries and one shard only
  std::shared_ptr<Cache> cache = NewCache(n, 0, false);

  std::vector<Cache::Handle*> handles(n + 1);

  // Insert n+1 entries, but not releasing.
  for (int i = 0; i < static_cast<int>(n + 1); i++) {
    std::string key = EncodeKey(i + 1);
    Status s = cache->Insert(key, new Value(i + 1), 1, &deleter, &handles[i]);
    ASSERT_TRUE(s.ok());
  }

  // Guess what's in the cache now?
  for (int i = 0; i < static_cast<int>(n + 1); i++) {
    std::string key = EncodeKey(i + 1);
    auto h = cache->Lookup(key);
    ASSERT_TRUE(h != nullptr);
    if (h) cache->Release(h);
  }

  // the cache is over capacity since nothing could be evicted
  ASSERT_EQ(n + 1U, cache->GetUsage());
  for (int i = 0; i < static_cast<int>(n + 1); i++) {
    cache->Release(handles[i]);
  }

  if (GetParam() == kHyperClock) {
    // Make sure eviction is triggered.
    ASSERT_OK(cache->Insert(EncodeKey(-1), nullptr, 1, &deleter, &handles[0]));

    // cache is under capacity now since elements were released
    ASSERT_GE(n, cache->GetUsage());

    // clean up
    cache->Release(handles[0]);
  } else {
    // LRUCache checks for over-capacity in Release.

    // cache is exactly at capacity now with minimal eviction
    ASSERT_EQ(n, cache->GetUsage());

    // element 0 is evicted and the rest is there
    // This is consistent with the LRU policy since the element 0
    // was released first
    for (int i = 0; i < static_cast<int>(n + 1); i++) {
      std::string key = EncodeKey(i + 1);
      auto h = cache->Lookup(key);
      if (h) {
        ASSERT_NE(static_cast<size_t>(i), 0U);
        cache->Release(h);
      } else {
        ASSERT_EQ(static_cast<size_t>(i), 0U);
      }
    }
  }
}

namespace {
std::vector<std::pair<int, int>> legacy_callback_state;
void legacy_callback(void* value, size_t charge) {
  legacy_callback_state.push_back(
      {DecodeValue(value), static_cast<int>(charge)});
}
}  // namespace
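
// Note (comment added): ApplyToAllCacheEntries is the older iteration API
// (value and charge only), as the "legacy" names above suggest;
// ApplyToAllEntries, tested next, also exposes the key and deleter.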

TEST_P(CacheTest, ApplyToAllCacheEntriesTest) {
  std::vector<std::pair<int, int>> inserted;
  legacy_callback_state.clear();

  for (int i = 0; i < 10; ++i) {
    Insert(i, i * 2, i + 1);
    inserted.push_back({i * 2, i + 1});
  }
  cache_->ApplyToAllCacheEntries(legacy_callback, true);

  std::sort(inserted.begin(), inserted.end());
  std::sort(legacy_callback_state.begin(), legacy_callback_state.end());
  ASSERT_EQ(inserted.size(), legacy_callback_state.size());
  for (int i = 0; i < static_cast<int>(inserted.size()); ++i) {
    EXPECT_EQ(inserted[i], legacy_callback_state[i]);
  }
}

TEST_P(CacheTest, ApplyToAllEntriesTest) {
  std::vector<std::string> callback_state;
  const auto callback = [&](const Slice& key, void* value, size_t charge,
                            Cache::DeleterFn deleter) {
    callback_state.push_back(std::to_string(DecodeKey(key)) + "," +
                             std::to_string(DecodeValue(value)) + "," +
                             std::to_string(charge));
    assert(deleter == &CacheTest::Deleter);
  };

  std::vector<std::string> inserted;
  callback_state.clear();

  for (int i = 0; i < 10; ++i) {
    Insert(i, i * 2, i + 1);
    inserted.push_back(std::to_string(i) + "," + std::to_string(i * 2) + "," +
                       std::to_string(i + 1));
  }
  cache_->ApplyToAllEntries(callback, /*opts*/ {});

  std::sort(inserted.begin(), inserted.end());
  std::sort(callback_state.begin(), callback_state.end());
  ASSERT_EQ(inserted.size(), callback_state.size());
  for (int i = 0; i < static_cast<int>(inserted.size()); ++i) {
    EXPECT_EQ(inserted[i], callback_state[i]);
  }
}

TEST_P(CacheTest, ApplyToAllEntriesDuringResize) {
  // This is a mini-stress test of ApplyToAllEntries, to ensure
  // items in the cache that are neither added nor removed
  // during ApplyToAllEntries are counted exactly once.

  // Insert some entries that we expect to be seen exactly once
  // during iteration.
  constexpr int kSpecialCharge = 2;
  constexpr int kNotSpecialCharge = 1;
  constexpr int kSpecialCount = 100;
  size_t expected_usage = 0;
  for (int i = 0; i < kSpecialCount; ++i) {
    Insert(i, i * 2, kSpecialCharge);
    expected_usage += kSpecialCharge;
  }

  // For callback
  int special_count = 0;
  const auto callback = [&](const Slice&, void*, size_t charge,
                            Cache::DeleterFn) {
    if (charge == static_cast<size_t>(kSpecialCharge)) {
      ++special_count;
    }
  };

  // Start counting
  std::thread apply_thread([&]() {
    // Use small average_entries_per_lock to make the problem difficult
    Cache::ApplyToAllEntriesOptions opts;
    opts.average_entries_per_lock = 2;
    cache_->ApplyToAllEntries(callback, opts);
  });

  // In parallel, add more entries, enough to cause resize but not enough
  // to cause ejections. (Note: if any cache shard is over capacity, there
  // will be ejections)
  for (int i = kSpecialCount * 1; i < kSpecialCount * 5; ++i) {
    Insert(i, i * 2, kNotSpecialCharge);
    expected_usage += kNotSpecialCharge;
  }

  apply_thread.join();
  // verify no evictions
  ASSERT_EQ(cache_->GetUsage(), expected_usage);
  // verify everything seen in ApplyToAllEntries
  ASSERT_EQ(special_count, kSpecialCount);
}

TEST_P(CacheTest, DefaultShardBits) {
  // Prevent excessive allocation (to save time & space)
  estimated_value_size_ = 100000;
  // Implementations use different minimum shard sizes
  size_t min_shard_size =
      (GetParam() == kHyperClock ? 32U * 1024U : 512U) * 1024U;

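  // Illustrative arithmetic (comment added): the heuristic picks roughly
  // capacity / min_shard_size shards, rounded down to a power of two and
  // capped at 2^6, so 32 * min_shard_size should yield 5 shard bits.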
  std::shared_ptr<Cache> cache = NewCache(32U * min_shard_size);
  ShardedCacheBase* sc = dynamic_cast<ShardedCacheBase*>(cache.get());
  ASSERT_EQ(5, sc->GetNumShardBits());

  cache = NewCache(min_shard_size / 1000U * 999U);
  sc = dynamic_cast<ShardedCacheBase*>(cache.get());
  ASSERT_EQ(0, sc->GetNumShardBits());

  cache = NewCache(3U * 1024U * 1024U * 1024U);
  sc = dynamic_cast<ShardedCacheBase*>(cache.get());
  // current maximum of 6
  ASSERT_EQ(6, sc->GetNumShardBits());

  if constexpr (sizeof(size_t) > 4) {
    cache = NewCache(128U * min_shard_size);
    sc = dynamic_cast<ShardedCacheBase*>(cache.get());
    // current maximum of 6
    ASSERT_EQ(6, sc->GetNumShardBits());
  }
}

TEST_P(CacheTest, GetChargeAndDeleter) {
  Insert(1, 2);
  Cache::Handle* h1 = cache_->Lookup(EncodeKey(1));
  ASSERT_EQ(2, DecodeValue(cache_->Value(h1)));
  ASSERT_EQ(1, cache_->GetCharge(h1));
  ASSERT_EQ(&CacheTest::Deleter, cache_->GetDeleter(h1));
  cache_->Release(h1);
}

INSTANTIATE_TEST_CASE_P(CacheTestInstance, CacheTest,
                        testing::Values(kLRU, kHyperClock));
INSTANTIATE_TEST_CASE_P(CacheTestInstance, LRUCacheTest, testing::Values(kLRU));

}  // namespace ROCKSDB_NAMESPACE

int main(int argc, char** argv) {
  ROCKSDB_NAMESPACE::port::InstallStackTraceHandler();
  ::testing::InitGoogleTest(&argc, argv);
  return RUN_ALL_TESTS();
}