// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
// This source code is licensed under both the GPLv2 (found in the
// COPYING file in the root directory) and Apache 2.0 License
// (found in the LICENSE.Apache file in the root directory).
//
// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file. See the AUTHORS file for names of contributors.
#include <cstdlib>
#include "cache/lru_cache.h"
#include "db/db_test_util.h"
#include "port/stack_trace.h"

namespace rocksdb {

class DBBlockCacheTest : public DBTestBase {
 private:
  size_t miss_count_ = 0;
  size_t hit_count_ = 0;
  size_t insert_count_ = 0;
  size_t failure_count_ = 0;
  size_t compressed_miss_count_ = 0;
  size_t compressed_hit_count_ = 0;
  size_t compressed_insert_count_ = 0;
  size_t compressed_failure_count_ = 0;

 public:
  const size_t kNumBlocks = 10;
  const size_t kValueSize = 100;

  DBBlockCacheTest() : DBTestBase("/db_block_cache_test") {}

  BlockBasedTableOptions GetTableOptions() {
    BlockBasedTableOptions table_options;
    // Set a small enough block size so that each key-value pair gets its own
    // block.
    table_options.block_size = 1;
    return table_options;
  }

  Options GetOptions(const BlockBasedTableOptions& table_options) {
    Options options = CurrentOptions();
    options.create_if_missing = true;
    options.avoid_flush_during_recovery = false;
    // options.compression = kNoCompression;
    options.statistics = rocksdb::CreateDBStatistics();
    options.table_factory.reset(new BlockBasedTableFactory(table_options));
    return options;
  }

  void InitTable(const Options& /*options*/) {
    std::string value(kValueSize, 'a');
    for (size_t i = 0; i < kNumBlocks; i++) {
      ASSERT_OK(Put(ToString(i), value.c_str()));
    }
  }

  void RecordCacheCounters(const Options& options) {
    miss_count_ = TestGetTickerCount(options, BLOCK_CACHE_MISS);
    hit_count_ = TestGetTickerCount(options, BLOCK_CACHE_HIT);
    insert_count_ = TestGetTickerCount(options, BLOCK_CACHE_ADD);
    failure_count_ = TestGetTickerCount(options, BLOCK_CACHE_ADD_FAILURES);
    compressed_miss_count_ =
        TestGetTickerCount(options, BLOCK_CACHE_COMPRESSED_MISS);
    compressed_hit_count_ =
        TestGetTickerCount(options, BLOCK_CACHE_COMPRESSED_HIT);
    compressed_insert_count_ =
        TestGetTickerCount(options, BLOCK_CACHE_COMPRESSED_ADD);
    compressed_failure_count_ =
        TestGetTickerCount(options, BLOCK_CACHE_COMPRESSED_ADD_FAILURES);
  }

  void CheckCacheCounters(const Options& options, size_t expected_misses,
                          size_t expected_hits, size_t expected_inserts,
                          size_t expected_failures) {
    size_t new_miss_count = TestGetTickerCount(options, BLOCK_CACHE_MISS);
    size_t new_hit_count = TestGetTickerCount(options, BLOCK_CACHE_HIT);
    size_t new_insert_count = TestGetTickerCount(options, BLOCK_CACHE_ADD);
    size_t new_failure_count =
        TestGetTickerCount(options, BLOCK_CACHE_ADD_FAILURES);
    ASSERT_EQ(miss_count_ + expected_misses, new_miss_count);
    ASSERT_EQ(hit_count_ + expected_hits, new_hit_count);
    ASSERT_EQ(insert_count_ + expected_inserts, new_insert_count);
    ASSERT_EQ(failure_count_ + expected_failures, new_failure_count);
    miss_count_ = new_miss_count;
    hit_count_ = new_hit_count;
    insert_count_ = new_insert_count;
    failure_count_ = new_failure_count;
  }

  void CheckCompressedCacheCounters(const Options& options,
                                    size_t expected_misses,
                                    size_t expected_hits,
                                    size_t expected_inserts,
                                    size_t expected_failures) {
    size_t new_miss_count =
        TestGetTickerCount(options, BLOCK_CACHE_COMPRESSED_MISS);
    size_t new_hit_count =
        TestGetTickerCount(options, BLOCK_CACHE_COMPRESSED_HIT);
    size_t new_insert_count =
        TestGetTickerCount(options, BLOCK_CACHE_COMPRESSED_ADD);
    size_t new_failure_count =
        TestGetTickerCount(options, BLOCK_CACHE_COMPRESSED_ADD_FAILURES);
    ASSERT_EQ(compressed_miss_count_ + expected_misses, new_miss_count);
    ASSERT_EQ(compressed_hit_count_ + expected_hits, new_hit_count);
    ASSERT_EQ(compressed_insert_count_ + expected_inserts, new_insert_count);
    ASSERT_EQ(compressed_failure_count_ + expected_failures, new_failure_count);
    compressed_miss_count_ = new_miss_count;
    compressed_hit_count_ = new_hit_count;
    compressed_insert_count_ = new_insert_count;
    compressed_failure_count_ = new_failure_count;
  }
};

TEST_F(DBBlockCacheTest, IteratorBlockCacheUsage) {
  ReadOptions read_options;
  read_options.fill_cache = false;
  auto table_options = GetTableOptions();
  auto options = GetOptions(table_options);
  InitTable(options);

  std::shared_ptr<Cache> cache = NewLRUCache(0, 0, false);
  table_options.block_cache = cache;
  options.table_factory.reset(new BlockBasedTableFactory(table_options));
  Reopen(options);
  RecordCacheCounters(options);

  std::vector<std::unique_ptr<Iterator>> iterators(kNumBlocks - 1);
  Iterator* iter = nullptr;

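  // Even with fill_cache disabled, an open iterator should account some block
  // memory against the cache, and that usage should return to zero once the
  // iterator is deleted.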
  ASSERT_EQ(0, cache->GetUsage());
  iter = db_->NewIterator(read_options);
  iter->Seek(ToString(0));
  ASSERT_LT(0, cache->GetUsage());
  delete iter;
  iter = nullptr;
  ASSERT_EQ(0, cache->GetUsage());
}

TEST_F(DBBlockCacheTest, TestWithoutCompressedBlockCache) {
  ReadOptions read_options;
  auto table_options = GetTableOptions();
  auto options = GetOptions(table_options);
  InitTable(options);

  std::shared_ptr<Cache> cache = NewLRUCache(0, 0, false);
  table_options.block_cache = cache;
  options.table_factory.reset(new BlockBasedTableFactory(table_options));
  Reopen(options);
  RecordCacheCounters(options);

  std::vector<std::unique_ptr<Iterator>> iterators(kNumBlocks - 1);
  Iterator* iter = nullptr;

  // Load blocks into cache.
  for (size_t i = 0; i < kNumBlocks - 1; i++) {
    iter = db_->NewIterator(read_options);
    iter->Seek(ToString(i));
    ASSERT_OK(iter->status());
    CheckCacheCounters(options, 1, 0, 1, 0);
    iterators[i].reset(iter);
  }
  size_t usage = cache->GetUsage();
  ASSERT_LT(0, usage);
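  // All of the current usage is pinned by the open iterators; cap the cache at
  // exactly that size.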
  cache->SetCapacity(usage);
  ASSERT_EQ(usage, cache->GetPinnedUsage());

  // Test with strict capacity limit.
  cache->SetStrictCapacityLimit(true);
  iter = db_->NewIterator(read_options);
  iter->Seek(ToString(kNumBlocks - 1));
  ASSERT_TRUE(iter->status().IsIncomplete());
  CheckCacheCounters(options, 1, 0, 0, 1);
  delete iter;
  iter = nullptr;

  // Release iterators and access cache again.
  for (size_t i = 0; i < kNumBlocks - 1; i++) {
    iterators[i].reset();
    CheckCacheCounters(options, 0, 0, 0, 0);
  }
  ASSERT_EQ(0, cache->GetPinnedUsage());
  for (size_t i = 0; i < kNumBlocks - 1; i++) {
    iter = db_->NewIterator(read_options);
    iter->Seek(ToString(i));
    ASSERT_OK(iter->status());
    CheckCacheCounters(options, 0, 1, 0, 0);
    iterators[i].reset(iter);
  }
}

#ifdef SNAPPY
TEST_F(DBBlockCacheTest, TestWithCompressedBlockCache) {
  ReadOptions read_options;
  auto table_options = GetTableOptions();
  auto options = GetOptions(table_options);
  options.compression = CompressionType::kSnappyCompression;
  InitTable(options);

  std::shared_ptr<Cache> cache = NewLRUCache(0, 0, false);
  std::shared_ptr<Cache> compressed_cache = NewLRUCache(1 << 25, 0, false);
  table_options.block_cache = cache;
  table_options.block_cache_compressed = compressed_cache;
  options.table_factory.reset(new BlockBasedTableFactory(table_options));
  Reopen(options);
  RecordCacheCounters(options);

  std::vector<std::unique_ptr<Iterator>> iterators(kNumBlocks - 1);
  Iterator* iter = nullptr;

  // Load blocks into cache.
  for (size_t i = 0; i < kNumBlocks - 1; i++) {
    iter = db_->NewIterator(read_options);
    iter->Seek(ToString(i));
    ASSERT_OK(iter->status());
    CheckCacheCounters(options, 1, 0, 1, 0);
    CheckCompressedCacheCounters(options, 1, 0, 1, 0);
    iterators[i].reset(iter);
  }
  size_t usage = cache->GetUsage();
  ASSERT_LT(0, usage);
  ASSERT_EQ(usage, cache->GetPinnedUsage());
  size_t compressed_usage = compressed_cache->GetUsage();
  ASSERT_LT(0, compressed_usage);
  // Compressed block cache cannot be pinned.
  ASSERT_EQ(0, compressed_cache->GetPinnedUsage());

  // Set the strict capacity limit flag. Now blocks will only be loaded into
  // the compressed block cache.
  cache->SetCapacity(usage);
  cache->SetStrictCapacityLimit(true);
  ASSERT_EQ(usage, cache->GetPinnedUsage());
  iter = db_->NewIterator(read_options);
  iter->Seek(ToString(kNumBlocks - 1));
  ASSERT_TRUE(iter->status().IsIncomplete());
  CheckCacheCounters(options, 1, 0, 0, 1);
  CheckCompressedCacheCounters(options, 1, 0, 1, 0);
  delete iter;
  iter = nullptr;

  // Clear the strict capacity limit flag. This time we should hit the
  // compressed block cache.
  cache->SetStrictCapacityLimit(false);
  iter = db_->NewIterator(read_options);
  iter->Seek(ToString(kNumBlocks - 1));
  ASSERT_OK(iter->status());
  CheckCacheCounters(options, 1, 0, 1, 0);
  CheckCompressedCacheCounters(options, 0, 1, 0, 0);
  delete iter;
  iter = nullptr;
}
#endif  // SNAPPY

#ifndef ROCKSDB_LITE

// Make sure that when options.block_cache is set, after a new table is
// created its index/filter blocks are added to block cache.
TEST_F(DBBlockCacheTest, IndexAndFilterBlocksOfNewTableAddedToCache) {
  Options options = CurrentOptions();
  options.create_if_missing = true;
  options.statistics = rocksdb::CreateDBStatistics();
  BlockBasedTableOptions table_options;
  table_options.cache_index_and_filter_blocks = true;
  table_options.filter_policy.reset(NewBloomFilterPolicy(20));
  options.table_factory.reset(new BlockBasedTableFactory(table_options));
  CreateAndReopenWithCF({"pikachu"}, options);

  ASSERT_OK(Put(1, "key", "val"));
  // Create a new table.
  ASSERT_OK(Flush(1));

  // index/filter blocks added to block cache right after table creation.
  ASSERT_EQ(1, TestGetTickerCount(options, BLOCK_CACHE_INDEX_MISS));
  ASSERT_EQ(1, TestGetTickerCount(options, BLOCK_CACHE_FILTER_MISS));
  ASSERT_EQ(2, /* only index/filter were added */
            TestGetTickerCount(options, BLOCK_CACHE_ADD));
  ASSERT_EQ(0, TestGetTickerCount(options, BLOCK_CACHE_DATA_MISS));
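  // With cache_index_and_filter_blocks set, the table readers themselves are
  // not expected to hold index/filter memory, so the estimate should be zero.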
  uint64_t int_num;
  ASSERT_TRUE(
      dbfull()->GetIntProperty("rocksdb.estimate-table-readers-mem", &int_num));
  ASSERT_EQ(int_num, 0U);

  // Make sure filter block is in cache.
  std::string value;
  ReadOptions ropt;
  db_->KeyMayExist(ReadOptions(), handles_[1], "key", &value);

  // Miss count should remain the same.
  ASSERT_EQ(1, TestGetTickerCount(options, BLOCK_CACHE_FILTER_MISS));
  ASSERT_EQ(1, TestGetTickerCount(options, BLOCK_CACHE_FILTER_HIT));

  db_->KeyMayExist(ReadOptions(), handles_[1], "key", &value);
  ASSERT_EQ(1, TestGetTickerCount(options, BLOCK_CACHE_FILTER_MISS));
  ASSERT_EQ(2, TestGetTickerCount(options, BLOCK_CACHE_FILTER_HIT));

  // Make sure index block is in cache.
  auto index_block_hit = TestGetTickerCount(options, BLOCK_CACHE_INDEX_HIT);
  value = Get(1, "key");
  ASSERT_EQ(1, TestGetTickerCount(options, BLOCK_CACHE_INDEX_MISS));
  ASSERT_EQ(index_block_hit + 1,
            TestGetTickerCount(options, BLOCK_CACHE_INDEX_HIT));

  value = Get(1, "key");
  ASSERT_EQ(1, TestGetTickerCount(options, BLOCK_CACHE_INDEX_MISS));
  ASSERT_EQ(index_block_hit + 2,
            TestGetTickerCount(options, BLOCK_CACHE_INDEX_HIT));
}

// With fill_cache = false, this test fills up the cache and then iterates over
// the entire DB, verifying that the dummy entries inserted in
// `BlockBasedTable::NewDataBlockIterator` do not cause heap-use-after-free
// errors in COMPILE_WITH_ASAN=1 runs.
TEST_F(DBBlockCacheTest, FillCacheAndIterateDB) {
  ReadOptions read_options;
  read_options.fill_cache = false;
  auto table_options = GetTableOptions();
  auto options = GetOptions(table_options);
  InitTable(options);

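  // A tiny 10-byte cache with the strict capacity limit enabled, so block
  // inserts are expected to fail rather than evict.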
  std::shared_ptr<Cache> cache = NewLRUCache(10, 0, true);
  table_options.block_cache = cache;
  options.table_factory.reset(new BlockBasedTableFactory(table_options));
  Reopen(options);
  ASSERT_OK(Put("key1", "val1"));
  ASSERT_OK(Put("key2", "val2"));
  ASSERT_OK(Flush());
  ASSERT_OK(Put("key3", "val3"));
  ASSERT_OK(Put("key4", "val4"));
  ASSERT_OK(Flush());
  ASSERT_OK(Put("key5", "val5"));
  ASSERT_OK(Put("key6", "val6"));
  ASSERT_OK(Flush());

  Iterator* iter = nullptr;

  iter = db_->NewIterator(read_options);
  iter->Seek(ToString(0));
  while (iter->Valid()) {
    iter->Next();
  }
  delete iter;
  iter = nullptr;
}

TEST_F(DBBlockCacheTest, IndexAndFilterBlocksStats) {
  Options options = CurrentOptions();
  options.create_if_missing = true;
  options.statistics = rocksdb::CreateDBStatistics();
  BlockBasedTableOptions table_options;
  table_options.cache_index_and_filter_blocks = true;
  // 200 bytes are enough to hold the first two blocks
  std::shared_ptr<Cache> cache = NewLRUCache(200, 0, false);
  table_options.block_cache = cache;
  table_options.filter_policy.reset(NewBloomFilterPolicy(20, true));
  options.table_factory.reset(new BlockBasedTableFactory(table_options));
  CreateAndReopenWithCF({"pikachu"}, options);

  ASSERT_OK(Put(1, "key", "val"));
  // Create a new table
  ASSERT_OK(Flush(1));
  size_t index_bytes_insert =
      TestGetTickerCount(options, BLOCK_CACHE_INDEX_BYTES_INSERT);
  size_t filter_bytes_insert =
      TestGetTickerCount(options, BLOCK_CACHE_FILTER_BYTES_INSERT);
  ASSERT_GT(index_bytes_insert, 0);
  ASSERT_GT(filter_bytes_insert, 0);
  ASSERT_EQ(cache->GetUsage(), index_bytes_insert + filter_bytes_insert);
  // set the cache capacity to the current usage
  cache->SetCapacity(index_bytes_insert + filter_bytes_insert);
  ASSERT_EQ(TestGetTickerCount(options, BLOCK_CACHE_INDEX_BYTES_EVICT), 0);
  ASSERT_EQ(TestGetTickerCount(options, BLOCK_CACHE_FILTER_BYTES_EVICT), 0);
  ASSERT_OK(Put(1, "key2", "val"));
  // Create a new table
  ASSERT_OK(Flush(1));
  // The cache evicted the old index and filter entries.
  ASSERT_GT(TestGetTickerCount(options, BLOCK_CACHE_INDEX_BYTES_INSERT),
            index_bytes_insert);
  ASSERT_GT(TestGetTickerCount(options, BLOCK_CACHE_FILTER_BYTES_INSERT),
            filter_bytes_insert);
  ASSERT_EQ(TestGetTickerCount(options, BLOCK_CACHE_INDEX_BYTES_EVICT),
            index_bytes_insert);
  ASSERT_EQ(TestGetTickerCount(options, BLOCK_CACHE_FILTER_BYTES_EVICT),
            filter_bytes_insert);
}

namespace {

// A mock cache that wraps LRUCache and records how many entries have been
// inserted for each priority.
class MockCache : public LRUCache {
 public:
  static uint32_t high_pri_insert_count;
  static uint32_t low_pri_insert_count;

  MockCache()
      : LRUCache((size_t)1 << 25 /*capacity*/, 0 /*num_shard_bits*/,
                 false /*strict_capacity_limit*/, 0.0 /*high_pri_pool_ratio*/) {
  }

  Status Insert(const Slice& key, void* value, size_t charge,
                void (*deleter)(const Slice& key, void* value), Handle** handle,
                Priority priority) override {
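    // Tally the insertion by priority before delegating to the real LRUCache.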
    if (priority == Priority::LOW) {
      low_pri_insert_count++;
    } else {
      high_pri_insert_count++;
    }
    return LRUCache::Insert(key, value, charge, deleter, handle, priority);
  }
};

uint32_t MockCache::high_pri_insert_count = 0;
uint32_t MockCache::low_pri_insert_count = 0;

}  // anonymous namespace

TEST_F(DBBlockCacheTest, IndexAndFilterBlocksCachePriority) {
  for (auto priority : {Cache::Priority::LOW, Cache::Priority::HIGH}) {
    Options options = CurrentOptions();
    options.create_if_missing = true;
    options.statistics = rocksdb::CreateDBStatistics();
    BlockBasedTableOptions table_options;
    table_options.cache_index_and_filter_blocks = true;
    table_options.block_cache.reset(new MockCache());
    table_options.filter_policy.reset(NewBloomFilterPolicy(20));
    table_options.cache_index_and_filter_blocks_with_high_priority =
        priority == Cache::Priority::HIGH;
    options.table_factory.reset(new BlockBasedTableFactory(table_options));
    DestroyAndReopen(options);

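    // Reset the static counters so each priority setting is measured
    // independently.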
    MockCache::high_pri_insert_count = 0;
    MockCache::low_pri_insert_count = 0;

    // Create a new table.
    ASSERT_OK(Put("foo", "value"));
    ASSERT_OK(Put("bar", "value"));
    ASSERT_OK(Flush());
    ASSERT_EQ(1, NumTableFilesAtLevel(0));

    // index/filter blocks added to block cache right after table creation.
    ASSERT_EQ(1, TestGetTickerCount(options, BLOCK_CACHE_INDEX_MISS));
    ASSERT_EQ(1, TestGetTickerCount(options, BLOCK_CACHE_FILTER_MISS));
    ASSERT_EQ(2, /* only index/filter were added */
              TestGetTickerCount(options, BLOCK_CACHE_ADD));
    ASSERT_EQ(0, TestGetTickerCount(options, BLOCK_CACHE_DATA_MISS));
    if (priority == Cache::Priority::LOW) {
      ASSERT_EQ(0, MockCache::high_pri_insert_count);
      ASSERT_EQ(2, MockCache::low_pri_insert_count);
    } else {
      ASSERT_EQ(2, MockCache::high_pri_insert_count);
      ASSERT_EQ(0, MockCache::low_pri_insert_count);
    }

    // Access data block.
    ASSERT_EQ("value", Get("foo"));

    ASSERT_EQ(1, TestGetTickerCount(options, BLOCK_CACHE_INDEX_MISS));
    ASSERT_EQ(1, TestGetTickerCount(options, BLOCK_CACHE_FILTER_MISS));
    ASSERT_EQ(3, /*adding data block*/
              TestGetTickerCount(options, BLOCK_CACHE_ADD));
    ASSERT_EQ(1, TestGetTickerCount(options, BLOCK_CACHE_DATA_MISS));

    // Data block should be inserted with low priority.
    if (priority == Cache::Priority::LOW) {
      ASSERT_EQ(0, MockCache::high_pri_insert_count);
      ASSERT_EQ(3, MockCache::low_pri_insert_count);
    } else {
      ASSERT_EQ(2, MockCache::high_pri_insert_count);
      ASSERT_EQ(1, MockCache::low_pri_insert_count);
    }
  }
}

TEST_F(DBBlockCacheTest, ParanoidFileChecks) {
  Options options = CurrentOptions();
  options.create_if_missing = true;
  options.statistics = rocksdb::CreateDBStatistics();
  options.level0_file_num_compaction_trigger = 2;
  options.paranoid_file_checks = true;
  BlockBasedTableOptions table_options;
  table_options.cache_index_and_filter_blocks = false;
  table_options.filter_policy.reset(NewBloomFilterPolicy(20));
  options.table_factory.reset(new BlockBasedTableFactory(table_options));
  CreateAndReopenWithCF({"pikachu"}, options);

  ASSERT_OK(Put(1, "1_key", "val"));
  ASSERT_OK(Put(1, "9_key", "val"));
  // Create a new table.
  ASSERT_OK(Flush(1));
  ASSERT_EQ(1, /* read and cache data block */
            TestGetTickerCount(options, BLOCK_CACHE_ADD));

  ASSERT_OK(Put(1, "1_key2", "val2"));
  ASSERT_OK(Put(1, "9_key2", "val2"));
  // Create a new SST file. This will further trigger a compaction
  // and generate another file.
  ASSERT_OK(Flush(1));
  dbfull()->TEST_WaitForCompact();
  ASSERT_EQ(3, /* Totally 3 files created up to now */
            TestGetTickerCount(options, BLOCK_CACHE_ADD));

  // After disabling options.paranoid_file_checks, no further block is added
  // when a new file is generated.
  ASSERT_OK(
      dbfull()->SetOptions(handles_[1], {{"paranoid_file_checks", "false"}}));

  ASSERT_OK(Put(1, "1_key3", "val3"));
  ASSERT_OK(Put(1, "9_key3", "val3"));
  ASSERT_OK(Flush(1));
  ASSERT_OK(Put(1, "1_key4", "val4"));
  ASSERT_OK(Put(1, "9_key4", "val4"));
  ASSERT_OK(Flush(1));
  dbfull()->TEST_WaitForCompact();
  ASSERT_EQ(3, /* Totally 3 files created up to now */
            TestGetTickerCount(options, BLOCK_CACHE_ADD));
}

TEST_F(DBBlockCacheTest, CompressedCache) {
  if (!Snappy_Supported()) {
    return;
  }
  int num_iter = 80;

  // Run this test in four iterations:
  // Iteration 1: only an uncompressed block cache
  // Iteration 2: only a compressed block cache
  // Iteration 3: both block cache and compressed cache
  // Iteration 4: both block cache and compressed cache, but DB is not
  // compressed
  for (int iter = 0; iter < 4; iter++) {
    Options options = CurrentOptions();
    options.write_buffer_size = 64 * 1024;  // small write buffer
    options.statistics = rocksdb::CreateDBStatistics();

    BlockBasedTableOptions table_options;
    switch (iter) {
      case 0:
        // only uncompressed block cache
        table_options.block_cache = NewLRUCache(8 * 1024);
        table_options.block_cache_compressed = nullptr;
        options.table_factory.reset(NewBlockBasedTableFactory(table_options));
        break;
      case 1:
        // no block cache, only compressed cache
        table_options.no_block_cache = true;
        table_options.block_cache = nullptr;
        table_options.block_cache_compressed = NewLRUCache(8 * 1024);
        options.table_factory.reset(NewBlockBasedTableFactory(table_options));
        break;
      case 2:
        // both compressed and uncompressed block cache
        table_options.block_cache = NewLRUCache(1024);
        table_options.block_cache_compressed = NewLRUCache(8 * 1024);
        options.table_factory.reset(NewBlockBasedTableFactory(table_options));
        break;
      case 3:
        // both block cache and compressed cache, but DB is not compressed
        // also, make block cache sizes bigger, to trigger block cache hits
        table_options.block_cache = NewLRUCache(1024 * 1024);
        table_options.block_cache_compressed = NewLRUCache(8 * 1024 * 1024);
        options.table_factory.reset(NewBlockBasedTableFactory(table_options));
        options.compression = kNoCompression;
        break;
      default:
        FAIL();
    }
    CreateAndReopenWithCF({"pikachu"}, options);
    // default column family doesn't have block cache
    Options no_block_cache_opts;
    no_block_cache_opts.statistics = options.statistics;
    no_block_cache_opts = CurrentOptions(no_block_cache_opts);
    BlockBasedTableOptions table_options_no_bc;
    table_options_no_bc.no_block_cache = true;
    no_block_cache_opts.table_factory.reset(
        NewBlockBasedTableFactory(table_options_no_bc));
    ReopenWithColumnFamilies(
        {"default", "pikachu"},
        std::vector<Options>({no_block_cache_opts, options}));

    Random rnd(301);

    // Write 80 values; each is a 1000-byte random string that is reused for
    // four consecutive keys to get a high compression ratio.
    ASSERT_EQ(NumTableFilesAtLevel(0, 1), 0);
    std::vector<std::string> values;
    std::string str;
    for (int i = 0; i < num_iter; i++) {
      if (i % 4 == 0) {  // high compression ratio
        str = RandomString(&rnd, 1000);
      }
      values.push_back(str);
      ASSERT_OK(Put(1, Key(i), values[i]));
    }

    // flush all data from memtable so that reads are from block cache
    ASSERT_OK(Flush(1));

    for (int i = 0; i < num_iter; i++) {
      ASSERT_EQ(Get(1, Key(i)), values[i]);
    }

    // check that we triggered the appropriate code paths in the cache
    switch (iter) {
      case 0:
        // only uncompressed block cache
        ASSERT_GT(TestGetTickerCount(options, BLOCK_CACHE_MISS), 0);
        ASSERT_EQ(TestGetTickerCount(options, BLOCK_CACHE_COMPRESSED_MISS), 0);
        break;
      case 1:
        // no block cache, only compressed cache
        ASSERT_EQ(TestGetTickerCount(options, BLOCK_CACHE_MISS), 0);
        ASSERT_GT(TestGetTickerCount(options, BLOCK_CACHE_COMPRESSED_MISS), 0);
        break;
      case 2:
        // both compressed and uncompressed block cache
        ASSERT_GT(TestGetTickerCount(options, BLOCK_CACHE_MISS), 0);
        ASSERT_GT(TestGetTickerCount(options, BLOCK_CACHE_COMPRESSED_MISS), 0);
        break;
      case 3:
        // both compressed and uncompressed block cache
        ASSERT_GT(TestGetTickerCount(options, BLOCK_CACHE_MISS), 0);
        ASSERT_GT(TestGetTickerCount(options, BLOCK_CACHE_HIT), 0);
        ASSERT_GT(TestGetTickerCount(options, BLOCK_CACHE_COMPRESSED_MISS), 0);
        // compressed doesn't have any hits since blocks are not compressed on
        // storage
        ASSERT_EQ(TestGetTickerCount(options, BLOCK_CACHE_COMPRESSED_HIT), 0);
        break;
      default:
        FAIL();
    }

    options.create_if_missing = true;
    DestroyAndReopen(options);
  }
}

TEST_F(DBBlockCacheTest, CacheCompressionDict) {
  const int kNumFiles = 4;
  const int kNumEntriesPerFile = 128;
  const int kNumBytesPerEntry = 1024;

  // Try all the available libraries that support dictionary compression
  std::vector<CompressionType> compression_types;
#ifdef ZLIB
  compression_types.push_back(kZlibCompression);
#endif  // ZLIB
#if LZ4_VERSION_NUMBER >= 10400
  compression_types.push_back(kLZ4Compression);
  compression_types.push_back(kLZ4HCCompression);
#endif  // LZ4_VERSION_NUMBER >= 10400
#if ZSTD_VERSION_NUMBER >= 500
  compression_types.push_back(kZSTD);
#endif  // ZSTD_VERSION_NUMBER >= 500
  Random rnd(301);
  for (auto compression_type : compression_types) {
    Options options = CurrentOptions();
    options.compression = compression_type;
    options.compression_opts.max_dict_bytes = 4096;
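    // Setting max_dict_bytes enables dictionary compression, so SST files get
    // a compression dictionary meta-block, which the dictionary-related block
    // cache tickers checked below track.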
    options.create_if_missing = true;
    options.num_levels = 2;
    options.statistics = rocksdb::CreateDBStatistics();
    options.target_file_size_base = kNumEntriesPerFile * kNumBytesPerEntry;
    BlockBasedTableOptions table_options;
    table_options.cache_index_and_filter_blocks = true;
    table_options.block_cache.reset(new MockCache());
    options.table_factory.reset(new BlockBasedTableFactory(table_options));
    DestroyAndReopen(options);

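    // Write kNumFiles files. Keys are interleaved (every kNumFiles-th key goes
    // to the same file), so each file covers the full key range.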
    for (int i = 0; i < kNumFiles; ++i) {
      ASSERT_EQ(i, NumTableFilesAtLevel(0, 0));
      for (int j = 0; j < kNumEntriesPerFile; ++j) {
        std::string value = RandomString(&rnd, kNumBytesPerEntry);
        ASSERT_OK(Put(Key(j * kNumFiles + i), value.c_str()));
      }
      ASSERT_OK(Flush());
    }
    dbfull()->TEST_WaitForCompact();
    ASSERT_EQ(0, NumTableFilesAtLevel(0));
    ASSERT_EQ(kNumFiles, NumTableFilesAtLevel(1));

    // Seek to a key in a file. It should cause the SST's dictionary meta-block
    // to be read.
    RecordCacheCounters(options);
    ASSERT_EQ(0,
              TestGetTickerCount(options, BLOCK_CACHE_COMPRESSION_DICT_MISS));
    ASSERT_EQ(0, TestGetTickerCount(options, BLOCK_CACHE_COMPRESSION_DICT_ADD));
    ASSERT_EQ(
        TestGetTickerCount(options, BLOCK_CACHE_COMPRESSION_DICT_BYTES_INSERT),
        0);
    ReadOptions read_options;
    ASSERT_NE("NOT_FOUND", Get(Key(kNumFiles * kNumEntriesPerFile - 1)));
    // Two blocks missed/added: dictionary and data block
    // One block hit: index since it's prefetched
    CheckCacheCounters(options, 2 /* expected_misses */, 1 /* expected_hits */,
                       2 /* expected_inserts */, 0 /* expected_failures */);
    ASSERT_EQ(1,
              TestGetTickerCount(options, BLOCK_CACHE_COMPRESSION_DICT_MISS));
    ASSERT_EQ(1, TestGetTickerCount(options, BLOCK_CACHE_COMPRESSION_DICT_ADD));
    ASSERT_GT(
        TestGetTickerCount(options, BLOCK_CACHE_COMPRESSION_DICT_BYTES_INSERT),
        0);
  }
}

#endif  // ROCKSDB_LITE

}  // namespace rocksdb

int main(int argc, char** argv) {
  rocksdb::port::InstallStackTraceHandler();
  ::testing::InitGoogleTest(&argc, argv);
  return RUN_ALL_TESTS();
}