// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree. An additional grant
// of patent rights can be found in the PATENTS file in the same directory.

#ifndef ROCKSDB_LITE

#include <map>
#include <string>
#include <unordered_map>
#include <unordered_set>
#include <vector>

#include "db/db_impl.h"
#include "rocksdb/cache.h"
#include "rocksdb/table.h"
#include "rocksdb/utilities/memory_util.h"
#include "rocksdb/utilities/stackable_db.h"
#include "table/block_based_table_factory.h"
#include "util/string_util.h"
#include "util/testharness.h"
#include "util/testutil.h"

namespace rocksdb {

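// MemoryTest exercises MemoryUtil::GetApproximateMemoryUsageByType() against
// live DB instances. Each call to UpdateUsagesHistory() appends the reported
// per-type usage to usage_history_, so the tests below can assert how memory
// consumption evolves over a sequence of operations.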
class MemoryTest : public testing::Test {
 public:
  MemoryTest() : kDbDir(test::TmpDir() + "/memory_test"), rnd_(301) {
    // Create the directory outside of assert() so it still happens in
    // release builds where assert() is compiled out.
    Status s = Env::Default()->CreateDirIfMissing(kDbDir);
    assert(s.ok());
    (void)s;
  }

  std::string GetDBName(int id) { return kDbDir + "/db_" + ToString(id); }

  std::string RandomString(int len) {
    std::string r;
    test::RandomString(&rnd_, len, &r);
    return r;
  }

  void UpdateUsagesHistory(const std::vector<DB*>& dbs) {
    std::map<MemoryUtil::UsageType, uint64_t> usage_by_type;
    ASSERT_OK(GetApproximateMemoryUsageByType(dbs, &usage_by_type));
    for (int i = 0; i < MemoryUtil::kNumUsageTypes; ++i) {
      usage_history_[i].push_back(
          usage_by_type[static_cast<MemoryUtil::UsageType>(i)]);
    }
  }

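  // If the factory is a BlockBasedTableFactory, add its block cache and
  // compressed block cache pointers to cache_set.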
  void GetCachePointersFromTableFactory(
      const TableFactory* factory,
      std::unordered_set<const Cache*>* cache_set) {
    const BlockBasedTableFactory* bbtf =
        dynamic_cast<const BlockBasedTableFactory*>(factory);
    if (bbtf != nullptr) {
      const auto bbt_opts = bbtf->table_options();
      cache_set->insert(bbt_opts.block_cache.get());
      cache_set->insert(bbt_opts.block_cache_compressed.get());
    }
  }

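  // Collect every Cache instance referenced by the given DBs: the internal
  // table cache, the row cache from DBOptions, and any block caches owned by
  // the table factories of each column family.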
  void GetCachePointers(const std::vector<DB*>& dbs,
                        std::unordered_set<const Cache*>* cache_set) {
    cache_set->clear();

    for (auto* db : dbs) {
      // Cache from DBImpl
      StackableDB* sdb = dynamic_cast<StackableDB*>(db);
      DBImpl* db_impl = dynamic_cast<DBImpl*>(sdb ? sdb->GetBaseDB() : db);
      if (db_impl != nullptr) {
        cache_set->insert(db_impl->TEST_table_cache());
      }

      // Cache from DBOptions
      cache_set->insert(db->GetDBOptions().row_cache.get());

      // Cache from table factories
      std::unordered_map<std::string, const ImmutableCFOptions*> iopts_map;
      if (db_impl != nullptr) {
        ASSERT_OK(db_impl->TEST_GetAllImmutableCFOptions(&iopts_map));
      }
      for (auto pair : iopts_map) {
        GetCachePointersFromTableFactory(pair.second->table_factory,
                                         cache_set);
      }
    }
  }

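  // Convenience wrapper that gathers the cache pointers for dbs and queries
  // MemoryUtil for a per-type breakdown of their memory usage.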
  Status GetApproximateMemoryUsageByType(
      const std::vector<DB*>& dbs,
      std::map<MemoryUtil::UsageType, uint64_t>* usage_by_type) {
    std::unordered_set<const Cache*> cache_set;
    GetCachePointers(dbs, &cache_set);

    return MemoryUtil::GetApproximateMemoryUsageByType(dbs, cache_set,
                                                       usage_by_type);
  }

  const std::string kDbDir;
  Random rnd_;
  std::vector<uint64_t> usage_history_[MemoryUtil::kNumUsageTypes];
};

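// Sets up a single block cache intended to be shared by all DBs. After the
// initial fill-and-flush phase, reading the keys back must not change the
// table-reader memory usage, since no additional SST files are created.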
TEST_F(MemoryTest, SharedBlockCacheTotal) {
  std::vector<DB*> dbs;
  std::vector<uint64_t> usage_by_type;
  const int kNumDBs = 10;
  const int kKeySize = 100;
  const int kValueSize = 500;
  Options opt;
  opt.create_if_missing = true;
  opt.write_buffer_size = kKeySize + kValueSize;
  opt.max_write_buffer_number = 10;
  opt.min_write_buffer_number_to_merge = 10;
  opt.disable_auto_compactions = true;
  BlockBasedTableOptions bbt_opts;
  bbt_opts.block_cache = NewLRUCache(4096 * 1000 * 10);
  // Install the shared block cache so that every DB opened below uses it.
  opt.table_factory.reset(NewBlockBasedTableFactory(bbt_opts));
  for (int i = 0; i < kNumDBs; ++i) {
    DestroyDB(GetDBName(i), opt);
    DB* db = nullptr;
    ASSERT_OK(DB::Open(opt, GetDBName(i), &db));
    dbs.push_back(db);
  }

  std::vector<std::string> keys_by_db[kNumDBs];

  // Fill one memtable per Put to make memtable use more memory.
  for (int p = 0; p < opt.min_write_buffer_number_to_merge / 2; ++p) {
    for (int i = 0; i < kNumDBs; ++i) {
      for (int j = 0; j < 100; ++j) {
        keys_by_db[i].emplace_back(RandomString(kKeySize));
        dbs[i]->Put(WriteOptions(), keys_by_db[i].back(),
                    RandomString(kValueSize));
      }
      dbs[i]->Flush(FlushOptions());
    }
  }
  for (int i = 0; i < kNumDBs; ++i) {
    for (auto& key : keys_by_db[i]) {
      std::string value;
      dbs[i]->Get(ReadOptions(), key, &value);
    }
    UpdateUsagesHistory(dbs);
  }
  for (size_t i = 1; i < usage_history_[MemoryUtil::kMemTableTotal].size();
       ++i) {
    // Expect table-reader usage to stay equal across the reads, since no
    // additional memtables were flushed.
    ASSERT_EQ(usage_history_[MemoryUtil::kTableReadersTotal][i],
              usage_history_[MemoryUtil::kTableReadersTotal][i - 1]);
  }
  for (int i = 0; i < kNumDBs; ++i) {
    delete dbs[i];
  }
}

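// Tracks memtable, table-reader, and cache usage across three phases: filling
// memtables with Puts, flushing while iterators pin the old memtables, and
// finally releasing the iterators.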
TEST_F(MemoryTest, MemTableAndTableReadersTotal) {
  std::vector<DB*> dbs;
  std::vector<uint64_t> usage_by_type;
  std::vector<std::vector<ColumnFamilyHandle*>> vec_handles;
  const int kNumDBs = 10;
  const int kKeySize = 100;
  const int kValueSize = 500;
  Options opt;
  opt.create_if_missing = true;
  opt.create_missing_column_families = true;
  opt.write_buffer_size = kKeySize + kValueSize;
  opt.max_write_buffer_number = 10;
  opt.min_write_buffer_number_to_merge = 10;
  opt.disable_auto_compactions = true;

  std::vector<ColumnFamilyDescriptor> cf_descs = {
      {kDefaultColumnFamilyName, ColumnFamilyOptions(opt)},
      {"one", ColumnFamilyOptions(opt)},
      {"two", ColumnFamilyOptions(opt)},
  };

  for (int i = 0; i < kNumDBs; ++i) {
    DestroyDB(GetDBName(i), opt);
    dbs.emplace_back();
    vec_handles.emplace_back();
    ASSERT_OK(DB::Open(DBOptions(opt), GetDBName(i), cf_descs,
                       &vec_handles.back(), &dbs.back()));
  }

  // Fill one memtable per Put to make memtable use more memory.
  for (int p = 0; p < opt.min_write_buffer_number_to_merge / 2; ++p) {
    for (int i = 0; i < kNumDBs; ++i) {
      for (auto* handle : vec_handles[i]) {
        dbs[i]->Put(WriteOptions(), handle, RandomString(kKeySize),
                    RandomString(kValueSize));
        UpdateUsagesHistory(dbs);
      }
    }
  }
  // Expect memtable usage (total and unflushed) to increase monotonically
  // with each Put, while table-reader usage stays unchanged since nothing
  // has been flushed yet.
  for (size_t i = 1; i < usage_history_[MemoryUtil::kMemTableTotal].size();
       ++i) {
    ASSERT_GT(usage_history_[MemoryUtil::kMemTableTotal][i],
              usage_history_[MemoryUtil::kMemTableTotal][i - 1]);
    ASSERT_GT(usage_history_[MemoryUtil::kMemTableUnFlushed][i],
              usage_history_[MemoryUtil::kMemTableUnFlushed][i - 1]);
    ASSERT_EQ(usage_history_[MemoryUtil::kTableReadersTotal][i],
              usage_history_[MemoryUtil::kTableReadersTotal][i - 1]);
  }

  size_t usage_check_point = usage_history_[MemoryUtil::kMemTableTotal].size();
  std::vector<Iterator*> iters;

  // For each db, create an iterator (which pins the current memtables), flush
  // all memtables, and then read random keys to populate the block cache.
  for (int i = 0; i < kNumDBs; ++i) {
    iters.push_back(dbs[i]->NewIterator(ReadOptions()));
    dbs[i]->Flush(FlushOptions());

    for (int j = 0; j < 100; ++j) {
      std::string value;
      dbs[i]->Get(ReadOptions(), RandomString(kKeySize), &value);
    }

    UpdateUsagesHistory(dbs);
  }
  for (size_t i = usage_check_point;
       i < usage_history_[MemoryUtil::kMemTableTotal].size(); ++i) {
    // Since the memtables are pinned by the iterators, the total memtable
    // usage should not decrease even though they have been flushed.
    ASSERT_GE(usage_history_[MemoryUtil::kMemTableTotal][i],
              usage_history_[MemoryUtil::kMemTableTotal][i - 1]);
    // Expect the unflushed-memtable usage to decrease monotonically from
    // usage_check_point onward as the memtables are flushed.
    ASSERT_LT(usage_history_[MemoryUtil::kMemTableUnFlushed][i],
              usage_history_[MemoryUtil::kMemTableUnFlushed][i - 1]);
    // Expect table-reader and cache usage to increase as the flushes create
    // new SST files and the reads above pull their blocks into the cache.
    ASSERT_GT(usage_history_[MemoryUtil::kTableReadersTotal][i],
              usage_history_[MemoryUtil::kTableReadersTotal][i - 1]);
    ASSERT_GT(usage_history_[MemoryUtil::kCacheTotal][i],
              usage_history_[MemoryUtil::kCacheTotal][i - 1]);
  }
  usage_check_point = usage_history_[MemoryUtil::kMemTableTotal].size();
  for (int i = 0; i < kNumDBs; ++i) {
    delete iters[i];
    UpdateUsagesHistory(dbs);
  }
  for (size_t i = usage_check_point;
       i < usage_history_[MemoryUtil::kMemTableTotal].size(); ++i) {
    // Expect the total memtable usage to decrease as the iterators are
    // deleted and the flushed memtables they pinned are released.
    ASSERT_LT(usage_history_[MemoryUtil::kMemTableTotal][i],
              usage_history_[MemoryUtil::kMemTableTotal][i - 1]);
    // Since the memory usage of un-flushed memtables is only affected
    // by Put and Flush, expect EQ here as we only delete iterators.
    ASSERT_EQ(usage_history_[MemoryUtil::kMemTableUnFlushed][i],
              usage_history_[MemoryUtil::kMemTableUnFlushed][i - 1]);
    // Expect EQ as no more memtables were flushed.
    ASSERT_EQ(usage_history_[MemoryUtil::kTableReadersTotal][i],
              usage_history_[MemoryUtil::kTableReadersTotal][i - 1]);
  }

  for (int i = 0; i < kNumDBs; ++i) {
    for (auto* handle : vec_handles[i]) {
      delete handle;
    }
    delete dbs[i];
  }
}
}  // namespace rocksdb

int main(int argc, char** argv) {
#if !(defined NDEBUG) || !defined(OS_WIN)
  ::testing::InitGoogleTest(&argc, argv);
  return RUN_ALL_TESTS();
#else
  return 0;
#endif
}

#else
#include <cstdio>

int main(int argc, char** argv) {
  printf("Skipped in RocksDBLite as utilities are not supported.\n");
  return 0;
}
#endif  // !ROCKSDB_LITE