// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
// This source code is licensed under both the GPLv2 (found in the
// COPYING file in the root directory) and Apache 2.0 License
// (found in the LICENSE.Apache file in the root directory).

#ifndef ROCKSDB_LITE

#include "db/db_impl/db_impl.h"
#include "rocksdb/cache.h"
#include "rocksdb/table.h"
#include "rocksdb/utilities/memory_util.h"
#include "rocksdb/utilities/stackable_db.h"
#include "table/block_based/block_based_table_factory.h"
#include "test_util/testharness.h"
#include "test_util/testutil.h"
#include "util/random.h"
#include "util/string_util.h"

namespace ROCKSDB_NAMESPACE {

class MemoryTest : public testing::Test {
 public:
  MemoryTest() : kDbDir(test::PerThreadDBPath("memory_test")), rnd_(301) {
    assert(Env::Default()->CreateDirIfMissing(kDbDir).ok());
  }

  std::string GetDBName(int id) { return kDbDir + "db_" + std::to_string(id); }

  void UpdateUsagesHistory(const std::vector<DB*>& dbs) {
    std::map<MemoryUtil::UsageType, uint64_t> usage_by_type;
    ASSERT_OK(GetApproximateMemoryUsageByType(dbs, &usage_by_type));
    for (int i = 0; i < MemoryUtil::kNumUsageTypes; ++i) {
      usage_history_[i].push_back(
          usage_by_type[static_cast<MemoryUtil::UsageType>(i)]);
    }
  }

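  // Collects the block caches configured on a BlockBasedTableFactory, if any,
  // so that caches shared between DBs are counted only once.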
  void GetCachePointersFromTableFactory(
      const TableFactory* factory,
      std::unordered_set<const Cache*>* cache_set) {
    const auto bbto = factory->GetOptions<BlockBasedTableOptions>();
    if (bbto != nullptr) {
      cache_set->insert(bbto->block_cache.get());
      cache_set->insert(bbto->block_cache_compressed.get());
    }
  }

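  // Gathers every distinct Cache* the given DBs may reference: the table
  // cache owned by DBImpl, the row cache from DBOptions, and the block
  // caches of each column family's table factory.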
  void GetCachePointers(const std::vector<DB*>& dbs,
                        std::unordered_set<const Cache*>* cache_set) {
    cache_set->clear();

    for (auto* db : dbs) {
      assert(db);

      // Cache from DBImpl
      StackableDB* sdb = dynamic_cast<StackableDB*>(db);
      DBImpl* db_impl = dynamic_cast<DBImpl*>(sdb ? sdb->GetBaseDB() : db);
      if (db_impl != nullptr) {
        cache_set->insert(db_impl->TEST_table_cache());
      }

      // Cache from DBOptions
      cache_set->insert(db->GetDBOptions().row_cache.get());

      // Cache from table factories
      std::unordered_map<std::string, const ImmutableCFOptions*> iopts_map;
      if (db_impl != nullptr) {
        ASSERT_OK(db_impl->TEST_GetAllImmutableCFOptions(&iopts_map));
      }
      for (auto pair : iopts_map) {
        GetCachePointersFromTableFactory(pair.second->table_factory.get(),
                                         cache_set);
      }
    }
  }

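  // Convenience wrapper: collects the cache pointers for `dbs` and forwards
  // them to MemoryUtil::GetApproximateMemoryUsageByType().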
  Status GetApproximateMemoryUsageByType(
      const std::vector<DB*>& dbs,
      std::map<MemoryUtil::UsageType, uint64_t>* usage_by_type) {
    std::unordered_set<const Cache*> cache_set;
    GetCachePointers(dbs, &cache_set);

    return MemoryUtil::GetApproximateMemoryUsageByType(dbs, cache_set,
                                                       usage_by_type);
  }

  const std::string kDbDir;
  Random rnd_;
  std::vector<uint64_t> usage_history_[MemoryUtil::kNumUsageTypes];
};

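// Sketch of how an application would typically call the API exercised below
// (names such as `db` and `bbt_opts` are illustrative, not part of this test):
//
//   std::map<MemoryUtil::UsageType, uint64_t> usage;
//   std::unordered_set<const Cache*> caches = {bbt_opts.block_cache.get()};
//   Status s = MemoryUtil::GetApproximateMemoryUsageByType({db}, caches, &usage);
//   uint64_t memtable_bytes = usage[MemoryUtil::kMemTableTotal];
//
// The test below opens several DBs with identical options, reads every key
// back, and expects the table-reader memory reported by MemoryUtil to stay
// constant across the reads, since no additional memtables are flushed while
// reading.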
TEST_F(MemoryTest, SharedBlockCacheTotal) {
  std::vector<DB*> dbs;
  std::vector<uint64_t> usage_by_type;
  const int kNumDBs = 10;
  const int kKeySize = 100;
  const int kValueSize = 500;
  Options opt;
  opt.create_if_missing = true;
  opt.write_buffer_size = kKeySize + kValueSize;
  opt.max_write_buffer_number = 10;
  opt.min_write_buffer_number_to_merge = 10;
  opt.disable_auto_compactions = true;
  BlockBasedTableOptions bbt_opts;
  bbt_opts.block_cache = NewLRUCache(4096 * 1000 * 10);
  for (int i = 0; i < kNumDBs; ++i) {
    ASSERT_OK(DestroyDB(GetDBName(i), opt));
    DB* db = nullptr;
    ASSERT_OK(DB::Open(opt, GetDBName(i), &db));
    dbs.push_back(db);
  }

  std::vector<std::string> keys_by_db[kNumDBs];

  // Fill one memtable per Put to make the memtables use more memory.
  for (int p = 0; p < opt.min_write_buffer_number_to_merge / 2; ++p) {
    for (int i = 0; i < kNumDBs; ++i) {
      for (int j = 0; j < 100; ++j) {
        keys_by_db[i].emplace_back(rnd_.RandomString(kKeySize));
        ASSERT_OK(dbs[i]->Put(WriteOptions(), keys_by_db[i].back(),
                              rnd_.RandomString(kValueSize)));
      }
      ASSERT_OK(dbs[i]->Flush(FlushOptions()));
    }
  }
  for (int i = 0; i < kNumDBs; ++i) {
    for (auto& key : keys_by_db[i]) {
      std::string value;
      ASSERT_OK(dbs[i]->Get(ReadOptions(), key, &value));
    }
    UpdateUsagesHistory(dbs);
  }
  for (size_t i = 1; i < usage_history_[MemoryUtil::kMemTableTotal].size();
       ++i) {
    // Expect EQ as we didn't flush more memtables.
    ASSERT_EQ(usage_history_[MemoryUtil::kTableReadersTotal][i],
              usage_history_[MemoryUtil::kTableReadersTotal][i - 1]);
  }
  for (int i = 0; i < kNumDBs; ++i) {
    delete dbs[i];
  }
}

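// Writes enough data to accumulate unflushed memtables, then creates
// iterators and flushes, and finally releases the iterators, checking at each
// stage that the memtable, table-reader, and cache usage reported by
// MemoryUtil move in the expected direction.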
TEST_F(MemoryTest, MemTableAndTableReadersTotal) {
  std::vector<DB*> dbs;
  std::vector<uint64_t> usage_by_type;
  std::vector<std::vector<ColumnFamilyHandle*>> vec_handles;
  const int kNumDBs = 10;
  // These key/value sizes ensure each KV has its own memtable. Note that the
  // minimum write_buffer_size allowed is 64 KB.
  const int kKeySize = 100;
  const int kValueSize = 1 << 16;
  Options opt;
  opt.create_if_missing = true;
  opt.create_missing_column_families = true;
  opt.write_buffer_size = kKeySize + kValueSize;
  opt.max_write_buffer_number = 10;
  opt.min_write_buffer_number_to_merge = 10;
  opt.disable_auto_compactions = true;

  std::vector<ColumnFamilyDescriptor> cf_descs = {
      {kDefaultColumnFamilyName, ColumnFamilyOptions(opt)},
      {"one", ColumnFamilyOptions(opt)},
      {"two", ColumnFamilyOptions(opt)},
  };

  for (int i = 0; i < kNumDBs; ++i) {
    ASSERT_OK(DestroyDB(GetDBName(i), opt));
    std::vector<ColumnFamilyHandle*> handles;
    dbs.emplace_back();
    vec_handles.emplace_back();
    ASSERT_OK(DB::Open(DBOptions(opt), GetDBName(i), cf_descs,
                       &vec_handles.back(), &dbs.back()));
  }

  // Fill one memtable per Put to make the memtables use more memory.
  for (int p = 0; p < opt.min_write_buffer_number_to_merge / 2; ++p) {
    for (int i = 0; i < kNumDBs; ++i) {
      for (auto* handle : vec_handles[i]) {
        ASSERT_OK(dbs[i]->Put(WriteOptions(), handle,
                              rnd_.RandomString(kKeySize),
                              rnd_.RandomString(kValueSize)));
        UpdateUsagesHistory(dbs);
      }
    }
  }
  // Expect the usage history to be monotonically increasing.
  for (size_t i = 1; i < usage_history_[MemoryUtil::kMemTableTotal].size();
       ++i) {
    ASSERT_GT(usage_history_[MemoryUtil::kMemTableTotal][i],
              usage_history_[MemoryUtil::kMemTableTotal][i - 1]);
    ASSERT_GT(usage_history_[MemoryUtil::kMemTableUnFlushed][i],
              usage_history_[MemoryUtil::kMemTableUnFlushed][i - 1]);
    ASSERT_EQ(usage_history_[MemoryUtil::kTableReadersTotal][i],
              usage_history_[MemoryUtil::kTableReadersTotal][i - 1]);
  }

  size_t usage_check_point = usage_history_[MemoryUtil::kMemTableTotal].size();
  std::vector<Iterator*> iters;

  // Create an iterator and flush all memtables for each db.
  for (int i = 0; i < kNumDBs; ++i) {
    iters.push_back(dbs[i]->NewIterator(ReadOptions()));
    ASSERT_OK(dbs[i]->Flush(FlushOptions()));

    for (int j = 0; j < 100; ++j) {
      std::string value;
      ASSERT_NOK(
          dbs[i]->Get(ReadOptions(), rnd_.RandomString(kKeySize), &value));
    }

    UpdateUsagesHistory(dbs);
  }
  for (size_t i = usage_check_point;
       i < usage_history_[MemoryUtil::kMemTableTotal].size(); ++i) {
    // Since the memtables are pinned by the iterators created above, we do
    // not expect the total memtable usage to decrease.
    ASSERT_GE(usage_history_[MemoryUtil::kMemTableTotal][i],
              usage_history_[MemoryUtil::kMemTableTotal][i - 1]);
    // Expect the un-flushed memtable usage to decrease monotonically from
    // usage_check_point onward, as the memtables are flushed.
    ASSERT_LT(usage_history_[MemoryUtil::kMemTableUnFlushed][i],
              usage_history_[MemoryUtil::kMemTableUnFlushed][i - 1]);
    // Expect the table-reader usage to increase as memtables are flushed
    // into tables.
    ASSERT_GT(usage_history_[MemoryUtil::kTableReadersTotal][i],
              usage_history_[MemoryUtil::kTableReadersTotal][i - 1]);
    ASSERT_GT(usage_history_[MemoryUtil::kCacheTotal][i],
              usage_history_[MemoryUtil::kCacheTotal][i - 1]);
  }
  usage_check_point = usage_history_[MemoryUtil::kMemTableTotal].size();
  for (int i = 0; i < kNumDBs; ++i) {
    // The iterator was never used, so its status should still be OK.
    ASSERT_OK(iters[i]->status());
    delete iters[i];
    UpdateUsagesHistory(dbs);
  }
  for (size_t i = usage_check_point;
       i < usage_history_[MemoryUtil::kMemTableTotal].size(); ++i) {
    // Expect the usage of all memtables to decrease as we delete the
    // iterators that were pinning them.
    ASSERT_LT(usage_history_[MemoryUtil::kMemTableTotal][i],
              usage_history_[MemoryUtil::kMemTableTotal][i - 1]);
    // Since the memory usage of un-flushed memtables is only affected
    // by Put and Flush, expect EQ here as we only delete iterators.
    ASSERT_EQ(usage_history_[MemoryUtil::kMemTableUnFlushed][i],
              usage_history_[MemoryUtil::kMemTableUnFlushed][i - 1]);
    // Expect EQ as we didn't flush more memtables.
    ASSERT_EQ(usage_history_[MemoryUtil::kTableReadersTotal][i],
              usage_history_[MemoryUtil::kTableReadersTotal][i - 1]);
  }

  for (int i = 0; i < kNumDBs; ++i) {
    for (auto* handle : vec_handles[i]) {
      delete handle;
    }
    delete dbs[i];
  }
}
}  // namespace ROCKSDB_NAMESPACE

int main(int argc, char** argv) {
#if !(defined NDEBUG) || !defined(OS_WIN)
  ROCKSDB_NAMESPACE::port::InstallStackTraceHandler();
  ::testing::InitGoogleTest(&argc, argv);
  return RUN_ALL_TESTS();
#else
  return 0;
#endif
}

#else
#include <cstdio>

int main(int /*argc*/, char** /*argv*/) {
  printf("Skipped in RocksDBLite as utilities are not supported.\n");
  return 0;
}
#endif  // !ROCKSDB_LITE