// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
// This source code is licensed under both the GPLv2 (found in the
// COPYING file in the root directory) and Apache 2.0 License
// (found in the LICENSE.Apache file in the root directory).
//
#ifndef ROCKSDB_LITE
#include <algorithm>
#include <memory>
#include <set>
#include <type_traits>
#include <unordered_set>

#include "db/memtable.h"
#include "memory/arena.h"
#include "memtable/stl_wrappers.h"
#include "port/port.h"
#include "rocksdb/memtablerep.h"
#include "rocksdb/utilities/options_type.h"
#include "util/mutexlock.h"

namespace ROCKSDB_NAMESPACE {
namespace {

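// VectorRep stores entries in an unsorted std::vector and defers sorting
// until the first read. Inserts are an O(1) amortized append under a write
// lock; the first positioning of an iterator pays an O(n log n) sort. This
// makes the rep a reasonable fit for write-heavy workloads (e.g. bulk
// loading) where the memtable is rarely scanned before flush.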
class VectorRep : public MemTableRep {
 public:
  VectorRep(const KeyComparator& compare, Allocator* allocator, size_t count);

  // Insert key into the collection. (The caller will pack key and value into
  // a single buffer and pass that in as the parameter to Insert.)
  // REQUIRES: nothing that compares equal to key is currently in the
  // collection.
  void Insert(KeyHandle handle) override;

  // Returns true iff an entry that compares equal to key is in the collection.
  bool Contains(const char* key) const override;

  void MarkReadOnly() override;

  size_t ApproximateMemoryUsage() override;

  void Get(const LookupKey& k, void* callback_args,
           bool (*callback_func)(void* arg, const char* entry)) override;

  ~VectorRep() override {}

  class Iterator : public MemTableRep::Iterator {
    class VectorRep* vrep_;
    std::shared_ptr<std::vector<const char*>> bucket_;
    mutable std::vector<const char*>::const_iterator cit_;
    const KeyComparator& compare_;
    std::string tmp_;  // For passing to EncodeKey
    mutable bool sorted_;
    void DoSort() const;

   public:
    explicit Iterator(class VectorRep* vrep,
                      std::shared_ptr<std::vector<const char*>> bucket,
                      const KeyComparator& compare);

    ~Iterator() override {}
    // Returns true iff the iterator is positioned at a valid node.
    bool Valid() const override;

    // Returns the key at the current position.
    // REQUIRES: Valid()
    const char* key() const override;

    // Advances to the next position.
    // REQUIRES: Valid()
    void Next() override;

    // Advances to the previous position.
    // REQUIRES: Valid()
    void Prev() override;

    // Advance to the first entry with a key >= target.
    void Seek(const Slice& user_key, const char* memtable_key) override;

    // Advance to the first entry with a key <= target.
    void SeekForPrev(const Slice& user_key, const char* memtable_key) override;

    // Position at the first entry in collection.
    // Final state of iterator is Valid() iff collection is not empty.
    void SeekToFirst() override;

    // Position at the last entry in collection.
    // Final state of iterator is Valid() iff collection is not empty.
    void SeekToLast() override;
  };

  // Return an iterator over the keys in this representation.
  MemTableRep::Iterator* GetIterator(Arena* arena) override;

 private:
  friend class Iterator;
  using Bucket = std::vector<const char*>;
  std::shared_ptr<Bucket> bucket_;  // Shared with iterators once immutable.
  mutable port::RWMutex rwlock_;    // Guards bucket_, immutable_, sorted_.
  bool immutable_;                  // Set once by MarkReadOnly().
  bool sorted_;                     // True after the shared bucket is sorted.
  const KeyComparator& compare_;
};

void VectorRep::Insert(KeyHandle handle) {
  auto* key = static_cast<char*>(handle);
  WriteLock l(&rwlock_);
  assert(!immutable_);
  bucket_->push_back(key);
}

// Returns true iff an entry that compares equal to key is in the collection.
// Note: std::find on const char* compares pointer values, so this checks
// whether this exact entry buffer was inserted, not key equality under
// compare_.
bool VectorRep::Contains(const char* key) const {
  ReadLock l(&rwlock_);
  return std::find(bucket_->begin(), bucket_->end(), key) != bucket_->end();
}

void VectorRep::MarkReadOnly() {
  WriteLock l(&rwlock_);
  immutable_ = true;
}

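// ApproximateMemoryUsage() below charges only the vector of entry pointers;
// the key/value payloads are allocated from the arena and accounted for
// separately.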
size_t VectorRep::ApproximateMemoryUsage() {
  return sizeof(bucket_) + sizeof(*bucket_) +
         bucket_->size() *
             sizeof(
                 std::remove_reference<decltype(*bucket_)>::type::value_type);
}

VectorRep::VectorRep(const KeyComparator& compare, Allocator* allocator,
                     size_t count)
    : MemTableRep(allocator),
      bucket_(new Bucket()),
      immutable_(false),
      sorted_(false),
      compare_(compare) {
  bucket_->reserve(count);
}

VectorRep::Iterator::Iterator(class VectorRep* vrep,
                              std::shared_ptr<std::vector<const char*>> bucket,
                              const KeyComparator& compare)
    : vrep_(vrep),
      bucket_(bucket),
      cit_(bucket_->end()),
      compare_(compare),
      sorted_(false) {}

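// Sorting happens in one of two modes. For an immutable memtable the
// iterator shares the rep's bucket, so the sort runs once under the rep's
// write lock and is recorded in vrep_->sorted_ for the benefit of later
// iterators. For a mutable memtable the iterator owns a private copy of the
// bucket (vrep_ == nullptr) and can sort it without any locking.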
void VectorRep::Iterator::DoSort() const {
  // A non-null vrep_ means that we are working on an immutable memtable.
  if (!sorted_ && vrep_ != nullptr) {
    WriteLock l(&vrep_->rwlock_);
    if (!vrep_->sorted_) {
      std::sort(bucket_->begin(), bucket_->end(),
                stl_wrappers::Compare(compare_));
      cit_ = bucket_->begin();
      vrep_->sorted_ = true;
    }
    sorted_ = true;
  }
  if (!sorted_) {
    std::sort(bucket_->begin(), bucket_->end(),
              stl_wrappers::Compare(compare_));
    cit_ = bucket_->begin();
    sorted_ = true;
  }
  assert(sorted_);
  assert(vrep_ == nullptr || vrep_->sorted_);
}

// Returns true iff the iterator is positioned at a valid node.
bool VectorRep::Iterator::Valid() const {
  DoSort();
  return cit_ != bucket_->end();
}

// Returns the key at the current position.
// REQUIRES: Valid()
const char* VectorRep::Iterator::key() const {
  assert(sorted_);
  return *cit_;
}

// Advances to the next position.
// REQUIRES: Valid()
void VectorRep::Iterator::Next() {
  assert(sorted_);
  if (cit_ == bucket_->end()) {
    return;
  }
  ++cit_;
}

// Advances to the previous position.
// REQUIRES: Valid()
void VectorRep::Iterator::Prev() {
  assert(sorted_);
  if (cit_ == bucket_->begin()) {
    // If you try to go back from the first element, the iterator should be
    // invalidated. So we set it to past-the-end. This means that you can
    // treat the container circularly.
    cit_ = bucket_->end();
  } else {
    --cit_;
  }
}

// Advance to the first entry with a key >= target.
void VectorRep::Iterator::Seek(const Slice& user_key,
                               const char* memtable_key) {
  DoSort();
  // Do a binary search to find the first value not less than the target;
  // equal_range().first is equivalent to std::lower_bound here.
  const char* encoded_key =
      (memtable_key != nullptr) ? memtable_key : EncodeKey(&tmp_, user_key);
  cit_ = std::equal_range(bucket_->begin(), bucket_->end(), encoded_key,
                          [this](const char* a, const char* b) {
                            return compare_(a, b) < 0;
                          })
             .first;
}

// Advance to the first entry with a key <= target.
// Not supported by this rep (asserts in debug builds).
void VectorRep::Iterator::SeekForPrev(const Slice& /*user_key*/,
                                      const char* /*memtable_key*/) {
  assert(false);
}

// Position at the first entry in collection.
// Final state of iterator is Valid() iff collection is not empty.
void VectorRep::Iterator::SeekToFirst() {
  DoSort();
  cit_ = bucket_->begin();
}

// Position at the last entry in collection.
// Final state of iterator is Valid() iff collection is not empty.
void VectorRep::Iterator::SeekToLast() {
  DoSort();
  cit_ = bucket_->end();
  if (bucket_->size() != 0) {
    --cit_;
  }
}

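// For an immutable memtable, Get() iterates directly over the shared,
// lazily sorted bucket. For a mutable one it snapshots the bucket under the
// read lock, so the scan below can proceed without holding rwlock_ while
// concurrent inserts continue.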
void VectorRep::Get(const LookupKey& k, void* callback_args,
                    bool (*callback_func)(void* arg, const char* entry)) {
  rwlock_.ReadLock();
  VectorRep* vector_rep;
  std::shared_ptr<Bucket> bucket;
  if (immutable_) {
    vector_rep = this;
  } else {
    vector_rep = nullptr;
    bucket.reset(new Bucket(*bucket_));  // make a copy
  }
  VectorRep::Iterator iter(vector_rep, immutable_ ? bucket_ : bucket, compare_);
  rwlock_.ReadUnlock();

  for (iter.Seek(k.user_key(), k.memtable_key().data());
       iter.Valid() && callback_func(callback_args, iter.key()); iter.Next()) {
  }
}

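// GetIterator() mirrors the same split: iterators over an immutable rep
// share bucket_ (and may sort it in place via DoSort()), while iterators
// over a mutable rep get their own copy so later inserts cannot invalidate
// them.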
MemTableRep::Iterator* VectorRep::GetIterator(Arena* arena) {
  char* mem = nullptr;
  if (arena != nullptr) {
    mem = arena->AllocateAligned(sizeof(Iterator));
  }
  ReadLock l(&rwlock_);
  // Do not sort here. The sorting is done lazily the first time a Seek is
  // performed on the iterator.
  if (immutable_) {
    if (arena == nullptr) {
      return new Iterator(this, bucket_, compare_);
    } else {
      return new (mem) Iterator(this, bucket_, compare_);
    }
  } else {
    std::shared_ptr<Bucket> tmp;
    tmp.reset(new Bucket(*bucket_));  // make a copy
    if (arena == nullptr) {
      return new Iterator(nullptr, tmp, compare_);
    } else {
      return new (mem) Iterator(nullptr, tmp, compare_);
    }
  }
}
}  // namespace

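// Exposes "count" (the initial vector reservation) through the options
// framework; the offset of 0 is relative to the &count_ field registered
// below.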
static std::unordered_map<std::string, OptionTypeInfo> vector_rep_table_info = {
    {"count",
     {0, OptionType::kSizeT, OptionVerificationType::kNormal,
      OptionTypeFlags::kNone}},
};

VectorRepFactory::VectorRepFactory(size_t count) : count_(count) {
  RegisterOptions("VectorRepFactoryOptions", &count_, &vector_rep_table_info);
}

MemTableRep* VectorRepFactory::CreateMemTableRep(
    const MemTableRep::KeyComparator& compare, Allocator* allocator,
    const SliceTransform*, Logger* /*logger*/) {
  return new VectorRep(compare, allocator, count_);
}
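
// A minimal usage sketch (not part of this file), assuming the public
// headers rocksdb/options.h and rocksdb/memtablerep.h; the reserve count of
// 100000 is illustrative:
//
//   rocksdb::Options options;
//   options.memtable_factory.reset(new rocksdb::VectorRepFactory(100000));
//   // The vector rep does not support concurrent memtable writes.
//   options.allow_concurrent_memtable_write = false;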
}  // namespace ROCKSDB_NAMESPACE
#endif  // ROCKSDB_LITE