// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
// This source code is licensed under both the GPLv2 (found in the
// COPYING file in the root directory) and Apache 2.0 License
// (found in the LICENSE.Apache file in the root directory).
//
#ifndef ROCKSDB_LITE
#include "rocksdb/memtablerep.h"

#include <unordered_set>
#include <set>
#include <memory>
#include <algorithm>
#include <type_traits>

#include "util/arena.h"
#include "db/memtable.h"
#include "memtable/stl_wrappers.h"
#include "port/port.h"
#include "util/mutexlock.h"

namespace rocksdb {
namespace {

using namespace stl_wrappers;

class VectorRep : public MemTableRep {
 public:
  VectorRep(const KeyComparator& compare, Allocator* allocator, size_t count);

  // Insert key into the collection. (The caller will pack key and value into a
  // single buffer and pass that in as the parameter to Insert)
  // REQUIRES: nothing that compares equal to key is currently in the
  // collection.
  virtual void Insert(KeyHandle handle) override;

  // Returns true iff an entry that compares equal to key is in the collection.
  virtual bool Contains(const char* key) const override;

  virtual void MarkReadOnly() override;

  virtual size_t ApproximateMemoryUsage() override;

  virtual void Get(const LookupKey& k, void* callback_args,
                   bool (*callback_func)(void* arg,
                                         const char* entry)) override;

  virtual ~VectorRep() override { }

  class Iterator : public MemTableRep::Iterator {
    class VectorRep* vrep_;
    std::shared_ptr<std::vector<const char*>> bucket_;
    std::vector<const char*>::const_iterator mutable cit_;
    const KeyComparator& compare_;
    std::string tmp_;  // For passing to EncodeKey
    bool mutable sorted_;
    void DoSort() const;
   public:
    explicit Iterator(class VectorRep* vrep,
                      std::shared_ptr<std::vector<const char*>> bucket,
                      const KeyComparator& compare);

    // Initialize an iterator over the specified collection.
    // The returned iterator is not valid.
    // explicit Iterator(const MemTableRep* collection);
    virtual ~Iterator() override { }

    // Returns true iff the iterator is positioned at a valid node.
    virtual bool Valid() const override;

    // Returns the key at the current position.
    // REQUIRES: Valid()
    virtual const char* key() const override;

    // Advances to the next position.
    // REQUIRES: Valid()
    virtual void Next() override;

    // Advances to the previous position.
    // REQUIRES: Valid()
    virtual void Prev() override;

    // Advance to the first entry with a key >= target
    virtual void Seek(const Slice& user_key, const char* memtable_key) override;

    // Advance to the first entry with a key <= target
    virtual void SeekForPrev(const Slice& user_key,
                             const char* memtable_key) override;

    // Position at the first entry in collection.
    // Final state of iterator is Valid() iff collection is not empty.
    virtual void SeekToFirst() override;

    // Position at the last entry in collection.
    // Final state of iterator is Valid() iff collection is not empty.
    virtual void SeekToLast() override;
  };

  // Return an iterator over the keys in this representation.
  virtual MemTableRep::Iterator* GetIterator(Arena* arena) override;

 private:
  friend class Iterator;
  typedef std::vector<const char*> Bucket;
  std::shared_ptr<Bucket> bucket_;
  mutable port::RWMutex rwlock_;
  bool immutable_;
  bool sorted_;
  const KeyComparator& compare_;
};

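// Implementation notes: bucket_ is shared with any outstanding iterators via
// std::shared_ptr; rwlock_ protects bucket_ and the immutable_/sorted_ flags.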
void VectorRep::Insert(KeyHandle handle) {
  auto* key = static_cast<char*>(handle);
  WriteLock l(&rwlock_);
  assert(!immutable_);
  bucket_->push_back(key);
}

// Returns true iff an entry that compares equal to key is in the collection.
// Note: std::find compares the stored const char* values directly, so this
// only finds the exact pointer previously handed to Insert(), not a distinct
// buffer whose contents compare equal.
bool VectorRep::Contains(const char* key) const {
  ReadLock l(&rwlock_);
  return std::find(bucket_->begin(), bucket_->end(), key) != bucket_->end();
}

void VectorRep::MarkReadOnly() {
  WriteLock l(&rwlock_);
  immutable_ = true;
}

size_t VectorRep::ApproximateMemoryUsage() {
  // Only the vector of entry pointers is counted here; the keys themselves
  // live in memory owned by the allocator and are accounted for there.
  return
    sizeof(bucket_) + sizeof(*bucket_) +
    bucket_->size() *
    sizeof(
      std::remove_reference<decltype(*bucket_)>::type::value_type
    );
}

VectorRep::VectorRep(const KeyComparator& compare, Allocator* allocator,
                     size_t count)
    : MemTableRep(allocator),
      bucket_(new Bucket()),
      immutable_(false),
      sorted_(false),
      compare_(compare) {
  bucket_->reserve(count);
}

VectorRep::Iterator::Iterator(class VectorRep* vrep,
                              std::shared_ptr<std::vector<const char*>> bucket,
                              const KeyComparator& compare)
    : vrep_(vrep),
      bucket_(bucket),
      cit_(bucket_->end()),
      compare_(compare),
      sorted_(false) { }

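// Sorting is deferred until the first read. For an iterator attached to an
// immutable memtable (vrep_ != nullptr), the shared bucket is sorted once,
// under the write lock, on behalf of all iterators; otherwise the iterator
// sorts its own private copy of the bucket.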
void VectorRep::Iterator::DoSort() const {
  // A non-null vrep_ means we are working on an immutable memtable.
  if (!sorted_ && vrep_ != nullptr) {
    WriteLock l(&vrep_->rwlock_);
    if (!vrep_->sorted_) {
      std::sort(bucket_->begin(), bucket_->end(), Compare(compare_));
      cit_ = bucket_->begin();
      vrep_->sorted_ = true;
    }
    sorted_ = true;
  }
  if (!sorted_) {
    // This iterator owns a private copy of the bucket; sort it locally.
    std::sort(bucket_->begin(), bucket_->end(), Compare(compare_));
    cit_ = bucket_->begin();
    sorted_ = true;
  }
  assert(sorted_);
  assert(vrep_ == nullptr || vrep_->sorted_);
}

// Returns true iff the iterator is positioned at a valid node.
bool VectorRep::Iterator::Valid() const {
  DoSort();
  return cit_ != bucket_->end();
}

// Returns the key at the current position.
// REQUIRES: Valid()
const char* VectorRep::Iterator::key() const {
  assert(sorted_);
  return *cit_;
}

// Advances to the next position.
// REQUIRES: Valid()
void VectorRep::Iterator::Next() {
  assert(sorted_);
  if (cit_ == bucket_->end()) {
    return;
  }
  ++cit_;
}

// Advances to the previous position.
// REQUIRES: Valid()
void VectorRep::Iterator::Prev() {
  assert(sorted_);
  if (cit_ == bucket_->begin()) {
    // Stepping back from the first element invalidates the iterator, so we
    // set it to past-the-end. This also allows the container to be treated
    // circularly.
    cit_ = bucket_->end();
  } else {
    --cit_;
  }
}

// Advance to the first entry with a key >= target
void VectorRep::Iterator::Seek(const Slice& user_key,
                               const char* memtable_key) {
  DoSort();
  // Do binary search to find first value not less than the target
  const char* encoded_key =
      (memtable_key != nullptr) ? memtable_key : EncodeKey(&tmp_, user_key);
  cit_ = std::equal_range(bucket_->begin(),
                          bucket_->end(),
                          encoded_key,
                          [this] (const char* a, const char* b) {
                            return compare_(a, b) < 0;
                          }).first;
}

// Advance to the first entry with a key <= target
void VectorRep::Iterator::SeekForPrev(const Slice& /*user_key*/,
                                      const char* /*memtable_key*/) {
  // Backward seeks are not supported by the vector representation.
  assert(false);
}

// Position at the first entry in collection.
// Final state of iterator is Valid() iff collection is not empty.
void VectorRep::Iterator::SeekToFirst() {
  DoSort();
  cit_ = bucket_->begin();
}

// Position at the last entry in collection.
// Final state of iterator is Valid() iff collection is not empty.
void VectorRep::Iterator::SeekToLast() {
  DoSort();
  cit_ = bucket_->end();
  if (bucket_->size() != 0) {
    --cit_;
  }
}

void VectorRep::Get(const LookupKey& k, void* callback_args,
                    bool (*callback_func)(void* arg, const char* entry)) {
  rwlock_.ReadLock();
  VectorRep* vector_rep;
  std::shared_ptr<Bucket> bucket;
  if (immutable_) {
    // Immutable memtable: iterate the shared bucket in place.
    vector_rep = this;
  } else {
    // Mutable memtable: iterate over a private copy so that concurrent
    // inserts cannot invalidate the iterator.
    vector_rep = nullptr;
    bucket.reset(new Bucket(*bucket_));  // make a copy
  }
  VectorRep::Iterator iter(vector_rep, immutable_ ? bucket_ : bucket, compare_);
  rwlock_.ReadUnlock();

  for (iter.Seek(k.user_key(), k.memtable_key().data());
       iter.Valid() && callback_func(callback_args, iter.key()); iter.Next()) {
  }
}

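// GetIterator mirrors Get(): an immutable memtable hands out its shared
// bucket, while a mutable one hands out a snapshot copy.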
MemTableRep::Iterator* VectorRep::GetIterator(Arena* arena) {
  char* mem = nullptr;
  if (arena != nullptr) {
    mem = arena->AllocateAligned(sizeof(Iterator));
  }
  ReadLock l(&rwlock_);
  // Do not sort here. The sorting would be done the first time
  // a Seek is performed on the iterator.
  if (immutable_) {
    if (arena == nullptr) {
      return new Iterator(this, bucket_, compare_);
    } else {
      return new (mem) Iterator(this, bucket_, compare_);
    }
  } else {
    std::shared_ptr<Bucket> tmp;
    tmp.reset(new Bucket(*bucket_));  // make a copy
    if (arena == nullptr) {
      return new Iterator(nullptr, tmp, compare_);
    } else {
      return new (mem) Iterator(nullptr, tmp, compare_);
    }
  }
}
} // anon namespace

MemTableRep* VectorRepFactory::CreateMemTableRep(
    const MemTableRep::KeyComparator& compare, Allocator* allocator,
    const SliceTransform*, Logger* /*logger*/) {
  return new VectorRep(compare, allocator, count_);
}
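
// A minimal usage sketch (not part of this file), assuming standard DB
// options; the reserved entry count passed to VectorRepFactory is
// illustrative only:
//
//   rocksdb::Options options;
//   options.memtable_factory.reset(new rocksdb::VectorRepFactory(1000));
//   // The vector rep does not support concurrent memtable writes, so this
//   // must be disabled (only the default skiplist rep supports them).
//   options.allow_concurrent_memtable_write = false;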
} // namespace rocksdb
#endif  // ROCKSDB_LITE