// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
// This source code is licensed under both the GPLv2 (found in the
// COPYING file in the root directory) and Apache 2.0 License
// (found in the LICENSE.Apache file in the root directory).
//
// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file. See the AUTHORS file for names of contributors.
//
// A Cache is an interface that maps keys to values. It has internal
// synchronization and may be safely accessed concurrently from
// multiple threads. It may automatically evict entries to make room
// for new entries. Values have a specified charge against the cache
// capacity. For example, a cache where the values are variable
// length strings may use the length of the string as the charge for
// the string.
//
// A builtin cache implementation with a least-recently-used eviction
// policy is provided. Clients may use their own implementations if
// they want something more sophisticated (like scan-resistance, a
// custom eviction policy, variable cache sizing, etc.)
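//
// A minimal usage sketch (hedged; the key, value, and deleter below are
// illustrative examples, not part of this header):
//
//   std::shared_ptr<Cache> cache = NewLRUCache(1 << 20);  // 1MB capacity
//   std::string* value = new std::string("hello");
//   // Charge the entry by the length of the stored string.
//   Status s = cache->Insert(
//       "key1", value, value->size(),
//       [](const Slice& /*key*/, void* v) { delete static_cast<std::string*>(v); });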

#pragma once

#include <stdint.h>
#include <memory>
#include <string>
#include "rocksdb/memory_allocator.h"
#include "rocksdb/slice.h"
#include "rocksdb/statistics.h"
#include "rocksdb/status.h"

namespace ROCKSDB_NAMESPACE {

class Cache;

extern const bool kDefaultToAdaptiveMutex;

enum CacheMetadataChargePolicy {
  kDontChargeCacheMetadata,
  kFullChargeCacheMetadata
};
const CacheMetadataChargePolicy kDefaultCacheMetadataChargePolicy =
    kFullChargeCacheMetadata;

struct LRUCacheOptions {
  // Capacity of the cache.
  size_t capacity = 0;

  // Cache is sharded into 2^num_shard_bits shards,
  // by hash of key. Refer to NewLRUCache for further
  // information.
  int num_shard_bits = -1;

  // If strict_capacity_limit is set,
  // inserts into the cache will fail when the cache is full.
  bool strict_capacity_limit = false;

  // Percentage of cache reserved for high priority entries.
  // If greater than zero, the LRU list will be split into a high-pri
  // list and a low-pri list. High-pri entries will be inserted at the
  // tail of the high-pri list, while low-pri entries will first be
  // inserted to the low-pri list (the midpoint). This is referred to as
  // the midpoint insertion strategy; it makes entries that never get hit
  // in the cache age out faster.
  //
  // See also
  // BlockBasedTableOptions::cache_index_and_filter_blocks_with_high_priority.
  double high_pri_pool_ratio = 0.5;

  // If non-nullptr, this allocator will be used instead of the system
  // allocator when allocating memory for cache blocks. Set this before you
  // start using the cache!
  //
  // Caveat: when the cache is used as a block cache, the memory allocator is
  // ignored when dealing with compression libraries that allocate memory
  // internally (currently only XPRESS).
  std::shared_ptr<MemoryAllocator> memory_allocator;

  // Whether to use adaptive mutexes for cache shards. Note that adaptive
  // mutexes need to be supported by the platform in order for this to have
  // any effect. The default value is true if RocksDB is compiled with
  // -DROCKSDB_DEFAULT_TO_ADAPTIVE_MUTEX, false otherwise.
  bool use_adaptive_mutex = kDefaultToAdaptiveMutex;

  CacheMetadataChargePolicy metadata_charge_policy =
      kDefaultCacheMetadataChargePolicy;

  LRUCacheOptions() {}
  LRUCacheOptions(size_t _capacity, int _num_shard_bits,
                  bool _strict_capacity_limit, double _high_pri_pool_ratio,
                  std::shared_ptr<MemoryAllocator> _memory_allocator = nullptr,
                  bool _use_adaptive_mutex = kDefaultToAdaptiveMutex,
                  CacheMetadataChargePolicy _metadata_charge_policy =
                      kDefaultCacheMetadataChargePolicy)
      : capacity(_capacity),
        num_shard_bits(_num_shard_bits),
        strict_capacity_limit(_strict_capacity_limit),
        high_pri_pool_ratio(_high_pri_pool_ratio),
        memory_allocator(std::move(_memory_allocator)),
        use_adaptive_mutex(_use_adaptive_mutex),
        metadata_charge_policy(_metadata_charge_policy) {}
};
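
// For illustration, a hedged sketch of configuring these options (the
// capacity and ratio values are arbitrary examples, not recommendations):
//
//   LRUCacheOptions opts;
//   opts.capacity = 64 << 20;            // 64MB total capacity
//   opts.num_shard_bits = 4;             // 2^4 = 16 shards
//   opts.strict_capacity_limit = false;  // allow inserts past capacity
//   opts.high_pri_pool_ratio = 0.5;      // half the capacity for high-pri
//   std::shared_ptr<Cache> cache = NewLRUCache(opts);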

// Create a new cache with a fixed size capacity. The cache is sharded
// into 2^num_shard_bits shards, by hash of the key. The total capacity
// is divided and evenly assigned to each shard. If strict_capacity_limit
// is set, inserts into the cache will fail when the cache is full. The user
// can also reserve a percentage of the cache for high priority entries via
// high_pri_pool_ratio.
// num_shard_bits = -1 means it is automatically determined: every shard
// will be at least 512KB and the number of shard bits will not exceed 6.
extern std::shared_ptr<Cache> NewLRUCache(
    size_t capacity, int num_shard_bits = -1,
    bool strict_capacity_limit = false, double high_pri_pool_ratio = 0.5,
    std::shared_ptr<MemoryAllocator> memory_allocator = nullptr,
    bool use_adaptive_mutex = kDefaultToAdaptiveMutex,
    CacheMetadataChargePolicy metadata_charge_policy =
        kDefaultCacheMetadataChargePolicy);

extern std::shared_ptr<Cache> NewLRUCache(const LRUCacheOptions& cache_opts);
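
// A hedged sketch of calling the positional overload (values illustrative):
//
//   // 32MB cache; shard count chosen automatically; non-strict capacity.
//   std::shared_ptr<Cache> cache =
//       NewLRUCache(32 << 20, -1 /* num_shard_bits */,
//                   false /* strict_capacity_limit */,
//                   0.5 /* high_pri_pool_ratio */);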

// Similar to NewLRUCache, but creates a cache based on the CLOCK algorithm
// with better concurrent performance in some cases. See util/clock_cache.cc
// for more detail.
//
// Returns nullptr if it is not supported.
extern std::shared_ptr<Cache> NewClockCache(
    size_t capacity, int num_shard_bits = -1,
    bool strict_capacity_limit = false,
    CacheMetadataChargePolicy metadata_charge_policy =
        kDefaultCacheMetadataChargePolicy);
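
// Since NewClockCache may return nullptr on platforms where it is not
// supported, callers typically fall back to NewLRUCache. A hedged sketch:
//
//   std::shared_ptr<Cache> cache = NewClockCache(64 << 20);
//   if (!cache) {
//     cache = NewLRUCache(64 << 20);  // fall back to the LRU implementation
//   }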

class Cache {
 public:
  // Depending on implementation, cache entries with high priority could be
  // less likely to get evicted than low priority entries.
  enum class Priority { HIGH, LOW };

  Cache(std::shared_ptr<MemoryAllocator> allocator = nullptr)
      : memory_allocator_(std::move(allocator)) {}
  // No copying allowed
  Cache(const Cache&) = delete;
  Cache& operator=(const Cache&) = delete;

  // Destroys all existing entries by calling the "deleter"
  // function that was passed via the Insert() function.
  //
  // @See Insert
  virtual ~Cache() {}

  // Opaque handle to an entry stored in the cache.
  struct Handle {};

  // The type of the Cache
  virtual const char* Name() const = 0;

  // Insert a mapping from key->value into the cache and assign it
  // the specified charge against the total cache capacity.
  // If strict_capacity_limit is true and the cache reaches its full capacity,
  // return Status::Incomplete.
  //
  // If handle is not nullptr, returns a handle that corresponds to the
  // mapping. The caller must call this->Release(handle) when the returned
  // mapping is no longer needed. In case of error, the caller is responsible
  // for cleaning up the value (i.e., by calling "deleter").
  //
  // If handle is nullptr, it is as if Release is called immediately after
  // insert. In case of error, the value will be cleaned up.
  //
  // When the inserted entry is no longer needed, the key and
  // value will be passed to "deleter".
  virtual Status Insert(const Slice& key, void* value, size_t charge,
                        void (*deleter)(const Slice& key, void* value),
                        Handle** handle = nullptr,
                        Priority priority = Priority::LOW) = 0;
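
  // A hedged sketch of inserting a heap-allocated value with a matching
  // deleter (the value type and key are illustrative):
  //
  //   static void DeleteString(const Slice& /*key*/, void* value) {
  //     delete static_cast<std::string*>(value);
  //   }
  //   ...
  //   std::string* value = new std::string("payload");
  //   Status s = cache->Insert("key1", value, value->size(), &DeleteString,
  //                            nullptr /* handle */, Cache::Priority::HIGH);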

  // If the cache has no mapping for "key", returns nullptr.
  //
  // Else return a handle that corresponds to the mapping. The caller
  // must call this->Release(handle) when the returned mapping is no
  // longer needed.
  // If stats is not nullptr, relevant tickers may be updated inside the
  // function.
  virtual Handle* Lookup(const Slice& key, Statistics* stats = nullptr) = 0;
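
  // A hedged sketch of the Lookup/Value/Release pattern (key illustrative):
  //
  //   Cache::Handle* handle = cache->Lookup("key1");
  //   if (handle != nullptr) {
  //     auto* value = static_cast<std::string*>(cache->Value(handle));
  //     // ... use *value; the handle pins the entry in the cache ...
  //     cache->Release(handle);
  //   }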

  // Increments the reference count for the handle if it refers to an entry in
  // the cache. Returns true if refcount was incremented; otherwise, returns
  // false.
  // REQUIRES: handle must have been returned by a method on *this.
  virtual bool Ref(Handle* handle) = 0;

  /**
   * Release a mapping returned by a previous Lookup(). A released entry might
   * still remain in cache in case it is later looked up by others. If
   * force_erase is set then it also erases it from the cache if there is no
   * other reference to it. Erasing it should call the deleter function that
   * was provided when the entry was inserted.
   *
   * Returns true if the entry was also erased.
   */
  // REQUIRES: handle must not have been released yet.
  // REQUIRES: handle must have been returned by a method on *this.
  virtual bool Release(Handle* handle, bool force_erase = false) = 0;

  // Return the value encapsulated in a handle returned by a
  // successful Lookup().
  // REQUIRES: handle must not have been released yet.
  // REQUIRES: handle must have been returned by a method on *this.
  virtual void* Value(Handle* handle) = 0;

  // If the cache contains an entry for key, erase it. Note that the
  // underlying entry will be kept around until all existing handles
  // to it have been released.
  virtual void Erase(const Slice& key) = 0;

  // Return a new numeric id. May be used by multiple clients who are
  // sharding the same cache to partition the key space. Typically the
  // client will allocate a new id at startup and prepend the id to
  // its cache keys.
  virtual uint64_t NewId() = 0;
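
  // A hedged sketch of the key-space partitioning described above (the
  // key-building code is illustrative, not part of this header):
  //
  //   // At client startup, allocate an id once:
  //   uint64_t id = cache->NewId();
  //   // Prepend the id when building keys so clients do not collide:
  //   std::string key;
  //   key.append(reinterpret_cast<const char*>(&id), sizeof(id));
  //   key.append(user_key.data(), user_key.size());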

  // Sets the maximum configured capacity of the cache. When the new
  // capacity is less than the old capacity and the existing usage is
  // greater than the new capacity, the implementation will do its best to
  // purge the released entries from the cache in order to lower the usage.
  virtual void SetCapacity(size_t capacity) = 0;

  // Set whether to return an error on insertion when the cache reaches its
  // full capacity.
  virtual void SetStrictCapacityLimit(bool strict_capacity_limit) = 0;

  // Get the flag indicating whether to return an error on insertion when the
  // cache reaches its full capacity.
  virtual bool HasStrictCapacityLimit() const = 0;

  // returns the maximum configured capacity of the cache
  virtual size_t GetCapacity() const = 0;

  // returns the memory size for the entries residing in the cache.
  virtual size_t GetUsage() const = 0;

  // returns the memory size for a specific entry in the cache.
  virtual size_t GetUsage(Handle* handle) const = 0;

  // returns the memory size for the entries in use by the system
  virtual size_t GetPinnedUsage() const = 0;

  // returns the charge for the specific entry in the cache.
  virtual size_t GetCharge(Handle* handle) const = 0;
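
  // A hedged sketch of inspecting memory accounting (cache is illustrative):
  //
  //   size_t total = cache->GetUsage();         // all resident entries
  //   size_t pinned = cache->GetPinnedUsage();  // entries pinned via handles
  //   assert(pinned <= total);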

  // Call this on shutdown if you want to speed it up. Cache will disown
  // any underlying data and will not free it on delete. This call will leak
  // memory - call this only if you're shutting down the process.
  // Any attempt to use the cache after this call will fail terribly.
  // Always delete the DB object before calling this method!
  virtual void DisownData() {
    // default implementation is a no-op
  }

  // Apply a callback to all entries in the cache.
  // If thread_safe is true, it will also lock the accesses. Otherwise, it
  // will access the cache without holding the lock.
  virtual void ApplyToAllCacheEntries(void (*callback)(void*, size_t),
                                      bool thread_safe) = 0;
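
  // A hedged sketch of summing charges with the callback. The callback is a
  // plain function pointer and cannot capture state, so this illustrative
  // example accumulates into a file-scope variable:
  //
  //   static size_t total_charge = 0;
  //   static void AddCharge(void* /*value*/, size_t charge) {
  //     total_charge += charge;
  //   }
  //   ...
  //   cache->ApplyToAllCacheEntries(&AddCharge, true /* thread_safe */);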

  // Remove all entries.
  // Prerequisite: no entry is referenced.
  virtual void EraseUnRefEntries() = 0;

  virtual std::string GetPrintableOptions() const { return ""; }

  MemoryAllocator* memory_allocator() const { return memory_allocator_.get(); }

 private:
  std::shared_ptr<MemoryAllocator> memory_allocator_;
};

}  // namespace ROCKSDB_NAMESPACE