// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
// This source code is licensed under both the GPLv2 (found in the
// COPYING file in the root directory) and Apache 2.0 License
// (found in the LICENSE.Apache file in the root directory).
//
// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file. See the AUTHORS file for names of contributors.
//
// A Cache is an interface that maps keys to values. It has internal
// synchronization and may be safely accessed concurrently from
// multiple threads. It may automatically evict entries to make room
// for new entries. Values have a specified charge against the cache
// capacity. For example, a cache where the values are variable-length
// strings may use the length of the string as the charge for
// the string.
//
// A builtin cache implementation with a least-recently-used eviction
// policy is provided. Clients may use their own implementations if
// they want something more sophisticated (like scan-resistance, a
// custom eviction policy, variable cache sizing, etc.)

#pragma once

#include <stdint.h>
#include <memory>
#include <string>
#include "rocksdb/memory_allocator.h"
#include "rocksdb/slice.h"
#include "rocksdb/statistics.h"
#include "rocksdb/status.h"

namespace ROCKSDB_NAMESPACE {

class Cache;
struct ConfigOptions;

extern const bool kDefaultToAdaptiveMutex;

enum CacheMetadataChargePolicy {
  kDontChargeCacheMetadata,
  kFullChargeCacheMetadata
};
const CacheMetadataChargePolicy kDefaultCacheMetadataChargePolicy =
    kFullChargeCacheMetadata;

struct LRUCacheOptions {
  // Capacity of the cache.
  size_t capacity = 0;

  // Cache is sharded into 2^num_shard_bits shards, by hash of key.
  // Refer to NewLRUCache for further information.
  int num_shard_bits = -1;

  // If strict_capacity_limit is set, inserts into the cache will fail
  // when the cache is full.
  bool strict_capacity_limit = false;

  // Percentage of cache reserved for high priority entries.
  // If greater than zero, the LRU list will be split into a high-pri list
  // and a low-pri list. High-pri entries will be inserted at the tail of
  // the high-pri list, while low-pri entries are first inserted into the
  // low-pri list (the midpoint). This is referred to as the midpoint
  // insertion strategy; it makes entries that never get hit in the cache
  // age out faster.
  //
  // See also
  // BlockBasedTableOptions::cache_index_and_filter_blocks_with_high_priority.
  double high_pri_pool_ratio = 0.5;

  // If non-nullptr, use this allocator instead of the system allocator when
  // allocating memory for cache blocks. Set this option before you start
  // using the cache!
  //
  // Caveat: when the cache is used as block cache, the memory allocator is
  // ignored when dealing with compression libraries that allocate memory
  // internally (currently only XPRESS).
  std::shared_ptr<MemoryAllocator> memory_allocator;

  // Whether to use adaptive mutexes for cache shards. Note that adaptive
  // mutexes need to be supported by the platform in order for this to have
  // any effect. The default value is true if RocksDB is compiled with
  // -DROCKSDB_DEFAULT_TO_ADAPTIVE_MUTEX, false otherwise.
  bool use_adaptive_mutex = kDefaultToAdaptiveMutex;

  CacheMetadataChargePolicy metadata_charge_policy =
      kDefaultCacheMetadataChargePolicy;

  LRUCacheOptions() {}
  LRUCacheOptions(size_t _capacity, int _num_shard_bits,
                  bool _strict_capacity_limit, double _high_pri_pool_ratio,
                  std::shared_ptr<MemoryAllocator> _memory_allocator = nullptr,
                  bool _use_adaptive_mutex = kDefaultToAdaptiveMutex,
                  CacheMetadataChargePolicy _metadata_charge_policy =
                      kDefaultCacheMetadataChargePolicy)
      : capacity(_capacity),
        num_shard_bits(_num_shard_bits),
        strict_capacity_limit(_strict_capacity_limit),
        high_pri_pool_ratio(_high_pri_pool_ratio),
        memory_allocator(std::move(_memory_allocator)),
        use_adaptive_mutex(_use_adaptive_mutex),
        metadata_charge_policy(_metadata_charge_policy) {}
};

// Create a new cache with a fixed size capacity. The cache is sharded
// into 2^num_shard_bits shards, by hash of the key. The total capacity
// is divided and evenly assigned to each shard. If strict_capacity_limit
// is set, inserts into the cache will fail when the cache is full. Users
// can also reserve a percentage of the cache for high priority entries
// via high_pri_pool_ratio.
// num_shard_bits = -1 means it is automatically determined: every shard
// will be at least 512KB and the number of shard bits will not exceed 6.
extern std::shared_ptr<Cache> NewLRUCache(
    size_t capacity, int num_shard_bits = -1,
    bool strict_capacity_limit = false, double high_pri_pool_ratio = 0.5,
    std::shared_ptr<MemoryAllocator> memory_allocator = nullptr,
    bool use_adaptive_mutex = kDefaultToAdaptiveMutex,
    CacheMetadataChargePolicy metadata_charge_policy =
        kDefaultCacheMetadataChargePolicy);

extern std::shared_ptr<Cache> NewLRUCache(const LRUCacheOptions& cache_opts);
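
// A minimal usage sketch (illustrative only, not part of this API): build an
// LRUCacheOptions, create the cache, and wire it up as a block cache.
// BlockBasedTableOptions is assumed to come from rocksdb/table.h.
//
//   LRUCacheOptions cache_opts;
//   cache_opts.capacity = 64 << 20;    // 64 MiB
//   cache_opts.num_shard_bits = 4;     // 16 shards
//   cache_opts.high_pri_pool_ratio = 0.5;
//   std::shared_ptr<Cache> cache = NewLRUCache(cache_opts);
//
//   BlockBasedTableOptions table_opts;
//   table_opts.block_cache = cache;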

// Similar to NewLRUCache, but creates a cache based on the CLOCK algorithm
// with better concurrent performance in some cases. See util/clock_cache.cc
// for more detail.
//
// Returns nullptr if it is not supported.
extern std::shared_ptr<Cache> NewClockCache(
    size_t capacity, int num_shard_bits = -1,
    bool strict_capacity_limit = false,
    CacheMetadataChargePolicy metadata_charge_policy =
        kDefaultCacheMetadataChargePolicy);
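
// Illustrative sketch (not part of this API): since NewClockCache may return
// nullptr on platforms where it is unsupported, callers typically fall back
// to an LRU cache.
//
//   std::shared_ptr<Cache> cache = NewClockCache(32 << 20);
//   if (!cache) {
//     cache = NewLRUCache(32 << 20);
//   }
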
class Cache {
 public:
  // Depending on implementation, cache entries with high priority could be
  // less likely to get evicted than low priority entries.
  enum class Priority { HIGH, LOW };

  Cache(std::shared_ptr<MemoryAllocator> allocator = nullptr)
      : memory_allocator_(std::move(allocator)) {}
  // No copying allowed
  Cache(const Cache&) = delete;
  Cache& operator=(const Cache&) = delete;

  // Creates a new Cache based on the input value string and returns the
  // result. Currently, this method can be used to create LRUCaches only.
  // @param config_options
  // @param value The value might be:
  //   - an old-style cache ("1M") -- equivalent to NewLRUCache(1024*1024)
  //   - Name-value option pairs -- "capacity=1M; num_shard_bits=4;"
  //     For the LRUCache, the values are defined in LRUCacheOptions.
  // @param result The new Cache object
  // @return OK if the cache was successfully created
  // @return NotFound if an invalid name was specified in the value
  // @return InvalidArgument if the options were not valid
  static Status CreateFromString(const ConfigOptions& config_options,
                                 const std::string& value,
                                 std::shared_ptr<Cache>* result);
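
  // A minimal usage sketch (illustrative, not part of the API): parsing a
  // name-value option string into a new LRU cache.
  //
  //   ConfigOptions config_options;
  //   std::shared_ptr<Cache> cache;
  //   Status s = Cache::CreateFromString(
  //       config_options, "capacity=1M;num_shard_bits=4", &cache);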

  // Destroys all existing entries by calling the "deleter"
  // function that was passed via the Insert() function.
  //
  // @See Insert
  virtual ~Cache() {}

  // Opaque handle to an entry stored in the cache.
  struct Handle {};

  // The type of the Cache
  virtual const char* Name() const = 0;

  // Insert a mapping from key->value into the cache and assign it
  // the specified charge against the total cache capacity.
  // If strict_capacity_limit is true and the cache reaches its full
  // capacity, return Status::Incomplete.
  //
  // If handle is not nullptr, returns a handle that corresponds to the
  // mapping. The caller must call this->Release(handle) when the returned
  // mapping is no longer needed. In case of error, the caller is
  // responsible for cleaning up the value (i.e. calling "deleter").
  //
  // If handle is nullptr, it is as if Release is called immediately after
  // insert. In case of error, the value will be cleaned up.
  //
  // When the inserted entry is no longer needed, the key and
  // value will be passed to "deleter".
  virtual Status Insert(const Slice& key, void* value, size_t charge,
                        void (*deleter)(const Slice& key, void* value),
                        Handle** handle = nullptr,
                        Priority priority = Priority::LOW) = 0;
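
  // Illustrative sketch (not part of the API): inserting a heap-allocated
  // value whose deleter frees it once the entry is evicted or erased. With
  // the default handle == nullptr, a failed Insert cleans up the value
  // itself, so the pointer must not be reused afterwards.
  //
  //   static void DeleteString(const Slice& /*key*/, void* value) {
  //     delete static_cast<std::string*>(value);
  //   }
  //
  //   auto* value = new std::string("payload");
  //   Status s = cache->Insert("key", value, value->size(), &DeleteString);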

  // If the cache has no mapping for "key", returns nullptr.
  //
  // Else return a handle that corresponds to the mapping. The caller
  // must call this->Release(handle) when the returned mapping is no
  // longer needed.
  // If stats is not nullptr, relevant statistics tickers may be updated
  // inside the function.
  virtual Handle* Lookup(const Slice& key, Statistics* stats = nullptr) = 0;

  // Increments the reference count for the handle if it refers to an entry in
  // the cache. Returns true if refcount was incremented; otherwise, returns
  // false.
  // REQUIRES: handle must have been returned by a method on *this.
  virtual bool Ref(Handle* handle) = 0;

  // Release a mapping returned by a previous Lookup(). A released entry
  // might still remain in the cache in case it is later looked up by others.
  // If force_erase is set, the entry is also erased from the cache if there
  // is no other reference to it. Erasing the entry should call the deleter
  // function that was provided when the entry was inserted.
  //
  // Returns true if the entry was also erased.
  //
  // REQUIRES: handle must not have been released yet.
  // REQUIRES: handle must have been returned by a method on *this.
  virtual bool Release(Handle* handle, bool force_erase = false) = 0;

  // Return the value encapsulated in a handle returned by a
  // successful Lookup().
  // REQUIRES: handle must not have been released yet.
  // REQUIRES: handle must have been returned by a method on *this.
  virtual void* Value(Handle* handle) = 0;
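
  // A typical Lookup/Value/Release pattern (illustrative sketch; assumes the
  // entry was inserted as a std::string, as in the Insert example above):
  //
  //   Cache::Handle* handle = cache->Lookup("key");
  //   if (handle != nullptr) {
  //     auto* value = static_cast<std::string*>(cache->Value(handle));
  //     // ... use *value while the handle pins the entry ...
  //     cache->Release(handle);
  //   }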

  // If the cache contains an entry for key, erase it. Note that the
  // underlying entry will be kept around until all existing handles
  // to it have been released.
  virtual void Erase(const Slice& key) = 0;

  // Return a new numeric id. May be used by multiple clients who are
  // sharding the same cache to partition the key space. Typically the
  // client will allocate a new id at startup and prepend the id to
  // its cache keys.
  virtual uint64_t NewId() = 0;
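
  // Illustrative sketch (not part of the API): partitioning a shared cache
  // by prefixing each client's keys with an id from NewId(). `user_key` is
  // an assumed Slice holding the client's own key.
  //
  //   uint64_t id = cache->NewId();
  //   std::string key(reinterpret_cast<const char*>(&id), sizeof(id));
  //   key.append(user_key.data(), user_key.size());
  //   // ... use `key` with Insert()/Lookup() ...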

  // Sets the maximum configured capacity of the cache. When the new
  // capacity is less than the old capacity and the existing usage is
  // greater than the new capacity, the implementation will purge released
  // entries from the cache on a best-effort basis to lower the usage.
  virtual void SetCapacity(size_t capacity) = 0;

  // Set whether to return an error on insertion when the cache reaches its
  // full capacity.
  virtual void SetStrictCapacityLimit(bool strict_capacity_limit) = 0;

  // Get the flag that determines whether to return an error on insertion
  // when the cache reaches its full capacity.
  virtual bool HasStrictCapacityLimit() const = 0;

  // Returns the maximum configured capacity of the cache.
  virtual size_t GetCapacity() const = 0;

  // Returns the memory size for the entries residing in the cache.
  virtual size_t GetUsage() const = 0;

  // Returns the memory size for a specific entry in the cache.
  virtual size_t GetUsage(Handle* handle) const = 0;

  // Returns the memory size for the entries in use by the system.
  virtual size_t GetPinnedUsage() const = 0;

  // Returns the charge for the specific entry in the cache.
  virtual size_t GetCharge(Handle* handle) const = 0;

  // Call this on shutdown if you want to speed it up. Cache will disown
  // any underlying data and will not free it on delete. This call will leak
  // memory - call this only if you're shutting down the process.
  // Any attempt to use the cache after this call will fail terribly.
  // Always delete the DB object before calling this method!
  virtual void DisownData() {
    // default implementation is noop
  }

  // Apply callback to all entries in the cache.
  // If thread_safe is true, it will also lock the accesses. Otherwise, it
  // will access the cache without the lock held.
  virtual void ApplyToAllCacheEntries(void (*callback)(void*, size_t),
                                      bool thread_safe) = 0;
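
  // Illustrative sketch (not part of the API): summing the charge of all
  // cached entries. The callback is a plain function pointer, so state is
  // passed through a file-scope variable here.
  //
  //   static size_t total_charge = 0;
  //   static void AddCharge(void* /*value*/, size_t charge) {
  //     total_charge += charge;
  //   }
  //   ...
  //   cache->ApplyToAllCacheEntries(&AddCharge, /*thread_safe=*/true);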

  // Remove all entries.
  // Prerequisite: no entry is referenced.
  virtual void EraseUnRefEntries() = 0;

  virtual std::string GetPrintableOptions() const { return ""; }

  MemoryAllocator* memory_allocator() const { return memory_allocator_.get(); }

 private:
  std::shared_ptr<MemoryAllocator> memory_allocator_;
};

}  // namespace ROCKSDB_NAMESPACE