// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
// This source code is licensed under both the GPLv2 (found in the
// COPYING file in the root directory) and Apache 2.0 License
// (found in the LICENSE.Apache file in the root directory).
//
// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file. See the AUTHORS file for names of contributors.
//
// A Cache is an interface that maps keys to values. It has internal
// synchronization and may be safely accessed concurrently from
// multiple threads. It may automatically evict entries to make room
// for new entries. Values have a specified charge against the cache
// capacity. For example, a cache where the values are variable-length
// strings may use the length of the string as the charge for the string.
//
// A builtin cache implementation with a least-recently-used eviction
// policy is provided. Clients may use their own implementations if
// they want something more sophisticated (like scan-resistance, a
// custom eviction policy, variable cache sizing, etc.)

#pragma once

#include <stdint.h>
#include <memory>
#include <string>
#include "rocksdb/memory_allocator.h"
#include "rocksdb/slice.h"
#include "rocksdb/statistics.h"
#include "rocksdb/status.h"

namespace rocksdb {

class Cache;

extern const bool kDefaultToAdaptiveMutex;

struct LRUCacheOptions {
  // Capacity of the cache.
  size_t capacity = 0;

  // Cache is sharded into 2^num_shard_bits shards,
  // by hash of key. Refer to NewLRUCache for further
  // information.
  int num_shard_bits = -1;

  // If strict_capacity_limit is set,
  // inserts to the cache will fail when the cache is full.
  bool strict_capacity_limit = false;

  // Percentage of cache reserved for high priority entries.
  // If greater than zero, the LRU list will be split into a high-pri
  // list and a low-pri list. High-pri entries will be inserted at the
  // tail of the high-pri list, while low-pri entries will be first inserted
  // into the low-pri list (the midpoint). This is referred to as the
  // midpoint insertion strategy; it makes entries that never get hit in the
  // cache age out faster.
  //
  // See also
  // BlockBasedTableOptions::cache_index_and_filter_blocks_with_high_priority.
  double high_pri_pool_ratio = 0.0;

  // If non-nullptr, this allocator will be used instead of the system
  // allocator when allocating memory for cache blocks. Set this before you
  // start using the cache!
  //
  // Caveat: when the cache is used as block cache, the memory allocator is
  // ignored when dealing with compression libraries that allocate memory
  // internally (currently only XPRESS).
  std::shared_ptr<MemoryAllocator> memory_allocator;

  // Whether to use adaptive mutexes for cache shards. Note that adaptive
  // mutexes need to be supported by the platform in order for this to have
  // any effect. The default value is true if RocksDB is compiled with
  // -DROCKSDB_DEFAULT_TO_ADAPTIVE_MUTEX, false otherwise.
  bool use_adaptive_mutex = kDefaultToAdaptiveMutex;

  LRUCacheOptions() {}
  LRUCacheOptions(size_t _capacity, int _num_shard_bits,
                  bool _strict_capacity_limit, double _high_pri_pool_ratio,
                  std::shared_ptr<MemoryAllocator> _memory_allocator = nullptr,
                  bool _use_adaptive_mutex = kDefaultToAdaptiveMutex)
      : capacity(_capacity),
        num_shard_bits(_num_shard_bits),
        strict_capacity_limit(_strict_capacity_limit),
        high_pri_pool_ratio(_high_pri_pool_ratio),
        memory_allocator(std::move(_memory_allocator)),
        use_adaptive_mutex(_use_adaptive_mutex) {}
};

// Create a new cache with a fixed size capacity. The cache is sharded
// into 2^num_shard_bits shards, by hash of the key. The total capacity
// is divided and evenly assigned to each shard. If strict_capacity_limit
// is set, inserts to the cache will fail when the cache is full. The user
// can also set the percentage of the cache reserved for high priority
// entries via high_pri_pool_ratio.
// num_shard_bits = -1 means it is automatically determined: every shard
// will be at least 512KB and the number of shard bits will not exceed 6.
extern std::shared_ptr<Cache> NewLRUCache(
    size_t capacity, int num_shard_bits = -1,
    bool strict_capacity_limit = false, double high_pri_pool_ratio = 0.0,
    std::shared_ptr<MemoryAllocator> memory_allocator = nullptr,
    bool use_adaptive_mutex = kDefaultToAdaptiveMutex);

extern std::shared_ptr<Cache> NewLRUCache(const LRUCacheOptions& cache_opts);

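// Illustrative usage sketch (documentation only, not part of this header;
// the option values below are arbitrary examples):
//
//   LRUCacheOptions opts;
//   opts.capacity = 64 << 20;            // 64MB total capacity
//   opts.num_shard_bits = 4;             // 2^4 = 16 shards
//   opts.high_pri_pool_ratio = 0.5;      // half reserved for high-pri entries
//   std::shared_ptr<Cache> cache = NewLRUCache(opts);
//
//   // Equivalent call through the argument-list overload:
//   std::shared_ptr<Cache> cache2 = NewLRUCache(64 << 20, 4, false, 0.5);
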
// Similar to NewLRUCache, but create a cache based on the CLOCK algorithm
// with better concurrent performance in some cases. See util/clock_cache.cc
// for more detail.
//
// Return nullptr if it is not supported.
extern std::shared_ptr<Cache> NewClockCache(size_t capacity,
                                            int num_shard_bits = -1,
                                            bool strict_capacity_limit = false);

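// Illustrative sketch (documentation only): because NewClockCache() may
// return nullptr when the clock cache is not supported in this build,
// callers typically fall back to an LRU cache:
//
//   std::shared_ptr<Cache> cache = NewClockCache(32 << 20);
//   if (cache == nullptr) {
//     cache = NewLRUCache(32 << 20);  // fall back when CLOCK cache is unavailable
//   }
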
class Cache {
 public:
  // Depending on implementation, cache entries with high priority could be
  // less likely to get evicted than low priority entries.
  enum class Priority { HIGH, LOW };

  Cache(std::shared_ptr<MemoryAllocator> allocator = nullptr)
      : memory_allocator_(std::move(allocator)) {}

  // Destroys all existing entries by calling the "deleter"
  // function that was passed via the Insert() function.
  //
  // @See Insert
  virtual ~Cache() {}

  // Opaque handle to an entry stored in the cache.
  struct Handle {};

  // The type of the Cache
  virtual const char* Name() const = 0;

  // Insert a mapping from key->value into the cache and assign it
  // the specified charge against the total cache capacity.
  // If strict_capacity_limit is true and the cache reaches its full capacity,
  // return Status::Incomplete.
  //
  // If handle is not nullptr, returns a handle that corresponds to the
  // mapping. The caller must call this->Release(handle) when the returned
  // mapping is no longer needed. In case of error the caller is responsible
  // for cleaning up the value (i.e. calling "deleter").
  //
  // If handle is nullptr, it is as if Release is called immediately after
  // insert. In case of error the value will be cleaned up.
  //
  // When the inserted entry is no longer needed, the key and
  // value will be passed to "deleter".
  virtual Status Insert(const Slice& key, void* value, size_t charge,
                        void (*deleter)(const Slice& key, void* value),
                        Handle** handle = nullptr,
                        Priority priority = Priority::LOW) = 0;

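  // Illustrative usage sketch (documentation only; the key, value type and
  // deleter below are arbitrary examples):
  //
  //   static void DeleteString(const Slice& /*key*/, void* value) {
  //     delete static_cast<std::string*>(value);
  //   }
  //   ...
  //   std::string* value = new std::string("payload");
  //   Status s = cache->Insert("my_key", value, value->size(), &DeleteString);
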
  // If the cache has no mapping for "key", returns nullptr.
  //
  // Else return a handle that corresponds to the mapping. The caller
  // must call this->Release(handle) when the returned mapping is no
  // longer needed.
  // If stats is not nullptr, relevant tickers could be updated inside the
  // function.
  virtual Handle* Lookup(const Slice& key, Statistics* stats = nullptr) = 0;

  // Increments the reference count for the handle if it refers to an entry in
  // the cache. Returns true if the refcount was incremented; otherwise,
  // returns false.
  // REQUIRES: handle must have been returned by a method on *this.
  virtual bool Ref(Handle* handle) = 0;

  /**
   * Release a mapping returned by a previous Lookup(). A released entry might
   * still remain in the cache in case it is later looked up by others. If
   * force_erase is set then it also erases it from the cache if there is no
   * other reference to it. Erasing it should call the deleter function that
   * was provided when the entry was inserted.
   *
   * Returns true if the entry was also erased.
   */
  // REQUIRES: handle must not have been released yet.
  // REQUIRES: handle must have been returned by a method on *this.
  virtual bool Release(Handle* handle, bool force_erase = false) = 0;

  // Return the value encapsulated in a handle returned by a
  // successful Lookup().
  // REQUIRES: handle must not have been released yet.
  // REQUIRES: handle must have been returned by a method on *this.
  virtual void* Value(Handle* handle) = 0;

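  // Illustrative lookup/release pattern (documentation only; assumes the
  // value was inserted as a std::string*, as in the Insert() sketch above):
  //
  //   Cache::Handle* h = cache->Lookup("my_key");
  //   if (h != nullptr) {
  //     auto* value = static_cast<std::string*>(cache->Value(h));
  //     // ... use *value while the handle is held ...
  //     cache->Release(h);
  //   }
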
  // If the cache contains an entry for key, erase it. Note that the
  // underlying entry will be kept around until all existing handles
  // to it have been released.
  virtual void Erase(const Slice& key) = 0;

  // Return a new numeric id. May be used by multiple clients who are
  // sharding the same cache to partition the key space. Typically the
  // client will allocate a new id at startup and prepend the id to
  // its cache keys.
  virtual uint64_t NewId() = 0;

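  // Illustrative sketch of the key-prefixing scheme described above
  // (documentation only; "user_key" stands for the client's own key, and the
  // fixed-width encoding of the id is up to the client):
  //
  //   uint64_t id = cache->NewId();   // allocated once, at client startup
  //   ...
  //   std::string cache_key;
  //   cache_key.append(reinterpret_cast<const char*>(&id), sizeof(id));
  //   cache_key.append(user_key.data(), user_key.size());
  //   // cache_key is now unique to this client and can be used with
  //   // Insert()/Lookup().
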
  // Sets the maximum configured capacity of the cache. When the new
  // capacity is less than the old capacity and the existing usage is
  // greater than the new capacity, the implementation will do its best to
  // purge the released entries from the cache in order to lower the usage.
  virtual void SetCapacity(size_t capacity) = 0;

  // Set whether to return error on insertion when the cache reaches its full
  // capacity.
  virtual void SetStrictCapacityLimit(bool strict_capacity_limit) = 0;

  // Get the flag whether to return error on insertion when the cache reaches
  // its full capacity.
  virtual bool HasStrictCapacityLimit() const = 0;

  // Returns the maximum configured capacity of the cache.
  virtual size_t GetCapacity() const = 0;

  // Returns the memory size for the entries residing in the cache.
  virtual size_t GetUsage() const = 0;

  // Returns the memory size for a specific entry in the cache.
  virtual size_t GetUsage(Handle* handle) const = 0;

  // Returns the memory size for the entries in use by the system
  // (i.e. entries pinned by outstanding handles).
  virtual size_t GetPinnedUsage() const = 0;

  // Call this on shutdown if you want to speed it up. Cache will disown
  // any underlying data and will not free it on delete. This call will leak
  // memory - call this only if you're shutting down the process.
  // Any attempt to use the cache after this call will fail terribly.
  // Always delete the DB object before calling this method!
  virtual void DisownData() {
    // default implementation is noop
  }

  // Apply the callback to all entries in the cache.
  // If thread_safe is true, it will also lock the accesses. Otherwise, it
  // will access the cache without the lock held.
  virtual void ApplyToAllCacheEntries(void (*callback)(void*, size_t),
                                      bool thread_safe) = 0;

  // Remove all entries.
  // Prerequisite: no entry is referenced.
  virtual void EraseUnRefEntries() = 0;

  virtual std::string GetPrintableOptions() const { return ""; }

  // Mark the last inserted object as being a raw data block. This will be
  // used in tests. The default implementation does nothing.
  virtual void TEST_mark_as_data_block(const Slice& /*key*/,
                                       size_t /*charge*/) {}

  MemoryAllocator* memory_allocator() const { return memory_allocator_.get(); }

 private:
  // No copying allowed
  Cache(const Cache&);
  Cache& operator=(const Cache&);

  std::shared_ptr<MemoryAllocator> memory_allocator_;
};

}  // namespace rocksdb