// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
// This source code is licensed under both the GPLv2 (found in the
// COPYING file in the root directory) and Apache 2.0 License
// (found in the LICENSE.Apache file in the root directory).
//
// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file. See the AUTHORS file for names of contributors.
//
// A Cache is an interface that maps keys to values. It has internal
// synchronization and may be safely accessed concurrently from
// multiple threads. It may automatically evict entries to make room
// for new entries. Values have a specified charge against the cache
// capacity. For example, a cache where the values are variable
// length strings may use the length of the string as the charge for
// the string.
//
// A builtin cache implementation with a least-recently-used eviction
// policy is provided. Clients may use their own implementations if
// they want something more sophisticated (like scan-resistance, a
// custom eviction policy, variable cache sizing, etc.)

28 #include "rocksdb/memory_allocator.h"
29 #include "rocksdb/slice.h"
30 #include "rocksdb/statistics.h"
31 #include "rocksdb/status.h"
extern const bool kDefaultToAdaptiveMutex;

struct LRUCacheOptions {
  // Capacity of the cache.
  size_t capacity = 0;

  // Cache is sharded into 2^num_shard_bits shards,
  // by hash of key. Refer to NewLRUCache for further
  // information.
  int num_shard_bits = -1;

  // If strict_capacity_limit is set,
  // inserts into the cache will fail when the cache is full.
  bool strict_capacity_limit = false;

  // Percentage of cache reserved for high-priority entries.
  // If greater than zero, the LRU list will be split into a high-pri
  // list and a low-pri list. High-pri entries will be inserted at the
  // tail of the high-pri list, while low-pri entries will be first inserted
  // at the head of the low-pri list (the midpoint). This is referred to as
  // the midpoint insertion strategy; it makes entries that are never hit
  // in cache age out faster.
  //
  // See also
  // BlockBasedTableOptions::cache_index_and_filter_blocks_with_high_priority.
  double high_pri_pool_ratio = 0.0;

  // If non-nullptr, use this allocator instead of the system allocator when
  // allocating memory for cache blocks. Set this option before you start
  // using the cache!
  //
  // Caveat: when the cache is used as block cache, the memory allocator is
  // ignored when dealing with compression libraries that allocate memory
  // internally (currently only XPRESS).
  std::shared_ptr<MemoryAllocator> memory_allocator;

  // Whether to use adaptive mutexes for cache shards. Note that adaptive
  // mutexes need to be supported by the platform in order for this to have
  // any effect. The default value is true if RocksDB is compiled with
  // -DROCKSDB_DEFAULT_TO_ADAPTIVE_MUTEX, false otherwise.
  bool use_adaptive_mutex = kDefaultToAdaptiveMutex;

  LRUCacheOptions() {}
  LRUCacheOptions(size_t _capacity, int _num_shard_bits,
                  bool _strict_capacity_limit, double _high_pri_pool_ratio,
                  std::shared_ptr<MemoryAllocator> _memory_allocator = nullptr,
                  bool _use_adaptive_mutex = kDefaultToAdaptiveMutex)
      : capacity(_capacity),
        num_shard_bits(_num_shard_bits),
        strict_capacity_limit(_strict_capacity_limit),
        high_pri_pool_ratio(_high_pri_pool_ratio),
        memory_allocator(std::move(_memory_allocator)),
        use_adaptive_mutex(_use_adaptive_mutex) {}
};

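// Illustrative sketch (not part of this header): an options struct configured
// so that half of a 1 GiB capacity is reserved for high-priority entries via
// the midpoint insertion strategy described above:
//
//   rocksdb::LRUCacheOptions opts;
//   opts.capacity = static_cast<size_t>(1) << 30;  // 1 GiB
//   opts.high_pri_pool_ratio = 0.5;                // 50% high-pri pool
//   std::shared_ptr<rocksdb::Cache> cache = rocksdb::NewLRUCache(opts);
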
// Create a new cache with a fixed size capacity. The cache is sharded
// into 2^num_shard_bits shards, by hash of the key. The total capacity
// is divided and evenly assigned to each shard. If strict_capacity_limit
// is set, inserts into the cache will fail when the cache is full. Users can
// also reserve a percentage of the cache for high-priority entries via
// high_pri_pool_ratio.
// num_shard_bits = -1 means it is automatically determined: every shard
// will be at least 512KB and the number of shard bits will not exceed 6.
extern std::shared_ptr<Cache> NewLRUCache(
    size_t capacity, int num_shard_bits = -1,
    bool strict_capacity_limit = false, double high_pri_pool_ratio = 0.0,
    std::shared_ptr<MemoryAllocator> memory_allocator = nullptr,
    bool use_adaptive_mutex = kDefaultToAdaptiveMutex);

extern std::shared_ptr<Cache> NewLRUCache(const LRUCacheOptions& cache_opts);

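// Illustrative sketch (not part of this header): the argument-based overload
// with explicit sharding; num_shard_bits = 6 splits the capacity across
// 2^6 = 64 shards to reduce mutex contention:
//
//   std::shared_ptr<rocksdb::Cache> cache =
//       rocksdb::NewLRUCache(64 << 20 /* 64 MiB */, 6 /* 64 shards */);
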
// Similar to NewLRUCache, but creates a cache based on the CLOCK algorithm
// with better concurrent performance in some cases. See util/clock_cache.cc
// for more details.
//
// Returns nullptr if it is not supported.
extern std::shared_ptr<Cache> NewClockCache(size_t capacity,
                                            int num_shard_bits = -1,
                                            bool strict_capacity_limit = false);

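// Illustrative sketch (not part of this header): since NewClockCache may
// return nullptr on platforms where it is not supported, callers typically
// fall back to the LRU cache:
//
//   std::shared_ptr<rocksdb::Cache> cache = rocksdb::NewClockCache(32 << 20);
//   if (cache == nullptr) {
//     cache = rocksdb::NewLRUCache(32 << 20);
//   }
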
class Cache {
 public:
  // Depending on implementation, cache entries with high priority could be
  // less likely to get evicted than low priority entries.
  enum class Priority { HIGH, LOW };

  Cache(std::shared_ptr<MemoryAllocator> allocator = nullptr)
      : memory_allocator_(std::move(allocator)) {}

  // Destroys all existing entries by calling the "deleter"
  // function that was passed via the Insert() function.
  //
  // @See Insert
  virtual ~Cache() {}

  // Opaque handle to an entry stored in the cache.
  struct Handle {};

  // The type of the Cache.
  virtual const char* Name() const = 0;

  // Insert a mapping from key->value into the cache and assign it
  // the specified charge against the total cache capacity.
  // If strict_capacity_limit is true and the cache reaches its full capacity,
  // return Status::Incomplete.
  //
  // If handle is not nullptr, returns a handle that corresponds to the
  // mapping. The caller must call this->Release(handle) when the returned
  // mapping is no longer needed. In case of error the caller is responsible
  // for cleaning up the value (i.e. calling "deleter").
  //
  // If handle is nullptr, it is as if Release is called immediately after
  // insert. In case of error the value will be cleaned up.
  //
  // When the inserted entry is no longer needed, the key and
  // value will be passed to "deleter".
  virtual Status Insert(const Slice& key, void* value, size_t charge,
                        void (*deleter)(const Slice& key, void* value),
                        Handle** handle = nullptr,
                        Priority priority = Priority::LOW) = 0;

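  // Illustrative sketch (not part of this header): inserting a heap-allocated
  // value with a matching deleter. Because no handle is requested here, the
  // cache invokes the deleter itself if the insert fails:
  //
  //   static void DeleteString(const rocksdb::Slice& /*key*/, void* value) {
  //     delete static_cast<std::string*>(value);
  //   }
  //   auto* value = new std::string("payload");
  //   rocksdb::Status s =
  //       cache->Insert("my_key", value, value->size(), &DeleteString);
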
  // If the cache has no mapping for "key", returns nullptr.
  //
  // Else return a handle that corresponds to the mapping. The caller
  // must call this->Release(handle) when the returned mapping is no
  // longer needed.
  // If stats is not nullptr, relevant tickers may be updated inside the
  // function.
  virtual Handle* Lookup(const Slice& key, Statistics* stats = nullptr) = 0;

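  // Illustrative sketch (not part of this header): the typical
  // Lookup/Value/Release sequence; the entry stays pinned in the cache, and
  // safe to read, until Release is called:
  //
  //   rocksdb::Cache::Handle* h = cache->Lookup("my_key");
  //   if (h != nullptr) {
  //     auto* value = static_cast<std::string*>(cache->Value(h));
  //     // ... use *value ...
  //     cache->Release(h);
  //   }
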
  // Increments the reference count for the handle if it refers to an entry
  // in the cache. Returns true if the refcount was incremented; otherwise,
  // returns false.
  // REQUIRES: handle must have been returned by a method on *this.
  virtual bool Ref(Handle* handle) = 0;

  /**
   * Release a mapping returned by a previous Lookup(). A released entry might
   * still remain in the cache in case it is later looked up by others. If
   * force_erase is set then it will also be erased from the cache if there is
   * no other reference to it. Erasing it should call the deleter function
   * that was provided when the entry was inserted.
   *
   * Returns true if the entry was also erased.
   */
  // REQUIRES: handle must not have been released yet.
  // REQUIRES: handle must have been returned by a method on *this.
  virtual bool Release(Handle* handle, bool force_erase = false) = 0;

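  // Illustrative sketch (not part of this header): using force_erase to drop
  // an entry as soon as the caller is done with it, rather than waiting for
  // eviction:
  //
  //   rocksdb::Cache::Handle* h = cache->Lookup("my_key");
  //   if (h != nullptr) {
  //     bool erased = cache->Release(h, /*force_erase=*/true);
  //     // erased is true only if no other handle still pinned the entry
  //   }
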
  // Return the value encapsulated in a handle returned by a
  // successful Lookup().
  // REQUIRES: handle must not have been released yet.
  // REQUIRES: handle must have been returned by a method on *this.
  virtual void* Value(Handle* handle) = 0;

  // If the cache contains an entry for key, erase it. Note that the
  // underlying entry will be kept around until all existing handles
  // to it have been released.
  virtual void Erase(const Slice& key) = 0;
  // Return a new numeric id. May be used by multiple clients who are
  // sharding the same cache to partition the key space. Typically the
  // client will allocate a new id at startup and prepend the id to
  // its keys.
  virtual uint64_t NewId() = 0;

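  // Illustrative sketch (not part of this header): two clients sharing one
  // cache can partition the key space by prefixing their keys with an id
  // obtained from NewId():
  //
  //   uint64_t id = cache->NewId();
  //   std::string key;
  //   key.append(reinterpret_cast<const char*>(&id), sizeof(id));
  //   key.append("block_at_offset_42");  // hypothetical client-level key
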
  // Sets the maximum configured capacity of the cache. When the new
  // capacity is less than the old capacity and the existing usage is
  // greater than the new capacity, the implementation will do its best to
  // purge released entries from the cache in order to lower the usage.
  virtual void SetCapacity(size_t capacity) = 0;

  // Set whether to return error on insertion when the cache reaches its full
  // capacity.
  virtual void SetStrictCapacityLimit(bool strict_capacity_limit) = 0;

  // Get the flag whether to return error on insertion when the cache reaches
  // its full capacity.
  virtual bool HasStrictCapacityLimit() const = 0;

  // Returns the maximum configured capacity of the cache.
  virtual size_t GetCapacity() const = 0;

  // Returns the memory size for the entries residing in the cache.
  virtual size_t GetUsage() const = 0;

  // Returns the memory size for a specific entry in the cache.
  virtual size_t GetUsage(Handle* handle) const = 0;

  // Returns the memory size for the entries in use by the system (i.e.
  // pinned by outstanding handles).
  virtual size_t GetPinnedUsage() const = 0;

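  // Illustrative sketch (not part of this header): pinned entries are a
  // subset of all entries, so pinned usage never exceeds total usage:
  //
  //   size_t usage = cache->GetUsage();
  //   size_t pinned = cache->GetPinnedUsage();
  //   assert(pinned <= usage);
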
  // Call this on shutdown if you want to speed it up. Cache will disown
  // any underlying data and will not free it on delete. This call will leak
  // memory - call this only if you're shutting down the process.
  // Any attempt to use the cache after this call will fail terribly.
  // Always delete the DB object before calling this method!
  virtual void DisownData() {
    // default implementation is a no-op
  }

  // Apply a callback to all entries in the cache.
  // If thread_safe is true, it will also lock the accesses. Otherwise, it
  // will access the cache without holding the lock.
  virtual void ApplyToAllCacheEntries(void (*callback)(void*, size_t),
                                      bool thread_safe) = 0;

  // Remove all entries.
  // Prerequisite: no entry is referenced.
  virtual void EraseUnRefEntries() = 0;

  virtual std::string GetPrintableOptions() const { return ""; }

  // Mark the last inserted object as being a raw data block. This will be
  // used in tests. The default implementation does nothing.
  virtual void TEST_mark_as_data_block(const Slice& /*key*/,
                                       size_t /*charge*/) {}

  MemoryAllocator* memory_allocator() const { return memory_allocator_.get(); }

 private:
  // No copying allowed
  Cache(const Cache&);
  Cache& operator=(const Cache&);

  std::shared_ptr<MemoryAllocator> memory_allocator_;
};

}  // namespace rocksdb