// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
// This source code is licensed under both the GPLv2 (found in the
// COPYING file in the root directory) and Apache 2.0 License
// (found in the LICENSE.Apache file in the root directory).
//
// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file. See the AUTHORS file for names of contributors.
// A Cache is an interface that maps keys to values. It has internal
// synchronization and may be safely accessed concurrently from
// multiple threads. It may automatically evict entries to make room
// for new entries. Values have a specified charge against the cache
// capacity. For example, a cache where the values are variable
// length strings may use the length of the string as the charge for
// the string.
//
// A builtin cache implementation with a least-recently-used eviction
// policy is provided. Clients may use their own implementations if
// they want something more sophisticated (like scan-resistance, a
// custom eviction policy, variable cache sizing, etc.)

#pragma once

#include <stdint.h>

#include <memory>
#include <string>

#include "rocksdb/memory_allocator.h"
#include "rocksdb/slice.h"
#include "rocksdb/statistics.h"
#include "rocksdb/status.h"

namespace ROCKSDB_NAMESPACE {

class Cache;
struct ConfigOptions;

extern const bool kDefaultToAdaptiveMutex;

enum CacheMetadataChargePolicy {
  kDontChargeCacheMetadata,
  kFullChargeCacheMetadata
};

const CacheMetadataChargePolicy kDefaultCacheMetadataChargePolicy =
    kFullChargeCacheMetadata;

struct LRUCacheOptions {
  // Capacity of the cache.
  size_t capacity = 0;

  // Cache is sharded into 2^num_shard_bits shards,
  // by hash of key. Refer to NewLRUCache for further
  // information.
  int num_shard_bits = -1;

  // If strict_capacity_limit is set,
  // inserts to the cache will fail when the cache is full.
  bool strict_capacity_limit = false;

  // Percentage of cache reserved for high priority entries.
  // If greater than zero, the LRU list will be split into a high-pri
  // list and a low-pri list. High-pri entries will be inserted at the
  // tail of the high-pri list, while low-pri entries will be first inserted
  // at the midpoint of the low-pri list. This is referred to as the
  // midpoint insertion strategy: entries that are never hit in the cache
  // are evicted from the low-pri list first. See also
  // BlockBasedTableOptions::cache_index_and_filter_blocks_with_high_priority.
  double high_pri_pool_ratio = 0.5;

  // If non-nullptr, use this allocator instead of the system allocator when
  // allocating memory for cache blocks. Set this field before you start
  // using the cache!
  //
  // Caveat: when the cache is used as block cache, the memory allocator is
  // ignored when dealing with compression libraries that allocate memory
  // internally (currently only XPRESS).
  std::shared_ptr<MemoryAllocator> memory_allocator;

  // Whether to use adaptive mutexes for cache shards. Note that adaptive
  // mutexes need to be supported by the platform in order for this to have
  // any effect. The default value is true if RocksDB is compiled with
  // -DROCKSDB_DEFAULT_TO_ADAPTIVE_MUTEX, false otherwise.
  bool use_adaptive_mutex = kDefaultToAdaptiveMutex;

  CacheMetadataChargePolicy metadata_charge_policy =
      kDefaultCacheMetadataChargePolicy;

  LRUCacheOptions() {}
  LRUCacheOptions(size_t _capacity, int _num_shard_bits,
                  bool _strict_capacity_limit, double _high_pri_pool_ratio,
                  std::shared_ptr<MemoryAllocator> _memory_allocator = nullptr,
                  bool _use_adaptive_mutex = kDefaultToAdaptiveMutex,
                  CacheMetadataChargePolicy _metadata_charge_policy =
                      kDefaultCacheMetadataChargePolicy)
      : capacity(_capacity),
        num_shard_bits(_num_shard_bits),
        strict_capacity_limit(_strict_capacity_limit),
        high_pri_pool_ratio(_high_pri_pool_ratio),
        memory_allocator(std::move(_memory_allocator)),
        use_adaptive_mutex(_use_adaptive_mutex),
        metadata_charge_policy(_metadata_charge_policy) {}
};

// Create a new cache with a fixed size capacity. The cache is sharded
// to 2^num_shard_bits shards, by hash of the key. The total capacity
// is divided and evenly assigned to each shard. If strict_capacity_limit
// is set, inserts to the cache will fail when the cache is full. Users can
// also set the percentage of the cache reserved for high priority entries
// via high_pri_pool_ratio.
// num_shard_bits = -1 means it is automatically determined: every shard
// will be at least 512KB and the number of shard bits will not exceed 6.
extern std::shared_ptr<Cache> NewLRUCache(
    size_t capacity, int num_shard_bits = -1,
    bool strict_capacity_limit = false, double high_pri_pool_ratio = 0.5,
    std::shared_ptr<MemoryAllocator> memory_allocator = nullptr,
    bool use_adaptive_mutex = kDefaultToAdaptiveMutex,
    CacheMetadataChargePolicy metadata_charge_policy =
        kDefaultCacheMetadataChargePolicy);
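//
// Example (a minimal usage sketch; the 64 MB capacity and 6 shard bits are
// illustrative values, and BlockBasedTableOptions comes from rocksdb/table.h,
// which is not included by this header):
//
//   std::shared_ptr<Cache> cache = NewLRUCache(64 << 20 /* capacity */, 6);
//   BlockBasedTableOptions table_options;
//   table_options.block_cache = cache;  // use the cache as a DB's block cache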

extern std::shared_ptr<Cache> NewLRUCache(const LRUCacheOptions& cache_opts);
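//
// Example (a minimal sketch using the options-struct overload; the values are
// illustrative, not recommendations):
//
//   LRUCacheOptions opts(64 << 20 /* capacity */, 4 /* num_shard_bits */,
//                        false /* strict_capacity_limit */,
//                        0.5 /* high_pri_pool_ratio */);
//   std::shared_ptr<Cache> cache = NewLRUCache(opts);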

// Similar to NewLRUCache, but creates a cache based on the CLOCK algorithm
// with better concurrent performance in some cases. See util/clock_cache.cc
// for more detail.
//
// Return nullptr if it is not supported.
extern std::shared_ptr<Cache> NewClockCache(
    size_t capacity, int num_shard_bits = -1,
    bool strict_capacity_limit = false,
    CacheMetadataChargePolicy metadata_charge_policy =
        kDefaultCacheMetadataChargePolicy);
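//
// Example (a minimal sketch; since the CLOCK cache may be unsupported in a
// given build, check for nullptr and fall back to LRU):
//
//   std::shared_ptr<Cache> cache = NewClockCache(64 << 20);
//   if (cache == nullptr) {
//     cache = NewLRUCache(64 << 20);  // fall back when unsupported
//   }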

class Cache {
 public:
  // Depending on implementation, cache entries with high priority could be
  // less likely to get evicted than low priority entries.
  enum class Priority { HIGH, LOW };

  Cache(std::shared_ptr<MemoryAllocator> allocator = nullptr)
      : memory_allocator_(std::move(allocator)) {}
  // No copying allowed
  Cache(const Cache&) = delete;
  Cache& operator=(const Cache&) = delete;

  // Creates a new Cache based on the input value string and returns the
  // result. Currently, this method can be used to create LRUCaches only.
  // @param config_options Options to control how the cache is created
  // @param value The value might be:
  //   - an old-style cache ("1M") -- equivalent to NewLRUCache(1024*1024)
  //   - Name-value option pairs -- "capacity=1M; num_shard_bits=4;"
  //     For the LRUCache, the values are defined in LRUCacheOptions.
  // @param result The new Cache object
  // @return OK if the cache was successfully created
  // @return NotFound if an invalid name was specified in the value
  // @return InvalidArgument if the options were not valid
  static Status CreateFromString(const ConfigOptions& config_options,
                                 const std::string& value,
                                 std::shared_ptr<Cache>* result);
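  //
  // Example (a minimal sketch; the option string shown is illustrative):
  //
  //   ConfigOptions config_options;
  //   std::shared_ptr<Cache> cache;
  //   Status s = Cache::CreateFromString(
  //       config_options, "capacity=1M;num_shard_bits=4", &cache);
  //   if (s.ok()) { /* cache now holds an LRUCache with those options */ }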

  // Destroys all existing entries by calling the "deleter"
  // function that was passed via the Insert() function.
  virtual ~Cache() {}

  // Opaque handle to an entry stored in the cache.
  struct Handle {};

  // The type of the Cache
  virtual const char* Name() const = 0;

  // Insert a mapping from key->value into the cache and assign it
  // the specified charge against the total cache capacity.
  // If strict_capacity_limit is true and the cache reaches its full capacity,
  // return Status::Incomplete.
  //
  // If handle is not nullptr, returns a handle that corresponds to the
  // mapping. The caller must call this->Release(handle) when the returned
  // mapping is no longer needed. In case of error the caller is responsible
  // for cleaning up the value (i.e. calling "deleter").
  //
  // If handle is nullptr, it is as if Release is called immediately after
  // insert. In case of error the value will be cleaned up.
  //
  // When the inserted entry is no longer needed, the key and
  // value will be passed to "deleter".
  virtual Status Insert(const Slice& key, void* value, size_t charge,
                        void (*deleter)(const Slice& key, void* value),
                        Handle** handle = nullptr,
                        Priority priority = Priority::LOW) = 0;
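  //
  // Example (an illustrative sketch; MyValue and DeleteMyValue are
  // hypothetical names, not part of this API):
  //
  //   static void DeleteMyValue(const Slice& /*key*/, void* value) {
  //     delete static_cast<MyValue*>(value);
  //   }
  //   ...
  //   MyValue* v = new MyValue();
  //   Status s = cache->Insert(key, v, sizeof(MyValue), &DeleteMyValue);
  //   // With handle == nullptr, the cache cleans up v via the deleter
  //   // if the insert fails.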

  // If the cache has no mapping for "key", returns nullptr.
  //
  // Else return a handle that corresponds to the mapping. The caller
  // must call this->Release(handle) when the returned mapping is no
  // longer needed.
  // If stats is not nullptr, relative tickers could be used inside the
  // function.
  virtual Handle* Lookup(const Slice& key, Statistics* stats = nullptr) = 0;

  // Increments the reference count for the handle if it refers to an entry in
  // the cache. Returns true if refcount was incremented; otherwise, returns
  // false.
  // REQUIRES: handle must have been returned by a method on *this.
  virtual bool Ref(Handle* handle) = 0;

  /**
   * Release a mapping returned by a previous Lookup(). A released entry might
   * still remain in cache in case it is later looked up by others. If
   * force_erase is set then it is also erased from the cache if there is no
   * other reference to it. Erasing it should call the deleter function that
   * was provided when the entry was inserted.
   *
   * Returns true if the entry was also erased.
   */
  // REQUIRES: handle must not have been released yet.
  // REQUIRES: handle must have been returned by a method on *this.
  virtual bool Release(Handle* handle, bool force_erase = false) = 0;

  // Return the value encapsulated in a handle returned by a
  // successful Lookup().
  // REQUIRES: handle must not have been released yet.
  // REQUIRES: handle must have been returned by a method on *this.
  virtual void* Value(Handle* handle) = 0;
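  //
  // Example (an illustrative sketch of the Lookup/Value/Release read path;
  // MyValue is a hypothetical type):
  //
  //   Cache::Handle* h = cache->Lookup(key);
  //   if (h != nullptr) {
  //     MyValue* v = static_cast<MyValue*>(cache->Value(h));
  //     // ... use *v; the entry stays pinned while the handle is held ...
  //     cache->Release(h);
  //   }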

  // If the cache contains an entry for key, erase it. Note that the
  // underlying entry will be kept around until all existing handles
  // to it have been released.
  virtual void Erase(const Slice& key) = 0;

  // Return a new numeric id. May be used by multiple clients who are
  // sharding the same cache to partition the key space. Typically the
  // client will allocate a new id at startup and prepend the id to
  // its cache keys.
  virtual uint64_t NewId() = 0;
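  //
  // Example (an illustrative sketch; prefixing keys with the id is one common
  // convention, not mandated by this interface):
  //
  //   uint64_t id = cache->NewId();  // once, at client startup
  //   std::string cache_key = std::to_string(id) + user_key;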

  // Sets the maximum configured capacity of the cache. When the new
  // capacity is less than the old capacity and the existing usage is
  // greater than the new capacity, the implementation will do its best
  // to purge the released entries from the cache in order to lower the
  // usage.
  virtual void SetCapacity(size_t capacity) = 0;

  // Set whether to return error on insertion when the cache reaches its full
  // capacity.
  virtual void SetStrictCapacityLimit(bool strict_capacity_limit) = 0;

  // Get the flag indicating whether to return error on insertion when the
  // cache reaches its full capacity.
  virtual bool HasStrictCapacityLimit() const = 0;

  // Returns the maximum configured capacity of the cache.
  virtual size_t GetCapacity() const = 0;

  // Returns the memory size for the entries residing in the cache.
  virtual size_t GetUsage() const = 0;

  // Returns the memory size for a specific entry in the cache.
  virtual size_t GetUsage(Handle* handle) const = 0;

  // Returns the memory size for the entries in use by the system.
  virtual size_t GetPinnedUsage() const = 0;

  // Returns the charge for the specific entry in the cache.
  virtual size_t GetCharge(Handle* handle) const = 0;

  // Call this on shutdown if you want to speed it up. Cache will disown
  // any underlying data and will not free it on delete. This call will leak
  // memory - call this only if you're shutting down the process.
  // Any attempt to use the cache after this call will fail terribly.
  // Always delete the DB object before calling this method!
  virtual void DisownData() {
    // default implementation is a no-op
  }

  // Apply callback to all entries in the cache.
  // If thread_safe is true, it will also lock the accesses. Otherwise, it
  // will access the cache without the lock held.
  virtual void ApplyToAllCacheEntries(void (*callback)(void*, size_t),
                                      bool thread_safe) = 0;
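  //
  // Example (an illustrative sketch; tallies the total charge across all
  // entries, with thread_safe=true so accesses are locked during the scan):
  //
  //   static size_t total_charge = 0;
  //   static void AddCharge(void* /*value*/, size_t charge) {
  //     total_charge += charge;
  //   }
  //   ...
  //   cache->ApplyToAllCacheEntries(&AddCharge, /*thread_safe=*/true);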

  // Remove all entries.
  // Prerequisite: no entry is referenced.
  virtual void EraseUnRefEntries() = 0;

  virtual std::string GetPrintableOptions() const { return ""; }

  MemoryAllocator* memory_allocator() const { return memory_allocator_.get(); }

 private:
  std::shared_ptr<MemoryAllocator> memory_allocator_;
};

}  // namespace ROCKSDB_NAMESPACE