// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree. An additional grant
// of patent rights can be found in the PATENTS file in the same directory.
//
// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file. See the AUTHORS file for names of contributors.
//
// A Cache is an interface that maps keys to values. It has internal
// synchronization and may be safely accessed concurrently from
// multiple threads. It may automatically evict entries to make room
// for new entries. Values have a specified charge against the cache
// capacity. For example, a cache where the values are variable-length
// strings may use the length of the string as the charge for
// the string.
//
// A builtin cache implementation with a least-recently-used eviction
// policy is provided. Clients may use their own implementations if
// they want something more sophisticated (like scan-resistance, a
// custom eviction policy, variable cache sizing, etc.)

#pragma once

#include <stdint.h>
#include <memory>
#include <string>
#include "rocksdb/slice.h"
#include "rocksdb/statistics.h"
#include "rocksdb/status.h"

namespace rocksdb {

class Cache;

// Create a new cache with a fixed size capacity. The cache is sharded
// into 2^num_shard_bits shards, by hash of the key. The total capacity
// is divided evenly among the shards. If strict_capacity_limit is set,
// inserts into the cache will fail when the cache is full. Users can also
// reserve a fraction of the cache for high-priority entries via
// high_pri_pool_ratio.
// num_shard_bits = -1 means it is automatically determined: every shard
// will be at least 512KB and the number of shard bits will not exceed 6.
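//
// For example (an illustrative sketch; the capacity, shard count, and
// high-priority ratio below are arbitrary values, not recommendations):
//
//   std::shared_ptr<Cache> cache =
//       NewLRUCache(64 << 20 /* capacity: 64MB */,
//                   4 /* num_shard_bits: 2^4 = 16 shards */,
//                   false /* strict_capacity_limit */,
//                   0.5 /* high_pri_pool_ratio */);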
extern std::shared_ptr<Cache> NewLRUCache(size_t capacity,
                                          int num_shard_bits = -1,
                                          bool strict_capacity_limit = false,
                                          double high_pri_pool_ratio = 0.0);

// Similar to NewLRUCache, but creates a cache based on the CLOCK algorithm
// with better concurrent performance in some cases. See util/clock_cache.cc
// for more detail.
//
// Returns nullptr if it is not supported.
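//
// For example (an illustrative sketch; the capacity is arbitrary), falling
// back to an LRU cache when the clock cache is unavailable:
//
//   std::shared_ptr<Cache> cache = NewClockCache(64 << 20);
//   if (cache == nullptr) {
//     cache = NewLRUCache(64 << 20);
//   }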
extern std::shared_ptr<Cache> NewClockCache(size_t capacity,
                                            int num_shard_bits = -1,
                                            bool strict_capacity_limit = false);

class Cache {
 public:
  // Depending on the implementation, cache entries with high priority could
  // be less likely to get evicted than low-priority entries.
  enum class Priority { HIGH, LOW };

  Cache() {}

  // Destroys all existing entries by calling the "deleter"
  // function that was passed via the Insert() function.
  //
  // @See Insert
  virtual ~Cache() {}

  // Opaque handle to an entry stored in the cache.
  struct Handle {};

  // The type of the Cache
  virtual const char* Name() const = 0;

  // Insert a mapping from key->value into the cache and assign it
  // the specified charge against the total cache capacity.
  // If strict_capacity_limit is true and the cache reaches its full capacity,
  // return Status::Incomplete.
  //
  // If handle is not nullptr, returns a handle that corresponds to the
  // mapping. The caller must call this->Release(handle) when the returned
  // mapping is no longer needed. In case of error the caller is responsible
  // for cleaning up the value (i.e. calling "deleter").
  //
  // If handle is nullptr, it is as if Release is called immediately after
  // insert. In case of error the value will be cleaned up.
  //
  // When the inserted entry is no longer needed, the key and
  // value will be passed to "deleter".
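  //
  // For example (an illustrative sketch; "key1" and DeleteString are
  // placeholder names chosen for this example):
  //
  //   static void DeleteString(const Slice& /*key*/, void* value) {
  //     delete static_cast<std::string*>(value);
  //   }
  //
  //   std::string* value = new std::string("cached payload");
  //   Status s = cache->Insert("key1", value, value->size(), &DeleteString);
  //   // No handle was requested, so on error the value has already been
  //   // cleaned up via the deleter; on success it is owned by the cache.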
  virtual Status Insert(const Slice& key, void* value, size_t charge,
                        void (*deleter)(const Slice& key, void* value),
                        Handle** handle = nullptr,
                        Priority priority = Priority::LOW) = 0;

  // If the cache has no mapping for "key", returns nullptr.
  //
  // Else return a handle that corresponds to the mapping. The caller
  // must call this->Release(handle) when the returned mapping is no
  // longer needed.
  // If stats is not nullptr, relevant tickers could be updated inside the
  // function.
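  //
  // For example (an illustrative sketch; assumes "key1" was inserted as a
  // std::string, as in the Insert() example above):
  //
  //   Cache::Handle* handle = cache->Lookup("key1");
  //   if (handle != nullptr) {
  //     auto* value = static_cast<std::string*>(cache->Value(handle));
  //     // ... use *value while the handle is held ...
  //     cache->Release(handle);
  //   }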
  virtual Handle* Lookup(const Slice& key, Statistics* stats = nullptr) = 0;

  // Increments the reference count for the handle if it refers to an entry in
  // the cache. Returns true if refcount was incremented; otherwise, returns
  // false.
  // REQUIRES: handle must have been returned by a method on *this.
  virtual bool Ref(Handle* handle) = 0;

  /**
   * Release a mapping returned by a previous Lookup(). A released entry might
   * still remain in cache in case it is later looked up by others. If
   * force_erase is set then it also erases it from the cache if there is no
   * other reference to it. Erasing it should call the deleter function that
   * was provided when the entry was inserted.
   *
   * Returns true if the entry was also erased.
   */
  // REQUIRES: handle must not have been released yet.
  // REQUIRES: handle must have been returned by a method on *this.
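  //
  // For example (an illustrative sketch): releasing with force_erase set
  // removes the entry if this handle held the last reference to it:
  //
  //   Cache::Handle* handle = cache->Lookup("key1");
  //   if (handle != nullptr) {
  //     bool erased = cache->Release(handle, true /* force_erase */);
  //     // erased is true if the entry was removed from the cache
  //     // (which also invokes its deleter).
  //   }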
  virtual bool Release(Handle* handle, bool force_erase = false) = 0;

  // Return the value encapsulated in a handle returned by a
  // successful Lookup().
  // REQUIRES: handle must not have been released yet.
  // REQUIRES: handle must have been returned by a method on *this.
  virtual void* Value(Handle* handle) = 0;

  // If the cache contains an entry for key, erase it. Note that the
  // underlying entry will be kept around until all existing handles
  // to it have been released.
  virtual void Erase(const Slice& key) = 0;

  // Return a new numeric id. May be used by multiple clients who are
  // sharding the same cache to partition the key space. Typically the
  // client will allocate a new id at startup and prepend the id to
  // its cache keys.
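  //
  // For example (an illustrative sketch of the key-prefixing pattern described
  // above; user_key is a placeholder Slice and the encoding is arbitrary):
  //
  //   uint64_t id = cache->NewId();
  //   std::string cache_key;
  //   cache_key.append(reinterpret_cast<const char*>(&id), sizeof(id));
  //   cache_key.append(user_key.data(), user_key.size());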
  virtual uint64_t NewId() = 0;

  // Sets the maximum configured capacity of the cache. When the new
  // capacity is less than the old capacity and the existing usage is
  // greater than the new capacity, the implementation will do its best to
  // purge released entries from the cache in order to lower the usage.
  virtual void SetCapacity(size_t capacity) = 0;

  // Set whether to return an error on insertion when the cache reaches its
  // full capacity.
  virtual void SetStrictCapacityLimit(bool strict_capacity_limit) = 0;

  // Get the flag for whether to return an error on insertion when the cache
  // reaches its full capacity.
  virtual bool HasStrictCapacityLimit() const = 0;

  // Returns the maximum configured capacity of the cache.
  virtual size_t GetCapacity() const = 0;

  // Returns the memory size for the entries residing in the cache.
  virtual size_t GetUsage() const = 0;

  // Returns the memory size for a specific entry in the cache.
  virtual size_t GetUsage(Handle* handle) const = 0;

  // Returns the memory size for the entries in use by the system.
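  //
  // For example (an illustrative sketch reading the capacity and usage
  // counters together; the output format is arbitrary):
  //
  //   printf("capacity: %zu usage: %zu pinned: %zu\n",
  //          cache->GetCapacity(), cache->GetUsage(),
  //          cache->GetPinnedUsage());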
  virtual size_t GetPinnedUsage() const = 0;

  // Call this on shutdown if you want to speed it up. Cache will disown
  // any underlying data and will not free it on delete. This call will leak
  // memory - call this only if you're shutting down the process.
  // Any attempt to use the cache after this call will fail terribly.
  // Always delete the DB object before calling this method!
  virtual void DisownData() {
    // default implementation is a no-op
  }

  // Apply the callback to all entries in the cache.
  // If thread_safe is true, the accesses are made under the cache's lock.
  // Otherwise, the cache is accessed without the lock held.
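  //
  // For example (an illustrative sketch; CountEntry and the file-scope
  // counters are placeholders chosen for this example):
  //
  //   static size_t total_charge = 0;
  //   static size_t num_entries = 0;
  //   static void CountEntry(void* /*value*/, size_t charge) {
  //     total_charge += charge;
  //     ++num_entries;
  //   }
  //
  //   cache->ApplyToAllCacheEntries(&CountEntry, true /* thread_safe */);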
  virtual void ApplyToAllCacheEntries(void (*callback)(void*, size_t),
                                      bool thread_safe) = 0;

  // Remove all entries.
  // Prerequisite: no entry is referenced.
  virtual void EraseUnRefEntries() = 0;

  virtual std::string GetPrintableOptions() const { return ""; }

 private:
  // No copying allowed
  Cache(const Cache&);
  Cache& operator=(const Cache&);
};

}  // namespace rocksdb