// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
 * Ceph - scalable distributed file system
 *
 * Copyright (C) 2004-2006 Sage Weil <sage@newdream.net>
 *
 * This is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License version 2.1, as published by the Free Software
 * Foundation. See file COPYING.
 *
 */

#ifndef CEPH_SHAREDCACHE_H
#define CEPH_SHAREDCACHE_H

#include <map>
#include <list>
#ifdef WITH_SEASTAR
#include <boost/smart_ptr/local_shared_ptr.hpp>
#else
#include <memory>
#endif
#include "common/ceph_mutex.h"
#include "common/ceph_context.h"
#include "common/dout.h"
#include "include/unordered_map.h"

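// SharedLRU<K, V> is an LRU cache of shared_ptr-managed values.  It keeps
// two structures under a single mutex:
//
//  - weak_refs: a sorted map holding a weak reference to every value that
//    is still alive anywhere, whether pinned by the lru or only by callers;
//  - lru/contents: a list of strong references pinning the max_size most
//    recently used values, plus a hash map indexing into that list.
//
// Every VPtr handed out carries the Cleanup deleter below, so when the
// last reference to a value drops, its weak_refs entry is erased and any
// threads waiting on the expiring entry are woken.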
template <class K, class V>
class SharedLRU {
  CephContext *cct;
#ifdef WITH_SEASTAR
  using VPtr = boost::local_shared_ptr<V>;
  using WeakVPtr = boost::weak_ptr<V>;
#else
  using VPtr = std::shared_ptr<V>;
  using WeakVPtr = std::weak_ptr<V>;
#endif
  ceph::mutex lock;
  size_t max_size;
  ceph::condition_variable cond;
  unsigned size;
public:
  int waiting;
private:
  using C = std::less<K>;
  using H = std::hash<K>;
  ceph::unordered_map<K, typename std::list<std::pair<K, VPtr> >::iterator, H> contents;
  std::list<std::pair<K, VPtr> > lru;

  std::map<K, std::pair<WeakVPtr, V*>, C> weak_refs;

  // Evict from the cold end of the lru until we are back under max_size.
  // Evicted strong refs are handed back via to_release so the caller can
  // drop them after releasing the lock.
  void trim_cache(std::list<VPtr> *to_release) {
    while (size > max_size) {
      to_release->push_back(lru.back().second);
      lru_remove(lru.back().first);
    }
  }

  void lru_remove(const K& key) {
    auto i = contents.find(key);
    if (i == contents.end())
      return;
    lru.erase(i->second);
    --size;
    contents.erase(i);
  }

  // Move an existing entry to the hot end of the lru, or insert a new one
  // (possibly evicting older entries into to_release).
  void lru_add(const K& key, const VPtr& val, std::list<VPtr> *to_release) {
    auto i = contents.find(key);
    if (i != contents.end()) {
      lru.splice(lru.begin(), lru, i->second);
    } else {
      ++size;
      lru.push_front(make_pair(key, val));
      contents[key] = lru.begin();
      trim_cache(to_release);
    }
  }

  // Called by Cleanup when the last strong ref to a value drops: erase
  // the weak_refs entry (if it still refers to this allocation) and wake
  // any waiters blocked on the expiring entry.
  void remove(const K& key, V *valptr) {
    std::lock_guard l{lock};
    auto i = weak_refs.find(key);
    if (i != weak_refs.end() && i->second.second == valptr) {
      weak_refs.erase(i);
    }
    cond.notify_all();
  }

  // Custom deleter attached to every VPtr handed out by the cache.
  class Cleanup {
  public:
    SharedLRU<K, V> *cache;
    K key;
    Cleanup(SharedLRU<K, V> *cache, K key) : cache(cache), key(key) {}
    void operator()(V *ptr) {
      cache->remove(key, ptr);
      delete ptr;
    }
  };

public:
  SharedLRU(CephContext *cct = NULL, size_t max_size = 20)
    : cct(cct),
      lock{ceph::make_mutex("SharedLRU::lock")},
      max_size(max_size),
      size(0), waiting(0) {
    contents.rehash(max_size);
  }

  ~SharedLRU() {
    contents.clear();
    lru.clear();
    if (!weak_refs.empty()) {
      lderr(cct) << "leaked refs:\n";
      dump_weak_refs(*_dout);
      *_dout << dendl;
      if (cct->_conf.get_val<bool>("debug_asserts_on_shutdown")) {
        ceph_assert(weak_refs.empty());
      }
    }
  }

  int get_count() {
    std::lock_guard locker{lock};
    return size;
  }

  void set_cct(CephContext *c) {
    cct = c;
  }

  void dump_weak_refs() {
    lderr(cct) << "leaked refs:\n";
    dump_weak_refs(*_dout);
    *_dout << dendl;
  }

  void dump_weak_refs(std::ostream& out) {
    for (const auto& [key, ref] : weak_refs) {
      out << __func__ << " " << this << " weak_refs: "
          << key << " = " << ref.second
          << " with " << ref.first.use_count() << " refs"
          << std::endl;
    }
  }

  // Drop all strong references held by the lru; values with outstanding
  // user references stay alive (and stay in weak_refs) until released.
  void clear() {
    while (true) {
      VPtr val; // release any ref we have after we drop the lock
      std::lock_guard locker{lock};
      if (size == 0)
        break;

      val = lru.back().second;
      lru_remove(lru.back().first);
    }
  }

  void clear(const K& key) {
    VPtr val; // release any ref we have after we drop the lock
    {
      std::lock_guard l{lock};
      auto i = weak_refs.find(key);
      if (i != weak_refs.end()) {
        val = i->second.first.lock();
      }
      lru_remove(key);
    }
  }

  /* Clears weakrefs in the interval [from, to] -- note that to is inclusive */
  void clear_range(
    const K& from,
    const K& to) {
    std::list<VPtr> vals; // release any refs we have after we drop the lock
    {
      std::lock_guard l{lock};
      auto from_iter = weak_refs.lower_bound(from);
      auto to_iter = weak_refs.upper_bound(to);
      for (auto i = from_iter; i != to_iter; ) {
        vals.push_back(i->second.first.lock());
        lru_remove((i++)->first);
      }
    }
  }

  void purge(const K &key) {
    VPtr val; // release any ref we have after we drop the lock
    {
      std::lock_guard l{lock};
      auto i = weak_refs.find(key);
      if (i != weak_refs.end()) {
        val = i->second.first.lock();
        weak_refs.erase(i);
      }
      lru_remove(key);
    }
  }

  void set_size(size_t new_size) {
    std::list<VPtr> to_release;
    {
      std::lock_guard l{lock};
      max_size = new_size;
      trim_cache(&to_release);
    }
  }

  // Returns a key that is <= every currently cached key, i.e. the smallest
  // key in weak_refs.  Precondition: the cache must not be empty (this
  // dereferences weak_refs.begin()).
  K cached_key_lower_bound() {
    std::lock_guard l{lock};
    return weak_refs.begin()->first;
  }

  // Return the cached value at the smallest key >= key, or at the largest
  // cached key if no such key exists.  If that entry is mid-destruction,
  // wait for it to expire and retry.  Returns a null VPtr if the cache is
  // empty.
  VPtr lower_bound(const K& key) {
    VPtr val;
    std::list<VPtr> to_release;
    {
      std::unique_lock l{lock};
      ++waiting;
      cond.wait(l, [this, &key, &val, &to_release] {
        if (weak_refs.empty()) {
          return true;
        }
        auto i = weak_refs.lower_bound(key);
        if (i == weak_refs.end()) {
          --i;
        }
        if (val = i->second.first.lock(); val) {
          lru_add(i->first, val, &to_release);
          return true;
        } else {
          return false;
        }
      });
      --waiting;
    }
    return val;
  }
  bool get_next(const K &key, std::pair<K, VPtr> *next) {
    std::pair<K, VPtr> r;
    {
      std::lock_guard l{lock};
      VPtr next_val;
      typename std::map<K, std::pair<WeakVPtr, V*>, C>::iterator i = weak_refs.upper_bound(key);

      while (i != weak_refs.end() &&
             !(next_val = i->second.first.lock()))
        ++i;

      if (i == weak_refs.end())
        return false;

      if (next)
        r = make_pair(i->first, next_val);
    }
    if (next)
      *next = r;
    return true;
  }
  bool get_next(const K &key, std::pair<K, V> *next) {
    std::pair<K, VPtr> r;
    bool found = get_next(key, &r);
    if (!found || !next)
      return found;
    next->first = r.first;
    ceph_assert(r.second);
    next->second = *(r.second);
    return found;
  }

  VPtr lookup(const K& key) {
    VPtr val;
    std::list<VPtr> to_release;
    {
      std::unique_lock l{lock};
      ++waiting;
      cond.wait(l, [this, &key, &val, &to_release] {
        if (auto i = weak_refs.find(key); i != weak_refs.end()) {
          if (val = i->second.first.lock(); val) {
            lru_add(key, val, &to_release);
            return true;
          } else {
            return false;
          }
        } else {
          return true;
        }
      });
      --waiting;
    }
    return val;
  }
  VPtr lookup_or_create(const K &key) {
    VPtr val;
    std::list<VPtr> to_release;
    {
      std::unique_lock l{lock};
      cond.wait(l, [this, &key, &val] {
        if (auto i = weak_refs.find(key); i != weak_refs.end()) {
          if (val = i->second.first.lock(); val) {
            return true;
          } else {
            return false;
          }
        } else {
          return true;
        }
      });
      if (!val) {
        val = VPtr{new V{}, Cleanup{this, key}};
        weak_refs.insert(make_pair(key, make_pair(val, val.get())));
      }
      lru_add(key, val, &to_release);
    }
    return val;
  }
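
  // A minimal usage sketch of the lookup path (illustrative only; `inode_t`
  // is a stand-in value type and `cct` an existing CephContext*):
  //
  //   SharedLRU<uint64_t, inode_t> cache(cct, 128);
  //   auto p = cache.lookup_or_create(42); // default-constructs V on a miss
  //   auto q = cache.lookup(42);           // hits while any ref is live
  //   ceph_assert(p == q);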

  /**
   * empty()
   *
   * Returns true iff there are no live references left to anything that has been
   * in the cache.
   */
  bool empty() {
    std::lock_guard l{lock};
    return weak_refs.empty();
  }

  /***
   * Inserts a key if not present, or bumps it to the front of the LRU if
   * it is, and then gives you a reference to the value. If the key already
   * existed, you are responsible for deleting the new value you tried to
   * insert.
   *
   * @param key The key to insert
   * @param value The value that goes with the key
   * @param existed Set to true if the value was already in the
   * map, false otherwise
   * @return A reference to the map's value for the given key
   */
  VPtr add(const K& key, V *value, bool *existed = NULL) {
    VPtr val;
    std::list<VPtr> to_release;
    {
      typename std::map<K, std::pair<WeakVPtr, V*>, C>::iterator actual;
      std::unique_lock l{lock};
      cond.wait(l, [this, &key, &actual, &val] {
        actual = weak_refs.lower_bound(key);
        if (actual != weak_refs.end() && actual->first == key) {
          val = actual->second.first.lock();
          if (val) {
            return true;
          } else {
            return false;
          }
        } else {
          return true;
        }
      });

      if (val) {
        if (existed) {
          *existed = true;
        }
      } else {
        if (existed) {
          *existed = false;
        }
        val = VPtr(value, Cleanup(this, key));
        weak_refs.insert(actual, make_pair(key, make_pair(val, value)));
      }
      lru_add(key, val, &to_release);
    }
    return val;
  }
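
  // Illustration of the ownership contract documented above: when the key
  // was already cached, the cache keeps its existing value and the caller
  // must free the one it offered.
  //
  //   bool existed = false;
  //   V *v = new V();
  //   VPtr p = cache.add(key, v, &existed);
  //   if (existed)
  //     delete v;  // the cache kept its existing value, not ours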

  friend class SharedLRUTest;
};

#endif