// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
 * Ceph - scalable distributed file system
 *
 * Copyright (C) 2004-2006 Sage Weil <sage@newdream.net>
 *
 * This is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License version 2.1, as published by the Free Software
 * Foundation. See file COPYING.
 *
 */

#ifndef CEPH_SHAREDCACHE_H
#define CEPH_SHAREDCACHE_H

#include <map>
#include <list>
#include "common/Mutex.h"
#include "common/Cond.h"
#include "include/unordered_map.h"

// re-include our assert to clobber the system one; fix dout:
#include "include/assert.h"

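/**
 * SharedLRU: an LRU cache of reference-counted values.
 *
 * Up to max_size entries are pinned by strong references on the internal
 * lru list; weak_refs additionally tracks every value any caller still
 * holds, so lookup() can revive an entry that has been trimmed from the
 * LRU but is still alive elsewhere.
 *
 * Illustrative usage (a sketch; Object is a hypothetical
 * default-constructible value type):
 *
 *   SharedLRU<int, Object> cache(g_ceph_context, 32);
 *   ceph::shared_ptr<Object> ref = cache.lookup_or_create(7);
 *   // ref stays valid even if key 7 is trimmed from the LRU; while ref
 *   // is held, cache.lookup(7) finds the same object again.
 */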
template <class K, class V, class C = std::less<K>, class H = std::hash<K> >
class SharedLRU {
  CephContext *cct;
  typedef ceph::shared_ptr<V> VPtr;
  typedef ceph::weak_ptr<V> WeakVPtr;
  Mutex lock;
  size_t max_size;
  Cond cond;
  unsigned size;
public:
  int waiting;
private:
  ceph::unordered_map<K, typename list<pair<K, VPtr> >::iterator, H> contents;
  list<pair<K, VPtr> > lru;

  map<K, pair<WeakVPtr, V*>, C> weak_refs;

  void trim_cache(list<VPtr> *to_release) {
    while (size > max_size) {
      to_release->push_back(lru.back().second);
      lru_remove(lru.back().first);
    }
  }

  void lru_remove(const K& key) {
    typename ceph::unordered_map<K, typename list<pair<K, VPtr> >::iterator, H>::iterator i =
      contents.find(key);
    if (i == contents.end())
      return;
    lru.erase(i->second);
    --size;
    contents.erase(i);
  }

  void lru_add(const K& key, const VPtr& val, list<VPtr> *to_release) {
    typename ceph::unordered_map<K, typename list<pair<K, VPtr> >::iterator, H>::iterator i =
      contents.find(key);
    if (i != contents.end()) {
      lru.splice(lru.begin(), lru, i->second);
    } else {
      ++size;
      lru.push_front(make_pair(key, val));
      contents[key] = lru.begin();
      trim_cache(to_release);
    }
  }

  void remove(const K& key, V *valptr) {
    Mutex::Locker l(lock);
    typename map<K, pair<WeakVPtr, V*>, C>::iterator i = weak_refs.find(key);
    if (i != weak_refs.end() && i->second.second == valptr) {
      weak_refs.erase(i);
    }
    cond.Signal();
  }

  class Cleanup {
  public:
    // use all four template parameters so Cleanup points at the exact
    // instantiation even when a non-default hash H is supplied
    SharedLRU<K, V, C, H> *cache;
    K key;
    Cleanup(SharedLRU<K, V, C, H> *cache, K key) : cache(cache), key(key) {}
    void operator()(V *ptr) {
      cache->remove(key, ptr);
      delete ptr;
    }
  };
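
  // Note: Cleanup is installed as the shared_ptr deleter in add() and
  // lookup_or_create(). When the last strong reference to a value drops,
  // it unregisters the key from weak_refs and signals `cond`, waking any
  // lookup()/lookup_or_create() caller that found an expired weak_ptr
  // and is waiting to retry.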

public:
  SharedLRU(CephContext *cct = NULL, size_t max_size = 20)
    : cct(cct), lock("SharedLRU::lock"), max_size(max_size),
      size(0), waiting(0) {
    contents.rehash(max_size);
  }

  ~SharedLRU() {
    contents.clear();
    lru.clear();
    if (!weak_refs.empty()) {
      lderr(cct) << "leaked refs:\n";
      dump_weak_refs(*_dout);
      *_dout << dendl;
      if (cct->_conf->get_val<bool>("debug_asserts_on_shutdown")) {
        assert(weak_refs.empty());
      }
    }
  }

  /// adjust container comparator (for purposes of get_next sort order)
  void reset_comparator(C comp) {
    // get_next uses weak_refs; that's the only container we need to
    // reorder.
    map<K, pair<WeakVPtr, V*>, C> temp;

    Mutex::Locker l(lock);
    temp.swap(weak_refs);

    // reconstruct with new comparator
    weak_refs = map<K, pair<WeakVPtr, V*>, C>(comp);
    weak_refs.insert(temp.begin(), temp.end());
  }
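
  // Illustrative sketch (assumes C is a stateful comparator whose order
  // can differ between instances; the constructor flag is hypothetical):
  //
  //   C reversed(/* descending= */ true);
  //   cache.reset_comparator(reversed);
  //   // subsequent get_next() calls walk keys in the new order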

  C get_comparator() {
    return weak_refs.key_comp();
  }

  void set_cct(CephContext *c) {
    cct = c;
  }

  void dump_weak_refs() {
    lderr(cct) << "leaked refs:\n";
    dump_weak_refs(*_dout);
    *_dout << dendl;
  }

  void dump_weak_refs(ostream& out) {
    for (typename map<K, pair<WeakVPtr, V*>, C>::iterator p = weak_refs.begin();
         p != weak_refs.end();
         ++p) {
      out << __func__ << " " << this << " weak_refs: "
          << p->first << " = " << p->second.second
          << " with " << p->second.first.use_count() << " refs"
          << std::endl;
    }
  }


  // clear all strong references from the lru.
  void clear() {
    while (true) {
      VPtr val; // release any ref we have after we drop the lock
      Mutex::Locker l(lock);
      if (size == 0)
        break;

      val = lru.back().second;
      lru_remove(lru.back().first);
    }
  }

  void clear(const K& key) {
    VPtr val; // release any ref we have after we drop the lock
    {
      Mutex::Locker l(lock);
      typename map<K, pair<WeakVPtr, V*>, C>::iterator i = weak_refs.find(key);
      if (i != weak_refs.end()) {
        val = i->second.first.lock();
      }
      lru_remove(key);
    }
  }

  void purge(const K &key) {
    VPtr val; // release any ref we have after we drop the lock
    {
      Mutex::Locker l(lock);
      typename map<K, pair<WeakVPtr, V*>, C>::iterator i = weak_refs.find(key);
      if (i != weak_refs.end()) {
        val = i->second.first.lock();
        weak_refs.erase(i);
      }
      lru_remove(key);
    }
  }

  void set_size(size_t new_size) {
    list<VPtr> to_release;
    {
      Mutex::Locker l(lock);
      max_size = new_size;
      trim_cache(&to_release);
    }
  }

  // Returns K key s.t. key <= k for all currently cached k,v
  K cached_key_lower_bound() {
    // note: assumes the cache is non-empty; weak_refs.begin() is dereferenced
    Mutex::Locker l(lock);
    return weak_refs.begin()->first;
  }

  VPtr lower_bound(const K& key) {
    VPtr val;
    list<VPtr> to_release;
    {
      Mutex::Locker l(lock);
      ++waiting;
      bool retry = false;
      do {
        retry = false;
        if (weak_refs.empty())
          break;
        typename map<K, pair<WeakVPtr, V*>, C>::iterator i =
          weak_refs.lower_bound(key);
        if (i == weak_refs.end())
          --i;
        val = i->second.first.lock();
        if (val) {
          lru_add(i->first, val, &to_release);
        } else {
          // the value is mid-destruction; wait for its Cleanup deleter
          // to erase the weak_refs entry and signal us, then retry
          retry = true;
        }
        if (retry)
          cond.Wait(lock);
      } while (retry);
      --waiting;
    }
    return val;
  }
  bool get_next(const K &key, pair<K, VPtr> *next) {
    pair<K, VPtr> r;
    {
      Mutex::Locker l(lock);
      VPtr next_val;
      typename map<K, pair<WeakVPtr, V*>, C>::iterator i = weak_refs.upper_bound(key);

      while (i != weak_refs.end() &&
             !(next_val = i->second.first.lock()))
        ++i;

      if (i == weak_refs.end())
        return false;

      if (next)
        r = make_pair(i->first, next_val);
    }
    if (next)
      *next = r;
    return true;
  }
  bool get_next(const K &key, pair<K, V> *next) {
    pair<K, VPtr> r;
    bool found = get_next(key, &r);
    if (!found || !next)
      return found;
    next->first = r.first;
    assert(r.second);
    next->second = *(r.second);
    return found;
  }
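
  // Illustrative full scan via get_next (a sketch; assumes K = int, a
  // hypothetical value type Object, and that no cached key equals the
  // sentinel):
  //
  //   pair<int, ceph::shared_ptr<Object> > next;
  //   int cur = INT_MIN;  // sentinel assumed smaller than every cached key
  //   while (cache.get_next(cur, &next)) {
  //     // ... use *next.second ...
  //     cur = next.first;
  //   }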

  VPtr lookup(const K& key) {
    VPtr val;
    list<VPtr> to_release;
    {
      Mutex::Locker l(lock);
      ++waiting;
      bool retry = false;
      do {
        retry = false;
        typename map<K, pair<WeakVPtr, V*>, C>::iterator i = weak_refs.find(key);
        if (i != weak_refs.end()) {
          val = i->second.first.lock();
          if (val) {
            lru_add(key, val, &to_release);
          } else {
            // entry is expiring; wait for Cleanup to remove it, then retry
            retry = true;
          }
        }
        if (retry)
          cond.Wait(lock);
      } while (retry);
      --waiting;
    }
    return val;
  }
  VPtr lookup_or_create(const K &key) {
    VPtr val;
    list<VPtr> to_release;
    {
      Mutex::Locker l(lock);
      bool retry = false;
      do {
        retry = false;
        typename map<K, pair<WeakVPtr, V*>, C>::iterator i = weak_refs.find(key);
        if (i != weak_refs.end()) {
          val = i->second.first.lock();
          if (val) {
            lru_add(key, val, &to_release);
            return val;
          } else {
            retry = true;
          }
        }
        if (retry)
          cond.Wait(lock);
      } while (retry);

      V *new_value = new V();
      VPtr new_val(new_value, Cleanup(this, key));
      weak_refs.insert(make_pair(key, make_pair(new_val, new_value)));
      lru_add(key, new_val, &to_release);
      return new_val;
    }
  }

  /**
   * empty()
   *
   * Returns true iff there are no live references left to anything that
   * has been in the cache.
   */
  bool empty() {
    Mutex::Locker l(lock);
    return weak_refs.empty();
  }

  /***
   * Inserts a key if not present, or bumps it to the front of the LRU if
   * it is, and then gives you a reference to the value. If the key already
   * existed, you are responsible for deleting the new value you tried to
   * insert.
   *
   * @param key The key to insert
   * @param value The value that goes with the key
   * @param existed Set to true if the value was already in the
   *                map, false otherwise
   * @return A reference to the map's value for the given key
   */
  VPtr add(const K& key, V *value, bool *existed = NULL) {
    VPtr val;
    list<VPtr> to_release;
    {
      Mutex::Locker l(lock);
      typename map<K, pair<WeakVPtr, V*>, C>::iterator actual =
        weak_refs.lower_bound(key);
      if (actual != weak_refs.end() && actual->first == key) {
        if (existed)
          *existed = true;

        return actual->second.first.lock();
      }

      if (existed)
        *existed = false;

      val = VPtr(value, Cleanup(this, key));
      weak_refs.insert(actual, make_pair(key, make_pair(val, value)));
      lru_add(key, val, &to_release);
    }
    return val;
  }
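
  // Illustrative use of add (a sketch; Object is a hypothetical value
  // type), showing the caller's delete-on-existed responsibility noted
  // in the doc comment above:
  //
  //   bool existed = false;
  //   Object *obj = new Object;
  //   ceph::shared_ptr<Object> ref = cache.add(1, obj, &existed);
  //   if (existed)
  //     delete obj;  // cache already held a value; ours was not inserted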

  friend class SharedLRUTest;
};

#endif