// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab ft=cpp

#include "rgw_cache.h"
#include "rgw_perf_counters.h"

#include <errno.h>

#define dout_subsys ceph_subsys_rgw

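// Look up `name` in the cache. Returns 0 on a hit (copying the cached
// info into `info`), -ENODATA for a cached negative entry, and -ENOENT
// when the entry is missing, expired, or doesn't cover all the bits
// requested in `mask`. A hit also promotes the entry in the LRU if it
// hasn't been promoted within the last lru_window lookups.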
int ObjectCache::get(const string& name, ObjectCacheInfo& info, uint32_t mask, rgw_cache_entry_info *cache_info)
{
  std::shared_lock rl{lock};
  if (!enabled) {
    return -ENOENT;
  }
  auto iter = cache_map.find(name);
  if (iter == cache_map.end()) {
    ldout(cct, 10) << "cache get: name=" << name << " : miss" << dendl;
    if (perfcounter) {
      perfcounter->inc(l_rgw_cache_miss);
    }
    return -ENOENT;
  }

  if (expiry.count() &&
      (ceph::coarse_mono_clock::now() - iter->second.info.time_added) > expiry) {
    ldout(cct, 10) << "cache get: name=" << name << " : expiry miss" << dendl;
    rl.unlock();
    std::unique_lock wl{lock}; // write lock to remove the expired entry
    // check that it wasn't already removed by another thread
    iter = cache_map.find(name);
    if (iter != cache_map.end()) {
      for (auto &kv : iter->second.chained_entries)
        kv.first->invalidate(kv.second);
      remove_lru(name, iter->second.lru_iter);
      cache_map.erase(iter);
    }
    if (perfcounter) {
      perfcounter->inc(l_rgw_cache_miss);
    }
    return -ENOENT;
  }

  ObjectCacheEntry *entry = &iter->second;

  if (lru_counter - entry->lru_promotion_ts > lru_window) {
    ldout(cct, 20) << "cache get: touching lru, lru_counter=" << lru_counter
                   << " promotion_ts=" << entry->lru_promotion_ts << dendl;
    rl.unlock();
    std::unique_lock wl{lock}; // write lock for the LRU update
    /* need to redo this because entry might have dropped off the cache */
    iter = cache_map.find(name);
    if (iter == cache_map.end()) {
      ldout(cct, 10) << "lost race! cache get: name=" << name << " : miss" << dendl;
      if (perfcounter) perfcounter->inc(l_rgw_cache_miss);
      return -ENOENT;
    }

    entry = &iter->second;
    /* check again, we might have lost a race here */
    if (lru_counter - entry->lru_promotion_ts > lru_window) {
      touch_lru(name, *entry, iter->second.lru_iter);
    }
  }

  ObjectCacheInfo& src = iter->second.info;
  if (src.status == -ENOENT) {
    ldout(cct, 10) << "cache get: name=" << name << " : hit (negative entry)" << dendl;
    if (perfcounter) perfcounter->inc(l_rgw_cache_hit);
    return -ENODATA;
  }
  if ((src.flags & mask) != mask) {
    ldout(cct, 10) << "cache get: name=" << name << " : type miss (requested=0x"
                   << std::hex << mask << ", cached=0x" << src.flags
                   << std::dec << ")" << dendl;
    if (perfcounter) perfcounter->inc(l_rgw_cache_miss);
    return -ENOENT;
  }
  ldout(cct, 10) << "cache get: name=" << name << " : hit (requested=0x"
                 << std::hex << mask << ", cached=0x" << src.flags
                 << std::dec << ")" << dendl;

  info = src;
  if (cache_info) {
    cache_info->cache_locator = name;
    cache_info->gen = entry->gen;
  }
  if (perfcounter) perfcounter->inc(l_rgw_cache_hit);

  return 0;
}

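// Link a chained-cache entry to the object-cache entries it was derived
// from, so that invalidating any of those entries also invalidates the
// chained entry. Returns false if any referenced entry is gone or its
// generation no longer matches.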
bool ObjectCache::chain_cache_entry(std::initializer_list<rgw_cache_entry_info*> cache_info_entries,
                                    RGWChainedCache::Entry *chained_entry)
{
  std::unique_lock l{lock};

  if (!enabled) {
    return false;
  }

  std::vector<ObjectCacheEntry*> entries;
  entries.reserve(cache_info_entries.size());
  /* first verify that all entries are still valid */
  for (auto cache_info : cache_info_entries) {
    ldout(cct, 10) << "chain_cache_entry: cache_locator="
                   << cache_info->cache_locator << dendl;
    auto iter = cache_map.find(cache_info->cache_locator);
    if (iter == cache_map.end()) {
      ldout(cct, 20) << "chain_cache_entry: couldn't find cache locator" << dendl;
      return false;
    }

    auto entry = &iter->second;

    if (entry->gen != cache_info->gen) {
      ldout(cct, 20) << "chain_cache_entry: entry.gen (" << entry->gen
                     << ") != cache_info.gen (" << cache_info->gen << ")"
                     << dendl;
      return false;
    }
    entries.push_back(entry);
  }

  chained_entry->cache->chain_cb(chained_entry->key, chained_entry->data);

  for (auto entry : entries) {
    entry->chained_entries.push_back(make_pair(chained_entry->cache,
                                               chained_entry->key));
  }

  return true;
}

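// Insert or update the cache entry for `name`, merging the fields
// selected by info.flags into the existing entry. A negative status
// clears the cached contents, and any previously chained entries are
// invalidated since they no longer match the new data.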
void ObjectCache::put(const string& name, ObjectCacheInfo& info, rgw_cache_entry_info *cache_info)
{
  std::unique_lock l{lock};

  if (!enabled) {
    return;
  }

  ldout(cct, 10) << "cache put: name=" << name << " info.flags=0x"
                 << std::hex << info.flags << std::dec << dendl;

  auto [iter, inserted] = cache_map.emplace(name, ObjectCacheEntry{});
  ObjectCacheEntry& entry = iter->second;
  entry.info.time_added = ceph::coarse_mono_clock::now();
  if (inserted) {
    entry.lru_iter = lru.end();
  }
  ObjectCacheInfo& target = entry.info;

  invalidate_lru(entry);

  entry.chained_entries.clear();
  entry.gen++;

  touch_lru(name, entry, entry.lru_iter);

  target.status = info.status;

  if (info.status < 0) {
    target.flags = 0;
    target.xattrs.clear();
    target.data.clear();
    return;
  }

  if (cache_info) {
    cache_info->cache_locator = name;
    cache_info->gen = entry.gen;
  }

  // put() must include the latest version if we're going to keep caching it
  target.flags &= ~CACHE_FLAG_OBJV;

  target.flags |= info.flags;

  if (info.flags & CACHE_FLAG_META)
    target.meta = info.meta;
  else if (!(info.flags & CACHE_FLAG_MODIFY_XATTRS))
    target.flags &= ~CACHE_FLAG_META; // non-meta change should reset meta

  if (info.flags & CACHE_FLAG_XATTRS) {
    target.xattrs = info.xattrs;
    map<string, bufferlist>::iterator iter;
    for (iter = target.xattrs.begin(); iter != target.xattrs.end(); ++iter) {
      ldout(cct, 10) << "updating xattr: name=" << iter->first << " bl.length()=" << iter->second.length() << dendl;
    }
  } else if (info.flags & CACHE_FLAG_MODIFY_XATTRS) {
    map<string, bufferlist>::iterator iter;
    for (iter = info.rm_xattrs.begin(); iter != info.rm_xattrs.end(); ++iter) {
      ldout(cct, 10) << "removing xattr: name=" << iter->first << dendl;
      target.xattrs.erase(iter->first);
    }
    for (iter = info.xattrs.begin(); iter != info.xattrs.end(); ++iter) {
      ldout(cct, 10) << "appending xattr: name=" << iter->first << " bl.length()=" << iter->second.length() << dendl;
      target.xattrs[iter->first] = iter->second;
    }
  }

  if (info.flags & CACHE_FLAG_DATA)
    target.data = info.data;

  if (info.flags & CACHE_FLAG_OBJV)
    target.version = info.version;
}

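// Drop `name` from the cache, invalidating any chained entries that
// reference it. Returns false if the name isn't cached.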
bool ObjectCache::remove(const string& name)
{
  std::unique_lock l{lock};

  if (!enabled) {
    return false;
  }

  auto iter = cache_map.find(name);
  if (iter == cache_map.end())
    return false;

  ldout(cct, 10) << "removing " << name << " from cache" << dendl;
  ObjectCacheEntry& entry = iter->second;

  for (auto& kv : entry.chained_entries) {
    kv.first->invalidate(kv.second);
  }

  remove_lru(name, iter->second.lru_iter);
  cache_map.erase(iter);
  return true;
}

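// Move `name` to the most-recently-used end of the LRU list, evicting
// entries from the front while the list exceeds rgw_cache_lru_size,
// and record the promotion in entry.lru_promotion_ts.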
void ObjectCache::touch_lru(const string& name, ObjectCacheEntry& entry,
                            std::list<string>::iterator& lru_iter)
{
  while (lru_size > (size_t)cct->_conf->rgw_cache_lru_size) {
    auto iter = lru.begin();
    if ((*iter).compare(name) == 0) {
      /*
       * if the entry we're touching happens to be at the lru end, don't remove it,
       * lru shrinking can wait for next time
       */
      break;
    }
    auto map_iter = cache_map.find(*iter);
    ldout(cct, 10) << "removing entry: name=" << *iter << " from cache LRU" << dendl;
    if (map_iter != cache_map.end()) {
      ObjectCacheEntry& entry = map_iter->second;
      invalidate_lru(entry);
      cache_map.erase(map_iter);
    }
    lru.pop_front();
    lru_size--;
  }

  if (lru_iter == lru.end()) {
    lru.push_back(name);
    lru_size++;
    lru_iter--;
    ldout(cct, 10) << "adding " << name << " to cache LRU end" << dendl;
  } else {
    ldout(cct, 10) << "moving " << name << " to cache LRU end" << dendl;
    lru.erase(lru_iter);
    lru.push_back(name);
    lru_iter = lru.end();
    --lru_iter;
  }

  lru_counter++;
  entry.lru_promotion_ts = lru_counter;
}

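// Erase the given position from the LRU list, if it is valid, and reset
// the caller's iterator to lru.end().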
void ObjectCache::remove_lru(const string& name,
                             std::list<string>::iterator& lru_iter)
{
  if (lru_iter == lru.end())
    return;

  lru.erase(lru_iter);
  lru_size--;
  lru_iter = lru.end();
}

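// Invalidate all chained-cache entries linked to this object-cache
// entry. Despite the name, this doesn't touch the LRU list itself.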
void ObjectCache::invalidate_lru(ObjectCacheEntry& entry)
{
  for (auto iter = entry.chained_entries.begin();
       iter != entry.chained_entries.end(); ++iter) {
    RGWChainedCache *chained_cache = iter->first;
    chained_cache->invalidate(iter->second);
  }
}

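// Enable or disable the cache. Disabling also drops all cached entries
// and invalidates every registered chained cache.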
void ObjectCache::set_enabled(bool status)
{
  std::unique_lock l{lock};

  enabled = status;

  if (!enabled) {
    do_invalidate_all();
  }
}

void ObjectCache::invalidate_all()
{
  std::unique_lock l{lock};

  do_invalidate_all();
}

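// Clear the cache map and LRU state and propagate the invalidation to
// all registered chained caches. Expects the caller to hold the write
// lock, as set_enabled() and invalidate_all() do.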
void ObjectCache::do_invalidate_all()
{
  cache_map.clear();
  lru.clear();

  lru_size = 0;
  lru_counter = 0;
  lru_window = 0;

  for (auto& cache : chained_cache) {
    cache->invalidate_all();
  }
}

void ObjectCache::chain_cache(RGWChainedCache *cache) {
  std::unique_lock l{lock};
  chained_cache.push_back(cache);
}

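// Unregister a chained cache: remove it from the chained_cache list and
// notify it via unregistered().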
void ObjectCache::unchain_cache(RGWChainedCache *cache) {
  std::unique_lock l{lock};

  auto iter = chained_cache.begin();
  for (; iter != chained_cache.end(); ++iter) {
    if (cache == *iter) {
      chained_cache.erase(iter);
      cache->unregistered();
      return;
    }
  }
}

ObjectCache::~ObjectCache()
{
  for (auto cache : chained_cache) {
    cache->unregistered();
  }
}