// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab ft=cpp

#include "rgw_cache.h"
#include "rgw_perf_counters.h"

#include <errno.h>

#define dout_subsys ceph_subsys_rgw

using namespace std;

int ObjectCache::get(const DoutPrefixProvider *dpp, const string& name, ObjectCacheInfo& info, uint32_t mask, rgw_cache_entry_info *cache_info)
{

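  // Fast path runs under the shared (read) lock; the deferred unique_lock is
  // only taken when shared state must change (expiring an entry or promoting
  // it in the LRU). The promotion from read to write lock is not atomic, so
  // the entry is always looked up again after the write lock is acquired.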
  std::shared_lock rl{lock};
  std::unique_lock wl{lock, std::defer_lock}; // may be promoted to write lock
  if (!enabled) {
    return -ENOENT;
  }
  auto iter = cache_map.find(name);
  if (iter == cache_map.end()) {
    ldpp_dout(dpp, 10) << "cache get: name=" << name << " : miss" << dendl;
    if (perfcounter) {
      perfcounter->inc(l_rgw_cache_miss);
    }
    return -ENOENT;
  }

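  // Expiry check: if an expiry window is configured and the entry is older
  // than it, treat this as a miss and drop the entry along with any
  // chained-cache entries derived from it.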
  if (expiry.count() &&
      (ceph::coarse_mono_clock::now() - iter->second.info.time_added) > expiry) {
    ldpp_dout(dpp, 10) << "cache get: name=" << name << " : expiry miss" << dendl;
    rl.unlock();
    wl.lock(); // write lock for expiration
    // check that the entry wasn't already removed by another thread
    iter = cache_map.find(name);
    if (iter != cache_map.end()) {
      for (auto &kv : iter->second.chained_entries)
        kv.first->invalidate(kv.second);
      remove_lru(name, iter->second.lru_iter);
      cache_map.erase(iter);
    }
    if (perfcounter) {
      perfcounter->inc(l_rgw_cache_miss);
    }
    return -ENOENT;
  }

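  // LRU promotion is throttled: an entry is only moved to the hot end of the
  // LRU once every lru_window hits, which keeps most gets on the cheaper
  // read-lock path.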
  ObjectCacheEntry *entry = &iter->second;

  if (lru_counter - entry->lru_promotion_ts > lru_window) {
    ldpp_dout(dpp, 20) << "cache get: touching lru, lru_counter=" << lru_counter
                       << " promotion_ts=" << entry->lru_promotion_ts << dendl;
    rl.unlock();
    wl.lock(); // write lock for touch_lru()
    /* need to redo this because entry might have dropped off the cache */
    iter = cache_map.find(name);
    if (iter == cache_map.end()) {
      ldpp_dout(dpp, 10) << "lost race! cache get: name=" << name << " : miss" << dendl;
      if (perfcounter) perfcounter->inc(l_rgw_cache_miss);
      return -ENOENT;
    }

    entry = &iter->second;
    /* check again, we might have lost a race here */
    if (lru_counter - entry->lru_promotion_ts > lru_window) {
      touch_lru(dpp, name, *entry, iter->second.lru_iter);
    }
  }

  ObjectCacheInfo& src = iter->second.info;
  if (src.status == -ENOENT) {
    ldpp_dout(dpp, 10) << "cache get: name=" << name << " : hit (negative entry)" << dendl;
    if (perfcounter) perfcounter->inc(l_rgw_cache_hit);
    return -ENODATA;
  }
  if ((src.flags & mask) != mask) {
    ldpp_dout(dpp, 10) << "cache get: name=" << name << " : type miss (requested=0x"
                       << std::hex << mask << ", cached=0x" << src.flags
                       << std::dec << ")" << dendl;
    if (perfcounter) perfcounter->inc(l_rgw_cache_miss);
    return -ENOENT;
  }
  ldpp_dout(dpp, 10) << "cache get: name=" << name << " : hit (requested=0x"
                     << std::hex << mask << ", cached=0x" << src.flags
                     << std::dec << ")" << dendl;

  info = src;
  if (cache_info) {
    cache_info->cache_locator = name;
    cache_info->gen = entry->gen;
  }
  if (perfcounter) perfcounter->inc(l_rgw_cache_hit);

  return 0;
}

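// Link an entry of a registered RGWChainedCache to the ObjectCache entries it
// was derived from. If any source entry has disappeared or changed generation
// since the caller read it, nothing is installed and false is returned;
// otherwise the chained entry is recorded so that invalidating or overwriting
// a source entry also invalidates it.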
bool ObjectCache::chain_cache_entry(const DoutPrefixProvider *dpp,
                                    std::initializer_list<rgw_cache_entry_info*> cache_info_entries,
                                    RGWChainedCache::Entry *chained_entry)
{
  std::unique_lock l{lock};

  if (!enabled) {
    return false;
  }

  std::vector<ObjectCacheEntry*> entries;
  entries.reserve(cache_info_entries.size());
  /* first verify that all entries are still valid */
  for (auto cache_info : cache_info_entries) {
    ldpp_dout(dpp, 10) << "chain_cache_entry: cache_locator="
                       << cache_info->cache_locator << dendl;
    auto iter = cache_map.find(cache_info->cache_locator);
    if (iter == cache_map.end()) {
      ldpp_dout(dpp, 20) << "chain_cache_entry: couldn't find cache locator" << dendl;
      return false;
    }

    auto entry = &iter->second;

    if (entry->gen != cache_info->gen) {
      ldpp_dout(dpp, 20) << "chain_cache_entry: entry.gen (" << entry->gen
                         << ") != cache_info.gen (" << cache_info->gen << ")"
                         << dendl;
      return false;
    }
    entries.push_back(entry);
  }

  chained_entry->cache->chain_cb(chained_entry->key, chained_entry->data);

  for (auto entry : entries) {
    entry->chained_entries.push_back(make_pair(chained_entry->cache,
                                               chained_entry->key));
  }

  return true;
}

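// Insert or update a cache entry. Existing chained entries are invalidated and
// the generation counter is bumped, so stale rgw_cache_entry_info handles can
// no longer chain against the entry. A negative info.status caches the failure
// itself (a negative entry); otherwise info.flags selects which parts (meta,
// xattrs, data, object version) are merged into the cached copy.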
void ObjectCache::put(const DoutPrefixProvider *dpp, const string& name, ObjectCacheInfo& info, rgw_cache_entry_info *cache_info)
{
  std::unique_lock l{lock};

  if (!enabled) {
    return;
  }

  ldpp_dout(dpp, 10) << "cache put: name=" << name << " info.flags=0x"
                     << std::hex << info.flags << std::dec << dendl;

  auto [iter, inserted] = cache_map.emplace(name, ObjectCacheEntry{});
  ObjectCacheEntry& entry = iter->second;
  entry.info.time_added = ceph::coarse_mono_clock::now();
  if (inserted) {
    entry.lru_iter = lru.end();
  }
  ObjectCacheInfo& target = entry.info;

  invalidate_lru(entry);

  entry.chained_entries.clear();
  entry.gen++;

  touch_lru(dpp, name, entry, entry.lru_iter);

  target.status = info.status;

  if (info.status < 0) {
    target.flags = 0;
    target.xattrs.clear();
    target.data.clear();
    return;
  }

  if (cache_info) {
    cache_info->cache_locator = name;
    cache_info->gen = entry.gen;
  }

  // put() must include the latest version if we're going to keep caching it
  target.flags &= ~CACHE_FLAG_OBJV;

  target.flags |= info.flags;

  if (info.flags & CACHE_FLAG_META)
    target.meta = info.meta;
  else if (!(info.flags & CACHE_FLAG_MODIFY_XATTRS))
    target.flags &= ~CACHE_FLAG_META; // non-meta change should reset meta

  if (info.flags & CACHE_FLAG_XATTRS) {
    target.xattrs = info.xattrs;
    map<string, bufferlist>::iterator iter;
    for (iter = target.xattrs.begin(); iter != target.xattrs.end(); ++iter) {
      ldpp_dout(dpp, 10) << "updating xattr: name=" << iter->first << " bl.length()=" << iter->second.length() << dendl;
    }
  } else if (info.flags & CACHE_FLAG_MODIFY_XATTRS) {
    map<string, bufferlist>::iterator iter;
    for (iter = info.rm_xattrs.begin(); iter != info.rm_xattrs.end(); ++iter) {
      ldpp_dout(dpp, 10) << "removing xattr: name=" << iter->first << dendl;
      target.xattrs.erase(iter->first);
    }
    for (iter = info.xattrs.begin(); iter != info.xattrs.end(); ++iter) {
      ldpp_dout(dpp, 10) << "appending xattr: name=" << iter->first << " bl.length()=" << iter->second.length() << dendl;
      target.xattrs[iter->first] = iter->second;
    }
  }

  if (info.flags & CACHE_FLAG_DATA)
    target.data = info.data;

  if (info.flags & CACHE_FLAG_OBJV)
    target.version = info.version;
}

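// Drop a single entry from the map and the LRU, invalidating any chained-cache
// entries derived from it. Returns false if nothing was cached under that name.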
// WARNING: This function /must not/ be modified to cache a
// negative lookup. It must only invalidate.
bool ObjectCache::invalidate_remove(const DoutPrefixProvider *dpp, const string& name)
{
  std::unique_lock l{lock};

  if (!enabled) {
    return false;
  }

  auto iter = cache_map.find(name);
  if (iter == cache_map.end())
    return false;

  ldpp_dout(dpp, 10) << "removing " << name << " from cache" << dendl;
  ObjectCacheEntry& entry = iter->second;

  for (auto& kv : entry.chained_entries) {
    kv.first->invalidate(kv.second);
  }

  remove_lru(name, iter->second.lru_iter);
  cache_map.erase(iter);
  return true;
}

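// Move `name` to the hot end of the LRU, evicting entries from the cold end
// while the cache holds more than rgw_cache_lru_size entries. Must be called
// with the write lock held.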
void ObjectCache::touch_lru(const DoutPrefixProvider *dpp, const string& name, ObjectCacheEntry& entry,
                            std::list<string>::iterator& lru_iter)
{
  while (lru_size > (size_t)cct->_conf->rgw_cache_lru_size) {
    auto iter = lru.begin();
    if ((*iter).compare(name) == 0) {
      /*
       * if the entry we're touching is the next eviction candidate (the front
       * of the LRU), don't remove it; shrinking the LRU can wait until next time
       */
      break;
    }
    auto map_iter = cache_map.find(*iter);
    ldout(cct, 10) << "removing entry: name=" << *iter << " from cache LRU" << dendl;
    if (map_iter != cache_map.end()) {
      ObjectCacheEntry& entry = map_iter->second;
      invalidate_lru(entry);
      cache_map.erase(map_iter);
    }
    lru.pop_front();
    lru_size--;
  }

  if (lru_iter == lru.end()) {
    lru.push_back(name);
    lru_size++;
    lru_iter--;
    ldpp_dout(dpp, 10) << "adding " << name << " to cache LRU end" << dendl;
  } else {
    ldpp_dout(dpp, 10) << "moving " << name << " to cache LRU end" << dendl;
    lru.erase(lru_iter);
    lru.push_back(name);
    lru_iter = lru.end();
    --lru_iter;
  }

  lru_counter++;
  entry.lru_promotion_ts = lru_counter;
}

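// Unlink an entry from the LRU list (if it is on it) and reset the caller's
// iterator to lru.end().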
void ObjectCache::remove_lru(const string& name,
                             std::list<string>::iterator& lru_iter)
{
  if (lru_iter == lru.end())
    return;

  lru.erase(lru_iter);
  lru_size--;
  lru_iter = lru.end();
}

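// Invalidate the chained-cache entries hanging off this ObjectCacheEntry; the
// entry's own removal from the map and the LRU is left to the callers.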
void ObjectCache::invalidate_lru(ObjectCacheEntry& entry)
{
  for (auto iter = entry.chained_entries.begin();
       iter != entry.chained_entries.end(); ++iter) {
    RGWChainedCache *chained_cache = iter->first;
    chained_cache->invalidate(iter->second);
  }
}

void ObjectCache::set_enabled(bool status)
{
  std::unique_lock l{lock};

  enabled = status;

  if (!enabled) {
    do_invalidate_all();
  }
}

void ObjectCache::invalidate_all()
{
  std::unique_lock l{lock};

  do_invalidate_all();
}

void ObjectCache::do_invalidate_all()
{
  cache_map.clear();
  lru.clear();

  lru_size = 0;
  lru_counter = 0;
  lru_window = 0;

  for (auto& cache : chained_cache) {
    cache->invalidate_all();
  }
}

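// Registration of chained caches; registered caches participate in
// chain_cache_entry() and are flushed by do_invalidate_all().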
void ObjectCache::chain_cache(RGWChainedCache *cache) {
  std::unique_lock l{lock};
  chained_cache.push_back(cache);
}

void ObjectCache::unchain_cache(RGWChainedCache *cache) {
  std::unique_lock l{lock};

  auto iter = chained_cache.begin();
  for (; iter != chained_cache.end(); ++iter) {
    if (cache == *iter) {
      chained_cache.erase(iter);
      cache->unregistered();
      return;
    }
  }
}

ObjectCache::~ObjectCache()
{
  for (auto cache : chained_cache) {
    cache->unregistered();
  }
}

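// Test-instance generators (used by ceph-dencoder) and JSON dump helpers for
// the cache types.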
void ObjectMetaInfo::generate_test_instances(list<ObjectMetaInfo*>& o)
{
  ObjectMetaInfo *m = new ObjectMetaInfo;
  m->size = 1024 * 1024;
  o.push_back(m);
  o.push_back(new ObjectMetaInfo);
}

void ObjectMetaInfo::dump(Formatter *f) const
{
  encode_json("size", size, f);
  encode_json("mtime", utime_t(mtime), f);
}

void ObjectCacheInfo::generate_test_instances(list<ObjectCacheInfo*>& o)
{
  using ceph::encode;
  ObjectCacheInfo *i = new ObjectCacheInfo;
  i->status = 0;
  i->flags = CACHE_FLAG_MODIFY_XATTRS;
  string s = "this is a string";
  string s2 = "this is a another string";
  bufferlist data, data2;
  encode(s, data);
  encode(s2, data2);
  i->data = data;
  i->xattrs["x1"] = data;
  i->xattrs["x2"] = data2;
  i->rm_xattrs["r2"] = data2;
  i->rm_xattrs["r3"] = data;
  i->meta.size = 512 * 1024;
  o.push_back(i);
  o.push_back(new ObjectCacheInfo);
}

void ObjectCacheInfo::dump(Formatter *f) const
{
  encode_json("status", status, f);
  encode_json("flags", flags, f);
  encode_json("data", data, f);
  encode_json_map("xattrs", "name", "value", "length", xattrs, f);
  encode_json_map("rm_xattrs", "name", "value", "length", rm_xattrs, f);
  encode_json("meta", meta, f);
}

void RGWCacheNotifyInfo::generate_test_instances(list<RGWCacheNotifyInfo*>& o)
{
  o.push_back(new RGWCacheNotifyInfo);
}

void RGWCacheNotifyInfo::dump(Formatter *f) const
{
  encode_json("op", op, f);
  encode_json("obj", obj, f);
  encode_json("obj_info", obj_info, f);
  encode_json("ofs", ofs, f);
  encode_json("ns", ns, f);
}
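
/*
 * Usage sketch (illustrative only, not taken from the RGW call sites): a
 * read-through pattern over this cache. `cache`, `dpp`, `name` and
 * `fetch_from_store()` are hypothetical stand-ins for the caller's own
 * plumbing.
 *
 *   ObjectCacheInfo info;
 *   rgw_cache_entry_info cache_info;
 *   int r = cache.get(dpp, name, info, CACHE_FLAG_DATA, &cache_info);
 *   if (r == -ENOENT) {            // miss: fall back to the backing store
 *     r = fetch_from_store(dpp, name, info);
 *     if (r >= 0) {
 *       info.status = 0;
 *       info.flags = CACHE_FLAG_DATA;
 *       cache.put(dpp, name, info, &cache_info);
 *     }
 *   } else if (r == -ENODATA) {
 *     // cached negative entry: the object is known not to exist
 *   }
 */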