]> git.proxmox.com Git - ceph.git/blob - ceph/src/rgw/rgw_cache.cc
update sources to ceph Nautilus 14.2.1
[ceph.git] / ceph / src / rgw / rgw_cache.cc
1 // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
2 // vim: ts=8 sw=2 smarttab
3
#include "rgw_cache.h"
#include "rgw_perf_counters.h"

#include <errno.h>

#include <algorithm>
8
9 #define dout_subsys ceph_subsys_rgw
10
11
/**
 * Look up @name in the cache.
 *
 * @param name        cache locator (map key)
 * @param info        out: copy of the cached ObjectCacheInfo on hit
 * @param mask        required CACHE_FLAG_* bits; all must be present in the
 *                    cached entry's flags or the lookup counts as a miss
 * @param cache_info  optional out: locator + generation, used later by
 *                    chain_cache_entry() to detect stale entries
 * @return 0 on hit, -ENOENT on any kind of miss (absent, expired,
 *         disabled cache, or flag mask not satisfied)
 *
 * Locking: taken as a reader; promoted to writer only on the rare paths
 * (expiry eviction, LRU touch).  Promotion is done by unlock()/get_write();
 * RWLock::RLocker's destructor calls lock.unlock(), which also releases a
 * promoted write lock, so the manual dance below is safe.  Every promotion
 * drops the lock entirely, so the map must be re-queried afterwards.
 */
int ObjectCache::get(const string& name, ObjectCacheInfo& info, uint32_t mask, rgw_cache_entry_info *cache_info)
{
  RWLock::RLocker l(lock);

  if (!enabled) {
    return -ENOENT;
  }

  auto iter = cache_map.find(name);
  if (iter == cache_map.end()) {
    ldout(cct, 10) << "cache get: name=" << name << " : miss" << dendl;
    if (perfcounter)
      perfcounter->inc(l_rgw_cache_miss);
    return -ENOENT;
  }

  // Expired entry: report a miss and evict it (and its chained entries)
  // under the write lock.  expiry.count() == 0 means expiry is disabled.
  if (expiry.count() &&
      (ceph::coarse_mono_clock::now() - iter->second.info.time_added) > expiry) {
    ldout(cct, 10) << "cache get: name=" << name << " : expiry miss" << dendl;
    lock.unlock();
    lock.get_write();
    // check that wasn't already removed by other thread
    iter = cache_map.find(name);
    if (iter != cache_map.end()) {
      for (auto &kv : iter->second.chained_entries)
        kv.first->invalidate(kv.second);
      remove_lru(name, iter->second.lru_iter);
      cache_map.erase(iter);
    }
    if(perfcounter)
      perfcounter->inc(l_rgw_cache_miss);
    return -ENOENT;
  }

  ObjectCacheEntry *entry = &iter->second;

  // Only touch the LRU if the entry hasn't been promoted within the last
  // lru_window operations; this keeps hot entries from thrashing the list.
  if (lru_counter - entry->lru_promotion_ts > lru_window) {
    ldout(cct, 20) << "cache get: touching lru, lru_counter=" << lru_counter
                   << " promotion_ts=" << entry->lru_promotion_ts << dendl;
    lock.unlock();
    lock.get_write(); /* promote lock to writer */

    /* need to redo this because entry might have dropped off the cache */
    iter = cache_map.find(name);
    if (iter == cache_map.end()) {
      ldout(cct, 10) << "lost race! cache get: name=" << name << " : miss" << dendl;
      if(perfcounter) perfcounter->inc(l_rgw_cache_miss);
      return -ENOENT;
    }

    entry = &iter->second;
    /* check again, we might have lost a race here */
    if (lru_counter - entry->lru_promotion_ts > lru_window) {
      touch_lru(name, *entry, iter->second.lru_iter);
    }
  }

  // A hit requires every requested flag bit to be cached.
  ObjectCacheInfo& src = iter->second.info;
  if ((src.flags & mask) != mask) {
    ldout(cct, 10) << "cache get: name=" << name << " : type miss (requested=0x"
                   << std::hex << mask << ", cached=0x" << src.flags
                   << std::dec << ")" << dendl;
    if(perfcounter) perfcounter->inc(l_rgw_cache_miss);
    return -ENOENT;
  }
  ldout(cct, 10) << "cache get: name=" << name << " : hit (requested=0x"
                 << std::hex << mask << ", cached=0x" << src.flags
                 << std::dec << ")" << dendl;

  info = src;
  if (cache_info) {
    cache_info->cache_locator = name;
    cache_info->gen = entry->gen;
  }
  if(perfcounter) perfcounter->inc(l_rgw_cache_hit);

  return 0;
}
89
90 bool ObjectCache::chain_cache_entry(std::initializer_list<rgw_cache_entry_info*> cache_info_entries,
91 RGWChainedCache::Entry *chained_entry)
92 {
93 RWLock::WLocker l(lock);
94
95 if (!enabled) {
96 return false;
97 }
98
99 std::vector<ObjectCacheEntry*> entries;
100 entries.reserve(cache_info_entries.size());
101 /* first verify that all entries are still valid */
102 for (auto cache_info : cache_info_entries) {
103 ldout(cct, 10) << "chain_cache_entry: cache_locator="
104 << cache_info->cache_locator << dendl;
105 auto iter = cache_map.find(cache_info->cache_locator);
106 if (iter == cache_map.end()) {
107 ldout(cct, 20) << "chain_cache_entry: couldn't find cache locator" << dendl;
108 return false;
109 }
110
111 auto entry = &iter->second;
112
113 if (entry->gen != cache_info->gen) {
114 ldout(cct, 20) << "chain_cache_entry: entry.gen (" << entry->gen
115 << ") != cache_info.gen (" << cache_info->gen << ")"
116 << dendl;
117 return false;
118 }
119 entries.push_back(entry);
120 }
121
122
123 chained_entry->cache->chain_cb(chained_entry->key, chained_entry->data);
124
125 for (auto entry : entries) {
126 entry->chained_entries.push_back(make_pair(chained_entry->cache,
127 chained_entry->key));
128 }
129
130 return true;
131 }
132
/**
 * Insert or update the cache entry for @name from @info.
 *
 * @param name        cache locator (map key)
 * @param info        new data; info.flags selects which parts (meta,
 *                    xattrs, data, objv) are being supplied
 * @param cache_info  optional out: locator + new generation of the entry,
 *                    for later use with chain_cache_entry()
 *
 * Any existing chained entries are invalidated and the generation is
 * bumped, since the cached content is being replaced/merged.
 */
void ObjectCache::put(const string& name, ObjectCacheInfo& info, rgw_cache_entry_info *cache_info)
{
  RWLock::WLocker l(lock);

  if (!enabled) {
    return;
  }

  ldout(cct, 10) << "cache put: name=" << name << " info.flags=0x"
                 << std::hex << info.flags << std::dec << dendl;

  // emplace() either creates a fresh entry or finds the existing one;
  // `inserted` tells us which happened.
  auto [iter, inserted] = cache_map.emplace(name, ObjectCacheEntry{});
  ObjectCacheEntry& entry = iter->second;
  // Refresh the expiry clock on every put, not just on insert.
  entry.info.time_added = ceph::coarse_mono_clock::now();
  if (inserted) {
    entry.lru_iter = lru.end();
  }
  ObjectCacheInfo& target = entry.info;

  // The content is changing: drop chained entries and bump the generation
  // so stale rgw_cache_entry_info handles are detected.
  invalidate_lru(entry);

  entry.chained_entries.clear();
  entry.gen++;

  touch_lru(name, entry, entry.lru_iter);

  target.status = info.status;

  // A negative status caches the error itself; wipe any stale payload.
  if (info.status < 0) {
    target.flags = 0;
    target.xattrs.clear();
    target.data.clear();
    return;
  }

  if (cache_info) {
    cache_info->cache_locator = name;
    cache_info->gen = entry.gen;
  }

  // Merge the supplied parts into whatever is already cached.
  target.flags |= info.flags;

  if (info.flags & CACHE_FLAG_META)
    target.meta = info.meta;
  else if (!(info.flags & CACHE_FLAG_MODIFY_XATTRS))
    target.flags &= ~CACHE_FLAG_META; // non-meta change should reset meta

  if (info.flags & CACHE_FLAG_XATTRS) {
    // Full xattr replacement.
    target.xattrs = info.xattrs;
    map<string, bufferlist>::iterator iter;
    for (iter = target.xattrs.begin(); iter != target.xattrs.end(); ++iter) {
      ldout(cct, 10) << "updating xattr: name=" << iter->first << " bl.length()=" << iter->second.length() << dendl;
    }
  } else if (info.flags & CACHE_FLAG_MODIFY_XATTRS) {
    // Incremental xattr update: removals first, then additions/overwrites.
    map<string, bufferlist>::iterator iter;
    for (iter = info.rm_xattrs.begin(); iter != info.rm_xattrs.end(); ++iter) {
      ldout(cct, 10) << "removing xattr: name=" << iter->first << dendl;
      target.xattrs.erase(iter->first);
    }
    for (iter = info.xattrs.begin(); iter != info.xattrs.end(); ++iter) {
      ldout(cct, 10) << "appending xattr: name=" << iter->first << " bl.length()=" << iter->second.length() << dendl;
      target.xattrs[iter->first] = iter->second;
    }
  }

  if (info.flags & CACHE_FLAG_DATA)
    target.data = info.data;

  if (info.flags & CACHE_FLAG_OBJV)
    target.version = info.version;
}
204
205 bool ObjectCache::remove(const string& name)
206 {
207 RWLock::WLocker l(lock);
208
209 if (!enabled) {
210 return false;
211 }
212
213 auto iter = cache_map.find(name);
214 if (iter == cache_map.end())
215 return false;
216
217 ldout(cct, 10) << "removing " << name << " from cache" << dendl;
218 ObjectCacheEntry& entry = iter->second;
219
220 for (auto& kv : entry.chained_entries) {
221 kv.first->invalidate(kv.second);
222 }
223
224 remove_lru(name, iter->second.lru_iter);
225 cache_map.erase(iter);
226 return true;
227 }
228
229 void ObjectCache::touch_lru(const string& name, ObjectCacheEntry& entry,
230 std::list<string>::iterator& lru_iter)
231 {
232 while (lru_size > (size_t)cct->_conf->rgw_cache_lru_size) {
233 auto iter = lru.begin();
234 if ((*iter).compare(name) == 0) {
235 /*
236 * if the entry we're touching happens to be at the lru end, don't remove it,
237 * lru shrinking can wait for next time
238 */
239 break;
240 }
241 auto map_iter = cache_map.find(*iter);
242 ldout(cct, 10) << "removing entry: name=" << *iter << " from cache LRU" << dendl;
243 if (map_iter != cache_map.end()) {
244 ObjectCacheEntry& entry = map_iter->second;
245 invalidate_lru(entry);
246 cache_map.erase(map_iter);
247 }
248 lru.pop_front();
249 lru_size--;
250 }
251
252 if (lru_iter == lru.end()) {
253 lru.push_back(name);
254 lru_size++;
255 lru_iter--;
256 ldout(cct, 10) << "adding " << name << " to cache LRU end" << dendl;
257 } else {
258 ldout(cct, 10) << "moving " << name << " to cache LRU end" << dendl;
259 lru.erase(lru_iter);
260 lru.push_back(name);
261 lru_iter = lru.end();
262 --lru_iter;
263 }
264
265 lru_counter++;
266 entry.lru_promotion_ts = lru_counter;
267 }
268
269 void ObjectCache::remove_lru(const string& name,
270 std::list<string>::iterator& lru_iter)
271 {
272 if (lru_iter == lru.end())
273 return;
274
275 lru.erase(lru_iter);
276 lru_size--;
277 lru_iter = lru.end();
278 }
279
280 void ObjectCache::invalidate_lru(ObjectCacheEntry& entry)
281 {
282 for (auto iter = entry.chained_entries.begin();
283 iter != entry.chained_entries.end(); ++iter) {
284 RGWChainedCache *chained_cache = iter->first;
285 chained_cache->invalidate(iter->second);
286 }
287 }
288
289 void ObjectCache::set_enabled(bool status)
290 {
291 RWLock::WLocker l(lock);
292
293 enabled = status;
294
295 if (!enabled) {
296 do_invalidate_all();
297 }
298 }
299
300 void ObjectCache::invalidate_all()
301 {
302 RWLock::WLocker l(lock);
303
304 do_invalidate_all();
305 }
306
307 void ObjectCache::do_invalidate_all()
308 {
309 cache_map.clear();
310 lru.clear();
311
312 lru_size = 0;
313 lru_counter = 0;
314 lru_window = 0;
315
316 for (auto& cache : chained_cache) {
317 cache->invalidate_all();
318 }
319 }
320
321 void ObjectCache::chain_cache(RGWChainedCache *cache) {
322 RWLock::WLocker l(lock);
323 chained_cache.push_back(cache);
324 }
325
326 void ObjectCache::unchain_cache(RGWChainedCache *cache) {
327 RWLock::WLocker l(lock);
328
329 auto iter = chained_cache.begin();
330 for (; iter != chained_cache.end(); ++iter) {
331 if (cache == *iter) {
332 chained_cache.erase(iter);
333 cache->unregistered();
334 return;
335 }
336 }
337 }
338
339 ObjectCache::~ObjectCache()
340 {
341 for (auto cache : chained_cache) {
342 cache->unregistered();
343 }
344 }
345