ceph/src/cls/rgw/cls_rgw.cc
1 // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
2 // vim: ts=8 sw=2 smarttab
3
4 #include "include/types.h"
5
6 #include <errno.h>
7
8 #include <boost/algorithm/string.hpp>
9
10 #include "objclass/objclass.h"
11 #include "cls/rgw/cls_rgw_ops.h"
12 #include "cls/rgw/cls_rgw_const.h"
13 #include "common/Clock.h"
14 #include "common/strtol.h"
15 #include "common/escape.h"
16
17 #include "include/compat.h"
18 #include <boost/lexical_cast.hpp>
19
20 using std::pair;
21 using std::list;
22 using std::map;
23 using std::string;
24 using std::vector;
25
26 using ceph::bufferlist;
27 using ceph::decode;
28 using ceph::encode;
29 using ceph::make_timespan;
30 using ceph::real_clock;
31 using ceph::real_time;
32 using ceph::timespan;
33
34 CLS_VER(1,0)
35 CLS_NAME(rgw)
36
37
38 // No UTF-8 character can begin with 0x80, so this is a safe indicator
39 // of a special bucket-index entry for the first byte. Note: although
40 // it has no impact, the 2nd, 3rd, or 4th byte of a UTF-8 character
41 // may be 0x80.
42 #define BI_PREFIX_CHAR 0x80
43
44 #define BI_BUCKET_OBJS_INDEX 0
45 #define BI_BUCKET_LOG_INDEX 1
46 #define BI_BUCKET_OBJ_INSTANCE_INDEX 2
47 #define BI_BUCKET_OLH_DATA_INDEX 3
48
49 #define BI_BUCKET_LAST_INDEX 4
50
51 static std::string bucket_index_prefixes[] = { "", /* special handling for the objs list index */
52 "0_", /* bucket log index */
53 "1000_", /* obj instance index */
54 "1001_", /* olh data index */
55
56 /* this must be the last index */
57 "9999_",};
58
59 // this string is greater than all ascii plain entries and less than
60 // all special entries
61 static const std::string BI_PREFIX_BEGIN = string(1, BI_PREFIX_CHAR);
62
63 // this string is greater than all special entries and less than all
64 // non-ascii plain entries
65 static const std::string BI_PREFIX_END = string(1, BI_PREFIX_CHAR) +
66 bucket_index_prefixes[BI_BUCKET_LAST_INDEX];
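// For illustration (a sketch based on the definitions above, not part of the
// original logic): with BI_PREFIX_CHAR = 0x80 the bucket-index omap keyspace
// sorts roughly as follows:
//
//   "bar", "foo", ...              plain entries for ASCII object names
//   BI_PREFIX_BEGIN  ("\x80")
//   "\x80" "0_..."                 bucket log (bilog) entries
//   "\x80" "1000_..."              object instance entries
//   "\x80" "1001_..."              olh data entries
//   BI_PREFIX_END    ("\x80" "9999_")
//   "\xC3\xA9clair", ...           plain entries for names that begin with a
//                                  non-ASCII UTF-8 character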
67
68 /* Returns whether parameter is not a key for a special entry. Empty
69 * strings are considered plain also, so, for example, an empty marker
70 * is also considered plain. TODO: check to make sure all callers are
71  * using it appropriately.
72 */
73 static bool bi_is_plain_entry(const std::string& s) {
74 return (s.empty() || (unsigned char)s[0] != BI_PREFIX_CHAR);
75 }
76
77 int bi_entry_type(const string& s)
78 {
79 if (bi_is_plain_entry(s)) {
80 return BI_BUCKET_OBJS_INDEX;
81 }
82
83 for (size_t i = 1;
84 i < sizeof(bucket_index_prefixes) / sizeof(bucket_index_prefixes[0]);
85 ++i) {
86 const string& t = bucket_index_prefixes[i];
87
88 if (s.compare(1, t.size(), t) == 0) {
89 return i;
90 }
91 }
92
93 return -EINVAL;
94 }
95
96 static bool bi_entry_gt(const string& first, const string& second)
97 {
98 int fi = bi_entry_type(first);
99 int si = bi_entry_type(second);
100
101 if (fi > si) {
102 return true;
103 } else if (fi < si) {
104 return false;
105 }
106
107 return first > second;
108 }
109
110 static void get_time_key(real_time& ut, string *key)
111 {
112 char buf[32];
113 ceph_timespec ts = ceph::real_clock::to_ceph_timespec(ut);
114 snprintf(buf, 32, "%011llu.%09u", (unsigned long long)ts.tv_sec, (unsigned int)ts.tv_nsec);
115 *key = buf;
116 }
117
118 static void get_index_ver_key(cls_method_context_t hctx, uint64_t index_ver, string *key)
119 {
120 char buf[48];
121 snprintf(buf, sizeof(buf), "%011llu.%llu.%d", (unsigned long long)index_ver,
122 (unsigned long long)cls_current_version(hctx),
123 cls_current_subop_num(hctx));
124 *key = buf;
125 }
126
127 static void bi_log_prefix(string& key)
128 {
129 key = BI_PREFIX_CHAR;
130 key.append(bucket_index_prefixes[BI_BUCKET_LOG_INDEX]);
131 }
132
133 static void bi_log_index_key(cls_method_context_t hctx, string& key, string& id, uint64_t index_ver)
134 {
135 bi_log_prefix(key);
136 get_index_ver_key(hctx, index_ver, &id);
137 key.append(id);
138 }
139
140 static int log_index_operation(cls_method_context_t hctx, const cls_rgw_obj_key& obj_key,
141 RGWModifyOp op, const string& tag, real_time timestamp,
142 const rgw_bucket_entry_ver& ver, RGWPendingState state, uint64_t index_ver,
143 string& max_marker, uint16_t bilog_flags, string *owner, string *owner_display_name, rgw_zone_set *zones_trace)
144 {
145 bufferlist bl;
146
147 rgw_bi_log_entry entry;
148
149 entry.object = obj_key.name;
150 entry.instance = obj_key.instance;
151 entry.timestamp = timestamp;
152 entry.op = op;
153 entry.ver = ver;
154 entry.state = state;
155 entry.index_ver = index_ver;
156 entry.tag = tag;
157 entry.bilog_flags = bilog_flags;
158 if (owner) {
159 entry.owner = *owner;
160 }
161 if (owner_display_name) {
162 entry.owner_display_name = *owner_display_name;
163 }
164 if (zones_trace) {
165 entry.zones_trace = std::move(*zones_trace);
166 }
167
168 string key;
169 bi_log_index_key(hctx, key, entry.id, index_ver);
170
171 encode(entry, bl);
172
173 if (entry.id > max_marker)
174 max_marker = entry.id;
175
176 return cls_cxx_map_set_val(hctx, key, &bl);
177 }
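// Illustrative example only (all values hypothetical): with index_ver=5, a
// current version of 123 from cls_current_version() and subop 0,
// get_index_ver_key() yields the id "00000000005.123.0", so the entry above
// is stored under the omap key "\x80" "0_" "00000000005.123.0", and that id
// also becomes the bilog marker tracked via max_marker.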
178
179 /*
180 * Read list of objects, skipping objects in the "ugly namespace". The
181 * "ugly namespace" entries begin with BI_PREFIX_CHAR (0x80). Valid
182  * UTF-8 object names can *both* precede and follow the "ugly
183 * namespace".
184 */
185 static int get_obj_vals(cls_method_context_t hctx,
186 const std::string& start,
187 const std::string& filter_prefix,
188 int num_entries,
189 std::map<std::string, bufferlist> *pkeys,
190 bool *pmore)
191 {
192 int ret = cls_cxx_map_get_vals(hctx, start, filter_prefix,
193 num_entries, pkeys, pmore);
194 if (ret < 0) {
195 return ret;
196 }
197
198 if (pkeys->empty()) {
199 return 0;
200 }
201
202 auto last_element = pkeys->crbegin();
203 if ((unsigned char)last_element->first[0] < BI_PREFIX_CHAR) {
204 /* if the first character of the last entry is less than the
205      * prefix then all entries must precede the "ugly namespace" and
206 * we're done
207 */
208 return 0;
209 }
210
211 auto first_element = pkeys->cbegin();
212 if ((unsigned char)first_element->first[0] > BI_PREFIX_CHAR) {
213 /* if the first character of the first entry is after the "ugly
214 * namespace" then all entries must follow the "ugly namespace"
215      * and we're done
216 */
217 return 0;
218 }
219
220 /* at this point we know we have entries that could precede the
221 * "ugly namespace", be in the "ugly namespace", and follow the
222 * "ugly namespace", so let's rebuild the list, only keeping entries
223 * outside the "ugly namespace"
224 */
225
226 auto comp = [](const pair<std::string, bufferlist>& l, const std::string &r) {
227 return l.first < r;
228 };
229 std::string new_start = {static_cast<char>(BI_PREFIX_CHAR + 1)};
230
231 auto lower = pkeys->lower_bound(string{static_cast<char>(BI_PREFIX_CHAR)});
232 auto upper = std::lower_bound(lower, pkeys->end(), new_start, comp);
233 pkeys->erase(lower, upper);
234
235 if (num_entries == (int)pkeys->size() || !(*pmore)) {
236 return 0;
237 }
238
239 if (pkeys->size() && new_start < pkeys->crbegin()->first) {
240 new_start = pkeys->rbegin()->first;
241 }
242
243 std::map<std::string, bufferlist> new_keys;
244
245 /* now get some more keys */
246 ret = cls_cxx_map_get_vals(hctx, new_start, filter_prefix,
247 num_entries - pkeys->size(), &new_keys, pmore);
248 if (ret < 0) {
249 return ret;
250 }
251
252 pkeys->insert(std::make_move_iterator(new_keys.begin()),
253 std::make_move_iterator(new_keys.end()));
254
255 return 0;
256 }
257
258 /*
259 * get a monotonically decreasing string representation.
260  * For num = x and num = y, where x > y, str(x) < str(y)
261 * Another property is that string size starts short and grows as num increases
262 */
263 static void decreasing_str(uint64_t num, string *str)
264 {
265 char buf[32];
266 if (num < 0x10) { /* 16 */
267 snprintf(buf, sizeof(buf), "9%02lld", 15 - (long long)num);
268 } else if (num < 0x100) { /* 256 */
269 snprintf(buf, sizeof(buf), "8%03lld", 255 - (long long)num);
270 } else if (num < 0x1000) /* 4096 */ {
271 snprintf(buf, sizeof(buf), "7%04lld", 4095 - (long long)num);
272 } else if (num < 0x10000) /* 65536 */ {
273 snprintf(buf, sizeof(buf), "6%05lld", 65535 - (long long)num);
274 } else if (num < 0x100000000) /* 4G */ {
275 snprintf(buf, sizeof(buf), "5%010lld", 0xFFFFFFFF - (long long)num);
276 } else {
277 snprintf(buf, sizeof(buf), "4%020lld", (long long)-num);
278 }
279
280 *str = buf;
281 }
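// A worked example of the mapping above; the snippet is illustrative only and
// compiled out, and the helper name is hypothetical.
#if 0
static void decreasing_str_example()
{
  std::string a, b, c;
  decreasing_str(0, &a);   // a == "915"
  decreasing_str(1, &b);   // b == "914"
  decreasing_str(16, &c);  // c == "8239"
  // larger numbers yield lexicographically smaller (and eventually longer)
  // strings: "8239" < "914" < "915"
  ceph_assert(c < b && b < a);
}
#endif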
282
283 /*
284 * We hold two different indexes for objects. The first one holds the
285 * list of objects in the order that we want them to be listed. The
286  * second one only holds the object instances (for versioned
287 * objects), and they're not arranged in any particular order. When
288 * listing objects we'll use the first index, when doing operations on
289 * the objects themselves we'll use the second index. Note that
290 * regular objects only map to the first index anyway
291 */
292
293 static void get_list_index_key(rgw_bucket_dir_entry& entry, string *index_key)
294 {
295 *index_key = entry.key.name;
296
297 string ver_str;
298 decreasing_str(entry.versioned_epoch, &ver_str);
299 string instance_delim("\0i", 2);
300 string ver_delim("\0v", 2);
301
302 index_key->append(ver_delim);
303 index_key->append(ver_str);
304 index_key->append(instance_delim);
305 index_key->append(entry.key.instance);
306 }
307
308 static void encode_obj_versioned_data_key(const cls_rgw_obj_key& key, string *index_key, bool append_delete_marker_suffix = false)
309 {
310 *index_key = BI_PREFIX_CHAR;
311 index_key->append(bucket_index_prefixes[BI_BUCKET_OBJ_INSTANCE_INDEX]);
312 index_key->append(key.name);
313 string delim("\0i", 2);
314 index_key->append(delim);
315 index_key->append(key.instance);
316 if (append_delete_marker_suffix) {
317 string dm("\0d", 2);
318 index_key->append(dm);
319 }
320 }
321
322 static void encode_obj_index_key(const cls_rgw_obj_key& key, string *index_key)
323 {
324 if (key.instance.empty()) {
325 *index_key = key.name;
326 } else {
327 encode_obj_versioned_data_key(key, index_key);
328 }
329 }
330
331 static void encode_olh_data_key(const cls_rgw_obj_key& key, string *index_key)
332 {
333 *index_key = BI_PREFIX_CHAR;
334 index_key->append(bucket_index_prefixes[BI_BUCKET_OLH_DATA_INDEX]);
335 index_key->append(key.name);
336 }
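// Illustrative sketch only (hypothetical object "foo", instance "BAR",
// versioned_epoch 5; "\0" marks an embedded NUL byte). Compiled out; it just
// shows the three key shapes produced by the helpers above.
#if 0
static void index_key_example()
{
  cls_rgw_obj_key key("foo", "BAR");
  std::string instance_idx, olh_idx, list_idx;

  encode_obj_versioned_data_key(key, &instance_idx);
  // instance_idx == "\x80" "1000_foo\0iBAR"   (object instance index)

  encode_olh_data_key(key, &olh_idx);
  // olh_idx      == "\x80" "1001_foo"         (olh data index)

  rgw_bucket_dir_entry entry;
  entry.key = key;
  entry.versioned_epoch = 5;
  get_list_index_key(entry, &list_idx);
  // list_idx     == "foo\0v910\0iBAR"         (listing index; the decreasing
  //                                            epoch string keeps newer
  //                                            versions first)
}
#endif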
337
338 template <class T>
339 static int read_index_entry(cls_method_context_t hctx, string& name, T *entry);
340
341 static int encode_list_index_key(cls_method_context_t hctx, const cls_rgw_obj_key& key, string *index_key)
342 {
343 if (key.instance.empty()) {
344 *index_key = key.name;
345 return 0;
346 }
347
348 string obj_index_key;
349 cls_rgw_obj_key tmp_key(key);
350 if (tmp_key.instance == "null") {
351 tmp_key.instance.clear();
352 }
353 encode_obj_versioned_data_key(tmp_key, &obj_index_key);
354
355 rgw_bucket_dir_entry entry;
356
357 int ret = read_index_entry(hctx, obj_index_key, &entry);
358 if (ret == -ENOENT) {
359 /* couldn't find the entry, set key value after the current object */
360 char buf[2] = { 0x1, 0 };
361 string s(buf);
362 *index_key = key.name + s;
363 return 0;
364 }
365 if (ret < 0) {
366 CLS_LOG(1, "ERROR: encode_list_index_key(): cls_cxx_map_get_val returned %d", ret);
367 return ret;
368 }
369
370 get_list_index_key(entry, index_key);
371
372 return 0;
373 }
374
375 static void split_key(const string& key, list<string>& vals)
376 {
377 size_t pos = 0;
378 const char *p = key.c_str();
379 while (pos < key.size()) {
380 size_t len = strlen(p);
381 vals.push_back(p);
382 pos += len + 1;
383 p += len + 1;
384 }
385 }
386
387 static std::string escape_str(const std::string& s)
388 {
389 int len = escape_json_attr_len(s.c_str(), s.size());
390 std::string escaped(len, 0);
391 escape_json_attr(s.c_str(), s.size(), escaped.data());
392 return escaped;
393 }
394
395 /*
396 * list index key structure:
397 *
398 * <obj name>\0[v<ver>\0i<instance id>]
399 */
400 static int decode_list_index_key(const string& index_key, cls_rgw_obj_key *key, uint64_t *ver)
401 {
402 size_t len = strlen(index_key.c_str());
403
404 key->instance.clear();
405 *ver = 0;
406
407 if (len == index_key.size()) {
408 key->name = index_key;
409 return 0;
410 }
411
412 list<string> vals;
413 split_key(index_key, vals);
414
415 if (vals.empty()) {
416 CLS_LOG(0, "ERROR: %s: bad index_key (%s): split_key() returned empty vals", __func__, escape_str(index_key).c_str());
417 return -EIO;
418 }
419
420 auto iter = vals.begin();
421 key->name = *iter;
422 ++iter;
423
424 if (iter == vals.end()) {
425 CLS_LOG(0, "ERROR: %s: bad index_key (%s): no vals", __func__, escape_str(index_key).c_str());
426 return -EIO;
427 }
428
429 for (; iter != vals.end(); ++iter) {
430 string& val = *iter;
431 if (val[0] == 'i') {
432 key->instance = val.substr(1);
433 } else if (val[0] == 'v') {
434 string err;
435 const char *s = val.c_str() + 1;
436 *ver = strict_strtoll(s, 10, &err);
437 if (!err.empty()) {
438 CLS_LOG(0, "ERROR: %s: bad index_key (%s): could not parse val (v=%s)", __func__, escape_str(index_key).c_str(), s);
439 return -EIO;
440 }
441 }
442 }
443
444 return 0;
445 }
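// Worked example (same hypothetical key as above): given the list index key
// "foo\0v910\0iBAR", split_key() produces {"foo", "v910", "iBAR"}, so
// decode_list_index_key() returns key->name == "foo", key->instance == "BAR"
// and *ver == 910 (i.e. the decreasing-string form, not the original epoch).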
446
447 static int read_bucket_header(cls_method_context_t hctx,
448 rgw_bucket_dir_header *header)
449 {
450 bufferlist bl;
451 int rc = cls_cxx_map_read_header(hctx, &bl);
452 if (rc < 0)
453 return rc;
454
455 if (bl.length() == 0) {
456 *header = rgw_bucket_dir_header();
457 return 0;
458 }
459 auto iter = bl.cbegin();
460 try {
461 decode(*header, iter);
462 } catch (ceph::buffer::error& err) {
463 CLS_LOG(1, "ERROR: read_bucket_header(): failed to decode header\n");
464 return -EIO;
465 }
466
467 return 0;
468 }
469
470 int rgw_bucket_list(cls_method_context_t hctx, bufferlist *in, bufferlist *out)
471 {
472 CLS_LOG(10, "entered %s", __func__);
473
474 // maximum number of calls to get_obj_vals we'll try; compromise
475 // between wanting to return the requested # of entries, but not
476 // wanting to slow down this op with too many omap reads
477 constexpr int max_attempts = 8;
478
479 auto iter = in->cbegin();
480
481 rgw_cls_list_op op;
482 try {
483 decode(op, iter);
484 } catch (ceph::buffer::error& err) {
485 CLS_LOG(1, "ERROR: %s: failed to decode request", __func__);
486 return -EINVAL;
487 }
488
489 rgw_cls_list_ret ret;
490 rgw_bucket_dir& new_dir = ret.dir;
491 auto& name_entry_map = new_dir.m; // map of keys to entries
492
493 int rc = read_bucket_header(hctx, &new_dir.header);
494 if (rc < 0) {
495 CLS_LOG(1, "ERROR: %s: failed to read header", __func__);
496 return rc;
497 }
498
499 // some calls just want the header and request 0 entries
500 if (op.num_entries <= 0) {
501 ret.is_truncated = false;
502 encode(ret, *out);
503 return 0;
504 }
505
506 // key that we can start listing at, one of a) sent in by caller, b)
507 // last item visited, or c) when delimiter present, a key that will
508 // move past the subdirectory
509 std::string start_after_omap_key;
510 encode_list_index_key(hctx, op.start_obj, &start_after_omap_key);
511
512   // this is set whenever start_after_omap_key is set to keep them in
513 // sync since this will be the returned marker when a marker is
514 // returned
515 cls_rgw_obj_key start_after_entry_key;
516
517 // last key stored in result, so if we have to call get_obj_vals
518 // multiple times, we do not add the overlap to result
519 std::string prev_omap_key;
520
521 // last prefix_key stored in result, so we can skip over entries
522 // with the same prefix_key
523 std::string prev_prefix_omap_key;
524
525 bool done = false; // whether we need to keep calling get_obj_vals
526 bool more = true; // output parameter of get_obj_vals
527 bool has_delimiter = !op.delimiter.empty();
528
529 if (has_delimiter &&
530 start_after_omap_key > op.filter_prefix &&
531 boost::algorithm::ends_with(start_after_omap_key, op.delimiter)) {
532 // advance past all subdirectory entries if we start after a
533 // subdirectory
534 start_after_omap_key = cls_rgw_after_delim(start_after_omap_key);
535 }
536
537 for (int attempt = 0;
538 attempt < max_attempts &&
539 more &&
540 !done &&
541 name_entry_map.size() < op.num_entries;
542 ++attempt) {
543 std::map<std::string, bufferlist> keys;
544
545 // note: get_obj_vals skips past the "ugly namespace" (i.e.,
546 // entries that start with the BI_PREFIX_CHAR), so no need to
547 // check for such entries
548 rc = get_obj_vals(hctx, start_after_omap_key, op.filter_prefix,
549 op.num_entries - name_entry_map.size(),
550 &keys, &more);
551 if (rc < 0) {
552 return rc;
553 }
554     CLS_LOG(20, "%s: on attempt %d get_obj_vals returned %ld entries, more=%d",
555 __func__, attempt, keys.size(), more);
556
557 done = keys.empty();
558
559 for (auto kiter = keys.cbegin(); kiter != keys.cend(); ++kiter) {
560 rgw_bucket_dir_entry entry;
561 try {
562 const bufferlist& entrybl = kiter->second;
563 auto eiter = entrybl.cbegin();
564 decode(entry, eiter);
565 } catch (ceph::buffer::error& err) {
566 CLS_LOG(1, "ERROR: %s: failed to decode entry, key=%s",
567 __func__, kiter->first.c_str());
568 return -EINVAL;
569 }
570
571 start_after_omap_key = kiter->first;
572 start_after_entry_key = entry.key;
573 CLS_LOG(20, "%s: working on key=%s len=%zu",
574 __func__, kiter->first.c_str(), kiter->first.size());
575
576 cls_rgw_obj_key key;
577 uint64_t ver;
578 int ret = decode_list_index_key(kiter->first, &key, &ver);
579 if (ret < 0) {
580 CLS_LOG(0, "ERROR: %s: failed to decode list index key (%s)",
581 __func__, escape_str(kiter->first).c_str());
582 continue;
583 }
584
585 if (!entry.is_valid()) {
586 CLS_LOG(20, "%s: entry %s[%s] is not valid",
587 __func__, key.name.c_str(), key.instance.c_str());
588 continue;
589 }
590
591 // filter out noncurrent versions, delete markers, and initial marker
592 if (!op.list_versions &&
593 (!entry.is_visible() || op.start_obj.name == key.name)) {
594 CLS_LOG(20, "%s: entry %s[%s] is not visible",
595 __func__, key.name.c_str(), key.instance.c_str());
596 continue;
597 }
598
599 if (has_delimiter) {
600 int delim_pos = key.name.find(op.delimiter, op.filter_prefix.size());
601
602 if (delim_pos >= 0) {
603 /* extract key with trailing delimiter */
604 string prefix_key =
605 key.name.substr(0, delim_pos + op.delimiter.length());
606
607 if (prefix_key == prev_prefix_omap_key) {
608 continue; // we've already added this;
609 } else {
610 prev_prefix_omap_key = prefix_key;
611 }
612
613 if (name_entry_map.size() < op.num_entries) {
614 rgw_bucket_dir_entry proxy_entry;
615 cls_rgw_obj_key proxy_key(prefix_key);
616 proxy_entry.key = cls_rgw_obj_key(proxy_key);
617 proxy_entry.flags = rgw_bucket_dir_entry::FLAG_COMMON_PREFIX;
618 name_entry_map[prefix_key] = proxy_entry;
619
620 CLS_LOG(20, "%s: got common prefix entry %s[%s] num entries=%lu",
621 __func__, proxy_key.name.c_str(), proxy_key.instance.c_str(),
622 name_entry_map.size());
623 }
624
625 // make sure that if this is the last item added to the
626 // result from this call to get_obj_vals, the next call will
627 // skip past rest of "subdirectory"
628 start_after_omap_key = cls_rgw_after_delim(prefix_key);
629 start_after_entry_key.set(start_after_omap_key);
630
631 // advance past this subdirectory, but then back up one,
632 // so the loop increment will put us in the right place
633 kiter = keys.lower_bound(start_after_omap_key);
634 --kiter;
635
636 continue;
637 }
638
639 // no delimiter after prefix found, so this is a "top-level"
640 // item and we can just fall through
641 }
642
643 if (name_entry_map.size() < op.num_entries &&
644 kiter->first != prev_omap_key) {
645 name_entry_map[kiter->first] = entry;
646 prev_omap_key = kiter->first;
647 CLS_LOG(20, "%s: got object entry %s[%s] num entries=%d",
648 __func__, key.name.c_str(), key.instance.c_str(),
649 int(name_entry_map.size()));
650 }
651 } // for (auto kiter...
652 } // for (int attempt...
653
654 ret.is_truncated = more && !done;
655 if (ret.is_truncated) {
656 ret.marker = start_after_entry_key;
657 }
658 CLS_LOG(20, "%s: normal exit returning %ld entries, is_truncated=%d",
659 __func__, ret.dir.m.size(), ret.is_truncated);
660 encode(ret, *out);
661
662 if (ret.is_truncated && name_entry_map.size() == 0) {
663 CLS_LOG(5, "%s: returning value RGWBIAdvanceAndRetryError", __func__);
664 return RGWBIAdvanceAndRetryError;
665 } else {
666 return 0;
667 }
668 } // rgw_bucket_list
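// Illustrative sketch of the delimiter handling above (hypothetical request
// with op.delimiter = "/" and an empty filter prefix): given plain entries
//   "photos/2021/a.jpg", "photos/2021/b.jpg", "readme.txt"
// the first "photos/..." key yields a single proxy entry "photos/" flagged
// FLAG_COMMON_PREFIX, cls_rgw_after_delim() advances the cursor past the
// whole "photos/" subtree, and "readme.txt" is returned as a regular entry.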
669
670
671 static int check_index(cls_method_context_t hctx,
672 rgw_bucket_dir_header *existing_header,
673 rgw_bucket_dir_header *calc_header)
674 {
675 int rc = read_bucket_header(hctx, existing_header);
676 if (rc < 0) {
677 CLS_LOG(1, "ERROR: check_index(): failed to read header\n");
678 return rc;
679 }
680
681 calc_header->tag_timeout = existing_header->tag_timeout;
682 calc_header->ver = existing_header->ver;
683 calc_header->syncstopped = existing_header->syncstopped;
684
685 map<string, bufferlist> keys;
686 string start_obj;
687 string filter_prefix;
688
689 #define CHECK_CHUNK_SIZE 1000
690 bool done = false;
691 bool more;
692
693 do {
694 rc = get_obj_vals(hctx, start_obj, filter_prefix, CHECK_CHUNK_SIZE, &keys, &more);
695 if (rc < 0)
696 return rc;
697
698 for (auto kiter = keys.begin(); kiter != keys.end(); ++kiter) {
699 if (!bi_is_plain_entry(kiter->first)) {
700 done = true;
701 break;
702 }
703
704 rgw_bucket_dir_entry entry;
705 auto eiter = kiter->second.cbegin();
706 try {
707 decode(entry, eiter);
708 } catch (ceph::buffer::error& err) {
709         CLS_LOG(1, "ERROR: check_index(): failed to decode entry, key=%s", kiter->first.c_str());
710 return -EIO;
711 }
712 rgw_bucket_category_stats& stats = calc_header->stats[entry.meta.category];
713 stats.num_entries++;
714 stats.total_size += entry.meta.accounted_size;
715 stats.total_size_rounded += cls_rgw_get_rounded_size(entry.meta.accounted_size);
716 stats.actual_size += entry.meta.size;
717
718 start_obj = kiter->first;
719 }
720 } while (keys.size() == CHECK_CHUNK_SIZE && !done);
721
722 return 0;
723 }
724
725 int rgw_bucket_check_index(cls_method_context_t hctx, bufferlist *in, bufferlist *out)
726 {
727 CLS_LOG(10, "entered %s", __func__);
728 rgw_cls_check_index_ret ret;
729
730 int rc = check_index(hctx, &ret.existing_header, &ret.calculated_header);
731 if (rc < 0)
732 return rc;
733
734 encode(ret, *out);
735
736 return 0;
737 }
738
739 static int write_bucket_header(cls_method_context_t hctx, rgw_bucket_dir_header *header)
740 {
741 header->ver++;
742
743 bufferlist header_bl;
744 encode(*header, header_bl);
745 return cls_cxx_map_write_header(hctx, &header_bl);
746 }
747
748
749 int rgw_bucket_rebuild_index(cls_method_context_t hctx, bufferlist *in, bufferlist *out)
750 {
751 CLS_LOG(10, "entered %s", __func__);
752 rgw_bucket_dir_header existing_header;
753 rgw_bucket_dir_header calc_header;
754 int rc = check_index(hctx, &existing_header, &calc_header);
755 if (rc < 0)
756 return rc;
757
758 return write_bucket_header(hctx, &calc_header);
759 }
760
761 int rgw_bucket_update_stats(cls_method_context_t hctx, bufferlist *in, bufferlist *out)
762 {
763 CLS_LOG(10, "entered %s", __func__);
764 // decode request
765 rgw_cls_bucket_update_stats_op op;
766 auto iter = in->cbegin();
767 try {
768 decode(op, iter);
769 } catch (ceph::buffer::error& err) {
770 CLS_LOG(1, "ERROR: %s: failed to decode request", __func__);
771 return -EINVAL;
772 }
773
774 rgw_bucket_dir_header header;
775 int rc = read_bucket_header(hctx, &header);
776 if (rc < 0) {
777 CLS_LOG(1, "ERROR: %s: failed to read header", __func__);
778 return rc;
779 }
780
781 for (auto& s : op.stats) {
782 auto& dest = header.stats[s.first];
783 if (op.absolute) {
784 dest = s.second;
785 } else {
786 dest.total_size += s.second.total_size;
787 dest.total_size_rounded += s.second.total_size_rounded;
788 dest.num_entries += s.second.num_entries;
789 dest.actual_size += s.second.actual_size;
790 }
791 }
792
793 return write_bucket_header(hctx, &header);
794 }
795
796 int rgw_bucket_init_index(cls_method_context_t hctx, bufferlist *in, bufferlist *out)
797 {
798 CLS_LOG(10, "entered %s", __func__);
799 bufferlist header_bl;
800 int rc = cls_cxx_map_read_header(hctx, &header_bl);
801 if (rc < 0) {
802 switch (rc) {
803 case -ENODATA:
804 case -ENOENT:
805 break;
806 default:
807 return rc;
808 }
809 }
810
811 if (header_bl.length() != 0) {
812 CLS_LOG(1, "ERROR: index already initialized\n");
813 return -EINVAL;
814 }
815
816 rgw_bucket_dir dir;
817
818 return write_bucket_header(hctx, &dir.header);
819 }
820
821 int rgw_bucket_set_tag_timeout(cls_method_context_t hctx, bufferlist *in, bufferlist *out)
822 {
823 CLS_LOG(10, "entered %s", __func__);
824 // decode request
825 rgw_cls_tag_timeout_op op;
826 auto iter = in->cbegin();
827 try {
828 decode(op, iter);
829 } catch (ceph::buffer::error& err) {
830 CLS_LOG(1, "ERROR: rgw_bucket_set_tag_timeout(): failed to decode request\n");
831 return -EINVAL;
832 }
833
834 rgw_bucket_dir_header header;
835 int rc = read_bucket_header(hctx, &header);
836 if (rc < 0) {
837 CLS_LOG(1, "ERROR: rgw_bucket_set_tag_timeout(): failed to read header\n");
838 return rc;
839 }
840
841 header.tag_timeout = op.tag_timeout;
842
843 return write_bucket_header(hctx, &header);
844 }
845
846 static int read_key_entry(cls_method_context_t hctx, const cls_rgw_obj_key& key,
847 string *idx, rgw_bucket_dir_entry *entry,
848 bool special_delete_marker_name = false);
849
850 int rgw_bucket_prepare_op(cls_method_context_t hctx, bufferlist *in, bufferlist *out)
851 {
852 CLS_LOG(10, "entered %s", __func__);
853 // decode request
854 rgw_cls_obj_prepare_op op;
855 auto iter = in->cbegin();
856 try {
857 decode(op, iter);
858 } catch (ceph::buffer::error& err) {
859 CLS_LOG(1, "ERROR: rgw_bucket_prepare_op(): failed to decode request\n");
860 return -EINVAL;
861 }
862
863 if (op.tag.empty()) {
864 CLS_LOG(1, "ERROR: tag is empty\n");
865 return -EINVAL;
866 }
867
868 CLS_LOG(1, "rgw_bucket_prepare_op(): request: op=%d name=%s instance=%s tag=%s",
869 op.op, op.key.name.c_str(), op.key.instance.c_str(), op.tag.c_str());
870
871 // get on-disk state
872 string idx;
873
874 rgw_bucket_dir_entry entry;
875 int rc = read_key_entry(hctx, op.key, &idx, &entry);
876 if (rc < 0 && rc != -ENOENT)
877 return rc;
878
879 bool noent = (rc == -ENOENT);
880
881 rc = 0;
882
883 if (noent) { // no entry, initialize fields
884 entry.key = op.key;
885 entry.ver = rgw_bucket_entry_ver();
886 entry.exists = false;
887 entry.locator = op.locator;
888 }
889
890 // fill in proper state
891 rgw_bucket_pending_info info;
892 info.timestamp = real_clock::now();
893 info.state = CLS_RGW_STATE_PENDING_MODIFY;
894 info.op = op.op;
895 entry.pending_map.insert(pair<string, rgw_bucket_pending_info>(op.tag, info));
896
897 // write out new key to disk
898 bufferlist info_bl;
899 encode(entry, info_bl);
900 return cls_cxx_map_set_val(hctx, idx, &info_bl);
901 }
902
903 static void unaccount_entry(rgw_bucket_dir_header& header,
904 rgw_bucket_dir_entry& entry)
905 {
906 if (entry.exists) {
907 rgw_bucket_category_stats& stats = header.stats[entry.meta.category];
908 stats.num_entries--;
909 stats.total_size -= entry.meta.accounted_size;
910 stats.total_size_rounded -=
911 cls_rgw_get_rounded_size(entry.meta.accounted_size);
912 stats.actual_size -= entry.meta.size;
913 }
914 }
915
916 static void log_entry(const char *func, const char *str, rgw_bucket_dir_entry *entry)
917 {
918 CLS_LOG(1, "%s: %s: ver=%ld:%llu name=%s instance=%s locator=%s", func, str,
919 (long)entry->ver.pool, (unsigned long long)entry->ver.epoch,
920 entry->key.name.c_str(), entry->key.instance.c_str(), entry->locator.c_str());
921 }
922
923 static void log_entry(const char *func, const char *str, rgw_bucket_olh_entry *entry)
924 {
925 CLS_LOG(1, "%s: %s: epoch=%llu name=%s instance=%s tag=%s", func, str,
926 (unsigned long long)entry->epoch, entry->key.name.c_str(), entry->key.instance.c_str(),
927 entry->tag.c_str());
928 }
929
930 template <class T>
931 static int read_omap_entry(cls_method_context_t hctx, const std::string& name,
932 T* entry)
933 {
934 bufferlist current_entry;
935 int rc = cls_cxx_map_get_val(hctx, name, &current_entry);
936 if (rc < 0) {
937 return rc;
938 }
939
940 auto cur_iter = current_entry.cbegin();
941 try {
942 decode(*entry, cur_iter);
943 } catch (ceph::buffer::error& err) {
944 CLS_LOG(1, "ERROR: %s: failed to decode entry", __func__);
945 return -EIO;
946 }
947 return 0;
948 }
949
950 template <class T>
951 static int read_index_entry(cls_method_context_t hctx, string& name, T* entry)
952 {
953 int ret = read_omap_entry(hctx, name, entry);
954 if (ret < 0) {
955 return ret;
956 }
957
958 log_entry(__func__, "existing entry", entry);
959 return 0;
960 }
961
962 static int read_key_entry(cls_method_context_t hctx, const cls_rgw_obj_key& key,
963 string *idx, rgw_bucket_dir_entry *entry,
964 bool special_delete_marker_name)
965 {
966 encode_obj_index_key(key, idx);
967 int rc = read_index_entry(hctx, *idx, entry);
968 if (rc < 0) {
969 return rc;
970 }
971
972 if (key.instance.empty() &&
973 entry->flags & rgw_bucket_dir_entry::FLAG_VER_MARKER) {
974 /* we only do it where key.instance is empty. In this case the
975 * delete marker will have a separate entry in the index to avoid
976 * collisions with the actual object, as it's mutable
977 */
978 if (special_delete_marker_name) {
979 encode_obj_versioned_data_key(key, idx, true);
980 rc = read_index_entry(hctx, *idx, entry);
981 if (rc == 0) {
982 return 0;
983 }
984 }
985 encode_obj_versioned_data_key(key, idx);
986 rc = read_index_entry(hctx, *idx, entry);
987 if (rc < 0) {
988 *entry = rgw_bucket_dir_entry(); /* need to reset entry because we initialized it earlier */
989 return rc;
990 }
991 }
992
993 return 0;
994 }
995
996 // called by rgw_bucket_complete_op() for each item in op.remove_objs
997 static int complete_remove_obj(cls_method_context_t hctx,
998 rgw_bucket_dir_header& header,
999 const cls_rgw_obj_key& key, bool log_op)
1000 {
1001 rgw_bucket_dir_entry entry;
1002 string idx;
1003 int ret = read_key_entry(hctx, key, &idx, &entry);
1004 if (ret < 0) {
1005 CLS_LOG(1, "%s: read_key_entry name=%s instance=%s failed with %d",
1006 __func__, key.name.c_str(), key.instance.c_str(), ret);
1007 return ret;
1008 }
1009 CLS_LOG(10, "%s: read entry name=%s instance=%s category=%d", __func__,
1010 entry.key.name.c_str(), entry.key.instance.c_str(),
1011 int(entry.meta.category));
1012 unaccount_entry(header, entry);
1013
1014 if (log_op) {
1015 ++header.ver; // increment index version, or we'll overwrite keys previously written
1016 const std::string tag;
1017 ret = log_index_operation(hctx, key, CLS_RGW_OP_DEL, tag, entry.meta.mtime,
1018 entry.ver, CLS_RGW_STATE_COMPLETE, header.ver,
1019 header.max_marker, 0, nullptr, nullptr, nullptr);
1020 if (ret < 0) {
1021 return ret;
1022 }
1023 }
1024
1025 ret = cls_cxx_map_remove_key(hctx, idx);
1026 if (ret < 0) {
1027 CLS_LOG(1, "%s: cls_cxx_map_remove_key failed with %d", __func__, ret);
1028 return ret;
1029 }
1030 return ret;
1031 }
1032
1033 int rgw_bucket_complete_op(cls_method_context_t hctx, bufferlist *in, bufferlist *out)
1034 {
1035 CLS_LOG(10, "entered %s", __func__);
1036
1037 // decode request
1038 rgw_cls_obj_complete_op op;
1039 auto iter = in->cbegin();
1040 try {
1041 decode(op, iter);
1042 } catch (ceph::buffer::error& err) {
1043 CLS_LOG(1, "ERROR: rgw_bucket_complete_op(): failed to decode request\n");
1044 return -EINVAL;
1045 }
1046
1047 CLS_LOG(1, "rgw_bucket_complete_op(): request: op=%d name=%s instance=%s ver=%lu:%llu tag=%s",
1048 op.op, op.key.name.c_str(), op.key.instance.c_str(),
1049 (unsigned long)op.ver.pool, (unsigned long long)op.ver.epoch,
1050 op.tag.c_str());
1051
1052 rgw_bucket_dir_header header;
1053 int rc = read_bucket_header(hctx, &header);
1054 if (rc < 0) {
1055 CLS_LOG(1, "ERROR: rgw_bucket_complete_op(): failed to read header\n");
1056 return -EINVAL;
1057 }
1058
1059 rgw_bucket_dir_entry entry;
1060 bool ondisk = true;
1061
1062 std::string idx;
1063 rc = read_key_entry(hctx, op.key, &idx, &entry);
1064 if (rc == -ENOENT) {
1065 entry.key = op.key;
1066 entry.ver = op.ver;
1067 entry.meta = op.meta;
1068 entry.locator = op.locator;
1069 ondisk = false;
1070 } else if (rc < 0) {
1071 return rc;
1072 }
1073
1074 entry.index_ver = header.ver;
1075 /* resetting entry flags, entry might have been previously a delete
1076 * marker */
1077 entry.flags &= rgw_bucket_dir_entry::FLAG_VER;
1078
1079 if (op.tag.size()) {
1080 auto pinter = entry.pending_map.find(op.tag);
1081 if (pinter == entry.pending_map.end()) {
1082 CLS_LOG(1, "ERROR: couldn't find tag for pending operation\n");
1083 return -EINVAL;
1084 }
1085 entry.pending_map.erase(pinter);
1086 }
1087
1088 if (op.tag.size() && op.op == CLS_RGW_OP_CANCEL) {
1089 CLS_LOG(1, "rgw_bucket_complete_op(): cancel requested\n");
1090 } else if (op.ver.pool == entry.ver.pool &&
1091 op.ver.epoch && op.ver.epoch <= entry.ver.epoch) {
1092 CLS_LOG(1, "rgw_bucket_complete_op(): skipping request, old epoch\n");
1093 op.op = CLS_RGW_OP_CANCEL;
1094 }
1095
1096 // controls whether remove_objs deletions are logged
1097 const bool default_log_op = op.log_op && !header.syncstopped;
1098 // controls whether this operation is logged (depends on op.op and ondisk)
1099 bool log_op = default_log_op;
1100
1101 entry.ver = op.ver;
1102 if (op.op == CLS_RGW_OP_CANCEL) {
1103 log_op = false; // don't log cancelation
1104 if (op.tag.size()) {
1105 if (!entry.exists && entry.pending_map.empty()) {
1106 // a racing delete succeeded, and we canceled the last pending op
1107 CLS_LOG(20, "INFO: %s: removing map entry with key=%s",
1108 __func__, escape_str(idx).c_str());
1109 rc = cls_cxx_map_remove_key(hctx, idx);
1110 if (rc < 0) {
1111 CLS_LOG(1, "ERROR: %s: unable to remove map key, key=%s, rc=%d",
1112 __func__, escape_str(idx).c_str(), rc);
1113 return rc;
1114 }
1115 } else {
1116 // we removed this tag from pending_map so need to write the changes
1117 CLS_LOG(20, "INFO: %s: setting map entry at key=%s",
1118 __func__, escape_str(idx).c_str());
1119 bufferlist new_key_bl;
1120 encode(entry, new_key_bl);
1121 rc = cls_cxx_map_set_val(hctx, idx, &new_key_bl);
1122 if (rc < 0) {
1123 CLS_LOG(1, "ERROR: %s: unable to set map val, key=%s, rc=%d",
1124 __func__, escape_str(idx).c_str(), rc);
1125 return rc;
1126 }
1127 }
1128 }
1129 } // CLS_RGW_OP_CANCEL
1130 else if (op.op == CLS_RGW_OP_DEL) {
1131 // unaccount deleted entry
1132 unaccount_entry(header, entry);
1133
1134 entry.meta = op.meta;
1135 if (!ondisk) {
1136 // no entry to erase
1137 log_op = false;
1138 } else if (!entry.pending_map.size()) {
1139 rc = cls_cxx_map_remove_key(hctx, idx);
1140 if (rc < 0) {
1141 return rc;
1142 }
1143 } else {
1144 entry.exists = false;
1145 bufferlist new_key_bl;
1146 encode(entry, new_key_bl);
1147 rc = cls_cxx_map_set_val(hctx, idx, &new_key_bl);
1148 if (rc < 0) {
1149 return rc;
1150 }
1151 }
1152 } // CLS_RGW_OP_DEL
1153 else if (op.op == CLS_RGW_OP_ADD) {
1154 // unaccount overwritten entry
1155 unaccount_entry(header, entry);
1156
1157 rgw_bucket_dir_entry_meta& meta = op.meta;
1158 rgw_bucket_category_stats& stats = header.stats[meta.category];
1159 entry.meta = meta;
1160 entry.key = op.key;
1161 entry.exists = true;
1162 entry.tag = op.tag;
1163 // account for new entry
1164 stats.num_entries++;
1165 stats.total_size += meta.accounted_size;
1166 stats.total_size_rounded += cls_rgw_get_rounded_size(meta.accounted_size);
1167 stats.actual_size += meta.size;
1168 bufferlist new_key_bl;
1169 encode(entry, new_key_bl);
1170 rc = cls_cxx_map_set_val(hctx, idx, &new_key_bl);
1171 if (rc < 0) {
1172 return rc;
1173 }
1174 } // CLS_RGW_OP_ADD
1175
1176 if (log_op) {
1177 rc = log_index_operation(hctx, op.key, op.op, op.tag, entry.meta.mtime,
1178 entry.ver, CLS_RGW_STATE_COMPLETE, header.ver,
1179 header.max_marker, op.bilog_flags, NULL, NULL,
1180 &op.zones_trace);
1181 if (rc < 0) {
1182 return rc;
1183 }
1184 }
1185
1186 CLS_LOG(20, "rgw_bucket_complete_op(): remove_objs.size()=%d",
1187 (int)op.remove_objs.size());
1188 for (const auto& remove_key : op.remove_objs) {
1189 rc = complete_remove_obj(hctx, header, remove_key, default_log_op);
1190 if (rc < 0) {
1191 continue; // part cleanup errors are not fatal
1192 }
1193 }
1194
1195 return write_bucket_header(hctx, &header);
1196 } // rgw_bucket_complete_op
1197
1198 template <class T>
1199 static int write_entry(cls_method_context_t hctx, T& entry, const string& key)
1200 {
1201 bufferlist bl;
1202 encode(entry, bl);
1203 return cls_cxx_map_set_val(hctx, key, &bl);
1204 }
1205
1206 static int read_olh(cls_method_context_t hctx,cls_rgw_obj_key& obj_key, rgw_bucket_olh_entry *olh_data_entry, string *index_key, bool *found)
1207 {
1208 cls_rgw_obj_key olh_key;
1209 olh_key.name = obj_key.name;
1210
1211 encode_olh_data_key(olh_key, index_key);
1212 int ret = read_index_entry(hctx, *index_key, olh_data_entry);
1213 if (ret < 0 && ret != -ENOENT) {
1214 CLS_LOG(0, "ERROR: read_index_entry() olh_key=%s ret=%d", olh_key.name.c_str(), ret);
1215 return ret;
1216 }
1217 if (found) {
1218 *found = (ret != -ENOENT);
1219 }
1220 return 0;
1221 }
1222
1223 static void update_olh_log(rgw_bucket_olh_entry& olh_data_entry, OLHLogOp op, const string& op_tag,
1224 cls_rgw_obj_key& key, bool delete_marker, uint64_t epoch)
1225 {
1226 vector<rgw_bucket_olh_log_entry>& log = olh_data_entry.pending_log[olh_data_entry.epoch];
1227 rgw_bucket_olh_log_entry log_entry;
1228 log_entry.epoch = epoch;
1229 log_entry.op = op;
1230 log_entry.op_tag = op_tag;
1231 log_entry.key = key;
1232 log_entry.delete_marker = delete_marker;
1233 log.push_back(log_entry);
1234 }
1235
1236 static int write_obj_instance_entry(cls_method_context_t hctx, rgw_bucket_dir_entry& instance_entry, const string& instance_idx)
1237 {
1238 CLS_LOG(20, "write_entry() instance=%s idx=%s flags=%d", escape_str(instance_entry.key.instance).c_str(), instance_idx.c_str(), instance_entry.flags);
1239 /* write the instance entry */
1240 int ret = write_entry(hctx, instance_entry, instance_idx);
1241 if (ret < 0) {
1242 CLS_LOG(0, "ERROR: write_entry() instance_key=%s ret=%d", escape_str(instance_idx).c_str(), ret);
1243 return ret;
1244 }
1245 return 0;
1246 }
1247
1248 /*
1249 * write object instance entry, and if needed also the list entry
1250 */
1251 static int write_obj_entries(cls_method_context_t hctx, rgw_bucket_dir_entry& instance_entry, const string& instance_idx)
1252 {
1253 int ret = write_obj_instance_entry(hctx, instance_entry, instance_idx);
1254 if (ret < 0) {
1255 return ret;
1256 }
1257 string instance_list_idx;
1258 get_list_index_key(instance_entry, &instance_list_idx);
1259
1260 if (instance_idx != instance_list_idx) {
1261 CLS_LOG(20, "write_entry() idx=%s flags=%d", escape_str(instance_list_idx).c_str(), instance_entry.flags);
1262 /* write a new list entry for the object instance */
1263 ret = write_entry(hctx, instance_entry, instance_list_idx);
1264 if (ret < 0) {
1265 CLS_LOG(0, "ERROR: write_entry() instance=%s instance_list_idx=%s ret=%d", instance_entry.key.instance.c_str(), instance_list_idx.c_str(), ret);
1266 return ret;
1267 }
1268 }
1269 return 0;
1270 }
1271
1272
1273 class BIVerObjEntry {
1274 cls_method_context_t hctx;
1275 cls_rgw_obj_key key;
1276 string instance_idx;
1277
1278 rgw_bucket_dir_entry instance_entry;
1279
1280 bool initialized;
1281
1282 public:
1283 BIVerObjEntry(cls_method_context_t& _hctx, const cls_rgw_obj_key& _key) : hctx(_hctx), key(_key), initialized(false) {
1284 // empty
1285 }
1286
1287 int init(bool check_delete_marker = true) {
1288 int ret = read_key_entry(hctx, key, &instance_idx, &instance_entry,
1289 check_delete_marker && key.instance.empty()); /* this is potentially a delete marker, for null objects we
1290                              keep a separate instance entry for the delete markers */
1291
1292 if (ret < 0) {
1293 CLS_LOG(0, "ERROR: read_key_entry() idx=%s ret=%d", instance_idx.c_str(), ret);
1294 return ret;
1295 }
1296 initialized = true;
1297 CLS_LOG(20, "read instance_entry key.name=%s key.instance=%s flags=%d", instance_entry.key.name.c_str(), instance_entry.key.instance.c_str(), instance_entry.flags);
1298 return 0;
1299 }
1300
1301 rgw_bucket_dir_entry& get_dir_entry() {
1302 return instance_entry;
1303 }
1304
1305 void init_as_delete_marker(rgw_bucket_dir_entry_meta& meta) {
1306 /* a deletion marker, need to initialize it, there's no instance entry for it yet */
1307 instance_entry.key = key;
1308 instance_entry.flags = rgw_bucket_dir_entry::FLAG_DELETE_MARKER;
1309 instance_entry.meta = meta;
1310 instance_entry.tag = "delete-marker";
1311
1312 initialized = true;
1313 }
1314
1315 void set_epoch(uint64_t epoch) {
1316 instance_entry.versioned_epoch = epoch;
1317 }
1318
1319 int unlink_list_entry() {
1320 string list_idx;
1321 /* this instance has a previous list entry, remove that entry */
1322 get_list_index_key(instance_entry, &list_idx);
1323 CLS_LOG(20, "unlink_list_entry() list_idx=%s", escape_str(list_idx).c_str());
1324 int ret = cls_cxx_map_remove_key(hctx, list_idx);
1325 if (ret < 0) {
1326 CLS_LOG(0, "ERROR: cls_cxx_map_remove_key() list_idx=%s ret=%d", list_idx.c_str(), ret);
1327 return ret;
1328 }
1329 return 0;
1330 }
1331
1332 int unlink() {
1333 /* remove the instance entry */
1334 CLS_LOG(20, "unlink() idx=%s", escape_str(instance_idx).c_str());
1335 int ret = cls_cxx_map_remove_key(hctx, instance_idx);
1336 if (ret < 0) {
1337 CLS_LOG(0, "ERROR: cls_cxx_map_remove_key() instance_idx=%s ret=%d", instance_idx.c_str(), ret);
1338 return ret;
1339 }
1340 return 0;
1341 }
1342
1343 int write_entries(uint64_t flags_set, uint64_t flags_reset) {
1344 if (!initialized) {
1345 int ret = init();
1346 if (ret < 0) {
1347 return ret;
1348 }
1349 }
1350 instance_entry.flags &= ~flags_reset;
1351 instance_entry.flags |= flags_set;
1352
1353 /* write the instance and list entries */
1354 bool special_delete_marker_key = (instance_entry.is_delete_marker() && instance_entry.key.instance.empty());
1355 encode_obj_versioned_data_key(key, &instance_idx, special_delete_marker_key);
1356 int ret = write_obj_entries(hctx, instance_entry, instance_idx);
1357 if (ret < 0) {
1358 CLS_LOG(0, "ERROR: write_obj_entries() instance_idx=%s ret=%d", instance_idx.c_str(), ret);
1359 return ret;
1360 }
1361
1362 return 0;
1363 }
1364
1365 int write(uint64_t epoch, bool current) {
1366 if (instance_entry.versioned_epoch > 0) {
1367 CLS_LOG(20, "%s: instance_entry.versioned_epoch=%d epoch=%d", __func__, (int)instance_entry.versioned_epoch, (int)epoch);
1368 /* this instance has a previous list entry, remove that entry */
1369 int ret = unlink_list_entry();
1370 if (ret < 0) {
1371 return ret;
1372 }
1373 }
1374
1375 uint64_t flags = rgw_bucket_dir_entry::FLAG_VER;
1376 if (current) {
1377 flags |= rgw_bucket_dir_entry::FLAG_CURRENT;
1378 }
1379
1380 instance_entry.versioned_epoch = epoch;
1381 return write_entries(flags, 0);
1382 }
1383
1384 int demote_current() {
1385 return write_entries(0, rgw_bucket_dir_entry::FLAG_CURRENT);
1386 }
1387
1388 bool is_delete_marker() {
1389 return instance_entry.is_delete_marker();
1390 }
1391
1392 int find_next_key(cls_rgw_obj_key *next_key, bool *found) {
1393 string list_idx;
1394     /* compute this instance's list entry key so we can look up the entry that follows it */
1395 get_list_index_key(instance_entry, &list_idx);
1396 /* this is the current head, need to update! */
1397 map<string, bufferlist> keys;
1398 bool more;
1399 string filter = key.name; /* list key starts with key name, filter it to avoid a case where we cross to
1400                                 a different namespace */
1401 int ret = cls_cxx_map_get_vals(hctx, list_idx, filter, 1, &keys, &more);
1402 if (ret < 0) {
1403 return ret;
1404 }
1405
1406 if (keys.size() < 1) {
1407 *found = false;
1408 return 0;
1409 }
1410
1411 rgw_bucket_dir_entry next_entry;
1412
1413 auto last = keys.rbegin();
1414 try {
1415 auto iter = last->second.cbegin();
1416 decode(next_entry, iter);
1417 } catch (ceph::buffer::error& err) {
1418       CLS_LOG(0, "ERROR: failed to decode entry: %s", last->first.c_str());
1419 return -EIO;
1420 }
1421
1422 *found = (key.name == next_entry.key.name);
1423 if (*found) {
1424 *next_key = next_entry.key;
1425 }
1426
1427 return 0;
1428 }
1429
1430 real_time mtime() {
1431 return instance_entry.meta.mtime;
1432 }
1433 }; // class BIVerObjEntry
1434
1435
1436 class BIOLHEntry {
1437 cls_method_context_t hctx;
1438 cls_rgw_obj_key key;
1439
1440 string olh_data_idx;
1441 rgw_bucket_olh_entry olh_data_entry;
1442
1443 bool initialized;
1444 public:
1445 BIOLHEntry(cls_method_context_t& _hctx, const cls_rgw_obj_key& _key) : hctx(_hctx), key(_key), initialized(false) { }
1446
1447 int init(bool *exists) {
1448 /* read olh */
1449 int ret = read_olh(hctx, key, &olh_data_entry, &olh_data_idx, exists);
1450 if (ret < 0) {
1451 return ret;
1452 }
1453
1454 initialized = true;
1455 return 0;
1456 }
1457
1458 bool start_modify(uint64_t candidate_epoch) {
1459 if (candidate_epoch) {
1460 if (candidate_epoch < olh_data_entry.epoch) {
1461 return false; /* olh cannot be modified, old epoch */
1462 }
1463 olh_data_entry.epoch = candidate_epoch;
1464 } else {
1465 if (olh_data_entry.epoch == 0) {
1466         olh_data_entry.epoch = 2; /* versioned epoch should start with 2, 1 is reserved for converted plain entries */
1467 } else {
1468 olh_data_entry.epoch++;
1469 }
1470 }
1471 return true;
1472 }
1473
1474 uint64_t get_epoch() {
1475 return olh_data_entry.epoch;
1476 }
1477
1478 rgw_bucket_olh_entry& get_entry() {
1479 return olh_data_entry;
1480 }
1481
1482 void update(cls_rgw_obj_key& key, bool delete_marker) {
1483 olh_data_entry.delete_marker = delete_marker;
1484 olh_data_entry.key = key;
1485 }
1486
1487 int write() {
1488 /* write the olh data entry */
1489 int ret = write_entry(hctx, olh_data_entry, olh_data_idx);
1490 if (ret < 0) {
1491 CLS_LOG(0, "ERROR: write_entry() olh_key=%s ret=%d", olh_data_idx.c_str(), ret);
1492 return ret;
1493 }
1494
1495 return 0;
1496 }
1497
1498 void update_log(OLHLogOp op, const string& op_tag, cls_rgw_obj_key& key, bool delete_marker, uint64_t epoch = 0) {
1499 if (epoch == 0) {
1500 epoch = olh_data_entry.epoch;
1501 }
1502 update_olh_log(olh_data_entry, op, op_tag, key, delete_marker, epoch);
1503 }
1504
1505 bool exists() { return olh_data_entry.exists; }
1506
1507 void set_exists(bool exists) {
1508 olh_data_entry.exists = exists;
1509 }
1510
1511 bool pending_removal() { return olh_data_entry.pending_removal; }
1512
1513 void set_pending_removal(bool pending_removal) {
1514 olh_data_entry.pending_removal = pending_removal;
1515 }
1516
1517 const string& get_tag() { return olh_data_entry.tag; }
1518 void set_tag(const string& tag) {
1519 olh_data_entry.tag = tag;
1520 }
1521 };
1522
1523 static int write_version_marker(cls_method_context_t hctx, cls_rgw_obj_key& key)
1524 {
1525 rgw_bucket_dir_entry entry;
1526 entry.key = key;
1527 entry.flags = rgw_bucket_dir_entry::FLAG_VER_MARKER;
1528 int ret = write_entry(hctx, entry, key.name);
1529 if (ret < 0) {
1530 CLS_LOG(0, "ERROR: write_entry returned ret=%d", ret);
1531 return ret;
1532 }
1533 return 0;
1534 }
1535
1536 /*
1537  * plain entries are the ones that were created when the bucket was not
1538  * versioned; if we overwrite these objects, we need to convert them
1539  * to versioned entries -- ones that have both a data entry and a listing
1540  * key. Their version is going to be empty though
1541 */
1542 static int convert_plain_entry_to_versioned(cls_method_context_t hctx,
1543 cls_rgw_obj_key& key,
1544 bool demote_current,
1545 bool instance_only)
1546 {
1547 if (!key.instance.empty()) {
1548 return -EINVAL;
1549 }
1550
1551 rgw_bucket_dir_entry entry;
1552
1553 string orig_idx;
1554 int ret = read_key_entry(hctx, key, &orig_idx, &entry);
1555 if (ret != -ENOENT) {
1556 if (ret < 0) {
1557 CLS_LOG(0, "ERROR: read_key_entry() returned ret=%d", ret);
1558 return ret;
1559 }
1560
1561 entry.versioned_epoch = 1; /* converted entries are always 1 */
1562 entry.flags |= rgw_bucket_dir_entry::FLAG_VER;
1563
1564 if (demote_current) {
1565 entry.flags &= ~rgw_bucket_dir_entry::FLAG_CURRENT;
1566 }
1567
1568 string new_idx;
1569 encode_obj_versioned_data_key(key, &new_idx);
1570
1571 if (instance_only) {
1572 ret = write_obj_instance_entry(hctx, entry, new_idx);
1573 } else {
1574 ret = write_obj_entries(hctx, entry, new_idx);
1575 }
1576 if (ret < 0) {
1577 CLS_LOG(0, "ERROR: write_obj_entries new_idx=%s returned %d",
1578 new_idx.c_str(), ret);
1579 return ret;
1580 }
1581 }
1582
1583 ret = write_version_marker(hctx, key);
1584 if (ret < 0) {
1585 return ret;
1586 }
1587
1588 return 0;
1589 }
1590
1591 /*
1592 * Link an object version to an olh, update the relevant index
1593 * entries. It will also handle the deletion marker case. We have a
1594 * few entries that we need to take care of. For object 'foo',
1595 * instance BAR, we'd update the following (not actual encoding):
1596 *
1597 * - olh data: [BI_BUCKET_OLH_DATA_INDEX]foo
1598 * - object instance data: [BI_BUCKET_OBJ_INSTANCE_INDEX]foo,BAR
1599 * - object instance list entry: foo,123,BAR
1600 *
1601  * The instance list entries need to be ordered from newer to older, so
1602  * we generate an appropriate number string that follows the name.
1603  * The top instance for each object is marked appropriately. We
1604  * generate an instance entry for deletion markers here, as they are not
1605  * created beforehand.
1606 */
1607 static int rgw_bucket_link_olh(cls_method_context_t hctx, bufferlist *in, bufferlist *out)
1608 {
1609 CLS_LOG(10, "entered %s", __func__);
1610 string olh_data_idx;
1611 string instance_idx;
1612
1613 // decode request
1614 rgw_cls_link_olh_op op;
1615 auto iter = in->cbegin();
1616 try {
1617 decode(op, iter);
1618 } catch (ceph::buffer::error& err) {
1619 CLS_LOG(0, "ERROR: rgw_bucket_link_olh_op(): failed to decode request\n");
1620 return -EINVAL;
1621 }
1622
1623 /* read instance entry */
1624 BIVerObjEntry obj(hctx, op.key);
1625 int ret = obj.init(op.delete_marker);
1626
1627 /* NOTE: When a delete is issued, a key instance is always provided,
1628 * either the one for which the delete is requested or a new random
1629 * one when no instance is specified. So we need to see which of
1630 * these two cases we're dealing with. The variable `existed` will
1631 * be true if the instance was specified and false if it was
1632 * randomly generated. It might have been cleaner if the instance
1633 * were empty and randomly generated here and returned in the reply,
1634    * as that would better guard against a typo in the instance id. This code
1635 * should be audited and possibly cleaned up. */
1636
1637 bool existed = (ret == 0);
1638 if (ret == -ENOENT && op.delete_marker) {
1639 ret = 0;
1640 }
1641 if (ret < 0) {
1642 return ret;
1643 }
1644
1645 BIOLHEntry olh(hctx, op.key);
1646 bool olh_read_attempt = false;
1647 bool olh_found = false;
1648 if (!existed && op.delete_marker) {
1649 /* read olh */
1650 ret = olh.init(&olh_found);
1651 if (ret < 0) {
1652 return ret;
1653 }
1654 olh_read_attempt = true;
1655
1656     // if we're deleting (i.e., adding a delete marker) and the OLH
1657     // indicates it already refers to a delete marker, error out
1658 if (olh_found && olh.get_entry().delete_marker) {
1659 CLS_LOG(10,
1660 "%s: delete marker received for \"%s\" although OLH"
1661 " already refers to a delete marker",
1662 __func__, escape_str(op.key.to_string()).c_str());
1663 return -ENOENT;
1664 }
1665 }
1666
1667 if (existed && !real_clock::is_zero(op.unmod_since)) {
1668 timespec mtime = ceph::real_clock::to_timespec(obj.mtime());
1669 timespec unmod = ceph::real_clock::to_timespec(op.unmod_since);
1670 if (!op.high_precision_time) {
1671 mtime.tv_nsec = 0;
1672 unmod.tv_nsec = 0;
1673 }
1674 if (mtime >= unmod) {
1675       return 0; /* no need to set error, we just return 0 and avoid
1676 * writing to the bi log */
1677 }
1678 }
1679
1680 bool removing;
1681
1682 /*
1683 * Special handling for null instance object / delete-marker. For
1684 * these objects we're going to have separate instances for a data
1685    * object vs. delete-marker to avoid collisions. We now check whether
1686    * we are overwriting a previous entry, and in that case we'll remove
1687 * its list entry.
1688 */
1689 if (op.key.instance.empty()) {
1690 BIVerObjEntry other_obj(hctx, op.key);
1691 ret = other_obj.init(!op.delete_marker); /* try reading the other
1692 * null versioned
1693 * entry */
1694 existed = (ret >= 0 && !other_obj.is_delete_marker());
1695 if (ret >= 0 && other_obj.is_delete_marker() != op.delete_marker) {
1696 ret = other_obj.unlink_list_entry();
1697 if (ret < 0) {
1698 return ret;
1699 }
1700 }
1701
1702 removing = existed && op.delete_marker;
1703 if (!removing) {
1704 ret = other_obj.unlink();
1705 if (ret < 0) {
1706 return ret;
1707 }
1708 }
1709 } else {
1710 removing = (existed && !obj.is_delete_marker() && op.delete_marker);
1711 }
1712
1713 if (op.delete_marker) {
1714 /* a deletion marker, need to initialize entry as such */
1715 obj.init_as_delete_marker(op.meta);
1716 }
1717
1718 /* read olh */
1719 if (!olh_read_attempt) { // only read if we didn't attempt earlier
1720 ret = olh.init(&olh_found);
1721 if (ret < 0) {
1722 return ret;
1723 }
1724 olh_read_attempt = true;
1725 }
1726
1727 const uint64_t prev_epoch = olh.get_epoch();
1728
1729 if (!olh.start_modify(op.olh_epoch)) {
1730 ret = obj.write(op.olh_epoch, false);
1731 if (ret < 0) {
1732 return ret;
1733 }
1734 if (removing) {
1735 olh.update_log(CLS_RGW_OLH_OP_REMOVE_INSTANCE, op.op_tag, op.key, false, op.olh_epoch);
1736 }
1737 return 0;
1738 }
1739
1740 // promote this version to current if it's a newer epoch, or if it matches the
1741 // current epoch and sorts after the current instance
1742 const bool promote = (olh.get_epoch() > prev_epoch) ||
1743 (olh.get_epoch() == prev_epoch &&
1744 olh.get_entry().key.instance >= op.key.instance);
1745
1746 if (olh_found) {
1747 const string& olh_tag = olh.get_tag();
1748 if (op.olh_tag != olh_tag) {
1749 if (!olh.pending_removal()) {
1750 CLS_LOG(5, "NOTICE: op.olh_tag (%s) != olh.tag (%s)", op.olh_tag.c_str(), olh_tag.c_str());
1751 return -ECANCELED;
1752 }
1753 /* if pending removal, this is a new olh instance */
1754 olh.set_tag(op.olh_tag);
1755 }
1756 if (promote && olh.exists()) {
1757 rgw_bucket_olh_entry& olh_entry = olh.get_entry();
1758 /* found olh, previous instance is no longer the latest, need to update */
1759 if (!(olh_entry.key == op.key)) {
1760 BIVerObjEntry old_obj(hctx, olh_entry.key);
1761
1762 ret = old_obj.demote_current();
1763 if (ret < 0) {
1764 CLS_LOG(0, "ERROR: could not demote current on previous key ret=%d", ret);
1765 return ret;
1766 }
1767 }
1768 }
1769 olh.set_pending_removal(false);
1770 } else {
1771 bool instance_only = (op.key.instance.empty() && op.delete_marker);
1772 cls_rgw_obj_key key(op.key.name);
1773 ret = convert_plain_entry_to_versioned(hctx, key, promote, instance_only);
1774 if (ret < 0) {
1775 CLS_LOG(0, "ERROR: convert_plain_entry_to_versioned ret=%d", ret);
1776 return ret;
1777 }
1778 olh.set_tag(op.olh_tag);
1779 }
1780
1781 /* update the olh log */
1782 olh.update_log(CLS_RGW_OLH_OP_LINK_OLH, op.op_tag, op.key, op.delete_marker);
1783 if (removing) {
1784 olh.update_log(CLS_RGW_OLH_OP_REMOVE_INSTANCE, op.op_tag, op.key, false);
1785 }
1786
1787 if (promote) {
1788 olh.update(op.key, op.delete_marker);
1789 }
1790 olh.set_exists(true);
1791
1792 ret = olh.write();
1793 if (ret < 0) {
1794 CLS_LOG(0, "ERROR: failed to update olh ret=%d", ret);
1795 return ret;
1796 }
1797
1798 /* write the instance and list entries */
1799 ret = obj.write(olh.get_epoch(), promote);
1800 if (ret < 0) {
1801 return ret;
1802 }
1803
1804 if (!op.log_op) {
1805 return 0;
1806 }
1807
1808 rgw_bucket_dir_header header;
1809 ret = read_bucket_header(hctx, &header);
1810 if (ret < 0) {
1811 CLS_LOG(1, "ERROR: rgw_bucket_link_olh(): failed to read header\n");
1812 return ret;
1813 }
1814 if (header.syncstopped) {
1815 return 0;
1816 }
1817
1818 rgw_bucket_dir_entry& entry = obj.get_dir_entry();
1819
1820 rgw_bucket_entry_ver ver;
1821 ver.epoch = (op.olh_epoch ? op.olh_epoch : olh.get_epoch());
1822
1823 string *powner = NULL;
1824 string *powner_display_name = NULL;
1825
1826 if (op.delete_marker) {
1827 powner = &entry.meta.owner;
1828 powner_display_name = &entry.meta.owner_display_name;
1829 }
1830
1831 RGWModifyOp operation = (op.delete_marker ? CLS_RGW_OP_LINK_OLH_DM : CLS_RGW_OP_LINK_OLH);
1832 ret = log_index_operation(hctx, op.key, operation, op.op_tag,
1833 entry.meta.mtime, ver,
1834 CLS_RGW_STATE_COMPLETE, header.ver, header.max_marker, op.bilog_flags | RGW_BILOG_FLAG_VERSIONED_OP,
1835 powner, powner_display_name, &op.zones_trace);
1836 if (ret < 0)
1837 return ret;
1838
1839 return write_bucket_header(hctx, &header); /* updates header version */
1840 }
1841
1842 static int rgw_bucket_unlink_instance(cls_method_context_t hctx, bufferlist *in, bufferlist *out)
1843 {
1844 CLS_LOG(10, "entered %s", __func__);
1845 string olh_data_idx;
1846 string instance_idx;
1847
1848 // decode request
1849 rgw_cls_unlink_instance_op op;
1850 auto iter = in->cbegin();
1851 try {
1852 decode(op, iter);
1853 } catch (ceph::buffer::error& err) {
1854 CLS_LOG(0, "ERROR: rgw_bucket_rm_obj_instance_op(): failed to decode request\n");
1855 return -EINVAL;
1856 }
1857
1858 cls_rgw_obj_key dest_key = op.key;
1859 if (dest_key.instance == "null") {
1860 dest_key.instance.clear();
1861 }
1862
1863 BIVerObjEntry obj(hctx, dest_key);
1864 BIOLHEntry olh(hctx, dest_key);
1865
1866 int ret = obj.init();
1867 if (ret == -ENOENT) {
1868 return 0; /* already removed */
1869 }
1870 if (ret < 0) {
1871 CLS_LOG(0, "ERROR: obj.init() returned ret=%d", ret);
1872 return ret;
1873 }
1874
1875 bool olh_found;
1876 ret = olh.init(&olh_found);
1877 if (ret < 0) {
1878 CLS_LOG(0, "ERROR: olh.init() returned ret=%d", ret);
1879 return ret;
1880 }
1881
1882 if (!olh_found) {
1883 bool instance_only = false;
1884 cls_rgw_obj_key key(dest_key.name);
1885 ret = convert_plain_entry_to_versioned(hctx, key, true, instance_only);
1886 if (ret < 0) {
1887 CLS_LOG(0, "ERROR: convert_plain_entry_to_versioned ret=%d", ret);
1888 return ret;
1889 }
1890 olh.update(dest_key, false);
1891 olh.set_tag(op.olh_tag);
1892
1893 obj.set_epoch(1);
1894 }
1895
1896 if (!olh.start_modify(op.olh_epoch)) {
1897 ret = obj.unlink_list_entry();
1898 if (ret < 0) {
1899 return ret;
1900 }
1901
1902 if (obj.is_delete_marker()) {
1903 return 0;
1904 }
1905
1906 olh.update_log(CLS_RGW_OLH_OP_REMOVE_INSTANCE, op.op_tag, op.key, false, op.olh_epoch);
1907 return olh.write();
1908 }
1909
1910 rgw_bucket_olh_entry& olh_entry = olh.get_entry();
1911 cls_rgw_obj_key& olh_key = olh_entry.key;
1912 CLS_LOG(20, "%s: updating olh log: existing olh entry: %s[%s] (delete_marker=%d)", __func__,
1913 olh_key.name.c_str(), olh_key.instance.c_str(), olh_entry.delete_marker);
1914
1915 if (olh_key == dest_key) {
1916 /* this is the current head, need to update! */
1917 cls_rgw_obj_key next_key;
1918 bool found = false;
1919 ret = obj.find_next_key(&next_key, &found);
1920 if (ret < 0) {
1921 CLS_LOG(0, "ERROR: obj.find_next_key() returned ret=%d", ret);
1922 return ret;
1923 }
1924
1925 if (found) {
1926 BIVerObjEntry next(hctx, next_key);
1927 ret = next.write(olh.get_epoch(), true);
1928 if (ret < 0) {
1929 CLS_LOG(0, "ERROR: next.write() returned ret=%d", ret);
1930 return ret;
1931 }
1932
1933 CLS_LOG(20, "%s: updating olh log: link olh -> %s[%s] (is_delete=%d)", __func__,
1934 next_key.name.c_str(), next_key.instance.c_str(), (int)next.is_delete_marker());
1935
1936 olh.update(next_key, next.is_delete_marker());
1937 olh.update_log(CLS_RGW_OLH_OP_LINK_OLH, op.op_tag, next_key, next.is_delete_marker());
1938 } else {
1939 // next_key is empty, but we need to preserve its name in case this entry
1940 // gets resharded, because this key is used for hash placement
1941 next_key.name = dest_key.name;
1942 olh.update(next_key, false);
1943 olh.update_log(CLS_RGW_OLH_OP_UNLINK_OLH, op.op_tag, next_key, false);
1944 olh.set_exists(false);
1945 olh.set_pending_removal(true);
1946 }
1947 }
1948
1949 if (!obj.is_delete_marker()) {
1950 olh.update_log(CLS_RGW_OLH_OP_REMOVE_INSTANCE, op.op_tag, op.key, false);
1951 } else {
1952 /* this is a delete marker, it's our responsibility to remove its
1953 * instance entry */
1954 ret = obj.unlink();
1955 if (ret < 0) {
1956 return ret;
1957 }
1958 }
1959
1960 ret = obj.unlink_list_entry();
1961 if (ret < 0) {
1962 return ret;
1963 }
1964
1965 ret = olh.write();
1966 if (ret < 0) {
1967 return ret;
1968 }
1969
1970 if (!op.log_op) {
1971 return 0;
1972 }
1973
1974 rgw_bucket_dir_header header;
1975 ret = read_bucket_header(hctx, &header);
1976 if (ret < 0) {
1977 CLS_LOG(1, "ERROR: rgw_bucket_unlink_instance(): failed to read header\n");
1978 return ret;
1979 }
1980 if (header.syncstopped) {
1981 return 0;
1982 }
1983
1984 rgw_bucket_entry_ver ver;
1985 ver.epoch = (op.olh_epoch ? op.olh_epoch : olh.get_epoch());
1986
1987 real_time mtime = obj.mtime(); /* mtime has no real meaning in
1988 * instance removal context */
1989 ret = log_index_operation(hctx, op.key, CLS_RGW_OP_UNLINK_INSTANCE, op.op_tag,
1990 mtime, ver,
1991 CLS_RGW_STATE_COMPLETE, header.ver, header.max_marker,
1992 op.bilog_flags | RGW_BILOG_FLAG_VERSIONED_OP, NULL, NULL, &op.zones_trace);
1993 if (ret < 0)
1994 return ret;
1995
1996 return write_bucket_header(hctx, &header); /* updates header version */
1997 }
1998
1999 static int rgw_bucket_read_olh_log(cls_method_context_t hctx, bufferlist *in, bufferlist *out)
2000 {
2001 CLS_LOG(10, "entered %s", __func__);
2002 // decode request
2003 rgw_cls_read_olh_log_op op;
2004 auto iter = in->cbegin();
2005 try {
2006 decode(op, iter);
2007 } catch (ceph::buffer::error& err) {
2008 CLS_LOG(0, "ERROR: rgw_bucket_read_olh_log(): failed to decode request\n");
2009 return -EINVAL;
2010 }
2011
2012 if (!op.olh.instance.empty()) {
2013 CLS_LOG(1, "bad key passed in (non empty instance)");
2014 return -EINVAL;
2015 }
2016
2017 rgw_bucket_olh_entry olh_data_entry;
2018 string olh_data_key;
2019 encode_olh_data_key(op.olh, &olh_data_key);
2020 int ret = read_index_entry(hctx, olh_data_key, &olh_data_entry);
2021 if (ret < 0 && ret != -ENOENT) {
2022 CLS_LOG(0, "ERROR: read_index_entry() olh_key=%s ret=%d", olh_data_key.c_str(), ret);
2023 return ret;
2024 }
2025
2026 if (olh_data_entry.tag != op.olh_tag) {
2027 CLS_LOG(1, "NOTICE: %s: olh_tag_mismatch olh_data_entry.tag=%s op.olh_tag=%s", __func__, olh_data_entry.tag.c_str(), op.olh_tag.c_str());
2028 return -ECANCELED;
2029 }
2030
2031 rgw_cls_read_olh_log_ret op_ret;
2032
2033 #define MAX_OLH_LOG_ENTRIES 1000
2034 map<uint64_t, vector<rgw_bucket_olh_log_entry> >& log = olh_data_entry.pending_log;
2035
2036 if (!log.empty() && log.begin()->first > op.ver_marker && log.size() <= MAX_OLH_LOG_ENTRIES) {
2037 op_ret.log = log;
2038 op_ret.is_truncated = false;
2039 } else {
2040 auto iter = log.upper_bound(op.ver_marker);
2041
2042 for (int i = 0; i < MAX_OLH_LOG_ENTRIES && iter != log.end(); ++i, ++iter) {
2043 op_ret.log[iter->first] = iter->second;
2044 }
2045 op_ret.is_truncated = (iter != log.end());
2046 }
2047
2048 encode(op_ret, *out);
2049
2050 return 0;
2051 }
2052
2053 static int rgw_bucket_trim_olh_log(cls_method_context_t hctx, bufferlist *in, bufferlist *out)
2054 {
2055 CLS_LOG(10, "entered %s", __func__);
2056 // decode request
2057 rgw_cls_trim_olh_log_op op;
2058 auto iter = in->cbegin();
2059 try {
2060 decode(op, iter);
2061 } catch (ceph::buffer::error& err) {
2062 CLS_LOG(0, "ERROR: rgw_bucket_trim_olh_log(): failed to decode request\n");
2063 return -EINVAL;
2064 }
2065
2066 if (!op.olh.instance.empty()) {
2067 CLS_LOG(1, "bad key passed in (non empty instance)");
2068 return -EINVAL;
2069 }
2070
2071 /* read olh entry */
2072 rgw_bucket_olh_entry olh_data_entry;
2073 string olh_data_key;
2074 encode_olh_data_key(op.olh, &olh_data_key);
2075 int ret = read_index_entry(hctx, olh_data_key, &olh_data_entry);
2076 if (ret < 0 && ret != -ENOENT) {
2077 CLS_LOG(0, "ERROR: read_index_entry() olh_key=%s ret=%d", olh_data_key.c_str(), ret);
2078 return ret;
2079 }
2080
2081 if (olh_data_entry.tag != op.olh_tag) {
2082 CLS_LOG(1, "NOTICE: %s: olh_tag_mismatch olh_data_entry.tag=%s op.olh_tag=%s", __func__, olh_data_entry.tag.c_str(), op.olh_tag.c_str());
2083 return -ECANCELED;
2084 }
2085
2086 /* remove all versions up to and including ver from the pending map */
2087 auto& log = olh_data_entry.pending_log;
2088 auto liter = log.begin();
2089 while (liter != log.end() && liter->first <= op.ver) {
2090 auto rm_iter = liter;
2091 ++liter;
2092 log.erase(rm_iter);
2093 }
2094
2095 /* write the olh data entry */
2096 ret = write_entry(hctx, olh_data_entry, olh_data_key);
2097 if (ret < 0) {
2098 CLS_LOG(0, "ERROR: write_entry() olh_key=%s ret=%d", olh_data_key.c_str(), ret);
2099 return ret;
2100 }
2101
2102 return 0;
2103 }
2104
2105 static int rgw_bucket_clear_olh(cls_method_context_t hctx, bufferlist *in, bufferlist *out)
2106 {
2107 CLS_LOG(10, "entered %s", __func__);
2108 // decode request
2109 rgw_cls_bucket_clear_olh_op op;
2110 auto iter = in->cbegin();
2111 try {
2112 decode(op, iter);
2113 } catch (ceph::buffer::error& err) {
2114 CLS_LOG(0, "ERROR: rgw_bucket_clear_olh(): failed to decode request\n");
2115 return -EINVAL;
2116 }
2117
2118 if (!op.key.instance.empty()) {
2119 CLS_LOG(1, "bad key passed in (non empty instance)");
2120 return -EINVAL;
2121 }
2122
2123 /* read olh entry */
2124 rgw_bucket_olh_entry olh_data_entry;
2125 string olh_data_key;
2126 encode_olh_data_key(op.key, &olh_data_key);
2127 int ret = read_index_entry(hctx, olh_data_key, &olh_data_entry);
2128 if (ret < 0 && ret != -ENOENT) {
2129 CLS_LOG(0, "ERROR: read_index_entry() olh_key=%s ret=%d", olh_data_key.c_str(), ret);
2130 return ret;
2131 }
2132
2133 if (olh_data_entry.tag != op.olh_tag) {
2134 CLS_LOG(1, "NOTICE: %s: olh_tag_mismatch olh_data_entry.tag=%s op.olh_tag=%s", __func__, olh_data_entry.tag.c_str(), op.olh_tag.c_str());
2135 return -ECANCELED;
2136 }
2137
2138 ret = cls_cxx_map_remove_key(hctx, olh_data_key);
2139 if (ret < 0) {
2140 CLS_LOG(1, "NOTICE: %s: can't remove key %s ret=%d", __func__, olh_data_key.c_str(), ret);
2141 return ret;
2142 }
2143
2144 rgw_bucket_dir_entry plain_entry;
2145
2146 /* read plain entry, make sure it's a versioned placeholder */
2147 ret = read_index_entry(hctx, op.key.name, &plain_entry);
2148 if (ret == -ENOENT) {
2149 /* we're done, no entry exists */
2150 return 0;
2151 }
2152 if (ret < 0) {
2153 CLS_LOG(0, "ERROR: read_index_entry key=%s ret=%d", op.key.name.c_str(), ret);
2154 return ret;
2155 }
2156
2157 if ((plain_entry.flags & rgw_bucket_dir_entry::FLAG_VER_MARKER) == 0) {
2158 /* it's not a version marker, don't remove it */
2159 return 0;
2160 }
2161
2162 ret = cls_cxx_map_remove_key(hctx, op.key.name);
2163 if (ret < 0) {
2164 CLS_LOG(1, "NOTICE: %s: can't remove key %s ret=%d", __func__, op.key.name.c_str(), ret);
2165 return ret;
2166 }
2167
2168 return 0;
2169 }
2170
2171 int rgw_dir_suggest_changes(cls_method_context_t hctx,
2172 bufferlist *in, bufferlist *out)
2173 {
2174 CLS_LOG(1, "entered %s", __func__);
2175
2176 bufferlist header_bl;
2177 rgw_bucket_dir_header header;
2178 bool header_changed = false;
2179
2180 int rc = read_bucket_header(hctx, &header);
2181 if (rc < 0) {
2182 CLS_LOG(1, "ERROR: rgw_dir_suggest_changes(): failed to read header\n");
2183 return rc;
2184 }
2185
2186 timespan tag_timeout(
2187 std::chrono::seconds(
2188 header.tag_timeout ? header.tag_timeout : CEPH_RGW_TAG_TIMEOUT));
2189
2190 auto in_iter = in->cbegin();
2191
2192 while (!in_iter.end()) {
2193 __u8 op;
2194 rgw_bucket_dir_entry cur_change;
2195 rgw_bucket_dir_entry cur_disk;
2196 try {
2197 decode(op, in_iter);
2198 decode(cur_change, in_iter);
2199 } catch (ceph::buffer::error& err) {
2200 CLS_LOG(1, "ERROR: rgw_dir_suggest_changes(): failed to decode request\n");
2201 return -EINVAL;
2202 }
2203
2204 bufferlist cur_disk_bl;
2205 string cur_change_key;
2206 encode_obj_index_key(cur_change.key, &cur_change_key);
2207 int ret = cls_cxx_map_get_val(hctx, cur_change_key, &cur_disk_bl);
2208 if (ret < 0 && ret != -ENOENT)
2209 return -EINVAL;
2210
2211 if (ret == -ENOENT) {
2212 continue;
2213 }
2214
2215 if (cur_disk_bl.length()) {
2216 auto cur_disk_iter = cur_disk_bl.cbegin();
2217 try {
2218 decode(cur_disk, cur_disk_iter);
2219 } catch (ceph::buffer::error& error) {
2220 CLS_LOG(1, "ERROR: rgw_dir_suggest_changes(): failed to decode cur_disk\n");
2221 return -EINVAL;
2222 }
2223
2224 // remove any pending entries whose tag timeout has expired. until expiry,
2225 // these pending entries will prevent us from applying suggested changes
2226 real_time cur_time = real_clock::now();
2227 auto iter = cur_disk.pending_map.begin();
2228 while(iter != cur_disk.pending_map.end()) {
2229 auto cur_iter = iter++;
2230 if (cur_time > (cur_iter->second.timestamp + timespan(tag_timeout))) {
2231 cur_disk.pending_map.erase(cur_iter);
2232 }
2233 }
2234 }
2235
2236 CLS_LOG(20, "cur_disk.pending_map.empty()=%d op=%d cur_disk.exists=%d "
2237 "cur_disk.index_ver=%d cur_change.exists=%d cur_change.index_ver=%d",
2238 cur_disk.pending_map.empty(), (int)op, cur_disk.exists,
2239 (int)cur_disk.index_ver, cur_change.exists,
2240 (int)cur_change.index_ver);
2241
2242 if (cur_change.index_ver < cur_disk.index_ver) {
2243 // a pending on-disk entry was completed since this suggestion was made,
2244 // don't apply it yet. if the index really is inconsistent, the next
2245 // listing will get the latest version and resend the suggestion
2246 continue;
2247 }
2248
2249 if (cur_disk.pending_map.empty()) {
2250 if (cur_disk.exists) {
2251 rgw_bucket_category_stats& old_stats = header.stats[cur_disk.meta.category];
2252 CLS_LOG(10, "total_entries: %" PRId64 " -> %" PRId64 "", old_stats.num_entries, old_stats.num_entries - 1);
2253 old_stats.num_entries--;
2254 old_stats.total_size -= cur_disk.meta.accounted_size;
2255 old_stats.total_size_rounded -= cls_rgw_get_rounded_size(cur_disk.meta.accounted_size);
2256 old_stats.actual_size -= cur_disk.meta.size;
2257 header_changed = true;
2258 }
2259 rgw_bucket_category_stats& stats = header.stats[cur_change.meta.category];
2260 bool log_op = (op & CEPH_RGW_DIR_SUGGEST_LOG_OP) != 0;
2261 op &= CEPH_RGW_DIR_SUGGEST_OP_MASK;
2262 switch(op) {
2263 case CEPH_RGW_REMOVE:
2264 CLS_LOG(10, "CEPH_RGW_REMOVE name=%s instance=%s", cur_change.key.name.c_str(), cur_change.key.instance.c_str());
2265 ret = cls_cxx_map_remove_key(hctx, cur_change_key);
2266 if (ret < 0)
2267 return ret;
2268 if (log_op && cur_disk.exists && !header.syncstopped) {
2269 ret = log_index_operation(hctx, cur_disk.key, CLS_RGW_OP_DEL, cur_disk.tag, cur_disk.meta.mtime,
2270 cur_disk.ver, CLS_RGW_STATE_COMPLETE, header.ver, header.max_marker, 0, NULL, NULL, NULL);
2271 if (ret < 0) {
2272 CLS_LOG(0, "ERROR: %s: failed to log operation ret=%d", __func__, ret);
2273 return ret;
2274 }
2275 }
2276 break;
2277 case CEPH_RGW_UPDATE:
2278 CLS_LOG(10, "CEPH_RGW_UPDATE name=%s instance=%s total_entries: %" PRId64 " -> %" PRId64 "",
2279 cur_change.key.name.c_str(), cur_change.key.instance.c_str(), stats.num_entries, stats.num_entries + 1);
2280
2281 stats.num_entries++;
2282 stats.total_size += cur_change.meta.accounted_size;
2283 stats.total_size_rounded += cls_rgw_get_rounded_size(cur_change.meta.accounted_size);
2284 stats.actual_size += cur_change.meta.size;
2285 header_changed = true;
2286 cur_change.index_ver = header.ver;
2287 bufferlist cur_state_bl;
2288 encode(cur_change, cur_state_bl);
2289 ret = cls_cxx_map_set_val(hctx, cur_change_key, &cur_state_bl);
2290 if (ret < 0)
2291 return ret;
2292 if (log_op && !header.syncstopped) {
2293 ret = log_index_operation(hctx, cur_change.key, CLS_RGW_OP_ADD, cur_change.tag, cur_change.meta.mtime,
2294 cur_change.ver, CLS_RGW_STATE_COMPLETE, header.ver, header.max_marker, 0, NULL, NULL, NULL);
2295 if (ret < 0) {
2296 CLS_LOG(0, "ERROR: %s: failed to log operation ret=%d", __func__, ret);
2297 return ret;
2298 }
2299 }
2300 break;
2301 } // switch(op)
2302 } // if (cur_disk.pending_map.empty())
2303 } // while (!in_iter.end())
2304
2305 if (header_changed) {
2306 return write_bucket_header(hctx, &header);
2307 }
2308 return 0;
2309 }
2310
2311 static int rgw_obj_remove(cls_method_context_t hctx, bufferlist *in, bufferlist *out)
2312 {
2313 CLS_LOG(10, "entered %s", __func__);
2314 // decode request
2315 rgw_cls_obj_remove_op op;
2316 auto iter = in->cbegin();
2317 try {
2318 decode(op, iter);
2319 } catch (ceph::buffer::error& err) {
2320 CLS_LOG(0, "ERROR: %s: failed to decode request", __func__);
2321 return -EINVAL;
2322 }
2323
2324 if (op.keep_attr_prefixes.empty()) {
2325 return cls_cxx_remove(hctx);
2326 }
2327
2328 map<string, bufferlist> attrset;
2329 int ret = cls_cxx_getxattrs(hctx, &attrset);
2330 if (ret < 0 && ret != -ENOENT) {
2331 CLS_LOG(0, "ERROR: %s: cls_cxx_getxattrs() returned %d", __func__, ret);
2332 return ret;
2333 }
2334
2335 map<string, bufferlist> new_attrs;
2336 for (auto iter = op.keep_attr_prefixes.begin();
2337 iter != op.keep_attr_prefixes.end(); ++iter) {
2338 auto& check_prefix = *iter;
2339
2340 for (auto aiter = attrset.lower_bound(check_prefix);
2341 aiter != attrset.end(); ++aiter) {
2342 const string& attr = aiter->first;
2343
2344 if (attr.substr(0, check_prefix.size()) > check_prefix) {
2345 break;
2346 }
2347
2348 new_attrs[attr] = aiter->second;
2349 }
2350 }
2351
2352 CLS_LOG(20, "%s: removing object", __func__);
2353 ret = cls_cxx_remove(hctx);
2354 if (ret < 0) {
2355 CLS_LOG(0, "ERROR: %s: cls_cxx_remove returned %d", __func__, ret);
2356 return ret;
2357 }
2358
2359 if (new_attrs.empty()) {
2360 /* no data to keep */
2361 return 0;
2362 }
2363
2364 ret = cls_cxx_create(hctx, false);
2365 if (ret < 0) {
2366 CLS_LOG(0, "ERROR: %s: cls_cxx_create returned %d", __func__, ret);
2367 return ret;
2368 }
2369
2370 for (auto aiter = new_attrs.begin();
2371 aiter != new_attrs.end(); ++aiter) {
2372 const auto& attr = aiter->first;
2373
2374 ret = cls_cxx_setxattr(hctx, attr.c_str(), &aiter->second);
2375 CLS_LOG(20, "%s: setting attr: %s", __func__, attr.c_str());
2376 if (ret < 0) {
2377 CLS_LOG(0, "ERROR: %s: cls_cxx_setxattr (attr=%s) returned %d", __func__, attr.c_str(), ret);
2378 return ret;
2379 }
2380 }
2381
2382 return 0;
2383 }
2384
2385 static int rgw_obj_store_pg_ver(cls_method_context_t hctx, bufferlist *in, bufferlist *out)
2386 {
2387 CLS_LOG(10, "entered %s", __func__);
2388 // decode request
2389 rgw_cls_obj_store_pg_ver_op op;
2390 auto iter = in->cbegin();
2391 try {
2392 decode(op, iter);
2393 } catch (ceph::buffer::error& err) {
2394 CLS_LOG(0, "ERROR: %s: failed to decode request", __func__);
2395 return -EINVAL;
2396 }
2397
2398 bufferlist bl;
2399 uint64_t ver = cls_current_version(hctx);
2400 encode(ver, bl);
2401 int ret = cls_cxx_setxattr(hctx, op.attr.c_str(), &bl);
2402 if (ret < 0) {
2403 CLS_LOG(0, "ERROR: %s: cls_cxx_setxattr (attr=%s) returned %d", __func__, op.attr.c_str(), ret);
2404 return ret;
2405 }
2406
2407 return 0;
2408 }
2409
2410 static int rgw_obj_check_attrs_prefix(cls_method_context_t hctx, bufferlist *in, bufferlist *out)
2411 {
2412 CLS_LOG(10, "entered %s", __func__);
2413 // decode request
2414 rgw_cls_obj_check_attrs_prefix op;
2415 auto iter = in->cbegin();
2416 try {
2417 decode(op, iter);
2418 } catch (ceph::buffer::error& err) {
2419 CLS_LOG(0, "ERROR: %s: failed to decode request", __func__);
2420 return -EINVAL;
2421 }
2422
2423 if (op.check_prefix.empty()) {
2424 return -EINVAL;
2425 }
2426
2427 map<string, bufferlist> attrset;
2428 int ret = cls_cxx_getxattrs(hctx, &attrset);
2429 if (ret < 0 && ret != -ENOENT) {
2430 CLS_LOG(0, "ERROR: %s: cls_cxx_getxattrs() returned %d", __func__, ret);
2431 return ret;
2432 }
2433
2434 bool exist = false;
2435
2436 for (auto aiter = attrset.lower_bound(op.check_prefix);
2437 aiter != attrset.end(); ++aiter) {
2438 const auto& attr = aiter->first;
2439
2440 if (attr.substr(0, op.check_prefix.size()) > op.check_prefix) {
2441 break;
2442 }
2443
2444 exist = true;
2445 }
2446
2447 if (exist == op.fail_if_exist) {
2448 return -ECANCELED;
2449 }
2450
2451 return 0;
2452 }
2453
2454 static int rgw_obj_check_mtime(cls_method_context_t hctx, bufferlist *in, bufferlist *out)
2455 {
2456 CLS_LOG(10, "entered %s", __func__);
2457 // decode request
2458 rgw_cls_obj_check_mtime op;
2459 auto iter = in->cbegin();
2460 try {
2461 decode(op, iter);
2462 } catch (ceph::buffer::error& err) {
2463 CLS_LOG(0, "ERROR: %s: failed to decode request", __func__);
2464 return -EINVAL;
2465 }
2466
2467 real_time obj_ut;
2468 int ret = cls_cxx_stat2(hctx, NULL, &obj_ut);
2469 if (ret < 0 && ret != -ENOENT) {
2470 CLS_LOG(0, "ERROR: %s: cls_cxx_stat() returned %d", __func__, ret);
2471 return ret;
2472 }
2473 if (ret == -ENOENT) {
2474 CLS_LOG(10, "object does not exist, skipping check");
2475 }
2476
2477 ceph_timespec obj_ts = ceph::real_clock::to_ceph_timespec(obj_ut);
2478 ceph_timespec op_ts = ceph::real_clock::to_ceph_timespec(op.mtime);
2479
2480 if (!op.high_precision_time) {
2481 obj_ts.tv_nsec = 0;
2482 op_ts.tv_nsec = 0;
2483 }
2484
2485 CLS_LOG(10, "%s: obj_ut=%lld.%06lld op.mtime=%lld.%06lld", __func__,
2486 (long long)obj_ts.tv_sec, (long long)obj_ts.tv_nsec,
2487 (long long)op_ts.tv_sec, (long long)op_ts.tv_nsec);
2488
2489 bool check;
2490
2491 switch (op.type) {
2492 case CLS_RGW_CHECK_TIME_MTIME_EQ:
2493 check = (obj_ts == op_ts);
2494 break;
2495 case CLS_RGW_CHECK_TIME_MTIME_LT:
2496 check = (obj_ts < op_ts);
2497 break;
2498 case CLS_RGW_CHECK_TIME_MTIME_LE:
2499 check = (obj_ts <= op_ts);
2500 break;
2501 case CLS_RGW_CHECK_TIME_MTIME_GT:
2502 check = (obj_ts > op_ts);
2503 break;
2504 case CLS_RGW_CHECK_TIME_MTIME_GE:
2505 check = (obj_ts >= op_ts);
2506 break;
2507 default:
2508 return -EINVAL;
2509 };
2510
2511 if (!check) {
2512 return -ECANCELED;
2513 }
2514
2515 return 0;
2516 }
2517
2518 static int rgw_bi_get_op(cls_method_context_t hctx, bufferlist *in, bufferlist *out)
2519 {
2520 CLS_LOG(10, "entered %s", __func__);
2521 // decode request
2522 rgw_cls_bi_get_op op;
2523 auto iter = in->cbegin();
2524 try {
2525 decode(op, iter);
2526 } catch (ceph::buffer::error& err) {
2527 CLS_LOG(0, "ERROR: %s: failed to decode request", __func__);
2528 return -EINVAL;
2529 }
2530
2531 string idx;
2532
2533 switch (op.type) {
2534 case BIIndexType::Plain:
2535 idx = op.key.name;
2536 break;
2537 case BIIndexType::Instance:
2538 encode_obj_index_key(op.key, &idx);
2539 break;
2540 case BIIndexType::OLH:
2541 encode_olh_data_key(op.key, &idx);
2542 break;
2543 default:
2544 CLS_LOG(10, "%s: invalid key type encoding: %d",
2545 __func__, int(op.type));
2546 return -EINVAL;
2547 }
2548
2549 rgw_cls_bi_get_ret op_ret;
2550
2551 rgw_cls_bi_entry& entry = op_ret.entry;
2552
2553 entry.type = op.type;
2554 entry.idx = idx;
2555
2556 int r = cls_cxx_map_get_val(hctx, idx, &entry.data);
2557 if (r < 0) {
2558 CLS_LOG(10, "%s: cls_cxx_map_get_val() returned %d", __func__, r);
2559 return r;
2560 }
2561
2562 encode(op_ret, *out);
2563
2564 return 0;
2565 }
2566
2567 static int rgw_bi_put_op(cls_method_context_t hctx, bufferlist *in, bufferlist *out)
2568 {
2569 CLS_LOG(10, "entered %s", __func__);
2570 // decode request
2571 rgw_cls_bi_put_op op;
2572 auto iter = in->cbegin();
2573 try {
2574 decode(op, iter);
2575 } catch (ceph::buffer::error& err) {
2576 CLS_LOG(0, "ERROR: %s: failed to decode request", __func__);
2577 return -EINVAL;
2578 }
2579
2580 rgw_cls_bi_entry& entry = op.entry;
2581
2582 int r = cls_cxx_map_set_val(hctx, entry.idx, &entry.data);
2583 if (r < 0) {
2584 CLS_LOG(0, "ERROR: %s: cls_cxx_map_set_val() returned r=%d", __func__, r);
2585 }
2586
2587 return 0;
2588 }
2589
2590
2591 /* The plain entries in the bucket index are divided into two regions,
2592 * separated by the special entries that begin with 0x80. Entries below
2593 * ("Low") begin with an ASCII character; entries above ("High") begin with
2594 * a non-ASCII (e.g., UTF-8) character. This enum allows either or both
2595 * regions to be listed in list_plain_entries(). It's convenient that "Both"
2596 * sits between the other two, so we can use "<= Both" or ">= Both" logic.
2597 */
2598 enum class PlainEntriesRegion {
2599 Low, Both, High
2600 };
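
// A minimal sketch (not part of the original source; the helper names are
// hypothetical): the "<= Both" / ">= Both" convention described above means
// Low selects only the ASCII region, High only the non-ASCII region, and
// Both selects the two regions in sequence.
static inline bool sketch_lists_ascii_region(PlainEntriesRegion r)
{
  return r <= PlainEntriesRegion::Both; // Low or Both
}

static inline bool sketch_lists_non_ascii_region(PlainEntriesRegion r)
{
  return r >= PlainEntriesRegion::Both; // Both or High
}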
2601
2602
2603 /* Queries the omap for plain entries in the range of start_after_key
2604 * to end_key, non-inclusive. Both of those values must either be
2605 * before the "ugly namespace" or after it.
2606 *
2607 * Negative return values indicate errors. Non-negative return values
2608 * indicate number of entries retrieved. */
2609 static int list_plain_entries_help(cls_method_context_t hctx,
2610 const std::string& name_filter,
2611 const std::string& start_after_key, // exclusive
2612 const std::string& end_key, // exclusive
2613 uint32_t max,
2614 std::list<rgw_cls_bi_entry>* entries,
2615 bool& end_key_reached,
2616 bool& more)
2617 {
2618 CLS_LOG(10, "Entered %s: name_filter=\"%s\", start_after_key=\"%s\", end_key=\"%s\", max=%d",
2619 __func__, escape_str(name_filter).c_str(), escape_str(start_after_key).c_str(),
2620 escape_str(end_key).c_str(), max);
2621 int count = 0;
2622 std::map<std::string, bufferlist> raw_entries;
2623 int ret = cls_cxx_map_get_vals(hctx, start_after_key, name_filter, max,
2624 &raw_entries, &more);
2625 CLS_LOG(20, "%s: cls_cxx_map_get_vals ret=%d, raw_entries.size()=%lu, more=%d",
2626 __func__, ret, raw_entries.size(), more);
2627 if (ret < 0) {
2628 return ret;
2629 }
2630
2631 end_key_reached = false;
2632 for (auto iter : raw_entries) {
2633 if (!end_key.empty() && iter.first >= end_key) {
2634 CLS_LOG(20, "%s: end key reached at \"%s\"",
2635 __func__, escape_str(iter.first).c_str());
2636 end_key_reached = true;
2637 more = false;
2638 return count;
2639 }
2640
2641 rgw_bucket_dir_entry e;
2642 auto biter = iter.second.cbegin();
2643 try {
2644 decode(e, biter);
2645 } catch (ceph::buffer::error& err) {
2646 CLS_LOG(0, "ERROR: %s: failed to decode buffer for plain bucket index entry \"%s\"",
2647 __func__, escape_str(iter.first).c_str());
2648 return -EIO;
2649 }
2650
2651 if (!name_filter.empty() && e.key.name > name_filter) {
2652 CLS_LOG(20, "%s: due to filter \"%s\", skipping entry.idx=\"%s\" e.key.name=\"%s\"",
2653 __func__,
2654 escape_str(name_filter).c_str(),
2655 escape_str(iter.first).c_str(),
2656 escape_str(e.key.name).c_str());
2657 // skip the rest of the entries
2658 more = false;
2659 end_key_reached = true;
2660 return count;
2661 }
2662
2663 rgw_cls_bi_entry entry;
2664 entry.type = BIIndexType::Plain;
2665 entry.idx = iter.first;
2666 entry.data = iter.second;
2667
2668 entries->push_back(entry);
2669 count++;
2670
2671 CLS_LOG(20, "%s: adding entry %d entry.idx=\"%s\" e.key.name=\"%s\"",
2672 __func__,
2673 count,
2674 escape_str(entry.idx).c_str(),
2675 escape_str(e.key.name).c_str());
2676
2677 if (count >= int(max)) {
2678 // NB: this looks redundant, but leave it in for the time being
2679 return count;
2680 }
2681 } // iter for loop
2682
2683 return count;
2684 } // list_plain_entries_help
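
// Illustrative sketch (not part of the original source; the helper name and
// the max of 100 are hypothetical): one way list_plain_entries_help() could
// be invoked to scan only the ASCII ("low") plain region, i.e. from the very
// beginning of the omap up to, but not including, the special 0x80-prefixed
// namespace.
static int sketch_list_low_plain_region(cls_method_context_t hctx,
                                        std::list<rgw_cls_bi_entry>* entries)
{
  bool end_key_reached = false;
  bool more = false;
  // empty name filter and empty start_after_key: start at the first plain
  // entry; BI_PREFIX_BEGIN is the exclusive upper bound of the ASCII region
  return list_plain_entries_help(hctx, "", "", BI_PREFIX_BEGIN, 100,
                                 entries, end_key_reached, more);
}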
2685
2686 /*
2687 * Lists plain entries in either or both regions: the region of entries
2688 * beginning with an ASCII character and the region of entries beginning
2689 * with a non-ASCII character. The two regions surround the "ugly"
2690 * namespace used by the special entries for versioned buckets.
2691 *
2692 * The entries parameter is not cleared; additional entries are
2693 * appended to it.
2694 */
2695 static int list_plain_entries(cls_method_context_t hctx,
2696 const std::string& name_filter,
2697 const std::string& marker,
2698 uint32_t max,
2699 std::list<rgw_cls_bi_entry>* entries,
2700 bool* pmore,
2701 const PlainEntriesRegion region = PlainEntriesRegion::Both)
2702 {
2703 CLS_LOG(10, "entered %s: name_filter=\"%s\", marker=\"%s\", max=%d, region=%d",
2704 __func__, escape_str(name_filter).c_str(), escape_str(marker).c_str(), max, static_cast<int>(region));
2705 int r = 0;
2706 bool end_key_reached = false;
2707 bool more = false;
2708 const size_t start_size = entries->size();
2709
2710 if (region <= PlainEntriesRegion::Both && marker < BI_PREFIX_BEGIN) {
2711 // listing ascii plain namespace
2712 int r = list_plain_entries_help(hctx, name_filter, marker, BI_PREFIX_BEGIN, max,
2713 entries, end_key_reached, more);
2714 CLS_LOG(20, "%s: first list_plain_entries_help r=%d, end_key_reached=%d, more=%d",
2715 __func__, r, end_key_reached, more);
2716 if (r < 0) {
2717 return r;
2718 }
2719
2720 // see if we're done for this call (there may be more for a later call)
2721 if (r >= int(max) || !end_key_reached || (!more && region == PlainEntriesRegion::Low)) {
2722 if (pmore) {
2723 *pmore = more;
2724 }
2725
2726 return int(entries->size() - start_size);
2727 }
2728
2729 max = max - r;
2730 }
2731
2732 if (region >= PlainEntriesRegion::Both) {
2733 const std::string start_after_key = std::max(marker, BI_PREFIX_END);
2734
2735 // listing non-ascii plain namespace
2736 r = list_plain_entries_help(hctx, name_filter, start_after_key, {}, max,
2737 entries, end_key_reached, more);
2738 CLS_LOG(20, "%s: second list_plain_entries_help r=%d, end_key_reached=%d, more=%d",
2739 __func__, r, end_key_reached, more);
2740 if (r < 0) {
2741 return r;
2742 }
2743 }
2744
2745 if (pmore) {
2746 *pmore = more;
2747 }
2748
2749 return int(entries->size() - start_size);
2750 }
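
// A minimal sketch (hypothetical helper, not part of cls_rgw) of the key
// ranges a PlainEntriesRegion::Both listing walks in list_plain_entries()
// above, for a marker that still lies in the ASCII region.
static inline void sketch_both_region_bounds(const std::string& marker,
                                             std::string* low_end,
                                             std::string* high_start)
{
  // low (ASCII) region: keys in (marker, BI_PREFIX_BEGIN), both ends exclusive
  *low_end = BI_PREFIX_BEGIN;
  // high (non-ASCII) region: starts after whichever is greater, the caller's
  // marker or the end of the special 0x80 namespace
  *high_start = std::max(marker, BI_PREFIX_END);
}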
2751
2752 static int list_instance_entries(cls_method_context_t hctx,
2753 const string& name,
2754 const string& marker,
2755 uint32_t max,
2756 list<rgw_cls_bi_entry> *entries,
2757 bool *pmore)
2758 {
2759 cls_rgw_obj_key key(name);
2760 string first_instance_idx;
2761 encode_obj_versioned_data_key(key, &first_instance_idx);
2762 string start_after_key;
2763
2764 if (!name.empty()) {
2765 start_after_key = first_instance_idx;
2766 } else {
2767 start_after_key = BI_PREFIX_CHAR;
2768 start_after_key.append(bucket_index_prefixes[BI_BUCKET_OBJ_INSTANCE_INDEX]);
2769 }
2770 string filter = start_after_key;
2771 if (bi_entry_gt(marker, start_after_key)) {
2772 start_after_key = marker;
2773 }
2774 int count = 0;
2775 map<string, bufferlist> keys;
2776 bufferlist k;
2777 int ret = cls_cxx_map_get_val(hctx, start_after_key, &k);
2778 if (ret < 0 && ret != -ENOENT) {
2779 return ret;
2780 }
2781 // we need to include the exact match if a filter (name) is
2782 // specified and the marker has not yet advanced (i.e., been set)
2783 bool found_first = (ret == 0) && (start_after_key != marker);
2784 if (found_first) {
2785 --max;
2786 }
2787 if (max > 0) {
2788 ret = cls_cxx_map_get_vals(hctx, start_after_key, string(), max,
2789 &keys, pmore);
2790 CLS_LOG(20, "%s: start_after_key=\"%s\" first_instance_idx=\"%s\" keys.size()=%d",
2791 __func__, escape_str(start_after_key).c_str(),
2792 escape_str(first_instance_idx).c_str(), (int)keys.size());
2793 if (ret < 0) {
2794 return ret;
2795 }
2796 }
2797 if (found_first) {
2798 keys[start_after_key] = std::move(k);
2799 }
2800
2801 for (auto iter = keys.begin(); iter != keys.end(); ++iter) {
2802 rgw_cls_bi_entry entry;
2803 entry.type = BIIndexType::Instance;
2804 entry.idx = iter->first;
2805 entry.data = iter->second;
2806
2807 if (!filter.empty() && entry.idx.compare(0, filter.size(), filter) != 0) {
2808 /* we are skipping the rest of the entries */
2809 if (pmore) {
2810 *pmore = false;
2811 }
2812 return count;
2813 }
2814
2815 CLS_LOG(20, "%s: entry.idx=\"%s\"", __func__, escape_str(entry.idx).c_str());
2816
2817 auto biter = entry.data.cbegin();
2818
2819 rgw_bucket_dir_entry e;
2820 try {
2821 decode(e, biter);
2822 } catch (ceph::buffer::error& err) {
2823 CLS_LOG(0, "ERROR: %s: failed to decode buffer (size=%d)", __func__, entry.data.length());
2824 return -EIO;
2825 }
2826
2827 if (!name.empty() && e.key.name != name) {
2828 /* we are skipping the rest of the entries */
2829 if (pmore) {
2830 *pmore = false;
2831 }
2832 return count;
2833 }
2834
2835 entries->push_back(entry);
2836 count++;
2837 start_after_key = entry.idx;
2838 }
2839
2840 return count;
2841 }
2842
2843 static int list_olh_entries(cls_method_context_t hctx,
2844 const string& name,
2845 const string& marker,
2846 uint32_t max,
2847 list<rgw_cls_bi_entry> *entries,
2848 bool *pmore)
2849 {
2850 cls_rgw_obj_key key(name);
2851 string first_instance_idx;
2852 encode_olh_data_key(key, &first_instance_idx);
2853 string start_after_key;
2854
2855 if (!name.empty()) {
2856 start_after_key = first_instance_idx;
2857 } else {
2858 start_after_key = BI_PREFIX_CHAR;
2859 start_after_key.append(bucket_index_prefixes[BI_BUCKET_OLH_DATA_INDEX]);
2860 }
2861 string filter = start_after_key;
2862 if (bi_entry_gt(marker, start_after_key)) {
2863 start_after_key = marker;
2864 }
2865 int count = 0;
2866 map<string, bufferlist> keys;
2867 int ret;
2868 bufferlist k;
2869 ret = cls_cxx_map_get_val(hctx, start_after_key, &k);
2870 if (ret < 0 && ret != -ENOENT) {
2871 return ret;
2872 }
2873 // we need to include the exact match if a filter (name) is
2874 // specified and the marker has not yet advanced (i.e., been set)
2875 bool found_first = (ret == 0) && (start_after_key != marker);
2876 if (found_first) {
2877 --max;
2878 }
2879 if (max > 0) {
2880 ret = cls_cxx_map_get_vals(hctx, start_after_key, string(), max,
2881 &keys, pmore);
2882 CLS_LOG(20, "%s: start_after_key=\"%s\", first_instance_idx=\"%s\", keys.size()=%d",
2883 __func__, escape_str(start_after_key).c_str(),
2884 escape_str(first_instance_idx).c_str(), (int)keys.size());
2885 if (ret < 0) {
2886 return ret;
2887 }
2888 }
2889
2890 if (found_first) {
2891 keys[start_after_key] = std::move(k);
2892 }
2893
2894 for (auto iter = keys.begin(); iter != keys.end(); ++iter) {
2895 rgw_cls_bi_entry entry;
2896 entry.type = BIIndexType::OLH;
2897 entry.idx = iter->first;
2898 entry.data = iter->second;
2899
2900 if (!filter.empty() && entry.idx.compare(0, filter.size(), filter) != 0) {
2901 /* we are skipping the rest of the entries */
2902 if (pmore) {
2903 *pmore = false;
2904 }
2905 return count;
2906 }
2907
2908 CLS_LOG(20, "%s: entry.idx=\"%s\"", __func__, escape_str(entry.idx).c_str());
2909
2910 auto biter = entry.data.cbegin();
2911
2912 rgw_bucket_olh_entry e;
2913 try {
2914 decode(e, biter);
2915 } catch (ceph::buffer::error& err) {
2916 CLS_LOG(0, "ERROR: %s: failed to decode buffer (size=%d)", __func__, entry.data.length());
2917 return -EIO;
2918 }
2919
2920 if (!name.empty() && e.key.name != name) {
2921 /* we are skipping the rest of the entries */
2922 if (pmore) {
2923 *pmore = false;
2924 }
2925 return count;
2926 }
2927
2928 entries->push_back(entry);
2929 count++;
2930 start_after_key = entry.idx;
2931 }
2932
2933 return count;
2934 }
2935
2936 /* Lists all the entries that appear in a bucket index listing.
2937 *
2938 * It may not be obvious why this function calls three other "segment"
2939 * functions (list_plain_entries (twice), list_instance_entries,
2940 * list_olh_entries) that each list segments of the index space rather
2941 * than just move a marker through the space from start to end. The
2942 * reason is that a name filter may be provided in the op, and in that
2943 * case most entries will be skipped over, and small segments within
2944 * each larger segment will be listed.
2945 *
2946 * Ideally, each of the three segment functions should be able to
2947 * handle a marker and filter, if either/both is provided,
2948 * efficiently. So, for example, if the marker is after the segment,
2949 * ideally return quickly rather than iterating through entries in the
2950 * segment.
2951 *
2952 * Additionally, each of the three segment functions, if successful,
2953 * is expected to return the number of entries added to the output
2954 * list as a non-negative value. As per usual, negative return values
2955 * indicate error conditions.
2956 */
2957 static int rgw_bi_list_op(cls_method_context_t hctx,
2958 bufferlist *in,
2959 bufferlist *out)
2960 {
2961 CLS_LOG(10, "entered %s", __func__);
2962 // decode request
2963 rgw_cls_bi_list_op op;
2964 auto iter = in->cbegin();
2965 try {
2966 decode(op, iter);
2967 } catch (ceph::buffer::error& err) {
2968 CLS_LOG(0, "ERROR: %s: failed to decode request", __func__);
2969 return -EINVAL;
2970 }
2971
2972 constexpr uint32_t MAX_BI_LIST_ENTRIES = 1000;
2973 const uint32_t max = std::min(op.max, MAX_BI_LIST_ENTRIES);
2974
2975 CLS_LOG(20, "%s: op.marker=\"%s\", op.name_filter=\"%s\", op.max=%u max=%u",
2976 __func__, escape_str(op.marker).c_str(), escape_str(op.name_filter).c_str(),
2977 op.max, max);
2978
2979 int ret;
2980 uint32_t count = 0;
2981 bool more = false;
2982 rgw_cls_bi_list_ret op_ret;
2983
2984 ret = list_plain_entries(hctx, op.name_filter, op.marker, max,
2985 &op_ret.entries, &more, PlainEntriesRegion::Low);
2986 if (ret < 0) {
2987 CLS_LOG(0, "ERROR: %s: list_plain_entries (low) returned ret=%d, marker=\"%s\", filter=\"%s\", max=%d",
2988 __func__, ret, escape_str(op.marker).c_str(), escape_str(op.name_filter).c_str(), max);
2989 return ret;
2990 }
2991
2992 count = ret;
2993 CLS_LOG(20, "%s: found %d plain ascii (low) entries, count=%u", __func__, ret, count);
2994
2995 if (!more) {
2996 ret = list_instance_entries(hctx, op.name_filter, op.marker, max - count, &op_ret.entries, &more);
2997 if (ret < 0) {
2998 CLS_LOG(0, "ERROR: %s: list_instance_entries returned ret=%d", __func__, ret);
2999 return ret;
3000 }
3001
3002 count += ret;
3003 CLS_LOG(20, "%s: found %d instance entries, count=%u", __func__, ret, count);
3004 }
3005
3006 if (!more) {
3007 ret = list_olh_entries(hctx, op.name_filter, op.marker, max - count, &op_ret.entries, &more);
3008 if (ret < 0) {
3009 CLS_LOG(0, "ERROR: %s: list_olh_entries returned ret=%d", __func__, ret);
3010 return ret;
3011 }
3012
3013 count += ret;
3014 CLS_LOG(20, "%s: found %d olh entries, count=%u", __func__, ret, count);
3015 }
3016
3017 if (!more) {
3018 ret = list_plain_entries(hctx, op.name_filter, op.marker, max - count,
3019 &op_ret.entries, &more, PlainEntriesRegion::High);
3020 if (ret < 0) {
3021 CLS_LOG(0, "ERROR: %s: list_plain_entries (high) returned ret=%d, marker=\"%s\", filter=\"%s\", max=%d",
3022 __func__, ret, escape_str(op.marker).c_str(), escape_str(op.name_filter).c_str(), max);
3023 return ret;
3024 }
3025
3026 count += ret;
3027 CLS_LOG(20, "%s: found %d non-ascii (high) plain entries, count=%u", __func__, ret, count);
3028 }
3029
3030 op_ret.is_truncated = (count > max) || more;
3031 while (count > max) {
3032 op_ret.entries.pop_back();
3033 count--;
3034 }
3035
3036 CLS_LOG(20, "%s: returning %lu entries, is_truncated=%d", __func__, op_ret.entries.size(), op_ret.is_truncated);
3037 encode(op_ret, *out);
3038
3039 return 0;
3040 } // rgw_bi_list_op
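
// Illustrative sketch (hypothetical helper, not part of cls_rgw): how a
// caller of the bi_list operation above might derive the marker for its next
// request from the previous reply, per the is_truncated contract.
static std::string sketch_next_bi_list_marker(const rgw_cls_bi_list_ret& ret,
                                              const std::string& prev_marker)
{
  if (!ret.is_truncated || ret.entries.empty()) {
    // listing is complete; there is no further page to request
    return prev_marker;
  }
  // resume immediately after the last index key that was returned
  return ret.entries.back().idx;
}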
3041
3042
3043 int bi_log_record_decode(bufferlist& bl, rgw_bi_log_entry& e)
3044 {
3045 auto iter = bl.cbegin();
3046 try {
3047 decode(e, iter);
3048 } catch (ceph::buffer::error& err) {
3049 CLS_LOG(0, "ERROR: failed to decode rgw_bi_log_entry");
3050 return -EIO;
3051 }
3052 return 0;
3053 }
3054
3055
3056 static int bi_log_iterate_entries(cls_method_context_t hctx,
3057 const string& marker,
3058 const string& end_marker,
3059 string& key_iter,
3060 uint32_t max_entries,
3061 bool *truncated,
3062 int (*cb)(cls_method_context_t, const string&, rgw_bi_log_entry&, void *),
3063 void *param)
3064 {
3065 CLS_LOG(10, "bi_log_iterate_range");
3066
3067 map<string, bufferlist> keys;
3068 string filter_prefix, end_key;
3069 uint32_t i = 0;
3070 string key;
3071
3072 if (truncated)
3073 *truncated = false;
3074
3075 string start_after_key;
3076 if (key_iter.empty()) {
3077 key = BI_PREFIX_CHAR;
3078 key.append(bucket_index_prefixes[BI_BUCKET_LOG_INDEX]);
3079 key.append(marker);
3080
3081 start_after_key = key;
3082 } else {
3083 start_after_key = key_iter;
3084 }
3085
3086 if (end_marker.empty()) {
3087 end_key = BI_PREFIX_CHAR;
3088 end_key.append(bucket_index_prefixes[BI_BUCKET_LOG_INDEX + 1]);
3089 } else {
3090 end_key = BI_PREFIX_CHAR;
3091 end_key.append(bucket_index_prefixes[BI_BUCKET_LOG_INDEX]);
3092 end_key.append(end_marker);
3093 }
3094
3095 CLS_LOG(10, "bi_log_iterate_entries start_after_key=%s end_key=%s",
3096 start_after_key.c_str(), end_key.c_str());
3097
3098 string filter;
3099
3100 int ret = cls_cxx_map_get_vals(hctx, start_after_key, filter, max_entries,
3101 &keys, truncated);
3102 if (ret < 0)
3103 return ret;
3104
3105 auto iter = keys.begin();
3106 if (iter == keys.end())
3107 return 0;
3108
3109 uint32_t num_keys = keys.size();
3110
3111 for (; iter != keys.end(); ++iter,++i) {
3112 const string& key = iter->first;
3113 rgw_bi_log_entry e;
3114
3115 CLS_LOG(10, "bi_log_iterate_entries key=%s bl.length=%d", key.c_str(), (int)iter->second.length());
3116
3117 if (key.compare(end_key) > 0) {
3118 key_iter = key;
3119 if (truncated) {
3120 *truncated = false;
3121 }
3122 return 0;
3123 }
3124
3125 ret = bi_log_record_decode(iter->second, e);
3126 if (ret < 0)
3127 return ret;
3128
3129 ret = cb(hctx, key, e, param);
3130 if (ret < 0)
3131 return ret;
3132
3133 if (i == num_keys - 1) {
3134 key_iter = key;
3135 }
3136 }
3137
3138 return 0;
3139 }
3140
3141 static int bi_log_list_cb(cls_method_context_t hctx, const string& key, rgw_bi_log_entry& info, void *param)
3142 {
3143 list<rgw_bi_log_entry> *l = (list<rgw_bi_log_entry> *)param;
3144 l->push_back(info);
3145 return 0;
3146 }
3147
3148 static int bi_log_list_entries(cls_method_context_t hctx, const string& marker,
3149 uint32_t max, list<rgw_bi_log_entry>& entries, bool *truncated)
3150 {
3151 string key_iter;
3152 string end_marker;
3153 int ret = bi_log_iterate_entries(hctx, marker, end_marker,
3154 key_iter, max, truncated,
3155 bi_log_list_cb, &entries);
3156 return ret;
3157 }
3158
3159 static int rgw_bi_log_list(cls_method_context_t hctx, bufferlist *in, bufferlist *out)
3160 {
3161 CLS_LOG(10, "entered %s", __func__);
3162 auto in_iter = in->cbegin();
3163
3164 cls_rgw_bi_log_list_op op;
3165 try {
3166 decode(op, in_iter);
3167 } catch (ceph::buffer::error& err) {
3168 CLS_LOG(1, "ERROR: rgw_bi_log_list(): failed to decode entry\n");
3169 return -EINVAL;
3170 }
3171
3172 cls_rgw_bi_log_list_ret op_ret;
3173 int ret = bi_log_list_entries(hctx, op.marker, op.max, op_ret.entries, &op_ret.truncated);
3174 if (ret < 0)
3175 return ret;
3176
3177 encode(op_ret, *out);
3178
3179 return 0;
3180 }
3181
3182 static int rgw_bi_log_trim(cls_method_context_t hctx, bufferlist *in, bufferlist *out)
3183 {
3184 CLS_LOG(10, "entered %s", __func__);
3185 auto in_iter = in->cbegin();
3186
3187 cls_rgw_bi_log_trim_op op;
3188 try {
3189 decode(op, in_iter);
3190 } catch (ceph::buffer::error& err) {
3191 CLS_LOG(1, "ERROR: rgw_bi_log_list(): failed to decode entry\n");
3192 return -EINVAL;
3193 }
3194
3195 string key_begin(1, BI_PREFIX_CHAR);
3196 key_begin.append(bucket_index_prefixes[BI_BUCKET_LOG_INDEX]);
3197 key_begin.append(op.start_marker);
3198
3199 string key_end;
3200 if (op.end_marker.empty()) {
3201 key_end = BI_PREFIX_CHAR;
3202 key_end.append(bucket_index_prefixes[BI_BUCKET_LOG_INDEX + 1]);
3203 } else {
3204 key_end = BI_PREFIX_CHAR;
3205 key_end.append(bucket_index_prefixes[BI_BUCKET_LOG_INDEX]);
3206 key_end.append(op.end_marker);
3207 // cls_cxx_map_remove_range() expects one-past-end
3208 key_end.append(1, '\0');
3209 }
3210
3211 // list a single key to detect whether the range is empty
3212 const size_t max_entries = 1;
3213 std::set<std::string> keys;
3214 bool more = false;
3215
3216 int rc = cls_cxx_map_get_keys(hctx, key_begin, max_entries, &keys, &more);
3217 if (rc < 0) {
3218 CLS_LOG(1, "ERROR: cls_cxx_map_get_keys failed rc=%d", rc);
3219 return rc;
3220 }
3221
3222 if (keys.empty()) {
3223 CLS_LOG(20, "range is empty key_begin=%s", key_begin.c_str());
3224 return -ENODATA;
3225 }
3226
3227 const std::string& first_key = *keys.begin();
3228 if (key_end < first_key) {
3229 CLS_LOG(20, "listed key %s past key_end=%s", first_key.c_str(), key_end.c_str());
3230 return -ENODATA;
3231 }
3232
3233 CLS_LOG(20, "listed key %s, removing through %s",
3234 first_key.c_str(), key_end.c_str());
3235
3236 rc = cls_cxx_map_remove_range(hctx, first_key, key_end);
3237 if (rc < 0) {
3238 CLS_LOG(1, "ERROR: cls_cxx_map_remove_range failed rc=%d", rc);
3239 return rc;
3240 }
3241 return 0;
3242 }
3243
3244 static int rgw_bi_log_resync(cls_method_context_t hctx, bufferlist *in, bufferlist *out)
3245 {
3246 CLS_LOG(10, "entered %s", __func__);
3247 rgw_bucket_dir_header header;
3248 int rc = read_bucket_header(hctx, &header);
3249 if (rc < 0) {
3250 CLS_LOG(1, "ERROR: rgw_bucket_complete_op(): failed to read header\n");
3251 return rc;
3252 }
3253
3254 bufferlist bl;
3255
3256 rgw_bi_log_entry entry;
3257
3258 entry.timestamp = real_clock::now();
3259 entry.op = RGWModifyOp::CLS_RGW_OP_RESYNC;
3260 entry.state = RGWPendingState::CLS_RGW_STATE_COMPLETE;
3261
3262 string key;
3263 bi_log_index_key(hctx, key, entry.id, header.ver);
3264
3265 encode(entry, bl);
3266
3267 if (entry.id > header.max_marker)
3268 header.max_marker = entry.id;
3269
3270 header.syncstopped = false;
3271
3272 rc = cls_cxx_map_set_val(hctx, key, &bl);
3273 if (rc < 0)
3274 return rc;
3275
3276 return write_bucket_header(hctx, &header);
3277 }
3278
3279 static int rgw_bi_log_stop(cls_method_context_t hctx, bufferlist *in, bufferlist *out)
3280 {
3281 CLS_LOG(10, "entered %s", __func__);
3282 rgw_bucket_dir_header header;
3283 int rc = read_bucket_header(hctx, &header);
3284 if (rc < 0) {
3285 CLS_LOG(1, "ERROR: rgw_bucket_complete_op(): failed to read header\n");
3286 return rc;
3287 }
3288
3289 bufferlist bl;
3290
3291 rgw_bi_log_entry entry;
3292
3293 entry.timestamp = real_clock::now();
3294 entry.op = RGWModifyOp::CLS_RGW_OP_SYNCSTOP;
3295 entry.state = RGWPendingState::CLS_RGW_STATE_COMPLETE;
3296
3297 string key;
3298 bi_log_index_key(hctx, key, entry.id, header.ver);
3299
3300 encode(entry, bl);
3301
3302 if (entry.id > header.max_marker)
3303 header.max_marker = entry.id;
3304 header.syncstopped = true;
3305
3306 rc = cls_cxx_map_set_val(hctx, key, &bl);
3307 if (rc < 0)
3308 return rc;
3309
3310 return write_bucket_header(hctx, &header);
3311 }
3312
3313
3314 static void usage_record_prefix_by_time(uint64_t epoch, string& key)
3315 {
3316 char buf[32];
3317 snprintf(buf, sizeof(buf), "%011llu", (long long unsigned)epoch);
3318 key = buf;
3319 }
3320
3321 static void usage_record_prefix_by_user(const string& user, uint64_t epoch, string& key)
3322 {
3323 char buf[user.size() + 32];
3324 snprintf(buf, sizeof(buf), "%s_%011llu_", user.c_str(), (long long unsigned)epoch);
3325 key = buf;
3326 }
3327
3328 static void usage_record_name_by_time(uint64_t epoch, const string& user, const string& bucket, string& key)
3329 {
3330 char buf[32 + user.size() + bucket.size()];
3331 snprintf(buf, sizeof(buf), "%011llu_%s_%s", (long long unsigned)epoch, user.c_str(), bucket.c_str());
3332 key = buf;
3333 }
3334
3335 static void usage_record_name_by_user(const string& user, uint64_t epoch, const string& bucket, string& key)
3336 {
3337 char buf[32 + user.size() + bucket.size()];
3338 snprintf(buf, sizeof(buf), "%s_%011llu_%s", user.c_str(), (long long unsigned)epoch, bucket.c_str());
3339 key = buf;
3340 }
3341
3342 static int usage_record_decode(bufferlist& record_bl, rgw_usage_log_entry& e)
3343 {
3344 auto kiter = record_bl.cbegin();
3345 try {
3346 decode(e, kiter);
3347 } catch (ceph::buffer::error& err) {
3348 CLS_LOG(1, "ERROR: usage_record_decode(): failed to decode record_bl\n");
3349 return -EINVAL;
3350 }
3351
3352 return 0;
3353 }
3354
3355 int rgw_user_usage_log_add(cls_method_context_t hctx, bufferlist *in, bufferlist *out)
3356 {
3357 CLS_LOG(10, "entered %s", __func__);
3358
3359 auto in_iter = in->cbegin();
3360 rgw_cls_usage_log_add_op op;
3361
3362 try {
3363 decode(op, in_iter);
3364 } catch (ceph::buffer::error& err) {
3365 CLS_LOG(1, "ERROR: rgw_user_usage_log_add(): failed to decode request\n");
3366 return -EINVAL;
3367 }
3368
3369 rgw_usage_log_info& info = op.info;
3370
3371 for (auto iter = info.entries.begin(); iter != info.entries.end(); ++iter) {
3372 rgw_usage_log_entry& entry = *iter;
3373 string key_by_time;
3374
3375 rgw_user *puser = (entry.payer.empty() ? &entry.owner : &entry.payer);
3376
3377 usage_record_name_by_time(entry.epoch, puser->to_str(), entry.bucket, key_by_time);
3378
3379 CLS_LOG(10, "rgw_user_usage_log_add user=%s bucket=%s", puser->to_str().c_str(), entry.bucket.c_str());
3380
3381 bufferlist record_bl;
3382 int ret = cls_cxx_map_get_val(hctx, key_by_time, &record_bl);
3383 if (ret < 0 && ret != -ENOENT) {
3384 CLS_LOG(1, "ERROR: rgw_user_usage_log_add(): cls_cxx_map_read_key returned %d", ret);
3385 return -EINVAL;
3386 }
3387 if (ret >= 0) {
3388 rgw_usage_log_entry e;
3389 ret = usage_record_decode(record_bl, e);
3390 if (ret < 0)
3391 return ret;
3392 CLS_LOG(10, "rgw_user_usage_log_add aggregating existing bucket\n");
3393 entry.aggregate(e);
3394 }
3395
3396 bufferlist new_record_bl;
3397 encode(entry, new_record_bl);
3398 ret = cls_cxx_map_set_val(hctx, key_by_time, &new_record_bl);
3399 if (ret < 0)
3400 return ret;
3401
3402 string key_by_user;
3403 usage_record_name_by_user(puser->to_str(), entry.epoch, entry.bucket, key_by_user);
3404 ret = cls_cxx_map_set_val(hctx, key_by_user, &new_record_bl);
3405 if (ret < 0)
3406 return ret;
3407 }
3408
3409 return 0;
3410 }
3411
3412 static int usage_iterate_range(cls_method_context_t hctx, uint64_t start, uint64_t end, const string& user,
3413 const string& bucket, string& key_iter, uint32_t max_entries, bool *truncated,
3414 int (*cb)(cls_method_context_t, const string&, rgw_usage_log_entry&, void *),
3415 void *param)
3416 {
3417 CLS_LOG(10, "entered %s", __func__);
3418
3419 map<string, bufferlist> keys;
3420 string filter_prefix;
3421 string start_key, end_key;
3422 bool by_user = !user.empty();
3423 string user_key;
3424 bool truncated_status = false;
3425
3426 ceph_assert(truncated != nullptr);
3427
3428 if (!by_user) {
3429 usage_record_prefix_by_time(end, end_key);
3430 } else {
3431 user_key = user;
3432 user_key.append("_");
3433 }
3434
3435 if (key_iter.empty()) {
3436 if (by_user) {
3437 usage_record_prefix_by_user(user, start, start_key);
3438 } else {
3439 usage_record_prefix_by_time(start, start_key);
3440 }
3441 } else {
3442 start_key = key_iter;
3443 }
3444
3445 CLS_LOG(20, "usage_iterate_range start_key=%s", start_key.c_str());
3446 int ret = cls_cxx_map_get_vals(hctx, start_key, filter_prefix, max_entries, &keys, &truncated_status);
3447 if (ret < 0)
3448 return ret;
3449
3450 *truncated = truncated_status;
3451
3452 auto iter = keys.begin();
3453 if (iter == keys.end())
3454 return 0;
3455
3456 for (; iter != keys.end(); ++iter) {
3457 const string& key = iter->first;
3458 rgw_usage_log_entry e;
3459
3460 key_iter = key;
3461 if (!by_user && key.compare(end_key) >= 0) {
3462 CLS_LOG(20, "usage_iterate_range reached key=%s, done", key.c_str());
3463 *truncated = false;
3464 key_iter = key;
3465 return 0;
3466 }
3467
3468 if (by_user && key.compare(0, user_key.size(), user_key) != 0) {
3469 CLS_LOG(20, "usage_iterate_range reached key=%s, done", key.c_str());
3470 *truncated = false;
3471 key_iter = key;
3472 return 0;
3473 }
3474
3475 ret = usage_record_decode(iter->second, e);
3476 if (ret < 0)
3477 return ret;
3478
3479 if (!bucket.empty() && bucket.compare(e.bucket))
3480 continue;
3481
3482 if (e.epoch < start)
3483 continue;
3484
3485 /* keys are sorted by epoch, so once we're past end we're done */
3486 if (e.epoch >= end) {
3487 *truncated = false;
3488 return 0;
3489 }
3490
3491 ret = cb(hctx, key, e, param);
3492 if (ret < 0)
3493 return ret;
3494 }
3495 return 0;
3496 }
3497
3498 static int usage_log_read_cb(cls_method_context_t hctx, const string& key, rgw_usage_log_entry& entry, void *param)
3499 {
3500 map<rgw_user_bucket, rgw_usage_log_entry> *usage = (map<rgw_user_bucket, rgw_usage_log_entry> *)param;
3501 rgw_user *puser;
3502 if (!entry.payer.empty()) {
3503 puser = &entry.payer;
3504 } else {
3505 puser = &entry.owner;
3506 }
3507 rgw_user_bucket ub(puser->to_str(), entry.bucket);
3508 rgw_usage_log_entry& le = (*usage)[ub];
3509 le.aggregate(entry);
3510
3511 return 0;
3512 }
3513
3514 int rgw_user_usage_log_read(cls_method_context_t hctx, bufferlist *in, bufferlist *out)
3515 {
3516 CLS_LOG(10, "entered %s", __func__);
3517
3518 auto in_iter = in->cbegin();
3519 rgw_cls_usage_log_read_op op;
3520
3521 try {
3522 decode(op, in_iter);
3523 } catch (ceph::buffer::error& err) {
3524 CLS_LOG(1, "ERROR: rgw_user_usage_log_read(): failed to decode request\n");
3525 return -EINVAL;
3526 }
3527
3528 rgw_cls_usage_log_read_ret ret_info;
3529 map<rgw_user_bucket, rgw_usage_log_entry> *usage = &ret_info.usage;
3530 string iter = op.iter;
3531 #define MAX_ENTRIES 1000
3532 uint32_t max_entries = (op.max_entries ? op.max_entries : MAX_ENTRIES);
3533 int ret = usage_iterate_range(hctx, op.start_epoch, op.end_epoch, op.owner, op.bucket, iter, max_entries, &ret_info.truncated, usage_log_read_cb, (void *)usage);
3534 if (ret < 0)
3535 return ret;
3536
3537 if (ret_info.truncated)
3538 ret_info.next_iter = iter;
3539
3540 encode(ret_info, *out);
3541 return 0;
3542 }
3543
3544 static int usage_log_trim_cb(cls_method_context_t hctx, const string& key, rgw_usage_log_entry& entry, void *param)
3545 {
3546 bool *found = (bool *)param;
3547 if (found) {
3548 *found = true;
3549 }
3550 string key_by_time;
3551 string key_by_user;
3552
3553 string o = entry.owner.to_str();
3554 usage_record_name_by_time(entry.epoch, o, entry.bucket, key_by_time);
3555 usage_record_name_by_user(o, entry.epoch, entry.bucket, key_by_user);
3556
3557 int ret = cls_cxx_map_remove_key(hctx, key_by_time);
3558 if (ret < 0)
3559 return ret;
3560
3561 return cls_cxx_map_remove_key(hctx, key_by_user);
3562 }
3563
3564 int rgw_user_usage_log_trim(cls_method_context_t hctx, bufferlist *in, bufferlist *out)
3565 {
3566 CLS_LOG(10, "entered %s", __func__);
3567
3568 /* only continue if object exists! */
3569 int ret = cls_cxx_stat(hctx, NULL, NULL);
3570 if (ret < 0)
3571 return ret;
3572
3573 auto in_iter = in->cbegin();
3574 rgw_cls_usage_log_trim_op op;
3575
3576 try {
3577 decode(op, in_iter);
3578 } catch (ceph::buffer::error& err) {
3579 CLS_LOG(1, "ERROR: rgw_user_log_usage_log_trim(): failed to decode request\n");
3580 return -EINVAL;
3581 }
3582
3583 string iter;
3584 bool more;
3585 bool found = false;
3586 #define MAX_USAGE_TRIM_ENTRIES 1000
3587 ret = usage_iterate_range(hctx, op.start_epoch, op.end_epoch, op.user, op.bucket, iter, MAX_USAGE_TRIM_ENTRIES, &more, usage_log_trim_cb, (void *)&found);
3588 if (ret < 0)
3589 return ret;
3590
3591 if (!more && !found)
3592 return -ENODATA;
3593
3594 return 0;
3595 }
3596
3597 int rgw_usage_log_clear(cls_method_context_t hctx, bufferlist *in, bufferlist *out)
3598 {
3599 CLS_LOG(10, "entered %s", __func__);
3600
3601 int ret = cls_cxx_map_clear(hctx);
3602 /* if object doesn't exist all the logs are cleared anyway */
3603 if (ret == -ENOENT)
3604 ret = 0;
3605
3606 return ret;
3607 }
3608
3609 /*
3610 * We hold the garbage collection chain data under two different
3611 * indexes: the first 'name' index keeps them under a unique tag that
3612 * represents the chains, and a second 'time' index keeps them by
3613 * their expiration timestamp. Each is prefixed differently (see
3614 * gc_index_prefixes below).
3615 *
3616 * Since key-value data is listed in lexical order by keys, generally
3617 * the name entries are retrieved first and then the time entries.
3618 * When listing the entries via `gc_iterate_entries` one parameter is
3619 * a marker, and if we were to pass "1_" (i.e.,
3620 * gc_index_prefixes[GC_OBJ_TIME_INDEX]), the listing would skip over
3621 * the 'name' entries and begin with the 'time' entries.
3622 *
3623 * Furthermore, the times are converted to strings such that lexical
3624 * order correlates with chronological order, so the entries are
3625 * returned chronologically from the earliest expiring to the latest
3626 * expiring. This allows for starting at "1_" and to keep retrieving
3627 * chunks of entries, and as long as they are prior to the current
3628 * time, they're expired and processing can continue.
3629 */
3630 #define GC_OBJ_NAME_INDEX 0
3631 #define GC_OBJ_TIME_INDEX 1
3632
3633 static string gc_index_prefixes[] = { "0_",
3634 "1_" };
3635
3636 static void prepend_index_prefix(const string& src, int index, string *dest)
3637 {
3638 *dest = gc_index_prefixes[index];
3639 dest->append(src);
3640 }
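
// A minimal sketch (not part of the original source; the tag and expiration
// values are hypothetical) of the dual-index layout described above: each
// chain gets one 'name' key, keyed by its unique tag, and one 'time' key
// whose lexical order matches its expiration order.
static inline void sketch_gc_index_keys(std::string* name_key,
                                        std::string* time_key)
{
  const std::string tag = "hypothetical-chain-tag";
  const std::string expiry = "00001700000.000000000"; // get_time_key() format
  prepend_index_prefix(tag, GC_OBJ_NAME_INDEX, name_key);    // "0_" + tag
  prepend_index_prefix(expiry, GC_OBJ_TIME_INDEX, time_key); // "1_" + expiry
}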
3641
3642 static int gc_omap_get(cls_method_context_t hctx, int type, const string& key, cls_rgw_gc_obj_info *info)
3643 {
3644 string index;
3645 prepend_index_prefix(key, type, &index);
3646
3647 int ret = read_omap_entry(hctx, index, info);
3648 if (ret < 0)
3649 return ret;
3650
3651 return 0;
3652 }
3653
3654 static int gc_omap_set(cls_method_context_t hctx, int type, const string& key, const cls_rgw_gc_obj_info *info)
3655 {
3656 bufferlist bl;
3657 encode(*info, bl);
3658
3659 string index = gc_index_prefixes[type];
3660 index.append(key);
3661
3662 int ret = cls_cxx_map_set_val(hctx, index, &bl);
3663 if (ret < 0)
3664 return ret;
3665
3666 return 0;
3667 }
3668
3669 static int gc_omap_remove(cls_method_context_t hctx, int type, const string& key)
3670 {
3671 string index = gc_index_prefixes[type];
3672 index.append(key);
3673
3674 int ret = cls_cxx_map_remove_key(hctx, index);
3675 if (ret < 0)
3676 return ret;
3677
3678 return 0;
3679 }
3680
3681 static bool key_in_index(const string& key, int index_type)
3682 {
3683 const string& prefix = gc_index_prefixes[index_type];
3684 return (key.compare(0, prefix.size(), prefix) == 0);
3685 }
3686
3687
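/*
 * Write (or refresh) a GC entry: drop any stale time-index key for the
 * tag, recompute the expiration as now + expiration_secs, then store
 * the entry under both the name index and the time index. If the
 * time-index write fails, the name-index entry is removed again.
 */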
3688 static int gc_update_entry(cls_method_context_t hctx, uint32_t expiration_secs,
3689 cls_rgw_gc_obj_info& info)
3690 {
3691 cls_rgw_gc_obj_info old_info;
3692 int ret = gc_omap_get(hctx, GC_OBJ_NAME_INDEX, info.tag, &old_info);
3693 if (ret == 0) {
3694 string key;
3695 get_time_key(old_info.time, &key);
3696 ret = gc_omap_remove(hctx, GC_OBJ_TIME_INDEX, key);
3697 if (ret < 0 && ret != -ENOENT) {
3698 CLS_LOG(0, "ERROR: failed to remove key=%s", key.c_str());
3699 return ret;
3700 }
3701 }
3702
3703 // calculate time and time key
3704 info.time = ceph::real_clock::now();
3705 info.time += make_timespan(expiration_secs);
3706 string time_key;
3707 get_time_key(info.time, &time_key);
3708
3709 if (info.chain.objs.empty()) {
3710 CLS_LOG(0,
3711 "WARNING: %s setting GC log entry with zero-length chain, "
3712 "tag='%s', timekey='%s'",
3713 __func__, info.tag.c_str(), time_key.c_str());
3714 }
3715
3716 ret = gc_omap_set(hctx, GC_OBJ_NAME_INDEX, info.tag, &info);
3717 if (ret < 0)
3718 return ret;
3719
3720 ret = gc_omap_set(hctx, GC_OBJ_TIME_INDEX, time_key, &info);
3721 if (ret < 0)
3722 goto done_err;
3723
3724 return 0;
3725
3726 done_err:
3727
3728 CLS_LOG(0, "ERROR: gc_set_entry error info.tag=%s, ret=%d",
3729 info.tag.c_str(), ret);
3730 gc_omap_remove(hctx, GC_OBJ_NAME_INDEX, info.tag);
3731
3732 return ret;
3733 }
3734
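/*
 * Push back the expiration of an existing GC entry: look it up by tag
 * and rewrite it via gc_update_entry() with a fresh expiration. Fails
 * (e.g. with -ENOENT) if no entry exists for the tag.
 */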
3735 static int gc_defer_entry(cls_method_context_t hctx, const string& tag, uint32_t expiration_secs)
3736 {
3737 cls_rgw_gc_obj_info info;
3738 int ret = gc_omap_get(hctx, GC_OBJ_NAME_INDEX, tag, &info);
3739 if (ret < 0)
3740 return ret;
3741 return gc_update_entry(hctx, expiration_secs, info);
3742 }
3743
3744 int gc_record_decode(bufferlist& bl, cls_rgw_gc_obj_info& e)
3745 {
3746 auto iter = bl.cbegin();
3747 try {
3748 decode(e, iter);
3749 } catch (ceph::buffer::error& err) {
3750 CLS_LOG(0, "ERROR: failed to decode cls_rgw_gc_obj_info");
3751 return -EIO;
3752 }
3753 return 0;
3754 }
3755
3756 static int rgw_cls_gc_set_entry(cls_method_context_t hctx, bufferlist *in, bufferlist *out)
3757 {
3758 CLS_LOG(10, "entered %s", __func__);
3759 auto in_iter = in->cbegin();
3760
3761 cls_rgw_gc_set_entry_op op;
3762 try {
3763 decode(op, in_iter);
3764 } catch (ceph::buffer::error& err) {
3765 CLS_LOG(1, "ERROR: rgw_cls_gc_set_entry(): failed to decode entry\n");
3766 return -EINVAL;
3767 }
3768
3769 return gc_update_entry(hctx, op.expiration_secs, op.info);
3770 }
3771
3772 static int rgw_cls_gc_defer_entry(cls_method_context_t hctx, bufferlist *in, bufferlist *out)
3773 {
3774 CLS_LOG(10, "entered %s", __func__);
3775 auto in_iter = in->cbegin();
3776
3777 cls_rgw_gc_defer_entry_op op;
3778 try {
3779 decode(op, in_iter);
3780 } catch (ceph::buffer::error& err) {
3781 CLS_LOG(1, "ERROR: rgw_cls_gc_defer_entry(): failed to decode entry\n");
3782 return -EINVAL;
3783 }
3784
3785 return gc_defer_entry(hctx, op.tag, op.expiration_secs);
3786 }
3787
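/*
 * Walk the GC omap starting at 'marker' (or at the beginning of the
 * time index when the marker is empty), invoking 'cb' for each decoded
 * entry. When 'expired_only' is set, iteration stops at the key that
 * corresponds to the current time. 'out_marker' is set to the last key
 * handed to the callback when the listing is truncated (or when the
 * caller did not ask for the truncated flag).
 */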
3788 static int gc_iterate_entries(cls_method_context_t hctx,
3789 const string& marker,
3790 bool expired_only,
3791 string& out_marker,
3792 uint32_t max_entries,
3793 bool *truncated,
3794 int (*cb)(cls_method_context_t,
3795 const string&,
3796 cls_rgw_gc_obj_info&,
3797 void *),
3798 void *param)
3799 {
3800 CLS_LOG(10, "gc_iterate_entries");
3801
3802 map<string, bufferlist> keys;
3803 string filter_prefix, end_key;
3804 string key;
3805
3806 if (truncated)
3807 *truncated = false;
3808
3809 string start_key;
3810 if (marker.empty()) {
3811 prepend_index_prefix(marker, GC_OBJ_TIME_INDEX, &start_key);
3812 } else {
3813 start_key = marker;
3814 }
3815
3816 if (expired_only) {
3817 real_time now = ceph::real_clock::now();
3818 string now_str;
3819 get_time_key(now, &now_str);
3820 prepend_index_prefix(now_str, GC_OBJ_TIME_INDEX, &end_key);
3821
3822 CLS_LOG(10, "gc_iterate_entries end_key=%s", end_key.c_str());
3823 }
3824
3825 string filter;
3826
3827 int ret = cls_cxx_map_get_vals(hctx, start_key, filter, max_entries,
3828 &keys, truncated);
3829 if (ret < 0)
3830 return ret;
3831
3832 auto iter = keys.begin();
3833 if (iter == keys.end()) {
3834     // if keys are empty, the listing must not report truncation
3835 ceph_assert(!truncated || !(*truncated));
3836 return 0;
3837 }
3838
3839 const string* last_key = nullptr; // last key processed, for end-marker
3840 for (; iter != keys.end(); ++iter) {
3841 const string& key = iter->first;
3842 cls_rgw_gc_obj_info e;
3843
3844 CLS_LOG(10, "gc_iterate_entries key=%s", key.c_str());
3845
3846 if (!end_key.empty() && key.compare(end_key) >= 0) {
3847 if (truncated)
3848 *truncated = false;
3849 return 0;
3850 }
3851
3852 if (!key_in_index(key, GC_OBJ_TIME_INDEX)) {
3853 if (truncated)
3854 *truncated = false;
3855 return 0;
3856 }
3857
3858 ret = gc_record_decode(iter->second, e);
3859 if (ret < 0)
3860 return ret;
3861
3862 ret = cb(hctx, key, e, param);
3863 if (ret < 0)
3864 return ret;
3865 last_key = &(iter->first); // update when callback successful
3866 }
3867
3868   // set the out marker if the caller does not capture the truncated
3869   // flag, or if they do and the listing was truncated
3870 if (!truncated || *truncated) {
3871 assert(last_key);
3872 out_marker = *last_key;
3873 }
3874
3875 return 0;
3876 }
3877
3878 static int gc_list_cb(cls_method_context_t hctx, const string& key, cls_rgw_gc_obj_info& info, void *param)
3879 {
3880 list<cls_rgw_gc_obj_info> *l = (list<cls_rgw_gc_obj_info> *)param;
3881 l->push_back(info);
3882 return 0;
3883 }
3884
3885 static int gc_list_entries(cls_method_context_t hctx, const string& marker,
3886 uint32_t max, bool expired_only,
3887 list<cls_rgw_gc_obj_info>& entries, bool *truncated, string& next_marker)
3888 {
3889 int ret = gc_iterate_entries(hctx, marker, expired_only,
3890 next_marker, max, truncated,
3891 gc_list_cb, &entries);
3892 return ret;
3893 }
3894
3895 static int rgw_cls_gc_list(cls_method_context_t hctx, bufferlist *in, bufferlist *out)
3896 {
3897 CLS_LOG(10, "entered %s", __func__);
3898 auto in_iter = in->cbegin();
3899
3900 cls_rgw_gc_list_op op;
3901 try {
3902 decode(op, in_iter);
3903 } catch (ceph::buffer::error& err) {
3904 CLS_LOG(1, "ERROR: rgw_cls_gc_list(): failed to decode entry\n");
3905 return -EINVAL;
3906 }
3907
3908 cls_rgw_gc_list_ret op_ret;
3909 #define GC_LIST_ENTRIES_DEFAULT 128
3910 int ret = gc_list_entries(hctx, op.marker, (op.max ? op.max : GC_LIST_ENTRIES_DEFAULT), op.expired_only,
3911 op_ret.entries, &op_ret.truncated, op_ret.next_marker);
3912 if (ret < 0)
3913 return ret;
3914
3915 encode(op_ret, *out);
3916
3917 return 0;
3918 }
3919
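/*
 * Remove GC entries by tag: for each tag, look up the name-index entry
 * (missing tags are skipped), then delete the matching time-index key
 * and the name-index key; -ENOENT on either removal is tolerated.
 */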
3920 static int gc_remove(cls_method_context_t hctx, vector<string>& tags)
3921 {
3922 for (auto iter = tags.begin(); iter != tags.end(); ++iter) {
3923 string& tag = *iter;
3924 cls_rgw_gc_obj_info info;
3925 int ret = gc_omap_get(hctx, GC_OBJ_NAME_INDEX, tag, &info);
3926 if (ret == -ENOENT) {
3927 CLS_LOG(0, "couldn't find tag in name index tag=%s", tag.c_str());
3928 continue;
3929 }
3930
3931 if (ret < 0)
3932 return ret;
3933
3934 string time_key;
3935 get_time_key(info.time, &time_key);
3936 ret = gc_omap_remove(hctx, GC_OBJ_TIME_INDEX, time_key);
3937 if (ret < 0 && ret != -ENOENT)
3938 return ret;
3939 if (ret == -ENOENT) {
3940 CLS_LOG(0, "couldn't find key in time index key=%s", time_key.c_str());
3941 }
3942
3943 ret = gc_omap_remove(hctx, GC_OBJ_NAME_INDEX, tag);
3944 if (ret < 0 && ret != -ENOENT)
3945 return ret;
3946 }
3947
3948 return 0;
3949 }
3950
3951 static int rgw_cls_gc_remove(cls_method_context_t hctx, bufferlist *in, bufferlist *out)
3952 {
3953 CLS_LOG(10, "entered %s", __func__);
3954 auto in_iter = in->cbegin();
3955
3956 cls_rgw_gc_remove_op op;
3957 try {
3958 decode(op, in_iter);
3959 } catch (ceph::buffer::error& err) {
3960 CLS_LOG(1, "ERROR: rgw_cls_gc_remove(): failed to decode entry\n");
3961 return -EINVAL;
3962 }
3963
3964 return gc_remove(hctx, op.tags);
3965 }
3966
3967 static int rgw_cls_lc_get_entry(cls_method_context_t hctx, bufferlist *in, bufferlist *out)
3968 {
3969 CLS_LOG(10, "entered %s", __func__);
3970 auto in_iter = in->cbegin();
3971
3972 cls_rgw_lc_get_entry_op op;
3973 try {
3974 decode(op, in_iter);
3975 } catch (ceph::buffer::error& err) {
3976 CLS_LOG(1, "ERROR: rgw_cls_lc_set_entry(): failed to decode entry\n");
3977 return -EINVAL;
3978 }
3979
3980 cls_rgw_lc_entry lc_entry;
3981 int ret = read_omap_entry(hctx, op.marker, &lc_entry);
3982 if (ret < 0)
3983 return ret;
3984
3985 cls_rgw_lc_get_entry_ret op_ret(std::move(lc_entry));
3986 encode(op_ret, *out);
3987 return 0;
3988 }
3989
3990
3991 static int rgw_cls_lc_set_entry(cls_method_context_t hctx, bufferlist *in, bufferlist *out)
3992 {
3993 CLS_LOG(10, "entered %s", __func__);
3994 auto in_iter = in->cbegin();
3995
3996 cls_rgw_lc_set_entry_op op;
3997 try {
3998 decode(op, in_iter);
3999 } catch (ceph::buffer::error& err) {
4000 CLS_LOG(1, "ERROR: rgw_cls_lc_set_entry(): failed to decode entry\n");
4001 return -EINVAL;
4002 }
4003
4004 bufferlist bl;
4005 encode(op.entry, bl);
4006
4007 int ret = cls_cxx_map_set_val(hctx, op.entry.bucket, &bl);
4008 return ret;
4009 }
4010
4011 static int rgw_cls_lc_rm_entry(cls_method_context_t hctx, bufferlist *in, bufferlist *out)
4012 {
4013 CLS_LOG(10, "entered %s", __func__);
4014 auto in_iter = in->cbegin();
4015
4016 cls_rgw_lc_rm_entry_op op;
4017 try {
4018 decode(op, in_iter);
4019 } catch (ceph::buffer::error& err) {
4020 CLS_LOG(1, "ERROR: rgw_cls_lc_rm_entry(): failed to decode entry\n");
4021 return -EINVAL;
4022 }
4023
4024 int ret = cls_cxx_map_remove_key(hctx, op.entry.bucket);
4025 return ret;
4026 }
4027
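/*
 * Return the next lifecycle entry following op.marker; if there is
 * none, an empty (default-constructed) entry is returned.
 */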
4028 static int rgw_cls_lc_get_next_entry(cls_method_context_t hctx, bufferlist *in, bufferlist *out)
4029 {
4030 CLS_LOG(10, "entered %s", __func__);
4031 auto in_iter = in->cbegin();
4032 cls_rgw_lc_get_next_entry_ret op_ret;
4033 cls_rgw_lc_get_next_entry_op op;
4034 try {
4035 decode(op, in_iter);
4036 } catch (ceph::buffer::error& err) {
4037 CLS_LOG(1, "ERROR: rgw_cls_lc_get_next_entry: failed to decode op\n");
4038 return -EINVAL;
4039 }
4040
4041 map<string, bufferlist> vals;
4042 string filter_prefix;
4043 bool more;
4044 int ret = cls_cxx_map_get_vals(hctx, op.marker, filter_prefix, 1, &vals, &more);
4045 if (ret < 0)
4046 return ret;
4047 cls_rgw_lc_entry entry;
4048 if (!vals.empty()) {
4049 auto it = vals.begin();
4050 in_iter = it->second.begin();
4051 try {
4052 decode(entry, in_iter);
4053 } catch (ceph::buffer::error& err) {
4054 CLS_LOG(1, "ERROR: rgw_cls_lc_get_next_entry(): failed to decode entry\n");
4055 return -EIO;
4056 }
4057 }
4058 op_ret.entry = entry;
4059 encode(op_ret, *out);
4060 return 0;
4061 }
4062
4063 static int rgw_cls_lc_list_entries(cls_method_context_t hctx, bufferlist *in,
4064 bufferlist *out)
4065 {
4066 CLS_LOG(10, "entered %s", __func__);
4067 cls_rgw_lc_list_entries_op op;
4068 auto in_iter = in->cbegin();
4069 try {
4070 decode(op, in_iter);
4071 } catch (ceph::buffer::error& err) {
4072 CLS_LOG(1, "ERROR: rgw_cls_lc_list_entries(): failed to decode op\n");
4073 return -EINVAL;
4074 }
4075
4076 cls_rgw_lc_list_entries_ret op_ret(op.compat_v);
4077 map<string, bufferlist> vals;
4078 string filter_prefix;
4079 int ret = cls_cxx_map_get_vals(hctx, op.marker, filter_prefix, op.max_entries,
4080 &vals, &op_ret.is_truncated);
4081 if (ret < 0)
4082 return ret;
4083 for (auto it = vals.begin(); it != vals.end(); ++it) {
4084 cls_rgw_lc_entry entry;
4085 auto iter = it->second.cbegin();
4086 try {
4087 decode(entry, iter);
4088 } catch (buffer::error& err) {
4089 /* try backward compat */
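      /* older entries were encoded as a pair<bucket, status>; convert
       * them into a cls_rgw_lc_entry with a start time of 0 */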
4090 pair<string, int> oe;
4091 try {
4092 iter = it->second.begin();
4093 decode(oe, iter);
4094 entry = {oe.first, 0 /* start */, uint32_t(oe.second)};
4095 } catch(buffer::error& err) {
4096 CLS_LOG(
4097 1, "ERROR: rgw_cls_lc_list_entries(): failed to decode entry\n");
4098 return -EIO;
4099 }
4100 }
4101 op_ret.entries.push_back(entry);
4102 }
4103 encode(op_ret, *out);
4104 return 0;
4105 }
4106
4107 static int rgw_cls_lc_put_head(cls_method_context_t hctx, bufferlist *in, bufferlist *out)
4108 {
4109 CLS_LOG(10, "entered %s", __func__);
4110 auto in_iter = in->cbegin();
4111
4112 cls_rgw_lc_put_head_op op;
4113 try {
4114 decode(op, in_iter);
4115 } catch (ceph::buffer::error& err) {
4116 CLS_LOG(1, "ERROR: rgw_cls_lc_put_head(): failed to decode entry\n");
4117 return -EINVAL;
4118 }
4119
4120 bufferlist bl;
4121 encode(op.head, bl);
4122   int ret = cls_cxx_map_write_header(hctx, &bl);
4123 return ret;
4124 }
4125
4126 static int rgw_cls_lc_get_head(cls_method_context_t hctx, bufferlist *in, bufferlist *out)
4127 {
4128 CLS_LOG(10, "entered %s", __func__);
4129 bufferlist bl;
4130 int ret = cls_cxx_map_read_header(hctx, &bl);
4131 if (ret < 0)
4132 return ret;
4133 cls_rgw_lc_obj_head head;
4134 if (bl.length() != 0) {
4135 auto iter = bl.cbegin();
4136 try {
4137 decode(head, iter);
4138 } catch (ceph::buffer::error& err) {
4139 CLS_LOG(0, "ERROR: rgw_cls_lc_get_head(): failed to decode entry %s",err.what());
4140 return -EINVAL;
4141 }
4142 } else {
4143 head.start_date = 0;
4144 head.marker.clear();
4145 }
4146 cls_rgw_lc_get_head_ret op_ret;
4147 op_ret.head = head;
4148 encode(op_ret, *out);
4149 return 0;
4150 }
4151
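/*
 * Queue (or overwrite) a reshard request: the entry is stored in this
 * object's omap under the key derived from the entry via get_key().
 */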
4152 static int rgw_reshard_add(cls_method_context_t hctx, bufferlist *in, bufferlist *out)
4153 {
4154 CLS_LOG(10, "entered %s", __func__);
4155 auto in_iter = in->cbegin();
4156
4157 cls_rgw_reshard_add_op op;
4158 try {
4159 decode(op, in_iter);
4160 } catch (ceph::buffer::error& err) {
4161 CLS_LOG(1, "ERROR: rgw_reshard_add: failed to decode entry\n");
4162 return -EINVAL;
4163 }
4164
4165
4166 string key;
4167 op.entry.get_key(&key);
4168
4169 bufferlist bl;
4170 encode(op.entry, bl);
4171 int ret = cls_cxx_map_set_val(hctx, key, &bl);
4172 if (ret < 0) {
4173 CLS_ERR("error adding reshard job for bucket %s with key %s",op.entry.bucket_name.c_str(), key.c_str());
4174 return ret;
4175 }
4176
4177 return ret;
4178 }
4179
4180 static int rgw_reshard_list(cls_method_context_t hctx, bufferlist *in, bufferlist *out)
4181 {
4182 CLS_LOG(10, "entered %s", __func__);
4183 cls_rgw_reshard_list_op op;
4184 auto in_iter = in->cbegin();
4185 try {
4186 decode(op, in_iter);
4187 } catch (ceph::buffer::error& err) {
4188 CLS_LOG(1, "ERROR: rgw_cls_rehard_list(): failed to decode entry\n");
4189 return -EINVAL;
4190 }
4191 cls_rgw_reshard_list_ret op_ret;
4192 map<string, bufferlist> vals;
4193 string filter_prefix;
4194 #define MAX_RESHARD_LIST_ENTRIES 1000
4195 /* one extra entry for identifying truncation */
4196 int32_t max = (op.max && (op.max < MAX_RESHARD_LIST_ENTRIES) ? op.max : MAX_RESHARD_LIST_ENTRIES);
4197 int ret = cls_cxx_map_get_vals(hctx, op.marker, filter_prefix, max, &vals, &op_ret.is_truncated);
4198 if (ret < 0)
4199 return ret;
4200 cls_rgw_reshard_entry entry;
4201 int i = 0;
4202 for (auto it = vals.begin(); i < (int)op.max && it != vals.end(); ++it, ++i) {
4203 auto iter = it->second.cbegin();
4204 try {
4205 decode(entry, iter);
4206 } catch (ceph::buffer::error& err) {
4207 CLS_LOG(1, "ERROR: rgw_cls_rehard_list(): failed to decode entry\n");
4208 return -EIO;
4209 }
4210 op_ret.entries.push_back(entry);
4211 }
4212 encode(op_ret, *out);
4213 return 0;
4214 }
4215
4216 static int rgw_reshard_get(cls_method_context_t hctx, bufferlist *in, bufferlist *out)
4217 {
4218 CLS_LOG(10, "entered %s", __func__);
4219 auto in_iter = in->cbegin();
4220
4221 cls_rgw_reshard_get_op op;
4222 try {
4223 decode(op, in_iter);
4224 } catch (ceph::buffer::error& err) {
4225 CLS_LOG(1, "ERROR: rgw_reshard_get: failed to decode entry\n");
4226 return -EINVAL;
4227 }
4228
4229 string key;
4230 cls_rgw_reshard_entry entry;
4231 op.entry.get_key(&key);
4232 int ret = read_omap_entry(hctx, key, &entry);
4233 if (ret < 0) {
4234 return ret;
4235 }
4236
4237 cls_rgw_reshard_get_ret op_ret;
4238 op_ret.entry = entry;
4239 encode(op_ret, *out);
4240 return 0;
4241 }
4242
4243 static int rgw_reshard_remove(cls_method_context_t hctx, bufferlist *in, bufferlist *out)
4244 {
4245 CLS_LOG(10, "entered %s", __func__);
4246 auto in_iter = in->cbegin();
4247
4248 cls_rgw_reshard_remove_op op;
4249 try {
4250 decode(op, in_iter);
4251 } catch (ceph::buffer::error& err) {
4252 CLS_LOG(1, "ERROR: rgw_cls_rehard_remove: failed to decode entry\n");
4253 return -EINVAL;
4254 }
4255
4256 string key;
4257 cls_rgw_reshard_entry entry;
4258 cls_rgw_reshard_entry::generate_key(op.tenant, op.bucket_name, &key);
4259 int ret = read_omap_entry(hctx, key, &entry);
4260 if (ret < 0) {
4261 return ret;
4262 }
4263
4264 if (!op.bucket_id.empty() &&
4265 entry.bucket_id != op.bucket_id) {
4266 return 0;
4267 }
4268
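  // note: a failure to remove the key is logged but not propagated to
  // the caller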
4269 ret = cls_cxx_map_remove_key(hctx, key);
4270 if (ret < 0) {
4271 CLS_LOG(0, "ERROR: failed to remove key: key=%s ret=%d", key.c_str(), ret);
4272 return 0;
4273 }
4274 return ret;
4275 }
4276
4277 static int rgw_set_bucket_resharding(cls_method_context_t hctx, bufferlist *in, bufferlist *out)
4278 {
4279 CLS_LOG(10, "entered %s", __func__);
4280 cls_rgw_set_bucket_resharding_op op;
4281
4282 auto in_iter = in->cbegin();
4283 try {
4284 decode(op, in_iter);
4285 } catch (ceph::buffer::error& err) {
4286 CLS_LOG(1, "ERROR: cls_rgw_set_bucket_resharding: failed to decode entry\n");
4287 return -EINVAL;
4288 }
4289
4290 rgw_bucket_dir_header header;
4291 int rc = read_bucket_header(hctx, &header);
4292 if (rc < 0) {
4293 CLS_LOG(1, "ERROR: %s: failed to read header", __func__);
4294 return rc;
4295 }
4296
4297 header.new_instance.set_status(op.entry.new_bucket_instance_id, op.entry.num_shards, op.entry.reshard_status);
4298
4299 return write_bucket_header(hctx, &header);
4300 }
4301
4302 static int rgw_clear_bucket_resharding(cls_method_context_t hctx, bufferlist *in, bufferlist *out)
4303 {
4304 CLS_LOG(10, "entered %s", __func__);
4305 cls_rgw_clear_bucket_resharding_op op;
4306
4307 auto in_iter = in->cbegin();
4308 try {
4309 decode(op, in_iter);
4310 } catch (ceph::buffer::error& err) {
4311 CLS_LOG(1, "ERROR: cls_rgw_clear_bucket_resharding: failed to decode entry\n");
4312 return -EINVAL;
4313 }
4314
4315 rgw_bucket_dir_header header;
4316 int rc = read_bucket_header(hctx, &header);
4317 if (rc < 0) {
4318 CLS_LOG(1, "ERROR: %s: failed to read header", __func__);
4319 return rc;
4320 }
4321 header.new_instance.clear();
4322
4323 return write_bucket_header(hctx, &header);
4324 }
4325
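/*
 * Guard an index operation against an in-progress reshard: read the
 * bucket index header and, if it indicates resharding, fail with the
 * caller-supplied error code op.ret_err; otherwise return success.
 */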
4326 static int rgw_guard_bucket_resharding(cls_method_context_t hctx, bufferlist *in, bufferlist *out)
4327 {
4328 CLS_LOG(10, "entered %s", __func__);
4329 cls_rgw_guard_bucket_resharding_op op;
4330
4331 auto in_iter = in->cbegin();
4332 try {
4333 decode(op, in_iter);
4334 } catch (ceph::buffer::error& err) {
4335 CLS_LOG(1, "ERROR: %s: failed to decode entry", __func__);
4336 return -EINVAL;
4337 }
4338
4339 rgw_bucket_dir_header header;
4340 int rc = read_bucket_header(hctx, &header);
4341 if (rc < 0) {
4342 CLS_LOG(1, "ERROR: %s: failed to read header", __func__);
4343 return rc;
4344 }
4345
4346 if (header.resharding()) {
4347 return op.ret_err;
4348 }
4349
4350 return 0;
4351 }
4352
4353 static int rgw_get_bucket_resharding(cls_method_context_t hctx,
4354 bufferlist *in, bufferlist *out)
4355 {
4356 CLS_LOG(10, "entered %s", __func__);
4357 cls_rgw_get_bucket_resharding_op op;
4358
4359 auto in_iter = in->cbegin();
4360 try {
4361 decode(op, in_iter);
4362 } catch (ceph::buffer::error& err) {
4363 CLS_LOG(1, "ERROR: %s: failed to decode entry", __func__);
4364 return -EINVAL;
4365 }
4366
4367 rgw_bucket_dir_header header;
4368 int rc = read_bucket_header(hctx, &header);
4369 if (rc < 0) {
4370 CLS_LOG(1, "ERROR: %s: failed to read header", __func__);
4371 return rc;
4372 }
4373
4374 cls_rgw_get_bucket_resharding_ret op_ret;
4375 op_ret.new_instance = header.new_instance;
4376
4377 encode(op_ret, *out);
4378
4379 return 0;
4380 }
4381
4382 CLS_INIT(rgw)
4383 {
4384 CLS_LOG(1, "Loaded rgw class!");
4385
4386 cls_handle_t h_class;
4387 cls_method_handle_t h_rgw_bucket_init_index;
4388 cls_method_handle_t h_rgw_bucket_set_tag_timeout;
4389 cls_method_handle_t h_rgw_bucket_list;
4390 cls_method_handle_t h_rgw_bucket_check_index;
4391 cls_method_handle_t h_rgw_bucket_rebuild_index;
4392 cls_method_handle_t h_rgw_bucket_update_stats;
4393 cls_method_handle_t h_rgw_bucket_prepare_op;
4394 cls_method_handle_t h_rgw_bucket_complete_op;
4395 cls_method_handle_t h_rgw_bucket_link_olh;
4396 cls_method_handle_t h_rgw_bucket_unlink_instance_op;
4397 cls_method_handle_t h_rgw_bucket_read_olh_log;
4398 cls_method_handle_t h_rgw_bucket_trim_olh_log;
4399 cls_method_handle_t h_rgw_bucket_clear_olh;
4400 cls_method_handle_t h_rgw_obj_remove;
4401 cls_method_handle_t h_rgw_obj_store_pg_ver;
4402 cls_method_handle_t h_rgw_obj_check_attrs_prefix;
4403 cls_method_handle_t h_rgw_obj_check_mtime;
4404 cls_method_handle_t h_rgw_bi_get_op;
4405 cls_method_handle_t h_rgw_bi_put_op;
4406 cls_method_handle_t h_rgw_bi_list_op;
4407 cls_method_handle_t h_rgw_bi_log_list_op;
4408 cls_method_handle_t h_rgw_bi_log_resync_op;
4409 cls_method_handle_t h_rgw_bi_log_stop_op;
4410 cls_method_handle_t h_rgw_dir_suggest_changes;
4411 cls_method_handle_t h_rgw_user_usage_log_add;
4412 cls_method_handle_t h_rgw_user_usage_log_read;
4413 cls_method_handle_t h_rgw_user_usage_log_trim;
4414 cls_method_handle_t h_rgw_usage_log_clear;
4415 cls_method_handle_t h_rgw_gc_set_entry;
4416 cls_method_handle_t h_rgw_gc_list;
4417 cls_method_handle_t h_rgw_gc_remove;
4418 cls_method_handle_t h_rgw_lc_get_entry;
4419 cls_method_handle_t h_rgw_lc_set_entry;
4420 cls_method_handle_t h_rgw_lc_rm_entry;
4421 cls_method_handle_t h_rgw_lc_get_next_entry;
4422 cls_method_handle_t h_rgw_lc_put_head;
4423 cls_method_handle_t h_rgw_lc_get_head;
4424 cls_method_handle_t h_rgw_lc_list_entries;
4425 cls_method_handle_t h_rgw_reshard_add;
4426 cls_method_handle_t h_rgw_reshard_list;
4427 cls_method_handle_t h_rgw_reshard_get;
4428 cls_method_handle_t h_rgw_reshard_remove;
4429 cls_method_handle_t h_rgw_set_bucket_resharding;
4430 cls_method_handle_t h_rgw_clear_bucket_resharding;
4431 cls_method_handle_t h_rgw_guard_bucket_resharding;
4432 cls_method_handle_t h_rgw_get_bucket_resharding;
4433
4434 cls_register(RGW_CLASS, &h_class);
4435
4436 /* bucket index */
4437 cls_register_cxx_method(h_class, RGW_BUCKET_INIT_INDEX, CLS_METHOD_RD | CLS_METHOD_WR, rgw_bucket_init_index, &h_rgw_bucket_init_index);
4438 cls_register_cxx_method(h_class, RGW_BUCKET_SET_TAG_TIMEOUT, CLS_METHOD_RD | CLS_METHOD_WR, rgw_bucket_set_tag_timeout, &h_rgw_bucket_set_tag_timeout);
4439 cls_register_cxx_method(h_class, RGW_BUCKET_LIST, CLS_METHOD_RD, rgw_bucket_list, &h_rgw_bucket_list);
4440 cls_register_cxx_method(h_class, RGW_BUCKET_CHECK_INDEX, CLS_METHOD_RD, rgw_bucket_check_index, &h_rgw_bucket_check_index);
4441 cls_register_cxx_method(h_class, RGW_BUCKET_REBUILD_INDEX, CLS_METHOD_RD | CLS_METHOD_WR, rgw_bucket_rebuild_index, &h_rgw_bucket_rebuild_index);
4442 cls_register_cxx_method(h_class, RGW_BUCKET_UPDATE_STATS, CLS_METHOD_RD | CLS_METHOD_WR, rgw_bucket_update_stats, &h_rgw_bucket_update_stats);
4443 cls_register_cxx_method(h_class, RGW_BUCKET_PREPARE_OP, CLS_METHOD_RD | CLS_METHOD_WR, rgw_bucket_prepare_op, &h_rgw_bucket_prepare_op);
4444 cls_register_cxx_method(h_class, RGW_BUCKET_COMPLETE_OP, CLS_METHOD_RD | CLS_METHOD_WR, rgw_bucket_complete_op, &h_rgw_bucket_complete_op);
4445 cls_register_cxx_method(h_class, RGW_BUCKET_LINK_OLH, CLS_METHOD_RD | CLS_METHOD_WR, rgw_bucket_link_olh, &h_rgw_bucket_link_olh);
4446 cls_register_cxx_method(h_class, RGW_BUCKET_UNLINK_INSTANCE, CLS_METHOD_RD | CLS_METHOD_WR, rgw_bucket_unlink_instance, &h_rgw_bucket_unlink_instance_op);
4447 cls_register_cxx_method(h_class, RGW_BUCKET_READ_OLH_LOG, CLS_METHOD_RD, rgw_bucket_read_olh_log, &h_rgw_bucket_read_olh_log);
4448 cls_register_cxx_method(h_class, RGW_BUCKET_TRIM_OLH_LOG, CLS_METHOD_RD | CLS_METHOD_WR, rgw_bucket_trim_olh_log, &h_rgw_bucket_trim_olh_log);
4449 cls_register_cxx_method(h_class, RGW_BUCKET_CLEAR_OLH, CLS_METHOD_RD | CLS_METHOD_WR, rgw_bucket_clear_olh, &h_rgw_bucket_clear_olh);
4450
4451 cls_register_cxx_method(h_class, RGW_OBJ_REMOVE, CLS_METHOD_RD | CLS_METHOD_WR, rgw_obj_remove, &h_rgw_obj_remove);
4452 cls_register_cxx_method(h_class, RGW_OBJ_STORE_PG_VER, CLS_METHOD_WR, rgw_obj_store_pg_ver, &h_rgw_obj_store_pg_ver);
4453 cls_register_cxx_method(h_class, RGW_OBJ_CHECK_ATTRS_PREFIX, CLS_METHOD_RD, rgw_obj_check_attrs_prefix, &h_rgw_obj_check_attrs_prefix);
4454 cls_register_cxx_method(h_class, RGW_OBJ_CHECK_MTIME, CLS_METHOD_RD, rgw_obj_check_mtime, &h_rgw_obj_check_mtime);
4455
4456 cls_register_cxx_method(h_class, RGW_BI_GET, CLS_METHOD_RD, rgw_bi_get_op, &h_rgw_bi_get_op);
4457 cls_register_cxx_method(h_class, RGW_BI_PUT, CLS_METHOD_RD | CLS_METHOD_WR, rgw_bi_put_op, &h_rgw_bi_put_op);
4458 cls_register_cxx_method(h_class, RGW_BI_LIST, CLS_METHOD_RD, rgw_bi_list_op, &h_rgw_bi_list_op);
4459
4460 cls_register_cxx_method(h_class, RGW_BI_LOG_LIST, CLS_METHOD_RD, rgw_bi_log_list, &h_rgw_bi_log_list_op);
4461 cls_register_cxx_method(h_class, RGW_BI_LOG_TRIM, CLS_METHOD_RD | CLS_METHOD_WR, rgw_bi_log_trim, &h_rgw_bi_log_list_op);
4462 cls_register_cxx_method(h_class, RGW_DIR_SUGGEST_CHANGES, CLS_METHOD_RD | CLS_METHOD_WR, rgw_dir_suggest_changes, &h_rgw_dir_suggest_changes);
4463
4464 cls_register_cxx_method(h_class, RGW_BI_LOG_RESYNC, CLS_METHOD_RD | CLS_METHOD_WR, rgw_bi_log_resync, &h_rgw_bi_log_resync_op);
4465 cls_register_cxx_method(h_class, RGW_BI_LOG_STOP, CLS_METHOD_RD | CLS_METHOD_WR, rgw_bi_log_stop, &h_rgw_bi_log_stop_op);
4466
4467 /* usage logging */
4468 cls_register_cxx_method(h_class, RGW_USER_USAGE_LOG_ADD, CLS_METHOD_RD | CLS_METHOD_WR, rgw_user_usage_log_add, &h_rgw_user_usage_log_add);
4469 cls_register_cxx_method(h_class, RGW_USER_USAGE_LOG_READ, CLS_METHOD_RD, rgw_user_usage_log_read, &h_rgw_user_usage_log_read);
4470 cls_register_cxx_method(h_class, RGW_USER_USAGE_LOG_TRIM, CLS_METHOD_RD | CLS_METHOD_WR, rgw_user_usage_log_trim, &h_rgw_user_usage_log_trim);
4471 cls_register_cxx_method(h_class, RGW_USAGE_LOG_CLEAR, CLS_METHOD_WR, rgw_usage_log_clear, &h_rgw_usage_log_clear);
4472
4473 /* garbage collection */
4474 cls_register_cxx_method(h_class, RGW_GC_SET_ENTRY, CLS_METHOD_RD | CLS_METHOD_WR, rgw_cls_gc_set_entry, &h_rgw_gc_set_entry);
4475 cls_register_cxx_method(h_class, RGW_GC_DEFER_ENTRY, CLS_METHOD_RD | CLS_METHOD_WR, rgw_cls_gc_defer_entry, &h_rgw_gc_set_entry);
4476 cls_register_cxx_method(h_class, RGW_GC_LIST, CLS_METHOD_RD, rgw_cls_gc_list, &h_rgw_gc_list);
4477 cls_register_cxx_method(h_class, RGW_GC_REMOVE, CLS_METHOD_RD | CLS_METHOD_WR, rgw_cls_gc_remove, &h_rgw_gc_remove);
4478
4479 /* lifecycle bucket list */
4480 cls_register_cxx_method(h_class, RGW_LC_GET_ENTRY, CLS_METHOD_RD, rgw_cls_lc_get_entry, &h_rgw_lc_get_entry);
4481 cls_register_cxx_method(h_class, RGW_LC_SET_ENTRY, CLS_METHOD_RD | CLS_METHOD_WR, rgw_cls_lc_set_entry, &h_rgw_lc_set_entry);
4482 cls_register_cxx_method(h_class, RGW_LC_RM_ENTRY, CLS_METHOD_RD | CLS_METHOD_WR, rgw_cls_lc_rm_entry, &h_rgw_lc_rm_entry);
4483 cls_register_cxx_method(h_class, RGW_LC_GET_NEXT_ENTRY, CLS_METHOD_RD, rgw_cls_lc_get_next_entry, &h_rgw_lc_get_next_entry);
4484   cls_register_cxx_method(h_class, RGW_LC_PUT_HEAD, CLS_METHOD_RD | CLS_METHOD_WR, rgw_cls_lc_put_head, &h_rgw_lc_put_head);
4485 cls_register_cxx_method(h_class, RGW_LC_GET_HEAD, CLS_METHOD_RD, rgw_cls_lc_get_head, &h_rgw_lc_get_head);
4486 cls_register_cxx_method(h_class, RGW_LC_LIST_ENTRIES, CLS_METHOD_RD, rgw_cls_lc_list_entries, &h_rgw_lc_list_entries);
4487
4488 /* resharding */
4489 cls_register_cxx_method(h_class, RGW_RESHARD_ADD, CLS_METHOD_RD | CLS_METHOD_WR, rgw_reshard_add, &h_rgw_reshard_add);
4490 cls_register_cxx_method(h_class, RGW_RESHARD_LIST, CLS_METHOD_RD, rgw_reshard_list, &h_rgw_reshard_list);
4491   cls_register_cxx_method(h_class, RGW_RESHARD_GET, CLS_METHOD_RD, rgw_reshard_get, &h_rgw_reshard_get);
4492 cls_register_cxx_method(h_class, RGW_RESHARD_REMOVE, CLS_METHOD_RD | CLS_METHOD_WR, rgw_reshard_remove, &h_rgw_reshard_remove);
4493
4494 /* resharding attribute */
4495 cls_register_cxx_method(h_class, RGW_SET_BUCKET_RESHARDING, CLS_METHOD_RD | CLS_METHOD_WR,
4496 rgw_set_bucket_resharding, &h_rgw_set_bucket_resharding);
4497 cls_register_cxx_method(h_class, RGW_CLEAR_BUCKET_RESHARDING, CLS_METHOD_RD | CLS_METHOD_WR,
4498 rgw_clear_bucket_resharding, &h_rgw_clear_bucket_resharding);
4499   cls_register_cxx_method(h_class, RGW_GUARD_BUCKET_RESHARDING, CLS_METHOD_RD,
4500 rgw_guard_bucket_resharding, &h_rgw_guard_bucket_resharding);
4501   cls_register_cxx_method(h_class, RGW_GET_BUCKET_RESHARDING, CLS_METHOD_RD,
4502 rgw_get_bucket_resharding, &h_rgw_get_bucket_resharding);
4503
4504 return;
4505 }