1 // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
2 // vim: ts=8 sw=2 smarttab
4 #include "include/types.h"
8 #include <boost/algorithm/string.hpp>
10 #include "objclass/objclass.h"
11 #include "cls/rgw/cls_rgw_ops.h"
12 #include "cls/rgw/cls_rgw_const.h"
13 #include "common/Clock.h"
14 #include "common/strtol.h"
15 #include "common/escape.h"
17 #include "include/compat.h"
18 #include <boost/lexical_cast.hpp>
26 using ceph::bufferlist
;
29 using ceph::make_timespan
;
30 using ceph::real_clock
;
31 using ceph::real_time
;
38 // No UTF-8 character can begin with 0x80, so this is a safe indicator
39 // of a special bucket-index entry for the first byte. Note: although
40 // it has no impact, the 2nd, 3rd, or 4th byte of a UTF-8 character
42 #define BI_PREFIX_CHAR 0x80
44 #define BI_BUCKET_OBJS_INDEX 0
45 #define BI_BUCKET_LOG_INDEX 1
46 #define BI_BUCKET_OBJ_INSTANCE_INDEX 2
47 #define BI_BUCKET_OLH_DATA_INDEX 3
49 #define BI_BUCKET_LAST_INDEX 4
51 static std::string bucket_index_prefixes
[] = { "", /* special handling for the objs list index */
52 "0_", /* bucket log index */
53 "1000_", /* obj instance index */
54 "1001_", /* olh data index */
56 /* this must be the last index */
59 // this string is greater than all ascii plain entries and less than
60 // all special entries
61 static const std::string BI_PREFIX_BEGIN
= string(1, BI_PREFIX_CHAR
);
63 // this string is greater than all special entries and less than all
64 // non-ascii plain entries
65 static const std::string BI_PREFIX_END
= string(1, BI_PREFIX_CHAR
) +
66 bucket_index_prefixes
[BI_BUCKET_LAST_INDEX
];
68 /* Returns whether parameter is not a key for a special entry. Empty
69 * strings are considered plain also, so, for example, an empty marker
70 * is also considered plain. TODO: check to make sure all callers are
71 * using appropriately.
73 static bool bi_is_plain_entry(const std::string
& s
) {
74 return (s
.empty() || (unsigned char)s
[0] != BI_PREFIX_CHAR
);
// Classify a raw bucket-index omap key, returning one of the
// BI_BUCKET_*_INDEX values. Plain keys (no 0x80 prefix byte) are
// BI_BUCKET_OBJS_INDEX; otherwise the text after the prefix byte is
// matched against the bucket_index_prefixes table.
// NOTE(review): the loop header, braces and the return statements for
// the match/no-match cases are elided in this view; the comments below
// cover only the visible fragments.
77 int bi_entry_type(const string
& s
)
79 if (bi_is_plain_entry(s
)) {
// Plain entry: ordinary object-listing key.
80 return BI_BUCKET_OBJS_INDEX
;
// Scan the remaining known special-entry prefixes (index 0 is the
// plain/objs case handled above).
84 i
< sizeof(bucket_index_prefixes
) / sizeof(bucket_index_prefixes
[0]);
86 const string
& t
= bucket_index_prefixes
[i
];
// Compare the candidate prefix starting at offset 1, i.e. just past
// the BI_PREFIX_CHAR marker byte.
88 if (s
.compare(1, t
.size(), t
) == 0) {
// Ordering predicate for bucket-index keys: entries are ranked first by
// category (bi_entry_type), then lexicographically within the same
// category. NOTE(review): the branch taken when fi > si is elided in
// this view — presumably it returns true; confirm against the source.
96 static bool bi_entry_gt(const string
& first
, const string
& second
)
98 int fi
= bi_entry_type(first
);
99 int si
= bi_entry_type(second
);
103 } else if (fi
< si
) {
// Same category: plain string comparison decides the order.
107 return first
> second
;
// Render a real_time as a fixed-width "seconds.nanoseconds" string so
// that lexicographic order matches chronological order.
// NOTE(review): the declaration of `buf` and the final assignment of
// buf into *key are elided in this view.
110 static void get_time_key(real_time
& ut
, string
*key
)
113 ceph_timespec ts
= ceph::real_clock::to_ceph_timespec(ut
);
// Zero-padded widths (11-digit seconds, 9-digit nsec) keep the text
// sortable; buffer size 32 comfortably holds "11 + 1 + 9 + NUL".
114 snprintf(buf
, 32, "%011llu.%09u", (unsigned long long)ts
.tv_sec
, (unsigned int)ts
.tv_nsec
);
// Build a key component from the bucket index version plus the current
// PG version and sub-op number, giving concurrent operations unique,
// lexicographically ordered keys.
// NOTE(review): the `buf` declaration and the final assignment into
// *key are elided in this view.
118 static void get_index_ver_key(cls_method_context_t hctx
, uint64_t index_ver
, string
*key
)
// 11-digit zero-padded index_ver keeps string order aligned with
// numeric order.
121 snprintf(buf
, sizeof(buf
), "%011llu.%llu.%d", (unsigned long long)index_ver
,
122 (unsigned long long)cls_current_version(hctx
),
123 cls_current_subop_num(hctx
));
127 static void bi_log_prefix(string
& key
)
129 key
= BI_PREFIX_CHAR
;
130 key
.append(bucket_index_prefixes
[BI_BUCKET_LOG_INDEX
]);
// Compose the full omap key for a bucket-index log entry; the generated
// id component is also returned to the caller through `id`.
// NOTE(review): the initial bi_log_prefix(key) call and the trailing
// key.append(id) are elided in this view.
133 static void bi_log_index_key(cls_method_context_t hctx
, string
& key
, string
& id
, uint64_t index_ver
)
136 get_index_ver_key(hctx
, index_ver
, &id
);
140 static int log_index_operation(cls_method_context_t hctx
, const cls_rgw_obj_key
& obj_key
,
141 RGWModifyOp op
, const string
& tag
, real_time timestamp
,
142 const rgw_bucket_entry_ver
& ver
, RGWPendingState state
, uint64_t index_ver
,
143 string
& max_marker
, uint16_t bilog_flags
, string
*owner
, string
*owner_display_name
, rgw_zone_set
*zones_trace
)
147 rgw_bi_log_entry entry
;
149 entry
.object
= obj_key
.name
;
150 entry
.instance
= obj_key
.instance
;
151 entry
.timestamp
= timestamp
;
155 entry
.index_ver
= index_ver
;
157 entry
.bilog_flags
= bilog_flags
;
159 entry
.owner
= *owner
;
161 if (owner_display_name
) {
162 entry
.owner_display_name
= *owner_display_name
;
165 entry
.zones_trace
= std::move(*zones_trace
);
169 bi_log_index_key(hctx
, key
, entry
.id
, index_ver
);
173 if (entry
.id
> max_marker
)
174 max_marker
= entry
.id
;
176 return cls_cxx_map_set_val(hctx
, key
, &bl
);
180 * Read list of objects, skipping objects in the "ugly namespace". The
181 * "ugly namespace" entries begin with BI_PREFIX_CHAR (0x80). Valid
182 * UTF-8 object names can *both* preceed and follow the "ugly
185 static int get_obj_vals(cls_method_context_t hctx
,
186 const std::string
& start
,
187 const std::string
& filter_prefix
,
189 std::map
<std::string
, bufferlist
> *pkeys
,
192 int ret
= cls_cxx_map_get_vals(hctx
, start
, filter_prefix
,
193 num_entries
, pkeys
, pmore
);
198 if (pkeys
->empty()) {
202 auto last_element
= pkeys
->crbegin();
203 if ((unsigned char)last_element
->first
[0] < BI_PREFIX_CHAR
) {
204 /* if the first character of the last entry is less than the
205 * prefix then all entries must preceed the "ugly namespace" and
211 auto first_element
= pkeys
->cbegin();
212 if ((unsigned char)first_element
->first
[0] > BI_PREFIX_CHAR
) {
213 /* if the first character of the first entry is after the "ugly
214 * namespace" then all entries must follow the "ugly namespace"
215 * then all entries do and we're done
220 /* at this point we know we have entries that could precede the
221 * "ugly namespace", be in the "ugly namespace", and follow the
222 * "ugly namespace", so let's rebuild the list, only keeping entries
223 * outside the "ugly namespace"
226 auto comp
= [](const pair
<std::string
, bufferlist
>& l
, const std::string
&r
) {
229 std::string new_start
= {static_cast<char>(BI_PREFIX_CHAR
+ 1)};
231 auto lower
= pkeys
->lower_bound(string
{static_cast<char>(BI_PREFIX_CHAR
)});
232 auto upper
= std::lower_bound(lower
, pkeys
->end(), new_start
, comp
);
233 pkeys
->erase(lower
, upper
);
235 if (num_entries
== (int)pkeys
->size() || !(*pmore
)) {
239 if (pkeys
->size() && new_start
< pkeys
->crbegin()->first
) {
240 new_start
= pkeys
->rbegin()->first
;
243 std::map
<std::string
, bufferlist
> new_keys
;
245 /* now get some more keys */
246 ret
= cls_cxx_map_get_vals(hctx
, new_start
, filter_prefix
,
247 num_entries
- pkeys
->size(), &new_keys
, pmore
);
252 pkeys
->insert(std::make_move_iterator(new_keys
.begin()),
253 std::make_move_iterator(new_keys
.end()));
// (continuation of a comment block whose opening line is elided)
259 * get a monotonically decreasing string representation.
260 * For num = x, num = y, where x > y, str(x) < str(y)
261 * Another property is that string size starts short and grows as num increases
// Each branch emits a leading digit (9 down to 4) that decreases as the
// magnitude class of `num` increases, followed by the zero-padded
// complement of `num` within that class — so larger numbers always
// produce lexicographically smaller strings.
// NOTE(review): the `buf` declaration, the plain `else {` before the
// last branch, and the final `*str = buf` assignment are elided in
// this view.
263 static void decreasing_str(uint64_t num
, string
*str
)
266 if (num
< 0x10) { /* 16 */
267 snprintf(buf
, sizeof(buf
), "9%02lld", 15 - (long long)num
);
268 } else if (num
< 0x100) { /* 256 */
269 snprintf(buf
, sizeof(buf
), "8%03lld", 255 - (long long)num
);
270 } else if (num
< 0x1000) /* 4096 */ {
271 snprintf(buf
, sizeof(buf
), "7%04lld", 4095 - (long long)num
);
272 } else if (num
< 0x10000) /* 65536 */ {
273 snprintf(buf
, sizeof(buf
), "6%05lld", 65535 - (long long)num
);
274 } else if (num
< 0x100000000) /* 4G */ {
275 snprintf(buf
, sizeof(buf
), "5%010lld", 0xFFFFFFFF - (long long)num
);
// Catch-all for values >= 2^32: unsigned negation (wraparound) keeps
// the decreasing property over the remaining range.
277 snprintf(buf
, sizeof(buf
), "4%020lld", (long long)-num
);
284 * We hold two different indexes for objects. The first one holds the
285 * list of objects in the order that we want them to be listed. The
286 * second one only holds the objects instances (for versioned
287 * objects), and they're not arranged in any particular order. When
288 * listing objects we'll use the first index, when doing operations on
289 * the objects themselves we'll use the second index. Note that
290 * regular objects only map to the first index anyway
// Build the listing-order omap key for a directory entry: the object
// name, then — for versioned entries — "\0v<decreasing-epoch>\0i<instance>"
// so that newer versions sort before older ones under the same name.
// NOTE(review): the `ver_str` declaration and the early return taken
// for non-versioned entries are elided in this view.
293 static void get_list_index_key(rgw_bucket_dir_entry
& entry
, string
*index_key
)
295 *index_key
= entry
.key
.name
;
// Encode versioned_epoch so larger epochs compare smaller as strings.
298 decreasing_str(entry
.versioned_epoch
, &ver_str
);
// NUL-prefixed delimiters cannot collide with UTF-8 object names.
299 string
instance_delim("\0i", 2);
300 string
ver_delim("\0v", 2);
302 index_key
->append(ver_delim
);
303 index_key
->append(ver_str
);
304 index_key
->append(instance_delim
);
305 index_key
->append(entry
.key
.instance
);
// Build the special ("ugly namespace") omap key for an object instance:
// 0x80 marker + "1000_" prefix + name + "\0i" + instance, optionally
// followed by a delete-marker suffix.
// NOTE(review): the construction of `dm` (the delete-marker suffix,
// original line 317) is elided in this view — confirm its contents
// against the source.
308 static void encode_obj_versioned_data_key(const cls_rgw_obj_key
& key
, string
*index_key
, bool append_delete_marker_suffix
= false)
310 *index_key
= BI_PREFIX_CHAR
;
311 index_key
->append(bucket_index_prefixes
[BI_BUCKET_OBJ_INSTANCE_INDEX
]);
312 index_key
->append(key
.name
);
// NUL-prefixed delimiter cannot appear inside a UTF-8 object name.
313 string
delim("\0i", 2);
314 index_key
->append(delim
);
315 index_key
->append(key
.instance
);
316 if (append_delete_marker_suffix
) {
318 index_key
->append(dm
);
322 static void encode_obj_index_key(const cls_rgw_obj_key
& key
, string
*index_key
)
324 if (key
.instance
.empty()) {
325 *index_key
= key
.name
;
327 encode_obj_versioned_data_key(key
, index_key
);
331 static void encode_olh_data_key(const cls_rgw_obj_key
& key
, string
*index_key
)
333 *index_key
= BI_PREFIX_CHAR
;
334 index_key
->append(bucket_index_prefixes
[BI_BUCKET_OLH_DATA_INDEX
]);
335 index_key
->append(key
.name
);
339 static int read_index_entry(cls_method_context_t hctx
, string
& name
, T
*entry
);
// Compute the listing-position key for `key`: plain keys map to the
// bare name; versioned keys are resolved via their stored instance
// entry so the result reflects listing order.
// NOTE(review): several braces and the return statements are elided in
// this view; comments describe only the visible fragments.
341 static int encode_list_index_key(cls_method_context_t hctx
, const cls_rgw_obj_key
& key
, string
*index_key
)
343 if (key
.instance
.empty()) {
344 *index_key
= key
.name
;
348 string obj_index_key
;
349 cls_rgw_obj_key
tmp_key(key
);
// "null" is the pseudo-instance for the null version; it is stored in
// the index with an empty instance.
350 if (tmp_key
.instance
== "null") {
351 tmp_key
.instance
.clear();
353 encode_obj_versioned_data_key(tmp_key
, &obj_index_key
);
355 rgw_bucket_dir_entry entry
;
357 int ret
= read_index_entry(hctx
, obj_index_key
, &entry
);
358 if (ret
== -ENOENT
) {
359 /* couldn't find the entry, set key value after the current object */
// 0x01 is the smallest non-NUL byte, so name + "\x01" sorts
// immediately after every key that begins with `name`.
360 char buf
[2] = { 0x1, 0 };
362 *index_key
= key
.name
+ s
;
366 CLS_LOG(1, "ERROR: encode_list_index_key(): cls_cxx_map_get_val returned %d", ret
);
// Entry found: derive the listing key from the stored entry itself.
370 get_list_index_key(entry
, index_key
);
// Split a NUL-delimited index key into its component strings, appended
// to `vals`. NOTE(review): the initialization of `pos`, the push_back
// of each component, and the loop-advance logic are elided in this
// view.
375 static void split_key(const string
& key
, list
<string
>& vals
)
378 const char *p
= key
.c_str();
379 while (pos
< key
.size()) {
// strlen() measures the current component, i.e. up to the next
// embedded NUL delimiter.
380 size_t len
= strlen(p
);
387 static std::string
escape_str(const std::string
& s
)
389 int len
= escape_json_attr_len(s
.c_str(), s
.size());
390 std::string
escaped(len
, 0);
391 escape_json_attr(s
.c_str(), s
.size(), escaped
.data());
396 * list index key structure:
398 * <obj name>\0[v<ver>\0i<instance id>]
400 static int decode_list_index_key(const string
& index_key
, cls_rgw_obj_key
*key
, uint64_t *ver
)
402 size_t len
= strlen(index_key
.c_str());
404 key
->instance
.clear();
407 if (len
== index_key
.size()) {
408 key
->name
= index_key
;
413 split_key(index_key
, vals
);
416 CLS_LOG(0, "ERROR: %s: bad index_key (%s): split_key() returned empty vals", __func__
, escape_str(index_key
).c_str());
420 auto iter
= vals
.begin();
424 if (iter
== vals
.end()) {
425 CLS_LOG(0, "ERROR: %s: bad index_key (%s): no vals", __func__
, escape_str(index_key
).c_str());
429 for (; iter
!= vals
.end(); ++iter
) {
432 key
->instance
= val
.substr(1);
433 } else if (val
[0] == 'v') {
435 const char *s
= val
.c_str() + 1;
436 *ver
= strict_strtoll(s
, 10, &err
);
438 CLS_LOG(0, "ERROR: %s: bad index_key (%s): could not parse val (v=%s)", __func__
, escape_str(index_key
).c_str(), s
);
// Read and decode the bucket-dir header from the object's omap header.
// A zero-length header blob (e.g. a freshly created index object)
// yields a default-constructed rgw_bucket_dir_header.
// NOTE(review): the return statements (including the error value
// returned after a decode failure) are elided in this view.
447 static int read_bucket_header(cls_method_context_t hctx
,
448 rgw_bucket_dir_header
*header
)
451 int rc
= cls_cxx_map_read_header(hctx
, &bl
);
455 if (bl
.length() == 0) {
456 *header
= rgw_bucket_dir_header();
459 auto iter
= bl
.cbegin();
461 decode(*header
, iter
);
462 } catch (ceph::buffer::error
& err
) {
463 CLS_LOG(1, "ERROR: read_bucket_header(): failed to decode header\n");
470 int rgw_bucket_list(cls_method_context_t hctx
, bufferlist
*in
, bufferlist
*out
)
472 CLS_LOG(10, "entered %s", __func__
);
474 // maximum number of calls to get_obj_vals we'll try; compromise
475 // between wanting to return the requested # of entries, but not
476 // wanting to slow down this op with too many omap reads
477 constexpr int max_attempts
= 8;
479 auto iter
= in
->cbegin();
484 } catch (ceph::buffer::error
& err
) {
485 CLS_LOG(1, "ERROR: %s: failed to decode request", __func__
);
489 rgw_cls_list_ret ret
;
490 rgw_bucket_dir
& new_dir
= ret
.dir
;
491 auto& name_entry_map
= new_dir
.m
; // map of keys to entries
493 int rc
= read_bucket_header(hctx
, &new_dir
.header
);
495 CLS_LOG(1, "ERROR: %s: failed to read header", __func__
);
499 // some calls just want the header and request 0 entries
500 if (op
.num_entries
<= 0) {
501 ret
.is_truncated
= false;
506 // key that we can start listing at, one of a) sent in by caller, b)
507 // last item visited, or c) when delimiter present, a key that will
508 // move past the subdirectory
509 std::string start_after_omap_key
;
510 encode_list_index_key(hctx
, op
.start_obj
, &start_after_omap_key
);
512 // this is set whenenver start_after_omap_key is set to keep them in
513 // sync since this will be the returned marker when a marker is
515 cls_rgw_obj_key start_after_entry_key
;
517 // last key stored in result, so if we have to call get_obj_vals
518 // multiple times, we do not add the overlap to result
519 std::string prev_omap_key
;
521 // last prefix_key stored in result, so we can skip over entries
522 // with the same prefix_key
523 std::string prev_prefix_omap_key
;
525 bool done
= false; // whether we need to keep calling get_obj_vals
526 bool more
= true; // output parameter of get_obj_vals
527 bool has_delimiter
= !op
.delimiter
.empty();
530 start_after_omap_key
> op
.filter_prefix
&&
531 boost::algorithm::ends_with(start_after_omap_key
, op
.delimiter
)) {
532 // advance past all subdirectory entries if we start after a
534 start_after_omap_key
= cls_rgw_after_delim(start_after_omap_key
);
537 for (int attempt
= 0;
538 attempt
< max_attempts
&&
541 name_entry_map
.size() < op
.num_entries
;
543 std::map
<std::string
, bufferlist
> keys
;
545 // note: get_obj_vals skips past the "ugly namespace" (i.e.,
546 // entries that start with the BI_PREFIX_CHAR), so no need to
547 // check for such entries
548 rc
= get_obj_vals(hctx
, start_after_omap_key
, op
.filter_prefix
,
549 op
.num_entries
- name_entry_map
.size(),
554 CLS_LOG(20, "%s: on attempt %d get_obj_vls returned %ld entries, more=%d",
555 __func__
, attempt
, keys
.size(), more
);
559 for (auto kiter
= keys
.cbegin(); kiter
!= keys
.cend(); ++kiter
) {
560 rgw_bucket_dir_entry entry
;
562 const bufferlist
& entrybl
= kiter
->second
;
563 auto eiter
= entrybl
.cbegin();
564 decode(entry
, eiter
);
565 } catch (ceph::buffer::error
& err
) {
566 CLS_LOG(1, "ERROR: %s: failed to decode entry, key=%s",
567 __func__
, kiter
->first
.c_str());
571 start_after_omap_key
= kiter
->first
;
572 start_after_entry_key
= entry
.key
;
573 CLS_LOG(20, "%s: working on key=%s len=%zu",
574 __func__
, kiter
->first
.c_str(), kiter
->first
.size());
578 int ret
= decode_list_index_key(kiter
->first
, &key
, &ver
);
580 CLS_LOG(0, "ERROR: %s: failed to decode list index key (%s)",
581 __func__
, escape_str(kiter
->first
).c_str());
585 if (!entry
.is_valid()) {
586 CLS_LOG(20, "%s: entry %s[%s] is not valid",
587 __func__
, key
.name
.c_str(), key
.instance
.c_str());
591 // filter out noncurrent versions, delete markers, and initial marker
592 if (!op
.list_versions
&&
593 (!entry
.is_visible() || op
.start_obj
.name
== key
.name
)) {
594 CLS_LOG(20, "%s: entry %s[%s] is not visible",
595 __func__
, key
.name
.c_str(), key
.instance
.c_str());
600 int delim_pos
= key
.name
.find(op
.delimiter
, op
.filter_prefix
.size());
602 if (delim_pos
>= 0) {
603 /* extract key with trailing delimiter */
605 key
.name
.substr(0, delim_pos
+ op
.delimiter
.length());
607 if (prefix_key
== prev_prefix_omap_key
) {
608 continue; // we've already added this;
610 prev_prefix_omap_key
= prefix_key
;
613 if (name_entry_map
.size() < op
.num_entries
) {
614 rgw_bucket_dir_entry proxy_entry
;
615 cls_rgw_obj_key
proxy_key(prefix_key
);
616 proxy_entry
.key
= cls_rgw_obj_key(proxy_key
);
617 proxy_entry
.flags
= rgw_bucket_dir_entry::FLAG_COMMON_PREFIX
;
618 name_entry_map
[prefix_key
] = proxy_entry
;
620 CLS_LOG(20, "%s: got common prefix entry %s[%s] num entries=%lu",
621 __func__
, proxy_key
.name
.c_str(), proxy_key
.instance
.c_str(),
622 name_entry_map
.size());
625 // make sure that if this is the last item added to the
626 // result from this call to get_obj_vals, the next call will
627 // skip past rest of "subdirectory"
628 start_after_omap_key
= cls_rgw_after_delim(prefix_key
);
629 start_after_entry_key
.set(start_after_omap_key
);
631 // advance past this subdirectory, but then back up one,
632 // so the loop increment will put us in the right place
633 kiter
= keys
.lower_bound(start_after_omap_key
);
639 // no delimiter after prefix found, so this is a "top-level"
640 // item and we can just fall through
643 if (name_entry_map
.size() < op
.num_entries
&&
644 kiter
->first
!= prev_omap_key
) {
645 name_entry_map
[kiter
->first
] = entry
;
646 prev_omap_key
= kiter
->first
;
647 CLS_LOG(20, "%s: got object entry %s[%s] num entries=%d",
648 __func__
, key
.name
.c_str(), key
.instance
.c_str(),
649 int(name_entry_map
.size()));
651 } // for (auto kiter...
652 } // for (int attempt...
654 ret
.is_truncated
= more
&& !done
;
655 if (ret
.is_truncated
) {
656 ret
.marker
= start_after_entry_key
;
658 CLS_LOG(20, "%s: normal exit returning %ld entries, is_truncated=%d",
659 __func__
, ret
.dir
.m
.size(), ret
.is_truncated
);
662 if (ret
.is_truncated
&& name_entry_map
.size() == 0) {
663 CLS_LOG(5, "%s: returning value RGWBIAdvanceAndRetryError", __func__
);
664 return RGWBIAdvanceAndRetryError
;
671 static int check_index(cls_method_context_t hctx
,
672 rgw_bucket_dir_header
*existing_header
,
673 rgw_bucket_dir_header
*calc_header
)
675 int rc
= read_bucket_header(hctx
, existing_header
);
677 CLS_LOG(1, "ERROR: check_index(): failed to read header\n");
681 calc_header
->tag_timeout
= existing_header
->tag_timeout
;
682 calc_header
->ver
= existing_header
->ver
;
683 calc_header
->syncstopped
= existing_header
->syncstopped
;
685 map
<string
, bufferlist
> keys
;
687 string filter_prefix
;
689 #define CHECK_CHUNK_SIZE 1000
694 rc
= get_obj_vals(hctx
, start_obj
, filter_prefix
, CHECK_CHUNK_SIZE
, &keys
, &more
);
698 for (auto kiter
= keys
.begin(); kiter
!= keys
.end(); ++kiter
) {
699 if (!bi_is_plain_entry(kiter
->first
)) {
704 rgw_bucket_dir_entry entry
;
705 auto eiter
= kiter
->second
.cbegin();
707 decode(entry
, eiter
);
708 } catch (ceph::buffer::error
& err
) {
709 CLS_LOG(1, "ERROR: rgw_bucket_list(): failed to decode entry, key=%s", kiter
->first
.c_str());
712 rgw_bucket_category_stats
& stats
= calc_header
->stats
[entry
.meta
.category
];
714 stats
.total_size
+= entry
.meta
.accounted_size
;
715 stats
.total_size_rounded
+= cls_rgw_get_rounded_size(entry
.meta
.accounted_size
);
716 stats
.actual_size
+= entry
.meta
.size
;
718 start_obj
= kiter
->first
;
720 } while (keys
.size() == CHECK_CHUNK_SIZE
&& !done
);
725 int rgw_bucket_check_index(cls_method_context_t hctx
, bufferlist
*in
, bufferlist
*out
)
727 CLS_LOG(10, "entered %s", __func__
);
728 rgw_cls_check_index_ret ret
;
730 int rc
= check_index(hctx
, &ret
.existing_header
, &ret
.calculated_header
);
// Encode the bucket-dir header and persist it as the omap header.
// NOTE(review): original lines 740-742 are elided in this view —
// presumably including a header version bump before encoding; confirm
// against the source before assuming it is absent.
739 static int write_bucket_header(cls_method_context_t hctx
, rgw_bucket_dir_header
*header
)
743 bufferlist header_bl
;
744 encode(*header
, header_bl
);
745 return cls_cxx_map_write_header(hctx
, &header_bl
);
// cls entry point: recompute the bucket header stats from the index
// entries (via check_index) and persist the recalculated header,
// discarding the existing one.
// NOTE(review): the error check on `rc` between check_index and the
// final write is elided in this view.
749 int rgw_bucket_rebuild_index(cls_method_context_t hctx
, bufferlist
*in
, bufferlist
*out
)
751 CLS_LOG(10, "entered %s", __func__
);
752 rgw_bucket_dir_header existing_header
;
753 rgw_bucket_dir_header calc_header
;
754 int rc
= check_index(hctx
, &existing_header
, &calc_header
);
758 return write_bucket_header(hctx
, &calc_header
);
761 int rgw_bucket_update_stats(cls_method_context_t hctx
, bufferlist
*in
, bufferlist
*out
)
763 CLS_LOG(10, "entered %s", __func__
);
765 rgw_cls_bucket_update_stats_op op
;
766 auto iter
= in
->cbegin();
769 } catch (ceph::buffer::error
& err
) {
770 CLS_LOG(1, "ERROR: %s: failed to decode request", __func__
);
774 rgw_bucket_dir_header header
;
775 int rc
= read_bucket_header(hctx
, &header
);
777 CLS_LOG(1, "ERROR: %s: failed to read header", __func__
);
781 for (auto& s
: op
.stats
) {
782 auto& dest
= header
.stats
[s
.first
];
786 dest
.total_size
+= s
.second
.total_size
;
787 dest
.total_size_rounded
+= s
.second
.total_size_rounded
;
788 dest
.num_entries
+= s
.second
.num_entries
;
789 dest
.actual_size
+= s
.second
.actual_size
;
793 return write_bucket_header(hctx
, &header
);
796 int rgw_bucket_init_index(cls_method_context_t hctx
, bufferlist
*in
, bufferlist
*out
)
798 CLS_LOG(10, "entered %s", __func__
);
799 bufferlist header_bl
;
800 int rc
= cls_cxx_map_read_header(hctx
, &header_bl
);
811 if (header_bl
.length() != 0) {
812 CLS_LOG(1, "ERROR: index already initialized\n");
818 return write_bucket_header(hctx
, &dir
.header
);
821 int rgw_bucket_set_tag_timeout(cls_method_context_t hctx
, bufferlist
*in
, bufferlist
*out
)
823 CLS_LOG(10, "entered %s", __func__
);
825 rgw_cls_tag_timeout_op op
;
826 auto iter
= in
->cbegin();
829 } catch (ceph::buffer::error
& err
) {
830 CLS_LOG(1, "ERROR: rgw_bucket_set_tag_timeout(): failed to decode request\n");
834 rgw_bucket_dir_header header
;
835 int rc
= read_bucket_header(hctx
, &header
);
837 CLS_LOG(1, "ERROR: rgw_bucket_set_tag_timeout(): failed to read header\n");
841 header
.tag_timeout
= op
.tag_timeout
;
843 return write_bucket_header(hctx
, &header
);
846 static int read_key_entry(cls_method_context_t hctx
, const cls_rgw_obj_key
& key
,
847 string
*idx
, rgw_bucket_dir_entry
*entry
,
848 bool special_delete_marker_name
= false);
850 int rgw_bucket_prepare_op(cls_method_context_t hctx
, bufferlist
*in
, bufferlist
*out
)
852 CLS_LOG(10, "entered %s", __func__
);
854 rgw_cls_obj_prepare_op op
;
855 auto iter
= in
->cbegin();
858 } catch (ceph::buffer::error
& err
) {
859 CLS_LOG(1, "ERROR: rgw_bucket_prepare_op(): failed to decode request\n");
863 if (op
.tag
.empty()) {
864 CLS_LOG(1, "ERROR: tag is empty\n");
868 CLS_LOG(1, "rgw_bucket_prepare_op(): request: op=%d name=%s instance=%s tag=%s",
869 op
.op
, op
.key
.name
.c_str(), op
.key
.instance
.c_str(), op
.tag
.c_str());
874 rgw_bucket_dir_entry entry
;
875 int rc
= read_key_entry(hctx
, op
.key
, &idx
, &entry
);
876 if (rc
< 0 && rc
!= -ENOENT
)
879 bool noent
= (rc
== -ENOENT
);
883 if (noent
) { // no entry, initialize fields
885 entry
.ver
= rgw_bucket_entry_ver();
886 entry
.exists
= false;
887 entry
.locator
= op
.locator
;
890 // fill in proper state
891 rgw_bucket_pending_info info
;
892 info
.timestamp
= real_clock::now();
893 info
.state
= CLS_RGW_STATE_PENDING_MODIFY
;
895 entry
.pending_map
.insert(pair
<string
, rgw_bucket_pending_info
>(op
.tag
, info
));
897 // write out new key to disk
899 encode(entry
, info_bl
);
900 return cls_cxx_map_set_val(hctx
, idx
, &info_bl
);
// Subtract an entry's sizes from its category's stats in the header;
// used when an entry is removed or about to be overwritten.
// NOTE(review): the matching num_entries decrement (original line 908)
// is elided in this view; only the size fields are visible here.
903 static void unaccount_entry(rgw_bucket_dir_header
& header
,
904 rgw_bucket_dir_entry
& entry
)
907 rgw_bucket_category_stats
& stats
= header
.stats
[entry
.meta
.category
];
909 stats
.total_size
-= entry
.meta
.accounted_size
;
910 stats
.total_size_rounded
-=
911 cls_rgw_get_rounded_size(entry
.meta
.accounted_size
);
912 stats
.actual_size
-= entry
.meta
.size
;
// Debug helper: log a dir entry's version (pool:epoch), key name,
// instance, and locator.
916 static void log_entry(const char *func
, const char *str
, rgw_bucket_dir_entry
*entry
)
918 CLS_LOG(1, "%s: %s: ver=%ld:%llu name=%s instance=%s locator=%s", func
, str
,
919 (long)entry
->ver
.pool
, (unsigned long long)entry
->ver
.epoch
,
920 entry
->key
.name
.c_str(), entry
->key
.instance
.c_str(), entry
->locator
.c_str());
// Debug helper (OLH overload): log an OLH entry's epoch, key name,
// instance, and tag. NOTE(review): the final argument of the CLS_LOG
// call (original line 927, presumably the tag) is elided in this view.
923 static void log_entry(const char *func
, const char *str
, rgw_bucket_olh_entry
*entry
)
925 CLS_LOG(1, "%s: %s: epoch=%llu name=%s instance=%s tag=%s", func
, str
,
926 (unsigned long long)entry
->epoch
, entry
->key
.name
.c_str(), entry
->key
.instance
.c_str(),
// Fetch omap value `name` and decode it into *entry.
// NOTE(review): the template header / trailing parameter of the
// signature and the return statements are elided in this view. The
// token "¤t_entry" below is an encoding artifact of this view
// (HTML-entity mangling of "&current_entry"); it is left byte-identical
// here but should read "&current_entry" in the actual source.
931 static int read_omap_entry(cls_method_context_t hctx
, const std::string
& name
,
934 bufferlist current_entry
;
935 int rc
= cls_cxx_map_get_val(hctx
, name
, ¤t_entry
);
940 auto cur_iter
= current_entry
.cbegin();
942 decode(*entry
, cur_iter
);
943 } catch (ceph::buffer::error
& err
) {
// Decode failure is logged; the elided return presumably reports an
// error to the caller — confirm against the source.
944 CLS_LOG(1, "ERROR: %s: failed to decode entry", __func__
);
// Thin template wrapper over read_omap_entry that additionally logs the
// entry on success (forward-declared near original line 339).
// NOTE(review): the template header, the early-return on error, and the
// final return are elided in this view.
951 static int read_index_entry(cls_method_context_t hctx
, string
& name
, T
* entry
)
953 int ret
= read_omap_entry(hctx
, name
, entry
);
958 log_entry(__func__
, "existing entry", entry
);
962 static int read_key_entry(cls_method_context_t hctx
, const cls_rgw_obj_key
& key
,
963 string
*idx
, rgw_bucket_dir_entry
*entry
,
964 bool special_delete_marker_name
)
966 encode_obj_index_key(key
, idx
);
967 int rc
= read_index_entry(hctx
, *idx
, entry
);
972 if (key
.instance
.empty() &&
973 entry
->flags
& rgw_bucket_dir_entry::FLAG_VER_MARKER
) {
974 /* we only do it where key.instance is empty. In this case the
975 * delete marker will have a separate entry in the index to avoid
976 * collisions with the actual object, as it's mutable
978 if (special_delete_marker_name
) {
979 encode_obj_versioned_data_key(key
, idx
, true);
980 rc
= read_index_entry(hctx
, *idx
, entry
);
985 encode_obj_versioned_data_key(key
, idx
);
986 rc
= read_index_entry(hctx
, *idx
, entry
);
988 *entry
= rgw_bucket_dir_entry(); /* need to reset entry because we initialized it earlier */
996 // called by rgw_bucket_complete_op() for each item in op.remove_objs
997 static int complete_remove_obj(cls_method_context_t hctx
,
998 rgw_bucket_dir_header
& header
,
999 const cls_rgw_obj_key
& key
, bool log_op
)
1001 rgw_bucket_dir_entry entry
;
1003 int ret
= read_key_entry(hctx
, key
, &idx
, &entry
);
1005 CLS_LOG(1, "%s: read_key_entry name=%s instance=%s failed with %d",
1006 __func__
, key
.name
.c_str(), key
.instance
.c_str(), ret
);
1009 CLS_LOG(10, "%s: read entry name=%s instance=%s category=%d", __func__
,
1010 entry
.key
.name
.c_str(), entry
.key
.instance
.c_str(),
1011 int(entry
.meta
.category
));
1012 unaccount_entry(header
, entry
);
1015 ++header
.ver
; // increment index version, or we'll overwrite keys previously written
1016 const std::string tag
;
1017 ret
= log_index_operation(hctx
, key
, CLS_RGW_OP_DEL
, tag
, entry
.meta
.mtime
,
1018 entry
.ver
, CLS_RGW_STATE_COMPLETE
, header
.ver
,
1019 header
.max_marker
, 0, nullptr, nullptr, nullptr);
1025 ret
= cls_cxx_map_remove_key(hctx
, idx
);
1027 CLS_LOG(1, "%s: cls_cxx_map_remove_key failed with %d", __func__
, ret
);
1033 int rgw_bucket_complete_op(cls_method_context_t hctx
, bufferlist
*in
, bufferlist
*out
)
1035 CLS_LOG(10, "entered %s", __func__
);
1038 rgw_cls_obj_complete_op op
;
1039 auto iter
= in
->cbegin();
1042 } catch (ceph::buffer::error
& err
) {
1043 CLS_LOG(1, "ERROR: rgw_bucket_complete_op(): failed to decode request\n");
1047 CLS_LOG(1, "rgw_bucket_complete_op(): request: op=%d name=%s instance=%s ver=%lu:%llu tag=%s",
1048 op
.op
, op
.key
.name
.c_str(), op
.key
.instance
.c_str(),
1049 (unsigned long)op
.ver
.pool
, (unsigned long long)op
.ver
.epoch
,
1052 rgw_bucket_dir_header header
;
1053 int rc
= read_bucket_header(hctx
, &header
);
1055 CLS_LOG(1, "ERROR: rgw_bucket_complete_op(): failed to read header\n");
1059 rgw_bucket_dir_entry entry
;
1063 rc
= read_key_entry(hctx
, op
.key
, &idx
, &entry
);
1064 if (rc
== -ENOENT
) {
1067 entry
.meta
= op
.meta
;
1068 entry
.locator
= op
.locator
;
1070 } else if (rc
< 0) {
1074 entry
.index_ver
= header
.ver
;
1075 /* resetting entry flags, entry might have been previously a delete
1077 entry
.flags
&= rgw_bucket_dir_entry::FLAG_VER
;
1079 if (op
.tag
.size()) {
1080 auto pinter
= entry
.pending_map
.find(op
.tag
);
1081 if (pinter
== entry
.pending_map
.end()) {
1082 CLS_LOG(1, "ERROR: couldn't find tag for pending operation\n");
1085 entry
.pending_map
.erase(pinter
);
1088 if (op
.tag
.size() && op
.op
== CLS_RGW_OP_CANCEL
) {
1089 CLS_LOG(1, "rgw_bucket_complete_op(): cancel requested\n");
1090 } else if (op
.ver
.pool
== entry
.ver
.pool
&&
1091 op
.ver
.epoch
&& op
.ver
.epoch
<= entry
.ver
.epoch
) {
1092 CLS_LOG(1, "rgw_bucket_complete_op(): skipping request, old epoch\n");
1093 op
.op
= CLS_RGW_OP_CANCEL
;
1096 // controls whether remove_objs deletions are logged
1097 const bool default_log_op
= op
.log_op
&& !header
.syncstopped
;
1098 // controls whether this operation is logged (depends on op.op and ondisk)
1099 bool log_op
= default_log_op
;
1102 if (op
.op
== CLS_RGW_OP_CANCEL
) {
1103 log_op
= false; // don't log cancelation
1104 if (op
.tag
.size()) {
1105 if (!entry
.exists
&& entry
.pending_map
.empty()) {
1106 // a racing delete succeeded, and we canceled the last pending op
1107 CLS_LOG(20, "INFO: %s: removing map entry with key=%s",
1108 __func__
, escape_str(idx
).c_str());
1109 rc
= cls_cxx_map_remove_key(hctx
, idx
);
1111 CLS_LOG(1, "ERROR: %s: unable to remove map key, key=%s, rc=%d",
1112 __func__
, escape_str(idx
).c_str(), rc
);
1116 // we removed this tag from pending_map so need to write the changes
1117 CLS_LOG(20, "INFO: %s: setting map entry at key=%s",
1118 __func__
, escape_str(idx
).c_str());
1119 bufferlist new_key_bl
;
1120 encode(entry
, new_key_bl
);
1121 rc
= cls_cxx_map_set_val(hctx
, idx
, &new_key_bl
);
1123 CLS_LOG(1, "ERROR: %s: unable to set map val, key=%s, rc=%d",
1124 __func__
, escape_str(idx
).c_str(), rc
);
1129 } // CLS_RGW_OP_CANCEL
1130 else if (op
.op
== CLS_RGW_OP_DEL
) {
1131 // unaccount deleted entry
1132 unaccount_entry(header
, entry
);
1134 entry
.meta
= op
.meta
;
1136 // no entry to erase
1138 } else if (!entry
.pending_map
.size()) {
1139 rc
= cls_cxx_map_remove_key(hctx
, idx
);
1144 entry
.exists
= false;
1145 bufferlist new_key_bl
;
1146 encode(entry
, new_key_bl
);
1147 rc
= cls_cxx_map_set_val(hctx
, idx
, &new_key_bl
);
1153 else if (op
.op
== CLS_RGW_OP_ADD
) {
1154 // unaccount overwritten entry
1155 unaccount_entry(header
, entry
);
1157 rgw_bucket_dir_entry_meta
& meta
= op
.meta
;
1158 rgw_bucket_category_stats
& stats
= header
.stats
[meta
.category
];
1161 entry
.exists
= true;
1163 // account for new entry
1164 stats
.num_entries
++;
1165 stats
.total_size
+= meta
.accounted_size
;
1166 stats
.total_size_rounded
+= cls_rgw_get_rounded_size(meta
.accounted_size
);
1167 stats
.actual_size
+= meta
.size
;
1168 bufferlist new_key_bl
;
1169 encode(entry
, new_key_bl
);
1170 rc
= cls_cxx_map_set_val(hctx
, idx
, &new_key_bl
);
1177 rc
= log_index_operation(hctx
, op
.key
, op
.op
, op
.tag
, entry
.meta
.mtime
,
1178 entry
.ver
, CLS_RGW_STATE_COMPLETE
, header
.ver
,
1179 header
.max_marker
, op
.bilog_flags
, NULL
, NULL
,
1186 CLS_LOG(20, "rgw_bucket_complete_op(): remove_objs.size()=%d",
1187 (int)op
.remove_objs
.size());
1188 for (const auto& remove_key
: op
.remove_objs
) {
1189 rc
= complete_remove_obj(hctx
, header
, remove_key
, default_log_op
);
1191 continue; // part cleanup errors are not fatal
1195 return write_bucket_header(hctx
, &header
);
1196 } // rgw_bucket_complete_op
// Encode `entry` and store it under omap key `key`.
// NOTE(review): the template header, the bufferlist `bl` declaration,
// and the encode(entry, bl) call are elided in this view.
1199 static int write_entry(cls_method_context_t hctx
, T
& entry
, const string
& key
)
1203 return cls_cxx_map_set_val(hctx
, key
, &bl
);
1206 static int read_olh(cls_method_context_t hctx
,cls_rgw_obj_key
& obj_key
, rgw_bucket_olh_entry
*olh_data_entry
, string
*index_key
, bool *found
)
1208 cls_rgw_obj_key olh_key
;
1209 olh_key
.name
= obj_key
.name
;
1211 encode_olh_data_key(olh_key
, index_key
);
1212 int ret
= read_index_entry(hctx
, *index_key
, olh_data_entry
);
1213 if (ret
< 0 && ret
!= -ENOENT
) {
1214 CLS_LOG(0, "ERROR: read_index_entry() olh_key=%s ret=%d", olh_key
.name
.c_str(), ret
);
1218 *found
= (ret
!= -ENOENT
);
// Append a pending-log record to the OLH entry, keyed by the OLH's
// current epoch; the record captures the op tag, target key, whether it
// is a delete marker, and the operation epoch.
// NOTE(review): original line 1229 (presumably `log_entry.op = op;`)
// and surrounding braces are elided in this view — confirm.
1223 static void update_olh_log(rgw_bucket_olh_entry
& olh_data_entry
, OLHLogOp op
, const string
& op_tag
,
1224 cls_rgw_obj_key
& key
, bool delete_marker
, uint64_t epoch
)
// All records for the OLH's current epoch share one log vector.
1226 vector
<rgw_bucket_olh_log_entry
>& log
= olh_data_entry
.pending_log
[olh_data_entry
.epoch
];
1227 rgw_bucket_olh_log_entry log_entry
;
1228 log_entry
.epoch
= epoch
;
1230 log_entry
.op_tag
= op_tag
;
1231 log_entry
.key
= key
;
1232 log_entry
.delete_marker
= delete_marker
;
1233 log
.push_back(log_entry
);
1236 static int write_obj_instance_entry(cls_method_context_t hctx
, rgw_bucket_dir_entry
& instance_entry
, const string
& instance_idx
)
1238 CLS_LOG(20, "write_entry() instance=%s idx=%s flags=%d", escape_str(instance_entry
.key
.instance
).c_str(), instance_idx
.c_str(), instance_entry
.flags
);
1239 /* write the instance entry */
1240 int ret
= write_entry(hctx
, instance_entry
, instance_idx
);
1242 CLS_LOG(0, "ERROR: write_entry() instance_key=%s ret=%d", escape_str(instance_idx
).c_str(), ret
);
1249 * write object instance entry, and if needed also the list entry
1251 static int write_obj_entries(cls_method_context_t hctx
, rgw_bucket_dir_entry
& instance_entry
, const string
& instance_idx
)
1253 int ret
= write_obj_instance_entry(hctx
, instance_entry
, instance_idx
);
1257 string instance_list_idx
;
1258 get_list_index_key(instance_entry
, &instance_list_idx
);
1260 if (instance_idx
!= instance_list_idx
) {
1261 CLS_LOG(20, "write_entry() idx=%s flags=%d", escape_str(instance_list_idx
).c_str(), instance_entry
.flags
);
1262 /* write a new list entry for the object instance */
1263 ret
= write_entry(hctx
, instance_entry
, instance_list_idx
);
1265 CLS_LOG(0, "ERROR: write_entry() instance=%s instance_list_idx=%s ret=%d", instance_entry
.key
.instance
.c_str(), instance_list_idx
.c_str(), ret
);
1273 class BIVerObjEntry
{
1274 cls_method_context_t hctx
;
1275 cls_rgw_obj_key key
;
1276 string instance_idx
;
1278 rgw_bucket_dir_entry instance_entry
;
1283 BIVerObjEntry(cls_method_context_t
& _hctx
, const cls_rgw_obj_key
& _key
) : hctx(_hctx
), key(_key
), initialized(false) {
1287 int init(bool check_delete_marker
= true) {
1288 int ret
= read_key_entry(hctx
, key
, &instance_idx
, &instance_entry
,
1289 check_delete_marker
&& key
.instance
.empty()); /* this is potentially a delete marker, for null objects we
1290 keep separate instance entry for the delete markers */
1293 CLS_LOG(0, "ERROR: read_key_entry() idx=%s ret=%d", instance_idx
.c_str(), ret
);
1297 CLS_LOG(20, "read instance_entry key.name=%s key.instance=%s flags=%d", instance_entry
.key
.name
.c_str(), instance_entry
.key
.instance
.c_str(), instance_entry
.flags
);
1301 rgw_bucket_dir_entry
& get_dir_entry() {
1302 return instance_entry
;
1305 void init_as_delete_marker(rgw_bucket_dir_entry_meta
& meta
) {
1306 /* a deletion marker, need to initialize it, there's no instance entry for it yet */
1307 instance_entry
.key
= key
;
1308 instance_entry
.flags
= rgw_bucket_dir_entry::FLAG_DELETE_MARKER
;
1309 instance_entry
.meta
= meta
;
1310 instance_entry
.tag
= "delete-marker";
1315 void set_epoch(uint64_t epoch
) {
1316 instance_entry
.versioned_epoch
= epoch
;
1319 int unlink_list_entry() {
1321 /* this instance has a previous list entry, remove that entry */
1322 get_list_index_key(instance_entry
, &list_idx
);
1323 CLS_LOG(20, "unlink_list_entry() list_idx=%s", escape_str(list_idx
).c_str());
1324 int ret
= cls_cxx_map_remove_key(hctx
, list_idx
);
1326 CLS_LOG(0, "ERROR: cls_cxx_map_remove_key() list_idx=%s ret=%d", list_idx
.c_str(), ret
);
1333 /* remove the instance entry */
1334 CLS_LOG(20, "unlink() idx=%s", escape_str(instance_idx
).c_str());
1335 int ret
= cls_cxx_map_remove_key(hctx
, instance_idx
);
1337 CLS_LOG(0, "ERROR: cls_cxx_map_remove_key() instance_idx=%s ret=%d", instance_idx
.c_str(), ret
);
1343 int write_entries(uint64_t flags_set
, uint64_t flags_reset
) {
1350 instance_entry
.flags
&= ~flags_reset
;
1351 instance_entry
.flags
|= flags_set
;
1353 /* write the instance and list entries */
1354 bool special_delete_marker_key
= (instance_entry
.is_delete_marker() && instance_entry
.key
.instance
.empty());
1355 encode_obj_versioned_data_key(key
, &instance_idx
, special_delete_marker_key
);
1356 int ret
= write_obj_entries(hctx
, instance_entry
, instance_idx
);
1358 CLS_LOG(0, "ERROR: write_obj_entries() instance_idx=%s ret=%d", instance_idx
.c_str(), ret
);
1365 int write(uint64_t epoch
, bool current
) {
1366 if (instance_entry
.versioned_epoch
> 0) {
1367 CLS_LOG(20, "%s: instance_entry.versioned_epoch=%d epoch=%d", __func__
, (int)instance_entry
.versioned_epoch
, (int)epoch
);
1368 /* this instance has a previous list entry, remove that entry */
1369 int ret
= unlink_list_entry();
1375 uint64_t flags
= rgw_bucket_dir_entry::FLAG_VER
;
1377 flags
|= rgw_bucket_dir_entry::FLAG_CURRENT
;
1380 instance_entry
.versioned_epoch
= epoch
;
1381 return write_entries(flags
, 0);
1384 int demote_current() {
1385 return write_entries(0, rgw_bucket_dir_entry::FLAG_CURRENT
);
1388 bool is_delete_marker() {
1389 return instance_entry
.is_delete_marker();
1392 int find_next_key(cls_rgw_obj_key
*next_key
, bool *found
) {
1394 /* this instance has a previous list entry, remove that entry */
1395 get_list_index_key(instance_entry
, &list_idx
);
1396 /* this is the current head, need to update! */
1397 map
<string
, bufferlist
> keys
;
1399 string filter
= key
.name
; /* list key starts with key name, filter it to avoid a case where we cross to
1400 different namespace */
1401 int ret
= cls_cxx_map_get_vals(hctx
, list_idx
, filter
, 1, &keys
, &more
);
1406 if (keys
.size() < 1) {
1411 rgw_bucket_dir_entry next_entry
;
1413 auto last
= keys
.rbegin();
1415 auto iter
= last
->second
.cbegin();
1416 decode(next_entry
, iter
);
1417 } catch (ceph::buffer::error
& err
) {
1418 CLS_LOG(0, "ERROR; failed to decode entry: %s", last
->first
.c_str());
1422 *found
= (key
.name
== next_entry
.key
.name
);
1424 *next_key
= next_entry
.key
;
1431 return instance_entry
.meta
.mtime
;
1433 }; // class BIVerObjEntry
1437 cls_method_context_t hctx
;
1438 cls_rgw_obj_key key
;
1440 string olh_data_idx
;
1441 rgw_bucket_olh_entry olh_data_entry
;
1445 BIOLHEntry(cls_method_context_t
& _hctx
, const cls_rgw_obj_key
& _key
) : hctx(_hctx
), key(_key
), initialized(false) { }
1447 int init(bool *exists
) {
1449 int ret
= read_olh(hctx
, key
, &olh_data_entry
, &olh_data_idx
, exists
);
1458 bool start_modify(uint64_t candidate_epoch
) {
1459 if (candidate_epoch
) {
1460 if (candidate_epoch
< olh_data_entry
.epoch
) {
1461 return false; /* olh cannot be modified, old epoch */
1463 olh_data_entry
.epoch
= candidate_epoch
;
1465 if (olh_data_entry
.epoch
== 0) {
1466 olh_data_entry
.epoch
= 2; /* versioned epoch should start with 2, 1 is reserved to converted plain entries */
1468 olh_data_entry
.epoch
++;
1474 uint64_t get_epoch() {
1475 return olh_data_entry
.epoch
;
1478 rgw_bucket_olh_entry
& get_entry() {
1479 return olh_data_entry
;
1482 void update(cls_rgw_obj_key
& key
, bool delete_marker
) {
1483 olh_data_entry
.delete_marker
= delete_marker
;
1484 olh_data_entry
.key
= key
;
1488 /* write the olh data entry */
1489 int ret
= write_entry(hctx
, olh_data_entry
, olh_data_idx
);
1491 CLS_LOG(0, "ERROR: write_entry() olh_key=%s ret=%d", olh_data_idx
.c_str(), ret
);
1498 void update_log(OLHLogOp op
, const string
& op_tag
, cls_rgw_obj_key
& key
, bool delete_marker
, uint64_t epoch
= 0) {
1500 epoch
= olh_data_entry
.epoch
;
1502 update_olh_log(olh_data_entry
, op
, op_tag
, key
, delete_marker
, epoch
);
1505 bool exists() { return olh_data_entry
.exists
; }
1507 void set_exists(bool exists
) {
1508 olh_data_entry
.exists
= exists
;
1511 bool pending_removal() { return olh_data_entry
.pending_removal
; }
1513 void set_pending_removal(bool pending_removal
) {
1514 olh_data_entry
.pending_removal
= pending_removal
;
1517 const string
& get_tag() { return olh_data_entry
.tag
; }
1518 void set_tag(const string
& tag
) {
1519 olh_data_entry
.tag
= tag
;
1523 static int write_version_marker(cls_method_context_t hctx
, cls_rgw_obj_key
& key
)
1525 rgw_bucket_dir_entry entry
;
1527 entry
.flags
= rgw_bucket_dir_entry::FLAG_VER_MARKER
;
1528 int ret
= write_entry(hctx
, entry
, key
.name
);
1530 CLS_LOG(0, "ERROR: write_entry returned ret=%d", ret
);
1537 * plain entries are the ones who were created when bucket was not
1538 * versioned, if we override these objects, we need to convert these
1539 * to versioned entries -- ones that have both data entry, and listing
1540 * key. Their version is going to be empty though
1542 static int convert_plain_entry_to_versioned(cls_method_context_t hctx
,
1543 cls_rgw_obj_key
& key
,
1544 bool demote_current
,
1547 if (!key
.instance
.empty()) {
1551 rgw_bucket_dir_entry entry
;
1554 int ret
= read_key_entry(hctx
, key
, &orig_idx
, &entry
);
1555 if (ret
!= -ENOENT
) {
1557 CLS_LOG(0, "ERROR: read_key_entry() returned ret=%d", ret
);
1561 entry
.versioned_epoch
= 1; /* converted entries are always 1 */
1562 entry
.flags
|= rgw_bucket_dir_entry::FLAG_VER
;
1564 if (demote_current
) {
1565 entry
.flags
&= ~rgw_bucket_dir_entry::FLAG_CURRENT
;
1569 encode_obj_versioned_data_key(key
, &new_idx
);
1571 if (instance_only
) {
1572 ret
= write_obj_instance_entry(hctx
, entry
, new_idx
);
1574 ret
= write_obj_entries(hctx
, entry
, new_idx
);
1577 CLS_LOG(0, "ERROR: write_obj_entries new_idx=%s returned %d",
1578 new_idx
.c_str(), ret
);
1583 ret
= write_version_marker(hctx
, key
);
1592 * Link an object version to an olh, update the relevant index
1593 * entries. It will also handle the deletion marker case. We have a
1594 * few entries that we need to take care of. For object 'foo',
1595 * instance BAR, we'd update the following (not actual encoding):
1597 * - olh data: [BI_BUCKET_OLH_DATA_INDEX]foo
1598 * - object instance data: [BI_BUCKET_OBJ_INSTANCE_INDEX]foo,BAR
1599 * - object instance list entry: foo,123,BAR
1601 * The instance list entry needs to be ordered by newer to older, so
1602 * we generate an appropriate number string that follows the name.
1603 * The top instance for each object is marked appropriately. We
1604 * generate instance entry for deletion markers here, as they are not
1607 static int rgw_bucket_link_olh(cls_method_context_t hctx
, bufferlist
*in
, bufferlist
*out
)
1609 CLS_LOG(10, "entered %s", __func__
);
1610 string olh_data_idx
;
1611 string instance_idx
;
1614 rgw_cls_link_olh_op op
;
1615 auto iter
= in
->cbegin();
1618 } catch (ceph::buffer::error
& err
) {
1619 CLS_LOG(0, "ERROR: rgw_bucket_link_olh_op(): failed to decode request\n");
1623 /* read instance entry */
1624 BIVerObjEntry
obj(hctx
, op
.key
);
1625 int ret
= obj
.init(op
.delete_marker
);
1627 /* NOTE: When a delete is issued, a key instance is always provided,
1628 * either the one for which the delete is requested or a new random
1629 * one when no instance is specified. So we need to see which of
1630 * these two cases we're dealing with. The variable `existed` will
1631 * be true if the instance was specified and false if it was
1632 * randomly generated. It might have been cleaner if the instance
1633 * were empty and randomly generated here and returned in the reply,
1634 * as that would better allow a typo in the instance id. This code
1635 * should be audited and possibly cleaned up. */
1637 bool existed
= (ret
== 0);
1638 if (ret
== -ENOENT
&& op
.delete_marker
) {
1645 BIOLHEntry
olh(hctx
, op
.key
);
1646 bool olh_read_attempt
= false;
1647 bool olh_found
= false;
1648 if (!existed
&& op
.delete_marker
) {
1650 ret
= olh
.init(&olh_found
);
1654 olh_read_attempt
= true;
1656 // if we're deleting (i.e., adding a delete marker, and the OLH
1657 // indicates it already refers to a delete marker, error out)
1658 if (olh_found
&& olh
.get_entry().delete_marker
) {
1660 "%s: delete marker received for \"%s\" although OLH"
1661 " already refers to a delete marker",
1662 __func__
, escape_str(op
.key
.to_string()).c_str());
1667 if (existed
&& !real_clock::is_zero(op
.unmod_since
)) {
1668 timespec mtime
= ceph::real_clock::to_timespec(obj
.mtime());
1669 timespec unmod
= ceph::real_clock::to_timespec(op
.unmod_since
);
1670 if (!op
.high_precision_time
) {
1674 if (mtime
>= unmod
) {
1675 return 0; /* no need tof set error, we just return 0 and avoid
1676 * writing to the bi log */
1683 * Special handling for null instance object / delete-marker. For
1684 * these objects we're going to have separate instances for a data
1685 * object vs. delete-marker to avoid collisions. We now check if we
1686 * got to overwrite a previous entry, and in that case we'll remove
1689 if (op
.key
.instance
.empty()) {
1690 BIVerObjEntry
other_obj(hctx
, op
.key
);
1691 ret
= other_obj
.init(!op
.delete_marker
); /* try reading the other
1694 existed
= (ret
>= 0 && !other_obj
.is_delete_marker());
1695 if (ret
>= 0 && other_obj
.is_delete_marker() != op
.delete_marker
) {
1696 ret
= other_obj
.unlink_list_entry();
1702 removing
= existed
&& op
.delete_marker
;
1704 ret
= other_obj
.unlink();
1710 removing
= (existed
&& !obj
.is_delete_marker() && op
.delete_marker
);
1713 if (op
.delete_marker
) {
1714 /* a deletion marker, need to initialize entry as such */
1715 obj
.init_as_delete_marker(op
.meta
);
1719 if (!olh_read_attempt
) { // only read if we didn't attempt earlier
1720 ret
= olh
.init(&olh_found
);
1724 olh_read_attempt
= true;
1727 const uint64_t prev_epoch
= olh
.get_epoch();
1729 if (!olh
.start_modify(op
.olh_epoch
)) {
1730 ret
= obj
.write(op
.olh_epoch
, false);
1735 olh
.update_log(CLS_RGW_OLH_OP_REMOVE_INSTANCE
, op
.op_tag
, op
.key
, false, op
.olh_epoch
);
1740 // promote this version to current if it's a newer epoch, or if it matches the
1741 // current epoch and sorts after the current instance
1742 const bool promote
= (olh
.get_epoch() > prev_epoch
) ||
1743 (olh
.get_epoch() == prev_epoch
&&
1744 olh
.get_entry().key
.instance
>= op
.key
.instance
);
1747 const string
& olh_tag
= olh
.get_tag();
1748 if (op
.olh_tag
!= olh_tag
) {
1749 if (!olh
.pending_removal()) {
1750 CLS_LOG(5, "NOTICE: op.olh_tag (%s) != olh.tag (%s)", op
.olh_tag
.c_str(), olh_tag
.c_str());
1753 /* if pending removal, this is a new olh instance */
1754 olh
.set_tag(op
.olh_tag
);
1756 if (promote
&& olh
.exists()) {
1757 rgw_bucket_olh_entry
& olh_entry
= olh
.get_entry();
1758 /* found olh, previous instance is no longer the latest, need to update */
1759 if (!(olh_entry
.key
== op
.key
)) {
1760 BIVerObjEntry
old_obj(hctx
, olh_entry
.key
);
1762 ret
= old_obj
.demote_current();
1764 CLS_LOG(0, "ERROR: could not demote current on previous key ret=%d", ret
);
1769 olh
.set_pending_removal(false);
1771 bool instance_only
= (op
.key
.instance
.empty() && op
.delete_marker
);
1772 cls_rgw_obj_key
key(op
.key
.name
);
1773 ret
= convert_plain_entry_to_versioned(hctx
, key
, promote
, instance_only
);
1775 CLS_LOG(0, "ERROR: convert_plain_entry_to_versioned ret=%d", ret
);
1778 olh
.set_tag(op
.olh_tag
);
1781 /* update the olh log */
1782 olh
.update_log(CLS_RGW_OLH_OP_LINK_OLH
, op
.op_tag
, op
.key
, op
.delete_marker
);
1784 olh
.update_log(CLS_RGW_OLH_OP_REMOVE_INSTANCE
, op
.op_tag
, op
.key
, false);
1788 olh
.update(op
.key
, op
.delete_marker
);
1790 olh
.set_exists(true);
1794 CLS_LOG(0, "ERROR: failed to update olh ret=%d", ret
);
1798 /* write the instance and list entries */
1799 ret
= obj
.write(olh
.get_epoch(), promote
);
1808 rgw_bucket_dir_header header
;
1809 ret
= read_bucket_header(hctx
, &header
);
1811 CLS_LOG(1, "ERROR: rgw_bucket_link_olh(): failed to read header\n");
1814 if (header
.syncstopped
) {
1818 rgw_bucket_dir_entry
& entry
= obj
.get_dir_entry();
1820 rgw_bucket_entry_ver ver
;
1821 ver
.epoch
= (op
.olh_epoch
? op
.olh_epoch
: olh
.get_epoch());
1823 string
*powner
= NULL
;
1824 string
*powner_display_name
= NULL
;
1826 if (op
.delete_marker
) {
1827 powner
= &entry
.meta
.owner
;
1828 powner_display_name
= &entry
.meta
.owner_display_name
;
1831 RGWModifyOp operation
= (op
.delete_marker
? CLS_RGW_OP_LINK_OLH_DM
: CLS_RGW_OP_LINK_OLH
);
1832 ret
= log_index_operation(hctx
, op
.key
, operation
, op
.op_tag
,
1833 entry
.meta
.mtime
, ver
,
1834 CLS_RGW_STATE_COMPLETE
, header
.ver
, header
.max_marker
, op
.bilog_flags
| RGW_BILOG_FLAG_VERSIONED_OP
,
1835 powner
, powner_display_name
, &op
.zones_trace
);
1839 return write_bucket_header(hctx
, &header
); /* updates header version */
1842 static int rgw_bucket_unlink_instance(cls_method_context_t hctx
, bufferlist
*in
, bufferlist
*out
)
1844 CLS_LOG(10, "entered %s", __func__
);
1845 string olh_data_idx
;
1846 string instance_idx
;
1849 rgw_cls_unlink_instance_op op
;
1850 auto iter
= in
->cbegin();
1853 } catch (ceph::buffer::error
& err
) {
1854 CLS_LOG(0, "ERROR: rgw_bucket_rm_obj_instance_op(): failed to decode request\n");
1858 cls_rgw_obj_key dest_key
= op
.key
;
1859 if (dest_key
.instance
== "null") {
1860 dest_key
.instance
.clear();
1863 BIVerObjEntry
obj(hctx
, dest_key
);
1864 BIOLHEntry
olh(hctx
, dest_key
);
1866 int ret
= obj
.init();
1867 if (ret
== -ENOENT
) {
1868 return 0; /* already removed */
1871 CLS_LOG(0, "ERROR: obj.init() returned ret=%d", ret
);
1876 ret
= olh
.init(&olh_found
);
1878 CLS_LOG(0, "ERROR: olh.init() returned ret=%d", ret
);
1883 bool instance_only
= false;
1884 cls_rgw_obj_key
key(dest_key
.name
);
1885 ret
= convert_plain_entry_to_versioned(hctx
, key
, true, instance_only
);
1887 CLS_LOG(0, "ERROR: convert_plain_entry_to_versioned ret=%d", ret
);
1890 olh
.update(dest_key
, false);
1891 olh
.set_tag(op
.olh_tag
);
1896 if (!olh
.start_modify(op
.olh_epoch
)) {
1897 ret
= obj
.unlink_list_entry();
1902 if (obj
.is_delete_marker()) {
1906 olh
.update_log(CLS_RGW_OLH_OP_REMOVE_INSTANCE
, op
.op_tag
, op
.key
, false, op
.olh_epoch
);
1910 rgw_bucket_olh_entry
& olh_entry
= olh
.get_entry();
1911 cls_rgw_obj_key
& olh_key
= olh_entry
.key
;
1912 CLS_LOG(20, "%s: updating olh log: existing olh entry: %s[%s] (delete_marker=%d)", __func__
,
1913 olh_key
.name
.c_str(), olh_key
.instance
.c_str(), olh_entry
.delete_marker
);
1915 if (olh_key
== dest_key
) {
1916 /* this is the current head, need to update! */
1917 cls_rgw_obj_key next_key
;
1919 ret
= obj
.find_next_key(&next_key
, &found
);
1921 CLS_LOG(0, "ERROR: obj.find_next_key() returned ret=%d", ret
);
1926 BIVerObjEntry
next(hctx
, next_key
);
1927 ret
= next
.write(olh
.get_epoch(), true);
1929 CLS_LOG(0, "ERROR: next.write() returned ret=%d", ret
);
1933 CLS_LOG(20, "%s: updating olh log: link olh -> %s[%s] (is_delete=%d)", __func__
,
1934 next_key
.name
.c_str(), next_key
.instance
.c_str(), (int)next
.is_delete_marker());
1936 olh
.update(next_key
, next
.is_delete_marker());
1937 olh
.update_log(CLS_RGW_OLH_OP_LINK_OLH
, op
.op_tag
, next_key
, next
.is_delete_marker());
1939 // next_key is empty, but we need to preserve its name in case this entry
1940 // gets resharded, because this key is used for hash placement
1941 next_key
.name
= dest_key
.name
;
1942 olh
.update(next_key
, false);
1943 olh
.update_log(CLS_RGW_OLH_OP_UNLINK_OLH
, op
.op_tag
, next_key
, false);
1944 olh
.set_exists(false);
1945 olh
.set_pending_removal(true);
1949 if (!obj
.is_delete_marker()) {
1950 olh
.update_log(CLS_RGW_OLH_OP_REMOVE_INSTANCE
, op
.op_tag
, op
.key
, false);
1952 /* this is a delete marker, it's our responsibility to remove its
1960 ret
= obj
.unlink_list_entry();
1974 rgw_bucket_dir_header header
;
1975 ret
= read_bucket_header(hctx
, &header
);
1977 CLS_LOG(1, "ERROR: rgw_bucket_unlink_instance(): failed to read header\n");
1980 if (header
.syncstopped
) {
1984 rgw_bucket_entry_ver ver
;
1985 ver
.epoch
= (op
.olh_epoch
? op
.olh_epoch
: olh
.get_epoch());
1987 real_time mtime
= obj
.mtime(); /* mtime has no real meaning in
1988 * instance removal context */
1989 ret
= log_index_operation(hctx
, op
.key
, CLS_RGW_OP_UNLINK_INSTANCE
, op
.op_tag
,
1991 CLS_RGW_STATE_COMPLETE
, header
.ver
, header
.max_marker
,
1992 op
.bilog_flags
| RGW_BILOG_FLAG_VERSIONED_OP
, NULL
, NULL
, &op
.zones_trace
);
1996 return write_bucket_header(hctx
, &header
); /* updates header version */
1999 static int rgw_bucket_read_olh_log(cls_method_context_t hctx
, bufferlist
*in
, bufferlist
*out
)
2001 CLS_LOG(10, "entered %s", __func__
);
2003 rgw_cls_read_olh_log_op op
;
2004 auto iter
= in
->cbegin();
2007 } catch (ceph::buffer::error
& err
) {
2008 CLS_LOG(0, "ERROR: rgw_bucket_read_olh_log(): failed to decode request\n");
2012 if (!op
.olh
.instance
.empty()) {
2013 CLS_LOG(1, "bad key passed in (non empty instance)");
2017 rgw_bucket_olh_entry olh_data_entry
;
2018 string olh_data_key
;
2019 encode_olh_data_key(op
.olh
, &olh_data_key
);
2020 int ret
= read_index_entry(hctx
, olh_data_key
, &olh_data_entry
);
2021 if (ret
< 0 && ret
!= -ENOENT
) {
2022 CLS_LOG(0, "ERROR: read_index_entry() olh_key=%s ret=%d", olh_data_key
.c_str(), ret
);
2026 if (olh_data_entry
.tag
!= op
.olh_tag
) {
2027 CLS_LOG(1, "NOTICE: %s: olh_tag_mismatch olh_data_entry.tag=%s op.olh_tag=%s", __func__
, olh_data_entry
.tag
.c_str(), op
.olh_tag
.c_str());
2031 rgw_cls_read_olh_log_ret op_ret
;
2033 #define MAX_OLH_LOG_ENTRIES 1000
2034 map
<uint64_t, vector
<rgw_bucket_olh_log_entry
> >& log
= olh_data_entry
.pending_log
;
2036 if (log
.begin()->first
> op
.ver_marker
&& log
.size() <= MAX_OLH_LOG_ENTRIES
) {
2038 op_ret
.is_truncated
= false;
2040 auto iter
= log
.upper_bound(op
.ver_marker
);
2042 for (int i
= 0; i
< MAX_OLH_LOG_ENTRIES
&& iter
!= log
.end(); ++i
, ++iter
) {
2043 op_ret
.log
[iter
->first
] = iter
->second
;
2045 op_ret
.is_truncated
= (iter
!= log
.end());
2048 encode(op_ret
, *out
);
2053 static int rgw_bucket_trim_olh_log(cls_method_context_t hctx
, bufferlist
*in
, bufferlist
*out
)
2055 CLS_LOG(10, "entered %s", __func__
);
2057 rgw_cls_trim_olh_log_op op
;
2058 auto iter
= in
->cbegin();
2061 } catch (ceph::buffer::error
& err
) {
2062 CLS_LOG(0, "ERROR: rgw_bucket_trim_olh_log(): failed to decode request\n");
2066 if (!op
.olh
.instance
.empty()) {
2067 CLS_LOG(1, "bad key passed in (non empty instance)");
2071 /* read olh entry */
2072 rgw_bucket_olh_entry olh_data_entry
;
2073 string olh_data_key
;
2074 encode_olh_data_key(op
.olh
, &olh_data_key
);
2075 int ret
= read_index_entry(hctx
, olh_data_key
, &olh_data_entry
);
2076 if (ret
< 0 && ret
!= -ENOENT
) {
2077 CLS_LOG(0, "ERROR: read_index_entry() olh_key=%s ret=%d", olh_data_key
.c_str(), ret
);
2081 if (olh_data_entry
.tag
!= op
.olh_tag
) {
2082 CLS_LOG(1, "NOTICE: %s: olh_tag_mismatch olh_data_entry.tag=%s op.olh_tag=%s", __func__
, olh_data_entry
.tag
.c_str(), op
.olh_tag
.c_str());
2086 /* remove all versions up to and including ver from the pending map */
2087 auto& log
= olh_data_entry
.pending_log
;
2088 auto liter
= log
.begin();
2089 while (liter
!= log
.end() && liter
->first
<= op
.ver
) {
2090 auto rm_iter
= liter
;
2095 /* write the olh data entry */
2096 ret
= write_entry(hctx
, olh_data_entry
, olh_data_key
);
2098 CLS_LOG(0, "ERROR: write_entry() olh_key=%s ret=%d", olh_data_key
.c_str(), ret
);
2105 static int rgw_bucket_clear_olh(cls_method_context_t hctx
, bufferlist
*in
, bufferlist
*out
)
2107 CLS_LOG(10, "entered %s", __func__
);
2109 rgw_cls_bucket_clear_olh_op op
;
2110 auto iter
= in
->cbegin();
2113 } catch (ceph::buffer::error
& err
) {
2114 CLS_LOG(0, "ERROR: rgw_bucket_clear_olh(): failed to decode request\n");
2118 if (!op
.key
.instance
.empty()) {
2119 CLS_LOG(1, "bad key passed in (non empty instance)");
2123 /* read olh entry */
2124 rgw_bucket_olh_entry olh_data_entry
;
2125 string olh_data_key
;
2126 encode_olh_data_key(op
.key
, &olh_data_key
);
2127 int ret
= read_index_entry(hctx
, olh_data_key
, &olh_data_entry
);
2128 if (ret
< 0 && ret
!= -ENOENT
) {
2129 CLS_LOG(0, "ERROR: read_index_entry() olh_key=%s ret=%d", olh_data_key
.c_str(), ret
);
2133 if (olh_data_entry
.tag
!= op
.olh_tag
) {
2134 CLS_LOG(1, "NOTICE: %s: olh_tag_mismatch olh_data_entry.tag=%s op.olh_tag=%s", __func__
, olh_data_entry
.tag
.c_str(), op
.olh_tag
.c_str());
2138 ret
= cls_cxx_map_remove_key(hctx
, olh_data_key
);
2140 CLS_LOG(1, "NOTICE: %s: can't remove key %s ret=%d", __func__
, olh_data_key
.c_str(), ret
);
2144 rgw_bucket_dir_entry plain_entry
;
2146 /* read plain entry, make sure it's a versioned place holder */
2147 ret
= read_index_entry(hctx
, op
.key
.name
, &plain_entry
);
2148 if (ret
== -ENOENT
) {
2149 /* we're done, no entry existing */
2153 CLS_LOG(0, "ERROR: read_index_entry key=%s ret=%d", op
.key
.name
.c_str(), ret
);
2157 if ((plain_entry
.flags
& rgw_bucket_dir_entry::FLAG_VER_MARKER
) == 0) {
2158 /* it's not a version marker, don't remove it */
2162 ret
= cls_cxx_map_remove_key(hctx
, op
.key
.name
);
2164 CLS_LOG(1, "NOTICE: %s: can't remove key %s ret=%d", __func__
, op
.key
.name
.c_str(), ret
);
2171 int rgw_dir_suggest_changes(cls_method_context_t hctx
,
2172 bufferlist
*in
, bufferlist
*out
)
2174 CLS_LOG(1, "entered %s", __func__
);
2176 bufferlist header_bl
;
2177 rgw_bucket_dir_header header
;
2178 bool header_changed
= false;
2180 int rc
= read_bucket_header(hctx
, &header
);
2182 CLS_LOG(1, "ERROR: rgw_dir_suggest_changes(): failed to read header\n");
2186 timespan
tag_timeout(
2187 std::chrono::seconds(
2188 header
.tag_timeout
? header
.tag_timeout
: CEPH_RGW_TAG_TIMEOUT
));
2190 auto in_iter
= in
->cbegin();
2192 while (!in_iter
.end()) {
2194 rgw_bucket_dir_entry cur_change
;
2195 rgw_bucket_dir_entry cur_disk
;
2197 decode(op
, in_iter
);
2198 decode(cur_change
, in_iter
);
2199 } catch (ceph::buffer::error
& err
) {
2200 CLS_LOG(1, "ERROR: rgw_dir_suggest_changes(): failed to decode request\n");
2204 bufferlist cur_disk_bl
;
2205 string cur_change_key
;
2206 encode_obj_index_key(cur_change
.key
, &cur_change_key
);
2207 int ret
= cls_cxx_map_get_val(hctx
, cur_change_key
, &cur_disk_bl
);
2208 if (ret
< 0 && ret
!= -ENOENT
)
2211 if (ret
== -ENOENT
) {
2215 if (cur_disk_bl
.length()) {
2216 auto cur_disk_iter
= cur_disk_bl
.cbegin();
2218 decode(cur_disk
, cur_disk_iter
);
2219 } catch (ceph::buffer::error
& error
) {
2220 CLS_LOG(1, "ERROR: rgw_dir_suggest_changes(): failed to decode cur_disk\n");
2224 // remove any pending entries whose tag timeout has expired. until expiry,
2225 // these pending entries will prevent us from applying suggested changes
2226 real_time cur_time
= real_clock::now();
2227 auto iter
= cur_disk
.pending_map
.begin();
2228 while(iter
!= cur_disk
.pending_map
.end()) {
2229 auto cur_iter
= iter
++;
2230 if (cur_time
> (cur_iter
->second
.timestamp
+ timespan(tag_timeout
))) {
2231 cur_disk
.pending_map
.erase(cur_iter
);
2236 CLS_LOG(20, "cur_disk.pending_map.empty()=%d op=%d cur_disk.exists=%d "
2237 "cur_disk.index_ver=%d cur_change.exists=%d cur_change.index_ver=%d",
2238 cur_disk
.pending_map
.empty(), (int)op
, cur_disk
.exists
,
2239 (int)cur_disk
.index_ver
, cur_change
.exists
,
2240 (int)cur_change
.index_ver
);
2242 if (cur_change
.index_ver
< cur_disk
.index_ver
) {
2243 // a pending on-disk entry was completed since this suggestion was made,
2244 // don't apply it yet. if the index really is inconsistent, the next
2245 // listing will get the latest version and resend the suggestion
2249 if (cur_disk
.pending_map
.empty()) {
2250 if (cur_disk
.exists
) {
2251 rgw_bucket_category_stats
& old_stats
= header
.stats
[cur_disk
.meta
.category
];
2252 CLS_LOG(10, "total_entries: %" PRId64
" -> %" PRId64
"", old_stats
.num_entries
, old_stats
.num_entries
- 1);
2253 old_stats
.num_entries
--;
2254 old_stats
.total_size
-= cur_disk
.meta
.accounted_size
;
2255 old_stats
.total_size_rounded
-= cls_rgw_get_rounded_size(cur_disk
.meta
.accounted_size
);
2256 old_stats
.actual_size
-= cur_disk
.meta
.size
;
2257 header_changed
= true;
2259 rgw_bucket_category_stats
& stats
= header
.stats
[cur_change
.meta
.category
];
2260 bool log_op
= (op
& CEPH_RGW_DIR_SUGGEST_LOG_OP
) != 0;
2261 op
&= CEPH_RGW_DIR_SUGGEST_OP_MASK
;
2263 case CEPH_RGW_REMOVE
:
2264 CLS_LOG(10, "CEPH_RGW_REMOVE name=%s instance=%s", cur_change
.key
.name
.c_str(), cur_change
.key
.instance
.c_str());
2265 ret
= cls_cxx_map_remove_key(hctx
, cur_change_key
);
2268 if (log_op
&& cur_disk
.exists
&& !header
.syncstopped
) {
2269 ret
= log_index_operation(hctx
, cur_disk
.key
, CLS_RGW_OP_DEL
, cur_disk
.tag
, cur_disk
.meta
.mtime
,
2270 cur_disk
.ver
, CLS_RGW_STATE_COMPLETE
, header
.ver
, header
.max_marker
, 0, NULL
, NULL
, NULL
);
2272 CLS_LOG(0, "ERROR: %s: failed to log operation ret=%d", __func__
, ret
);
2277 case CEPH_RGW_UPDATE
:
2278 CLS_LOG(10, "CEPH_RGW_UPDATE name=%s instance=%s total_entries: %" PRId64
" -> %" PRId64
"",
2279 cur_change
.key
.name
.c_str(), cur_change
.key
.instance
.c_str(), stats
.num_entries
, stats
.num_entries
+ 1);
2281 stats
.num_entries
++;
2282 stats
.total_size
+= cur_change
.meta
.accounted_size
;
2283 stats
.total_size_rounded
+= cls_rgw_get_rounded_size(cur_change
.meta
.accounted_size
);
2284 stats
.actual_size
+= cur_change
.meta
.size
;
2285 header_changed
= true;
2286 cur_change
.index_ver
= header
.ver
;
2287 bufferlist cur_state_bl
;
2288 encode(cur_change
, cur_state_bl
);
2289 ret
= cls_cxx_map_set_val(hctx
, cur_change_key
, &cur_state_bl
);
2292 if (log_op
&& !header
.syncstopped
) {
2293 ret
= log_index_operation(hctx
, cur_change
.key
, CLS_RGW_OP_ADD
, cur_change
.tag
, cur_change
.meta
.mtime
,
2294 cur_change
.ver
, CLS_RGW_STATE_COMPLETE
, header
.ver
, header
.max_marker
, 0, NULL
, NULL
, NULL
);
2296 CLS_LOG(0, "ERROR: %s: failed to log operation ret=%d", __func__
, ret
);
2302 } // if (cur_disk.pending_map.empty())
2303 } // while (!in_iter.end())
2305 if (header_changed
) {
2306 return write_bucket_header(hctx
, &header
);
2311 static int rgw_obj_remove(cls_method_context_t hctx
, bufferlist
*in
, bufferlist
*out
)
2313 CLS_LOG(10, "entered %s", __func__
);
2315 rgw_cls_obj_remove_op op
;
2316 auto iter
= in
->cbegin();
2319 } catch (ceph::buffer::error
& err
) {
2320 CLS_LOG(0, "ERROR: %s: failed to decode request", __func__
);
2324 if (op
.keep_attr_prefixes
.empty()) {
2325 return cls_cxx_remove(hctx
);
2328 map
<string
, bufferlist
> attrset
;
2329 int ret
= cls_cxx_getxattrs(hctx
, &attrset
);
2330 if (ret
< 0 && ret
!= -ENOENT
) {
2331 CLS_LOG(0, "ERROR: %s: cls_cxx_getxattrs() returned %d", __func__
, ret
);
2335 map
<string
, bufferlist
> new_attrs
;
2336 for (auto iter
= op
.keep_attr_prefixes
.begin();
2337 iter
!= op
.keep_attr_prefixes
.end(); ++iter
) {
2338 auto& check_prefix
= *iter
;
2340 for (auto aiter
= attrset
.lower_bound(check_prefix
);
2341 aiter
!= attrset
.end(); ++aiter
) {
2342 const string
& attr
= aiter
->first
;
2344 if (attr
.substr(0, check_prefix
.size()) > check_prefix
) {
2348 new_attrs
[attr
] = aiter
->second
;
2352 CLS_LOG(20, "%s: removing object", __func__
);
2353 ret
= cls_cxx_remove(hctx
);
2355 CLS_LOG(0, "ERROR: %s: cls_cxx_remove returned %d", __func__
, ret
);
2359 if (new_attrs
.empty()) {
2360 /* no data to keep */
2364 ret
= cls_cxx_create(hctx
, false);
2366 CLS_LOG(0, "ERROR: %s: cls_cxx_create returned %d", __func__
, ret
);
2370 for (auto aiter
= new_attrs
.begin();
2371 aiter
!= new_attrs
.end(); ++aiter
) {
2372 const auto& attr
= aiter
->first
;
2374 ret
= cls_cxx_setxattr(hctx
, attr
.c_str(), &aiter
->second
);
2375 CLS_LOG(20, "%s: setting attr: %s", __func__
, attr
.c_str());
2377 CLS_LOG(0, "ERROR: %s: cls_cxx_setxattr (attr=%s) returned %d", __func__
, attr
.c_str(), ret
);
2385 static int rgw_obj_store_pg_ver(cls_method_context_t hctx
, bufferlist
*in
, bufferlist
*out
)
2387 CLS_LOG(10, "entered %s", __func__
);
2389 rgw_cls_obj_store_pg_ver_op op
;
2390 auto iter
= in
->cbegin();
2393 } catch (ceph::buffer::error
& err
) {
2394 CLS_LOG(0, "ERROR: %s: failed to decode request", __func__
);
2399 uint64_t ver
= cls_current_version(hctx
);
2401 int ret
= cls_cxx_setxattr(hctx
, op
.attr
.c_str(), &bl
);
2403 CLS_LOG(0, "ERROR: %s: cls_cxx_setxattr (attr=%s) returned %d", __func__
, op
.attr
.c_str(), ret
);
2410 static int rgw_obj_check_attrs_prefix(cls_method_context_t hctx
, bufferlist
*in
, bufferlist
*out
)
2412 CLS_LOG(10, "entered %s", __func__
);
2414 rgw_cls_obj_check_attrs_prefix op
;
2415 auto iter
= in
->cbegin();
2418 } catch (ceph::buffer::error
& err
) {
2419 CLS_LOG(0, "ERROR: %s: failed to decode request", __func__
);
2423 if (op
.check_prefix
.empty()) {
2427 map
<string
, bufferlist
> attrset
;
2428 int ret
= cls_cxx_getxattrs(hctx
, &attrset
);
2429 if (ret
< 0 && ret
!= -ENOENT
) {
2430 CLS_LOG(0, "ERROR: %s: cls_cxx_getxattrs() returned %d", __func__
, ret
);
2436 for (auto aiter
= attrset
.lower_bound(op
.check_prefix
);
2437 aiter
!= attrset
.end(); ++aiter
) {
2438 const auto& attr
= aiter
->first
;
2440 if (attr
.substr(0, op
.check_prefix
.size()) > op
.check_prefix
) {
2447 if (exist
== op
.fail_if_exist
) {
2454 static int rgw_obj_check_mtime(cls_method_context_t hctx
, bufferlist
*in
, bufferlist
*out
)
2456 CLS_LOG(10, "entered %s", __func__
);
2458 rgw_cls_obj_check_mtime op
;
2459 auto iter
= in
->cbegin();
2462 } catch (ceph::buffer::error
& err
) {
2463 CLS_LOG(0, "ERROR: %s: failed to decode request", __func__
);
2468 int ret
= cls_cxx_stat2(hctx
, NULL
, &obj_ut
);
2469 if (ret
< 0 && ret
!= -ENOENT
) {
2470 CLS_LOG(0, "ERROR: %s: cls_cxx_stat() returned %d", __func__
, ret
);
2473 if (ret
== -ENOENT
) {
2474 CLS_LOG(10, "object does not exist, skipping check");
2477 ceph_timespec obj_ts
= ceph::real_clock::to_ceph_timespec(obj_ut
);
2478 ceph_timespec op_ts
= ceph::real_clock::to_ceph_timespec(op
.mtime
);
2480 if (!op
.high_precision_time
) {
2485 CLS_LOG(10, "%s: obj_ut=%lld.%06lld op.mtime=%lld.%06lld", __func__
,
2486 (long long)obj_ts
.tv_sec
, (long long)obj_ts
.tv_nsec
,
2487 (long long)op_ts
.tv_sec
, (long long)op_ts
.tv_nsec
);
2492 case CLS_RGW_CHECK_TIME_MTIME_EQ
:
2493 check
= (obj_ts
== op_ts
);
2495 case CLS_RGW_CHECK_TIME_MTIME_LT
:
2496 check
= (obj_ts
< op_ts
);
2498 case CLS_RGW_CHECK_TIME_MTIME_LE
:
2499 check
= (obj_ts
<= op_ts
);
2501 case CLS_RGW_CHECK_TIME_MTIME_GT
:
2502 check
= (obj_ts
> op_ts
);
2504 case CLS_RGW_CHECK_TIME_MTIME_GE
:
2505 check
= (obj_ts
>= op_ts
);
2518 static int rgw_bi_get_op(cls_method_context_t hctx
, bufferlist
*in
, bufferlist
*out
)
2520 CLS_LOG(10, "entered %s", __func__
);
2522 rgw_cls_bi_get_op op
;
2523 auto iter
= in
->cbegin();
2526 } catch (ceph::buffer::error
& err
) {
2527 CLS_LOG(0, "ERROR: %s: failed to decode request", __func__
);
2534 case BIIndexType::Plain
:
2537 case BIIndexType::Instance
:
2538 encode_obj_index_key(op
.key
, &idx
);
2540 case BIIndexType::OLH
:
2541 encode_olh_data_key(op
.key
, &idx
);
2544 CLS_LOG(10, "%s: invalid key type encoding: %d",
2545 __func__
, int(op
.type
));
2549 rgw_cls_bi_get_ret op_ret
;
2551 rgw_cls_bi_entry
& entry
= op_ret
.entry
;
2553 entry
.type
= op
.type
;
2556 int r
= cls_cxx_map_get_val(hctx
, idx
, &entry
.data
);
2558 CLS_LOG(10, "%s: cls_cxx_map_get_val() returned %d", __func__
, r
);
2562 encode(op_ret
, *out
);
2567 static int rgw_bi_put_op(cls_method_context_t hctx
, bufferlist
*in
, bufferlist
*out
)
2569 CLS_LOG(10, "entered %s", __func__
);
2571 rgw_cls_bi_put_op op
;
2572 auto iter
= in
->cbegin();
2575 } catch (ceph::buffer::error
& err
) {
2576 CLS_LOG(0, "ERROR: %s: failed to decode request", __func__
);
2580 rgw_cls_bi_entry
& entry
= op
.entry
;
2582 int r
= cls_cxx_map_set_val(hctx
, entry
.idx
, &entry
.data
);
2584 CLS_LOG(0, "ERROR: %s: cls_cxx_map_set_val() returned r=%d", __func__
, r
);
2591 /* The plain entries in the bucket index are divided into two regions
2592 * divided by the special entries that begin with 0x80. Those below
2593 * ("Low") are ascii entries. Those above ("High") bring in unicode
2594 * entries. This enum allows either or both regions to be listed in
2595 * list_plain_entries(). It's convenient that "Both" be in between the
2596 * others so we can use "<= Both" or ">= Both" logic.
2598 enum class PlainEntriesRegion
{
2603 /* Queries the omap for plain entries in the range of start_after_key
2604 * to end_key, non-inclusive. Both of those values must either be
2605 * before the "ugly namespace" or after it.
2607 * Negative return values indicate errors. Non-negative return values
2608 * indicate number of entries retrieved. */
2609 static int list_plain_entries_help(cls_method_context_t hctx
,
2610 const std::string
& name_filter
,
2611 const std::string
& start_after_key
, // exclusive
2612 const std::string
& end_key
, // exclusive
2614 std::list
<rgw_cls_bi_entry
>* entries
,
2615 bool& end_key_reached
,
2618 CLS_LOG(10, "Entered %s: name_filter=\"%s\", start_after_key=\"%s\", end_key=\"%s\", max=%d",
2619 __func__
, escape_str(name_filter
).c_str(), escape_str(start_after_key
).c_str(),
2620 escape_str(end_key
).c_str(), max
);
2622 std::map
<std::string
, bufferlist
> raw_entries
;
2623 int ret
= cls_cxx_map_get_vals(hctx
, start_after_key
, name_filter
, max
,
2624 &raw_entries
, &more
);
2625 CLS_LOG(20, "%s: cls_cxx_map_get_vals ret=%d, raw_entries.size()=%lu, more=%d",
2626 __func__
, ret
, raw_entries
.size(), more
);
2631 end_key_reached
= false;
2632 for (auto iter
: raw_entries
) {
2633 if (!end_key
.empty() && iter
.first
>= end_key
) {
2634 CLS_LOG(20, "%s: end key reached at \"%s\"",
2635 __func__
, escape_str(iter
.first
).c_str());
2636 end_key_reached
= true;
2641 rgw_bucket_dir_entry e
;
2642 auto biter
= iter
.second
.cbegin();
2645 } catch (ceph::buffer::error
& err
) {
2646 CLS_LOG(0, "ERROR: %s: failed to decode buffer for plain bucket index entry \"%s\"",
2647 __func__
, escape_str(iter
.first
).c_str());
2651 if (!name_filter
.empty() && e
.key
.name
> name_filter
) {
2652 CLS_LOG(20, "%s: due to filter \"%s\", skipping entry.idx=\"%s\" e.key.name=\"%s\"",
2654 escape_str(name_filter
).c_str(),
2655 escape_str(iter
.first
).c_str(),
2656 escape_str(e
.key
.name
).c_str());
2657 // skip the rest of the entries
2659 end_key_reached
= true;
2663 rgw_cls_bi_entry entry
;
2664 entry
.type
= BIIndexType::Plain
;
2665 entry
.idx
= iter
.first
;
2666 entry
.data
= iter
.second
;
2668 entries
->push_back(entry
);
2671 CLS_LOG(20, "%s: adding entry %d entry.idx=\"%s\" e.key.name=\"%s\"",
2674 escape_str(entry
.idx
).c_str(),
2675 escape_str(e
.key
.name
).c_str());
2677 if (count
>= int(max
)) {
2678 // NB: this looks redundant, but leave in for time being
2684 } // list_plain_entries_help
2687 * Lists plain entries in either or both regions, the region of those
2688 * beginning with an ASCII character or a non-ASCII character, which
2689 * surround the "ugly" namespace used by special entries for versioned
2692 * The entries parameter is not cleared and additional entries are
2695 static int list_plain_entries(cls_method_context_t hctx
,
2696 const std::string
& name_filter
,
2697 const std::string
& marker
,
2699 std::list
<rgw_cls_bi_entry
>* entries
,
2701 const PlainEntriesRegion region
= PlainEntriesRegion::Both
)
2703 CLS_LOG(10, "entered %s: name_filter=\"%s\", marker=\"%s\", max=%d, region=%d",
2704 __func__
, escape_str(name_filter
).c_str(), escape_str(marker
).c_str(), max
, static_cast<int>(region
));
2706 bool end_key_reached
= false;
2708 const size_t start_size
= entries
->size();
2710 if (region
<= PlainEntriesRegion::Both
&& marker
< BI_PREFIX_BEGIN
) {
2711 // listing ascii plain namespace
2712 int r
= list_plain_entries_help(hctx
, name_filter
, marker
, BI_PREFIX_BEGIN
, max
,
2713 entries
, end_key_reached
, more
);
2714 CLS_LOG(20, "%s: first list_plain_entries_help r=%d, end_key_reached=%d, more=%d",
2715 __func__
, r
, end_key_reached
, more
);
2720 // see if we're done for this call (there may be more for a later call)
2721 if (r
>= int(max
) || !end_key_reached
|| (!more
&& region
== PlainEntriesRegion::Low
)) {
2726 return int(entries
->size() - start_size
);
2732 if (region
>= PlainEntriesRegion::Both
) {
2733 const std::string start_after_key
= std::max(marker
, BI_PREFIX_END
);
2735 // listing non-ascii plain namespace
2736 r
= list_plain_entries_help(hctx
, name_filter
, start_after_key
, {}, max
,
2737 entries
, end_key_reached
, more
);
2738 CLS_LOG(20, "%s: second list_plain_entries_help r=%d, end_key_reached=%d, more=%d",
2739 __func__
, r
, end_key_reached
, more
);
2749 return int(entries
->size() - start_size
);
2752 static int list_instance_entries(cls_method_context_t hctx
,
2754 const string
& marker
,
2756 list
<rgw_cls_bi_entry
> *entries
,
2759 cls_rgw_obj_key
key(name
);
2760 string first_instance_idx
;
2761 encode_obj_versioned_data_key(key
, &first_instance_idx
);
2762 string start_after_key
;
2764 if (!name
.empty()) {
2765 start_after_key
= first_instance_idx
;
2767 start_after_key
= BI_PREFIX_CHAR
;
2768 start_after_key
.append(bucket_index_prefixes
[BI_BUCKET_OBJ_INSTANCE_INDEX
]);
2770 string filter
= start_after_key
;
2771 if (bi_entry_gt(marker
, start_after_key
)) {
2772 start_after_key
= marker
;
2775 map
<string
, bufferlist
> keys
;
2777 int ret
= cls_cxx_map_get_val(hctx
, start_after_key
, &k
);
2778 if (ret
< 0 && ret
!= -ENOENT
) {
2781 // we need to include the exact match if a filter (name) is
2782 // specified and the marker has not yet advanced (i.e., been set)
2783 bool found_first
= (ret
== 0) && (start_after_key
!= marker
);
2788 ret
= cls_cxx_map_get_vals(hctx
, start_after_key
, string(), max
,
2790 CLS_LOG(20, "%s: start_after_key=\"%s\" first_instance_idx=\"%s\" keys.size()=%d",
2791 __func__
, escape_str(start_after_key
).c_str(),
2792 escape_str(first_instance_idx
).c_str(), (int)keys
.size());
2798 keys
[start_after_key
] = std::move(k
);
2801 for (auto iter
= keys
.begin(); iter
!= keys
.end(); ++iter
) {
2802 rgw_cls_bi_entry entry
;
2803 entry
.type
= BIIndexType::Instance
;
2804 entry
.idx
= iter
->first
;
2805 entry
.data
= iter
->second
;
2807 if (!filter
.empty() && entry
.idx
.compare(0, filter
.size(), filter
) != 0) {
2808 /* we are skipping the rest of the entries */
2815 CLS_LOG(20, "%s: entry.idx=\"%s\"", __func__
, escape_str(entry
.idx
).c_str());
2817 auto biter
= entry
.data
.cbegin();
2819 rgw_bucket_dir_entry e
;
2822 } catch (ceph::buffer::error
& err
) {
2823 CLS_LOG(0, "ERROR: %s: failed to decode buffer (size=%d)", __func__
, entry
.data
.length());
2827 if (!name
.empty() && e
.key
.name
!= name
) {
2828 /* we are skipping the rest of the entries */
2835 entries
->push_back(entry
);
2837 start_after_key
= entry
.idx
;
2843 static int list_olh_entries(cls_method_context_t hctx
,
2845 const string
& marker
,
2847 list
<rgw_cls_bi_entry
> *entries
,
2850 cls_rgw_obj_key
key(name
);
2851 string first_instance_idx
;
2852 encode_olh_data_key(key
, &first_instance_idx
);
2853 string start_after_key
;
2855 if (!name
.empty()) {
2856 start_after_key
= first_instance_idx
;
2858 start_after_key
= BI_PREFIX_CHAR
;
2859 start_after_key
.append(bucket_index_prefixes
[BI_BUCKET_OLH_DATA_INDEX
]);
2861 string filter
= start_after_key
;
2862 if (bi_entry_gt(marker
, start_after_key
)) {
2863 start_after_key
= marker
;
2866 map
<string
, bufferlist
> keys
;
2869 ret
= cls_cxx_map_get_val(hctx
, start_after_key
, &k
);
2870 if (ret
< 0 && ret
!= -ENOENT
) {
2873 // we need to include the exact match if a filter (name) is
2874 // specified and the marker has not yet advanced (i.e., been set)
2875 bool found_first
= (ret
== 0) && (start_after_key
!= marker
);
2880 ret
= cls_cxx_map_get_vals(hctx
, start_after_key
, string(), max
,
2882 CLS_LOG(20, "%s: start_after_key=\"%s\", first_instance_idx=\"%s\", keys.size()=%d",
2883 __func__
, escape_str(start_after_key
).c_str(),
2884 escape_str(first_instance_idx
).c_str(), (int)keys
.size());
2891 keys
[start_after_key
] = std::move(k
);
2894 for (auto iter
= keys
.begin(); iter
!= keys
.end(); ++iter
) {
2895 rgw_cls_bi_entry entry
;
2896 entry
.type
= BIIndexType::OLH
;
2897 entry
.idx
= iter
->first
;
2898 entry
.data
= iter
->second
;
2900 if (!filter
.empty() && entry
.idx
.compare(0, filter
.size(), filter
) != 0) {
2901 /* we are skipping the rest of the entries */
2908 CLS_LOG(20, "%s: entry.idx=\"%s\"", __func__
, escape_str(entry
.idx
).c_str());
2910 auto biter
= entry
.data
.cbegin();
2912 rgw_bucket_olh_entry e
;
2915 } catch (ceph::buffer::error
& err
) {
2916 CLS_LOG(0, "ERROR: %s: failed to decode buffer (size=%d)", __func__
, entry
.data
.length());
2920 if (!name
.empty() && e
.key
.name
!= name
) {
2921 /* we are skipping the rest of the entries */
2928 entries
->push_back(entry
);
2930 start_after_key
= entry
.idx
;
2936 /* Lists all the entries that appear in a bucket index listing.
2938 * It may not be obvious why this function calls three other "segment"
2939 * functions (list_plain_entries (twice), list_instance_entries,
2940 * list_olh_entries) that each list segments of the index space rather
2941 * than just move a marker through the space from start to end. The
2942 * reason is that a name filter may be provided in the op, and in that
2943 * case most entries will be skipped over, and small segments within
2944 * each larger segment will be listed.
2946 * Ideally, each of the three segment functions should be able to
2947 * handle a marker and filter, if either/both is provided,
2948 * efficiently. So, for example, if the marker is after the segment,
2949 * ideally return quickly rather than iterating through entries in the
2952 * Additionally, each of the three segment functions, if successful,
2953 * is expected to return the number of entries added to the output
2954 * list as a non-negative value. As per usual, negative return values
2955 * indicate error conditions.
2957 static int rgw_bi_list_op(cls_method_context_t hctx
,
2961 CLS_LOG(10, "entered %s", __func__
);
2963 rgw_cls_bi_list_op op
;
2964 auto iter
= in
->cbegin();
2967 } catch (ceph::buffer::error
& err
) {
2968 CLS_LOG(0, "ERROR: %s: failed to decode request", __func__
);
2972 constexpr uint32_t MAX_BI_LIST_ENTRIES
= 1000;
2973 const uint32_t max
= std::min(op
.max
, MAX_BI_LIST_ENTRIES
);
2975 CLS_LOG(20, "%s: op.marker=\"%s\", op.name_filter=\"%s\", op.max=%u max=%u",
2976 __func__
, escape_str(op
.marker
).c_str(), escape_str(op
.name_filter
).c_str(),
2982 rgw_cls_bi_list_ret op_ret
;
2984 ret
= list_plain_entries(hctx
, op
.name_filter
, op
.marker
, max
,
2985 &op_ret
.entries
, &more
, PlainEntriesRegion::Low
);
2987 CLS_LOG(0, "ERROR: %s: list_plain_entries (low) returned ret=%d, marker=\"%s\", filter=\"%s\", max=%d",
2988 __func__
, ret
, escape_str(op
.marker
).c_str(), escape_str(op
.name_filter
).c_str(), max
);
2993 CLS_LOG(20, "%s: found %d plain ascii (low) entries, count=%u", __func__
, ret
, count
);
2996 ret
= list_instance_entries(hctx
, op
.name_filter
, op
.marker
, max
- count
, &op_ret
.entries
, &more
);
2998 CLS_LOG(0, "ERROR: %s: list_instance_entries returned ret=%d", __func__
, ret
);
3003 CLS_LOG(20, "%s: found %d instance entries, count=%u", __func__
, ret
, count
);
3007 ret
= list_olh_entries(hctx
, op
.name_filter
, op
.marker
, max
- count
, &op_ret
.entries
, &more
);
3009 CLS_LOG(0, "ERROR: %s: list_olh_entries returned ret=%d", __func__
, ret
);
3014 CLS_LOG(20, "%s: found %d olh entries, count=%u", __func__
, ret
, count
);
3018 ret
= list_plain_entries(hctx
, op
.name_filter
, op
.marker
, max
- count
,
3019 &op_ret
.entries
, &more
, PlainEntriesRegion::High
);
3021 CLS_LOG(0, "ERROR: %s: list_plain_entries (high) returned ret=%d, marker=\"%s\", filter=\"%s\", max=%d",
3022 __func__
, ret
, escape_str(op
.marker
).c_str(), escape_str(op
.name_filter
).c_str(), max
);
3027 CLS_LOG(20, "%s: found %d non-ascii (high) plain entries, count=%u", __func__
, ret
, count
);
3030 op_ret
.is_truncated
= (count
> max
) || more
;
3031 while (count
> max
) {
3032 op_ret
.entries
.pop_back();
3036 CLS_LOG(20, "%s: returning %lu entries, is_truncated=%d", __func__
, op_ret
.entries
.size(), op_ret
.is_truncated
);
3037 encode(op_ret
, *out
);
3043 int bi_log_record_decode(bufferlist
& bl
, rgw_bi_log_entry
& e
)
3045 auto iter
= bl
.cbegin();
3048 } catch (ceph::buffer::error
& err
) {
3049 CLS_LOG(0, "ERROR: failed to decode rgw_bi_log_entry");
3056 static int bi_log_iterate_entries(cls_method_context_t hctx
,
3057 const string
& marker
,
3058 const string
& end_marker
,
3060 uint32_t max_entries
,
3062 int (*cb
)(cls_method_context_t
, const string
&, rgw_bi_log_entry
&, void *),
3065 CLS_LOG(10, "bi_log_iterate_range");
3067 map
<string
, bufferlist
> keys
;
3068 string filter_prefix
, end_key
;
3075 string start_after_key
;
3076 if (key_iter
.empty()) {
3077 key
= BI_PREFIX_CHAR
;
3078 key
.append(bucket_index_prefixes
[BI_BUCKET_LOG_INDEX
]);
3081 start_after_key
= key
;
3083 start_after_key
= key_iter
;
3086 if (end_marker
.empty()) {
3087 end_key
= BI_PREFIX_CHAR
;
3088 end_key
.append(bucket_index_prefixes
[BI_BUCKET_LOG_INDEX
+ 1]);
3090 end_key
= BI_PREFIX_CHAR
;
3091 end_key
.append(bucket_index_prefixes
[BI_BUCKET_LOG_INDEX
]);
3092 end_key
.append(end_marker
);
3095 CLS_LOG(10, "bi_log_iterate_entries start_after_key=%s end_key=%s",
3096 start_after_key
.c_str(), end_key
.c_str());
3100 int ret
= cls_cxx_map_get_vals(hctx
, start_after_key
, filter
, max_entries
,
3105 auto iter
= keys
.begin();
3106 if (iter
== keys
.end())
3109 uint32_t num_keys
= keys
.size();
3111 for (; iter
!= keys
.end(); ++iter
,++i
) {
3112 const string
& key
= iter
->first
;
3115 CLS_LOG(10, "bi_log_iterate_entries key=%s bl.length=%d", key
.c_str(), (int)iter
->second
.length());
3117 if (key
.compare(end_key
) > 0) {
3125 ret
= bi_log_record_decode(iter
->second
, e
);
3129 ret
= cb(hctx
, key
, e
, param
);
3133 if (i
== num_keys
- 1) {
3141 static int bi_log_list_cb(cls_method_context_t hctx
, const string
& key
, rgw_bi_log_entry
& info
, void *param
)
3143 list
<rgw_bi_log_entry
> *l
= (list
<rgw_bi_log_entry
> *)param
;
3148 static int bi_log_list_entries(cls_method_context_t hctx
, const string
& marker
,
3149 uint32_t max
, list
<rgw_bi_log_entry
>& entries
, bool *truncated
)
3153 int ret
= bi_log_iterate_entries(hctx
, marker
, end_marker
,
3154 key_iter
, max
, truncated
,
3155 bi_log_list_cb
, &entries
);
3159 static int rgw_bi_log_list(cls_method_context_t hctx
, bufferlist
*in
, bufferlist
*out
)
3161 CLS_LOG(10, "entered %s", __func__
);
3162 auto in_iter
= in
->cbegin();
3164 cls_rgw_bi_log_list_op op
;
3166 decode(op
, in_iter
);
3167 } catch (ceph::buffer::error
& err
) {
3168 CLS_LOG(1, "ERROR: rgw_bi_log_list(): failed to decode entry\n");
3172 cls_rgw_bi_log_list_ret op_ret
;
3173 int ret
= bi_log_list_entries(hctx
, op
.marker
, op
.max
, op_ret
.entries
, &op_ret
.truncated
);
3177 encode(op_ret
, *out
);
3182 static int rgw_bi_log_trim(cls_method_context_t hctx
, bufferlist
*in
, bufferlist
*out
)
3184 CLS_LOG(10, "entered %s", __func__
);
3185 auto in_iter
= in
->cbegin();
3187 cls_rgw_bi_log_trim_op op
;
3189 decode(op
, in_iter
);
3190 } catch (ceph::buffer::error
& err
) {
3191 CLS_LOG(1, "ERROR: rgw_bi_log_list(): failed to decode entry\n");
3195 string
key_begin(1, BI_PREFIX_CHAR
);
3196 key_begin
.append(bucket_index_prefixes
[BI_BUCKET_LOG_INDEX
]);
3197 key_begin
.append(op
.start_marker
);
3200 if (op
.end_marker
.empty()) {
3201 key_end
= BI_PREFIX_CHAR
;
3202 key_end
.append(bucket_index_prefixes
[BI_BUCKET_LOG_INDEX
+ 1]);
3204 key_end
= BI_PREFIX_CHAR
;
3205 key_end
.append(bucket_index_prefixes
[BI_BUCKET_LOG_INDEX
]);
3206 key_end
.append(op
.end_marker
);
3207 // cls_cxx_map_remove_range() expects one-past-end
3208 key_end
.append(1, '\0');
3211 // list a single key to detect whether the range is empty
3212 const size_t max_entries
= 1;
3213 std::set
<std::string
> keys
;
3216 int rc
= cls_cxx_map_get_keys(hctx
, key_begin
, max_entries
, &keys
, &more
);
3218 CLS_LOG(1, "ERROR: cls_cxx_map_get_keys failed rc=%d", rc
);
3223 CLS_LOG(20, "range is empty key_begin=%s", key_begin
.c_str());
3227 const std::string
& first_key
= *keys
.begin();
3228 if (key_end
< first_key
) {
3229 CLS_LOG(20, "listed key %s past key_end=%s", first_key
.c_str(), key_end
.c_str());
3233 CLS_LOG(20, "listed key %s, removing through %s",
3234 first_key
.c_str(), key_end
.c_str());
3236 rc
= cls_cxx_map_remove_range(hctx
, first_key
, key_end
);
3238 CLS_LOG(1, "ERROR: cls_cxx_map_remove_range failed rc=%d", rc
);
3244 static int rgw_bi_log_resync(cls_method_context_t hctx
, bufferlist
*in
, bufferlist
*out
)
3246 CLS_LOG(10, "entered %s", __func__
);
3247 rgw_bucket_dir_header header
;
3248 int rc
= read_bucket_header(hctx
, &header
);
3250 CLS_LOG(1, "ERROR: rgw_bucket_complete_op(): failed to read header\n");
3256 rgw_bi_log_entry entry
;
3258 entry
.timestamp
= real_clock::now();
3259 entry
.op
= RGWModifyOp::CLS_RGW_OP_RESYNC
;
3260 entry
.state
= RGWPendingState::CLS_RGW_STATE_COMPLETE
;
3263 bi_log_index_key(hctx
, key
, entry
.id
, header
.ver
);
3267 if (entry
.id
> header
.max_marker
)
3268 header
.max_marker
= entry
.id
;
3270 header
.syncstopped
= false;
3272 rc
= cls_cxx_map_set_val(hctx
, key
, &bl
);
3276 return write_bucket_header(hctx
, &header
);
3279 static int rgw_bi_log_stop(cls_method_context_t hctx
, bufferlist
*in
, bufferlist
*out
)
3281 CLS_LOG(10, "entered %s", __func__
);
3282 rgw_bucket_dir_header header
;
3283 int rc
= read_bucket_header(hctx
, &header
);
3285 CLS_LOG(1, "ERROR: rgw_bucket_complete_op(): failed to read header\n");
3291 rgw_bi_log_entry entry
;
3293 entry
.timestamp
= real_clock::now();
3294 entry
.op
= RGWModifyOp::CLS_RGW_OP_SYNCSTOP
;
3295 entry
.state
= RGWPendingState::CLS_RGW_STATE_COMPLETE
;
3298 bi_log_index_key(hctx
, key
, entry
.id
, header
.ver
);
3302 if (entry
.id
> header
.max_marker
)
3303 header
.max_marker
= entry
.id
;
3304 header
.syncstopped
= true;
3306 rc
= cls_cxx_map_set_val(hctx
, key
, &bl
);
3310 return write_bucket_header(hctx
, &header
);
/*
 * Build the by-time usage-record key prefix: the epoch zero-padded to
 * 11 digits ("%011llu"), so lexical order matches chronological order.
 */
static void usage_record_prefix_by_time(uint64_t epoch, std::string& key)
{
  char buf[32];  // 11 digits + NUL; 32 is ample
  snprintf(buf, sizeof(buf), "%011llu", (long long unsigned)epoch);
  key = buf;
}
/*
 * Build the by-user usage-record key prefix: "<user>_<epoch11>_".
 * The original used a runtime-sized stack array (`char buf[user.size()
 * + 32]`, a VLA) — a non-standard C++ extension and a stack-growth
 * risk for long user names. Rewritten with a fixed buffer for the
 * numeric part plus string append; output ("%s_%011llu_") unchanged.
 */
static void usage_record_prefix_by_user(const std::string& user, uint64_t epoch, std::string& key)
{
  char buf[32];  // "_" + 11 digits + "_" + NUL = 14 bytes
  snprintf(buf, sizeof(buf), "_%011llu_", (long long unsigned)epoch);
  key = user;
  key.append(buf);
}
/*
 * Build the full by-time usage-record key: "<epoch11>_<user>_<bucket>".
 * The original used a VLA (`char buf[32 + user.size() + bucket.size()]`)
 * — non-standard C++ and unbounded stack use for long names. Rewritten
 * with a fixed buffer for the numeric part plus string appends; output
 * ("%011llu_%s_%s") unchanged.
 */
static void usage_record_name_by_time(uint64_t epoch, const std::string& user, const std::string& bucket, std::string& key)
{
  char buf[32];  // 11 digits + "_" + NUL
  snprintf(buf, sizeof(buf), "%011llu_", (long long unsigned)epoch);
  key = buf;
  key.append(user);
  key.push_back('_');
  key.append(bucket);
}
/*
 * Build the full by-user usage-record key: "<user>_<epoch11>_<bucket>".
 * The original used a VLA (`char buf[32 + user.size() + bucket.size()]`)
 * — non-standard C++ and unbounded stack use for long names. Rewritten
 * with a fixed buffer for the numeric part plus string appends; output
 * ("%s_%011llu_%s") unchanged.
 */
static void usage_record_name_by_user(const std::string& user, uint64_t epoch, const std::string& bucket, std::string& key)
{
  char buf[32];  // "_" + 11 digits + "_" + NUL
  snprintf(buf, sizeof(buf), "_%011llu_", (long long unsigned)epoch);
  key = user;
  key.append(buf);
  key.append(bucket);
}
3342 static int usage_record_decode(bufferlist
& record_bl
, rgw_usage_log_entry
& e
)
3344 auto kiter
= record_bl
.cbegin();
3347 } catch (ceph::buffer::error
& err
) {
3348 CLS_LOG(1, "ERROR: usage_record_decode(): failed to decode record_bl\n");
3355 int rgw_user_usage_log_add(cls_method_context_t hctx
, bufferlist
*in
, bufferlist
*out
)
3357 CLS_LOG(10, "entered %s", __func__
);
3359 auto in_iter
= in
->cbegin();
3360 rgw_cls_usage_log_add_op op
;
3363 decode(op
, in_iter
);
3364 } catch (ceph::buffer::error
& err
) {
3365 CLS_LOG(1, "ERROR: rgw_user_usage_log_add(): failed to decode request\n");
3369 rgw_usage_log_info
& info
= op
.info
;
3371 for (auto iter
= info
.entries
.begin(); iter
!= info
.entries
.end(); ++iter
) {
3372 rgw_usage_log_entry
& entry
= *iter
;
3375 rgw_user
*puser
= (entry
.payer
.empty() ? &entry
.owner
: &entry
.payer
);
3377 usage_record_name_by_time(entry
.epoch
, puser
->to_str(), entry
.bucket
, key_by_time
);
3379 CLS_LOG(10, "rgw_user_usage_log_add user=%s bucket=%s", puser
->to_str().c_str(), entry
.bucket
.c_str());
3381 bufferlist record_bl
;
3382 int ret
= cls_cxx_map_get_val(hctx
, key_by_time
, &record_bl
);
3383 if (ret
< 0 && ret
!= -ENOENT
) {
3384 CLS_LOG(1, "ERROR: rgw_user_usage_log_add(): cls_cxx_map_read_key returned %d", ret
);
3388 rgw_usage_log_entry e
;
3389 ret
= usage_record_decode(record_bl
, e
);
3392 CLS_LOG(10, "rgw_user_usage_log_add aggregating existing bucket\n");
3396 bufferlist new_record_bl
;
3397 encode(entry
, new_record_bl
);
3398 ret
= cls_cxx_map_set_val(hctx
, key_by_time
, &new_record_bl
);
3403 usage_record_name_by_user(puser
->to_str(), entry
.epoch
, entry
.bucket
, key_by_user
);
3404 ret
= cls_cxx_map_set_val(hctx
, key_by_user
, &new_record_bl
);
3412 static int usage_iterate_range(cls_method_context_t hctx
, uint64_t start
, uint64_t end
, const string
& user
,
3413 const string
& bucket
, string
& key_iter
, uint32_t max_entries
, bool *truncated
,
3414 int (*cb
)(cls_method_context_t
, const string
&, rgw_usage_log_entry
&, void *),
3417 CLS_LOG(10, "entered %s", __func__
);
3419 map
<string
, bufferlist
> keys
;
3420 string filter_prefix
;
3421 string start_key
, end_key
;
3422 bool by_user
= !user
.empty();
3424 bool truncated_status
= false;
3426 ceph_assert(truncated
!= nullptr);
3429 usage_record_prefix_by_time(end
, end_key
);
3432 user_key
.append("_");
3435 if (key_iter
.empty()) {
3437 usage_record_prefix_by_user(user
, start
, start_key
);
3439 usage_record_prefix_by_time(start
, start_key
);
3442 start_key
= key_iter
;
3445 CLS_LOG(20, "usage_iterate_range start_key=%s", start_key
.c_str());
3446 int ret
= cls_cxx_map_get_vals(hctx
, start_key
, filter_prefix
, max_entries
, &keys
, &truncated_status
);
3450 *truncated
= truncated_status
;
3452 auto iter
= keys
.begin();
3453 if (iter
== keys
.end())
3456 for (; iter
!= keys
.end(); ++iter
) {
3457 const string
& key
= iter
->first
;
3458 rgw_usage_log_entry e
;
3461 if (!by_user
&& key
.compare(end_key
) >= 0) {
3462 CLS_LOG(20, "usage_iterate_range reached key=%s, done", key
.c_str());
3468 if (by_user
&& key
.compare(0, user_key
.size(), user_key
) != 0) {
3469 CLS_LOG(20, "usage_iterate_range reached key=%s, done", key
.c_str());
3475 ret
= usage_record_decode(iter
->second
, e
);
3479 if (!bucket
.empty() && bucket
.compare(e
.bucket
))
3482 if (e
.epoch
< start
)
3485 /* keys are sorted by epoch, so once we're past end we're done */
3486 if (e
.epoch
>= end
) {
3491 ret
= cb(hctx
, key
, e
, param
);
3498 static int usage_log_read_cb(cls_method_context_t hctx
, const string
& key
, rgw_usage_log_entry
& entry
, void *param
)
3500 map
<rgw_user_bucket
, rgw_usage_log_entry
> *usage
= (map
<rgw_user_bucket
, rgw_usage_log_entry
> *)param
;
3502 if (!entry
.payer
.empty()) {
3503 puser
= &entry
.payer
;
3505 puser
= &entry
.owner
;
3507 rgw_user_bucket
ub(puser
->to_str(), entry
.bucket
);
3508 rgw_usage_log_entry
& le
= (*usage
)[ub
];
3509 le
.aggregate(entry
);
3514 int rgw_user_usage_log_read(cls_method_context_t hctx
, bufferlist
*in
, bufferlist
*out
)
3516 CLS_LOG(10, "entered %s", __func__
);
3518 auto in_iter
= in
->cbegin();
3519 rgw_cls_usage_log_read_op op
;
3522 decode(op
, in_iter
);
3523 } catch (ceph::buffer::error
& err
) {
3524 CLS_LOG(1, "ERROR: rgw_user_usage_log_read(): failed to decode request\n");
3528 rgw_cls_usage_log_read_ret ret_info
;
3529 map
<rgw_user_bucket
, rgw_usage_log_entry
> *usage
= &ret_info
.usage
;
3530 string iter
= op
.iter
;
3531 #define MAX_ENTRIES 1000
3532 uint32_t max_entries
= (op
.max_entries
? op
.max_entries
: MAX_ENTRIES
);
3533 int ret
= usage_iterate_range(hctx
, op
.start_epoch
, op
.end_epoch
, op
.owner
, op
.bucket
, iter
, max_entries
, &ret_info
.truncated
, usage_log_read_cb
, (void *)usage
);
3537 if (ret_info
.truncated
)
3538 ret_info
.next_iter
= iter
;
3540 encode(ret_info
, *out
);
3544 static int usage_log_trim_cb(cls_method_context_t hctx
, const string
& key
, rgw_usage_log_entry
& entry
, void *param
)
3546 bool *found
= (bool *)param
;
3553 string o
= entry
.owner
.to_str();
3554 usage_record_name_by_time(entry
.epoch
, o
, entry
.bucket
, key_by_time
);
3555 usage_record_name_by_user(o
, entry
.epoch
, entry
.bucket
, key_by_user
);
3557 int ret
= cls_cxx_map_remove_key(hctx
, key_by_time
);
3561 return cls_cxx_map_remove_key(hctx
, key_by_user
);
3564 int rgw_user_usage_log_trim(cls_method_context_t hctx
, bufferlist
*in
, bufferlist
*out
)
3566 CLS_LOG(10, "entered %s", __func__
);
3568 /* only continue if object exists! */
3569 int ret
= cls_cxx_stat(hctx
, NULL
, NULL
);
3573 auto in_iter
= in
->cbegin();
3574 rgw_cls_usage_log_trim_op op
;
3577 decode(op
, in_iter
);
3578 } catch (ceph::buffer::error
& err
) {
3579 CLS_LOG(1, "ERROR: rgw_user_log_usage_log_trim(): failed to decode request\n");
3586 #define MAX_USAGE_TRIM_ENTRIES 1000
3587 ret
= usage_iterate_range(hctx
, op
.start_epoch
, op
.end_epoch
, op
.user
, op
.bucket
, iter
, MAX_USAGE_TRIM_ENTRIES
, &more
, usage_log_trim_cb
, (void *)&found
);
3591 if (!more
&& !found
)
3597 int rgw_usage_log_clear(cls_method_context_t hctx
, bufferlist
*in
, bufferlist
*out
)
3599 CLS_LOG(10, "entered %s", __func__
);
3601 int ret
= cls_cxx_map_clear(hctx
);
3602 /* if object doesn't exist all the logs are cleared anyway */
3610 * We hold the garbage collection chain data under two different
3611 * indexes: the first 'name' index keeps them under a unique tag that
3612 * represents the chains, and a second 'time' index keeps them by
3613 * their expiration timestamp. Each is prefixed differently (see
3614 * gc_index_prefixes below).
3616 * Since key-value data is listed in lexical order by keys, generally
3617 * the name entries are retrieved first and then the time entries.
3618 * When listing the entries via `gc_iterate_entries` one parameter is
3619 * a marker, and if we were to pass "1_" (i.e.,
3620 * gc_index_prefixes[GC_OBJ_TIME_INDEX]), the listing would skip over
3621 * the 'name' entries and begin with the 'time' entries.
3623 * Furthermore, the times are converted to strings such that lexical
3624 * order correlates with chronological order, so the entries are
3625 * returned chronologically from the earliest expiring to the latest
3626 * expiring. This allows for starting at "1_" and to keep retrieving
3627 * chunks of entries, and as long as they are prior to the current
3628 * time, they're expired and processing can continue.
3630 #define GC_OBJ_NAME_INDEX 0
3631 #define GC_OBJ_TIME_INDEX 1
3633 static string gc_index_prefixes
[] = { "0_",
3636 static void prepend_index_prefix(const string
& src
, int index
, string
*dest
)
3638 *dest
= gc_index_prefixes
[index
];
3642 static int gc_omap_get(cls_method_context_t hctx
, int type
, const string
& key
, cls_rgw_gc_obj_info
*info
)
3645 prepend_index_prefix(key
, type
, &index
);
3647 int ret
= read_omap_entry(hctx
, index
, info
);
3654 static int gc_omap_set(cls_method_context_t hctx
, int type
, const string
& key
, const cls_rgw_gc_obj_info
*info
)
3659 string index
= gc_index_prefixes
[type
];
3662 int ret
= cls_cxx_map_set_val(hctx
, index
, &bl
);
3669 static int gc_omap_remove(cls_method_context_t hctx
, int type
, const string
& key
)
3671 string index
= gc_index_prefixes
[type
];
3674 int ret
= cls_cxx_map_remove_key(hctx
, index
);
3681 static bool key_in_index(const string
& key
, int index_type
)
3683 const string
& prefix
= gc_index_prefixes
[index_type
];
3684 return (key
.compare(0, prefix
.size(), prefix
) == 0);
3688 static int gc_update_entry(cls_method_context_t hctx
, uint32_t expiration_secs
,
3689 cls_rgw_gc_obj_info
& info
)
3691 cls_rgw_gc_obj_info old_info
;
3692 int ret
= gc_omap_get(hctx
, GC_OBJ_NAME_INDEX
, info
.tag
, &old_info
);
3695 get_time_key(old_info
.time
, &key
);
3696 ret
= gc_omap_remove(hctx
, GC_OBJ_TIME_INDEX
, key
);
3697 if (ret
< 0 && ret
!= -ENOENT
) {
3698 CLS_LOG(0, "ERROR: failed to remove key=%s", key
.c_str());
3703 // calculate time and time key
3704 info
.time
= ceph::real_clock::now();
3705 info
.time
+= make_timespan(expiration_secs
);
3707 get_time_key(info
.time
, &time_key
);
3709 if (info
.chain
.objs
.empty()) {
3711 "WARNING: %s setting GC log entry with zero-length chain, "
3712 "tag='%s', timekey='%s'",
3713 __func__
, info
.tag
.c_str(), time_key
.c_str());
3716 ret
= gc_omap_set(hctx
, GC_OBJ_NAME_INDEX
, info
.tag
, &info
);
3720 ret
= gc_omap_set(hctx
, GC_OBJ_TIME_INDEX
, time_key
, &info
);
3728 CLS_LOG(0, "ERROR: gc_set_entry error info.tag=%s, ret=%d",
3729 info
.tag
.c_str(), ret
);
3730 gc_omap_remove(hctx
, GC_OBJ_NAME_INDEX
, info
.tag
);
3735 static int gc_defer_entry(cls_method_context_t hctx
, const string
& tag
, uint32_t expiration_secs
)
3737 cls_rgw_gc_obj_info info
;
3738 int ret
= gc_omap_get(hctx
, GC_OBJ_NAME_INDEX
, tag
, &info
);
3741 return gc_update_entry(hctx
, expiration_secs
, info
);
3744 int gc_record_decode(bufferlist
& bl
, cls_rgw_gc_obj_info
& e
)
3746 auto iter
= bl
.cbegin();
3749 } catch (ceph::buffer::error
& err
) {
3750 CLS_LOG(0, "ERROR: failed to decode cls_rgw_gc_obj_info");
3756 static int rgw_cls_gc_set_entry(cls_method_context_t hctx
, bufferlist
*in
, bufferlist
*out
)
3758 CLS_LOG(10, "entered %s", __func__
);
3759 auto in_iter
= in
->cbegin();
3761 cls_rgw_gc_set_entry_op op
;
3763 decode(op
, in_iter
);
3764 } catch (ceph::buffer::error
& err
) {
3765 CLS_LOG(1, "ERROR: rgw_cls_gc_set_entry(): failed to decode entry\n");
3769 return gc_update_entry(hctx
, op
.expiration_secs
, op
.info
);
3772 static int rgw_cls_gc_defer_entry(cls_method_context_t hctx
, bufferlist
*in
, bufferlist
*out
)
3774 CLS_LOG(10, "entered %s", __func__
);
3775 auto in_iter
= in
->cbegin();
3777 cls_rgw_gc_defer_entry_op op
;
3779 decode(op
, in_iter
);
3780 } catch (ceph::buffer::error
& err
) {
3781 CLS_LOG(1, "ERROR: rgw_cls_gc_defer_entry(): failed to decode entry\n");
3785 return gc_defer_entry(hctx
, op
.tag
, op
.expiration_secs
);
3788 static int gc_iterate_entries(cls_method_context_t hctx
,
3789 const string
& marker
,
3792 uint32_t max_entries
,
3794 int (*cb
)(cls_method_context_t
,
3796 cls_rgw_gc_obj_info
&,
3800 CLS_LOG(10, "gc_iterate_entries");
3802 map
<string
, bufferlist
> keys
;
3803 string filter_prefix
, end_key
;
3810 if (marker
.empty()) {
3811 prepend_index_prefix(marker
, GC_OBJ_TIME_INDEX
, &start_key
);
3817 real_time now
= ceph::real_clock::now();
3819 get_time_key(now
, &now_str
);
3820 prepend_index_prefix(now_str
, GC_OBJ_TIME_INDEX
, &end_key
);
3822 CLS_LOG(10, "gc_iterate_entries end_key=%s", end_key
.c_str());
3827 int ret
= cls_cxx_map_get_vals(hctx
, start_key
, filter
, max_entries
,
3832 auto iter
= keys
.begin();
3833 if (iter
== keys
.end()) {
3834 // if keys empty must not come back as truncated
3835 ceph_assert(!truncated
|| !(*truncated
));
3839 const string
* last_key
= nullptr; // last key processed, for end-marker
3840 for (; iter
!= keys
.end(); ++iter
) {
3841 const string
& key
= iter
->first
;
3842 cls_rgw_gc_obj_info e
;
3844 CLS_LOG(10, "gc_iterate_entries key=%s", key
.c_str());
3846 if (!end_key
.empty() && key
.compare(end_key
) >= 0) {
3852 if (!key_in_index(key
, GC_OBJ_TIME_INDEX
)) {
3858 ret
= gc_record_decode(iter
->second
, e
);
3862 ret
= cb(hctx
, key
, e
, param
);
3865 last_key
= &(iter
->first
); // update when callback successful
3868 // set the out marker if either caller does not capture truncated or
3869 // if they do capture and we are truncated
3870 if (!truncated
|| *truncated
) {
3872 out_marker
= *last_key
;
3878 static int gc_list_cb(cls_method_context_t hctx
, const string
& key
, cls_rgw_gc_obj_info
& info
, void *param
)
3880 list
<cls_rgw_gc_obj_info
> *l
= (list
<cls_rgw_gc_obj_info
> *)param
;
3885 static int gc_list_entries(cls_method_context_t hctx
, const string
& marker
,
3886 uint32_t max
, bool expired_only
,
3887 list
<cls_rgw_gc_obj_info
>& entries
, bool *truncated
, string
& next_marker
)
3889 int ret
= gc_iterate_entries(hctx
, marker
, expired_only
,
3890 next_marker
, max
, truncated
,
3891 gc_list_cb
, &entries
);
3895 static int rgw_cls_gc_list(cls_method_context_t hctx
, bufferlist
*in
, bufferlist
*out
)
3897 CLS_LOG(10, "entered %s", __func__
);
3898 auto in_iter
= in
->cbegin();
3900 cls_rgw_gc_list_op op
;
3902 decode(op
, in_iter
);
3903 } catch (ceph::buffer::error
& err
) {
3904 CLS_LOG(1, "ERROR: rgw_cls_gc_list(): failed to decode entry\n");
3908 cls_rgw_gc_list_ret op_ret
;
3909 #define GC_LIST_ENTRIES_DEFAULT 128
3910 int ret
= gc_list_entries(hctx
, op
.marker
, (op
.max
? op
.max
: GC_LIST_ENTRIES_DEFAULT
), op
.expired_only
,
3911 op_ret
.entries
, &op_ret
.truncated
, op_ret
.next_marker
);
3915 encode(op_ret
, *out
);
3920 static int gc_remove(cls_method_context_t hctx
, vector
<string
>& tags
)
3922 for (auto iter
= tags
.begin(); iter
!= tags
.end(); ++iter
) {
3923 string
& tag
= *iter
;
3924 cls_rgw_gc_obj_info info
;
3925 int ret
= gc_omap_get(hctx
, GC_OBJ_NAME_INDEX
, tag
, &info
);
3926 if (ret
== -ENOENT
) {
3927 CLS_LOG(0, "couldn't find tag in name index tag=%s", tag
.c_str());
3935 get_time_key(info
.time
, &time_key
);
3936 ret
= gc_omap_remove(hctx
, GC_OBJ_TIME_INDEX
, time_key
);
3937 if (ret
< 0 && ret
!= -ENOENT
)
3939 if (ret
== -ENOENT
) {
3940 CLS_LOG(0, "couldn't find key in time index key=%s", time_key
.c_str());
3943 ret
= gc_omap_remove(hctx
, GC_OBJ_NAME_INDEX
, tag
);
3944 if (ret
< 0 && ret
!= -ENOENT
)
3951 static int rgw_cls_gc_remove(cls_method_context_t hctx
, bufferlist
*in
, bufferlist
*out
)
3953 CLS_LOG(10, "entered %s", __func__
);
3954 auto in_iter
= in
->cbegin();
3956 cls_rgw_gc_remove_op op
;
3958 decode(op
, in_iter
);
3959 } catch (ceph::buffer::error
& err
) {
3960 CLS_LOG(1, "ERROR: rgw_cls_gc_remove(): failed to decode entry\n");
3964 return gc_remove(hctx
, op
.tags
);
3967 static int rgw_cls_lc_get_entry(cls_method_context_t hctx
, bufferlist
*in
, bufferlist
*out
)
3969 CLS_LOG(10, "entered %s", __func__
);
3970 auto in_iter
= in
->cbegin();
3972 cls_rgw_lc_get_entry_op op
;
3974 decode(op
, in_iter
);
3975 } catch (ceph::buffer::error
& err
) {
3976 CLS_LOG(1, "ERROR: rgw_cls_lc_set_entry(): failed to decode entry\n");
3980 cls_rgw_lc_entry lc_entry
;
3981 int ret
= read_omap_entry(hctx
, op
.marker
, &lc_entry
);
3985 cls_rgw_lc_get_entry_ret
op_ret(std::move(lc_entry
));
3986 encode(op_ret
, *out
);
3991 static int rgw_cls_lc_set_entry(cls_method_context_t hctx
, bufferlist
*in
, bufferlist
*out
)
3993 CLS_LOG(10, "entered %s", __func__
);
3994 auto in_iter
= in
->cbegin();
3996 cls_rgw_lc_set_entry_op op
;
3998 decode(op
, in_iter
);
3999 } catch (ceph::buffer::error
& err
) {
4000 CLS_LOG(1, "ERROR: rgw_cls_lc_set_entry(): failed to decode entry\n");
4005 encode(op
.entry
, bl
);
4007 int ret
= cls_cxx_map_set_val(hctx
, op
.entry
.bucket
, &bl
);
4011 static int rgw_cls_lc_rm_entry(cls_method_context_t hctx
, bufferlist
*in
, bufferlist
*out
)
4013 CLS_LOG(10, "entered %s", __func__
);
4014 auto in_iter
= in
->cbegin();
4016 cls_rgw_lc_rm_entry_op op
;
4018 decode(op
, in_iter
);
4019 } catch (ceph::buffer::error
& err
) {
4020 CLS_LOG(1, "ERROR: rgw_cls_lc_rm_entry(): failed to decode entry\n");
4024 int ret
= cls_cxx_map_remove_key(hctx
, op
.entry
.bucket
);
4028 static int rgw_cls_lc_get_next_entry(cls_method_context_t hctx
, bufferlist
*in
, bufferlist
*out
)
4030 CLS_LOG(10, "entered %s", __func__
);
4031 auto in_iter
= in
->cbegin();
4032 cls_rgw_lc_get_next_entry_ret op_ret
;
4033 cls_rgw_lc_get_next_entry_op op
;
4035 decode(op
, in_iter
);
4036 } catch (ceph::buffer::error
& err
) {
4037 CLS_LOG(1, "ERROR: rgw_cls_lc_get_next_entry: failed to decode op\n");
4041 map
<string
, bufferlist
> vals
;
4042 string filter_prefix
;
4044 int ret
= cls_cxx_map_get_vals(hctx
, op
.marker
, filter_prefix
, 1, &vals
, &more
);
4047 cls_rgw_lc_entry entry
;
4048 if (!vals
.empty()) {
4049 auto it
= vals
.begin();
4050 in_iter
= it
->second
.begin();
4052 decode(entry
, in_iter
);
4053 } catch (ceph::buffer::error
& err
) {
4054 CLS_LOG(1, "ERROR: rgw_cls_lc_get_next_entry(): failed to decode entry\n");
4058 op_ret
.entry
= entry
;
4059 encode(op_ret
, *out
);
4063 static int rgw_cls_lc_list_entries(cls_method_context_t hctx
, bufferlist
*in
,
4066 CLS_LOG(10, "entered %s", __func__
);
4067 cls_rgw_lc_list_entries_op op
;
4068 auto in_iter
= in
->cbegin();
4070 decode(op
, in_iter
);
4071 } catch (ceph::buffer::error
& err
) {
4072 CLS_LOG(1, "ERROR: rgw_cls_lc_list_entries(): failed to decode op\n");
4076 cls_rgw_lc_list_entries_ret
op_ret(op
.compat_v
);
4077 map
<string
, bufferlist
> vals
;
4078 string filter_prefix
;
4079 int ret
= cls_cxx_map_get_vals(hctx
, op
.marker
, filter_prefix
, op
.max_entries
,
4080 &vals
, &op_ret
.is_truncated
);
4083 for (auto it
= vals
.begin(); it
!= vals
.end(); ++it
) {
4084 cls_rgw_lc_entry entry
;
4085 auto iter
= it
->second
.cbegin();
4087 decode(entry
, iter
);
4088 } catch (buffer::error
& err
) {
4089 /* try backward compat */
4090 pair
<string
, int> oe
;
4092 iter
= it
->second
.begin();
4094 entry
= {oe
.first
, 0 /* start */, uint32_t(oe
.second
)};
4095 } catch(buffer::error
& err
) {
4097 1, "ERROR: rgw_cls_lc_list_entries(): failed to decode entry\n");
4101 op_ret
.entries
.push_back(entry
);
4103 encode(op_ret
, *out
);
4107 static int rgw_cls_lc_put_head(cls_method_context_t hctx
, bufferlist
*in
, bufferlist
*out
)
4109 CLS_LOG(10, "entered %s", __func__
);
4110 auto in_iter
= in
->cbegin();
4112 cls_rgw_lc_put_head_op op
;
4114 decode(op
, in_iter
);
4115 } catch (ceph::buffer::error
& err
) {
4116 CLS_LOG(1, "ERROR: rgw_cls_lc_put_head(): failed to decode entry\n");
4121 encode(op
.head
, bl
);
4122 int ret
= cls_cxx_map_write_header(hctx
,&bl
);
4126 static int rgw_cls_lc_get_head(cls_method_context_t hctx
, bufferlist
*in
, bufferlist
*out
)
4128 CLS_LOG(10, "entered %s", __func__
);
4130 int ret
= cls_cxx_map_read_header(hctx
, &bl
);
4133 cls_rgw_lc_obj_head head
;
4134 if (bl
.length() != 0) {
4135 auto iter
= bl
.cbegin();
4138 } catch (ceph::buffer::error
& err
) {
4139 CLS_LOG(0, "ERROR: rgw_cls_lc_get_head(): failed to decode entry %s",err
.what());
4143 head
.start_date
= 0;
4144 head
.marker
.clear();
4146 cls_rgw_lc_get_head_ret op_ret
;
4148 encode(op_ret
, *out
);
4152 static int rgw_reshard_add(cls_method_context_t hctx
, bufferlist
*in
, bufferlist
*out
)
4154 CLS_LOG(10, "entered %s", __func__
);
4155 auto in_iter
= in
->cbegin();
4157 cls_rgw_reshard_add_op op
;
4159 decode(op
, in_iter
);
4160 } catch (ceph::buffer::error
& err
) {
4161 CLS_LOG(1, "ERROR: rgw_reshard_add: failed to decode entry\n");
4167 op
.entry
.get_key(&key
);
4170 encode(op
.entry
, bl
);
4171 int ret
= cls_cxx_map_set_val(hctx
, key
, &bl
);
4173 CLS_ERR("error adding reshard job for bucket %s with key %s",op
.entry
.bucket_name
.c_str(), key
.c_str());
4180 static int rgw_reshard_list(cls_method_context_t hctx
, bufferlist
*in
, bufferlist
*out
)
4182 CLS_LOG(10, "entered %s", __func__
);
4183 cls_rgw_reshard_list_op op
;
4184 auto in_iter
= in
->cbegin();
4186 decode(op
, in_iter
);
4187 } catch (ceph::buffer::error
& err
) {
4188 CLS_LOG(1, "ERROR: rgw_cls_rehard_list(): failed to decode entry\n");
4191 cls_rgw_reshard_list_ret op_ret
;
4192 map
<string
, bufferlist
> vals
;
4193 string filter_prefix
;
4194 #define MAX_RESHARD_LIST_ENTRIES 1000
4195 /* one extra entry for identifying truncation */
4196 int32_t max
= (op
.max
&& (op
.max
< MAX_RESHARD_LIST_ENTRIES
) ? op
.max
: MAX_RESHARD_LIST_ENTRIES
);
4197 int ret
= cls_cxx_map_get_vals(hctx
, op
.marker
, filter_prefix
, max
, &vals
, &op_ret
.is_truncated
);
4200 cls_rgw_reshard_entry entry
;
4202 for (auto it
= vals
.begin(); i
< (int)op
.max
&& it
!= vals
.end(); ++it
, ++i
) {
4203 auto iter
= it
->second
.cbegin();
4205 decode(entry
, iter
);
4206 } catch (ceph::buffer::error
& err
) {
4207 CLS_LOG(1, "ERROR: rgw_cls_rehard_list(): failed to decode entry\n");
4210 op_ret
.entries
.push_back(entry
);
4212 encode(op_ret
, *out
);
4216 static int rgw_reshard_get(cls_method_context_t hctx
, bufferlist
*in
, bufferlist
*out
)
4218 CLS_LOG(10, "entered %s", __func__
);
4219 auto in_iter
= in
->cbegin();
4221 cls_rgw_reshard_get_op op
;
4223 decode(op
, in_iter
);
4224 } catch (ceph::buffer::error
& err
) {
4225 CLS_LOG(1, "ERROR: rgw_reshard_get: failed to decode entry\n");
4230 cls_rgw_reshard_entry entry
;
4231 op
.entry
.get_key(&key
);
4232 int ret
= read_omap_entry(hctx
, key
, &entry
);
4237 cls_rgw_reshard_get_ret op_ret
;
4238 op_ret
.entry
= entry
;
4239 encode(op_ret
, *out
);
4243 static int rgw_reshard_remove(cls_method_context_t hctx
, bufferlist
*in
, bufferlist
*out
)
4245 CLS_LOG(10, "entered %s", __func__
);
4246 auto in_iter
= in
->cbegin();
4248 cls_rgw_reshard_remove_op op
;
4250 decode(op
, in_iter
);
4251 } catch (ceph::buffer::error
& err
) {
4252 CLS_LOG(1, "ERROR: rgw_cls_rehard_remove: failed to decode entry\n");
4257 cls_rgw_reshard_entry entry
;
4258 cls_rgw_reshard_entry::generate_key(op
.tenant
, op
.bucket_name
, &key
);
4259 int ret
= read_omap_entry(hctx
, key
, &entry
);
4264 if (!op
.bucket_id
.empty() &&
4265 entry
.bucket_id
!= op
.bucket_id
) {
4269 ret
= cls_cxx_map_remove_key(hctx
, key
);
4271 CLS_LOG(0, "ERROR: failed to remove key: key=%s ret=%d", key
.c_str(), ret
);
4277 static int rgw_set_bucket_resharding(cls_method_context_t hctx
, bufferlist
*in
, bufferlist
*out
)
4279 CLS_LOG(10, "entered %s", __func__
);
4280 cls_rgw_set_bucket_resharding_op op
;
4282 auto in_iter
= in
->cbegin();
4284 decode(op
, in_iter
);
4285 } catch (ceph::buffer::error
& err
) {
4286 CLS_LOG(1, "ERROR: cls_rgw_set_bucket_resharding: failed to decode entry\n");
4290 rgw_bucket_dir_header header
;
4291 int rc
= read_bucket_header(hctx
, &header
);
4293 CLS_LOG(1, "ERROR: %s: failed to read header", __func__
);
4297 header
.new_instance
.set_status(op
.entry
.new_bucket_instance_id
, op
.entry
.num_shards
, op
.entry
.reshard_status
);
4299 return write_bucket_header(hctx
, &header
);
4302 static int rgw_clear_bucket_resharding(cls_method_context_t hctx
, bufferlist
*in
, bufferlist
*out
)
4304 CLS_LOG(10, "entered %s", __func__
);
4305 cls_rgw_clear_bucket_resharding_op op
;
4307 auto in_iter
= in
->cbegin();
4309 decode(op
, in_iter
);
4310 } catch (ceph::buffer::error
& err
) {
4311 CLS_LOG(1, "ERROR: cls_rgw_clear_bucket_resharding: failed to decode entry\n");
4315 rgw_bucket_dir_header header
;
4316 int rc
= read_bucket_header(hctx
, &header
);
4318 CLS_LOG(1, "ERROR: %s: failed to read header", __func__
);
4321 header
.new_instance
.clear();
4323 return write_bucket_header(hctx
, &header
);
4326 static int rgw_guard_bucket_resharding(cls_method_context_t hctx
, bufferlist
*in
, bufferlist
*out
)
4328 CLS_LOG(10, "entered %s", __func__
);
4329 cls_rgw_guard_bucket_resharding_op op
;
4331 auto in_iter
= in
->cbegin();
4333 decode(op
, in_iter
);
4334 } catch (ceph::buffer::error
& err
) {
4335 CLS_LOG(1, "ERROR: %s: failed to decode entry", __func__
);
4339 rgw_bucket_dir_header header
;
4340 int rc
= read_bucket_header(hctx
, &header
);
4342 CLS_LOG(1, "ERROR: %s: failed to read header", __func__
);
4346 if (header
.resharding()) {
4353 static int rgw_get_bucket_resharding(cls_method_context_t hctx
,
4354 bufferlist
*in
, bufferlist
*out
)
4356 CLS_LOG(10, "entered %s", __func__
);
4357 cls_rgw_get_bucket_resharding_op op
;
4359 auto in_iter
= in
->cbegin();
4361 decode(op
, in_iter
);
4362 } catch (ceph::buffer::error
& err
) {
4363 CLS_LOG(1, "ERROR: %s: failed to decode entry", __func__
);
4367 rgw_bucket_dir_header header
;
4368 int rc
= read_bucket_header(hctx
, &header
);
4370 CLS_LOG(1, "ERROR: %s: failed to read header", __func__
);
4374 cls_rgw_get_bucket_resharding_ret op_ret
;
4375 op_ret
.new_instance
= header
.new_instance
;
4377 encode(op_ret
, *out
);
4384 CLS_LOG(1, "Loaded rgw class!");
4386 cls_handle_t h_class
;
4387 cls_method_handle_t h_rgw_bucket_init_index
;
4388 cls_method_handle_t h_rgw_bucket_set_tag_timeout
;
4389 cls_method_handle_t h_rgw_bucket_list
;
4390 cls_method_handle_t h_rgw_bucket_check_index
;
4391 cls_method_handle_t h_rgw_bucket_rebuild_index
;
4392 cls_method_handle_t h_rgw_bucket_update_stats
;
4393 cls_method_handle_t h_rgw_bucket_prepare_op
;
4394 cls_method_handle_t h_rgw_bucket_complete_op
;
4395 cls_method_handle_t h_rgw_bucket_link_olh
;
4396 cls_method_handle_t h_rgw_bucket_unlink_instance_op
;
4397 cls_method_handle_t h_rgw_bucket_read_olh_log
;
4398 cls_method_handle_t h_rgw_bucket_trim_olh_log
;
4399 cls_method_handle_t h_rgw_bucket_clear_olh
;
4400 cls_method_handle_t h_rgw_obj_remove
;
4401 cls_method_handle_t h_rgw_obj_store_pg_ver
;
4402 cls_method_handle_t h_rgw_obj_check_attrs_prefix
;
4403 cls_method_handle_t h_rgw_obj_check_mtime
;
4404 cls_method_handle_t h_rgw_bi_get_op
;
4405 cls_method_handle_t h_rgw_bi_put_op
;
4406 cls_method_handle_t h_rgw_bi_list_op
;
4407 cls_method_handle_t h_rgw_bi_log_list_op
;
4408 cls_method_handle_t h_rgw_bi_log_resync_op
;
4409 cls_method_handle_t h_rgw_bi_log_stop_op
;
4410 cls_method_handle_t h_rgw_dir_suggest_changes
;
4411 cls_method_handle_t h_rgw_user_usage_log_add
;
4412 cls_method_handle_t h_rgw_user_usage_log_read
;
4413 cls_method_handle_t h_rgw_user_usage_log_trim
;
4414 cls_method_handle_t h_rgw_usage_log_clear
;
4415 cls_method_handle_t h_rgw_gc_set_entry
;
4416 cls_method_handle_t h_rgw_gc_list
;
4417 cls_method_handle_t h_rgw_gc_remove
;
4418 cls_method_handle_t h_rgw_lc_get_entry
;
4419 cls_method_handle_t h_rgw_lc_set_entry
;
4420 cls_method_handle_t h_rgw_lc_rm_entry
;
4421 cls_method_handle_t h_rgw_lc_get_next_entry
;
4422 cls_method_handle_t h_rgw_lc_put_head
;
4423 cls_method_handle_t h_rgw_lc_get_head
;
4424 cls_method_handle_t h_rgw_lc_list_entries
;
4425 cls_method_handle_t h_rgw_reshard_add
;
4426 cls_method_handle_t h_rgw_reshard_list
;
4427 cls_method_handle_t h_rgw_reshard_get
;
4428 cls_method_handle_t h_rgw_reshard_remove
;
4429 cls_method_handle_t h_rgw_set_bucket_resharding
;
4430 cls_method_handle_t h_rgw_clear_bucket_resharding
;
4431 cls_method_handle_t h_rgw_guard_bucket_resharding
;
4432 cls_method_handle_t h_rgw_get_bucket_resharding
;
4434 cls_register(RGW_CLASS
, &h_class
);
4437 cls_register_cxx_method(h_class
, RGW_BUCKET_INIT_INDEX
, CLS_METHOD_RD
| CLS_METHOD_WR
, rgw_bucket_init_index
, &h_rgw_bucket_init_index
);
4438 cls_register_cxx_method(h_class
, RGW_BUCKET_SET_TAG_TIMEOUT
, CLS_METHOD_RD
| CLS_METHOD_WR
, rgw_bucket_set_tag_timeout
, &h_rgw_bucket_set_tag_timeout
);
4439 cls_register_cxx_method(h_class
, RGW_BUCKET_LIST
, CLS_METHOD_RD
, rgw_bucket_list
, &h_rgw_bucket_list
);
4440 cls_register_cxx_method(h_class
, RGW_BUCKET_CHECK_INDEX
, CLS_METHOD_RD
, rgw_bucket_check_index
, &h_rgw_bucket_check_index
);
4441 cls_register_cxx_method(h_class
, RGW_BUCKET_REBUILD_INDEX
, CLS_METHOD_RD
| CLS_METHOD_WR
, rgw_bucket_rebuild_index
, &h_rgw_bucket_rebuild_index
);
4442 cls_register_cxx_method(h_class
, RGW_BUCKET_UPDATE_STATS
, CLS_METHOD_RD
| CLS_METHOD_WR
, rgw_bucket_update_stats
, &h_rgw_bucket_update_stats
);
4443 cls_register_cxx_method(h_class
, RGW_BUCKET_PREPARE_OP
, CLS_METHOD_RD
| CLS_METHOD_WR
, rgw_bucket_prepare_op
, &h_rgw_bucket_prepare_op
);
4444 cls_register_cxx_method(h_class
, RGW_BUCKET_COMPLETE_OP
, CLS_METHOD_RD
| CLS_METHOD_WR
, rgw_bucket_complete_op
, &h_rgw_bucket_complete_op
);
4445 cls_register_cxx_method(h_class
, RGW_BUCKET_LINK_OLH
, CLS_METHOD_RD
| CLS_METHOD_WR
, rgw_bucket_link_olh
, &h_rgw_bucket_link_olh
);
4446 cls_register_cxx_method(h_class
, RGW_BUCKET_UNLINK_INSTANCE
, CLS_METHOD_RD
| CLS_METHOD_WR
, rgw_bucket_unlink_instance
, &h_rgw_bucket_unlink_instance_op
);
4447 cls_register_cxx_method(h_class
, RGW_BUCKET_READ_OLH_LOG
, CLS_METHOD_RD
, rgw_bucket_read_olh_log
, &h_rgw_bucket_read_olh_log
);
4448 cls_register_cxx_method(h_class
, RGW_BUCKET_TRIM_OLH_LOG
, CLS_METHOD_RD
| CLS_METHOD_WR
, rgw_bucket_trim_olh_log
, &h_rgw_bucket_trim_olh_log
);
4449 cls_register_cxx_method(h_class
, RGW_BUCKET_CLEAR_OLH
, CLS_METHOD_RD
| CLS_METHOD_WR
, rgw_bucket_clear_olh
, &h_rgw_bucket_clear_olh
);
4451 cls_register_cxx_method(h_class
, RGW_OBJ_REMOVE
, CLS_METHOD_RD
| CLS_METHOD_WR
, rgw_obj_remove
, &h_rgw_obj_remove
);
4452 cls_register_cxx_method(h_class
, RGW_OBJ_STORE_PG_VER
, CLS_METHOD_WR
, rgw_obj_store_pg_ver
, &h_rgw_obj_store_pg_ver
);
4453 cls_register_cxx_method(h_class
, RGW_OBJ_CHECK_ATTRS_PREFIX
, CLS_METHOD_RD
, rgw_obj_check_attrs_prefix
, &h_rgw_obj_check_attrs_prefix
);
4454 cls_register_cxx_method(h_class
, RGW_OBJ_CHECK_MTIME
, CLS_METHOD_RD
, rgw_obj_check_mtime
, &h_rgw_obj_check_mtime
);
4456 cls_register_cxx_method(h_class
, RGW_BI_GET
, CLS_METHOD_RD
, rgw_bi_get_op
, &h_rgw_bi_get_op
);
4457 cls_register_cxx_method(h_class
, RGW_BI_PUT
, CLS_METHOD_RD
| CLS_METHOD_WR
, rgw_bi_put_op
, &h_rgw_bi_put_op
);
4458 cls_register_cxx_method(h_class
, RGW_BI_LIST
, CLS_METHOD_RD
, rgw_bi_list_op
, &h_rgw_bi_list_op
);
4460 cls_register_cxx_method(h_class
, RGW_BI_LOG_LIST
, CLS_METHOD_RD
, rgw_bi_log_list
, &h_rgw_bi_log_list_op
);
4461 cls_register_cxx_method(h_class
, RGW_BI_LOG_TRIM
, CLS_METHOD_RD
| CLS_METHOD_WR
, rgw_bi_log_trim
, &h_rgw_bi_log_list_op
);
4462 cls_register_cxx_method(h_class
, RGW_DIR_SUGGEST_CHANGES
, CLS_METHOD_RD
| CLS_METHOD_WR
, rgw_dir_suggest_changes
, &h_rgw_dir_suggest_changes
);
4464 cls_register_cxx_method(h_class
, RGW_BI_LOG_RESYNC
, CLS_METHOD_RD
| CLS_METHOD_WR
, rgw_bi_log_resync
, &h_rgw_bi_log_resync_op
);
4465 cls_register_cxx_method(h_class
, RGW_BI_LOG_STOP
, CLS_METHOD_RD
| CLS_METHOD_WR
, rgw_bi_log_stop
, &h_rgw_bi_log_stop_op
);
4468 cls_register_cxx_method(h_class
, RGW_USER_USAGE_LOG_ADD
, CLS_METHOD_RD
| CLS_METHOD_WR
, rgw_user_usage_log_add
, &h_rgw_user_usage_log_add
);
4469 cls_register_cxx_method(h_class
, RGW_USER_USAGE_LOG_READ
, CLS_METHOD_RD
, rgw_user_usage_log_read
, &h_rgw_user_usage_log_read
);
4470 cls_register_cxx_method(h_class
, RGW_USER_USAGE_LOG_TRIM
, CLS_METHOD_RD
| CLS_METHOD_WR
, rgw_user_usage_log_trim
, &h_rgw_user_usage_log_trim
);
4471 cls_register_cxx_method(h_class
, RGW_USAGE_LOG_CLEAR
, CLS_METHOD_WR
, rgw_usage_log_clear
, &h_rgw_usage_log_clear
);
4473 /* garbage collection */
4474 cls_register_cxx_method(h_class
, RGW_GC_SET_ENTRY
, CLS_METHOD_RD
| CLS_METHOD_WR
, rgw_cls_gc_set_entry
, &h_rgw_gc_set_entry
);
4475 cls_register_cxx_method(h_class
, RGW_GC_DEFER_ENTRY
, CLS_METHOD_RD
| CLS_METHOD_WR
, rgw_cls_gc_defer_entry
, &h_rgw_gc_set_entry
);
4476 cls_register_cxx_method(h_class
, RGW_GC_LIST
, CLS_METHOD_RD
, rgw_cls_gc_list
, &h_rgw_gc_list
);
4477 cls_register_cxx_method(h_class
, RGW_GC_REMOVE
, CLS_METHOD_RD
| CLS_METHOD_WR
, rgw_cls_gc_remove
, &h_rgw_gc_remove
);
4479 /* lifecycle bucket list */
4480 cls_register_cxx_method(h_class
, RGW_LC_GET_ENTRY
, CLS_METHOD_RD
, rgw_cls_lc_get_entry
, &h_rgw_lc_get_entry
);
4481 cls_register_cxx_method(h_class
, RGW_LC_SET_ENTRY
, CLS_METHOD_RD
| CLS_METHOD_WR
, rgw_cls_lc_set_entry
, &h_rgw_lc_set_entry
);
4482 cls_register_cxx_method(h_class
, RGW_LC_RM_ENTRY
, CLS_METHOD_RD
| CLS_METHOD_WR
, rgw_cls_lc_rm_entry
, &h_rgw_lc_rm_entry
);
4483 cls_register_cxx_method(h_class
, RGW_LC_GET_NEXT_ENTRY
, CLS_METHOD_RD
, rgw_cls_lc_get_next_entry
, &h_rgw_lc_get_next_entry
);
4484 cls_register_cxx_method(h_class
, RGW_LC_PUT_HEAD
, CLS_METHOD_RD
| CLS_METHOD_WR
, rgw_cls_lc_put_head
, &h_rgw_lc_put_head
);
4485 cls_register_cxx_method(h_class
, RGW_LC_GET_HEAD
, CLS_METHOD_RD
, rgw_cls_lc_get_head
, &h_rgw_lc_get_head
);
4486 cls_register_cxx_method(h_class
, RGW_LC_LIST_ENTRIES
, CLS_METHOD_RD
, rgw_cls_lc_list_entries
, &h_rgw_lc_list_entries
);
4489 cls_register_cxx_method(h_class
, RGW_RESHARD_ADD
, CLS_METHOD_RD
| CLS_METHOD_WR
, rgw_reshard_add
, &h_rgw_reshard_add
);
4490 cls_register_cxx_method(h_class
, RGW_RESHARD_LIST
, CLS_METHOD_RD
, rgw_reshard_list
, &h_rgw_reshard_list
);
4491 cls_register_cxx_method(h_class
, RGW_RESHARD_GET
, CLS_METHOD_RD
,rgw_reshard_get
, &h_rgw_reshard_get
);
4492 cls_register_cxx_method(h_class
, RGW_RESHARD_REMOVE
, CLS_METHOD_RD
| CLS_METHOD_WR
, rgw_reshard_remove
, &h_rgw_reshard_remove
);
4494 /* resharding attribute */
4495 cls_register_cxx_method(h_class
, RGW_SET_BUCKET_RESHARDING
, CLS_METHOD_RD
| CLS_METHOD_WR
,
4496 rgw_set_bucket_resharding
, &h_rgw_set_bucket_resharding
);
4497 cls_register_cxx_method(h_class
, RGW_CLEAR_BUCKET_RESHARDING
, CLS_METHOD_RD
| CLS_METHOD_WR
,
4498 rgw_clear_bucket_resharding
, &h_rgw_clear_bucket_resharding
);
4499 cls_register_cxx_method(h_class
, RGW_GUARD_BUCKET_RESHARDING
, CLS_METHOD_RD
,
4500 rgw_guard_bucket_resharding
, &h_rgw_guard_bucket_resharding
);
4501 cls_register_cxx_method(h_class
, RGW_GET_BUCKET_RESHARDING
, CLS_METHOD_RD
,
4502 rgw_get_bucket_resharding
, &h_rgw_get_bucket_resharding
);