1 // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
2 // vim: ts=8 sw=2 smarttab
4 #include "include/types.h"
8 #include <boost/algorithm/string.hpp>
10 #include "objclass/objclass.h"
11 #include "cls/rgw/cls_rgw_ops.h"
12 #include "cls/rgw/cls_rgw_const.h"
13 #include "common/Clock.h"
14 #include "common/strtol.h"
15 #include "common/escape.h"
17 #include "include/compat.h"
18 #include <boost/lexical_cast.hpp>
// No UTF-8 character can begin with 0x80, so this is a safe indicator
// of a special bucket-index entry for the first byte. Note: although
// it has no impact, the 2nd, 3rd, or 4th byte of a UTF-8 character
// may also begin with 0x80; only the first byte of a key is checked.
#define BI_PREFIX_CHAR 0x80

// Ordinal slots into bucket_index_prefixes[]; each selects one of the
// special "ugly namespace" sub-indexes stored alongside plain object keys.
#define BI_BUCKET_OBJS_INDEX 0
#define BI_BUCKET_LOG_INDEX 1
#define BI_BUCKET_OBJ_INSTANCE_INDEX 2
#define BI_BUCKET_OLH_DATA_INDEX 3

// Number of entries in bucket_index_prefixes[] (sentinel, must stay last).
#define BI_BUCKET_LAST_INDEX 4
/* Key prefixes (after the leading BI_PREFIX_CHAR) for each special
 * bucket-index namespace; indexed by the BI_BUCKET_*_INDEX macros. */
static std::string bucket_index_prefixes[] = { "", /* special handling for the objs list index */
                                               "0_",     /* bucket log index */
                                               "1000_",  /* obj instance index */
                                               "1001_",  /* olh data index */

                                               /* this must be the last index */
                                               "9999_",};
45 static bool bi_is_objs_index(const string
& s
) {
46 return ((unsigned char)s
[0] != BI_PREFIX_CHAR
);
49 int bi_entry_type(const string
& s
)
51 if (bi_is_objs_index(s
)) {
52 return BI_BUCKET_OBJS_INDEX
;
56 i
< sizeof(bucket_index_prefixes
) / sizeof(bucket_index_prefixes
[0]);
58 const string
& t
= bucket_index_prefixes
[i
];
60 if (s
.compare(1, t
.size(), t
) == 0) {
68 static bool bi_entry_gt(const string
& first
, const string
& second
)
70 int fi
= bi_entry_type(first
);
71 int si
= bi_entry_type(second
);
79 return first
> second
;
82 static void get_time_key(real_time
& ut
, string
*key
)
85 ceph_timespec ts
= ceph::real_clock::to_ceph_timespec(ut
);
86 snprintf(buf
, 32, "%011llu.%09u", (unsigned long long)ts
.tv_sec
, (unsigned int)ts
.tv_nsec
);
90 static void get_index_ver_key(cls_method_context_t hctx
, uint64_t index_ver
, string
*key
)
93 snprintf(buf
, sizeof(buf
), "%011llu.%llu.%d", (unsigned long long)index_ver
,
94 (unsigned long long)cls_current_version(hctx
),
95 cls_current_subop_num(hctx
));
99 static void bi_log_prefix(string
& key
)
101 key
= BI_PREFIX_CHAR
;
102 key
.append(bucket_index_prefixes
[BI_BUCKET_LOG_INDEX
]);
105 static void bi_log_index_key(cls_method_context_t hctx
, string
& key
, string
& id
, uint64_t index_ver
)
108 get_index_ver_key(hctx
, index_ver
, &id
);
112 static int log_index_operation(cls_method_context_t hctx
, cls_rgw_obj_key
& obj_key
, RGWModifyOp op
,
113 string
& tag
, real_time
& timestamp
,
114 rgw_bucket_entry_ver
& ver
, RGWPendingState state
, uint64_t index_ver
,
115 string
& max_marker
, uint16_t bilog_flags
, string
*owner
, string
*owner_display_name
, rgw_zone_set
*zones_trace
)
119 rgw_bi_log_entry entry
;
121 entry
.object
= obj_key
.name
;
122 entry
.instance
= obj_key
.instance
;
123 entry
.timestamp
= timestamp
;
127 entry
.index_ver
= index_ver
;
129 entry
.bilog_flags
= bilog_flags
;
131 entry
.owner
= *owner
;
133 if (owner_display_name
) {
134 entry
.owner_display_name
= *owner_display_name
;
137 entry
.zones_trace
= std::move(*zones_trace
);
141 bi_log_index_key(hctx
, key
, entry
.id
, index_ver
);
145 if (entry
.id
> max_marker
)
146 max_marker
= entry
.id
;
148 return cls_cxx_map_set_val(hctx
, key
, &bl
);
152 * Read list of objects, skipping objects in the "ugly namespace". The
153 * "ugly namespace" entries begin with BI_PREFIX_CHAR (0x80). Valid
154 * UTF-8 object names can *both* preceed and follow the "ugly
157 static int get_obj_vals(cls_method_context_t hctx
,
159 const string
& filter_prefix
,
161 map
<string
, bufferlist
> *pkeys
,
164 int ret
= cls_cxx_map_get_vals(hctx
, start
, filter_prefix
,
165 num_entries
, pkeys
, pmore
);
170 if (pkeys
->empty()) {
174 auto last_element
= pkeys
->rbegin();
175 if ((unsigned char)last_element
->first
[0] < BI_PREFIX_CHAR
) {
176 /* if the first character of the last entry is less than the
177 * prefix then all entries must preceed the "ugly namespace" and
183 auto first_element
= pkeys
->begin();
184 if ((unsigned char)first_element
->first
[0] > BI_PREFIX_CHAR
) {
185 /* the first character of the last entry is in or after the "ugly
186 * namespace", so if the first character of the first entry
187 * follows the "ugly namespace" then all entries do and we're done
192 /* at this point we know we have entries that could precede the
193 * "ugly namespace", be in the "ugly namespace", and follow the
194 * "ugly namespace", so let's rebuild the list, only keeping entries
195 * outside the "ugly namespace"
198 auto comp
= [](const pair
<string
, bufferlist
>& l
, const string
&r
) {
201 string new_start
= {static_cast<char>(BI_PREFIX_CHAR
+ 1)};
203 auto lower
= pkeys
->lower_bound(string
{static_cast<char>(BI_PREFIX_CHAR
)});
204 auto upper
= std::lower_bound(lower
, pkeys
->end(), new_start
, comp
);
205 pkeys
->erase(lower
, upper
);
207 if (num_entries
== (int)pkeys
->size() || !(*pmore
)) {
211 if (pkeys
->size() && new_start
< pkeys
->rbegin()->first
) {
212 new_start
= pkeys
->rbegin()->first
;
215 map
<string
, bufferlist
> new_keys
;
217 /* now get some more keys */
218 ret
= cls_cxx_map_get_vals(hctx
, new_start
, filter_prefix
,
219 num_entries
- pkeys
->size(), &new_keys
, pmore
);
224 pkeys
->insert(std::make_move_iterator(new_keys
.begin()),
225 std::make_move_iterator(new_keys
.end()));
/*
 * get a monotonically decreasing string representation.
 * For num = x, num = y, where x > y, str(x) < str(y)
 * Another property is that string size starts short and grows as num
 * increases: each bucket gets a leading digit ('9' down to '4') so the
 * buckets themselves sort in the right order.
 */
static void decreasing_str(uint64_t num, string *str)
{
  char buf[32];
  if (num < 0x10) { /* 16 */
    snprintf(buf, sizeof(buf), "9%02lld", 15 - (long long)num);
  } else if (num < 0x100) { /* 256 */
    snprintf(buf, sizeof(buf), "8%03lld", 255 - (long long)num);
  } else if (num < 0x1000) /* 4096 */ {
    snprintf(buf, sizeof(buf), "7%04lld", 4095 - (long long)num);
  } else if (num < 0x10000) /* 65536 */ {
    snprintf(buf, sizeof(buf), "6%05lld", 65535 - (long long)num);
  } else if (num < 0x100000000) /* 4G */ {
    snprintf(buf, sizeof(buf), "5%010lld", 0xFFFFFFFF - (long long)num);
  } else {
    snprintf(buf, sizeof(buf), "4%020lld", (long long)-num);
  }

  *str = buf;
}
256 * We hold two different indexes for objects. The first one holds the
257 * list of objects in the order that we want them to be listed. The
258 * second one only holds the objects instances (for versioned
259 * objects), and they're not arranged in any particular order. When
260 * listing objects we'll use the first index, when doing operations on
261 * the objects themselves we'll use the second index. Note that
262 * regular objects only map to the first index anyway
265 static void get_list_index_key(rgw_bucket_dir_entry
& entry
, string
*index_key
)
267 *index_key
= entry
.key
.name
;
270 decreasing_str(entry
.versioned_epoch
, &ver_str
);
271 string
instance_delim("\0i", 2);
272 string
ver_delim("\0v", 2);
274 index_key
->append(ver_delim
);
275 index_key
->append(ver_str
);
276 index_key
->append(instance_delim
);
277 index_key
->append(entry
.key
.instance
);
280 static void encode_obj_versioned_data_key(const cls_rgw_obj_key
& key
, string
*index_key
, bool append_delete_marker_suffix
= false)
282 *index_key
= BI_PREFIX_CHAR
;
283 index_key
->append(bucket_index_prefixes
[BI_BUCKET_OBJ_INSTANCE_INDEX
]);
284 index_key
->append(key
.name
);
285 string
delim("\0i", 2);
286 index_key
->append(delim
);
287 index_key
->append(key
.instance
);
288 if (append_delete_marker_suffix
) {
290 index_key
->append(dm
);
294 static void encode_obj_index_key(const cls_rgw_obj_key
& key
, string
*index_key
)
296 if (key
.instance
.empty()) {
297 *index_key
= key
.name
;
299 encode_obj_versioned_data_key(key
, index_key
);
303 static void encode_olh_data_key(const cls_rgw_obj_key
& key
, string
*index_key
)
305 *index_key
= BI_PREFIX_CHAR
;
306 index_key
->append(bucket_index_prefixes
[BI_BUCKET_OLH_DATA_INDEX
]);
307 index_key
->append(key
.name
);
311 static int read_index_entry(cls_method_context_t hctx
, string
& name
, T
*entry
);
313 static int encode_list_index_key(cls_method_context_t hctx
, const cls_rgw_obj_key
& key
, string
*index_key
)
315 if (key
.instance
.empty()) {
316 *index_key
= key
.name
;
320 string obj_index_key
;
321 cls_rgw_obj_key
tmp_key(key
);
322 if (tmp_key
.instance
== "null") {
323 tmp_key
.instance
.clear();
325 encode_obj_versioned_data_key(tmp_key
, &obj_index_key
);
327 rgw_bucket_dir_entry entry
;
329 int ret
= read_index_entry(hctx
, obj_index_key
, &entry
);
330 if (ret
== -ENOENT
) {
331 /* couldn't find the entry, set key value after the current object */
332 char buf
[2] = { 0x1, 0 };
334 *index_key
= key
.name
+ s
;
338 CLS_LOG(1, "ERROR: encode_list_index_key(): cls_cxx_map_get_val returned %d\n", ret
);
342 get_list_index_key(entry
, index_key
);
/* Split a \0-delimited composite index key into its component
 * substrings (the bytes between NULs). Embedded NULs terminate each
 * component; the loop walks by C-string length within the full size. */
static void split_key(const string& key, list<string>& vals)
{
  size_t pos = 0;
  const char *p = key.c_str();
  while (pos < key.size()) {
    size_t len = strlen(p);
    vals.push_back(p);
    pos += len + 1;
    p += len + 1;
  }
}
359 static string
escape_str(const string
& s
)
361 int len
= escape_json_attr_len(s
.c_str(), s
.size());
362 std::string
escaped(len
, 0);
363 escape_json_attr(s
.c_str(), s
.size(), escaped
.data());
368 * list index key structure:
370 * <obj name>\0[v<ver>\0i<instance id>]
372 static int decode_list_index_key(const string
& index_key
, cls_rgw_obj_key
*key
, uint64_t *ver
)
374 size_t len
= strlen(index_key
.c_str());
376 key
->instance
.clear();
379 if (len
== index_key
.size()) {
380 key
->name
= index_key
;
385 split_key(index_key
, vals
);
388 CLS_LOG(0, "ERROR: %s(): bad index_key (%s): split_key() returned empty vals", __func__
, escape_str(index_key
).c_str());
392 list
<string
>::iterator iter
= vals
.begin();
396 if (iter
== vals
.end()) {
397 CLS_LOG(0, "ERROR: %s(): bad index_key (%s): no vals", __func__
, escape_str(index_key
).c_str());
401 for (; iter
!= vals
.end(); ++iter
) {
404 key
->instance
= val
.substr(1);
405 } else if (val
[0] == 'v') {
407 const char *s
= val
.c_str() + 1;
408 *ver
= strict_strtoll(s
, 10, &err
);
410 CLS_LOG(0, "ERROR: %s(): bad index_key (%s): could not parse val (v=%s)", __func__
, escape_str(index_key
).c_str(), s
);
419 static int read_bucket_header(cls_method_context_t hctx
,
420 rgw_bucket_dir_header
*header
)
423 int rc
= cls_cxx_map_read_header(hctx
, &bl
);
427 if (bl
.length() == 0) {
428 *header
= rgw_bucket_dir_header();
431 auto iter
= bl
.cbegin();
433 decode(*header
, iter
);
434 } catch (buffer::error
& err
) {
435 CLS_LOG(1, "ERROR: read_bucket_header(): failed to decode header\n");
442 int rgw_bucket_list(cls_method_context_t hctx
, bufferlist
*in
, bufferlist
*out
)
444 // maximum number of calls to get_obj_vals we'll try; compromise
445 // between wanting to return the requested # of entries, but not
446 // wanting to slow down this op with too many omap reads
447 constexpr int max_attempts
= 8;
449 auto iter
= in
->cbegin();
454 } catch (buffer::error
& err
) {
455 CLS_LOG(1, "ERROR: %s: failed to decode request\n", __func__
);
459 rgw_cls_list_ret ret
;
460 rgw_bucket_dir
& new_dir
= ret
.dir
;
461 auto& name_entry_map
= new_dir
.m
; // map of keys to entries
463 int rc
= read_bucket_header(hctx
, &new_dir
.header
);
465 CLS_LOG(1, "ERROR: %s: failed to read header\n", __func__
);
469 string start_after_key
; // key that we can start listing at, one of a)
470 // sent in by caller, b) last item visited, or
471 // c) when delimiter present, a key that will
472 // move past the subdirectory
473 encode_list_index_key(hctx
, op
.start_obj
, &start_after_key
);
475 string previous_key
; // last key stored in result, so if we have to
476 // call get_obj_vals multiple times, we do not
477 // add the overlap to result
478 string previous_prefix_key
; // last prefix_key stored in result, so
479 // we can skip over entries with the
482 bool done
= false; // whether we need to keep calling get_obj_vals
483 bool more
= true; // output parameter of get_obj_vals
484 bool has_delimiter
= !op
.delimiter
.empty();
487 boost::algorithm::ends_with(start_after_key
, op
.delimiter
)) {
488 // advance past all subdirectory entries if we start after a
490 start_after_key
= cls_rgw_after_delim(start_after_key
);
493 for (int attempt
= 0;
494 attempt
< max_attempts
&&
497 name_entry_map
.size() < op
.num_entries
;
499 map
<string
, bufferlist
> keys
;
500 rc
= get_obj_vals(hctx
, start_after_key
, op
.filter_prefix
,
501 op
.num_entries
- name_entry_map
.size(),
509 for (auto kiter
= keys
.cbegin(); kiter
!= keys
.cend(); ++kiter
) {
510 if (!bi_is_objs_index(kiter
->first
)) {
511 // we're done if we walked off the end of the objects area of
517 rgw_bucket_dir_entry entry
;
519 const bufferlist
& entrybl
= kiter
->second
;
520 auto eiter
= entrybl
.cbegin();
521 decode(entry
, eiter
);
522 } catch (buffer::error
& err
) {
523 CLS_LOG(1, "ERROR: %s: failed to decode entry, key=%s\n",
524 __func__
, kiter
->first
.c_str());
528 start_after_key
= kiter
->first
;
529 CLS_LOG(20, "%s: working on key=%s len=%zu",
530 __func__
, kiter
->first
.c_str(), kiter
->first
.size());
534 int ret
= decode_list_index_key(kiter
->first
, &key
, &ver
);
536 CLS_LOG(0, "ERROR: %s: failed to decode list index key (%s)\n",
537 __func__
, escape_str(kiter
->first
).c_str());
541 if (!entry
.is_valid()) {
542 CLS_LOG(20, "%s: entry %s[%s] is not valid\n",
543 __func__
, key
.name
.c_str(), key
.instance
.c_str());
547 // filter out noncurrent versions, delete markers, and initial marker
548 if (!op
.list_versions
&&
549 (!entry
.is_visible() || op
.start_obj
.name
== key
.name
)) {
550 CLS_LOG(20, "%s: entry %s[%s] is not visible\n",
551 __func__
, key
.name
.c_str(), key
.instance
.c_str());
556 int delim_pos
= key
.name
.find(op
.delimiter
, op
.filter_prefix
.size());
558 if (delim_pos
>= 0) {
559 /* extract key with trailing delimiter */
561 key
.name
.substr(0, delim_pos
+ op
.delimiter
.length());
563 if (prefix_key
== previous_prefix_key
) {
564 continue; // we've already added this;
566 previous_prefix_key
= prefix_key
;
569 if (name_entry_map
.size() < op
.num_entries
) {
570 rgw_bucket_dir_entry proxy_entry
;
571 cls_rgw_obj_key
proxy_key(prefix_key
);
572 proxy_entry
.key
= cls_rgw_obj_key(proxy_key
);
573 proxy_entry
.flags
= rgw_bucket_dir_entry::FLAG_COMMON_PREFIX
;
574 name_entry_map
[prefix_key
] = proxy_entry
;
576 CLS_LOG(20, "%s: got common prefix entry %s[%s] num entries=%lu\n",
577 __func__
, proxy_key
.name
.c_str(), proxy_key
.instance
.c_str(),
578 name_entry_map
.size());
581 // make sure that if this is the last item added to the
582 // result from this call to get_obj_vals, the next call will
583 // skip past rest of "subdirectory"
584 start_after_key
= cls_rgw_after_delim(prefix_key
);
586 // advance to past this subdirectory, but then back up one,
587 // so the loop increment will put us in the right place
588 kiter
= keys
.lower_bound(start_after_key
);
594 // no delimiter after prefix found, so this is a "top-level"
595 // item and we can just fall through
598 if (name_entry_map
.size() < op
.num_entries
&&
599 kiter
->first
!= previous_key
) {
600 name_entry_map
[kiter
->first
] = entry
;
601 previous_key
= kiter
->first
;
602 CLS_LOG(20, "%s: got object entry %s[%s] num entries=%d\n",
603 __func__
, key
.name
.c_str(), key
.instance
.c_str(),
604 int(name_entry_map
.size()));
606 } // for (auto kiter...
607 } // for (int attempt...
609 ret
.is_truncated
= more
&& !done
;
614 static int check_index(cls_method_context_t hctx
,
615 rgw_bucket_dir_header
*existing_header
,
616 rgw_bucket_dir_header
*calc_header
)
618 int rc
= read_bucket_header(hctx
, existing_header
);
620 CLS_LOG(1, "ERROR: check_index(): failed to read header\n");
624 calc_header
->tag_timeout
= existing_header
->tag_timeout
;
625 calc_header
->ver
= existing_header
->ver
;
627 map
<string
, bufferlist
> keys
;
629 string filter_prefix
;
631 #define CHECK_CHUNK_SIZE 1000
636 rc
= get_obj_vals(hctx
, start_obj
, filter_prefix
, CHECK_CHUNK_SIZE
, &keys
, &more
);
640 std::map
<string
, bufferlist
>::iterator kiter
= keys
.begin();
641 for (; kiter
!= keys
.end(); ++kiter
) {
642 if (!bi_is_objs_index(kiter
->first
)) {
647 rgw_bucket_dir_entry entry
;
648 auto eiter
= kiter
->second
.cbegin();
650 decode(entry
, eiter
);
651 } catch (buffer::error
& err
) {
652 CLS_LOG(1, "ERROR: rgw_bucket_list(): failed to decode entry, key=%s\n", kiter
->first
.c_str());
655 rgw_bucket_category_stats
& stats
= calc_header
->stats
[entry
.meta
.category
];
657 stats
.total_size
+= entry
.meta
.accounted_size
;
658 stats
.total_size_rounded
+= cls_rgw_get_rounded_size(entry
.meta
.accounted_size
);
659 stats
.actual_size
+= entry
.meta
.size
;
661 start_obj
= kiter
->first
;
663 } while (keys
.size() == CHECK_CHUNK_SIZE
&& !done
);
668 int rgw_bucket_check_index(cls_method_context_t hctx
, bufferlist
*in
, bufferlist
*out
)
670 rgw_cls_check_index_ret ret
;
672 int rc
= check_index(hctx
, &ret
.existing_header
, &ret
.calculated_header
);
681 static int write_bucket_header(cls_method_context_t hctx
, rgw_bucket_dir_header
*header
)
685 bufferlist header_bl
;
686 encode(*header
, header_bl
);
687 return cls_cxx_map_write_header(hctx
, &header_bl
);
691 int rgw_bucket_rebuild_index(cls_method_context_t hctx
, bufferlist
*in
, bufferlist
*out
)
693 rgw_bucket_dir_header existing_header
;
694 rgw_bucket_dir_header calc_header
;
695 int rc
= check_index(hctx
, &existing_header
, &calc_header
);
699 return write_bucket_header(hctx
, &calc_header
);
702 int rgw_bucket_update_stats(cls_method_context_t hctx
, bufferlist
*in
, bufferlist
*out
)
705 rgw_cls_bucket_update_stats_op op
;
706 auto iter
= in
->cbegin();
709 } catch (buffer::error
& err
) {
710 CLS_LOG(1, "ERROR: %s(): failed to decode request\n", __func__
);
714 rgw_bucket_dir_header header
;
715 int rc
= read_bucket_header(hctx
, &header
);
717 CLS_LOG(1, "ERROR: %s(): failed to read header\n", __func__
);
721 for (auto& s
: op
.stats
) {
722 auto& dest
= header
.stats
[s
.first
];
726 dest
.total_size
+= s
.second
.total_size
;
727 dest
.total_size_rounded
+= s
.second
.total_size_rounded
;
728 dest
.num_entries
+= s
.second
.num_entries
;
729 dest
.actual_size
+= s
.second
.actual_size
;
733 return write_bucket_header(hctx
, &header
);
736 int rgw_bucket_init_index(cls_method_context_t hctx
, bufferlist
*in
, bufferlist
*out
)
738 bufferlist header_bl
;
739 int rc
= cls_cxx_map_read_header(hctx
, &header_bl
);
750 if (header_bl
.length() != 0) {
751 CLS_LOG(1, "ERROR: index already initialized\n");
757 return write_bucket_header(hctx
, &dir
.header
);
760 int rgw_bucket_set_tag_timeout(cls_method_context_t hctx
, bufferlist
*in
, bufferlist
*out
)
763 rgw_cls_tag_timeout_op op
;
764 auto iter
= in
->cbegin();
767 } catch (buffer::error
& err
) {
768 CLS_LOG(1, "ERROR: rgw_bucket_set_tag_timeout(): failed to decode request\n");
772 rgw_bucket_dir_header header
;
773 int rc
= read_bucket_header(hctx
, &header
);
775 CLS_LOG(1, "ERROR: rgw_bucket_set_tag_timeout(): failed to read header\n");
779 header
.tag_timeout
= op
.tag_timeout
;
781 return write_bucket_header(hctx
, &header
);
784 static int read_key_entry(cls_method_context_t hctx
, cls_rgw_obj_key
& key
,
785 string
*idx
, rgw_bucket_dir_entry
*entry
,
786 bool special_delete_marker_name
= false);
788 int rgw_bucket_prepare_op(cls_method_context_t hctx
, bufferlist
*in
, bufferlist
*out
)
791 rgw_cls_obj_prepare_op op
;
792 auto iter
= in
->cbegin();
795 } catch (buffer::error
& err
) {
796 CLS_LOG(1, "ERROR: rgw_bucket_prepare_op(): failed to decode request\n");
800 if (op
.tag
.empty()) {
801 CLS_LOG(1, "ERROR: tag is empty\n");
805 CLS_LOG(1, "rgw_bucket_prepare_op(): request: op=%d name=%s instance=%s tag=%s\n",
806 op
.op
, op
.key
.name
.c_str(), op
.key
.instance
.c_str(), op
.tag
.c_str());
811 rgw_bucket_dir_entry entry
;
812 int rc
= read_key_entry(hctx
, op
.key
, &idx
, &entry
);
813 if (rc
< 0 && rc
!= -ENOENT
)
816 bool noent
= (rc
== -ENOENT
);
820 if (noent
) { // no entry, initialize fields
822 entry
.ver
= rgw_bucket_entry_ver();
823 entry
.exists
= false;
824 entry
.locator
= op
.locator
;
827 // fill in proper state
828 rgw_bucket_pending_info info
;
829 info
.timestamp
= real_clock::now();
830 info
.state
= CLS_RGW_STATE_PENDING_MODIFY
;
832 entry
.pending_map
.insert(pair
<string
, rgw_bucket_pending_info
>(op
.tag
, info
));
834 // write out new key to disk
836 encode(entry
, info_bl
);
837 return cls_cxx_map_set_val(hctx
, idx
, &info_bl
);
840 static void unaccount_entry(rgw_bucket_dir_header
& header
,
841 rgw_bucket_dir_entry
& entry
)
843 rgw_bucket_category_stats
& stats
= header
.stats
[entry
.meta
.category
];
845 stats
.total_size
-= entry
.meta
.accounted_size
;
846 stats
.total_size_rounded
-= cls_rgw_get_rounded_size(entry
.meta
.accounted_size
);
847 stats
.actual_size
-= entry
.meta
.size
;
850 static void log_entry(const char *func
, const char *str
, rgw_bucket_dir_entry
*entry
)
852 CLS_LOG(1, "%s(): %s: ver=%ld:%llu name=%s instance=%s locator=%s\n", func
, str
,
853 (long)entry
->ver
.pool
, (unsigned long long)entry
->ver
.epoch
,
854 entry
->key
.name
.c_str(), entry
->key
.instance
.c_str(), entry
->locator
.c_str());
857 static void log_entry(const char *func
, const char *str
, rgw_bucket_olh_entry
*entry
)
859 CLS_LOG(1, "%s(): %s: epoch=%llu name=%s instance=%s tag=%s\n", func
, str
,
860 (unsigned long long)entry
->epoch
, entry
->key
.name
.c_str(), entry
->key
.instance
.c_str(),
865 static int read_omap_entry(cls_method_context_t hctx
, const std::string
& name
,
868 bufferlist current_entry
;
869 int rc
= cls_cxx_map_get_val(hctx
, name
, ¤t_entry
);
874 auto cur_iter
= current_entry
.cbegin();
876 decode(*entry
, cur_iter
);
877 } catch (buffer::error
& err
) {
878 CLS_LOG(1, "ERROR: %s(): failed to decode entry\n", __func__
);
885 static int read_index_entry(cls_method_context_t hctx
, string
& name
, T
* entry
)
887 int ret
= read_omap_entry(hctx
, name
, entry
);
892 log_entry(__func__
, "existing entry", entry
);
896 static int read_key_entry(cls_method_context_t hctx
, cls_rgw_obj_key
& key
,
897 string
*idx
, rgw_bucket_dir_entry
*entry
,
898 bool special_delete_marker_name
)
900 encode_obj_index_key(key
, idx
);
901 int rc
= read_index_entry(hctx
, *idx
, entry
);
906 if (key
.instance
.empty() &&
907 entry
->flags
& rgw_bucket_dir_entry::FLAG_VER_MARKER
) {
908 /* we only do it where key.instance is empty. In this case the
909 * delete marker will have a separate entry in the index to avoid
910 * collisions with the actual object, as it's mutable
912 if (special_delete_marker_name
) {
913 encode_obj_versioned_data_key(key
, idx
, true);
914 rc
= read_index_entry(hctx
, *idx
, entry
);
919 encode_obj_versioned_data_key(key
, idx
);
920 rc
= read_index_entry(hctx
, *idx
, entry
);
922 *entry
= rgw_bucket_dir_entry(); /* need to reset entry because we initialized it earlier */
930 int rgw_bucket_complete_op(cls_method_context_t hctx
, bufferlist
*in
, bufferlist
*out
)
933 rgw_cls_obj_complete_op op
;
934 auto iter
= in
->cbegin();
937 } catch (buffer::error
& err
) {
938 CLS_LOG(1, "ERROR: rgw_bucket_complete_op(): failed to decode request\n");
941 CLS_LOG(1, "rgw_bucket_complete_op(): request: op=%d name=%s instance=%s ver=%lu:%llu tag=%s\n",
942 op
.op
, op
.key
.name
.c_str(), op
.key
.instance
.c_str(),
943 (unsigned long)op
.ver
.pool
, (unsigned long long)op
.ver
.epoch
,
946 rgw_bucket_dir_header header
;
947 int rc
= read_bucket_header(hctx
, &header
);
949 CLS_LOG(1, "ERROR: rgw_bucket_complete_op(): failed to read header\n");
953 rgw_bucket_dir_entry entry
;
957 rc
= read_key_entry(hctx
, op
.key
, &idx
, &entry
);
961 entry
.meta
= op
.meta
;
962 entry
.locator
= op
.locator
;
968 entry
.index_ver
= header
.ver
;
969 /* resetting entry flags, entry might have been previously a delete
971 entry
.flags
= (entry
.key
.instance
.empty() ?
973 rgw_bucket_dir_entry::FLAG_VER
);
976 map
<string
, rgw_bucket_pending_info
>::iterator pinter
= entry
.pending_map
.find(op
.tag
);
977 if (pinter
== entry
.pending_map
.end()) {
978 CLS_LOG(1, "ERROR: couldn't find tag for pending operation\n");
981 entry
.pending_map
.erase(pinter
);
985 bufferlist update_bl
;
987 if (op
.tag
.size() && op
.op
== CLS_RGW_OP_CANCEL
) {
988 CLS_LOG(1, "rgw_bucket_complete_op(): cancel requested\n");
990 } else if (op
.ver
.pool
== entry
.ver
.pool
&&
991 op
.ver
.epoch
&& op
.ver
.epoch
<= entry
.ver
.epoch
) {
992 CLS_LOG(1, "rgw_bucket_complete_op(): skipping request, old epoch\n");
999 bufferlist new_key_bl
;
1000 encode(entry
, new_key_bl
);
1001 return cls_cxx_map_set_val(hctx
, idx
, &new_key_bl
);
1007 unaccount_entry(header
, entry
);
1011 switch ((int)op
.op
) {
1012 case CLS_RGW_OP_DEL
:
1013 entry
.meta
= op
.meta
;
1015 if (!entry
.pending_map
.size()) {
1016 int ret
= cls_cxx_map_remove_key(hctx
, idx
);
1020 entry
.exists
= false;
1021 bufferlist new_key_bl
;
1022 encode(entry
, new_key_bl
);
1023 int ret
= cls_cxx_map_set_val(hctx
, idx
, &new_key_bl
);
1031 case CLS_RGW_OP_ADD
:
1033 rgw_bucket_dir_entry_meta
& meta
= op
.meta
;
1034 rgw_bucket_category_stats
& stats
= header
.stats
[meta
.category
];
1037 entry
.exists
= true;
1039 stats
.num_entries
++;
1040 stats
.total_size
+= meta
.accounted_size
;
1041 stats
.total_size_rounded
+= cls_rgw_get_rounded_size(meta
.accounted_size
);
1042 stats
.actual_size
+= meta
.size
;
1043 bufferlist new_key_bl
;
1044 encode(entry
, new_key_bl
);
1045 int ret
= cls_cxx_map_set_val(hctx
, idx
, &new_key_bl
);
1052 if (op
.log_op
&& !header
.syncstopped
) {
1053 rc
= log_index_operation(hctx
, op
.key
, op
.op
, op
.tag
, entry
.meta
.mtime
, entry
.ver
,
1054 CLS_RGW_STATE_COMPLETE
, header
.ver
, header
.max_marker
, op
.bilog_flags
, NULL
, NULL
, &op
.zones_trace
);
1059 list
<cls_rgw_obj_key
>::iterator remove_iter
;
1060 CLS_LOG(20, "rgw_bucket_complete_op(): remove_objs.size()=%d\n", (int)op
.remove_objs
.size());
1061 for (remove_iter
= op
.remove_objs
.begin(); remove_iter
!= op
.remove_objs
.end(); ++remove_iter
) {
1062 cls_rgw_obj_key
& remove_key
= *remove_iter
;
1063 CLS_LOG(1, "rgw_bucket_complete_op(): removing entries, read_index_entry name=%s instance=%s\n",
1064 remove_key
.name
.c_str(), remove_key
.instance
.c_str());
1065 rgw_bucket_dir_entry remove_entry
;
1067 int ret
= read_key_entry(hctx
, remove_key
, &k
, &remove_entry
);
1069 CLS_LOG(1, "rgw_bucket_complete_op(): removing entries, read_index_entry name=%s instance=%s ret=%d\n",
1070 remove_key
.name
.c_str(), remove_key
.instance
.c_str(), ret
);
1074 "rgw_bucket_complete_op(): entry.name=%s entry.instance=%s entry.meta.category=%d\n",
1075 remove_entry
.key
.name
.c_str(),
1076 remove_entry
.key
.instance
.c_str(),
1077 int(remove_entry
.meta
.category
));
1078 unaccount_entry(header
, remove_entry
);
1080 if (op
.log_op
&& !header
.syncstopped
) {
1081 ++header
.ver
; // increment index version, or we'll overwrite keys previously written
1082 rc
= log_index_operation(hctx
, remove_key
, CLS_RGW_OP_DEL
, op
.tag
, remove_entry
.meta
.mtime
,
1083 remove_entry
.ver
, CLS_RGW_STATE_COMPLETE
, header
.ver
, header
.max_marker
, op
.bilog_flags
, NULL
, NULL
, &op
.zones_trace
);
1088 ret
= cls_cxx_map_remove_key(hctx
, k
);
1090 CLS_LOG(1, "rgw_bucket_complete_op(): cls_cxx_map_remove_key, failed to remove entry, name=%s instance=%s read_index_entry ret=%d\n", remove_key
.name
.c_str(), remove_key
.instance
.c_str(), rc
);
1095 return write_bucket_header(hctx
, &header
);
1099 static int write_entry(cls_method_context_t hctx
, T
& entry
, const string
& key
)
1103 return cls_cxx_map_set_val(hctx
, key
, &bl
);
1106 static int read_olh(cls_method_context_t hctx
,cls_rgw_obj_key
& obj_key
, rgw_bucket_olh_entry
*olh_data_entry
, string
*index_key
, bool *found
)
1108 cls_rgw_obj_key olh_key
;
1109 olh_key
.name
= obj_key
.name
;
1111 encode_olh_data_key(olh_key
, index_key
);
1112 int ret
= read_index_entry(hctx
, *index_key
, olh_data_entry
);
1113 if (ret
< 0 && ret
!= -ENOENT
) {
1114 CLS_LOG(0, "ERROR: read_index_entry() olh_key=%s ret=%d", olh_key
.name
.c_str(), ret
);
1118 *found
= (ret
!= -ENOENT
);
1123 static void update_olh_log(rgw_bucket_olh_entry
& olh_data_entry
, OLHLogOp op
, const string
& op_tag
,
1124 cls_rgw_obj_key
& key
, bool delete_marker
, uint64_t epoch
)
1126 vector
<rgw_bucket_olh_log_entry
>& log
= olh_data_entry
.pending_log
[olh_data_entry
.epoch
];
1127 rgw_bucket_olh_log_entry log_entry
;
1128 log_entry
.epoch
= epoch
;
1130 log_entry
.op_tag
= op_tag
;
1131 log_entry
.key
= key
;
1132 log_entry
.delete_marker
= delete_marker
;
1133 log
.push_back(log_entry
);
1136 static int write_obj_instance_entry(cls_method_context_t hctx
, rgw_bucket_dir_entry
& instance_entry
, const string
& instance_idx
)
1138 CLS_LOG(20, "write_entry() instance=%s idx=%s flags=%d", escape_str(instance_entry
.key
.instance
).c_str(), instance_idx
.c_str(), instance_entry
.flags
);
1139 /* write the instance entry */
1140 int ret
= write_entry(hctx
, instance_entry
, instance_idx
);
1142 CLS_LOG(0, "ERROR: write_entry() instance_key=%s ret=%d", escape_str(instance_idx
).c_str(), ret
);
1149 * write object instance entry, and if needed also the list entry
1151 static int write_obj_entries(cls_method_context_t hctx
, rgw_bucket_dir_entry
& instance_entry
, const string
& instance_idx
)
1153 int ret
= write_obj_instance_entry(hctx
, instance_entry
, instance_idx
);
1157 string instance_list_idx
;
1158 get_list_index_key(instance_entry
, &instance_list_idx
);
1160 if (instance_idx
!= instance_list_idx
) {
1161 CLS_LOG(20, "write_entry() idx=%s flags=%d", escape_str(instance_list_idx
).c_str(), instance_entry
.flags
);
1162 /* write a new list entry for the object instance */
1163 ret
= write_entry(hctx
, instance_entry
, instance_list_idx
);
1165 CLS_LOG(0, "ERROR: write_entry() instance=%s instance_list_idx=%s ret=%d", instance_entry
.key
.instance
.c_str(), instance_list_idx
.c_str(), ret
);
1173 class BIVerObjEntry
{
1174 cls_method_context_t hctx
;
1175 cls_rgw_obj_key key
;
1176 string instance_idx
;
1178 rgw_bucket_dir_entry instance_entry
;
1183 BIVerObjEntry(cls_method_context_t
& _hctx
, const cls_rgw_obj_key
& _key
) : hctx(_hctx
), key(_key
), initialized(false) {
1186 int init(bool check_delete_marker
= true) {
1187 int ret
= read_key_entry(hctx
, key
, &instance_idx
, &instance_entry
,
1188 check_delete_marker
&& key
.instance
.empty()); /* this is potentially a delete marker, for null objects we
1189 keep separate instance entry for the delete markers */
1192 CLS_LOG(0, "ERROR: read_key_entry() idx=%s ret=%d", instance_idx
.c_str(), ret
);
1196 CLS_LOG(20, "read instance_entry key.name=%s key.instance=%s flags=%d", instance_entry
.key
.name
.c_str(), instance_entry
.key
.instance
.c_str(), instance_entry
.flags
);
1200 rgw_bucket_dir_entry
& get_dir_entry() {
1201 return instance_entry
;
1204 void init_as_delete_marker(rgw_bucket_dir_entry_meta
& meta
) {
1205 /* a deletion marker, need to initialize it, there's no instance entry for it yet */
1206 instance_entry
.key
= key
;
1207 instance_entry
.flags
= rgw_bucket_dir_entry::FLAG_DELETE_MARKER
;
1208 instance_entry
.meta
= meta
;
1209 instance_entry
.tag
= "delete-marker";
1214 void set_epoch(uint64_t epoch
) {
1215 instance_entry
.versioned_epoch
= epoch
;
1218 int unlink_list_entry() {
1220 /* this instance has a previous list entry, remove that entry */
1221 get_list_index_key(instance_entry
, &list_idx
);
1222 CLS_LOG(20, "unlink_list_entry() list_idx=%s", escape_str(list_idx
).c_str());
1223 int ret
= cls_cxx_map_remove_key(hctx
, list_idx
);
1225 CLS_LOG(0, "ERROR: cls_cxx_map_remove_key() list_idx=%s ret=%d", list_idx
.c_str(), ret
);
1232 /* remove the instance entry */
1233 CLS_LOG(20, "unlink() idx=%s", escape_str(instance_idx
).c_str());
1234 int ret
= cls_cxx_map_remove_key(hctx
, instance_idx
);
1236 CLS_LOG(0, "ERROR: cls_cxx_map_remove_key() instance_idx=%s ret=%d", instance_idx
.c_str(), ret
);
1242 int write_entries(uint64_t flags_set
, uint64_t flags_reset
) {
1249 instance_entry
.flags
&= ~flags_reset
;
1250 instance_entry
.flags
|= flags_set
;
1252 /* write the instance and list entries */
1253 bool special_delete_marker_key
= (instance_entry
.is_delete_marker() && instance_entry
.key
.instance
.empty());
1254 encode_obj_versioned_data_key(key
, &instance_idx
, special_delete_marker_key
);
1255 int ret
= write_obj_entries(hctx
, instance_entry
, instance_idx
);
1257 CLS_LOG(0, "ERROR: write_obj_entries() instance_idx=%s ret=%d", instance_idx
.c_str(), ret
);
1264 int write(uint64_t epoch
, bool current
) {
1265 if (instance_entry
.versioned_epoch
> 0) {
1266 CLS_LOG(20, "%s(): instance_entry.versioned_epoch=%d epoch=%d", __func__
, (int)instance_entry
.versioned_epoch
, (int)epoch
);
1267 /* this instance has a previous list entry, remove that entry */
1268 int ret
= unlink_list_entry();
1274 uint64_t flags
= rgw_bucket_dir_entry::FLAG_VER
;
1276 flags
|= rgw_bucket_dir_entry::FLAG_CURRENT
;
1279 instance_entry
.versioned_epoch
= epoch
;
1280 return write_entries(flags
, 0);
1283 int demote_current() {
1284 return write_entries(0, rgw_bucket_dir_entry::FLAG_CURRENT
);
1287 bool is_delete_marker() {
1288 return instance_entry
.is_delete_marker();
1291 int find_next_key(cls_rgw_obj_key
*next_key
, bool *found
) {
1293 /* this instance has a previous list entry, remove that entry */
1294 get_list_index_key(instance_entry
, &list_idx
);
1295 /* this is the current head, need to update! */
1296 map
<string
, bufferlist
> keys
;
1298 string filter
= key
.name
; /* list key starts with key name, filter it to avoid a case where we cross to
1299 different namespace */
1300 int ret
= cls_cxx_map_get_vals(hctx
, list_idx
, filter
, 1, &keys
, &more
);
1305 if (keys
.size() < 1) {
1310 rgw_bucket_dir_entry next_entry
;
1312 map
<string
, bufferlist
>::reverse_iterator last
= keys
.rbegin();
1314 auto iter
= last
->second
.cbegin();
1315 decode(next_entry
, iter
);
1316 } catch (buffer::error
& err
) {
1317 CLS_LOG(0, "ERROR; failed to decode entry: %s", last
->first
.c_str());
1321 *found
= (key
.name
== next_entry
.key
.name
);
1323 *next_key
= next_entry
.key
;
1330 return instance_entry
.meta
.mtime
;
1336 cls_method_context_t hctx
;
1337 cls_rgw_obj_key key
;
1339 string olh_data_idx
;
1340 rgw_bucket_olh_entry olh_data_entry
;
1344 BIOLHEntry(cls_method_context_t
& _hctx
, const cls_rgw_obj_key
& _key
) : hctx(_hctx
), key(_key
), initialized(false) { }
1346 int init(bool *exists
) {
1348 int ret
= read_olh(hctx
, key
, &olh_data_entry
, &olh_data_idx
, exists
);
1357 bool start_modify(uint64_t candidate_epoch
) {
1358 if (candidate_epoch
) {
1359 if (candidate_epoch
< olh_data_entry
.epoch
) {
1360 return false; /* olh cannot be modified, old epoch */
1362 olh_data_entry
.epoch
= candidate_epoch
;
1364 if (olh_data_entry
.epoch
== 0) {
1365 olh_data_entry
.epoch
= 2; /* versioned epoch should start with 2, 1 is reserved to converted plain entries */
1367 olh_data_entry
.epoch
++;
1373 uint64_t get_epoch() {
1374 return olh_data_entry
.epoch
;
1377 rgw_bucket_olh_entry
& get_entry() {
1378 return olh_data_entry
;
1381 void update(cls_rgw_obj_key
& key
, bool delete_marker
) {
1382 olh_data_entry
.delete_marker
= delete_marker
;
1383 olh_data_entry
.key
= key
;
1387 /* write the olh data entry */
1388 int ret
= write_entry(hctx
, olh_data_entry
, olh_data_idx
);
1390 CLS_LOG(0, "ERROR: write_entry() olh_key=%s ret=%d", olh_data_idx
.c_str(), ret
);
1397 void update_log(OLHLogOp op
, const string
& op_tag
, cls_rgw_obj_key
& key
, bool delete_marker
, uint64_t epoch
= 0) {
1399 epoch
= olh_data_entry
.epoch
;
1401 update_olh_log(olh_data_entry
, op
, op_tag
, key
, delete_marker
, epoch
);
1404 bool exists() { return olh_data_entry
.exists
; }
1406 void set_exists(bool exists
) {
1407 olh_data_entry
.exists
= exists
;
1410 bool pending_removal() { return olh_data_entry
.pending_removal
; }
1412 void set_pending_removal(bool pending_removal
) {
1413 olh_data_entry
.pending_removal
= pending_removal
;
1416 const string
& get_tag() { return olh_data_entry
.tag
; }
1417 void set_tag(const string
& tag
) {
1418 olh_data_entry
.tag
= tag
;
1422 static int write_version_marker(cls_method_context_t hctx
, cls_rgw_obj_key
& key
)
1424 rgw_bucket_dir_entry entry
;
1426 entry
.flags
= rgw_bucket_dir_entry::FLAG_VER_MARKER
;
1427 int ret
= write_entry(hctx
, entry
, key
.name
);
1429 CLS_LOG(0, "ERROR: write_entry returned ret=%d", ret
);
1436 * plain entries are the ones who were created when bucket was not
1437 * versioned, if we override these objects, we need to convert these
1438 * to versioned entries -- ones that have both data entry, and listing
1439 * key. Their version is going to be empty though
1441 static int convert_plain_entry_to_versioned(cls_method_context_t hctx
,
1442 cls_rgw_obj_key
& key
,
1443 bool demote_current
,
1446 if (!key
.instance
.empty()) {
1450 rgw_bucket_dir_entry entry
;
1453 int ret
= read_key_entry(hctx
, key
, &orig_idx
, &entry
);
1454 if (ret
!= -ENOENT
) {
1456 CLS_LOG(0, "ERROR: read_key_entry() returned ret=%d", ret
);
1460 entry
.versioned_epoch
= 1; /* converted entries are always 1 */
1461 entry
.flags
|= rgw_bucket_dir_entry::FLAG_VER
;
1463 if (demote_current
) {
1464 entry
.flags
&= ~rgw_bucket_dir_entry::FLAG_CURRENT
;
1468 encode_obj_versioned_data_key(key
, &new_idx
);
1470 if (instance_only
) {
1471 ret
= write_obj_instance_entry(hctx
, entry
, new_idx
);
1473 ret
= write_obj_entries(hctx
, entry
, new_idx
);
1476 CLS_LOG(0, "ERROR: write_obj_entries new_idx=%s returned %d",
1477 new_idx
.c_str(), ret
);
1482 ret
= write_version_marker(hctx
, key
);
1491 * Link an object version to an olh, update the relevant index
1492 * entries. It will also handle the deletion marker case. We have a
1493 * few entries that we need to take care of. For object 'foo',
1494 * instance BAR, we'd update the following (not actual encoding):
1496 * - olh data: [BI_BUCKET_OLH_DATA_INDEX]foo
1497 * - object instance data: [BI_BUCKET_OBJ_INSTANCE_INDEX]foo,BAR
1498 * - object instance list entry: foo,123,BAR
1500 * The instance list entry needs to be ordered by newer to older, so
1501 * we generate an appropriate number string that follows the name.
1502 * The top instance for each object is marked appropriately. We
1503 * generate instance entry for deletion markers here, as they are not
1506 static int rgw_bucket_link_olh(cls_method_context_t hctx
, bufferlist
*in
, bufferlist
*out
)
1508 string olh_data_idx
;
1509 string instance_idx
;
1512 rgw_cls_link_olh_op op
;
1513 auto iter
= in
->cbegin();
1516 } catch (buffer::error
& err
) {
1517 CLS_LOG(0, "ERROR: rgw_bucket_link_olh_op(): failed to decode request\n");
1521 BIVerObjEntry
obj(hctx
, op
.key
);
1522 BIOLHEntry
olh(hctx
, op
.key
);
1524 /* read instance entry */
1525 int ret
= obj
.init(op
.delete_marker
);
1526 bool existed
= (ret
== 0);
1527 if (ret
== -ENOENT
&& op
.delete_marker
) {
1534 if (existed
&& !real_clock::is_zero(op
.unmod_since
)) {
1535 timespec mtime
= ceph::real_clock::to_timespec(obj
.mtime());
1536 timespec unmod
= ceph::real_clock::to_timespec(op
.unmod_since
);
1537 if (!op
.high_precision_time
) {
1541 if (mtime
>= unmod
) {
1542 return 0; /* no need tof set error, we just return 0 and avoid
1543 * writing to the bi log */
1550 * Special handling for null instance object / delete-marker. For
1551 * these objects we're going to have separate instances for a data
1552 * object vs. delete-marker to avoid collisions. We now check if we
1553 * got to overwrite a previous entry, and in that case we'll remove
1556 if (op
.key
.instance
.empty()) {
1557 BIVerObjEntry
other_obj(hctx
, op
.key
);
1558 ret
= other_obj
.init(!op
.delete_marker
); /* try reading the other
1561 existed
= (ret
>= 0 && !other_obj
.is_delete_marker());
1562 if (ret
>= 0 && other_obj
.is_delete_marker() != op
.delete_marker
) {
1563 ret
= other_obj
.unlink_list_entry();
1569 removing
= existed
&& op
.delete_marker
;
1571 ret
= other_obj
.unlink();
1577 removing
= (existed
&& !obj
.is_delete_marker() && op
.delete_marker
);
1580 if (op
.delete_marker
) {
1581 /* a deletion marker, need to initialize entry as such */
1582 obj
.init_as_delete_marker(op
.meta
);
1587 ret
= olh
.init(&olh_found
);
1591 const uint64_t prev_epoch
= olh
.get_epoch();
1593 if (!olh
.start_modify(op
.olh_epoch
)) {
1594 ret
= obj
.write(op
.olh_epoch
, false);
1599 olh
.update_log(CLS_RGW_OLH_OP_REMOVE_INSTANCE
, op
.op_tag
, op
.key
, false, op
.olh_epoch
);
1604 // promote this version to current if it's a newer epoch, or if it matches the
1605 // current epoch and sorts after the current instance
1606 const bool promote
= (olh
.get_epoch() > prev_epoch
) ||
1607 (olh
.get_epoch() == prev_epoch
&&
1608 olh
.get_entry().key
.instance
>= op
.key
.instance
);
1611 const string
& olh_tag
= olh
.get_tag();
1612 if (op
.olh_tag
!= olh_tag
) {
1613 if (!olh
.pending_removal()) {
1614 CLS_LOG(5, "NOTICE: op.olh_tag (%s) != olh.tag (%s)", op
.olh_tag
.c_str(), olh_tag
.c_str());
1617 /* if pending removal, this is a new olh instance */
1618 olh
.set_tag(op
.olh_tag
);
1620 if (promote
&& olh
.exists()) {
1621 rgw_bucket_olh_entry
& olh_entry
= olh
.get_entry();
1622 /* found olh, previous instance is no longer the latest, need to update */
1623 if (!(olh_entry
.key
== op
.key
)) {
1624 BIVerObjEntry
old_obj(hctx
, olh_entry
.key
);
1626 ret
= old_obj
.demote_current();
1628 CLS_LOG(0, "ERROR: could not demote current on previous key ret=%d", ret
);
1633 olh
.set_pending_removal(false);
1635 bool instance_only
= (op
.key
.instance
.empty() && op
.delete_marker
);
1636 cls_rgw_obj_key
key(op
.key
.name
);
1637 ret
= convert_plain_entry_to_versioned(hctx
, key
, promote
, instance_only
);
1639 CLS_LOG(0, "ERROR: convert_plain_entry_to_versioned ret=%d", ret
);
1642 olh
.set_tag(op
.olh_tag
);
1645 /* update the olh log */
1646 olh
.update_log(CLS_RGW_OLH_OP_LINK_OLH
, op
.op_tag
, op
.key
, op
.delete_marker
);
1648 olh
.update_log(CLS_RGW_OLH_OP_REMOVE_INSTANCE
, op
.op_tag
, op
.key
, false);
1652 olh
.update(op
.key
, op
.delete_marker
);
1654 olh
.set_exists(true);
1658 CLS_LOG(0, "ERROR: failed to update olh ret=%d", ret
);
1662 /* write the instance and list entries */
1663 ret
= obj
.write(olh
.get_epoch(), promote
);
1668 rgw_bucket_dir_header header
;
1669 ret
= read_bucket_header(hctx
, &header
);
1671 CLS_LOG(1, "ERROR: rgw_bucket_link_olh(): failed to read header\n");
1675 if (op
.log_op
&& !header
.syncstopped
) {
1676 rgw_bucket_dir_entry
& entry
= obj
.get_dir_entry();
1678 rgw_bucket_entry_ver ver
;
1679 ver
.epoch
= (op
.olh_epoch
? op
.olh_epoch
: olh
.get_epoch());
1681 string
*powner
= NULL
;
1682 string
*powner_display_name
= NULL
;
1684 if (op
.delete_marker
) {
1685 powner
= &entry
.meta
.owner
;
1686 powner_display_name
= &entry
.meta
.owner_display_name
;
1689 RGWModifyOp operation
= (op
.delete_marker
? CLS_RGW_OP_LINK_OLH_DM
: CLS_RGW_OP_LINK_OLH
);
1690 ret
= log_index_operation(hctx
, op
.key
, operation
, op
.op_tag
,
1691 entry
.meta
.mtime
, ver
,
1692 CLS_RGW_STATE_COMPLETE
, header
.ver
, header
.max_marker
, op
.bilog_flags
| RGW_BILOG_FLAG_VERSIONED_OP
,
1693 powner
, powner_display_name
, &op
.zones_trace
);
1697 return write_bucket_header(hctx
, &header
); /* updates header version */
1703 static int rgw_bucket_unlink_instance(cls_method_context_t hctx
, bufferlist
*in
, bufferlist
*out
)
1705 string olh_data_idx
;
1706 string instance_idx
;
1709 rgw_cls_unlink_instance_op op
;
1710 auto iter
= in
->cbegin();
1713 } catch (buffer::error
& err
) {
1714 CLS_LOG(0, "ERROR: rgw_bucket_rm_obj_instance_op(): failed to decode request\n");
1718 cls_rgw_obj_key dest_key
= op
.key
;
1719 if (dest_key
.instance
== "null") {
1720 dest_key
.instance
.clear();
1723 BIVerObjEntry
obj(hctx
, dest_key
);
1724 BIOLHEntry
olh(hctx
, dest_key
);
1726 int ret
= obj
.init();
1727 if (ret
== -ENOENT
) {
1728 return 0; /* already removed */
1731 CLS_LOG(0, "ERROR: obj.init() returned ret=%d", ret
);
1736 ret
= olh
.init(&olh_found
);
1738 CLS_LOG(0, "ERROR: olh.init() returned ret=%d", ret
);
1743 bool instance_only
= false;
1744 cls_rgw_obj_key
key(dest_key
.name
);
1745 ret
= convert_plain_entry_to_versioned(hctx
, key
, true, instance_only
);
1747 CLS_LOG(0, "ERROR: convert_plain_entry_to_versioned ret=%d", ret
);
1750 olh
.update(dest_key
, false);
1751 olh
.set_tag(op
.olh_tag
);
1756 if (!olh
.start_modify(op
.olh_epoch
)) {
1757 ret
= obj
.unlink_list_entry();
1762 if (!obj
.is_delete_marker()) {
1763 olh
.update_log(CLS_RGW_OLH_OP_REMOVE_INSTANCE
, op
.op_tag
, op
.key
, false, op
.olh_epoch
);
1769 rgw_bucket_olh_entry
& olh_entry
= olh
.get_entry();
1770 cls_rgw_obj_key
& olh_key
= olh_entry
.key
;
1771 CLS_LOG(20, "%s(): updating olh log: existing olh entry: %s[%s] (delete_marker=%d)", __func__
,
1772 olh_key
.name
.c_str(), olh_key
.instance
.c_str(), olh_entry
.delete_marker
);
1774 if (olh_key
== dest_key
) {
1775 /* this is the current head, need to update! */
1776 cls_rgw_obj_key next_key
;
1778 ret
= obj
.find_next_key(&next_key
, &found
);
1780 CLS_LOG(0, "ERROR: obj.find_next_key() returned ret=%d", ret
);
1785 BIVerObjEntry
next(hctx
, next_key
);
1786 ret
= next
.write(olh
.get_epoch(), true);
1788 CLS_LOG(0, "ERROR: next.write() returned ret=%d", ret
);
1792 CLS_LOG(20, "%s(): updating olh log: link olh -> %s[%s] (is_delete=%d)", __func__
,
1793 next_key
.name
.c_str(), next_key
.instance
.c_str(), (int)next
.is_delete_marker());
1795 olh
.update(next_key
, next
.is_delete_marker());
1796 olh
.update_log(CLS_RGW_OLH_OP_LINK_OLH
, op
.op_tag
, next_key
, next
.is_delete_marker());
1798 // next_key is empty, but we need to preserve its name in case this entry
1799 // gets resharded, because this key is used for hash placement
1800 next_key
.name
= dest_key
.name
;
1801 olh
.update(next_key
, false);
1802 olh
.update_log(CLS_RGW_OLH_OP_UNLINK_OLH
, op
.op_tag
, next_key
, false);
1803 olh
.set_exists(false);
1804 olh
.set_pending_removal(true);
1808 if (!obj
.is_delete_marker()) {
1809 olh
.update_log(CLS_RGW_OLH_OP_REMOVE_INSTANCE
, op
.op_tag
, op
.key
, false);
1811 /* this is a delete marker, it's our responsibility to remove its
1819 ret
= obj
.unlink_list_entry();
1829 rgw_bucket_dir_header header
;
1830 ret
= read_bucket_header(hctx
, &header
);
1832 CLS_LOG(1, "ERROR: rgw_bucket_unlink_instance(): failed to read header\n");
1836 if (op
.log_op
&& !header
.syncstopped
) {
1837 rgw_bucket_entry_ver ver
;
1838 ver
.epoch
= (op
.olh_epoch
? op
.olh_epoch
: olh
.get_epoch());
1840 real_time mtime
= obj
.mtime(); /* mtime has no real meaning in
1841 * instance removal context */
1842 ret
= log_index_operation(hctx
, op
.key
, CLS_RGW_OP_UNLINK_INSTANCE
, op
.op_tag
,
1844 CLS_RGW_STATE_COMPLETE
, header
.ver
, header
.max_marker
,
1845 op
.bilog_flags
| RGW_BILOG_FLAG_VERSIONED_OP
, NULL
, NULL
, &op
.zones_trace
);
1849 return write_bucket_header(hctx
, &header
); /* updates header version */
1855 static int rgw_bucket_read_olh_log(cls_method_context_t hctx
, bufferlist
*in
, bufferlist
*out
)
1858 rgw_cls_read_olh_log_op op
;
1859 auto iter
= in
->cbegin();
1862 } catch (buffer::error
& err
) {
1863 CLS_LOG(0, "ERROR: rgw_bucket_read_olh_log(): failed to decode request\n");
1867 if (!op
.olh
.instance
.empty()) {
1868 CLS_LOG(1, "bad key passed in (non empty instance)");
1872 rgw_bucket_olh_entry olh_data_entry
;
1873 string olh_data_key
;
1874 encode_olh_data_key(op
.olh
, &olh_data_key
);
1875 int ret
= read_index_entry(hctx
, olh_data_key
, &olh_data_entry
);
1876 if (ret
< 0 && ret
!= -ENOENT
) {
1877 CLS_LOG(0, "ERROR: read_index_entry() olh_key=%s ret=%d", olh_data_key
.c_str(), ret
);
1881 if (olh_data_entry
.tag
!= op
.olh_tag
) {
1882 CLS_LOG(1, "NOTICE: %s(): olh_tag_mismatch olh_data_entry.tag=%s op.olh_tag=%s", __func__
, olh_data_entry
.tag
.c_str(), op
.olh_tag
.c_str());
1886 rgw_cls_read_olh_log_ret op_ret
;
1888 #define MAX_OLH_LOG_ENTRIES 1000
1889 map
<uint64_t, vector
<rgw_bucket_olh_log_entry
> >& log
= olh_data_entry
.pending_log
;
1891 if (log
.begin()->first
> op
.ver_marker
&& log
.size() <= MAX_OLH_LOG_ENTRIES
) {
1893 op_ret
.is_truncated
= false;
1895 map
<uint64_t, vector
<rgw_bucket_olh_log_entry
> >::iterator iter
= log
.upper_bound(op
.ver_marker
);
1897 for (int i
= 0; i
< MAX_OLH_LOG_ENTRIES
&& iter
!= log
.end(); ++i
, ++iter
) {
1898 op_ret
.log
[iter
->first
] = iter
->second
;
1900 op_ret
.is_truncated
= (iter
!= log
.end());
1903 encode(op_ret
, *out
);
1908 static int rgw_bucket_trim_olh_log(cls_method_context_t hctx
, bufferlist
*in
, bufferlist
*out
)
1911 rgw_cls_trim_olh_log_op op
;
1912 auto iter
= in
->cbegin();
1915 } catch (buffer::error
& err
) {
1916 CLS_LOG(0, "ERROR: rgw_bucket_trim_olh_log(): failed to decode request\n");
1920 if (!op
.olh
.instance
.empty()) {
1921 CLS_LOG(1, "bad key passed in (non empty instance)");
1925 /* read olh entry */
1926 rgw_bucket_olh_entry olh_data_entry
;
1927 string olh_data_key
;
1928 encode_olh_data_key(op
.olh
, &olh_data_key
);
1929 int ret
= read_index_entry(hctx
, olh_data_key
, &olh_data_entry
);
1930 if (ret
< 0 && ret
!= -ENOENT
) {
1931 CLS_LOG(0, "ERROR: read_index_entry() olh_key=%s ret=%d", olh_data_key
.c_str(), ret
);
1935 if (olh_data_entry
.tag
!= op
.olh_tag
) {
1936 CLS_LOG(1, "NOTICE: %s(): olh_tag_mismatch olh_data_entry.tag=%s op.olh_tag=%s", __func__
, olh_data_entry
.tag
.c_str(), op
.olh_tag
.c_str());
1940 /* remove all versions up to and including ver from the pending map */
1941 map
<uint64_t, vector
<rgw_bucket_olh_log_entry
> >& log
= olh_data_entry
.pending_log
;
1942 map
<uint64_t, vector
<rgw_bucket_olh_log_entry
> >::iterator liter
= log
.begin();
1943 while (liter
!= log
.end() && liter
->first
<= op
.ver
) {
1944 map
<uint64_t, vector
<rgw_bucket_olh_log_entry
> >::iterator rm_iter
= liter
;
1949 /* write the olh data entry */
1950 ret
= write_entry(hctx
, olh_data_entry
, olh_data_key
);
1952 CLS_LOG(0, "ERROR: write_entry() olh_key=%s ret=%d", olh_data_key
.c_str(), ret
);
1959 static int rgw_bucket_clear_olh(cls_method_context_t hctx
, bufferlist
*in
, bufferlist
*out
)
1962 rgw_cls_bucket_clear_olh_op op
;
1963 auto iter
= in
->cbegin();
1966 } catch (buffer::error
& err
) {
1967 CLS_LOG(0, "ERROR: rgw_bucket_clear_olh(): failed to decode request\n");
1971 if (!op
.key
.instance
.empty()) {
1972 CLS_LOG(1, "bad key passed in (non empty instance)");
1976 /* read olh entry */
1977 rgw_bucket_olh_entry olh_data_entry
;
1978 string olh_data_key
;
1979 encode_olh_data_key(op
.key
, &olh_data_key
);
1980 int ret
= read_index_entry(hctx
, olh_data_key
, &olh_data_entry
);
1981 if (ret
< 0 && ret
!= -ENOENT
) {
1982 CLS_LOG(0, "ERROR: read_index_entry() olh_key=%s ret=%d", olh_data_key
.c_str(), ret
);
1986 if (olh_data_entry
.tag
!= op
.olh_tag
) {
1987 CLS_LOG(1, "NOTICE: %s(): olh_tag_mismatch olh_data_entry.tag=%s op.olh_tag=%s", __func__
, olh_data_entry
.tag
.c_str(), op
.olh_tag
.c_str());
1991 ret
= cls_cxx_map_remove_key(hctx
, olh_data_key
);
1993 CLS_LOG(1, "NOTICE: %s(): can't remove key %s ret=%d", __func__
, olh_data_key
.c_str(), ret
);
1997 rgw_bucket_dir_entry plain_entry
;
1999 /* read plain entry, make sure it's a versioned place holder */
2000 ret
= read_index_entry(hctx
, op
.key
.name
, &plain_entry
);
2001 if (ret
== -ENOENT
) {
2002 /* we're done, no entry existing */
2006 CLS_LOG(0, "ERROR: read_index_entry key=%s ret=%d", op
.key
.name
.c_str(), ret
);
2010 if ((plain_entry
.flags
& rgw_bucket_dir_entry::FLAG_VER_MARKER
) == 0) {
2011 /* it's not a version marker, don't remove it */
2015 ret
= cls_cxx_map_remove_key(hctx
, op
.key
.name
);
2017 CLS_LOG(1, "NOTICE: %s(): can't remove key %s ret=%d", __func__
, op
.key
.name
.c_str(), ret
);
2024 int rgw_dir_suggest_changes(cls_method_context_t hctx
,
2025 bufferlist
*in
, bufferlist
*out
)
2027 CLS_LOG(1, "rgw_dir_suggest_changes()");
2029 bufferlist header_bl
;
2030 rgw_bucket_dir_header header
;
2031 bool header_changed
= false;
2033 int rc
= read_bucket_header(hctx
, &header
);
2035 CLS_LOG(1, "ERROR: rgw_dir_suggest_changes(): failed to read header\n");
2039 timespan
tag_timeout(
2040 std::chrono::seconds(
2041 header
.tag_timeout
? header
.tag_timeout
: CEPH_RGW_TAG_TIMEOUT
));
2043 auto in_iter
= in
->cbegin();
2045 while (!in_iter
.end()) {
2047 rgw_bucket_dir_entry cur_change
;
2048 rgw_bucket_dir_entry cur_disk
;
2050 decode(op
, in_iter
);
2051 decode(cur_change
, in_iter
);
2052 } catch (buffer::error
& err
) {
2053 CLS_LOG(1, "ERROR: rgw_dir_suggest_changes(): failed to decode request\n");
2057 bufferlist cur_disk_bl
;
2058 string cur_change_key
;
2059 encode_obj_index_key(cur_change
.key
, &cur_change_key
);
2060 int ret
= cls_cxx_map_get_val(hctx
, cur_change_key
, &cur_disk_bl
);
2061 if (ret
< 0 && ret
!= -ENOENT
)
2064 if (ret
== -ENOENT
) {
2068 if (cur_disk_bl
.length()) {
2069 auto cur_disk_iter
= cur_disk_bl
.cbegin();
2071 decode(cur_disk
, cur_disk_iter
);
2072 } catch (buffer::error
& error
) {
2073 CLS_LOG(1, "ERROR: rgw_dir_suggest_changes(): failed to decode cur_disk\n");
2077 real_time cur_time
= real_clock::now();
2078 map
<string
, rgw_bucket_pending_info
>::iterator iter
=
2079 cur_disk
.pending_map
.begin();
2080 while(iter
!= cur_disk
.pending_map
.end()) {
2081 map
<string
, rgw_bucket_pending_info
>::iterator cur_iter
=iter
++;
2082 if (cur_time
> (cur_iter
->second
.timestamp
+ timespan(tag_timeout
))) {
2083 cur_disk
.pending_map
.erase(cur_iter
);
2088 CLS_LOG(20, "cur_disk.pending_map.empty()=%d op=%d cur_disk.exists=%d cur_change.pending_map.size()=%d cur_change.exists=%d\n",
2089 cur_disk
.pending_map
.empty(), (int)op
, cur_disk
.exists
,
2090 (int)cur_change
.pending_map
.size(), cur_change
.exists
);
2092 if (cur_disk
.pending_map
.empty()) {
2093 if (cur_disk
.exists
) {
2094 rgw_bucket_category_stats
& old_stats
= header
.stats
[cur_disk
.meta
.category
];
2095 CLS_LOG(10, "total_entries: %" PRId64
" -> %" PRId64
"\n", old_stats
.num_entries
, old_stats
.num_entries
- 1);
2096 old_stats
.num_entries
--;
2097 old_stats
.total_size
-= cur_disk
.meta
.accounted_size
;
2098 old_stats
.total_size_rounded
-= cls_rgw_get_rounded_size(cur_disk
.meta
.accounted_size
);
2099 old_stats
.actual_size
-= cur_disk
.meta
.size
;
2100 header_changed
= true;
2102 rgw_bucket_category_stats
& stats
= header
.stats
[cur_change
.meta
.category
];
2103 bool log_op
= (op
& CEPH_RGW_DIR_SUGGEST_LOG_OP
) != 0;
2104 op
&= CEPH_RGW_DIR_SUGGEST_OP_MASK
;
2106 case CEPH_RGW_REMOVE
:
2107 CLS_LOG(10, "CEPH_RGW_REMOVE name=%s instance=%s\n", cur_change
.key
.name
.c_str(), cur_change
.key
.instance
.c_str());
2108 ret
= cls_cxx_map_remove_key(hctx
, cur_change_key
);
2111 if (log_op
&& cur_disk
.exists
&& !header
.syncstopped
) {
2112 ret
= log_index_operation(hctx
, cur_disk
.key
, CLS_RGW_OP_DEL
, cur_disk
.tag
, cur_disk
.meta
.mtime
,
2113 cur_disk
.ver
, CLS_RGW_STATE_COMPLETE
, header
.ver
, header
.max_marker
, 0, NULL
, NULL
, NULL
);
2115 CLS_LOG(0, "ERROR: %s(): failed to log operation ret=%d", __func__
, ret
);
2120 case CEPH_RGW_UPDATE
:
2121 CLS_LOG(10, "CEPH_RGW_UPDATE name=%s instance=%s total_entries: %" PRId64
" -> %" PRId64
"\n",
2122 cur_change
.key
.name
.c_str(), cur_change
.key
.instance
.c_str(), stats
.num_entries
, stats
.num_entries
+ 1);
2124 stats
.num_entries
++;
2125 stats
.total_size
+= cur_change
.meta
.accounted_size
;
2126 stats
.total_size_rounded
+= cls_rgw_get_rounded_size(cur_change
.meta
.accounted_size
);
2127 stats
.actual_size
+= cur_change
.meta
.size
;
2128 header_changed
= true;
2129 cur_change
.index_ver
= header
.ver
;
2130 bufferlist cur_state_bl
;
2131 encode(cur_change
, cur_state_bl
);
2132 ret
= cls_cxx_map_set_val(hctx
, cur_change_key
, &cur_state_bl
);
2135 if (log_op
&& !header
.syncstopped
) {
2136 ret
= log_index_operation(hctx
, cur_change
.key
, CLS_RGW_OP_ADD
, cur_change
.tag
, cur_change
.meta
.mtime
,
2137 cur_change
.ver
, CLS_RGW_STATE_COMPLETE
, header
.ver
, header
.max_marker
, 0, NULL
, NULL
, NULL
);
2139 CLS_LOG(0, "ERROR: %s(): failed to log operation ret=%d", __func__
, ret
);
2145 } // if (cur_disk.pending_map.empty())
2146 } // while (!in_iter.end())
2148 if (header_changed
) {
2149 return write_bucket_header(hctx
, &header
);
2154 static int rgw_obj_remove(cls_method_context_t hctx
, bufferlist
*in
, bufferlist
*out
)
2157 rgw_cls_obj_remove_op op
;
2158 auto iter
= in
->cbegin();
2161 } catch (buffer::error
& err
) {
2162 CLS_LOG(0, "ERROR: %s(): failed to decode request", __func__
);
2166 if (op
.keep_attr_prefixes
.empty()) {
2167 return cls_cxx_remove(hctx
);
2170 map
<string
, bufferlist
> attrset
;
2171 int ret
= cls_cxx_getxattrs(hctx
, &attrset
);
2172 if (ret
< 0 && ret
!= -ENOENT
) {
2173 CLS_LOG(0, "ERROR: %s(): cls_cxx_getxattrs() returned %d", __func__
, ret
);
2177 map
<string
, bufferlist
> new_attrs
;
2178 for (list
<string
>::iterator iter
= op
.keep_attr_prefixes
.begin();
2179 iter
!= op
.keep_attr_prefixes
.end(); ++iter
) {
2180 string
& check_prefix
= *iter
;
2182 for (map
<string
, bufferlist
>::iterator aiter
= attrset
.lower_bound(check_prefix
);
2183 aiter
!= attrset
.end(); ++aiter
) {
2184 const string
& attr
= aiter
->first
;
2186 if (attr
.substr(0, check_prefix
.size()) > check_prefix
) {
2190 new_attrs
[attr
] = aiter
->second
;
2194 CLS_LOG(20, "%s(): removing object", __func__
);
2195 ret
= cls_cxx_remove(hctx
);
2197 CLS_LOG(0, "ERROR: %s(): cls_cxx_remove returned %d", __func__
, ret
);
2201 if (new_attrs
.empty()) {
2202 /* no data to keep */
2206 ret
= cls_cxx_create(hctx
, false);
2208 CLS_LOG(0, "ERROR: %s(): cls_cxx_create returned %d", __func__
, ret
);
2212 for (map
<string
, bufferlist
>::iterator aiter
= new_attrs
.begin();
2213 aiter
!= new_attrs
.end(); ++aiter
) {
2214 const string
& attr
= aiter
->first
;
2216 ret
= cls_cxx_setxattr(hctx
, attr
.c_str(), &aiter
->second
);
2217 CLS_LOG(20, "%s(): setting attr: %s", __func__
, attr
.c_str());
2219 CLS_LOG(0, "ERROR: %s(): cls_cxx_setxattr (attr=%s) returned %d", __func__
, attr
.c_str(), ret
);
2227 static int rgw_obj_store_pg_ver(cls_method_context_t hctx
, bufferlist
*in
, bufferlist
*out
)
2230 rgw_cls_obj_store_pg_ver_op op
;
2231 auto iter
= in
->cbegin();
2234 } catch (buffer::error
& err
) {
2235 CLS_LOG(0, "ERROR: %s(): failed to decode request", __func__
);
2240 uint64_t ver
= cls_current_version(hctx
);
2242 int ret
= cls_cxx_setxattr(hctx
, op
.attr
.c_str(), &bl
);
2244 CLS_LOG(0, "ERROR: %s(): cls_cxx_setxattr (attr=%s) returned %d", __func__
, op
.attr
.c_str(), ret
);
2251 static int rgw_obj_check_attrs_prefix(cls_method_context_t hctx
, bufferlist
*in
, bufferlist
*out
)
2254 rgw_cls_obj_check_attrs_prefix op
;
2255 auto iter
= in
->cbegin();
2258 } catch (buffer::error
& err
) {
2259 CLS_LOG(0, "ERROR: %s(): failed to decode request", __func__
);
2263 if (op
.check_prefix
.empty()) {
2267 map
<string
, bufferlist
> attrset
;
2268 int ret
= cls_cxx_getxattrs(hctx
, &attrset
);
2269 if (ret
< 0 && ret
!= -ENOENT
) {
2270 CLS_LOG(0, "ERROR: %s(): cls_cxx_getxattrs() returned %d", __func__
, ret
);
2276 for (map
<string
, bufferlist
>::iterator aiter
= attrset
.lower_bound(op
.check_prefix
);
2277 aiter
!= attrset
.end(); ++aiter
) {
2278 const string
& attr
= aiter
->first
;
2280 if (attr
.substr(0, op
.check_prefix
.size()) > op
.check_prefix
) {
2287 if (exist
== op
.fail_if_exist
) {
2294 static int rgw_obj_check_mtime(cls_method_context_t hctx
, bufferlist
*in
, bufferlist
*out
)
2297 rgw_cls_obj_check_mtime op
;
2298 auto iter
= in
->cbegin();
2301 } catch (buffer::error
& err
) {
2302 CLS_LOG(0, "ERROR: %s(): failed to decode request", __func__
);
2307 int ret
= cls_cxx_stat2(hctx
, NULL
, &obj_ut
);
2308 if (ret
< 0 && ret
!= -ENOENT
) {
2309 CLS_LOG(0, "ERROR: %s(): cls_cxx_stat() returned %d", __func__
, ret
);
2312 if (ret
== -ENOENT
) {
2313 CLS_LOG(10, "object does not exist, skipping check");
2316 ceph_timespec obj_ts
= ceph::real_clock::to_ceph_timespec(obj_ut
);
2317 ceph_timespec op_ts
= ceph::real_clock::to_ceph_timespec(op
.mtime
);
2319 if (!op
.high_precision_time
) {
2324 CLS_LOG(10, "%s: obj_ut=%lld.%06lld op.mtime=%lld.%06lld", __func__
,
2325 (long long)obj_ts
.tv_sec
, (long long)obj_ts
.tv_nsec
,
2326 (long long)op_ts
.tv_sec
, (long long)op_ts
.tv_nsec
);
2331 case CLS_RGW_CHECK_TIME_MTIME_EQ
:
2332 check
= (obj_ts
== op_ts
);
2334 case CLS_RGW_CHECK_TIME_MTIME_LT
:
2335 check
= (obj_ts
< op_ts
);
2337 case CLS_RGW_CHECK_TIME_MTIME_LE
:
2338 check
= (obj_ts
<= op_ts
);
2340 case CLS_RGW_CHECK_TIME_MTIME_GT
:
2341 check
= (obj_ts
> op_ts
);
2343 case CLS_RGW_CHECK_TIME_MTIME_GE
:
2344 check
= (obj_ts
>= op_ts
);
2357 static int rgw_bi_get_op(cls_method_context_t hctx
, bufferlist
*in
, bufferlist
*out
)
2360 rgw_cls_bi_get_op op
;
2361 auto iter
= in
->cbegin();
2364 } catch (buffer::error
& err
) {
2365 CLS_LOG(0, "ERROR: %s(): failed to decode request", __func__
);
2372 case BIIndexType::Plain
:
2375 case BIIndexType::Instance
:
2376 encode_obj_index_key(op
.key
, &idx
);
2378 case BIIndexType::OLH
:
2379 encode_olh_data_key(op
.key
, &idx
);
2382 CLS_LOG(10, "%s(): invalid key type encoding: %d",
2383 __func__
, int(op
.type
));
2387 rgw_cls_bi_get_ret op_ret
;
2389 rgw_cls_bi_entry
& entry
= op_ret
.entry
;
2391 entry
.type
= op
.type
;
2394 int r
= cls_cxx_map_get_val(hctx
, idx
, &entry
.data
);
2396 CLS_LOG(10, "%s(): cls_cxx_map_get_val() returned %d", __func__
, r
);
2400 encode(op_ret
, *out
);
2405 static int rgw_bi_put_op(cls_method_context_t hctx
, bufferlist
*in
, bufferlist
*out
)
2408 rgw_cls_bi_put_op op
;
2409 auto iter
= in
->cbegin();
2412 } catch (buffer::error
& err
) {
2413 CLS_LOG(0, "ERROR: %s(): failed to decode request", __func__
);
2417 rgw_cls_bi_entry
& entry
= op
.entry
;
2419 int r
= cls_cxx_map_set_val(hctx
, entry
.idx
, &entry
.data
);
2421 CLS_LOG(0, "ERROR: %s(): cls_cxx_map_set_val() returned r=%d", __func__
, r
);
2427 static int list_plain_entries(cls_method_context_t hctx
,
2429 const string
& marker
,
2431 list
<rgw_cls_bi_entry
> *entries
,
2434 string filter
= name
;
2435 string start_after_key
= marker
;
2437 string end_key
; // stop listing at bi_log_prefix
2438 bi_log_prefix(end_key
);
2441 map
<string
, bufferlist
> keys
;
2442 int ret
= cls_cxx_map_get_vals(hctx
, start_after_key
, filter
, max
,
2448 map
<string
, bufferlist
>::iterator iter
;
2449 for (iter
= keys
.begin(); iter
!= keys
.end(); ++iter
) {
2450 if (iter
->first
>= end_key
) {
2451 /* past the end of plain namespace */
2458 rgw_cls_bi_entry entry
;
2459 entry
.type
= BIIndexType::Plain
;
2460 entry
.idx
= iter
->first
;
2461 entry
.data
= iter
->second
;
2463 auto biter
= entry
.data
.cbegin();
2465 rgw_bucket_dir_entry e
;
2468 } catch (buffer::error
& err
) {
2469 CLS_LOG(0, "ERROR: %s(): failed to decode buffer", __func__
);
2473 CLS_LOG(20, "%s(): entry.idx=%s e.key.name=%s", __func__
, escape_str(entry
.idx
).c_str(), escape_str(e
.key
.name
).c_str());
2475 if (!name
.empty() && e
.key
.name
!= name
) {
2476 /* we are skipping the rest of the entries */
2483 entries
->push_back(entry
);
2485 if (count
>= (int)max
) {
2488 start_after_key
= entry
.idx
;
2494 static int list_instance_entries(cls_method_context_t hctx
,
2496 const string
& marker
,
2498 list
<rgw_cls_bi_entry
> *entries
,
2501 cls_rgw_obj_key
key(name
);
2502 string first_instance_idx
;
2503 encode_obj_versioned_data_key(key
, &first_instance_idx
);
2504 string start_after_key
;
2506 if (!name
.empty()) {
2507 start_after_key
= first_instance_idx
;
2509 start_after_key
= BI_PREFIX_CHAR
;
2510 start_after_key
.append(bucket_index_prefixes
[BI_BUCKET_OBJ_INSTANCE_INDEX
]);
2512 string filter
= start_after_key
;
2513 if (bi_entry_gt(marker
, start_after_key
)) {
2514 start_after_key
= marker
;
2517 map
<string
, bufferlist
> keys
;
2519 int ret
= cls_cxx_map_get_val(hctx
, start_after_key
, &k
);
2520 if (ret
< 0 && ret
!= -ENOENT
) {
2523 bool found_first
= (ret
== 0);
2528 ret
= cls_cxx_map_get_vals(hctx
, start_after_key
, string(), max
,
2530 CLS_LOG(20, "%s(): start_after_key=%s first_instance_idx=%s keys.size()=%d",
2531 __func__
, escape_str(start_after_key
).c_str(),
2532 escape_str(first_instance_idx
).c_str(), (int)keys
.size());
2538 keys
[start_after_key
].claim(k
);
2541 map
<string
, bufferlist
>::iterator iter
;
2542 for (iter
= keys
.begin(); iter
!= keys
.end(); ++iter
) {
2543 rgw_cls_bi_entry entry
;
2544 entry
.type
= BIIndexType::Instance
;
2545 entry
.idx
= iter
->first
;
2546 entry
.data
= iter
->second
;
2548 if (!filter
.empty() && entry
.idx
.compare(0, filter
.size(), filter
) != 0) {
2549 /* we are skipping the rest of the entries */
2556 CLS_LOG(20, "%s(): entry.idx=%s", __func__
, escape_str(entry
.idx
).c_str());
2558 auto biter
= entry
.data
.cbegin();
2560 rgw_bucket_dir_entry e
;
2563 } catch (buffer::error
& err
) {
2564 CLS_LOG(0, "ERROR: %s(): failed to decode buffer (size=%d)", __func__
, entry
.data
.length());
2568 if (!name
.empty() && e
.key
.name
!= name
) {
2569 /* we are skipping the rest of the entries */
2576 entries
->push_back(entry
);
2578 start_after_key
= entry
.idx
;
2584 static int list_olh_entries(cls_method_context_t hctx
,
2586 const string
& marker
,
2588 list
<rgw_cls_bi_entry
> *entries
,
2591 cls_rgw_obj_key
key(name
);
2592 string first_instance_idx
;
2593 encode_olh_data_key(key
, &first_instance_idx
);
2594 string start_after_key
;
2596 if (!name
.empty()) {
2597 start_after_key
= first_instance_idx
;
2599 start_after_key
= BI_PREFIX_CHAR
;
2600 start_after_key
.append(bucket_index_prefixes
[BI_BUCKET_OLH_DATA_INDEX
]);
2602 string filter
= start_after_key
;
2603 if (bi_entry_gt(marker
, start_after_key
)) {
2604 start_after_key
= marker
;
2607 map
<string
, bufferlist
> keys
;
2610 ret
= cls_cxx_map_get_val(hctx
, start_after_key
, &k
);
2611 if (ret
< 0 && ret
!= -ENOENT
) {
2614 bool found_first
= (ret
== 0);
2619 ret
= cls_cxx_map_get_vals(hctx
, start_after_key
, string(), max
,
2621 CLS_LOG(20, "%s(): start_after_key=%s first_instance_idx=%s keys.size()=%d",
2622 __func__
, escape_str(start_after_key
).c_str(),
2623 escape_str(first_instance_idx
).c_str(), (int)keys
.size());
2630 keys
[start_after_key
].claim(k
);
2633 map
<string
, bufferlist
>::iterator iter
;
2634 for (iter
= keys
.begin(); iter
!= keys
.end(); ++iter
) {
2635 rgw_cls_bi_entry entry
;
2636 entry
.type
= BIIndexType::OLH
;
2637 entry
.idx
= iter
->first
;
2638 entry
.data
= iter
->second
;
2640 if (!filter
.empty() && entry
.idx
.compare(0, filter
.size(), filter
) != 0) {
2641 /* we are skipping the rest of the entries */
2648 CLS_LOG(20, "%s(): entry.idx=%s", __func__
, escape_str(entry
.idx
).c_str());
2650 auto biter
= entry
.data
.cbegin();
2652 rgw_bucket_olh_entry e
;
2655 } catch (buffer::error
& err
) {
2656 CLS_LOG(0, "ERROR: %s(): failed to decode buffer (size=%d)", __func__
, entry
.data
.length());
2660 if (!name
.empty() && e
.key
.name
!= name
) {
2661 /* we are skipping the rest of the entries */
2668 entries
->push_back(entry
);
2670 start_after_key
= entry
.idx
;
2676 static int rgw_bi_list_op(cls_method_context_t hctx
,
2681 rgw_cls_bi_list_op op
;
2682 auto iter
= in
->cbegin();
2685 } catch (buffer::error
& err
) {
2686 CLS_LOG(0, "ERROR: %s(): failed to decode request", __func__
);
2690 rgw_cls_bi_list_ret op_ret
;
2692 string filter
= op
.name
;
2693 #define MAX_BI_LIST_ENTRIES 1000
2694 int32_t max
= (op
.max
< MAX_BI_LIST_ENTRIES
? op
.max
: MAX_BI_LIST_ENTRIES
);
2696 int ret
= list_plain_entries(hctx
, op
.name
, op
.marker
, max
,
2697 &op_ret
.entries
, &more
);
2699 CLS_LOG(0, "ERROR: %s(): list_plain_entries returned ret=%d", __func__
, ret
);
2704 CLS_LOG(20, "found %d plain entries", count
);
2707 ret
= list_instance_entries(hctx
, op
.name
, op
.marker
, max
- count
, &op_ret
.entries
, &more
);
2709 CLS_LOG(0, "ERROR: %s(): list_instance_entries returned ret=%d", __func__
, ret
);
2717 ret
= list_olh_entries(hctx
, op
.name
, op
.marker
, max
- count
, &op_ret
.entries
, &more
);
2719 CLS_LOG(0, "ERROR: %s(): list_olh_entries returned ret=%d", __func__
, ret
);
2726 op_ret
.is_truncated
= (count
>= max
) || more
;
2727 while (count
> max
) {
2728 op_ret
.entries
.pop_back();
2732 encode(op_ret
, *out
);
2737 int bi_log_record_decode(bufferlist
& bl
, rgw_bi_log_entry
& e
)
2739 auto iter
= bl
.cbegin();
2742 } catch (buffer::error
& err
) {
2743 CLS_LOG(0, "ERROR: failed to decode rgw_bi_log_entry");
2749 static int bi_log_iterate_entries(cls_method_context_t hctx
,
2750 const string
& marker
,
2751 const string
& end_marker
,
2753 uint32_t max_entries
,
2755 int (*cb
)(cls_method_context_t
, const string
&, rgw_bi_log_entry
&, void *),
2758 CLS_LOG(10, "bi_log_iterate_range");
2760 map
<string
, bufferlist
> keys
;
2761 string filter_prefix
, end_key
;
2768 string start_after_key
;
2769 if (key_iter
.empty()) {
2770 key
= BI_PREFIX_CHAR
;
2771 key
.append(bucket_index_prefixes
[BI_BUCKET_LOG_INDEX
]);
2774 start_after_key
= key
;
2776 start_after_key
= key_iter
;
2779 if (end_marker
.empty()) {
2780 end_key
= BI_PREFIX_CHAR
;
2781 end_key
.append(bucket_index_prefixes
[BI_BUCKET_LOG_INDEX
+ 1]);
2783 end_key
= BI_PREFIX_CHAR
;
2784 end_key
.append(bucket_index_prefixes
[BI_BUCKET_LOG_INDEX
]);
2785 end_key
.append(end_marker
);
2788 CLS_LOG(10, "bi_log_iterate_entries start_after_key=%s end_key=%s\n",
2789 start_after_key
.c_str(), end_key
.c_str());
2793 int ret
= cls_cxx_map_get_vals(hctx
, start_after_key
, filter
, max_entries
,
2798 map
<string
, bufferlist
>::iterator iter
= keys
.begin();
2799 if (iter
== keys
.end())
2802 uint32_t num_keys
= keys
.size();
2804 for (; iter
!= keys
.end(); ++iter
,++i
) {
2805 const string
& key
= iter
->first
;
2808 CLS_LOG(10, "bi_log_iterate_entries key=%s bl.length=%d\n", key
.c_str(), (int)iter
->second
.length());
2810 if (key
.compare(end_key
) > 0) {
2818 ret
= bi_log_record_decode(iter
->second
, e
);
2822 ret
= cb(hctx
, key
, e
, param
);
2826 if (i
== num_keys
- 1) {
2834 static int bi_log_list_cb(cls_method_context_t hctx
, const string
& key
, rgw_bi_log_entry
& info
, void *param
)
2836 list
<rgw_bi_log_entry
> *l
= (list
<rgw_bi_log_entry
> *)param
;
2841 static int bi_log_list_entries(cls_method_context_t hctx
, const string
& marker
,
2842 uint32_t max
, list
<rgw_bi_log_entry
>& entries
, bool *truncated
)
2846 int ret
= bi_log_iterate_entries(hctx
, marker
, end_marker
,
2847 key_iter
, max
, truncated
,
2848 bi_log_list_cb
, &entries
);
2852 static int rgw_bi_log_list(cls_method_context_t hctx
, bufferlist
*in
, bufferlist
*out
)
2854 auto in_iter
= in
->cbegin();
2856 cls_rgw_bi_log_list_op op
;
2858 decode(op
, in_iter
);
2859 } catch (buffer::error
& err
) {
2860 CLS_LOG(1, "ERROR: rgw_bi_log_list(): failed to decode entry\n");
2864 cls_rgw_bi_log_list_ret op_ret
;
2865 int ret
= bi_log_list_entries(hctx
, op
.marker
, op
.max
, op_ret
.entries
, &op_ret
.truncated
);
2869 encode(op_ret
, *out
);
2874 static int rgw_bi_log_trim(cls_method_context_t hctx
, bufferlist
*in
, bufferlist
*out
)
2876 auto in_iter
= in
->cbegin();
2878 cls_rgw_bi_log_trim_op op
;
2880 decode(op
, in_iter
);
2881 } catch (buffer::error
& err
) {
2882 CLS_LOG(1, "ERROR: rgw_bi_log_list(): failed to decode entry\n");
2886 string
key_begin(1, BI_PREFIX_CHAR
);
2887 key_begin
.append(bucket_index_prefixes
[BI_BUCKET_LOG_INDEX
]);
2888 key_begin
.append(op
.start_marker
);
2891 if (op
.end_marker
.empty()) {
2892 key_end
= BI_PREFIX_CHAR
;
2893 key_end
.append(bucket_index_prefixes
[BI_BUCKET_LOG_INDEX
+ 1]);
2895 key_end
= BI_PREFIX_CHAR
;
2896 key_end
.append(bucket_index_prefixes
[BI_BUCKET_LOG_INDEX
]);
2897 key_end
.append(op
.end_marker
);
2898 // cls_cxx_map_remove_range() expects one-past-end
2899 key_end
.append(1, '\0');
2902 // list a single key to detect whether the range is empty
2903 const size_t max_entries
= 1;
2904 std::set
<std::string
> keys
;
2907 int rc
= cls_cxx_map_get_keys(hctx
, key_begin
, max_entries
, &keys
, &more
);
2909 CLS_LOG(1, "ERROR: cls_cxx_map_get_keys failed rc=%d", rc
);
2914 CLS_LOG(20, "range is empty key_begin=%s", key_begin
.c_str());
2918 const std::string
& first_key
= *keys
.begin();
2919 if (key_end
< first_key
) {
2920 CLS_LOG(20, "listed key %s past key_end=%s", first_key
.c_str(), key_end
.c_str());
2924 CLS_LOG(20, "listed key %s, removing through %s",
2925 first_key
.c_str(), key_end
.c_str());
2927 rc
= cls_cxx_map_remove_range(hctx
, first_key
, key_end
);
2929 CLS_LOG(1, "ERROR: cls_cxx_map_remove_range failed rc=%d", rc
);
2935 static int rgw_bi_log_resync(cls_method_context_t hctx
, bufferlist
*in
, bufferlist
*out
)
2937 rgw_bucket_dir_header header
;
2938 int rc
= read_bucket_header(hctx
, &header
);
2940 CLS_LOG(1, "ERROR: rgw_bucket_complete_op(): failed to read header\n");
2946 rgw_bi_log_entry entry
;
2948 entry
.timestamp
= real_clock::now();
2949 entry
.op
= RGWModifyOp::CLS_RGW_OP_RESYNC
;
2950 entry
.state
= RGWPendingState::CLS_RGW_STATE_COMPLETE
;
2953 bi_log_index_key(hctx
, key
, entry
.id
, header
.ver
);
2957 if (entry
.id
> header
.max_marker
)
2958 header
.max_marker
= entry
.id
;
2960 header
.syncstopped
= false;
2962 rc
= cls_cxx_map_set_val(hctx
, key
, &bl
);
2966 return write_bucket_header(hctx
, &header
);
2969 static int rgw_bi_log_stop(cls_method_context_t hctx
, bufferlist
*in
, bufferlist
*out
)
2971 rgw_bucket_dir_header header
;
2972 int rc
= read_bucket_header(hctx
, &header
);
2974 CLS_LOG(1, "ERROR: rgw_bucket_complete_op(): failed to read header\n");
2980 rgw_bi_log_entry entry
;
2982 entry
.timestamp
= real_clock::now();
2983 entry
.op
= RGWModifyOp::CLS_RGW_OP_SYNCSTOP
;
2984 entry
.state
= RGWPendingState::CLS_RGW_STATE_COMPLETE
;
2987 bi_log_index_key(hctx
, key
, entry
.id
, header
.ver
);
2991 if (entry
.id
> header
.max_marker
)
2992 header
.max_marker
= entry
.id
;
2993 header
.syncstopped
= true;
2995 rc
= cls_cxx_map_set_val(hctx
, key
, &bl
);
2999 return write_bucket_header(hctx
, &header
);
3003 static void usage_record_prefix_by_time(uint64_t epoch
, string
& key
)
3006 snprintf(buf
, sizeof(buf
), "%011llu", (long long unsigned)epoch
);
3010 static void usage_record_prefix_by_user(const string
& user
, uint64_t epoch
, string
& key
)
3012 char buf
[user
.size() + 32];
3013 snprintf(buf
, sizeof(buf
), "%s_%011llu_", user
.c_str(), (long long unsigned)epoch
);
3017 static void usage_record_name_by_time(uint64_t epoch
, const string
& user
, const string
& bucket
, string
& key
)
3019 char buf
[32 + user
.size() + bucket
.size()];
3020 snprintf(buf
, sizeof(buf
), "%011llu_%s_%s", (long long unsigned)epoch
, user
.c_str(), bucket
.c_str());
3024 static void usage_record_name_by_user(const string
& user
, uint64_t epoch
, const string
& bucket
, string
& key
)
3026 char buf
[32 + user
.size() + bucket
.size()];
3027 snprintf(buf
, sizeof(buf
), "%s_%011llu_%s", user
.c_str(), (long long unsigned)epoch
, bucket
.c_str());
3031 static int usage_record_decode(bufferlist
& record_bl
, rgw_usage_log_entry
& e
)
3033 auto kiter
= record_bl
.cbegin();
3036 } catch (buffer::error
& err
) {
3037 CLS_LOG(1, "ERROR: usage_record_decode(): failed to decode record_bl\n");
3044 int rgw_user_usage_log_add(cls_method_context_t hctx
, bufferlist
*in
, bufferlist
*out
)
3046 CLS_LOG(10, "rgw_user_usage_log_add()");
3048 auto in_iter
= in
->cbegin();
3049 rgw_cls_usage_log_add_op op
;
3052 decode(op
, in_iter
);
3053 } catch (buffer::error
& err
) {
3054 CLS_LOG(1, "ERROR: rgw_user_usage_log_add(): failed to decode request\n");
3058 rgw_usage_log_info
& info
= op
.info
;
3059 vector
<rgw_usage_log_entry
>::iterator iter
;
3061 for (iter
= info
.entries
.begin(); iter
!= info
.entries
.end(); ++iter
) {
3062 rgw_usage_log_entry
& entry
= *iter
;
3065 rgw_user
*puser
= (entry
.payer
.empty() ? &entry
.owner
: &entry
.payer
);
3067 usage_record_name_by_time(entry
.epoch
, puser
->to_str(), entry
.bucket
, key_by_time
);
3069 CLS_LOG(10, "rgw_user_usage_log_add user=%s bucket=%s\n", puser
->to_str().c_str(), entry
.bucket
.c_str());
3071 bufferlist record_bl
;
3072 int ret
= cls_cxx_map_get_val(hctx
, key_by_time
, &record_bl
);
3073 if (ret
< 0 && ret
!= -ENOENT
) {
3074 CLS_LOG(1, "ERROR: rgw_user_usage_log_add(): cls_cxx_map_read_key returned %d\n", ret
);
3078 rgw_usage_log_entry e
;
3079 ret
= usage_record_decode(record_bl
, e
);
3082 CLS_LOG(10, "rgw_user_usage_log_add aggregating existing bucket\n");
3086 bufferlist new_record_bl
;
3087 encode(entry
, new_record_bl
);
3088 ret
= cls_cxx_map_set_val(hctx
, key_by_time
, &new_record_bl
);
3093 usage_record_name_by_user(puser
->to_str(), entry
.epoch
, entry
.bucket
, key_by_user
);
3094 ret
= cls_cxx_map_set_val(hctx
, key_by_user
, &new_record_bl
);
3102 static int usage_iterate_range(cls_method_context_t hctx
, uint64_t start
, uint64_t end
, const string
& user
,
3103 const string
& bucket
, string
& key_iter
, uint32_t max_entries
, bool *truncated
,
3104 int (*cb
)(cls_method_context_t
, const string
&, rgw_usage_log_entry
&, void *),
3107 CLS_LOG(10, "usage_iterate_range");
3109 map
<string
, bufferlist
> keys
;
3110 string filter_prefix
;
3111 string start_key
, end_key
;
3112 bool by_user
= !user
.empty();
3114 bool truncated_status
= false;
3116 ceph_assert(truncated
!= nullptr);
3119 usage_record_prefix_by_time(end
, end_key
);
3122 user_key
.append("_");
3125 if (key_iter
.empty()) {
3127 usage_record_prefix_by_user(user
, start
, start_key
);
3129 usage_record_prefix_by_time(start
, start_key
);
3132 start_key
= key_iter
;
3135 CLS_LOG(20, "usage_iterate_range start_key=%s", start_key
.c_str());
3136 int ret
= cls_cxx_map_get_vals(hctx
, start_key
, filter_prefix
, max_entries
, &keys
, &truncated_status
);
3140 *truncated
= truncated_status
;
3142 map
<string
, bufferlist
>::iterator iter
= keys
.begin();
3143 if (iter
== keys
.end())
3146 for (; iter
!= keys
.end(); ++iter
) {
3147 const string
& key
= iter
->first
;
3148 rgw_usage_log_entry e
;
3151 if (!by_user
&& key
.compare(end_key
) >= 0) {
3152 CLS_LOG(20, "usage_iterate_range reached key=%s, done", key
.c_str());
3158 if (by_user
&& key
.compare(0, user_key
.size(), user_key
) != 0) {
3159 CLS_LOG(20, "usage_iterate_range reached key=%s, done", key
.c_str());
3165 ret
= usage_record_decode(iter
->second
, e
);
3169 if (!bucket
.empty() && bucket
.compare(e
.bucket
))
3172 if (e
.epoch
< start
)
3175 /* keys are sorted by epoch, so once we're past end we're done */
3176 if (e
.epoch
>= end
) {
3181 ret
= cb(hctx
, key
, e
, param
);
3188 static int usage_log_read_cb(cls_method_context_t hctx
, const string
& key
, rgw_usage_log_entry
& entry
, void *param
)
3190 map
<rgw_user_bucket
, rgw_usage_log_entry
> *usage
= (map
<rgw_user_bucket
, rgw_usage_log_entry
> *)param
;
3192 if (!entry
.payer
.empty()) {
3193 puser
= &entry
.payer
;
3195 puser
= &entry
.owner
;
3197 rgw_user_bucket
ub(puser
->to_str(), entry
.bucket
);
3198 rgw_usage_log_entry
& le
= (*usage
)[ub
];
3199 le
.aggregate(entry
);
3204 int rgw_user_usage_log_read(cls_method_context_t hctx
, bufferlist
*in
, bufferlist
*out
)
3206 CLS_LOG(10, "rgw_user_usage_log_read()");
3208 auto in_iter
= in
->cbegin();
3209 rgw_cls_usage_log_read_op op
;
3212 decode(op
, in_iter
);
3213 } catch (buffer::error
& err
) {
3214 CLS_LOG(1, "ERROR: rgw_user_usage_log_read(): failed to decode request\n");
3218 rgw_cls_usage_log_read_ret ret_info
;
3219 map
<rgw_user_bucket
, rgw_usage_log_entry
> *usage
= &ret_info
.usage
;
3220 string iter
= op
.iter
;
3221 #define MAX_ENTRIES 1000
3222 uint32_t max_entries
= (op
.max_entries
? op
.max_entries
: MAX_ENTRIES
);
3223 int ret
= usage_iterate_range(hctx
, op
.start_epoch
, op
.end_epoch
, op
.owner
, op
.bucket
, iter
, max_entries
, &ret_info
.truncated
, usage_log_read_cb
, (void *)usage
);
3227 if (ret_info
.truncated
)
3228 ret_info
.next_iter
= iter
;
3230 encode(ret_info
, *out
);
3234 static int usage_log_trim_cb(cls_method_context_t hctx
, const string
& key
, rgw_usage_log_entry
& entry
, void *param
)
3236 bool *found
= (bool *)param
;
3243 string o
= entry
.owner
.to_str();
3244 usage_record_name_by_time(entry
.epoch
, o
, entry
.bucket
, key_by_time
);
3245 usage_record_name_by_user(o
, entry
.epoch
, entry
.bucket
, key_by_user
);
3247 int ret
= cls_cxx_map_remove_key(hctx
, key_by_time
);
3251 return cls_cxx_map_remove_key(hctx
, key_by_user
);
3254 int rgw_user_usage_log_trim(cls_method_context_t hctx
, bufferlist
*in
, bufferlist
*out
)
3256 CLS_LOG(10, "rgw_user_usage_log_trim()");
3258 /* only continue if object exists! */
3259 int ret
= cls_cxx_stat(hctx
, NULL
, NULL
);
3263 auto in_iter
= in
->cbegin();
3264 rgw_cls_usage_log_trim_op op
;
3267 decode(op
, in_iter
);
3268 } catch (buffer::error
& err
) {
3269 CLS_LOG(1, "ERROR: rgw_user_log_usage_log_trim(): failed to decode request\n");
3276 #define MAX_USAGE_TRIM_ENTRIES 1000
3277 ret
= usage_iterate_range(hctx
, op
.start_epoch
, op
.end_epoch
, op
.user
, op
.bucket
, iter
, MAX_USAGE_TRIM_ENTRIES
, &more
, usage_log_trim_cb
, (void *)&found
);
3281 if (!more
&& !found
)
3287 int rgw_usage_log_clear(cls_method_context_t hctx
, bufferlist
*in
, bufferlist
*out
)
3289 CLS_LOG(10,"%s", __func__
);
3291 int ret
= cls_cxx_map_clear(hctx
);
3292 /* if object doesn't exist all the logs are cleared anyway */
3300 * We hold the garbage collection chain data under two different
3301 * indexes: the first 'name' index keeps them under a unique tag that
3302 * represents the chains, and a second 'time' index keeps them by
3303 * their expiration timestamp. Each is prefixed differently (see
3304 * gc_index_prefixes below).
3306 * Since key-value data is listed in lexical order by keys, generally
3307 * the name entries are retrieved first and then the time entries.
3308 * When listing the entries via `gc_iterate_entries` one parameter is
3309 * a marker, and if we were to pass "1_" (i.e.,
3310 * gc_index_prefixes[GC_OBJ_TIME_INDEX]), the listing would skip over
3311 * the 'name' entries and begin with the 'time' entries.
3313 * Furthermore, the times are converted to strings such that lexical
3314 * order correlates with chronological order, so the entries are
3315 * returned chronologically from the earliest expiring to the latest
3316 * expiring. This allows for starting at "1_" and to keep retrieving
3317 * chunks of entries, and as long as they are prior to the current
3318 * time, they're expired and processing can continue.
3320 #define GC_OBJ_NAME_INDEX 0
3321 #define GC_OBJ_TIME_INDEX 1
3323 static string gc_index_prefixes
[] = { "0_",
3326 static void prepend_index_prefix(const string
& src
, int index
, string
*dest
)
3328 *dest
= gc_index_prefixes
[index
];
3332 static int gc_omap_get(cls_method_context_t hctx
, int type
, const string
& key
, cls_rgw_gc_obj_info
*info
)
3335 prepend_index_prefix(key
, type
, &index
);
3337 int ret
= read_omap_entry(hctx
, index
, info
);
3344 static int gc_omap_set(cls_method_context_t hctx
, int type
, const string
& key
, const cls_rgw_gc_obj_info
*info
)
3349 string index
= gc_index_prefixes
[type
];
3352 int ret
= cls_cxx_map_set_val(hctx
, index
, &bl
);
3359 static int gc_omap_remove(cls_method_context_t hctx
, int type
, const string
& key
)
3361 string index
= gc_index_prefixes
[type
];
3364 int ret
= cls_cxx_map_remove_key(hctx
, index
);
3371 static bool key_in_index(const string
& key
, int index_type
)
3373 const string
& prefix
= gc_index_prefixes
[index_type
];
3374 return (key
.compare(0, prefix
.size(), prefix
) == 0);
3378 static int gc_update_entry(cls_method_context_t hctx
, uint32_t expiration_secs
,
3379 cls_rgw_gc_obj_info
& info
)
3381 cls_rgw_gc_obj_info old_info
;
3382 int ret
= gc_omap_get(hctx
, GC_OBJ_NAME_INDEX
, info
.tag
, &old_info
);
3385 get_time_key(old_info
.time
, &key
);
3386 ret
= gc_omap_remove(hctx
, GC_OBJ_TIME_INDEX
, key
);
3387 if (ret
< 0 && ret
!= -ENOENT
) {
3388 CLS_LOG(0, "ERROR: failed to remove key=%s\n", key
.c_str());
3393 // calculate time and time key
3394 info
.time
= ceph::real_clock::now();
3395 info
.time
+= make_timespan(expiration_secs
);
3397 get_time_key(info
.time
, &time_key
);
3399 if (info
.chain
.objs
.empty()) {
3401 "WARNING: %s setting GC log entry with zero-length chain, "
3402 "tag='%s', timekey='%s'",
3403 __func__
, info
.tag
.c_str(), time_key
.c_str());
3406 ret
= gc_omap_set(hctx
, GC_OBJ_NAME_INDEX
, info
.tag
, &info
);
3410 ret
= gc_omap_set(hctx
, GC_OBJ_TIME_INDEX
, time_key
, &info
);
3418 CLS_LOG(0, "ERROR: gc_set_entry error info.tag=%s, ret=%d\n",
3419 info
.tag
.c_str(), ret
);
3420 gc_omap_remove(hctx
, GC_OBJ_NAME_INDEX
, info
.tag
);
3425 static int gc_defer_entry(cls_method_context_t hctx
, const string
& tag
, uint32_t expiration_secs
)
3427 cls_rgw_gc_obj_info info
;
3428 int ret
= gc_omap_get(hctx
, GC_OBJ_NAME_INDEX
, tag
, &info
);
3431 return gc_update_entry(hctx
, expiration_secs
, info
);
3434 int gc_record_decode(bufferlist
& bl
, cls_rgw_gc_obj_info
& e
)
3436 auto iter
= bl
.cbegin();
3439 } catch (buffer::error
& err
) {
3440 CLS_LOG(0, "ERROR: failed to decode cls_rgw_gc_obj_info");
3446 static int rgw_cls_gc_set_entry(cls_method_context_t hctx
, bufferlist
*in
, bufferlist
*out
)
3448 auto in_iter
= in
->cbegin();
3450 cls_rgw_gc_set_entry_op op
;
3452 decode(op
, in_iter
);
3453 } catch (buffer::error
& err
) {
3454 CLS_LOG(1, "ERROR: rgw_cls_gc_set_entry(): failed to decode entry\n");
3458 return gc_update_entry(hctx
, op
.expiration_secs
, op
.info
);
3461 static int rgw_cls_gc_defer_entry(cls_method_context_t hctx
, bufferlist
*in
, bufferlist
*out
)
3463 auto in_iter
= in
->cbegin();
3465 cls_rgw_gc_defer_entry_op op
;
3467 decode(op
, in_iter
);
3468 } catch (buffer::error
& err
) {
3469 CLS_LOG(1, "ERROR: rgw_cls_gc_defer_entry(): failed to decode entry\n");
3473 return gc_defer_entry(hctx
, op
.tag
, op
.expiration_secs
);
3476 static int gc_iterate_entries(cls_method_context_t hctx
,
3477 const string
& marker
,
3480 uint32_t max_entries
,
3482 int (*cb
)(cls_method_context_t
,
3484 cls_rgw_gc_obj_info
&,
3488 CLS_LOG(10, "gc_iterate_entries");
3490 map
<string
, bufferlist
> keys
;
3491 string filter_prefix
, end_key
;
3498 if (marker
.empty()) {
3499 prepend_index_prefix(marker
, GC_OBJ_TIME_INDEX
, &start_key
);
3505 real_time now
= ceph::real_clock::now();
3507 get_time_key(now
, &now_str
);
3508 prepend_index_prefix(now_str
, GC_OBJ_TIME_INDEX
, &end_key
);
3510 CLS_LOG(10, "gc_iterate_entries end_key=%s\n", end_key
.c_str());
3515 int ret
= cls_cxx_map_get_vals(hctx
, start_key
, filter
, max_entries
,
3520 map
<string
, bufferlist
>::iterator iter
= keys
.begin();
3521 if (iter
== keys
.end()) {
3522 // if keys empty must not come back as truncated
3523 ceph_assert(!truncated
|| !(*truncated
));
3527 const string
* last_key
= nullptr; // last key processed, for end-marker
3528 for (; iter
!= keys
.end(); ++iter
) {
3529 const string
& key
= iter
->first
;
3530 cls_rgw_gc_obj_info e
;
3532 CLS_LOG(10, "gc_iterate_entries key=%s\n", key
.c_str());
3534 if (!end_key
.empty() && key
.compare(end_key
) >= 0) {
3540 if (!key_in_index(key
, GC_OBJ_TIME_INDEX
)) {
3546 ret
= gc_record_decode(iter
->second
, e
);
3550 ret
= cb(hctx
, key
, e
, param
);
3553 last_key
= &(iter
->first
); // update when callback successful
3556 // set the out marker if either caller does not capture truncated or
3557 // if they do capture and we are truncated
3558 if (!truncated
|| *truncated
) {
3560 out_marker
= *last_key
;
3566 static int gc_list_cb(cls_method_context_t hctx
, const string
& key
, cls_rgw_gc_obj_info
& info
, void *param
)
3568 list
<cls_rgw_gc_obj_info
> *l
= (list
<cls_rgw_gc_obj_info
> *)param
;
3573 static int gc_list_entries(cls_method_context_t hctx
, const string
& marker
,
3574 uint32_t max
, bool expired_only
,
3575 list
<cls_rgw_gc_obj_info
>& entries
, bool *truncated
, string
& next_marker
)
3577 int ret
= gc_iterate_entries(hctx
, marker
, expired_only
,
3578 next_marker
, max
, truncated
,
3579 gc_list_cb
, &entries
);
3583 static int rgw_cls_gc_list(cls_method_context_t hctx
, bufferlist
*in
, bufferlist
*out
)
3585 auto in_iter
= in
->cbegin();
3587 cls_rgw_gc_list_op op
;
3589 decode(op
, in_iter
);
3590 } catch (buffer::error
& err
) {
3591 CLS_LOG(1, "ERROR: rgw_cls_gc_list(): failed to decode entry\n");
3595 cls_rgw_gc_list_ret op_ret
;
3596 #define GC_LIST_ENTRIES_DEFAULT 128
3597 int ret
= gc_list_entries(hctx
, op
.marker
, (op
.max
? op
.max
: GC_LIST_ENTRIES_DEFAULT
), op
.expired_only
,
3598 op_ret
.entries
, &op_ret
.truncated
, op_ret
.next_marker
);
3602 encode(op_ret
, *out
);
3607 static int gc_remove(cls_method_context_t hctx
, vector
<string
>& tags
)
3609 for (auto iter
= tags
.begin(); iter
!= tags
.end(); ++iter
) {
3610 string
& tag
= *iter
;
3611 cls_rgw_gc_obj_info info
;
3612 int ret
= gc_omap_get(hctx
, GC_OBJ_NAME_INDEX
, tag
, &info
);
3613 if (ret
== -ENOENT
) {
3614 CLS_LOG(0, "couldn't find tag in name index tag=%s\n", tag
.c_str());
3622 get_time_key(info
.time
, &time_key
);
3623 ret
= gc_omap_remove(hctx
, GC_OBJ_TIME_INDEX
, time_key
);
3624 if (ret
< 0 && ret
!= -ENOENT
)
3626 if (ret
== -ENOENT
) {
3627 CLS_LOG(0, "couldn't find key in time index key=%s\n", time_key
.c_str());
3630 ret
= gc_omap_remove(hctx
, GC_OBJ_NAME_INDEX
, tag
);
3631 if (ret
< 0 && ret
!= -ENOENT
)
3638 static int rgw_cls_gc_remove(cls_method_context_t hctx
, bufferlist
*in
, bufferlist
*out
)
3640 auto in_iter
= in
->cbegin();
3642 cls_rgw_gc_remove_op op
;
3644 decode(op
, in_iter
);
3645 } catch (buffer::error
& err
) {
3646 CLS_LOG(1, "ERROR: rgw_cls_gc_remove(): failed to decode entry\n");
3650 return gc_remove(hctx
, op
.tags
);
3653 static int rgw_cls_lc_get_entry(cls_method_context_t hctx
, bufferlist
*in
, bufferlist
*out
)
3655 auto in_iter
= in
->cbegin();
3657 cls_rgw_lc_get_entry_op op
;
3659 decode(op
, in_iter
);
3660 } catch (buffer::error
& err
) {
3661 CLS_LOG(1, "ERROR: rgw_cls_lc_set_entry(): failed to decode entry\n");
3665 cls_rgw_lc_entry lc_entry
;
3666 int ret
= read_omap_entry(hctx
, op
.marker
, &lc_entry
);
3670 cls_rgw_lc_get_entry_ret
op_ret(std::move(lc_entry
));
3671 encode(op_ret
, *out
);
3676 static int rgw_cls_lc_set_entry(cls_method_context_t hctx
, bufferlist
*in
, bufferlist
*out
)
3678 auto in_iter
= in
->cbegin();
3680 cls_rgw_lc_set_entry_op op
;
3682 decode(op
, in_iter
);
3683 } catch (buffer::error
& err
) {
3684 CLS_LOG(1, "ERROR: rgw_cls_lc_set_entry(): failed to decode entry\n");
3689 encode(op
.entry
, bl
);
3691 int ret
= cls_cxx_map_set_val(hctx
, op
.entry
.bucket
, &bl
);
3695 static int rgw_cls_lc_rm_entry(cls_method_context_t hctx
, bufferlist
*in
, bufferlist
*out
)
3697 auto in_iter
= in
->cbegin();
3699 cls_rgw_lc_rm_entry_op op
;
3701 decode(op
, in_iter
);
3702 } catch (buffer::error
& err
) {
3703 CLS_LOG(1, "ERROR: rgw_cls_lc_rm_entry(): failed to decode entry\n");
3707 int ret
= cls_cxx_map_remove_key(hctx
, op
.entry
.bucket
);
3711 static int rgw_cls_lc_get_next_entry(cls_method_context_t hctx
, bufferlist
*in
, bufferlist
*out
)
3713 auto in_iter
= in
->cbegin();
3714 cls_rgw_lc_get_next_entry_ret op_ret
;
3715 cls_rgw_lc_get_next_entry_op op
;
3717 decode(op
, in_iter
);
3718 } catch (buffer::error
& err
) {
3719 CLS_LOG(1, "ERROR: rgw_cls_lc_get_next_entry: failed to decode op\n");
3723 map
<string
, bufferlist
> vals
;
3724 string filter_prefix
;
3726 int ret
= cls_cxx_map_get_vals(hctx
, op
.marker
, filter_prefix
, 1, &vals
, &more
);
3729 map
<string
, bufferlist
>::iterator it
;
3730 cls_rgw_lc_entry entry
;
3731 if (!vals
.empty()) {
3733 in_iter
= it
->second
.begin();
3735 decode(entry
, in_iter
);
3736 } catch (buffer::error
& err
) {
3737 CLS_LOG(1, "ERROR: rgw_cls_lc_get_next_entry(): failed to decode entry\n");
3741 op_ret
.entry
= entry
;
3742 encode(op_ret
, *out
);
3746 static int rgw_cls_lc_list_entries(cls_method_context_t hctx
, bufferlist
*in
,
3749 cls_rgw_lc_list_entries_op op
;
3750 auto in_iter
= in
->cbegin();
3752 decode(op
, in_iter
);
3753 } catch (buffer::error
& err
) {
3754 CLS_LOG(1, "ERROR: rgw_cls_lc_list_entries(): failed to decode op\n");
3758 cls_rgw_lc_list_entries_ret
op_ret(op
.compat_v
);
3759 bufferlist::const_iterator iter
;
3760 map
<string
, bufferlist
> vals
;
3761 string filter_prefix
;
3762 int ret
= cls_cxx_map_get_vals(hctx
, op
.marker
, filter_prefix
, op
.max_entries
,
3763 &vals
, &op_ret
.is_truncated
);
3766 map
<string
, bufferlist
>::iterator it
;
3767 for (auto it
= vals
.begin(); it
!= vals
.end(); ++it
) {
3768 cls_rgw_lc_entry entry
;
3769 iter
= it
->second
.cbegin();
3771 decode(entry
, iter
);
3772 } catch (buffer::error
& err
) {
3773 /* try backward compat */
3774 pair
<string
, int> oe
;
3776 iter
= it
->second
.begin();
3778 entry
= {oe
.first
, 0 /* start */, uint32_t(oe
.second
)};
3779 } catch(buffer::error
& err
) {
3781 1, "ERROR: rgw_cls_lc_list_entries(): failed to decode entry\n");
3785 op_ret
.entries
.push_back(entry
);
3787 encode(op_ret
, *out
);
3791 static int rgw_cls_lc_put_head(cls_method_context_t hctx
, bufferlist
*in
, bufferlist
*out
)
3793 auto in_iter
= in
->cbegin();
3795 cls_rgw_lc_put_head_op op
;
3797 decode(op
, in_iter
);
3798 } catch (buffer::error
& err
) {
3799 CLS_LOG(1, "ERROR: rgw_cls_lc_put_head(): failed to decode entry\n");
3804 encode(op
.head
, bl
);
3805 int ret
= cls_cxx_map_write_header(hctx
,&bl
);
3809 static int rgw_cls_lc_get_head(cls_method_context_t hctx
, bufferlist
*in
, bufferlist
*out
)
3812 int ret
= cls_cxx_map_read_header(hctx
, &bl
);
3815 cls_rgw_lc_obj_head head
;
3816 if (bl
.length() != 0) {
3817 auto iter
= bl
.cbegin();
3820 } catch (buffer::error
& err
) {
3821 CLS_LOG(0, "ERROR: rgw_cls_lc_get_head(): failed to decode entry %s\n",err
.what());
3825 head
.start_date
= 0;
3826 head
.marker
.clear();
3828 cls_rgw_lc_get_head_ret op_ret
;
3830 encode(op_ret
, *out
);
3834 static int rgw_reshard_add(cls_method_context_t hctx
, bufferlist
*in
, bufferlist
*out
)
3836 auto in_iter
= in
->cbegin();
3838 cls_rgw_reshard_add_op op
;
3840 decode(op
, in_iter
);
3841 } catch (buffer::error
& err
) {
3842 CLS_LOG(1, "ERROR: rgw_reshard_add: failed to decode entry\n");
3848 op
.entry
.get_key(&key
);
3851 encode(op
.entry
, bl
);
3852 int ret
= cls_cxx_map_set_val(hctx
, key
, &bl
);
3854 CLS_ERR("error adding reshard job for bucket %s with key %s",op
.entry
.bucket_name
.c_str(), key
.c_str());
3861 static int rgw_reshard_list(cls_method_context_t hctx
, bufferlist
*in
, bufferlist
*out
)
3863 cls_rgw_reshard_list_op op
;
3864 auto in_iter
= in
->cbegin();
3866 decode(op
, in_iter
);
3867 } catch (buffer::error
& err
) {
3868 CLS_LOG(1, "ERROR: rgw_cls_rehard_list(): failed to decode entry\n");
3871 cls_rgw_reshard_list_ret op_ret
;
3872 bufferlist::const_iterator iter
;
3873 map
<string
, bufferlist
> vals
;
3874 string filter_prefix
;
3875 #define MAX_RESHARD_LIST_ENTRIES 1000
3876 /* one extra entry for identifying truncation */
3877 int32_t max
= (op
.max
&& (op
.max
< MAX_RESHARD_LIST_ENTRIES
) ? op
.max
: MAX_RESHARD_LIST_ENTRIES
);
3878 int ret
= cls_cxx_map_get_vals(hctx
, op
.marker
, filter_prefix
, max
, &vals
, &op_ret
.is_truncated
);
3881 map
<string
, bufferlist
>::iterator it
;
3882 cls_rgw_reshard_entry entry
;
3884 for (it
= vals
.begin(); i
< (int)op
.max
&& it
!= vals
.end(); ++it
, ++i
) {
3885 iter
= it
->second
.cbegin();
3887 decode(entry
, iter
);
3888 } catch (buffer::error
& err
) {
3889 CLS_LOG(1, "ERROR: rgw_cls_rehard_list(): failed to decode entry\n");
3892 op_ret
.entries
.push_back(entry
);
3894 encode(op_ret
, *out
);
3898 static int rgw_reshard_get(cls_method_context_t hctx
, bufferlist
*in
, bufferlist
*out
)
3900 auto in_iter
= in
->cbegin();
3902 cls_rgw_reshard_get_op op
;
3904 decode(op
, in_iter
);
3905 } catch (buffer::error
& err
) {
3906 CLS_LOG(1, "ERROR: rgw_reshard_get: failed to decode entry\n");
3911 cls_rgw_reshard_entry entry
;
3912 op
.entry
.get_key(&key
);
3913 int ret
= read_omap_entry(hctx
, key
, &entry
);
3918 cls_rgw_reshard_get_ret op_ret
;
3919 op_ret
.entry
= entry
;
3920 encode(op_ret
, *out
);
3924 static int rgw_reshard_remove(cls_method_context_t hctx
, bufferlist
*in
, bufferlist
*out
)
3926 auto in_iter
= in
->cbegin();
3928 cls_rgw_reshard_remove_op op
;
3930 decode(op
, in_iter
);
3931 } catch (buffer::error
& err
) {
3932 CLS_LOG(1, "ERROR: rgw_cls_rehard_remove: failed to decode entry\n");
3937 cls_rgw_reshard_entry entry
;
3938 cls_rgw_reshard_entry::generate_key(op
.tenant
, op
.bucket_name
, &key
);
3939 int ret
= read_omap_entry(hctx
, key
, &entry
);
3944 if (!op
.bucket_id
.empty() &&
3945 entry
.bucket_id
!= op
.bucket_id
) {
3949 ret
= cls_cxx_map_remove_key(hctx
, key
);
3951 CLS_LOG(0, "ERROR: failed to remove key: key=%s ret=%d", key
.c_str(), ret
);
3957 static int rgw_set_bucket_resharding(cls_method_context_t hctx
, bufferlist
*in
, bufferlist
*out
)
3959 cls_rgw_set_bucket_resharding_op op
;
3961 auto in_iter
= in
->cbegin();
3963 decode(op
, in_iter
);
3964 } catch (buffer::error
& err
) {
3965 CLS_LOG(1, "ERROR: cls_rgw_set_bucket_resharding: failed to decode entry\n");
3969 rgw_bucket_dir_header header
;
3970 int rc
= read_bucket_header(hctx
, &header
);
3972 CLS_LOG(1, "ERROR: %s(): failed to read header\n", __func__
);
3976 header
.new_instance
.set_status(op
.entry
.new_bucket_instance_id
, op
.entry
.num_shards
, op
.entry
.reshard_status
);
3978 return write_bucket_header(hctx
, &header
);
3981 static int rgw_clear_bucket_resharding(cls_method_context_t hctx
, bufferlist
*in
, bufferlist
*out
)
3983 cls_rgw_clear_bucket_resharding_op op
;
3985 auto in_iter
= in
->cbegin();
3987 decode(op
, in_iter
);
3988 } catch (buffer::error
& err
) {
3989 CLS_LOG(1, "ERROR: cls_rgw_clear_bucket_resharding: failed to decode entry\n");
3993 rgw_bucket_dir_header header
;
3994 int rc
= read_bucket_header(hctx
, &header
);
3996 CLS_LOG(1, "ERROR: %s(): failed to read header\n", __func__
);
3999 header
.new_instance
.clear();
4001 return write_bucket_header(hctx
, &header
);
4004 static int rgw_guard_bucket_resharding(cls_method_context_t hctx
, bufferlist
*in
, bufferlist
*out
)
4006 cls_rgw_guard_bucket_resharding_op op
;
4008 auto in_iter
= in
->cbegin();
4010 decode(op
, in_iter
);
4011 } catch (buffer::error
& err
) {
4012 CLS_LOG(1, "ERROR: %s(): failed to decode entry\n", __func__
);
4016 rgw_bucket_dir_header header
;
4017 int rc
= read_bucket_header(hctx
, &header
);
4019 CLS_LOG(1, "ERROR: %s(): failed to read header\n", __func__
);
4023 if (header
.resharding()) {
4030 static int rgw_get_bucket_resharding(cls_method_context_t hctx
,
4031 bufferlist
*in
, bufferlist
*out
)
4033 cls_rgw_get_bucket_resharding_op op
;
4035 auto in_iter
= in
->cbegin();
4037 decode(op
, in_iter
);
4038 } catch (buffer::error
& err
) {
4039 CLS_LOG(1, "ERROR: %s(): failed to decode entry\n", __func__
);
4043 rgw_bucket_dir_header header
;
4044 int rc
= read_bucket_header(hctx
, &header
);
4046 CLS_LOG(1, "ERROR: %s(): failed to read header\n", __func__
);
4050 cls_rgw_get_bucket_resharding_ret op_ret
;
4051 op_ret
.new_instance
= header
.new_instance
;
4053 encode(op_ret
, *out
);
4060 CLS_LOG(1, "Loaded rgw class!");
4062 cls_handle_t h_class
;
4063 cls_method_handle_t h_rgw_bucket_init_index
;
4064 cls_method_handle_t h_rgw_bucket_set_tag_timeout
;
4065 cls_method_handle_t h_rgw_bucket_list
;
4066 cls_method_handle_t h_rgw_bucket_check_index
;
4067 cls_method_handle_t h_rgw_bucket_rebuild_index
;
4068 cls_method_handle_t h_rgw_bucket_update_stats
;
4069 cls_method_handle_t h_rgw_bucket_prepare_op
;
4070 cls_method_handle_t h_rgw_bucket_complete_op
;
4071 cls_method_handle_t h_rgw_bucket_link_olh
;
4072 cls_method_handle_t h_rgw_bucket_unlink_instance_op
;
4073 cls_method_handle_t h_rgw_bucket_read_olh_log
;
4074 cls_method_handle_t h_rgw_bucket_trim_olh_log
;
4075 cls_method_handle_t h_rgw_bucket_clear_olh
;
4076 cls_method_handle_t h_rgw_obj_remove
;
4077 cls_method_handle_t h_rgw_obj_store_pg_ver
;
4078 cls_method_handle_t h_rgw_obj_check_attrs_prefix
;
4079 cls_method_handle_t h_rgw_obj_check_mtime
;
4080 cls_method_handle_t h_rgw_bi_get_op
;
4081 cls_method_handle_t h_rgw_bi_put_op
;
4082 cls_method_handle_t h_rgw_bi_list_op
;
4083 cls_method_handle_t h_rgw_bi_log_list_op
;
4084 cls_method_handle_t h_rgw_bi_log_resync_op
;
4085 cls_method_handle_t h_rgw_bi_log_stop_op
;
4086 cls_method_handle_t h_rgw_dir_suggest_changes
;
4087 cls_method_handle_t h_rgw_user_usage_log_add
;
4088 cls_method_handle_t h_rgw_user_usage_log_read
;
4089 cls_method_handle_t h_rgw_user_usage_log_trim
;
4090 cls_method_handle_t h_rgw_usage_log_clear
;
4091 cls_method_handle_t h_rgw_gc_set_entry
;
4092 cls_method_handle_t h_rgw_gc_list
;
4093 cls_method_handle_t h_rgw_gc_remove
;
4094 cls_method_handle_t h_rgw_lc_get_entry
;
4095 cls_method_handle_t h_rgw_lc_set_entry
;
4096 cls_method_handle_t h_rgw_lc_rm_entry
;
4097 cls_method_handle_t h_rgw_lc_get_next_entry
;
4098 cls_method_handle_t h_rgw_lc_put_head
;
4099 cls_method_handle_t h_rgw_lc_get_head
;
4100 cls_method_handle_t h_rgw_lc_list_entries
;
4101 cls_method_handle_t h_rgw_reshard_add
;
4102 cls_method_handle_t h_rgw_reshard_list
;
4103 cls_method_handle_t h_rgw_reshard_get
;
4104 cls_method_handle_t h_rgw_reshard_remove
;
4105 cls_method_handle_t h_rgw_set_bucket_resharding
;
4106 cls_method_handle_t h_rgw_clear_bucket_resharding
;
4107 cls_method_handle_t h_rgw_guard_bucket_resharding
;
4108 cls_method_handle_t h_rgw_get_bucket_resharding
;
4110 cls_register(RGW_CLASS
, &h_class
);
4113 cls_register_cxx_method(h_class
, RGW_BUCKET_INIT_INDEX
, CLS_METHOD_RD
| CLS_METHOD_WR
, rgw_bucket_init_index
, &h_rgw_bucket_init_index
);
4114 cls_register_cxx_method(h_class
, RGW_BUCKET_SET_TAG_TIMEOUT
, CLS_METHOD_RD
| CLS_METHOD_WR
, rgw_bucket_set_tag_timeout
, &h_rgw_bucket_set_tag_timeout
);
4115 cls_register_cxx_method(h_class
, RGW_BUCKET_LIST
, CLS_METHOD_RD
, rgw_bucket_list
, &h_rgw_bucket_list
);
4116 cls_register_cxx_method(h_class
, RGW_BUCKET_CHECK_INDEX
, CLS_METHOD_RD
, rgw_bucket_check_index
, &h_rgw_bucket_check_index
);
4117 cls_register_cxx_method(h_class
, RGW_BUCKET_REBUILD_INDEX
, CLS_METHOD_RD
| CLS_METHOD_WR
, rgw_bucket_rebuild_index
, &h_rgw_bucket_rebuild_index
);
4118 cls_register_cxx_method(h_class
, RGW_BUCKET_UPDATE_STATS
, CLS_METHOD_RD
| CLS_METHOD_WR
, rgw_bucket_update_stats
, &h_rgw_bucket_update_stats
);
4119 cls_register_cxx_method(h_class
, RGW_BUCKET_PREPARE_OP
, CLS_METHOD_RD
| CLS_METHOD_WR
, rgw_bucket_prepare_op
, &h_rgw_bucket_prepare_op
);
4120 cls_register_cxx_method(h_class
, RGW_BUCKET_COMPLETE_OP
, CLS_METHOD_RD
| CLS_METHOD_WR
, rgw_bucket_complete_op
, &h_rgw_bucket_complete_op
);
4121 cls_register_cxx_method(h_class
, RGW_BUCKET_LINK_OLH
, CLS_METHOD_RD
| CLS_METHOD_WR
, rgw_bucket_link_olh
, &h_rgw_bucket_link_olh
);
4122 cls_register_cxx_method(h_class
, RGW_BUCKET_UNLINK_INSTANCE
, CLS_METHOD_RD
| CLS_METHOD_WR
, rgw_bucket_unlink_instance
, &h_rgw_bucket_unlink_instance_op
);
4123 cls_register_cxx_method(h_class
, RGW_BUCKET_READ_OLH_LOG
, CLS_METHOD_RD
, rgw_bucket_read_olh_log
, &h_rgw_bucket_read_olh_log
);
4124 cls_register_cxx_method(h_class
, RGW_BUCKET_TRIM_OLH_LOG
, CLS_METHOD_RD
| CLS_METHOD_WR
, rgw_bucket_trim_olh_log
, &h_rgw_bucket_trim_olh_log
);
4125 cls_register_cxx_method(h_class
, RGW_BUCKET_CLEAR_OLH
, CLS_METHOD_RD
| CLS_METHOD_WR
, rgw_bucket_clear_olh
, &h_rgw_bucket_clear_olh
);
4127 cls_register_cxx_method(h_class
, RGW_OBJ_REMOVE
, CLS_METHOD_RD
| CLS_METHOD_WR
, rgw_obj_remove
, &h_rgw_obj_remove
);
4128 cls_register_cxx_method(h_class
, RGW_OBJ_STORE_PG_VER
, CLS_METHOD_WR
, rgw_obj_store_pg_ver
, &h_rgw_obj_store_pg_ver
);
4129 cls_register_cxx_method(h_class
, RGW_OBJ_CHECK_ATTRS_PREFIX
, CLS_METHOD_RD
, rgw_obj_check_attrs_prefix
, &h_rgw_obj_check_attrs_prefix
);
4130 cls_register_cxx_method(h_class
, RGW_OBJ_CHECK_MTIME
, CLS_METHOD_RD
, rgw_obj_check_mtime
, &h_rgw_obj_check_mtime
);
4132 cls_register_cxx_method(h_class
, RGW_BI_GET
, CLS_METHOD_RD
, rgw_bi_get_op
, &h_rgw_bi_get_op
);
4133 cls_register_cxx_method(h_class
, RGW_BI_PUT
, CLS_METHOD_RD
| CLS_METHOD_WR
, rgw_bi_put_op
, &h_rgw_bi_put_op
);
4134 cls_register_cxx_method(h_class
, RGW_BI_LIST
, CLS_METHOD_RD
, rgw_bi_list_op
, &h_rgw_bi_list_op
);
4136 cls_register_cxx_method(h_class
, RGW_BI_LOG_LIST
, CLS_METHOD_RD
, rgw_bi_log_list
, &h_rgw_bi_log_list_op
);
4137 cls_register_cxx_method(h_class
, RGW_BI_LOG_TRIM
, CLS_METHOD_RD
| CLS_METHOD_WR
, rgw_bi_log_trim
, &h_rgw_bi_log_list_op
);
4138 cls_register_cxx_method(h_class
, RGW_DIR_SUGGEST_CHANGES
, CLS_METHOD_RD
| CLS_METHOD_WR
, rgw_dir_suggest_changes
, &h_rgw_dir_suggest_changes
);
4140 cls_register_cxx_method(h_class
, RGW_BI_LOG_RESYNC
, CLS_METHOD_RD
| CLS_METHOD_WR
, rgw_bi_log_resync
, &h_rgw_bi_log_resync_op
);
4141 cls_register_cxx_method(h_class
, RGW_BI_LOG_STOP
, CLS_METHOD_RD
| CLS_METHOD_WR
, rgw_bi_log_stop
, &h_rgw_bi_log_stop_op
);
4144 cls_register_cxx_method(h_class
, RGW_USER_USAGE_LOG_ADD
, CLS_METHOD_RD
| CLS_METHOD_WR
, rgw_user_usage_log_add
, &h_rgw_user_usage_log_add
);
4145 cls_register_cxx_method(h_class
, RGW_USER_USAGE_LOG_READ
, CLS_METHOD_RD
, rgw_user_usage_log_read
, &h_rgw_user_usage_log_read
);
4146 cls_register_cxx_method(h_class
, RGW_USER_USAGE_LOG_TRIM
, CLS_METHOD_RD
| CLS_METHOD_WR
, rgw_user_usage_log_trim
, &h_rgw_user_usage_log_trim
);
4147 cls_register_cxx_method(h_class
, RGW_USAGE_LOG_CLEAR
, CLS_METHOD_WR
, rgw_usage_log_clear
, &h_rgw_usage_log_clear
);
4149 /* garbage collection */
4150 cls_register_cxx_method(h_class
, RGW_GC_SET_ENTRY
, CLS_METHOD_RD
| CLS_METHOD_WR
, rgw_cls_gc_set_entry
, &h_rgw_gc_set_entry
);
4151 cls_register_cxx_method(h_class
, RGW_GC_DEFER_ENTRY
, CLS_METHOD_RD
| CLS_METHOD_WR
, rgw_cls_gc_defer_entry
, &h_rgw_gc_set_entry
);
4152 cls_register_cxx_method(h_class
, RGW_GC_LIST
, CLS_METHOD_RD
, rgw_cls_gc_list
, &h_rgw_gc_list
);
4153 cls_register_cxx_method(h_class
, RGW_GC_REMOVE
, CLS_METHOD_RD
| CLS_METHOD_WR
, rgw_cls_gc_remove
, &h_rgw_gc_remove
);
4155 /* lifecycle bucket list */
4156 cls_register_cxx_method(h_class
, RGW_LC_GET_ENTRY
, CLS_METHOD_RD
, rgw_cls_lc_get_entry
, &h_rgw_lc_get_entry
);
4157 cls_register_cxx_method(h_class
, RGW_LC_SET_ENTRY
, CLS_METHOD_RD
| CLS_METHOD_WR
, rgw_cls_lc_set_entry
, &h_rgw_lc_set_entry
);
4158 cls_register_cxx_method(h_class
, RGW_LC_RM_ENTRY
, CLS_METHOD_RD
| CLS_METHOD_WR
, rgw_cls_lc_rm_entry
, &h_rgw_lc_rm_entry
);
4159 cls_register_cxx_method(h_class
, RGW_LC_GET_NEXT_ENTRY
, CLS_METHOD_RD
, rgw_cls_lc_get_next_entry
, &h_rgw_lc_get_next_entry
);
4160 cls_register_cxx_method(h_class
, RGW_LC_PUT_HEAD
, CLS_METHOD_RD
| CLS_METHOD_WR
, rgw_cls_lc_put_head
, &h_rgw_lc_put_head
);
4161 cls_register_cxx_method(h_class
, RGW_LC_GET_HEAD
, CLS_METHOD_RD
, rgw_cls_lc_get_head
, &h_rgw_lc_get_head
);
4162 cls_register_cxx_method(h_class
, RGW_LC_LIST_ENTRIES
, CLS_METHOD_RD
, rgw_cls_lc_list_entries
, &h_rgw_lc_list_entries
);
4165 cls_register_cxx_method(h_class
, RGW_RESHARD_ADD
, CLS_METHOD_RD
| CLS_METHOD_WR
, rgw_reshard_add
, &h_rgw_reshard_add
);
4166 cls_register_cxx_method(h_class
, RGW_RESHARD_LIST
, CLS_METHOD_RD
, rgw_reshard_list
, &h_rgw_reshard_list
);
4167 cls_register_cxx_method(h_class
, RGW_RESHARD_GET
, CLS_METHOD_RD
,rgw_reshard_get
, &h_rgw_reshard_get
);
4168 cls_register_cxx_method(h_class
, RGW_RESHARD_REMOVE
, CLS_METHOD_RD
| CLS_METHOD_WR
, rgw_reshard_remove
, &h_rgw_reshard_remove
);
4170 /* resharding attribute */
4171 cls_register_cxx_method(h_class
, RGW_SET_BUCKET_RESHARDING
, CLS_METHOD_RD
| CLS_METHOD_WR
,
4172 rgw_set_bucket_resharding
, &h_rgw_set_bucket_resharding
);
4173 cls_register_cxx_method(h_class
, RGW_CLEAR_BUCKET_RESHARDING
, CLS_METHOD_RD
| CLS_METHOD_WR
,
4174 rgw_clear_bucket_resharding
, &h_rgw_clear_bucket_resharding
);
4175 cls_register_cxx_method(h_class
, RGW_GUARD_BUCKET_RESHARDING
, CLS_METHOD_RD
,
4176 rgw_guard_bucket_resharding
, &h_rgw_guard_bucket_resharding
);
4177 cls_register_cxx_method(h_class
, RGW_GET_BUCKET_RESHARDING
, CLS_METHOD_RD
,
4178 rgw_get_bucket_resharding
, &h_rgw_get_bucket_resharding
);