1 // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
2 // vim: ts=8 sw=2 smarttab
10 #include <boost/utility/string_ref.hpp>
11 #include <boost/format.hpp>
13 #include "common/errno.h"
14 #include "common/ceph_json.h"
15 #include "rgw_rados.h"
17 #include "rgw_acl_s3.h"
19 #include "include/types.h"
20 #include "rgw_bucket.h"
22 #include "rgw_string.h"
23 #include "rgw_multi.h"
25 #include "include/rados/librados.hpp"
26 // until everything is moved from rgw_common
27 #include "rgw_common.h"
29 #include "cls/user/cls_user_types.h"
31 #define dout_context g_ceph_context
32 #define dout_subsys ceph_subsys_rgw
34 #define BUCKET_TAG_TIMEOUT 30
38 static RGWMetadataHandler
*bucket_meta_handler
= NULL
;
39 static RGWMetadataHandler
*bucket_instance_meta_handler
= NULL
;
41 // define as static when RGWBucket implementation compete
42 void rgw_get_buckets_obj(const rgw_user
& user_id
, string
& buckets_obj_id
)
44 buckets_obj_id
= user_id
.to_str();
45 buckets_obj_id
+= RGW_BUCKETS_OBJ_SUFFIX
;
49 * Note that this is not a reversal of parse_bucket(). That one deals
50 * with the syntax we need in metadata and such. This one deals with
51 * the representation in RADOS pools. We chose '/' because it's not
52 * acceptable in bucket names and thus qualified buckets cannot conflict
53 * with the legacy or S3 buckets.
std::string rgw_make_bucket_entry_name(const std::string& tenant_name,
                                       const std::string& bucket_name) {
  // Entry-name layout in the RADOS pools: an empty bucket name yields an
  // empty entry, an untenanted bucket keeps its plain name, and a tenanted
  // bucket is qualified as "<tenant>/<bucket>".
  if (bucket_name.empty()) {
    return std::string();
  }
  if (tenant_name.empty()) {
    return bucket_name;
  }
  return tenant_name + "/" + bucket_name;
}
/*
 * Tenants are separated from buckets in URLs by a colon in S3.
 * This function is not to be used on Swift URLs, not even for COPY arguments.
 */
void rgw_parse_url_bucket(const std::string& bucket,
                          const std::string& auth_tenant,
                          std::string& tenant_name,
                          std::string& bucket_name)
{
  // Fix: the original narrowed std::string::find()'s size_type result into
  // an int and tested `pos >= 0`, relying on implementation-defined
  // conversion of npos to -1. Compare against npos directly instead.
  const auto pos = bucket.find(':');
  if (pos != std::string::npos) {
    /*
     * N.B.: We allow ":bucket" syntax with explicit empty tenant in order
     * to refer to the legacy tenant, in case users in new named tenants
     * want to access old global buckets.
     */
    tenant_name = bucket.substr(0, pos);
    bucket_name = bucket.substr(pos + 1);
  } else {
    // No explicit tenant in the URL: fall back to the authenticated tenant.
    tenant_name = auth_tenant;
    bucket_name = bucket;
  }
}
93 * Get all the buckets owned by a user and fill up an RGWUserBuckets with them.
94 * Returns: 0 on success, -ERR# on failure.
// Read the list of buckets owned by `user_id` from the cls_user object in
// the user uid pool, paging through cls_user_list_buckets until the listing
// is no longer truncated or the requested maximum is reached.
// NOTE(review): this chunk is elided — several parameters (e.g. the marker,
// max, need_stats flags) and the loop/return statements are not visible
// here; the comments below describe only what the visible lines establish.
96 int rgw_read_user_buckets(RGWRados
* store
,
97 const rgw_user
& user_id
,
98 RGWUserBuckets
& buckets
,
100 const string
& end_marker
,
104 uint64_t default_amount
)
// Locate the per-user buckets object in the uid pool.
108 string buckets_obj_id
;
109 rgw_get_buckets_obj(user_id
, buckets_obj_id
);
110 rgw_raw_obj
obj(store
->get_zone_params().user_uid_pool
, buckets_obj_id
);
111 list
<cls_user_bucket_entry
> entries
;
113 bool truncated
= false;
// Presumably applies the default page size when the caller did not cap the
// listing — the guarding condition is elided here; TODO confirm.
119 max
= default_amount
;
// Page through the cls-side bucket directory; `m` doubles as the in/out
// listing marker and `truncated` reports whether more entries remain.
123 ret
= store
->cls_user_list_buckets(obj
, m
, end_marker
, max
- total
, entries
, &m
, &truncated
);
// Accumulate the page of directory entries into the caller's RGWUserBuckets.
130 for (const auto& entry
: entries
) {
131 buckets
.add(RGWBucketEnt(user_id
, entry
));
135 } while (truncated
&& total
< max
);
// Report truncation back to the caller when requested.
137 if (is_truncated
!= nullptr) {
138 *is_truncated
= truncated
;
// Refresh per-bucket stats for the collected entries — presumably only
// when stats were requested; the enclosing condition is not visible here.
142 map
<string
, RGWBucketEnt
>& m
= buckets
.get_buckets();
143 ret
= store
->update_containers_stats(m
);
// ENOENT is tolerated: a bucket may have been removed between listing and
// the stats refresh.
144 if (ret
< 0 && ret
!= -ENOENT
) {
145 ldout(store
->ctx(), 0) << "ERROR: could not get stats for buckets" << dendl
;
152 int rgw_bucket_sync_user_stats(RGWRados
*store
, const rgw_user
& user_id
, const RGWBucketInfo
& bucket_info
)
154 string buckets_obj_id
;
155 rgw_get_buckets_obj(user_id
, buckets_obj_id
);
156 rgw_raw_obj
obj(store
->get_zone_params().user_uid_pool
, buckets_obj_id
);
158 return store
->cls_user_sync_bucket_stats(obj
, bucket_info
);
161 int rgw_bucket_sync_user_stats(RGWRados
*store
, const string
& tenant_name
, const string
& bucket_name
)
163 RGWBucketInfo bucket_info
;
164 RGWObjectCtx
obj_ctx(store
);
165 int ret
= store
->get_bucket_info(obj_ctx
, tenant_name
, bucket_name
, bucket_info
, NULL
);
167 ldout(store
->ctx(), 0) << "ERROR: could not fetch bucket info: ret=" << ret
<< dendl
;
171 ret
= rgw_bucket_sync_user_stats(store
, bucket_info
.owner
, bucket_info
);
173 ldout(store
->ctx(), 0) << "ERROR: could not sync user stats for bucket " << bucket_name
<< ": ret=" << ret
<< dendl
;
180 int rgw_link_bucket(RGWRados
*store
, const rgw_user
& user_id
, rgw_bucket
& bucket
, real_time creation_time
, bool update_entrypoint
)
183 string
& tenant_name
= bucket
.tenant
;
184 string
& bucket_name
= bucket
.name
;
186 cls_user_bucket_entry new_bucket
;
188 RGWBucketEntryPoint ep
;
189 RGWObjVersionTracker ot
;
191 bucket
.convert(&new_bucket
.bucket
);
193 if (real_clock::is_zero(creation_time
))
194 new_bucket
.creation_time
= real_clock::now();
196 new_bucket
.creation_time
= creation_time
;
198 map
<string
, bufferlist
> attrs
;
199 RGWObjectCtx
obj_ctx(store
);
201 if (update_entrypoint
) {
202 ret
= store
->get_bucket_entrypoint_info(obj_ctx
, tenant_name
, bucket_name
, ep
, &ot
, NULL
, &attrs
);
203 if (ret
< 0 && ret
!= -ENOENT
) {
204 ldout(store
->ctx(), 0) << "ERROR: store->get_bucket_entrypoint_info() returned: "
205 << cpp_strerror(-ret
) << dendl
;
209 string buckets_obj_id
;
210 rgw_get_buckets_obj(user_id
, buckets_obj_id
);
212 rgw_raw_obj
obj(store
->get_zone_params().user_uid_pool
, buckets_obj_id
);
213 ret
= store
->cls_user_add_bucket(obj
, new_bucket
);
215 ldout(store
->ctx(), 0) << "ERROR: error adding bucket to directory: "
216 << cpp_strerror(-ret
) << dendl
;
220 if (!update_entrypoint
)
226 ret
= store
->put_bucket_entrypoint_info(tenant_name
, bucket_name
, ep
, false, ot
, real_time(), &attrs
);
232 int r
= rgw_unlink_bucket(store
, user_id
, bucket
.tenant
, bucket
.name
);
234 ldout(store
->ctx(), 0) << "ERROR: failed unlinking bucket on error cleanup: "
235 << cpp_strerror(-r
) << dendl
;
240 int rgw_unlink_bucket(RGWRados
*store
, const rgw_user
& user_id
, const string
& tenant_name
, const string
& bucket_name
, bool update_entrypoint
)
244 string buckets_obj_id
;
245 rgw_get_buckets_obj(user_id
, buckets_obj_id
);
247 cls_user_bucket bucket
;
248 bucket
.name
= bucket_name
;
249 rgw_raw_obj
obj(store
->get_zone_params().user_uid_pool
, buckets_obj_id
);
250 ret
= store
->cls_user_remove_bucket(obj
, bucket
);
252 ldout(store
->ctx(), 0) << "ERROR: error removing bucket from directory: "
253 << cpp_strerror(-ret
)<< dendl
;
256 if (!update_entrypoint
)
259 RGWBucketEntryPoint ep
;
260 RGWObjVersionTracker ot
;
261 map
<string
, bufferlist
> attrs
;
262 RGWObjectCtx
obj_ctx(store
);
263 ret
= store
->get_bucket_entrypoint_info(obj_ctx
, tenant_name
, bucket_name
, ep
, &ot
, NULL
, &attrs
);
272 if (ep
.owner
!= user_id
) {
273 ldout(store
->ctx(), 0) << "bucket entry point user mismatch, can't unlink bucket: " << ep
.owner
<< " != " << user_id
<< dendl
;
278 return store
->put_bucket_entrypoint_info(tenant_name
, bucket_name
, ep
, false, ot
, real_time(), &attrs
);
// Persist a bucket entrypoint under the bucket metadata handler via the
// metadata manager. Thin forwarding wrapper around meta_mgr->put_entry().
// NOTE(review): the final parameter line (original line 283, presumably the
// mtime argument) is elided from this view.
281 int rgw_bucket_store_info(RGWRados
*store
, const string
& bucket_name
, bufferlist
& bl
, bool exclusive
,
282 map
<string
, bufferlist
> *pattrs
, RGWObjVersionTracker
*objv_tracker
,
284 return store
->meta_mgr
->put_entry(bucket_meta_handler
, bucket_name
, bl
, exclusive
, objv_tracker
, mtime
, pattrs
);
// Persist a bucket *instance* record under the bucket-instance metadata
// handler via the metadata manager. Mirrors rgw_bucket_store_info() but
// keyed by the instance entry rather than the bucket name.
// NOTE(review): the final parameter line (original line 289, presumably the
// mtime argument) is elided from this view.
287 int rgw_bucket_instance_store_info(RGWRados
*store
, string
& entry
, bufferlist
& bl
, bool exclusive
,
288 map
<string
, bufferlist
> *pattrs
, RGWObjVersionTracker
*objv_tracker
,
290 return store
->meta_mgr
->put_entry(bucket_instance_meta_handler
, entry
, bl
, exclusive
, objv_tracker
, mtime
, pattrs
);
// Remove a bucket-instance metadata entry through the metadata manager,
// guarded by the caller-supplied object version tracker.
293 int rgw_bucket_instance_remove_entry(RGWRados
*store
, string
& entry
, RGWObjVersionTracker
*objv_tracker
) {
294 return store
->meta_mgr
->remove_entry(bucket_instance_meta_handler
, entry
, objv_tracker
);
// 'tenant/' is used in bucket instance keys for sync to avoid parsing ambiguity
// with the existing instance[:shard] format. once we parse the shard, the / is
// replaced with a : to match the [tenant:]instance format
void rgw_bucket_instance_key_to_oid(std::string& key)
{
  // replace tenant/ with tenant:
  const auto slash = key.find('/');
  if (slash != std::string::npos) {
    key[slash] = ':';
  }
}
// convert bucket instance oids back to the tenant/ format for metadata keys.
// it's safe to parse 'tenant:' only for oids, because they won't contain the
// optional :shard at the end
void rgw_bucket_instance_oid_to_key(std::string& oid)
{
  // find first : (could be tenant:bucket or bucket:instance)
  const auto first = oid.find(':');
  if (first == std::string::npos) {
    return;
  }
  // if we find another :, the first one was for tenant
  if (oid.find(':', first + 1) != std::string::npos) {
    oid[first] = '/';
  }
}
324 int rgw_bucket_parse_bucket_instance(const string
& bucket_instance
, string
*target_bucket_instance
, int *shard_id
)
326 ssize_t pos
= bucket_instance
.rfind(':');
331 string first
= bucket_instance
.substr(0, pos
);
332 string second
= bucket_instance
.substr(pos
+ 1);
334 if (first
.find(':') == string::npos
) {
336 *target_bucket_instance
= bucket_instance
;
340 *target_bucket_instance
= first
;
342 *shard_id
= strict_strtol(second
.c_str(), 10, &err
);
350 // parse key in format: [tenant/]name:instance[:shard_id]
351 int rgw_bucket_parse_bucket_key(CephContext
*cct
, const string
& key
,
352 rgw_bucket
*bucket
, int *shard_id
)
354 boost::string_ref name
{key
};
355 boost::string_ref instance
;
358 auto pos
= name
.find('/');
359 if (pos
!= boost::string_ref::npos
) {
360 auto tenant
= name
.substr(0, pos
);
361 bucket
->tenant
.assign(tenant
.begin(), tenant
.end());
362 name
= name
.substr(pos
+ 1);
365 // split name:instance
366 pos
= name
.find(':');
367 if (pos
!= boost::string_ref::npos
) {
368 instance
= name
.substr(pos
+ 1);
369 name
= name
.substr(0, pos
);
371 bucket
->name
.assign(name
.begin(), name
.end());
373 // split instance:shard
374 pos
= instance
.find(':');
375 if (pos
== boost::string_ref::npos
) {
376 bucket
->bucket_id
.assign(instance
.begin(), instance
.end());
382 auto shard
= instance
.substr(pos
+ 1);
384 auto id
= strict_strtol(shard
.data(), 10, &err
);
386 ldout(cct
, 0) << "ERROR: failed to parse bucket shard '"
387 << instance
.data() << "': " << err
<< dendl
;
392 instance
= instance
.substr(0, pos
);
393 bucket
->bucket_id
.assign(instance
.begin(), instance
.end());
397 int rgw_bucket_set_attrs(RGWRados
*store
, RGWBucketInfo
& bucket_info
,
398 map
<string
, bufferlist
>& attrs
,
399 RGWObjVersionTracker
*objv_tracker
)
401 rgw_bucket
& bucket
= bucket_info
.bucket
;
403 if (!bucket_info
.has_instance_obj
) {
404 /* an old bucket object, need to convert it */
405 RGWObjectCtx
obj_ctx(store
);
406 int ret
= store
->convert_old_bucket_info(obj_ctx
, bucket
.tenant
, bucket
.name
);
408 ldout(store
->ctx(), 0) << "ERROR: failed converting old bucket info: " << ret
<< dendl
;
413 /* we want the bucket instance name without the oid prefix cruft */
414 string key
= bucket
.get_key();
417 ::encode(bucket_info
, bl
);
419 return rgw_bucket_instance_store_info(store
, key
, bl
, false, &attrs
, objv_tracker
, real_time());
422 static void dump_mulipart_index_results(list
<rgw_obj_index_key
>& objs_to_unlink
,
425 for (const auto& o
: objs_to_unlink
) {
426 f
->dump_string("object", o
.name
);
430 void check_bad_user_bucket_mapping(RGWRados
*store
, const rgw_user
& user_id
,
433 RGWUserBuckets user_buckets
;
434 bool is_truncated
= false;
437 CephContext
*cct
= store
->ctx();
439 size_t max_entries
= cct
->_conf
->rgw_list_buckets_max_chunk
;
442 int ret
= rgw_read_user_buckets(store
, user_id
, user_buckets
, marker
,
443 string(), max_entries
, false,
446 ldout(store
->ctx(), 0) << "failed to read user buckets: "
447 << cpp_strerror(-ret
) << dendl
;
451 map
<string
, RGWBucketEnt
>& buckets
= user_buckets
.get_buckets();
452 for (map
<string
, RGWBucketEnt
>::iterator i
= buckets
.begin();
457 RGWBucketEnt
& bucket_ent
= i
->second
;
458 rgw_bucket
& bucket
= bucket_ent
.bucket
;
460 RGWBucketInfo bucket_info
;
462 RGWObjectCtx
obj_ctx(store
);
463 int r
= store
->get_bucket_info(obj_ctx
, user_id
.tenant
, bucket
.name
, bucket_info
, &mtime
);
465 ldout(store
->ctx(), 0) << "could not get bucket info for bucket=" << bucket
<< dendl
;
469 rgw_bucket
& actual_bucket
= bucket_info
.bucket
;
471 if (actual_bucket
.name
.compare(bucket
.name
) != 0 ||
472 actual_bucket
.tenant
.compare(bucket
.tenant
) != 0 ||
473 actual_bucket
.marker
.compare(bucket
.marker
) != 0 ||
474 actual_bucket
.bucket_id
.compare(bucket
.bucket_id
) != 0) {
475 cout
<< "bucket info mismatch: expected " << actual_bucket
<< " got " << bucket
<< std::endl
;
477 cout
<< "fixing" << std::endl
;
478 r
= rgw_link_bucket(store
, user_id
, actual_bucket
, bucket_info
.creation_time
);
480 cerr
<< "failed to fix bucket: " << cpp_strerror(-r
) << std::endl
;
485 } while (is_truncated
);
488 static bool bucket_object_check_filter(const string
& oid
)
492 return rgw_obj_key::oid_to_key_in_ns(oid
, &key
, ns
);
495 int rgw_remove_object(RGWRados
*store
, RGWBucketInfo
& bucket_info
, rgw_bucket
& bucket
, rgw_obj_key
& key
)
497 RGWObjectCtx
rctx(store
);
499 if (key
.instance
.empty()) {
500 key
.instance
= "null";
503 rgw_obj
obj(bucket
, key
);
505 return store
->delete_obj(rctx
, bucket_info
, obj
, bucket_info
.versioning_status());
508 int rgw_remove_bucket(RGWRados
*store
, rgw_bucket
& bucket
, bool delete_children
)
511 map
<RGWObjCategory
, RGWStorageStats
> stats
;
512 std::vector
<rgw_bucket_dir_entry
> objs
;
513 map
<string
, bool> common_prefixes
;
515 RGWObjectCtx
obj_ctx(store
);
517 string bucket_ver
, master_ver
;
519 ret
= store
->get_bucket_info(obj_ctx
, bucket
.tenant
, bucket
.name
, info
, NULL
);
523 ret
= store
->get_bucket_stats(info
, RGW_NO_SHARD
, &bucket_ver
, &master_ver
, stats
, NULL
);
527 RGWRados::Bucket
target(store
, info
);
528 RGWRados::Bucket::List
list_op(&target
);
529 CephContext
*cct
= store
->ctx();
532 list_op
.params
.list_versions
= true;
537 ret
= list_op
.list_objects(max
, &objs
, &common_prefixes
, NULL
);
541 if (!objs
.empty() && !delete_children
) {
542 lderr(store
->ctx()) << "ERROR: could not remove non-empty bucket " << bucket
.name
<< dendl
;
546 for (const auto& obj
: objs
) {
547 rgw_obj_key
key(obj
.key
);
548 ret
= rgw_remove_object(store
, info
, bucket
, key
);
553 } while (!objs
.empty());
555 string prefix
, delimiter
;
557 ret
= abort_bucket_multiparts(store
, cct
, info
, prefix
, delimiter
);
562 ret
= rgw_bucket_sync_user_stats(store
, bucket
.tenant
, info
);
564 dout(1) << "WARNING: failed sync user stats before bucket delete. ret=" << ret
<< dendl
;
567 RGWObjVersionTracker objv_tracker
;
569 ret
= store
->delete_bucket(info
, objv_tracker
);
571 lderr(store
->ctx()) << "ERROR: could not remove bucket " << bucket
.name
<< dendl
;
575 ret
= rgw_unlink_bucket(store
, info
.owner
, bucket
.tenant
, bucket
.name
, false);
577 lderr(store
->ctx()) << "ERROR: unable to remove user bucket information" << dendl
;
583 static int aio_wait(librados::AioCompletion
*handle
)
585 librados::AioCompletion
*c
= (librados::AioCompletion
*)handle
;
587 int ret
= c
->get_return_value();
592 static int drain_handles(list
<librados::AioCompletion
*>& pending
)
595 while (!pending
.empty()) {
596 librados::AioCompletion
*handle
= pending
.front();
598 int r
= aio_wait(handle
);
606 int rgw_remove_bucket_bypass_gc(RGWRados
*store
, rgw_bucket
& bucket
,
607 int concurrent_max
, bool keep_index_consistent
)
610 map
<RGWObjCategory
, RGWStorageStats
> stats
;
611 std::vector
<rgw_bucket_dir_entry
> objs
;
612 map
<string
, bool> common_prefixes
;
614 RGWObjectCtx
obj_ctx(store
);
615 CephContext
*cct
= store
->ctx();
617 string bucket_ver
, master_ver
;
619 ret
= store
->get_bucket_info(obj_ctx
, bucket
.tenant
, bucket
.name
, info
, NULL
);
623 ret
= store
->get_bucket_stats(info
, RGW_NO_SHARD
, &bucket_ver
, &master_ver
, stats
, NULL
);
627 string prefix
, delimiter
;
629 ret
= abort_bucket_multiparts(store
, cct
, info
, prefix
, delimiter
);
634 RGWRados::Bucket
target(store
, info
);
635 RGWRados::Bucket::List
list_op(&target
);
637 list_op
.params
.list_versions
= true;
639 std::list
<librados::AioCompletion
*> handles
;
642 int max_aio
= concurrent_max
;
643 ret
= list_op
.list_objects(max
, &objs
, &common_prefixes
, NULL
);
647 while (!objs
.empty()) {
648 std::vector
<rgw_bucket_dir_entry
>::iterator it
= objs
.begin();
649 for (; it
!= objs
.end(); ++it
) {
650 RGWObjState
*astate
= NULL
;
651 rgw_obj
obj(bucket
, (*it
).key
);
653 ret
= store
->get_obj_state(&obj_ctx
, info
, obj
, &astate
, false);
654 if (ret
== -ENOENT
) {
655 dout(1) << "WARNING: cannot find obj state for obj " << obj
.get_oid() << dendl
;
659 lderr(store
->ctx()) << "ERROR: get obj state returned with error " << ret
<< dendl
;
663 if (astate
->has_manifest
) {
664 RGWObjManifest
& manifest
= astate
->manifest
;
665 RGWObjManifest::obj_iterator miter
= manifest
.obj_begin();
666 rgw_obj head_obj
= manifest
.get_obj();
667 rgw_raw_obj raw_head_obj
;
668 store
->obj_to_raw(info
.placement_rule
, head_obj
, &raw_head_obj
);
671 for (; miter
!= manifest
.obj_end() && max_aio
--; ++miter
) {
673 ret
= drain_handles(handles
);
675 lderr(store
->ctx()) << "ERROR: could not drain handles as aio completion returned with " << ret
<< dendl
;
678 max_aio
= concurrent_max
;
681 rgw_raw_obj last_obj
= miter
.get_location().get_raw_obj(store
);
682 if (last_obj
== raw_head_obj
) {
683 // have the head obj deleted at the end
687 ret
= store
->delete_raw_obj_aio(last_obj
, handles
);
689 lderr(store
->ctx()) << "ERROR: delete obj aio failed with " << ret
<< dendl
;
692 } // for all shadow objs
694 ret
= store
->delete_obj_aio(head_obj
, info
, astate
, handles
, keep_index_consistent
);
696 lderr(store
->ctx()) << "ERROR: delete obj aio failed with " << ret
<< dendl
;
702 ret
= drain_handles(handles
);
704 lderr(store
->ctx()) << "ERROR: could not drain handles as aio completion returned with " << ret
<< dendl
;
707 max_aio
= concurrent_max
;
709 } // for all RGW objects
712 ret
= list_op
.list_objects(max
, &objs
, &common_prefixes
, NULL
);
717 ret
= drain_handles(handles
);
719 lderr(store
->ctx()) << "ERROR: could not drain handles as aio completion returned with " << ret
<< dendl
;
723 ret
= rgw_bucket_sync_user_stats(store
, bucket
.tenant
, info
);
725 dout(1) << "WARNING: failed sync user stats before bucket delete. ret=" << ret
<< dendl
;
728 RGWObjVersionTracker objv_tracker
;
730 ret
= rgw_bucket_delete_bucket_obj(store
, bucket
.tenant
, bucket
.name
, objv_tracker
);
732 lderr(store
->ctx()) << "ERROR: could not remove bucket " << bucket
.name
<< "with ret as " << ret
<< dendl
;
736 if (!store
->is_syncing_bucket_meta(bucket
)) {
737 RGWObjVersionTracker objv_tracker
;
738 string entry
= bucket
.get_key();
739 ret
= rgw_bucket_instance_remove_entry(store
, entry
, &objv_tracker
);
741 lderr(store
->ctx()) << "ERROR: could not remove bucket instance entry" << bucket
.name
<< "with ret as " << ret
<< dendl
;
746 ret
= rgw_unlink_bucket(store
, info
.owner
, bucket
.tenant
, bucket
.name
, false);
748 lderr(store
->ctx()) << "ERROR: unable to remove user bucket information" << dendl
;
754 int rgw_bucket_delete_bucket_obj(RGWRados
*store
,
755 const string
& tenant_name
,
756 const string
& bucket_name
,
757 RGWObjVersionTracker
& objv_tracker
)
761 rgw_make_bucket_entry_name(tenant_name
, bucket_name
, key
);
762 return store
->meta_mgr
->remove_entry(bucket_meta_handler
, key
, &objv_tracker
);
static void set_err_msg(std::string *sink, std::string msg)
{
  // Propagate a non-empty error message to the caller's sink, if provided.
  if (!sink || msg.empty()) {
    return;
  }
  *sink = std::move(msg);
}
771 int RGWBucket::init(RGWRados
*storage
, RGWBucketAdminOpState
& op_state
)
778 rgw_user user_id
= op_state
.get_user_id();
779 tenant
= user_id
.tenant
;
780 bucket_name
= op_state
.get_bucket_name();
781 RGWUserBuckets user_buckets
;
782 RGWObjectCtx
obj_ctx(store
);
784 if (bucket_name
.empty() && user_id
.empty())
787 if (!bucket_name
.empty()) {
788 int r
= store
->get_bucket_info(obj_ctx
, tenant
, bucket_name
, bucket_info
, NULL
);
790 ldout(store
->ctx(), 0) << "could not get bucket info for bucket=" << bucket_name
<< dendl
;
794 op_state
.set_bucket(bucket_info
.bucket
);
797 if (!user_id
.empty()) {
798 int r
= rgw_get_user_info_by_uid(store
, user_id
, user_info
);
802 op_state
.display_name
= user_info
.display_name
;
809 int RGWBucket::link(RGWBucketAdminOpState
& op_state
, std::string
*err_msg
)
811 if (!op_state
.is_user_op()) {
812 set_err_msg(err_msg
, "empty user id");
816 string bucket_id
= op_state
.get_bucket_id();
817 if (bucket_id
.empty()) {
818 set_err_msg(err_msg
, "empty bucket instance id");
822 std::string display_name
= op_state
.get_user_display_name();
823 rgw_bucket bucket
= op_state
.get_bucket();
825 const rgw_pool
& root_pool
= store
->get_zone_params().domain_root
;
826 rgw_raw_obj
obj(root_pool
, bucket
.name
);
827 RGWObjVersionTracker objv_tracker
;
829 map
<string
, bufferlist
> attrs
;
830 RGWBucketInfo bucket_info
;
832 string key
= bucket
.name
+ ":" + bucket_id
;
833 RGWObjectCtx
obj_ctx(store
);
834 int r
= store
->get_bucket_instance_info(obj_ctx
, key
, bucket_info
, NULL
, &attrs
);
839 rgw_user user_id
= op_state
.get_user_id();
841 map
<string
, bufferlist
>::iterator aiter
= attrs
.find(RGW_ATTR_ACL
);
842 if (aiter
!= attrs
.end()) {
843 bufferlist aclbl
= aiter
->second
;
844 RGWAccessControlPolicy policy
;
847 bufferlist::iterator iter
= aclbl
.begin();
848 ::decode(policy
, iter
);
849 owner
= policy
.get_owner();
850 } catch (buffer::error
& err
) {
851 set_err_msg(err_msg
, "couldn't decode policy");
855 r
= rgw_unlink_bucket(store
, owner
.get_id(), bucket
.tenant
, bucket
.name
, false);
857 set_err_msg(err_msg
, "could not unlink policy from user " + owner
.get_id().to_str());
861 // now update the user for the bucket...
862 if (display_name
.empty()) {
863 ldout(store
->ctx(), 0) << "WARNING: user " << user_info
.user_id
<< " has no display name set" << dendl
;
865 policy
.create_default(user_info
.user_id
, display_name
);
867 owner
= policy
.get_owner();
868 r
= store
->set_bucket_owner(bucket_info
.bucket
, owner
);
870 set_err_msg(err_msg
, "failed to set bucket owner: " + cpp_strerror(-r
));
874 // ...and encode the acl
876 policy
.encode(aclbl
);
878 r
= store
->system_obj_set_attr(NULL
, obj
, RGW_ATTR_ACL
, aclbl
, &objv_tracker
);
883 RGWAccessControlPolicy policy_instance
;
884 policy_instance
.create_default(user_info
.user_id
, display_name
);
886 policy_instance
.encode(aclbl
);
888 string oid_bucket_instance
= RGW_BUCKET_INSTANCE_MD_PREFIX
+ key
;
889 rgw_raw_obj
obj_bucket_instance(root_pool
, oid_bucket_instance
);
890 r
= store
->system_obj_set_attr(NULL
, obj_bucket_instance
, RGW_ATTR_ACL
, aclbl
, &objv_tracker
);
895 r
= rgw_link_bucket(store
, user_info
.user_id
, bucket_info
.bucket
, real_time());
904 int RGWBucket::unlink(RGWBucketAdminOpState
& op_state
, std::string
*err_msg
)
906 rgw_bucket bucket
= op_state
.get_bucket();
908 if (!op_state
.is_user_op()) {
909 set_err_msg(err_msg
, "could not fetch user or user bucket info");
913 int r
= rgw_unlink_bucket(store
, user_info
.user_id
, bucket
.tenant
, bucket
.name
);
915 set_err_msg(err_msg
, "error unlinking bucket" + cpp_strerror(-r
));
921 int RGWBucket::remove(RGWBucketAdminOpState
& op_state
, bool bypass_gc
,
922 bool keep_index_consistent
, std::string
*err_msg
)
924 bool delete_children
= op_state
.will_delete_children();
925 rgw_bucket bucket
= op_state
.get_bucket();
929 if (delete_children
) {
930 ret
= rgw_remove_bucket_bypass_gc(store
, bucket
, op_state
.get_max_aio(), keep_index_consistent
);
932 set_err_msg(err_msg
, "purge objects should be set for gc to be bypassed");
936 ret
= rgw_remove_bucket(store
, bucket
, delete_children
);
940 set_err_msg(err_msg
, "unable to remove bucket" + cpp_strerror(-ret
));
947 int RGWBucket::remove_object(RGWBucketAdminOpState
& op_state
, std::string
*err_msg
)
949 rgw_bucket bucket
= op_state
.get_bucket();
950 std::string object_name
= op_state
.get_object_name();
952 rgw_obj_key
key(object_name
);
954 int ret
= rgw_remove_object(store
, bucket_info
, bucket
, key
);
956 set_err_msg(err_msg
, "unable to remove object" + cpp_strerror(-ret
));
963 static void dump_bucket_index(map
<string
, rgw_bucket_dir_entry
> result
, Formatter
*f
)
965 map
<string
, rgw_bucket_dir_entry
>::iterator iter
;
966 for (iter
= result
.begin(); iter
!= result
.end(); ++iter
) {
967 f
->dump_string("object", iter
->first
);
971 static void dump_bucket_usage(map
<RGWObjCategory
, RGWStorageStats
>& stats
, Formatter
*formatter
)
973 map
<RGWObjCategory
, RGWStorageStats
>::iterator iter
;
975 formatter
->open_object_section("usage");
976 for (iter
= stats
.begin(); iter
!= stats
.end(); ++iter
) {
977 RGWStorageStats
& s
= iter
->second
;
978 const char *cat_name
= rgw_obj_category_name(iter
->first
);
979 formatter
->open_object_section(cat_name
);
981 formatter
->close_section();
983 formatter
->close_section();
986 static void dump_index_check(map
<RGWObjCategory
, RGWStorageStats
> existing_stats
,
987 map
<RGWObjCategory
, RGWStorageStats
> calculated_stats
,
988 Formatter
*formatter
)
990 formatter
->open_object_section("check_result");
991 formatter
->open_object_section("existing_header");
992 dump_bucket_usage(existing_stats
, formatter
);
993 formatter
->close_section();
994 formatter
->open_object_section("calculated_header");
995 dump_bucket_usage(calculated_stats
, formatter
);
996 formatter
->close_section();
997 formatter
->close_section();
1000 int RGWBucket::check_bad_index_multipart(RGWBucketAdminOpState
& op_state
,
1001 RGWFormatterFlusher
& flusher
,std::string
*err_msg
)
1003 bool fix_index
= op_state
.will_fix_index();
1004 rgw_bucket bucket
= op_state
.get_bucket();
1008 map
<string
, bool> common_prefixes
;
1011 map
<string
, bool> meta_objs
;
1012 map
<rgw_obj_index_key
, string
> all_objs
;
1014 RGWBucketInfo bucket_info
;
1015 RGWObjectCtx
obj_ctx(store
);
1016 int r
= store
->get_bucket_instance_info(obj_ctx
, bucket
, bucket_info
, nullptr, nullptr);
1018 ldout(store
->ctx(), 0) << "ERROR: " << __func__
<< "(): get_bucket_instance_info(bucket=" << bucket
<< ") returned r=" << r
<< dendl
;
1022 RGWRados::Bucket
target(store
, bucket_info
);
1023 RGWRados::Bucket::List
list_op(&target
);
1025 list_op
.params
.list_versions
= true;
1026 list_op
.params
.ns
= RGW_OBJ_NS_MULTIPART
;
1029 vector
<rgw_bucket_dir_entry
> result
;
1030 int r
= list_op
.list_objects(max
, &result
, &common_prefixes
, &is_truncated
);
1032 set_err_msg(err_msg
, "failed to list objects in bucket=" + bucket
.name
+
1033 " err=" + cpp_strerror(-r
));
1038 vector
<rgw_bucket_dir_entry
>::iterator iter
;
1039 for (iter
= result
.begin(); iter
!= result
.end(); ++iter
) {
1040 rgw_obj_index_key key
= iter
->key
;
1041 rgw_obj
obj(bucket
, key
);
1042 string oid
= obj
.get_oid();
1044 int pos
= oid
.find_last_of('.');
1046 /* obj has no suffix */
1047 all_objs
[key
] = oid
;
1049 /* obj has suffix */
1050 string name
= oid
.substr(0, pos
);
1051 string suffix
= oid
.substr(pos
+ 1);
1053 if (suffix
.compare("meta") == 0) {
1054 meta_objs
[name
] = true;
1056 all_objs
[key
] = name
;
1061 } while (is_truncated
);
1063 list
<rgw_obj_index_key
> objs_to_unlink
;
1064 Formatter
*f
= flusher
.get_formatter();
1066 f
->open_array_section("invalid_multipart_entries");
1068 for (auto aiter
= all_objs
.begin(); aiter
!= all_objs
.end(); ++aiter
) {
1069 string
& name
= aiter
->second
;
1071 if (meta_objs
.find(name
) == meta_objs
.end()) {
1072 objs_to_unlink
.push_back(aiter
->first
);
1075 if (objs_to_unlink
.size() > max
) {
1077 int r
= store
->remove_objs_from_index(bucket_info
, objs_to_unlink
);
1079 set_err_msg(err_msg
, "ERROR: remove_obj_from_index() returned error: " +
1085 dump_mulipart_index_results(objs_to_unlink
, flusher
.get_formatter());
1087 objs_to_unlink
.clear();
1092 int r
= store
->remove_objs_from_index(bucket_info
, objs_to_unlink
);
1094 set_err_msg(err_msg
, "ERROR: remove_obj_from_index() returned error: " +
1101 dump_mulipart_index_results(objs_to_unlink
, f
);
1108 int RGWBucket::check_object_index(RGWBucketAdminOpState
& op_state
,
1109 RGWFormatterFlusher
& flusher
,
1110 std::string
*err_msg
)
1113 bool fix_index
= op_state
.will_fix_index();
1115 rgw_bucket bucket
= op_state
.get_bucket();
1118 set_err_msg(err_msg
, "check-objects flag requires fix index enabled");
1122 store
->cls_obj_set_bucket_tag_timeout(bucket_info
, BUCKET_TAG_TIMEOUT
);
1125 rgw_obj_index_key marker
;
1126 bool is_truncated
= true;
1128 Formatter
*formatter
= flusher
.get_formatter();
1129 formatter
->open_object_section("objects");
1130 while (is_truncated
) {
1131 map
<string
, rgw_bucket_dir_entry
> result
;
1133 int r
= store
->cls_bucket_list(bucket_info
, RGW_NO_SHARD
, marker
, prefix
, 1000, true,
1134 result
, &is_truncated
, &marker
,
1135 bucket_object_check_filter
);
1138 } else if (r
< 0 && r
!= -ENOENT
) {
1139 set_err_msg(err_msg
, "ERROR: failed operation r=" + cpp_strerror(-r
));
1143 dump_bucket_index(result
, formatter
);
1148 formatter
->close_section();
1150 store
->cls_obj_set_bucket_tag_timeout(bucket_info
, 0);
1156 int RGWBucket::check_index(RGWBucketAdminOpState
& op_state
,
1157 map
<RGWObjCategory
, RGWStorageStats
>& existing_stats
,
1158 map
<RGWObjCategory
, RGWStorageStats
>& calculated_stats
,
1159 std::string
*err_msg
)
1161 rgw_bucket bucket
= op_state
.get_bucket();
1162 bool fix_index
= op_state
.will_fix_index();
1164 int r
= store
->bucket_check_index(bucket_info
, &existing_stats
, &calculated_stats
);
1166 set_err_msg(err_msg
, "failed to check index error=" + cpp_strerror(-r
));
1171 r
= store
->bucket_rebuild_index(bucket_info
);
1173 set_err_msg(err_msg
, "failed to rebuild index err=" + cpp_strerror(-r
));
1182 int RGWBucket::policy_bl_to_stream(bufferlist
& bl
, ostream
& o
)
1184 RGWAccessControlPolicy_S3
policy(g_ceph_context
);
1185 bufferlist::iterator iter
= bl
.begin();
1187 policy
.decode(iter
);
1188 } catch (buffer::error
& err
) {
1189 dout(0) << "ERROR: caught buffer::error, could not decode policy" << dendl
;
1196 static int policy_decode(RGWRados
*store
, bufferlist
& bl
, RGWAccessControlPolicy
& policy
)
1198 bufferlist::iterator iter
= bl
.begin();
1200 policy
.decode(iter
);
1201 } catch (buffer::error
& err
) {
1202 ldout(store
->ctx(), 0) << "ERROR: caught buffer::error, could not decode policy" << dendl
;
1208 int RGWBucket::get_policy(RGWBucketAdminOpState
& op_state
, RGWAccessControlPolicy
& policy
)
1210 std::string object_name
= op_state
.get_object_name();
1211 rgw_bucket bucket
= op_state
.get_bucket();
1212 RGWObjectCtx
obj_ctx(store
);
1214 RGWBucketInfo bucket_info
;
1215 map
<string
, bufferlist
> attrs
;
1216 int ret
= store
->get_bucket_info(obj_ctx
, bucket
.tenant
, bucket
.name
, bucket_info
, NULL
, &attrs
);
1221 if (!object_name
.empty()) {
1223 rgw_obj
obj(bucket
, object_name
);
1225 RGWRados::Object
op_target(store
, bucket_info
, obj_ctx
, obj
);
1226 RGWRados::Object::Read
rop(&op_target
);
1228 int ret
= rop
.get_attr(RGW_ATTR_ACL
, bl
);
1232 return policy_decode(store
, bl
, policy
);
1235 map
<string
, bufferlist
>::iterator aiter
= attrs
.find(RGW_ATTR_ACL
);
1236 if (aiter
== attrs
.end()) {
1240 return policy_decode(store
, aiter
->second
, policy
);
1244 int RGWBucketAdminOp::get_policy(RGWRados
*store
, RGWBucketAdminOpState
& op_state
,
1245 RGWAccessControlPolicy
& policy
)
1249 int ret
= bucket
.init(store
, op_state
);
1253 ret
= bucket
.get_policy(op_state
, policy
);
1260 /* Wrappers to facilitate RESTful interface */
1263 int RGWBucketAdminOp::get_policy(RGWRados
*store
, RGWBucketAdminOpState
& op_state
,
1264 RGWFormatterFlusher
& flusher
)
1266 RGWAccessControlPolicy
policy(store
->ctx());
1268 int ret
= get_policy(store
, op_state
, policy
);
1272 Formatter
*formatter
= flusher
.get_formatter();
1276 formatter
->open_object_section("policy");
1277 policy
.dump(formatter
);
1278 formatter
->close_section();
1285 int RGWBucketAdminOp::dump_s3_policy(RGWRados
*store
, RGWBucketAdminOpState
& op_state
,
1288 RGWAccessControlPolicy_S3
policy(store
->ctx());
1290 int ret
= get_policy(store
, op_state
, policy
);
1299 int RGWBucketAdminOp::unlink(RGWRados
*store
, RGWBucketAdminOpState
& op_state
)
1303 int ret
= bucket
.init(store
, op_state
);
1307 return bucket
.unlink(op_state
);
1310 int RGWBucketAdminOp::link(RGWRados
*store
, RGWBucketAdminOpState
& op_state
, string
*err
)
1314 int ret
= bucket
.init(store
, op_state
);
1318 return bucket
.link(op_state
, err
);
1322 int RGWBucketAdminOp::check_index(RGWRados
*store
, RGWBucketAdminOpState
& op_state
,
1323 RGWFormatterFlusher
& flusher
)
1326 map
<RGWObjCategory
, RGWStorageStats
> existing_stats
;
1327 map
<RGWObjCategory
, RGWStorageStats
> calculated_stats
;
1332 ret
= bucket
.init(store
, op_state
);
1336 Formatter
*formatter
= flusher
.get_formatter();
1339 ret
= bucket
.check_bad_index_multipart(op_state
, flusher
);
1343 ret
= bucket
.check_object_index(op_state
, flusher
);
1347 ret
= bucket
.check_index(op_state
, existing_stats
, calculated_stats
);
1351 dump_index_check(existing_stats
, calculated_stats
, formatter
);
1357 int RGWBucketAdminOp::remove_bucket(RGWRados
*store
, RGWBucketAdminOpState
& op_state
,
1358 bool bypass_gc
, bool keep_index_consistent
)
1362 int ret
= bucket
.init(store
, op_state
);
1366 std::string err_msg
;
1367 ret
= bucket
.remove(op_state
, bypass_gc
, keep_index_consistent
, &err_msg
);
1368 if (!err_msg
.empty()) {
1369 lderr(store
->ctx()) << "ERROR: " << err_msg
<< dendl
;
1374 int RGWBucketAdminOp::remove_object(RGWRados
*store
, RGWBucketAdminOpState
& op_state
)
1378 int ret
= bucket
.init(store
, op_state
);
1382 return bucket
.remove_object(op_state
);
1385 static int bucket_stats(RGWRados
*store
, const std::string
& tenant_name
, std::string
& bucket_name
, Formatter
*formatter
)
1387 RGWBucketInfo bucket_info
;
1388 map
<RGWObjCategory
, RGWStorageStats
> stats
;
1391 RGWObjectCtx
obj_ctx(store
);
1392 int r
= store
->get_bucket_info(obj_ctx
, tenant_name
, bucket_name
, bucket_info
, &mtime
);
1396 rgw_bucket
& bucket
= bucket_info
.bucket
;
1398 string bucket_ver
, master_ver
;
1400 int ret
= store
->get_bucket_stats(bucket_info
, RGW_NO_SHARD
, &bucket_ver
, &master_ver
, stats
, &max_marker
);
1402 cerr
<< "error getting bucket stats ret=" << ret
<< std::endl
;
1408 formatter
->open_object_section("stats");
1409 formatter
->dump_string("bucket", bucket
.name
);
1410 formatter
->dump_string("zonegroup", bucket_info
.zonegroup
);
1411 formatter
->dump_string("placement_rule", bucket_info
.placement_rule
);
1412 ::encode_json("explicit_placement", bucket
.explicit_placement
, formatter
);
1413 formatter
->dump_string("id", bucket
.bucket_id
);
1414 formatter
->dump_string("marker", bucket
.marker
);
1415 formatter
->dump_stream("index_type") << bucket_info
.index_type
;
1416 ::encode_json("owner", bucket_info
.owner
, formatter
);
1417 formatter
->dump_string("ver", bucket_ver
);
1418 formatter
->dump_string("master_ver", master_ver
);
1419 formatter
->dump_stream("mtime") << ut
;
1420 formatter
->dump_string("max_marker", max_marker
);
1421 dump_bucket_usage(stats
, formatter
);
1422 encode_json("bucket_quota", bucket_info
.quota
, formatter
);
1423 formatter
->close_section();
1428 int RGWBucketAdminOp::limit_check(RGWRados
*store
,
1429 RGWBucketAdminOpState
& op_state
,
1430 const std::list
<std::string
>& user_ids
,
1431 RGWFormatterFlusher
& flusher
,
1435 const size_t max_entries
=
1436 store
->ctx()->_conf
->rgw_list_buckets_max_chunk
;
1438 const size_t safe_max_objs_per_shard
=
1439 store
->ctx()->_conf
->rgw_safe_max_objects_per_shard
;
1441 uint16_t shard_warn_pct
=
1442 store
->ctx()->_conf
->rgw_shard_warning_threshold
;
1443 if (shard_warn_pct
> 100)
1444 shard_warn_pct
= 90;
1446 Formatter
*formatter
= flusher
.get_formatter();
1449 formatter
->open_array_section("users");
1451 for (const auto& user_id
: user_ids
) {
1452 formatter
->open_object_section("user");
1453 formatter
->dump_string("user_id", user_id
);
1455 formatter
->open_array_section("buckets");
1457 RGWUserBuckets buckets
;
1461 ret
= rgw_read_user_buckets(store
, user_id
, buckets
,
1462 marker
, string(), max_entries
, false,
1467 map
<string
, RGWBucketEnt
>& m_buckets
= buckets
.get_buckets();
1469 for (const auto& iter
: m_buckets
) {
1470 auto& bucket
= iter
.second
.bucket
;
1471 uint32_t num_shards
= 1;
1472 uint64_t num_objects
= 0;
1474 /* need info for num_shards */
1476 RGWObjectCtx
obj_ctx(store
);
1478 marker
= bucket
.name
; /* Casey's location for marker update,
1479 * as we may now not reach the end of
1482 ret
= store
->get_bucket_info(obj_ctx
, bucket
.tenant
, bucket
.name
,
1487 /* need stats for num_entries */
1488 string bucket_ver
, master_ver
;
1489 std::map
<RGWObjCategory
, RGWStorageStats
> stats
;
1490 ret
= store
->get_bucket_stats(info
, RGW_NO_SHARD
, &bucket_ver
,
1491 &master_ver
, stats
, nullptr);
1496 for (const auto& s
: stats
) {
1497 num_objects
+= s
.second
.num_objects
;
1500 num_shards
= info
.num_shards
;
1501 uint64_t objs_per_shard
=
1502 (num_shards
) ? num_objects
/num_shards
: num_objects
;
1506 if (objs_per_shard
> safe_max_objs_per_shard
) {
1508 100 - (safe_max_objs_per_shard
/objs_per_shard
* 100);
1509 ss
<< boost::format("OVER %4f%%") % over
;
1513 objs_per_shard
/ safe_max_objs_per_shard
* 100;
1514 if (fill_pct
>= shard_warn_pct
) {
1515 ss
<< boost::format("WARN %4f%%") % fill_pct
;
1522 if (warn
|| (! warnings_only
)) {
1523 formatter
->open_object_section("bucket");
1524 formatter
->dump_string("bucket", bucket
.name
);
1525 formatter
->dump_string("tenant", bucket
.tenant
);
1526 formatter
->dump_int("num_objects", num_objects
);
1527 formatter
->dump_int("num_shards", num_shards
);
1528 formatter
->dump_int("objects_per_shard", objs_per_shard
);
1529 formatter
->dump_string("fill_status", ss
.str());
1530 formatter
->close_section();
1535 done
= (m_buckets
.size() < max_entries
);
1536 } while (!done
); /* foreach: bucket */
1538 formatter
->close_section();
1539 formatter
->close_section();
1540 formatter
->flush(cout
);
1542 } /* foreach: user_id */
1544 formatter
->close_section();
1545 formatter
->flush(cout
);
1548 } /* RGWBucketAdminOp::limit_check */
1550 int RGWBucketAdminOp::info(RGWRados
*store
, RGWBucketAdminOpState
& op_state
,
1551 RGWFormatterFlusher
& flusher
)
1556 string bucket_name
= op_state
.get_bucket_name();
1558 if (!bucket_name
.empty()) {
1559 ret
= bucket
.init(store
, op_state
);
1564 Formatter
*formatter
= flusher
.get_formatter();
1567 CephContext
*cct
= store
->ctx();
1569 const size_t max_entries
= cct
->_conf
->rgw_list_buckets_max_chunk
;
1571 bool show_stats
= op_state
.will_fetch_stats();
1572 rgw_user user_id
= op_state
.get_user_id();
1573 if (op_state
.is_user_op()) {
1574 formatter
->open_array_section("buckets");
1576 RGWUserBuckets buckets
;
1578 bool is_truncated
= false;
1581 ret
= rgw_read_user_buckets(store
, op_state
.get_user_id(), buckets
,
1582 marker
, string(), max_entries
, false,
1587 map
<string
, RGWBucketEnt
>& m
= buckets
.get_buckets();
1588 map
<string
, RGWBucketEnt
>::iterator iter
;
1590 for (iter
= m
.begin(); iter
!= m
.end(); ++iter
) {
1591 std::string obj_name
= iter
->first
;
1593 bucket_stats(store
, user_id
.tenant
, obj_name
, formatter
);
1595 formatter
->dump_string("bucket", obj_name
);
1601 } while (is_truncated
);
1603 formatter
->close_section();
1604 } else if (!bucket_name
.empty()) {
1605 bucket_stats(store
, user_id
.tenant
, bucket_name
, formatter
);
1607 RGWAccessHandle handle
;
1609 formatter
->open_array_section("buckets");
1610 if (store
->list_buckets_init(&handle
) >= 0) {
1611 rgw_bucket_dir_entry obj
;
1612 while (store
->list_buckets_next(obj
, &handle
) >= 0) {
1614 bucket_stats(store
, user_id
.tenant
, obj
.key
.name
, formatter
);
1616 formatter
->dump_string("bucket", obj
.key
.name
);
1620 formatter
->close_section();
1629 void rgw_data_change::dump(Formatter
*f
) const
1632 switch (entity_type
) {
1633 case ENTITY_TYPE_BUCKET
:
1639 encode_json("entity_type", type
, f
);
1640 encode_json("key", key
, f
);
1641 utime_t
ut(timestamp
);
1642 encode_json("timestamp", ut
, f
);
1645 void rgw_data_change::decode_json(JSONObj
*obj
) {
1647 JSONDecoder::decode_json("entity_type", s
, obj
);
1648 if (s
== "bucket") {
1649 entity_type
= ENTITY_TYPE_BUCKET
;
1651 entity_type
= ENTITY_TYPE_UNKNOWN
;
1653 JSONDecoder::decode_json("key", key
, obj
);
1655 JSONDecoder::decode_json("timestamp", ut
, obj
);
1656 timestamp
= ut
.to_real_time();
1659 void rgw_data_change_log_entry::dump(Formatter
*f
) const
1661 encode_json("log_id", log_id
, f
);
1662 utime_t
ut(log_timestamp
);
1663 encode_json("log_timestamp", ut
, f
);
1664 encode_json("entry", entry
, f
);
1667 void rgw_data_change_log_entry::decode_json(JSONObj
*obj
) {
1668 JSONDecoder::decode_json("log_id", log_id
, obj
);
1670 JSONDecoder::decode_json("log_timestamp", ut
, obj
);
1671 log_timestamp
= ut
.to_real_time();
1672 JSONDecoder::decode_json("entry", entry
, obj
);
1675 int RGWDataChangesLog::choose_oid(const rgw_bucket_shard
& bs
) {
1676 const string
& name
= bs
.bucket
.name
;
1677 int shard_shift
= (bs
.shard_id
> 0 ? bs
.shard_id
: 0);
1678 uint32_t r
= (ceph_str_hash_linux(name
.c_str(), name
.size()) + shard_shift
) % num_shards
;
1683 int RGWDataChangesLog::renew_entries()
1685 if (!store
->need_to_log_data())
1688 /* we can't keep the bucket name as part of the cls_log_entry, and we need
1689 * it later, so we keep two lists under the map */
1690 map
<int, pair
<list
<rgw_bucket_shard
>, list
<cls_log_entry
> > > m
;
1693 map
<rgw_bucket_shard
, bool> entries
;
1694 entries
.swap(cur_cycle
);
1697 map
<rgw_bucket_shard
, bool>::iterator iter
;
1699 real_time ut
= real_clock::now();
1700 for (iter
= entries
.begin(); iter
!= entries
.end(); ++iter
) {
1701 const rgw_bucket_shard
& bs
= iter
->first
;
1703 int index
= choose_oid(bs
);
1705 cls_log_entry entry
;
1707 rgw_data_change change
;
1709 change
.entity_type
= ENTITY_TYPE_BUCKET
;
1710 change
.key
= bs
.get_key();
1711 change
.timestamp
= ut
;
1712 ::encode(change
, bl
);
1714 store
->time_log_prepare_entry(entry
, ut
, section
, change
.key
, bl
);
1716 m
[index
].first
.push_back(bs
);
1717 m
[index
].second
.emplace_back(std::move(entry
));
1720 map
<int, pair
<list
<rgw_bucket_shard
>, list
<cls_log_entry
> > >::iterator miter
;
1721 for (miter
= m
.begin(); miter
!= m
.end(); ++miter
) {
1722 list
<cls_log_entry
>& entries
= miter
->second
.second
;
1724 real_time now
= real_clock::now();
1726 int ret
= store
->time_log_add(oids
[miter
->first
], entries
, NULL
);
1728 /* we don't really need to have a special handling for failed cases here,
1729 * as this is just an optimization. */
1730 lderr(cct
) << "ERROR: store->time_log_add() returned " << ret
<< dendl
;
1734 real_time expiration
= now
;
1735 expiration
+= make_timespan(cct
->_conf
->rgw_data_log_window
);
1737 list
<rgw_bucket_shard
>& buckets
= miter
->second
.first
;
1738 list
<rgw_bucket_shard
>::iterator liter
;
1739 for (liter
= buckets
.begin(); liter
!= buckets
.end(); ++liter
) {
1740 update_renewed(*liter
, expiration
);
1747 void RGWDataChangesLog::_get_change(const rgw_bucket_shard
& bs
, ChangeStatusPtr
& status
)
1749 assert(lock
.is_locked());
1750 if (!changes
.find(bs
, status
)) {
1751 status
= ChangeStatusPtr(new ChangeStatus
);
1752 changes
.add(bs
, status
);
1756 void RGWDataChangesLog::register_renew(rgw_bucket_shard
& bs
)
1758 Mutex::Locker
l(lock
);
1759 cur_cycle
[bs
] = true;
1762 void RGWDataChangesLog::update_renewed(rgw_bucket_shard
& bs
, real_time
& expiration
)
1764 Mutex::Locker
l(lock
);
1765 ChangeStatusPtr status
;
1766 _get_change(bs
, status
);
1768 ldout(cct
, 20) << "RGWDataChangesLog::update_renewd() bucket_name=" << bs
.bucket
.name
<< " shard_id=" << bs
.shard_id
<< " expiration=" << expiration
<< dendl
;
1769 status
->cur_expiration
= expiration
;
1772 int RGWDataChangesLog::get_log_shard_id(rgw_bucket
& bucket
, int shard_id
) {
1773 rgw_bucket_shard
bs(bucket
, shard_id
);
1775 return choose_oid(bs
);
1778 int RGWDataChangesLog::add_entry(rgw_bucket
& bucket
, int shard_id
) {
1779 if (!store
->need_to_log_data())
1782 rgw_bucket_shard
bs(bucket
, shard_id
);
1784 int index
= choose_oid(bs
);
1785 mark_modified(index
, bs
);
1789 ChangeStatusPtr status
;
1790 _get_change(bs
, status
);
1794 real_time now
= real_clock::now();
1796 status
->lock
->Lock();
1798 ldout(cct
, 20) << "RGWDataChangesLog::add_entry() bucket.name=" << bucket
.name
<< " shard_id=" << shard_id
<< " now=" << now
<< " cur_expiration=" << status
->cur_expiration
<< dendl
;
1800 if (now
< status
->cur_expiration
) {
1801 /* no need to send, recently completed */
1802 status
->lock
->Unlock();
1808 RefCountedCond
*cond
;
1810 if (status
->pending
) {
1811 cond
= status
->cond
;
1815 status
->cond
->get();
1816 status
->lock
->Unlock();
1818 int ret
= cond
->wait();
1826 status
->cond
= new RefCountedCond
;
1827 status
->pending
= true;
1829 string
& oid
= oids
[index
];
1830 real_time expiration
;
1835 status
->cur_sent
= now
;
1838 expiration
+= ceph::make_timespan(cct
->_conf
->rgw_data_log_window
);
1840 status
->lock
->Unlock();
1843 rgw_data_change change
;
1844 change
.entity_type
= ENTITY_TYPE_BUCKET
;
1845 change
.key
= bs
.get_key();
1846 change
.timestamp
= now
;
1847 ::encode(change
, bl
);
1850 ldout(cct
, 20) << "RGWDataChangesLog::add_entry() sending update with now=" << now
<< " cur_expiration=" << expiration
<< dendl
;
1852 ret
= store
->time_log_add(oid
, now
, section
, change
.key
, bl
);
1854 now
= real_clock::now();
1856 status
->lock
->Lock();
1858 } while (!ret
&& real_clock::now() > expiration
);
1860 cond
= status
->cond
;
1862 status
->pending
= false;
1863 status
->cur_expiration
= status
->cur_sent
; /* time of when operation started, not completed */
1864 status
->cur_expiration
+= make_timespan(cct
->_conf
->rgw_data_log_window
);
1865 status
->cond
= NULL
;
1866 status
->lock
->Unlock();
1874 int RGWDataChangesLog::list_entries(int shard
, const real_time
& start_time
, const real_time
& end_time
, int max_entries
,
1875 list
<rgw_data_change_log_entry
>& entries
,
1876 const string
& marker
,
1879 if (shard
>= num_shards
)
1882 list
<cls_log_entry
> log_entries
;
1884 int ret
= store
->time_log_list(oids
[shard
], start_time
, end_time
,
1885 max_entries
, log_entries
, marker
,
1886 out_marker
, truncated
);
1890 list
<cls_log_entry
>::iterator iter
;
1891 for (iter
= log_entries
.begin(); iter
!= log_entries
.end(); ++iter
) {
1892 rgw_data_change_log_entry log_entry
;
1893 log_entry
.log_id
= iter
->id
;
1894 real_time rt
= iter
->timestamp
.to_real_time();
1895 log_entry
.log_timestamp
= rt
;
1896 bufferlist::iterator liter
= iter
->data
.begin();
1898 ::decode(log_entry
.entry
, liter
);
1899 } catch (buffer::error
& err
) {
1900 lderr(cct
) << "ERROR: failed to decode data changes log entry" << dendl
;
1903 entries
.push_back(log_entry
);
1909 int RGWDataChangesLog::list_entries(const real_time
& start_time
, const real_time
& end_time
, int max_entries
,
1910 list
<rgw_data_change_log_entry
>& entries
, LogMarker
& marker
, bool *ptruncated
) {
1914 for (; marker
.shard
< num_shards
&& (int)entries
.size() < max_entries
;
1915 marker
.shard
++, marker
.marker
.clear()) {
1916 int ret
= list_entries(marker
.shard
, start_time
, end_time
, max_entries
- entries
.size(), entries
,
1917 marker
.marker
, NULL
, &truncated
);
1918 if (ret
== -ENOENT
) {
1930 *ptruncated
= (marker
.shard
< num_shards
);
1935 int RGWDataChangesLog::get_info(int shard_id
, RGWDataChangesLogInfo
*info
)
1937 if (shard_id
>= num_shards
)
1940 string oid
= oids
[shard_id
];
1942 cls_log_header header
;
1944 int ret
= store
->time_log_info(oid
, &header
);
1945 if ((ret
< 0) && (ret
!= -ENOENT
))
1948 info
->marker
= header
.max_marker
;
1949 info
->last_update
= header
.max_time
.to_real_time();
1954 int RGWDataChangesLog::trim_entries(int shard_id
, const real_time
& start_time
, const real_time
& end_time
,
1955 const string
& start_marker
, const string
& end_marker
)
1959 if (shard_id
> num_shards
)
1962 ret
= store
->time_log_trim(oids
[shard_id
], start_time
, end_time
, start_marker
, end_marker
);
1964 if (ret
== -ENOENT
|| ret
== -ENODATA
)
1970 int RGWDataChangesLog::trim_entries(const real_time
& start_time
, const real_time
& end_time
,
1971 const string
& start_marker
, const string
& end_marker
)
1973 for (int shard
= 0; shard
< num_shards
; shard
++) {
1974 int ret
= store
->time_log_trim(oids
[shard
], start_time
, end_time
, start_marker
, end_marker
);
1975 if (ret
== -ENOENT
|| ret
== -ENODATA
) {
1985 bool RGWDataChangesLog::going_down()
1990 RGWDataChangesLog::~RGWDataChangesLog() {
1992 renew_thread
->stop();
1993 renew_thread
->join();
1994 delete renew_thread
;
1998 void *RGWDataChangesLog::ChangesRenewThread::entry() {
2000 dout(2) << "RGWDataChangesLog::ChangesRenewThread: start" << dendl
;
2001 int r
= log
->renew_entries();
2003 dout(0) << "ERROR: RGWDataChangesLog::renew_entries returned error r=" << r
<< dendl
;
2006 if (log
->going_down())
2009 int interval
= cct
->_conf
->rgw_data_log_window
* 3 / 4;
2011 cond
.WaitInterval(lock
, utime_t(interval
, 0));
2013 } while (!log
->going_down());
2018 void RGWDataChangesLog::ChangesRenewThread::stop()
2020 Mutex::Locker
l(lock
);
2024 void RGWDataChangesLog::mark_modified(int shard_id
, const rgw_bucket_shard
& bs
)
2026 auto key
= bs
.get_key();
2027 modified_lock
.get_read();
2028 map
<int, set
<string
> >::iterator iter
= modified_shards
.find(shard_id
);
2029 if (iter
!= modified_shards
.end()) {
2030 set
<string
>& keys
= iter
->second
;
2031 if (keys
.find(key
) != keys
.end()) {
2032 modified_lock
.unlock();
2036 modified_lock
.unlock();
2038 RWLock::WLocker
wl(modified_lock
);
2039 modified_shards
[shard_id
].insert(key
);
2042 void RGWDataChangesLog::read_clear_modified(map
<int, set
<string
> > &modified
)
2044 RWLock::WLocker
wl(modified_lock
);
2045 modified
.swap(modified_shards
);
2046 modified_shards
.clear();
2049 void RGWBucketCompleteInfo::dump(Formatter
*f
) const {
2050 encode_json("bucket_info", info
, f
);
2051 encode_json("attrs", attrs
, f
);
2054 void RGWBucketCompleteInfo::decode_json(JSONObj
*obj
) {
2055 JSONDecoder::decode_json("bucket_info", info
, obj
);
2056 JSONDecoder::decode_json("attrs", attrs
, obj
);
2059 class RGWBucketMetadataHandler
: public RGWMetadataHandler
{
2062 string
get_type() override
{ return "bucket"; }
2064 int get(RGWRados
*store
, string
& entry
, RGWMetadataObject
**obj
) override
{
2065 RGWObjVersionTracker ot
;
2066 RGWBucketEntryPoint be
;
2069 map
<string
, bufferlist
> attrs
;
2070 RGWObjectCtx
obj_ctx(store
);
2072 string tenant_name
, bucket_name
;
2073 parse_bucket(entry
, &tenant_name
, &bucket_name
);
2074 int ret
= store
->get_bucket_entrypoint_info(obj_ctx
, tenant_name
, bucket_name
, be
, &ot
, &mtime
, &attrs
);
2078 RGWBucketEntryMetadataObject
*mdo
= new RGWBucketEntryMetadataObject(be
, ot
.read_version
, mtime
);
2085 int put(RGWRados
*store
, string
& entry
, RGWObjVersionTracker
& objv_tracker
,
2086 real_time mtime
, JSONObj
*obj
, sync_type_t sync_type
) override
{
2087 RGWBucketEntryPoint be
, old_be
;
2089 decode_json_obj(be
, obj
);
2090 } catch (JSONDecoder::err
& e
) {
2094 real_time orig_mtime
;
2095 map
<string
, bufferlist
> attrs
;
2097 RGWObjVersionTracker old_ot
;
2098 RGWObjectCtx
obj_ctx(store
);
2100 string tenant_name
, bucket_name
;
2101 parse_bucket(entry
, &tenant_name
, &bucket_name
);
2102 int ret
= store
->get_bucket_entrypoint_info(obj_ctx
, tenant_name
, bucket_name
, old_be
, &old_ot
, &orig_mtime
, &attrs
);
2103 if (ret
< 0 && ret
!= -ENOENT
)
2106 // are we actually going to perform this put, or is it too old?
2107 if (ret
!= -ENOENT
&&
2108 !check_versions(old_ot
.read_version
, orig_mtime
,
2109 objv_tracker
.write_version
, mtime
, sync_type
)) {
2110 return STATUS_NO_APPLY
;
2113 objv_tracker
.read_version
= old_ot
.read_version
; /* maintain the obj version we just read */
2115 ret
= store
->put_bucket_entrypoint_info(tenant_name
, bucket_name
, be
, false, objv_tracker
, mtime
, &attrs
);
2121 ret
= rgw_link_bucket(store
, be
.owner
, be
.bucket
, be
.creation_time
, false);
2123 ret
= rgw_unlink_bucket(store
, be
.owner
, be
.bucket
.tenant
, be
.bucket
.name
, false);
2129 struct list_keys_info
{
2131 RGWListRawObjsCtx ctx
;
2134 int remove(RGWRados
*store
, string
& entry
, RGWObjVersionTracker
& objv_tracker
) override
{
2135 RGWBucketEntryPoint be
;
2136 RGWObjectCtx
obj_ctx(store
);
2138 string tenant_name
, bucket_name
;
2139 parse_bucket(entry
, &tenant_name
, &bucket_name
);
2140 int ret
= store
->get_bucket_entrypoint_info(obj_ctx
, tenant_name
, bucket_name
, be
, &objv_tracker
, NULL
, NULL
);
2145 * We're unlinking the bucket but we don't want to update the entrypoint here - we're removing
2146 * it immediately and don't want to invalidate our cached objv_version or the bucket obj removal
2147 * will incorrectly fail.
2149 ret
= rgw_unlink_bucket(store
, be
.owner
, tenant_name
, bucket_name
, false);
2151 lderr(store
->ctx()) << "could not unlink bucket=" << entry
<< " owner=" << be
.owner
<< dendl
;
2154 ret
= rgw_bucket_delete_bucket_obj(store
, tenant_name
, bucket_name
, objv_tracker
);
2156 lderr(store
->ctx()) << "could not delete bucket=" << entry
<< dendl
;
2162 void get_pool_and_oid(RGWRados
*store
, const string
& key
, rgw_pool
& pool
, string
& oid
) override
{
2164 pool
= store
->get_zone_params().domain_root
;
2167 int list_keys_init(RGWRados
*store
, void **phandle
) override
2169 list_keys_info
*info
= new list_keys_info
;
2171 info
->store
= store
;
2173 *phandle
= (void *)info
;
2178 int list_keys_next(void *handle
, int max
, list
<string
>& keys
, bool *truncated
) override
{
2179 list_keys_info
*info
= static_cast<list_keys_info
*>(handle
);
2185 RGWRados
*store
= info
->store
;
2187 list
<string
> unfiltered_keys
;
2189 int ret
= store
->list_raw_objects(store
->get_zone_params().domain_root
, no_filter
,
2190 max
, info
->ctx
, unfiltered_keys
, truncated
);
2191 if (ret
< 0 && ret
!= -ENOENT
)
2193 if (ret
== -ENOENT
) {
2199 // now filter out the system entries
2200 list
<string
>::iterator iter
;
2201 for (iter
= unfiltered_keys
.begin(); iter
!= unfiltered_keys
.end(); ++iter
) {
2212 void list_keys_complete(void *handle
) override
{
2213 list_keys_info
*info
= static_cast<list_keys_info
*>(handle
);
2218 class RGWBucketInstanceMetadataHandler
: public RGWMetadataHandler
{
2221 string
get_type() override
{ return "bucket.instance"; }
2223 int get(RGWRados
*store
, string
& oid
, RGWMetadataObject
**obj
) override
{
2224 RGWBucketCompleteInfo bci
;
2227 RGWObjectCtx
obj_ctx(store
);
2229 int ret
= store
->get_bucket_instance_info(obj_ctx
, oid
, bci
.info
, &mtime
, &bci
.attrs
);
2233 RGWBucketInstanceMetadataObject
*mdo
= new RGWBucketInstanceMetadataObject(bci
, bci
.info
.objv_tracker
.read_version
, mtime
);
2240 int put(RGWRados
*store
, string
& entry
, RGWObjVersionTracker
& objv_tracker
,
2241 real_time mtime
, JSONObj
*obj
, sync_type_t sync_type
) override
{
2242 RGWBucketCompleteInfo bci
, old_bci
;
2244 decode_json_obj(bci
, obj
);
2245 } catch (JSONDecoder::err
& e
) {
2249 real_time orig_mtime
;
2250 RGWObjectCtx
obj_ctx(store
);
2252 int ret
= store
->get_bucket_instance_info(obj_ctx
, entry
, old_bci
.info
,
2253 &orig_mtime
, &old_bci
.attrs
);
2254 bool exists
= (ret
!= -ENOENT
);
2255 if (ret
< 0 && exists
)
2258 if (!exists
|| old_bci
.info
.bucket
.bucket_id
!= bci
.info
.bucket
.bucket_id
) {
2259 /* a new bucket, we need to select a new bucket placement for it */
2261 rgw_bucket_instance_oid_to_key(key
);
2264 string bucket_instance
;
2265 parse_bucket(key
, &tenant_name
, &bucket_name
, &bucket_instance
);
2267 RGWZonePlacementInfo rule_info
;
2268 bci
.info
.bucket
.name
= bucket_name
;
2269 bci
.info
.bucket
.bucket_id
= bucket_instance
;
2270 bci
.info
.bucket
.tenant
= tenant_name
;
2271 ret
= store
->select_bucket_location_by_rule(bci
.info
.placement_rule
, &rule_info
);
2273 ldout(store
->ctx(), 0) << "ERROR: select_bucket_placement() returned " << ret
<< dendl
;
2276 bci
.info
.index_type
= rule_info
.index_type
;
2278 /* existing bucket, keep its placement */
2279 bci
.info
.bucket
.explicit_placement
= old_bci
.info
.bucket
.explicit_placement
;
2280 bci
.info
.placement_rule
= old_bci
.info
.placement_rule
;
2283 if (exists
&& old_bci
.info
.datasync_flag_enabled() != bci
.info
.datasync_flag_enabled()) {
2284 int shards_num
= bci
.info
.num_shards
? bci
.info
.num_shards
: 1;
2285 int shard_id
= bci
.info
.num_shards
? 0 : -1;
2287 if (!bci
.info
.datasync_flag_enabled()) {
2288 ret
= store
->stop_bi_log_entries(bci
.info
, -1);
2290 lderr(store
->ctx()) << "ERROR: failed writing bilog" << dendl
;
2294 ret
= store
->resync_bi_log_entries(bci
.info
, -1);
2296 lderr(store
->ctx()) << "ERROR: failed writing bilog" << dendl
;
2301 for (int i
= 0; i
< shards_num
; ++i
, ++shard_id
) {
2302 ret
= store
->data_log
->add_entry(bci
.info
.bucket
, shard_id
);
2304 lderr(store
->ctx()) << "ERROR: failed writing data log" << dendl
;
2310 // are we actually going to perform this put, or is it too old?
2312 !check_versions(old_bci
.info
.objv_tracker
.read_version
, orig_mtime
,
2313 objv_tracker
.write_version
, mtime
, sync_type
)) {
2314 objv_tracker
.read_version
= old_bci
.info
.objv_tracker
.read_version
;
2315 return STATUS_NO_APPLY
;
2318 /* record the read version (if any), store the new version */
2319 bci
.info
.objv_tracker
.read_version
= old_bci
.info
.objv_tracker
.read_version
;
2320 bci
.info
.objv_tracker
.write_version
= objv_tracker
.write_version
;
2322 ret
= store
->put_bucket_instance_info(bci
.info
, false, mtime
, &bci
.attrs
);
2326 objv_tracker
= bci
.info
.objv_tracker
;
2328 ret
= store
->init_bucket_index(bci
.info
, bci
.info
.num_shards
);
2332 return STATUS_APPLIED
;
2335 struct list_keys_info
{
2337 RGWListRawObjsCtx ctx
;
2340 int remove(RGWRados
*store
, string
& entry
, RGWObjVersionTracker
& objv_tracker
) override
{
2342 RGWObjectCtx
obj_ctx(store
);
2344 int ret
= store
->get_bucket_instance_info(obj_ctx
, entry
, info
, NULL
, NULL
);
2345 if (ret
< 0 && ret
!= -ENOENT
)
2348 return rgw_bucket_instance_remove_entry(store
, entry
, &info
.objv_tracker
);
2351 void get_pool_and_oid(RGWRados
*store
, const string
& key
, rgw_pool
& pool
, string
& oid
) override
{
2352 oid
= RGW_BUCKET_INSTANCE_MD_PREFIX
+ key
;
2353 rgw_bucket_instance_key_to_oid(oid
);
2354 pool
= store
->get_zone_params().domain_root
;
2357 int list_keys_init(RGWRados
*store
, void **phandle
) override
2359 list_keys_info
*info
= new list_keys_info
;
2361 info
->store
= store
;
2363 *phandle
= (void *)info
;
2368 int list_keys_next(void *handle
, int max
, list
<string
>& keys
, bool *truncated
) override
{
2369 list_keys_info
*info
= static_cast<list_keys_info
*>(handle
);
2375 RGWRados
*store
= info
->store
;
2377 list
<string
> unfiltered_keys
;
2379 int ret
= store
->list_raw_objects(store
->get_zone_params().domain_root
, no_filter
,
2380 max
, info
->ctx
, unfiltered_keys
, truncated
);
2381 if (ret
< 0 && ret
!= -ENOENT
)
2383 if (ret
== -ENOENT
) {
2389 constexpr int prefix_size
= sizeof(RGW_BUCKET_INSTANCE_MD_PREFIX
) - 1;
2390 // now filter in the relevant entries
2391 list
<string
>::iterator iter
;
2392 for (iter
= unfiltered_keys
.begin(); iter
!= unfiltered_keys
.end(); ++iter
) {
2395 if (k
.compare(0, prefix_size
, RGW_BUCKET_INSTANCE_MD_PREFIX
) == 0) {
2396 auto oid
= k
.substr(prefix_size
);
2397 rgw_bucket_instance_oid_to_key(oid
);
2398 keys
.emplace_back(std::move(oid
));
2405 void list_keys_complete(void *handle
) override
{
2406 list_keys_info
*info
= static_cast<list_keys_info
*>(handle
);
2411 * hash entry for mdlog placement. Use the same hash key we'd have for the bucket entry
2412 * point, so that the log entries end up at the same log shard, so that we process them
2415 void get_hash_key(const string
& section
, const string
& key
, string
& hash_key
) override
{
2417 int pos
= key
.find(':');
2421 k
= key
.substr(0, pos
);
2422 hash_key
= "bucket:" + k
;
2426 void rgw_bucket_init(RGWMetadataManager
*mm
)
2428 bucket_meta_handler
= new RGWBucketMetadataHandler
;
2429 mm
->register_handler(bucket_meta_handler
);
2430 bucket_instance_meta_handler
= new RGWBucketInstanceMetadataHandler
;
2431 mm
->register_handler(bucket_instance_meta_handler
);