1 // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
2 // vim: ts=8 sw=2 smarttab
10 #include <boost/utility/string_ref.hpp>
11 #include <boost/format.hpp>
13 #include "common/errno.h"
14 #include "common/ceph_json.h"
15 #include "common/backport14.h"
16 #include "rgw_rados.h"
18 #include "rgw_acl_s3.h"
20 #include "include/types.h"
21 #include "rgw_bucket.h"
23 #include "rgw_string.h"
24 #include "rgw_multi.h"
26 #include "include/rados/librados.hpp"
27 // until everything is moved from rgw_common
28 #include "rgw_common.h"
30 #include "cls/user/cls_user_types.h"
32 #define dout_context g_ceph_context
33 #define dout_subsys ceph_subsys_rgw
35 #define BUCKET_TAG_TIMEOUT 30
39 static RGWMetadataHandler
*bucket_meta_handler
= NULL
;
40 static RGWMetadataHandler
*bucket_instance_meta_handler
= NULL
;
42 // define as static when RGWBucket implementation compete
43 void rgw_get_buckets_obj(const rgw_user
& user_id
, string
& buckets_obj_id
)
45 buckets_obj_id
= user_id
.to_str();
46 buckets_obj_id
+= RGW_BUCKETS_OBJ_SUFFIX
;
/*
 * Note that this is not a reversal of parse_bucket(). That one deals
 * with the syntax we need in metadata and such. This one deals with
 * the representation in RADOS pools. We chose '/' because it's not
 * acceptable in bucket names and thus qualified buckets cannot conflict
 * with the legacy or S3 buckets.
 */
std::string rgw_make_bucket_entry_name(const std::string& tenant_name,
                                       const std::string& bucket_name) {
  std::string bucket_entry;

  if (bucket_name.empty()) {
    bucket_entry.clear();        // no bucket -> empty entry
  } else if (tenant_name.empty()) {
    bucket_entry = bucket_name;  // legacy / global-tenant bucket
  } else {
    bucket_entry = tenant_name + "/" + bucket_name;
  }

  return bucket_entry;
}
/*
 * Tenants are separated from buckets in URLs by a colon in S3.
 * This function is not to be used on Swift URLs, not even for COPY arguments.
 */
void rgw_parse_url_bucket(const std::string &bucket, const std::string& auth_tenant,
                          std::string &tenant_name, std::string &bucket_name) {

  int pos = bucket.find(':');
  if (pos >= 0) {
    /*
     * N.B.: We allow ":bucket" syntax with explicit empty tenant in order
     * to refer to the legacy tenant, in case users in new named tenants
     * want to access old global buckets.
     */
    tenant_name = bucket.substr(0, pos);
    bucket_name = bucket.substr(pos + 1);
  } else {
    // no explicit tenant in the URL: fall back to the authenticated tenant
    tenant_name = auth_tenant;
    bucket_name = bucket;
  }
}
94 * Get all the buckets owned by a user and fill up an RGWUserBuckets with them.
95 * Returns: 0 on success, -ERR# on failure.
97 int rgw_read_user_buckets(RGWRados
* store
,
98 const rgw_user
& user_id
,
99 RGWUserBuckets
& buckets
,
100 const string
& marker
,
101 const string
& end_marker
,
105 uint64_t default_amount
)
109 string buckets_obj_id
;
110 rgw_get_buckets_obj(user_id
, buckets_obj_id
);
111 rgw_raw_obj
obj(store
->get_zone_params().user_uid_pool
, buckets_obj_id
);
112 list
<cls_user_bucket_entry
> entries
;
114 bool truncated
= false;
120 max
= default_amount
;
124 ret
= store
->cls_user_list_buckets(obj
, m
, end_marker
, max
- total
, entries
, &m
, &truncated
);
131 for (const auto& entry
: entries
) {
132 buckets
.add(RGWBucketEnt(user_id
, entry
));
136 } while (truncated
&& total
< max
);
138 if (is_truncated
!= nullptr) {
139 *is_truncated
= truncated
;
143 map
<string
, RGWBucketEnt
>& m
= buckets
.get_buckets();
144 ret
= store
->update_containers_stats(m
);
145 if (ret
< 0 && ret
!= -ENOENT
) {
146 ldout(store
->ctx(), 0) << "ERROR: could not get stats for buckets" << dendl
;
153 int rgw_bucket_sync_user_stats(RGWRados
*store
, const rgw_user
& user_id
, const RGWBucketInfo
& bucket_info
)
155 string buckets_obj_id
;
156 rgw_get_buckets_obj(user_id
, buckets_obj_id
);
157 rgw_raw_obj
obj(store
->get_zone_params().user_uid_pool
, buckets_obj_id
);
159 return store
->cls_user_sync_bucket_stats(obj
, bucket_info
);
162 int rgw_bucket_sync_user_stats(RGWRados
*store
, const string
& tenant_name
, const string
& bucket_name
)
164 RGWBucketInfo bucket_info
;
165 RGWObjectCtx
obj_ctx(store
);
166 int ret
= store
->get_bucket_info(obj_ctx
, tenant_name
, bucket_name
, bucket_info
, NULL
);
168 ldout(store
->ctx(), 0) << "ERROR: could not fetch bucket info: ret=" << ret
<< dendl
;
172 ret
= rgw_bucket_sync_user_stats(store
, bucket_info
.owner
, bucket_info
);
174 ldout(store
->ctx(), 0) << "ERROR: could not sync user stats for bucket " << bucket_name
<< ": ret=" << ret
<< dendl
;
181 int rgw_link_bucket(RGWRados
*store
, const rgw_user
& user_id
, rgw_bucket
& bucket
, real_time creation_time
, bool update_entrypoint
)
184 string
& tenant_name
= bucket
.tenant
;
185 string
& bucket_name
= bucket
.name
;
187 cls_user_bucket_entry new_bucket
;
189 RGWBucketEntryPoint ep
;
190 RGWObjVersionTracker ot
;
192 bucket
.convert(&new_bucket
.bucket
);
194 if (real_clock::is_zero(creation_time
))
195 new_bucket
.creation_time
= real_clock::now();
197 new_bucket
.creation_time
= creation_time
;
199 map
<string
, bufferlist
> attrs
;
200 RGWObjectCtx
obj_ctx(store
);
202 if (update_entrypoint
) {
203 ret
= store
->get_bucket_entrypoint_info(obj_ctx
, tenant_name
, bucket_name
, ep
, &ot
, NULL
, &attrs
);
204 if (ret
< 0 && ret
!= -ENOENT
) {
205 ldout(store
->ctx(), 0) << "ERROR: store->get_bucket_entrypoint_info() returned: "
206 << cpp_strerror(-ret
) << dendl
;
210 string buckets_obj_id
;
211 rgw_get_buckets_obj(user_id
, buckets_obj_id
);
213 rgw_raw_obj
obj(store
->get_zone_params().user_uid_pool
, buckets_obj_id
);
214 ret
= store
->cls_user_add_bucket(obj
, new_bucket
);
216 ldout(store
->ctx(), 0) << "ERROR: error adding bucket to directory: "
217 << cpp_strerror(-ret
) << dendl
;
221 if (!update_entrypoint
)
227 ret
= store
->put_bucket_entrypoint_info(tenant_name
, bucket_name
, ep
, false, ot
, real_time(), &attrs
);
233 int r
= rgw_unlink_bucket(store
, user_id
, bucket
.tenant
, bucket
.name
);
235 ldout(store
->ctx(), 0) << "ERROR: failed unlinking bucket on error cleanup: "
236 << cpp_strerror(-r
) << dendl
;
241 int rgw_unlink_bucket(RGWRados
*store
, const rgw_user
& user_id
, const string
& tenant_name
, const string
& bucket_name
, bool update_entrypoint
)
245 string buckets_obj_id
;
246 rgw_get_buckets_obj(user_id
, buckets_obj_id
);
248 cls_user_bucket bucket
;
249 bucket
.name
= bucket_name
;
250 rgw_raw_obj
obj(store
->get_zone_params().user_uid_pool
, buckets_obj_id
);
251 ret
= store
->cls_user_remove_bucket(obj
, bucket
);
253 ldout(store
->ctx(), 0) << "ERROR: error removing bucket from directory: "
254 << cpp_strerror(-ret
)<< dendl
;
257 if (!update_entrypoint
)
260 RGWBucketEntryPoint ep
;
261 RGWObjVersionTracker ot
;
262 map
<string
, bufferlist
> attrs
;
263 RGWObjectCtx
obj_ctx(store
);
264 ret
= store
->get_bucket_entrypoint_info(obj_ctx
, tenant_name
, bucket_name
, ep
, &ot
, NULL
, &attrs
);
273 if (ep
.owner
!= user_id
) {
274 ldout(store
->ctx(), 0) << "bucket entry point user mismatch, can't unlink bucket: " << ep
.owner
<< " != " << user_id
<< dendl
;
279 return store
->put_bucket_entrypoint_info(tenant_name
, bucket_name
, ep
, false, ot
, real_time(), &attrs
);
282 int rgw_bucket_store_info(RGWRados
*store
, const string
& bucket_name
, bufferlist
& bl
, bool exclusive
,
283 map
<string
, bufferlist
> *pattrs
, RGWObjVersionTracker
*objv_tracker
,
285 return store
->meta_mgr
->put_entry(bucket_meta_handler
, bucket_name
, bl
, exclusive
, objv_tracker
, mtime
, pattrs
);
288 int rgw_bucket_instance_store_info(RGWRados
*store
, string
& entry
, bufferlist
& bl
, bool exclusive
,
289 map
<string
, bufferlist
> *pattrs
, RGWObjVersionTracker
*objv_tracker
,
291 return store
->meta_mgr
->put_entry(bucket_instance_meta_handler
, entry
, bl
, exclusive
, objv_tracker
, mtime
, pattrs
);
294 int rgw_bucket_instance_remove_entry(RGWRados
*store
, string
& entry
, RGWObjVersionTracker
*objv_tracker
) {
295 return store
->meta_mgr
->remove_entry(bucket_instance_meta_handler
, entry
, objv_tracker
);
// 'tenant/' is used in bucket instance keys for sync to avoid parsing ambiguity
// with the existing instance[:shard] format. once we parse the shard, the / is
// replaced with a : to match the [tenant:]instance format
void rgw_bucket_instance_key_to_oid(std::string& key)
{
  // replace tenant/ with tenant:
  auto c = key.find('/');
  if (c != std::string::npos) {
    key[c] = ':';
  }
}
// convert bucket instance oids back to the tenant/ format for metadata keys.
// it's safe to parse 'tenant:' only for oids, because they won't contain the
// optional :shard at the end
void rgw_bucket_instance_oid_to_key(std::string& oid)
{
  // find first : (could be tenant:bucket or bucket:instance)
  auto c = oid.find(':');
  if (c != std::string::npos) {
    // if we find another :, the first one was for tenant
    if (oid.find(':', c + 1) != std::string::npos) {
      oid[c] = '/';
    }
  }
}
325 int rgw_bucket_parse_bucket_instance(const string
& bucket_instance
, string
*target_bucket_instance
, int *shard_id
)
327 ssize_t pos
= bucket_instance
.rfind(':');
332 string first
= bucket_instance
.substr(0, pos
);
333 string second
= bucket_instance
.substr(pos
+ 1);
335 if (first
.find(':') == string::npos
) {
337 *target_bucket_instance
= bucket_instance
;
341 *target_bucket_instance
= first
;
343 *shard_id
= strict_strtol(second
.c_str(), 10, &err
);
351 // parse key in format: [tenant/]name:instance[:shard_id]
352 int rgw_bucket_parse_bucket_key(CephContext
*cct
, const string
& key
,
353 rgw_bucket
*bucket
, int *shard_id
)
355 boost::string_ref name
{key
};
356 boost::string_ref instance
;
359 auto pos
= name
.find('/');
360 if (pos
!= boost::string_ref::npos
) {
361 auto tenant
= name
.substr(0, pos
);
362 bucket
->tenant
.assign(tenant
.begin(), tenant
.end());
363 name
= name
.substr(pos
+ 1);
366 // split name:instance
367 pos
= name
.find(':');
368 if (pos
!= boost::string_ref::npos
) {
369 instance
= name
.substr(pos
+ 1);
370 name
= name
.substr(0, pos
);
372 bucket
->name
.assign(name
.begin(), name
.end());
374 // split instance:shard
375 pos
= instance
.find(':');
376 if (pos
== boost::string_ref::npos
) {
377 bucket
->bucket_id
.assign(instance
.begin(), instance
.end());
383 auto shard
= instance
.substr(pos
+ 1);
385 auto id
= strict_strtol(shard
.data(), 10, &err
);
387 ldout(cct
, 0) << "ERROR: failed to parse bucket shard '"
388 << instance
.data() << "': " << err
<< dendl
;
393 instance
= instance
.substr(0, pos
);
394 bucket
->bucket_id
.assign(instance
.begin(), instance
.end());
398 int rgw_bucket_set_attrs(RGWRados
*store
, RGWBucketInfo
& bucket_info
,
399 map
<string
, bufferlist
>& attrs
,
400 RGWObjVersionTracker
*objv_tracker
)
402 rgw_bucket
& bucket
= bucket_info
.bucket
;
404 if (!bucket_info
.has_instance_obj
) {
405 /* an old bucket object, need to convert it */
406 RGWObjectCtx
obj_ctx(store
);
407 int ret
= store
->convert_old_bucket_info(obj_ctx
, bucket
.tenant
, bucket
.name
);
409 ldout(store
->ctx(), 0) << "ERROR: failed converting old bucket info: " << ret
<< dendl
;
414 /* we want the bucket instance name without the oid prefix cruft */
415 string key
= bucket
.get_key();
418 ::encode(bucket_info
, bl
);
420 return rgw_bucket_instance_store_info(store
, key
, bl
, false, &attrs
, objv_tracker
, real_time());
423 static void dump_mulipart_index_results(list
<rgw_obj_index_key
>& objs_to_unlink
,
426 for (const auto& o
: objs_to_unlink
) {
427 f
->dump_string("object", o
.name
);
431 void check_bad_user_bucket_mapping(RGWRados
*store
, const rgw_user
& user_id
,
434 RGWUserBuckets user_buckets
;
435 bool is_truncated
= false;
438 CephContext
*cct
= store
->ctx();
440 size_t max_entries
= cct
->_conf
->rgw_list_buckets_max_chunk
;
443 int ret
= rgw_read_user_buckets(store
, user_id
, user_buckets
, marker
,
444 string(), max_entries
, false,
447 ldout(store
->ctx(), 0) << "failed to read user buckets: "
448 << cpp_strerror(-ret
) << dendl
;
452 map
<string
, RGWBucketEnt
>& buckets
= user_buckets
.get_buckets();
453 for (map
<string
, RGWBucketEnt
>::iterator i
= buckets
.begin();
458 RGWBucketEnt
& bucket_ent
= i
->second
;
459 rgw_bucket
& bucket
= bucket_ent
.bucket
;
461 RGWBucketInfo bucket_info
;
463 RGWObjectCtx
obj_ctx(store
);
464 int r
= store
->get_bucket_info(obj_ctx
, user_id
.tenant
, bucket
.name
, bucket_info
, &mtime
);
466 ldout(store
->ctx(), 0) << "could not get bucket info for bucket=" << bucket
<< dendl
;
470 rgw_bucket
& actual_bucket
= bucket_info
.bucket
;
472 if (actual_bucket
.name
.compare(bucket
.name
) != 0 ||
473 actual_bucket
.tenant
.compare(bucket
.tenant
) != 0 ||
474 actual_bucket
.marker
.compare(bucket
.marker
) != 0 ||
475 actual_bucket
.bucket_id
.compare(bucket
.bucket_id
) != 0) {
476 cout
<< "bucket info mismatch: expected " << actual_bucket
<< " got " << bucket
<< std::endl
;
478 cout
<< "fixing" << std::endl
;
479 r
= rgw_link_bucket(store
, user_id
, actual_bucket
, bucket_info
.creation_time
);
481 cerr
<< "failed to fix bucket: " << cpp_strerror(-r
) << std::endl
;
486 } while (is_truncated
);
489 static bool bucket_object_check_filter(const string
& oid
)
493 return rgw_obj_key::oid_to_key_in_ns(oid
, &key
, ns
);
496 int rgw_remove_object(RGWRados
*store
, RGWBucketInfo
& bucket_info
, rgw_bucket
& bucket
, rgw_obj_key
& key
)
498 RGWObjectCtx
rctx(store
);
500 if (key
.instance
.empty()) {
501 key
.instance
= "null";
504 rgw_obj
obj(bucket
, key
);
506 return store
->delete_obj(rctx
, bucket_info
, obj
, bucket_info
.versioning_status());
509 int rgw_remove_bucket(RGWRados
*store
, rgw_bucket
& bucket
, bool delete_children
)
512 map
<RGWObjCategory
, RGWStorageStats
> stats
;
513 std::vector
<rgw_bucket_dir_entry
> objs
;
514 map
<string
, bool> common_prefixes
;
516 RGWObjectCtx
obj_ctx(store
);
518 string bucket_ver
, master_ver
;
520 ret
= store
->get_bucket_info(obj_ctx
, bucket
.tenant
, bucket
.name
, info
, NULL
);
524 ret
= store
->get_bucket_stats(info
, RGW_NO_SHARD
, &bucket_ver
, &master_ver
, stats
, NULL
);
528 RGWRados::Bucket
target(store
, info
);
529 RGWRados::Bucket::List
list_op(&target
);
530 CephContext
*cct
= store
->ctx();
533 list_op
.params
.list_versions
= true;
538 ret
= list_op
.list_objects(max
, &objs
, &common_prefixes
, NULL
);
542 if (!objs
.empty() && !delete_children
) {
543 lderr(store
->ctx()) << "ERROR: could not remove non-empty bucket " << bucket
.name
<< dendl
;
547 for (const auto& obj
: objs
) {
548 rgw_obj_key
key(obj
.key
);
549 ret
= rgw_remove_object(store
, info
, bucket
, key
);
554 } while (!objs
.empty());
556 string prefix
, delimiter
;
558 ret
= abort_bucket_multiparts(store
, cct
, info
, prefix
, delimiter
);
563 ret
= rgw_bucket_sync_user_stats(store
, bucket
.tenant
, info
);
565 dout(1) << "WARNING: failed sync user stats before bucket delete. ret=" << ret
<< dendl
;
568 RGWObjVersionTracker objv_tracker
;
570 ret
= store
->delete_bucket(info
, objv_tracker
);
572 lderr(store
->ctx()) << "ERROR: could not remove bucket " << bucket
.name
<< dendl
;
576 ret
= rgw_unlink_bucket(store
, info
.owner
, bucket
.tenant
, bucket
.name
, false);
578 lderr(store
->ctx()) << "ERROR: unable to remove user bucket information" << dendl
;
584 static int aio_wait(librados::AioCompletion
*handle
)
586 librados::AioCompletion
*c
= (librados::AioCompletion
*)handle
;
588 int ret
= c
->get_return_value();
593 static int drain_handles(list
<librados::AioCompletion
*>& pending
)
596 while (!pending
.empty()) {
597 librados::AioCompletion
*handle
= pending
.front();
599 int r
= aio_wait(handle
);
607 int rgw_remove_bucket_bypass_gc(RGWRados
*store
, rgw_bucket
& bucket
,
608 int concurrent_max
, bool keep_index_consistent
)
611 map
<RGWObjCategory
, RGWStorageStats
> stats
;
612 std::vector
<rgw_bucket_dir_entry
> objs
;
613 map
<string
, bool> common_prefixes
;
615 RGWObjectCtx
obj_ctx(store
);
616 CephContext
*cct
= store
->ctx();
618 string bucket_ver
, master_ver
;
620 ret
= store
->get_bucket_info(obj_ctx
, bucket
.tenant
, bucket
.name
, info
, NULL
);
624 ret
= store
->get_bucket_stats(info
, RGW_NO_SHARD
, &bucket_ver
, &master_ver
, stats
, NULL
);
628 string prefix
, delimiter
;
630 ret
= abort_bucket_multiparts(store
, cct
, info
, prefix
, delimiter
);
635 RGWRados::Bucket
target(store
, info
);
636 RGWRados::Bucket::List
list_op(&target
);
638 list_op
.params
.list_versions
= true;
640 std::list
<librados::AioCompletion
*> handles
;
643 int max_aio
= concurrent_max
;
644 ret
= list_op
.list_objects(max
, &objs
, &common_prefixes
, NULL
);
648 while (!objs
.empty()) {
649 std::vector
<rgw_bucket_dir_entry
>::iterator it
= objs
.begin();
650 for (; it
!= objs
.end(); ++it
) {
651 RGWObjState
*astate
= NULL
;
652 rgw_obj
obj(bucket
, (*it
).key
);
654 ret
= store
->get_obj_state(&obj_ctx
, info
, obj
, &astate
, false);
655 if (ret
== -ENOENT
) {
656 dout(1) << "WARNING: cannot find obj state for obj " << obj
.get_oid() << dendl
;
660 lderr(store
->ctx()) << "ERROR: get obj state returned with error " << ret
<< dendl
;
664 if (astate
->has_manifest
) {
665 RGWObjManifest
& manifest
= astate
->manifest
;
666 RGWObjManifest::obj_iterator miter
= manifest
.obj_begin();
667 rgw_obj head_obj
= manifest
.get_obj();
668 rgw_raw_obj raw_head_obj
;
669 store
->obj_to_raw(info
.placement_rule
, head_obj
, &raw_head_obj
);
672 for (; miter
!= manifest
.obj_end() && max_aio
--; ++miter
) {
674 ret
= drain_handles(handles
);
676 lderr(store
->ctx()) << "ERROR: could not drain handles as aio completion returned with " << ret
<< dendl
;
679 max_aio
= concurrent_max
;
682 rgw_raw_obj last_obj
= miter
.get_location().get_raw_obj(store
);
683 if (last_obj
== raw_head_obj
) {
684 // have the head obj deleted at the end
688 ret
= store
->delete_raw_obj_aio(last_obj
, handles
);
690 lderr(store
->ctx()) << "ERROR: delete obj aio failed with " << ret
<< dendl
;
693 } // for all shadow objs
695 ret
= store
->delete_obj_aio(head_obj
, info
, astate
, handles
, keep_index_consistent
);
697 lderr(store
->ctx()) << "ERROR: delete obj aio failed with " << ret
<< dendl
;
703 ret
= drain_handles(handles
);
705 lderr(store
->ctx()) << "ERROR: could not drain handles as aio completion returned with " << ret
<< dendl
;
708 max_aio
= concurrent_max
;
710 } // for all RGW objects
713 ret
= list_op
.list_objects(max
, &objs
, &common_prefixes
, NULL
);
718 ret
= drain_handles(handles
);
720 lderr(store
->ctx()) << "ERROR: could not drain handles as aio completion returned with " << ret
<< dendl
;
724 ret
= rgw_bucket_sync_user_stats(store
, bucket
.tenant
, info
);
726 dout(1) << "WARNING: failed sync user stats before bucket delete. ret=" << ret
<< dendl
;
729 RGWObjVersionTracker objv_tracker
;
731 ret
= rgw_bucket_delete_bucket_obj(store
, bucket
.tenant
, bucket
.name
, objv_tracker
);
733 lderr(store
->ctx()) << "ERROR: could not remove bucket " << bucket
.name
<< "with ret as " << ret
<< dendl
;
737 if (!store
->is_syncing_bucket_meta(bucket
)) {
738 RGWObjVersionTracker objv_tracker
;
739 string entry
= bucket
.get_key();
740 ret
= rgw_bucket_instance_remove_entry(store
, entry
, &objv_tracker
);
742 lderr(store
->ctx()) << "ERROR: could not remove bucket instance entry" << bucket
.name
<< "with ret as " << ret
<< dendl
;
747 ret
= rgw_unlink_bucket(store
, info
.owner
, bucket
.tenant
, bucket
.name
, false);
749 lderr(store
->ctx()) << "ERROR: unable to remove user bucket information" << dendl
;
755 int rgw_bucket_delete_bucket_obj(RGWRados
*store
,
756 const string
& tenant_name
,
757 const string
& bucket_name
,
758 RGWObjVersionTracker
& objv_tracker
)
762 rgw_make_bucket_entry_name(tenant_name
, bucket_name
, key
);
763 return store
->meta_mgr
->remove_entry(bucket_meta_handler
, key
, &objv_tracker
);
/* Copy a non-empty error message into an optional sink; no-op when either
 * the sink is null or the message is empty. */
static void set_err_msg(std::string *sink, std::string msg)
{
  if (sink && !msg.empty())
    *sink = msg;
}
772 int RGWBucket::init(RGWRados
*storage
, RGWBucketAdminOpState
& op_state
)
779 rgw_user user_id
= op_state
.get_user_id();
780 tenant
= user_id
.tenant
;
781 bucket_name
= op_state
.get_bucket_name();
782 RGWUserBuckets user_buckets
;
783 RGWObjectCtx
obj_ctx(store
);
785 if (bucket_name
.empty() && user_id
.empty())
788 if (!bucket_name
.empty()) {
789 int r
= store
->get_bucket_info(obj_ctx
, tenant
, bucket_name
, bucket_info
, NULL
);
791 ldout(store
->ctx(), 0) << "could not get bucket info for bucket=" << bucket_name
<< dendl
;
795 op_state
.set_bucket(bucket_info
.bucket
);
798 if (!user_id
.empty()) {
799 int r
= rgw_get_user_info_by_uid(store
, user_id
, user_info
);
803 op_state
.display_name
= user_info
.display_name
;
810 int RGWBucket::link(RGWBucketAdminOpState
& op_state
, std::string
*err_msg
)
812 if (!op_state
.is_user_op()) {
813 set_err_msg(err_msg
, "empty user id");
817 string bucket_id
= op_state
.get_bucket_id();
818 if (bucket_id
.empty()) {
819 set_err_msg(err_msg
, "empty bucket instance id");
823 std::string display_name
= op_state
.get_user_display_name();
824 rgw_bucket bucket
= op_state
.get_bucket();
826 const rgw_pool
& root_pool
= store
->get_zone_params().domain_root
;
827 rgw_raw_obj
obj(root_pool
, bucket
.name
);
828 RGWObjVersionTracker objv_tracker
;
830 map
<string
, bufferlist
> attrs
;
831 RGWBucketInfo bucket_info
;
833 string key
= bucket
.name
+ ":" + bucket_id
;
834 RGWObjectCtx
obj_ctx(store
);
835 int r
= store
->get_bucket_instance_info(obj_ctx
, key
, bucket_info
, NULL
, &attrs
);
840 rgw_user user_id
= op_state
.get_user_id();
842 map
<string
, bufferlist
>::iterator aiter
= attrs
.find(RGW_ATTR_ACL
);
843 if (aiter
!= attrs
.end()) {
844 bufferlist aclbl
= aiter
->second
;
845 RGWAccessControlPolicy policy
;
848 bufferlist::iterator iter
= aclbl
.begin();
849 ::decode(policy
, iter
);
850 owner
= policy
.get_owner();
851 } catch (buffer::error
& err
) {
852 set_err_msg(err_msg
, "couldn't decode policy");
856 r
= rgw_unlink_bucket(store
, owner
.get_id(), bucket
.tenant
, bucket
.name
, false);
858 set_err_msg(err_msg
, "could not unlink policy from user " + owner
.get_id().to_str());
862 // now update the user for the bucket...
863 if (display_name
.empty()) {
864 ldout(store
->ctx(), 0) << "WARNING: user " << user_info
.user_id
<< " has no display name set" << dendl
;
866 policy
.create_default(user_info
.user_id
, display_name
);
868 owner
= policy
.get_owner();
869 r
= store
->set_bucket_owner(bucket_info
.bucket
, owner
);
871 set_err_msg(err_msg
, "failed to set bucket owner: " + cpp_strerror(-r
));
875 // ...and encode the acl
877 policy
.encode(aclbl
);
879 r
= store
->system_obj_set_attr(NULL
, obj
, RGW_ATTR_ACL
, aclbl
, &objv_tracker
);
884 RGWAccessControlPolicy policy_instance
;
885 policy_instance
.create_default(user_info
.user_id
, display_name
);
887 policy_instance
.encode(aclbl
);
889 string oid_bucket_instance
= RGW_BUCKET_INSTANCE_MD_PREFIX
+ key
;
890 rgw_raw_obj
obj_bucket_instance(root_pool
, oid_bucket_instance
);
891 r
= store
->system_obj_set_attr(NULL
, obj_bucket_instance
, RGW_ATTR_ACL
, aclbl
, &objv_tracker
);
896 r
= rgw_link_bucket(store
, user_info
.user_id
, bucket_info
.bucket
, real_time());
905 int RGWBucket::unlink(RGWBucketAdminOpState
& op_state
, std::string
*err_msg
)
907 rgw_bucket bucket
= op_state
.get_bucket();
909 if (!op_state
.is_user_op()) {
910 set_err_msg(err_msg
, "could not fetch user or user bucket info");
914 int r
= rgw_unlink_bucket(store
, user_info
.user_id
, bucket
.tenant
, bucket
.name
);
916 set_err_msg(err_msg
, "error unlinking bucket" + cpp_strerror(-r
));
922 int RGWBucket::remove(RGWBucketAdminOpState
& op_state
, bool bypass_gc
,
923 bool keep_index_consistent
, std::string
*err_msg
)
925 bool delete_children
= op_state
.will_delete_children();
926 rgw_bucket bucket
= op_state
.get_bucket();
930 if (delete_children
) {
931 ret
= rgw_remove_bucket_bypass_gc(store
, bucket
, op_state
.get_max_aio(), keep_index_consistent
);
933 set_err_msg(err_msg
, "purge objects should be set for gc to be bypassed");
937 ret
= rgw_remove_bucket(store
, bucket
, delete_children
);
941 set_err_msg(err_msg
, "unable to remove bucket" + cpp_strerror(-ret
));
948 int RGWBucket::remove_object(RGWBucketAdminOpState
& op_state
, std::string
*err_msg
)
950 rgw_bucket bucket
= op_state
.get_bucket();
951 std::string object_name
= op_state
.get_object_name();
953 rgw_obj_key
key(object_name
);
955 int ret
= rgw_remove_object(store
, bucket_info
, bucket
, key
);
957 set_err_msg(err_msg
, "unable to remove object" + cpp_strerror(-ret
));
964 static void dump_bucket_index(map
<string
, rgw_bucket_dir_entry
> result
, Formatter
*f
)
966 map
<string
, rgw_bucket_dir_entry
>::iterator iter
;
967 for (iter
= result
.begin(); iter
!= result
.end(); ++iter
) {
968 f
->dump_string("object", iter
->first
);
972 static void dump_bucket_usage(map
<RGWObjCategory
, RGWStorageStats
>& stats
, Formatter
*formatter
)
974 map
<RGWObjCategory
, RGWStorageStats
>::iterator iter
;
976 formatter
->open_object_section("usage");
977 for (iter
= stats
.begin(); iter
!= stats
.end(); ++iter
) {
978 RGWStorageStats
& s
= iter
->second
;
979 const char *cat_name
= rgw_obj_category_name(iter
->first
);
980 formatter
->open_object_section(cat_name
);
982 formatter
->close_section();
984 formatter
->close_section();
987 static void dump_index_check(map
<RGWObjCategory
, RGWStorageStats
> existing_stats
,
988 map
<RGWObjCategory
, RGWStorageStats
> calculated_stats
,
989 Formatter
*formatter
)
991 formatter
->open_object_section("check_result");
992 formatter
->open_object_section("existing_header");
993 dump_bucket_usage(existing_stats
, formatter
);
994 formatter
->close_section();
995 formatter
->open_object_section("calculated_header");
996 dump_bucket_usage(calculated_stats
, formatter
);
997 formatter
->close_section();
998 formatter
->close_section();
1001 int RGWBucket::check_bad_index_multipart(RGWBucketAdminOpState
& op_state
,
1002 RGWFormatterFlusher
& flusher
,std::string
*err_msg
)
1004 bool fix_index
= op_state
.will_fix_index();
1005 rgw_bucket bucket
= op_state
.get_bucket();
1009 map
<string
, bool> common_prefixes
;
1012 map
<string
, bool> meta_objs
;
1013 map
<rgw_obj_index_key
, string
> all_objs
;
1015 RGWBucketInfo bucket_info
;
1016 RGWObjectCtx
obj_ctx(store
);
1017 int r
= store
->get_bucket_instance_info(obj_ctx
, bucket
, bucket_info
, nullptr, nullptr);
1019 ldout(store
->ctx(), 0) << "ERROR: " << __func__
<< "(): get_bucket_instance_info(bucket=" << bucket
<< ") returned r=" << r
<< dendl
;
1023 RGWRados::Bucket
target(store
, bucket_info
);
1024 RGWRados::Bucket::List
list_op(&target
);
1026 list_op
.params
.list_versions
= true;
1027 list_op
.params
.ns
= RGW_OBJ_NS_MULTIPART
;
1030 vector
<rgw_bucket_dir_entry
> result
;
1031 int r
= list_op
.list_objects(max
, &result
, &common_prefixes
, &is_truncated
);
1033 set_err_msg(err_msg
, "failed to list objects in bucket=" + bucket
.name
+
1034 " err=" + cpp_strerror(-r
));
1039 vector
<rgw_bucket_dir_entry
>::iterator iter
;
1040 for (iter
= result
.begin(); iter
!= result
.end(); ++iter
) {
1041 rgw_obj_index_key key
= iter
->key
;
1042 rgw_obj
obj(bucket
, key
);
1043 string oid
= obj
.get_oid();
1045 int pos
= oid
.find_last_of('.');
1047 /* obj has no suffix */
1048 all_objs
[key
] = oid
;
1050 /* obj has suffix */
1051 string name
= oid
.substr(0, pos
);
1052 string suffix
= oid
.substr(pos
+ 1);
1054 if (suffix
.compare("meta") == 0) {
1055 meta_objs
[name
] = true;
1057 all_objs
[key
] = name
;
1062 } while (is_truncated
);
1064 list
<rgw_obj_index_key
> objs_to_unlink
;
1065 Formatter
*f
= flusher
.get_formatter();
1067 f
->open_array_section("invalid_multipart_entries");
1069 for (auto aiter
= all_objs
.begin(); aiter
!= all_objs
.end(); ++aiter
) {
1070 string
& name
= aiter
->second
;
1072 if (meta_objs
.find(name
) == meta_objs
.end()) {
1073 objs_to_unlink
.push_back(aiter
->first
);
1076 if (objs_to_unlink
.size() > max
) {
1078 int r
= store
->remove_objs_from_index(bucket_info
, objs_to_unlink
);
1080 set_err_msg(err_msg
, "ERROR: remove_obj_from_index() returned error: " +
1086 dump_mulipart_index_results(objs_to_unlink
, flusher
.get_formatter());
1088 objs_to_unlink
.clear();
1093 int r
= store
->remove_objs_from_index(bucket_info
, objs_to_unlink
);
1095 set_err_msg(err_msg
, "ERROR: remove_obj_from_index() returned error: " +
1102 dump_mulipart_index_results(objs_to_unlink
, f
);
1109 int RGWBucket::check_object_index(RGWBucketAdminOpState
& op_state
,
1110 RGWFormatterFlusher
& flusher
,
1111 std::string
*err_msg
)
1114 bool fix_index
= op_state
.will_fix_index();
1116 rgw_bucket bucket
= op_state
.get_bucket();
1119 set_err_msg(err_msg
, "check-objects flag requires fix index enabled");
1123 store
->cls_obj_set_bucket_tag_timeout(bucket_info
, BUCKET_TAG_TIMEOUT
);
1126 rgw_obj_index_key marker
;
1127 bool is_truncated
= true;
1129 Formatter
*formatter
= flusher
.get_formatter();
1130 formatter
->open_object_section("objects");
1131 while (is_truncated
) {
1132 map
<string
, rgw_bucket_dir_entry
> result
;
1134 int r
= store
->cls_bucket_list(bucket_info
, RGW_NO_SHARD
, marker
, prefix
, 1000, true,
1135 result
, &is_truncated
, &marker
,
1136 bucket_object_check_filter
);
1139 } else if (r
< 0 && r
!= -ENOENT
) {
1140 set_err_msg(err_msg
, "ERROR: failed operation r=" + cpp_strerror(-r
));
1144 dump_bucket_index(result
, formatter
);
1149 formatter
->close_section();
1151 store
->cls_obj_set_bucket_tag_timeout(bucket_info
, 0);
1157 int RGWBucket::check_index(RGWBucketAdminOpState
& op_state
,
1158 map
<RGWObjCategory
, RGWStorageStats
>& existing_stats
,
1159 map
<RGWObjCategory
, RGWStorageStats
>& calculated_stats
,
1160 std::string
*err_msg
)
1162 rgw_bucket bucket
= op_state
.get_bucket();
1163 bool fix_index
= op_state
.will_fix_index();
1165 int r
= store
->bucket_check_index(bucket_info
, &existing_stats
, &calculated_stats
);
1167 set_err_msg(err_msg
, "failed to check index error=" + cpp_strerror(-r
));
1172 r
= store
->bucket_rebuild_index(bucket_info
);
1174 set_err_msg(err_msg
, "failed to rebuild index err=" + cpp_strerror(-r
));
1183 int RGWBucket::policy_bl_to_stream(bufferlist
& bl
, ostream
& o
)
1185 RGWAccessControlPolicy_S3
policy(g_ceph_context
);
1186 bufferlist::iterator iter
= bl
.begin();
1188 policy
.decode(iter
);
1189 } catch (buffer::error
& err
) {
1190 dout(0) << "ERROR: caught buffer::error, could not decode policy" << dendl
;
1197 static int policy_decode(RGWRados
*store
, bufferlist
& bl
, RGWAccessControlPolicy
& policy
)
1199 bufferlist::iterator iter
= bl
.begin();
1201 policy
.decode(iter
);
1202 } catch (buffer::error
& err
) {
1203 ldout(store
->ctx(), 0) << "ERROR: caught buffer::error, could not decode policy" << dendl
;
1209 int RGWBucket::get_policy(RGWBucketAdminOpState
& op_state
, RGWAccessControlPolicy
& policy
)
1211 std::string object_name
= op_state
.get_object_name();
1212 rgw_bucket bucket
= op_state
.get_bucket();
1213 RGWObjectCtx
obj_ctx(store
);
1215 RGWBucketInfo bucket_info
;
1216 map
<string
, bufferlist
> attrs
;
1217 int ret
= store
->get_bucket_info(obj_ctx
, bucket
.tenant
, bucket
.name
, bucket_info
, NULL
, &attrs
);
1222 if (!object_name
.empty()) {
1224 rgw_obj
obj(bucket
, object_name
);
1226 RGWRados::Object
op_target(store
, bucket_info
, obj_ctx
, obj
);
1227 RGWRados::Object::Read
rop(&op_target
);
1229 int ret
= rop
.get_attr(RGW_ATTR_ACL
, bl
);
1233 return policy_decode(store
, bl
, policy
);
1236 map
<string
, bufferlist
>::iterator aiter
= attrs
.find(RGW_ATTR_ACL
);
1237 if (aiter
== attrs
.end()) {
1241 return policy_decode(store
, aiter
->second
, policy
);
1245 int RGWBucketAdminOp::get_policy(RGWRados
*store
, RGWBucketAdminOpState
& op_state
,
1246 RGWAccessControlPolicy
& policy
)
1250 int ret
= bucket
.init(store
, op_state
);
1254 ret
= bucket
.get_policy(op_state
, policy
);
1261 /* Wrappers to facilitate RESTful interface */
1264 int RGWBucketAdminOp::get_policy(RGWRados
*store
, RGWBucketAdminOpState
& op_state
,
1265 RGWFormatterFlusher
& flusher
)
1267 RGWAccessControlPolicy
policy(store
->ctx());
1269 int ret
= get_policy(store
, op_state
, policy
);
1273 Formatter
*formatter
= flusher
.get_formatter();
1277 formatter
->open_object_section("policy");
1278 policy
.dump(formatter
);
1279 formatter
->close_section();
1286 int RGWBucketAdminOp::dump_s3_policy(RGWRados
*store
, RGWBucketAdminOpState
& op_state
,
1289 RGWAccessControlPolicy_S3
policy(store
->ctx());
1291 int ret
= get_policy(store
, op_state
, policy
);
1300 int RGWBucketAdminOp::unlink(RGWRados
*store
, RGWBucketAdminOpState
& op_state
)
1304 int ret
= bucket
.init(store
, op_state
);
1308 return bucket
.unlink(op_state
);
1311 int RGWBucketAdminOp::link(RGWRados
*store
, RGWBucketAdminOpState
& op_state
, string
*err
)
1315 int ret
= bucket
.init(store
, op_state
);
1319 return bucket
.link(op_state
, err
);
1323 int RGWBucketAdminOp::check_index(RGWRados
*store
, RGWBucketAdminOpState
& op_state
,
1324 RGWFormatterFlusher
& flusher
)
1327 map
<RGWObjCategory
, RGWStorageStats
> existing_stats
;
1328 map
<RGWObjCategory
, RGWStorageStats
> calculated_stats
;
1333 ret
= bucket
.init(store
, op_state
);
1337 Formatter
*formatter
= flusher
.get_formatter();
1340 ret
= bucket
.check_bad_index_multipart(op_state
, flusher
);
1344 ret
= bucket
.check_object_index(op_state
, flusher
);
1348 ret
= bucket
.check_index(op_state
, existing_stats
, calculated_stats
);
1352 dump_index_check(existing_stats
, calculated_stats
, formatter
);
1358 int RGWBucketAdminOp::remove_bucket(RGWRados
*store
, RGWBucketAdminOpState
& op_state
,
1359 bool bypass_gc
, bool keep_index_consistent
)
1363 int ret
= bucket
.init(store
, op_state
);
1367 std::string err_msg
;
1368 ret
= bucket
.remove(op_state
, bypass_gc
, keep_index_consistent
, &err_msg
);
1369 if (!err_msg
.empty()) {
1370 lderr(store
->ctx()) << "ERROR: " << err_msg
<< dendl
;
1375 int RGWBucketAdminOp::remove_object(RGWRados
*store
, RGWBucketAdminOpState
& op_state
)
1379 int ret
= bucket
.init(store
, op_state
);
1383 return bucket
.remove_object(op_state
);
1386 static int bucket_stats(RGWRados
*store
, const std::string
& tenant_name
, std::string
& bucket_name
, Formatter
*formatter
)
1388 RGWBucketInfo bucket_info
;
1389 map
<RGWObjCategory
, RGWStorageStats
> stats
;
1392 RGWObjectCtx
obj_ctx(store
);
1393 int r
= store
->get_bucket_info(obj_ctx
, tenant_name
, bucket_name
, bucket_info
, &mtime
);
1397 rgw_bucket
& bucket
= bucket_info
.bucket
;
1399 string bucket_ver
, master_ver
;
1401 int ret
= store
->get_bucket_stats(bucket_info
, RGW_NO_SHARD
, &bucket_ver
, &master_ver
, stats
, &max_marker
);
1403 cerr
<< "error getting bucket stats ret=" << ret
<< std::endl
;
1409 formatter
->open_object_section("stats");
1410 formatter
->dump_string("bucket", bucket
.name
);
1411 formatter
->dump_string("zonegroup", bucket_info
.zonegroup
);
1412 formatter
->dump_string("placement_rule", bucket_info
.placement_rule
);
1413 ::encode_json("explicit_placement", bucket
.explicit_placement
, formatter
);
1414 formatter
->dump_string("id", bucket
.bucket_id
);
1415 formatter
->dump_string("marker", bucket
.marker
);
1416 formatter
->dump_stream("index_type") << bucket_info
.index_type
;
1417 ::encode_json("owner", bucket_info
.owner
, formatter
);
1418 formatter
->dump_string("ver", bucket_ver
);
1419 formatter
->dump_string("master_ver", master_ver
);
1420 formatter
->dump_stream("mtime") << ut
;
1421 formatter
->dump_string("max_marker", max_marker
);
1422 dump_bucket_usage(stats
, formatter
);
1423 encode_json("bucket_quota", bucket_info
.quota
, formatter
);
1424 formatter
->close_section();
1429 int RGWBucketAdminOp::limit_check(RGWRados
*store
,
1430 RGWBucketAdminOpState
& op_state
,
1431 const std::list
<std::string
>& user_ids
,
1432 RGWFormatterFlusher
& flusher
,
1436 const size_t max_entries
=
1437 store
->ctx()->_conf
->rgw_list_buckets_max_chunk
;
1439 const size_t safe_max_objs_per_shard
=
1440 store
->ctx()->_conf
->rgw_safe_max_objects_per_shard
;
1442 uint16_t shard_warn_pct
=
1443 store
->ctx()->_conf
->rgw_shard_warning_threshold
;
1444 if (shard_warn_pct
> 100)
1445 shard_warn_pct
= 90;
1447 Formatter
*formatter
= flusher
.get_formatter();
1450 formatter
->open_array_section("users");
1452 for (const auto& user_id
: user_ids
) {
1453 formatter
->open_object_section("user");
1454 formatter
->dump_string("user_id", user_id
);
1456 formatter
->open_array_section("buckets");
1458 RGWUserBuckets buckets
;
1462 ret
= rgw_read_user_buckets(store
, user_id
, buckets
,
1463 marker
, string(), max_entries
, false,
1468 map
<string
, RGWBucketEnt
>& m_buckets
= buckets
.get_buckets();
1470 for (const auto& iter
: m_buckets
) {
1471 auto& bucket
= iter
.second
.bucket
;
1472 uint32_t num_shards
= 1;
1473 uint64_t num_objects
= 0;
1475 /* need info for num_shards */
1477 RGWObjectCtx
obj_ctx(store
);
1479 marker
= bucket
.name
; /* Casey's location for marker update,
1480 * as we may now not reach the end of
1483 ret
= store
->get_bucket_info(obj_ctx
, bucket
.tenant
, bucket
.name
,
1488 /* need stats for num_entries */
1489 string bucket_ver
, master_ver
;
1490 std::map
<RGWObjCategory
, RGWStorageStats
> stats
;
1491 ret
= store
->get_bucket_stats(info
, RGW_NO_SHARD
, &bucket_ver
,
1492 &master_ver
, stats
, nullptr);
1497 for (const auto& s
: stats
) {
1498 num_objects
+= s
.second
.num_objects
;
1501 num_shards
= info
.num_shards
;
1502 uint64_t objs_per_shard
=
1503 (num_shards
) ? num_objects
/num_shards
: num_objects
;
1507 if (objs_per_shard
> safe_max_objs_per_shard
) {
1509 100 - (safe_max_objs_per_shard
/objs_per_shard
* 100);
1510 ss
<< boost::format("OVER %4f%%") % over
;
1514 objs_per_shard
/ safe_max_objs_per_shard
* 100;
1515 if (fill_pct
>= shard_warn_pct
) {
1516 ss
<< boost::format("WARN %4f%%") % fill_pct
;
1523 if (warn
|| (! warnings_only
)) {
1524 formatter
->open_object_section("bucket");
1525 formatter
->dump_string("bucket", bucket
.name
);
1526 formatter
->dump_string("tenant", bucket
.tenant
);
1527 formatter
->dump_int("num_objects", num_objects
);
1528 formatter
->dump_int("num_shards", num_shards
);
1529 formatter
->dump_int("objects_per_shard", objs_per_shard
);
1530 formatter
->dump_string("fill_status", ss
.str());
1531 formatter
->close_section();
1536 done
= (m_buckets
.size() < max_entries
);
1537 } while (!done
); /* foreach: bucket */
1539 formatter
->close_section();
1540 formatter
->close_section();
1541 formatter
->flush(cout
);
1543 } /* foreach: user_id */
1545 formatter
->close_section();
1546 formatter
->flush(cout
);
1549 } /* RGWBucketAdminOp::limit_check */
1551 int RGWBucketAdminOp::info(RGWRados
*store
, RGWBucketAdminOpState
& op_state
,
1552 RGWFormatterFlusher
& flusher
)
1557 string bucket_name
= op_state
.get_bucket_name();
1559 if (!bucket_name
.empty()) {
1560 ret
= bucket
.init(store
, op_state
);
1565 Formatter
*formatter
= flusher
.get_formatter();
1568 CephContext
*cct
= store
->ctx();
1570 const size_t max_entries
= cct
->_conf
->rgw_list_buckets_max_chunk
;
1572 bool show_stats
= op_state
.will_fetch_stats();
1573 rgw_user user_id
= op_state
.get_user_id();
1574 if (op_state
.is_user_op()) {
1575 formatter
->open_array_section("buckets");
1577 RGWUserBuckets buckets
;
1579 bool is_truncated
= false;
1582 ret
= rgw_read_user_buckets(store
, op_state
.get_user_id(), buckets
,
1583 marker
, string(), max_entries
, false,
1588 map
<string
, RGWBucketEnt
>& m
= buckets
.get_buckets();
1589 map
<string
, RGWBucketEnt
>::iterator iter
;
1591 for (iter
= m
.begin(); iter
!= m
.end(); ++iter
) {
1592 std::string obj_name
= iter
->first
;
1594 bucket_stats(store
, user_id
.tenant
, obj_name
, formatter
);
1596 formatter
->dump_string("bucket", obj_name
);
1602 } while (is_truncated
);
1604 formatter
->close_section();
1605 } else if (!bucket_name
.empty()) {
1606 bucket_stats(store
, user_id
.tenant
, bucket_name
, formatter
);
1608 RGWAccessHandle handle
;
1610 formatter
->open_array_section("buckets");
1611 if (store
->list_buckets_init(&handle
) >= 0) {
1612 rgw_bucket_dir_entry obj
;
1613 while (store
->list_buckets_next(obj
, &handle
) >= 0) {
1615 bucket_stats(store
, user_id
.tenant
, obj
.key
.name
, formatter
);
1617 formatter
->dump_string("bucket", obj
.key
.name
);
1621 formatter
->close_section();
1630 void rgw_data_change::dump(Formatter
*f
) const
1633 switch (entity_type
) {
1634 case ENTITY_TYPE_BUCKET
:
1640 encode_json("entity_type", type
, f
);
1641 encode_json("key", key
, f
);
1642 utime_t
ut(timestamp
);
1643 encode_json("timestamp", ut
, f
);
1646 void rgw_data_change::decode_json(JSONObj
*obj
) {
1648 JSONDecoder::decode_json("entity_type", s
, obj
);
1649 if (s
== "bucket") {
1650 entity_type
= ENTITY_TYPE_BUCKET
;
1652 entity_type
= ENTITY_TYPE_UNKNOWN
;
1654 JSONDecoder::decode_json("key", key
, obj
);
1656 JSONDecoder::decode_json("timestamp", ut
, obj
);
1657 timestamp
= ut
.to_real_time();
1660 void rgw_data_change_log_entry::dump(Formatter
*f
) const
1662 encode_json("log_id", log_id
, f
);
1663 utime_t
ut(log_timestamp
);
1664 encode_json("log_timestamp", ut
, f
);
1665 encode_json("entry", entry
, f
);
1668 void rgw_data_change_log_entry::decode_json(JSONObj
*obj
) {
1669 JSONDecoder::decode_json("log_id", log_id
, obj
);
1671 JSONDecoder::decode_json("log_timestamp", ut
, obj
);
1672 log_timestamp
= ut
.to_real_time();
1673 JSONDecoder::decode_json("entry", entry
, obj
);
1676 int RGWDataChangesLog::choose_oid(const rgw_bucket_shard
& bs
) {
1677 const string
& name
= bs
.bucket
.name
;
1678 int shard_shift
= (bs
.shard_id
> 0 ? bs
.shard_id
: 0);
1679 uint32_t r
= (ceph_str_hash_linux(name
.c_str(), name
.size()) + shard_shift
) % num_shards
;
1684 int RGWDataChangesLog::renew_entries()
1686 if (!store
->need_to_log_data())
1689 /* we can't keep the bucket name as part of the cls_log_entry, and we need
1690 * it later, so we keep two lists under the map */
1691 map
<int, pair
<list
<rgw_bucket_shard
>, list
<cls_log_entry
> > > m
;
1694 map
<rgw_bucket_shard
, bool> entries
;
1695 entries
.swap(cur_cycle
);
1698 map
<rgw_bucket_shard
, bool>::iterator iter
;
1700 real_time ut
= real_clock::now();
1701 for (iter
= entries
.begin(); iter
!= entries
.end(); ++iter
) {
1702 const rgw_bucket_shard
& bs
= iter
->first
;
1704 int index
= choose_oid(bs
);
1706 cls_log_entry entry
;
1708 rgw_data_change change
;
1710 change
.entity_type
= ENTITY_TYPE_BUCKET
;
1711 change
.key
= bs
.get_key();
1712 change
.timestamp
= ut
;
1713 ::encode(change
, bl
);
1715 store
->time_log_prepare_entry(entry
, ut
, section
, change
.key
, bl
);
1717 m
[index
].first
.push_back(bs
);
1718 m
[index
].second
.emplace_back(std::move(entry
));
1721 map
<int, pair
<list
<rgw_bucket_shard
>, list
<cls_log_entry
> > >::iterator miter
;
1722 for (miter
= m
.begin(); miter
!= m
.end(); ++miter
) {
1723 list
<cls_log_entry
>& entries
= miter
->second
.second
;
1725 real_time now
= real_clock::now();
1727 int ret
= store
->time_log_add(oids
[miter
->first
], entries
, NULL
);
1729 /* we don't really need to have a special handling for failed cases here,
1730 * as this is just an optimization. */
1731 lderr(cct
) << "ERROR: store->time_log_add() returned " << ret
<< dendl
;
1735 real_time expiration
= now
;
1736 expiration
+= make_timespan(cct
->_conf
->rgw_data_log_window
);
1738 list
<rgw_bucket_shard
>& buckets
= miter
->second
.first
;
1739 list
<rgw_bucket_shard
>::iterator liter
;
1740 for (liter
= buckets
.begin(); liter
!= buckets
.end(); ++liter
) {
1741 update_renewed(*liter
, expiration
);
1748 void RGWDataChangesLog::_get_change(const rgw_bucket_shard
& bs
, ChangeStatusPtr
& status
)
1750 assert(lock
.is_locked());
1751 if (!changes
.find(bs
, status
)) {
1752 status
= ChangeStatusPtr(new ChangeStatus
);
1753 changes
.add(bs
, status
);
1757 void RGWDataChangesLog::register_renew(rgw_bucket_shard
& bs
)
1759 Mutex::Locker
l(lock
);
1760 cur_cycle
[bs
] = true;
1763 void RGWDataChangesLog::update_renewed(rgw_bucket_shard
& bs
, real_time
& expiration
)
1765 Mutex::Locker
l(lock
);
1766 ChangeStatusPtr status
;
1767 _get_change(bs
, status
);
1769 ldout(cct
, 20) << "RGWDataChangesLog::update_renewd() bucket_name=" << bs
.bucket
.name
<< " shard_id=" << bs
.shard_id
<< " expiration=" << expiration
<< dendl
;
1770 status
->cur_expiration
= expiration
;
1773 int RGWDataChangesLog::get_log_shard_id(rgw_bucket
& bucket
, int shard_id
) {
1774 rgw_bucket_shard
bs(bucket
, shard_id
);
1776 return choose_oid(bs
);
1779 int RGWDataChangesLog::add_entry(rgw_bucket
& bucket
, int shard_id
) {
1780 if (!store
->need_to_log_data())
1783 rgw_bucket_shard
bs(bucket
, shard_id
);
1785 int index
= choose_oid(bs
);
1786 mark_modified(index
, bs
);
1790 ChangeStatusPtr status
;
1791 _get_change(bs
, status
);
1795 real_time now
= real_clock::now();
1797 status
->lock
->Lock();
1799 ldout(cct
, 20) << "RGWDataChangesLog::add_entry() bucket.name=" << bucket
.name
<< " shard_id=" << shard_id
<< " now=" << now
<< " cur_expiration=" << status
->cur_expiration
<< dendl
;
1801 if (now
< status
->cur_expiration
) {
1802 /* no need to send, recently completed */
1803 status
->lock
->Unlock();
1809 RefCountedCond
*cond
;
1811 if (status
->pending
) {
1812 cond
= status
->cond
;
1816 status
->cond
->get();
1817 status
->lock
->Unlock();
1819 int ret
= cond
->wait();
1827 status
->cond
= new RefCountedCond
;
1828 status
->pending
= true;
1830 string
& oid
= oids
[index
];
1831 real_time expiration
;
1836 status
->cur_sent
= now
;
1839 expiration
+= ceph::make_timespan(cct
->_conf
->rgw_data_log_window
);
1841 status
->lock
->Unlock();
1844 rgw_data_change change
;
1845 change
.entity_type
= ENTITY_TYPE_BUCKET
;
1846 change
.key
= bs
.get_key();
1847 change
.timestamp
= now
;
1848 ::encode(change
, bl
);
1851 ldout(cct
, 20) << "RGWDataChangesLog::add_entry() sending update with now=" << now
<< " cur_expiration=" << expiration
<< dendl
;
1853 ret
= store
->time_log_add(oid
, now
, section
, change
.key
, bl
);
1855 now
= real_clock::now();
1857 status
->lock
->Lock();
1859 } while (!ret
&& real_clock::now() > expiration
);
1861 cond
= status
->cond
;
1863 status
->pending
= false;
1864 status
->cur_expiration
= status
->cur_sent
; /* time of when operation started, not completed */
1865 status
->cur_expiration
+= make_timespan(cct
->_conf
->rgw_data_log_window
);
1866 status
->cond
= NULL
;
1867 status
->lock
->Unlock();
1875 int RGWDataChangesLog::list_entries(int shard
, const real_time
& start_time
, const real_time
& end_time
, int max_entries
,
1876 list
<rgw_data_change_log_entry
>& entries
,
1877 const string
& marker
,
1880 if (shard
>= num_shards
)
1883 list
<cls_log_entry
> log_entries
;
1885 int ret
= store
->time_log_list(oids
[shard
], start_time
, end_time
,
1886 max_entries
, log_entries
, marker
,
1887 out_marker
, truncated
);
1891 list
<cls_log_entry
>::iterator iter
;
1892 for (iter
= log_entries
.begin(); iter
!= log_entries
.end(); ++iter
) {
1893 rgw_data_change_log_entry log_entry
;
1894 log_entry
.log_id
= iter
->id
;
1895 real_time rt
= iter
->timestamp
.to_real_time();
1896 log_entry
.log_timestamp
= rt
;
1897 bufferlist::iterator liter
= iter
->data
.begin();
1899 ::decode(log_entry
.entry
, liter
);
1900 } catch (buffer::error
& err
) {
1901 lderr(cct
) << "ERROR: failed to decode data changes log entry" << dendl
;
1904 entries
.push_back(log_entry
);
1910 int RGWDataChangesLog::list_entries(const real_time
& start_time
, const real_time
& end_time
, int max_entries
,
1911 list
<rgw_data_change_log_entry
>& entries
, LogMarker
& marker
, bool *ptruncated
) {
1915 for (; marker
.shard
< num_shards
&& (int)entries
.size() < max_entries
;
1916 marker
.shard
++, marker
.marker
.clear()) {
1917 int ret
= list_entries(marker
.shard
, start_time
, end_time
, max_entries
- entries
.size(), entries
,
1918 marker
.marker
, NULL
, &truncated
);
1919 if (ret
== -ENOENT
) {
1931 *ptruncated
= (marker
.shard
< num_shards
);
1936 int RGWDataChangesLog::get_info(int shard_id
, RGWDataChangesLogInfo
*info
)
1938 if (shard_id
>= num_shards
)
1941 string oid
= oids
[shard_id
];
1943 cls_log_header header
;
1945 int ret
= store
->time_log_info(oid
, &header
);
1946 if ((ret
< 0) && (ret
!= -ENOENT
))
1949 info
->marker
= header
.max_marker
;
1950 info
->last_update
= header
.max_time
.to_real_time();
1955 int RGWDataChangesLog::trim_entries(int shard_id
, const real_time
& start_time
, const real_time
& end_time
,
1956 const string
& start_marker
, const string
& end_marker
)
1960 if (shard_id
> num_shards
)
1963 ret
= store
->time_log_trim(oids
[shard_id
], start_time
, end_time
, start_marker
, end_marker
);
1965 if (ret
== -ENOENT
|| ret
== -ENODATA
)
1971 int RGWDataChangesLog::trim_entries(const real_time
& start_time
, const real_time
& end_time
,
1972 const string
& start_marker
, const string
& end_marker
)
1974 for (int shard
= 0; shard
< num_shards
; shard
++) {
1975 int ret
= store
->time_log_trim(oids
[shard
], start_time
, end_time
, start_marker
, end_marker
);
1976 if (ret
== -ENOENT
|| ret
== -ENODATA
) {
1986 bool RGWDataChangesLog::going_down()
1991 RGWDataChangesLog::~RGWDataChangesLog() {
1993 renew_thread
->stop();
1994 renew_thread
->join();
1995 delete renew_thread
;
1999 void *RGWDataChangesLog::ChangesRenewThread::entry() {
2001 dout(2) << "RGWDataChangesLog::ChangesRenewThread: start" << dendl
;
2002 int r
= log
->renew_entries();
2004 dout(0) << "ERROR: RGWDataChangesLog::renew_entries returned error r=" << r
<< dendl
;
2007 if (log
->going_down())
2010 int interval
= cct
->_conf
->rgw_data_log_window
* 3 / 4;
2012 cond
.WaitInterval(lock
, utime_t(interval
, 0));
2014 } while (!log
->going_down());
2019 void RGWDataChangesLog::ChangesRenewThread::stop()
2021 Mutex::Locker
l(lock
);
2025 void RGWDataChangesLog::mark_modified(int shard_id
, const rgw_bucket_shard
& bs
)
2027 auto key
= bs
.get_key();
2028 modified_lock
.get_read();
2029 map
<int, set
<string
> >::iterator iter
= modified_shards
.find(shard_id
);
2030 if (iter
!= modified_shards
.end()) {
2031 set
<string
>& keys
= iter
->second
;
2032 if (keys
.find(key
) != keys
.end()) {
2033 modified_lock
.unlock();
2037 modified_lock
.unlock();
2039 RWLock::WLocker
wl(modified_lock
);
2040 modified_shards
[shard_id
].insert(key
);
2043 void RGWDataChangesLog::read_clear_modified(map
<int, set
<string
> > &modified
)
2045 RWLock::WLocker
wl(modified_lock
);
2046 modified
.swap(modified_shards
);
2047 modified_shards
.clear();
2050 void RGWBucketCompleteInfo::dump(Formatter
*f
) const {
2051 encode_json("bucket_info", info
, f
);
2052 encode_json("attrs", attrs
, f
);
2055 void RGWBucketCompleteInfo::decode_json(JSONObj
*obj
) {
2056 JSONDecoder::decode_json("bucket_info", info
, obj
);
2057 JSONDecoder::decode_json("attrs", attrs
, obj
);
2060 class RGWBucketMetadataHandler
: public RGWMetadataHandler
{
2063 string
get_type() override
{ return "bucket"; }
2065 int get(RGWRados
*store
, string
& entry
, RGWMetadataObject
**obj
) override
{
2066 RGWObjVersionTracker ot
;
2067 RGWBucketEntryPoint be
;
2070 map
<string
, bufferlist
> attrs
;
2071 RGWObjectCtx
obj_ctx(store
);
2073 string tenant_name
, bucket_name
;
2074 parse_bucket(entry
, &tenant_name
, &bucket_name
);
2075 int ret
= store
->get_bucket_entrypoint_info(obj_ctx
, tenant_name
, bucket_name
, be
, &ot
, &mtime
, &attrs
);
2079 RGWBucketEntryMetadataObject
*mdo
= new RGWBucketEntryMetadataObject(be
, ot
.read_version
, mtime
);
2086 int put(RGWRados
*store
, string
& entry
, RGWObjVersionTracker
& objv_tracker
,
2087 real_time mtime
, JSONObj
*obj
, sync_type_t sync_type
) override
{
2088 RGWBucketEntryPoint be
, old_be
;
2090 decode_json_obj(be
, obj
);
2091 } catch (JSONDecoder::err
& e
) {
2095 real_time orig_mtime
;
2096 map
<string
, bufferlist
> attrs
;
2098 RGWObjVersionTracker old_ot
;
2099 RGWObjectCtx
obj_ctx(store
);
2101 string tenant_name
, bucket_name
;
2102 parse_bucket(entry
, &tenant_name
, &bucket_name
);
2103 int ret
= store
->get_bucket_entrypoint_info(obj_ctx
, tenant_name
, bucket_name
, old_be
, &old_ot
, &orig_mtime
, &attrs
);
2104 if (ret
< 0 && ret
!= -ENOENT
)
2107 // are we actually going to perform this put, or is it too old?
2108 if (ret
!= -ENOENT
&&
2109 !check_versions(old_ot
.read_version
, orig_mtime
,
2110 objv_tracker
.write_version
, mtime
, sync_type
)) {
2111 return STATUS_NO_APPLY
;
2114 objv_tracker
.read_version
= old_ot
.read_version
; /* maintain the obj version we just read */
2116 ret
= store
->put_bucket_entrypoint_info(tenant_name
, bucket_name
, be
, false, objv_tracker
, mtime
, &attrs
);
2122 ret
= rgw_link_bucket(store
, be
.owner
, be
.bucket
, be
.creation_time
, false);
2124 ret
= rgw_unlink_bucket(store
, be
.owner
, be
.bucket
.tenant
, be
.bucket
.name
, false);
2130 struct list_keys_info
{
2132 RGWListRawObjsCtx ctx
;
2135 int remove(RGWRados
*store
, string
& entry
, RGWObjVersionTracker
& objv_tracker
) override
{
2136 RGWBucketEntryPoint be
;
2137 RGWObjectCtx
obj_ctx(store
);
2139 string tenant_name
, bucket_name
;
2140 parse_bucket(entry
, &tenant_name
, &bucket_name
);
2141 int ret
= store
->get_bucket_entrypoint_info(obj_ctx
, tenant_name
, bucket_name
, be
, &objv_tracker
, NULL
, NULL
);
2146 * We're unlinking the bucket but we don't want to update the entrypoint here - we're removing
2147 * it immediately and don't want to invalidate our cached objv_version or the bucket obj removal
2148 * will incorrectly fail.
2150 ret
= rgw_unlink_bucket(store
, be
.owner
, tenant_name
, bucket_name
, false);
2152 lderr(store
->ctx()) << "could not unlink bucket=" << entry
<< " owner=" << be
.owner
<< dendl
;
2155 ret
= rgw_bucket_delete_bucket_obj(store
, tenant_name
, bucket_name
, objv_tracker
);
2157 lderr(store
->ctx()) << "could not delete bucket=" << entry
<< dendl
;
2163 void get_pool_and_oid(RGWRados
*store
, const string
& key
, rgw_pool
& pool
, string
& oid
) override
{
2165 pool
= store
->get_zone_params().domain_root
;
2168 int list_keys_init(RGWRados
*store
, const string
& marker
, void **phandle
) override
{
2169 auto info
= ceph::make_unique
<list_keys_info
>();
2171 info
->store
= store
;
2173 int ret
= store
->list_raw_objects_init(store
->get_zone_params().domain_root
, marker
,
2178 *phandle
= (void *)info
.release();
2183 int list_keys_next(void *handle
, int max
, list
<string
>& keys
, bool *truncated
) override
{
2184 list_keys_info
*info
= static_cast<list_keys_info
*>(handle
);
2190 RGWRados
*store
= info
->store
;
2192 list
<string
> unfiltered_keys
;
2194 int ret
= store
->list_raw_objects_next(no_filter
, max
, info
->ctx
,
2195 unfiltered_keys
, truncated
);
2196 if (ret
< 0 && ret
!= -ENOENT
)
2198 if (ret
== -ENOENT
) {
2204 // now filter out the system entries
2205 list
<string
>::iterator iter
;
2206 for (iter
= unfiltered_keys
.begin(); iter
!= unfiltered_keys
.end(); ++iter
) {
2217 void list_keys_complete(void *handle
) override
{
2218 list_keys_info
*info
= static_cast<list_keys_info
*>(handle
);
2222 string
get_marker(void *handle
) {
2223 list_keys_info
*info
= static_cast<list_keys_info
*>(handle
);
2224 return info
->store
->list_raw_objs_get_cursor(info
->ctx
);
2228 class RGWBucketInstanceMetadataHandler
: public RGWMetadataHandler
{
2231 string
get_type() override
{ return "bucket.instance"; }
2233 int get(RGWRados
*store
, string
& oid
, RGWMetadataObject
**obj
) override
{
2234 RGWBucketCompleteInfo bci
;
2237 RGWObjectCtx
obj_ctx(store
);
2239 int ret
= store
->get_bucket_instance_info(obj_ctx
, oid
, bci
.info
, &mtime
, &bci
.attrs
);
2243 RGWBucketInstanceMetadataObject
*mdo
= new RGWBucketInstanceMetadataObject(bci
, bci
.info
.objv_tracker
.read_version
, mtime
);
2250 int put(RGWRados
*store
, string
& entry
, RGWObjVersionTracker
& objv_tracker
,
2251 real_time mtime
, JSONObj
*obj
, sync_type_t sync_type
) override
{
2252 RGWBucketCompleteInfo bci
, old_bci
;
2254 decode_json_obj(bci
, obj
);
2255 } catch (JSONDecoder::err
& e
) {
2259 real_time orig_mtime
;
2260 RGWObjectCtx
obj_ctx(store
);
2262 int ret
= store
->get_bucket_instance_info(obj_ctx
, entry
, old_bci
.info
,
2263 &orig_mtime
, &old_bci
.attrs
);
2264 bool exists
= (ret
!= -ENOENT
);
2265 if (ret
< 0 && exists
)
2268 if (!exists
|| old_bci
.info
.bucket
.bucket_id
!= bci
.info
.bucket
.bucket_id
) {
2269 /* a new bucket, we need to select a new bucket placement for it */
2271 rgw_bucket_instance_oid_to_key(key
);
2274 string bucket_instance
;
2275 parse_bucket(key
, &tenant_name
, &bucket_name
, &bucket_instance
);
2277 RGWZonePlacementInfo rule_info
;
2278 bci
.info
.bucket
.name
= bucket_name
;
2279 bci
.info
.bucket
.bucket_id
= bucket_instance
;
2280 bci
.info
.bucket
.tenant
= tenant_name
;
2281 ret
= store
->select_bucket_location_by_rule(bci
.info
.placement_rule
, &rule_info
);
2283 ldout(store
->ctx(), 0) << "ERROR: select_bucket_placement() returned " << ret
<< dendl
;
2286 bci
.info
.index_type
= rule_info
.index_type
;
2288 /* existing bucket, keep its placement */
2289 bci
.info
.bucket
.explicit_placement
= old_bci
.info
.bucket
.explicit_placement
;
2290 bci
.info
.placement_rule
= old_bci
.info
.placement_rule
;
2293 if (exists
&& old_bci
.info
.datasync_flag_enabled() != bci
.info
.datasync_flag_enabled()) {
2294 int shards_num
= bci
.info
.num_shards
? bci
.info
.num_shards
: 1;
2295 int shard_id
= bci
.info
.num_shards
? 0 : -1;
2297 if (!bci
.info
.datasync_flag_enabled()) {
2298 ret
= store
->stop_bi_log_entries(bci
.info
, -1);
2300 lderr(store
->ctx()) << "ERROR: failed writing bilog" << dendl
;
2304 ret
= store
->resync_bi_log_entries(bci
.info
, -1);
2306 lderr(store
->ctx()) << "ERROR: failed writing bilog" << dendl
;
2311 for (int i
= 0; i
< shards_num
; ++i
, ++shard_id
) {
2312 ret
= store
->data_log
->add_entry(bci
.info
.bucket
, shard_id
);
2314 lderr(store
->ctx()) << "ERROR: failed writing data log" << dendl
;
2320 // are we actually going to perform this put, or is it too old?
2322 !check_versions(old_bci
.info
.objv_tracker
.read_version
, orig_mtime
,
2323 objv_tracker
.write_version
, mtime
, sync_type
)) {
2324 objv_tracker
.read_version
= old_bci
.info
.objv_tracker
.read_version
;
2325 return STATUS_NO_APPLY
;
2328 /* record the read version (if any), store the new version */
2329 bci
.info
.objv_tracker
.read_version
= old_bci
.info
.objv_tracker
.read_version
;
2330 bci
.info
.objv_tracker
.write_version
= objv_tracker
.write_version
;
2332 ret
= store
->put_bucket_instance_info(bci
.info
, false, mtime
, &bci
.attrs
);
2336 objv_tracker
= bci
.info
.objv_tracker
;
2338 ret
= store
->init_bucket_index(bci
.info
, bci
.info
.num_shards
);
2342 return STATUS_APPLIED
;
2345 struct list_keys_info
{
2347 RGWListRawObjsCtx ctx
;
2350 int remove(RGWRados
*store
, string
& entry
, RGWObjVersionTracker
& objv_tracker
) override
{
2352 RGWObjectCtx
obj_ctx(store
);
2354 int ret
= store
->get_bucket_instance_info(obj_ctx
, entry
, info
, NULL
, NULL
);
2355 if (ret
< 0 && ret
!= -ENOENT
)
2358 return rgw_bucket_instance_remove_entry(store
, entry
, &info
.objv_tracker
);
2361 void get_pool_and_oid(RGWRados
*store
, const string
& key
, rgw_pool
& pool
, string
& oid
) override
{
2362 oid
= RGW_BUCKET_INSTANCE_MD_PREFIX
+ key
;
2363 rgw_bucket_instance_key_to_oid(oid
);
2364 pool
= store
->get_zone_params().domain_root
;
2367 int list_keys_init(RGWRados
*store
, const string
& marker
, void **phandle
) override
{
2368 auto info
= ceph::make_unique
<list_keys_info
>();
2370 info
->store
= store
;
2372 int ret
= store
->list_raw_objects_init(store
->get_zone_params().domain_root
, marker
,
2377 *phandle
= (void *)info
.release();
2382 int list_keys_next(void *handle
, int max
, list
<string
>& keys
, bool *truncated
) override
{
2383 list_keys_info
*info
= static_cast<list_keys_info
*>(handle
);
2389 RGWRados
*store
= info
->store
;
2391 list
<string
> unfiltered_keys
;
2393 int ret
= store
->list_raw_objects_next(no_filter
, max
, info
->ctx
,
2394 unfiltered_keys
, truncated
);
2395 if (ret
< 0 && ret
!= -ENOENT
)
2397 if (ret
== -ENOENT
) {
2403 constexpr int prefix_size
= sizeof(RGW_BUCKET_INSTANCE_MD_PREFIX
) - 1;
2404 // now filter in the relevant entries
2405 list
<string
>::iterator iter
;
2406 for (iter
= unfiltered_keys
.begin(); iter
!= unfiltered_keys
.end(); ++iter
) {
2409 if (k
.compare(0, prefix_size
, RGW_BUCKET_INSTANCE_MD_PREFIX
) == 0) {
2410 auto oid
= k
.substr(prefix_size
);
2411 rgw_bucket_instance_oid_to_key(oid
);
2412 keys
.emplace_back(std::move(oid
));
2419 void list_keys_complete(void *handle
) override
{
2420 list_keys_info
*info
= static_cast<list_keys_info
*>(handle
);
2424 string
get_marker(void *handle
) {
2425 list_keys_info
*info
= static_cast<list_keys_info
*>(handle
);
2426 return info
->store
->list_raw_objs_get_cursor(info
->ctx
);
2430 * hash entry for mdlog placement. Use the same hash key we'd have for the bucket entry
2431 * point, so that the log entries end up at the same log shard, so that we process them
2434 void get_hash_key(const string
& section
, const string
& key
, string
& hash_key
) override
{
2436 int pos
= key
.find(':');
2440 k
= key
.substr(0, pos
);
2441 hash_key
= "bucket:" + k
;
2445 void rgw_bucket_init(RGWMetadataManager
*mm
)
2447 bucket_meta_handler
= new RGWBucketMetadataHandler
;
2448 mm
->register_handler(bucket_meta_handler
);
2449 bucket_instance_meta_handler
= new RGWBucketInstanceMetadataHandler
;
2450 mm
->register_handler(bucket_instance_meta_handler
);