#include <boost/utility/string_ref.hpp>

#include "common/ceph_json.h"
#include "common/RWLock.h"
#include "common/RefCountedObj.h"
#include "common/WorkQueue.h"
#include "common/Throttle.h"
#include "common/errno.h"

#include "rgw_common.h"
#include "rgw_rados.h"

#include "rgw_data_sync.h"
#include "rgw_rest_conn.h"
#include "rgw_cr_rados.h"
#include "rgw_cr_rest.h"
#include "rgw_http_client.h"
#include "rgw_bucket.h"
#include "rgw_metadata.h"
#include "rgw_sync_module.h"

#include "cls/lock/cls_lock_client.h"

#include "auth/Crypto.h"

#include <boost/asio/yield.hpp>

#define dout_subsys ceph_subsys_rgw

#define dout_prefix (*_dout << "data sync: ")
static string datalog_sync_status_oid_prefix = "datalog.sync-status";
static string datalog_sync_status_shard_prefix = "datalog.sync-status.shard";
static string datalog_sync_full_sync_index_prefix = "data.full-sync.index";
static string bucket_status_oid_prefix = "bucket.sync-status";
class RGWSyncDebugLogger {
  CephContext *cct;
  string prefix;

  bool ended;

public:
  RGWSyncDebugLogger(CephContext *_cct, const string& source_zone,
                     const string& sync_type, const string& sync_stage,
                     const string& resource, bool log_start = true) {
    init(_cct, source_zone, sync_type, sync_stage, resource, log_start);
  }
  RGWSyncDebugLogger() : cct(NULL), ended(false) {}
  ~RGWSyncDebugLogger();

  void init(CephContext *_cct, const string& source_zone,
            const string& sync_type, const string& sync_stage,
            const string& resource, bool log_start = true);
  void log(const string& state);
  void finish(int status);
};

void RGWSyncDebugLogger::init(CephContext *_cct, const string& source_zone,
                              const string& sync_type, const string& sync_section,
                              const string& resource, bool log_start)
{
  cct = _cct;
  ended = false;
  string zone_str = source_zone.substr(0, 8);
  prefix = "Sync:" + zone_str + ":" + sync_type + ":" + sync_section + ":" + resource;

  if (log_start) {
    log("start");
  }
}

RGWSyncDebugLogger::~RGWSyncDebugLogger()
{
  if (!ended) {
    log("finish");
  }
}

void RGWSyncDebugLogger::log(const string& state)
{
  ldout(cct, 5) << prefix << ":" << state << dendl;
}

void RGWSyncDebugLogger::finish(int status)
{
  ended = true;
  ldout(cct, 5) << prefix << ":" << "finish r=" << status << dendl;
}
class RGWDataSyncDebugLogger : public RGWSyncDebugLogger {
public:
  RGWDataSyncDebugLogger() {}
  RGWDataSyncDebugLogger(RGWDataSyncEnv *sync_env, const string& sync_section,
                         const string& resource, bool log_start = true) {
    init(sync_env, sync_section, resource, log_start);
  }
  void init(RGWDataSyncEnv *sync_env, const string& sync_section,
            const string& resource, bool log_start = true) {
    RGWSyncDebugLogger::init(sync_env->cct, sync_env->source_zone, "data", sync_section, resource, log_start);
  }
};
void rgw_datalog_info::decode_json(JSONObj *obj) {
  JSONDecoder::decode_json("num_objects", num_shards, obj);
}

void rgw_datalog_entry::decode_json(JSONObj *obj) {
  JSONDecoder::decode_json("key", key, obj);
  utime_t ut;
  JSONDecoder::decode_json("timestamp", ut, obj);
  timestamp = ut.to_real_time();
}

void rgw_datalog_shard_data::decode_json(JSONObj *obj) {
  JSONDecoder::decode_json("marker", marker, obj);
  JSONDecoder::decode_json("truncated", truncated, obj);
  JSONDecoder::decode_json("entries", entries, obj);
}
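
/* Reads the per-shard data sync markers, fanning out up to
 * MAX_CONCURRENT_SHARDS reads via RGWShardCollectCR. */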
class RGWReadDataSyncStatusMarkersCR : public RGWShardCollectCR {
  static constexpr int MAX_CONCURRENT_SHARDS = 16;

  RGWDataSyncEnv *env;
  const int num_shards;
  int shard_id{0};
  map<uint32_t, rgw_data_sync_marker>& markers;

 public:
  RGWReadDataSyncStatusMarkersCR(RGWDataSyncEnv *env, int num_shards,
                                 map<uint32_t, rgw_data_sync_marker>& markers)
    : RGWShardCollectCR(env->cct, MAX_CONCURRENT_SHARDS),
      env(env), num_shards(num_shards), markers(markers)
  {}
  bool spawn_next() override;
};

bool RGWReadDataSyncStatusMarkersCR::spawn_next()
{
  if (shard_id >= num_shards) {
    return false;
  }
  using CR = RGWSimpleRadosReadCR<rgw_data_sync_marker>;
  spawn(new CR(env->async_rados, env->store,
               rgw_raw_obj(env->store->get_zone_params().log_pool,
                           RGWDataSyncStatusManager::shard_obj_name(env->source_zone, shard_id)),
               &markers[shard_id]),
        false);
  shard_id++;
  return true;
}
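
/* Reads the overall data sync status object, then collects the individual
 * shard markers into the supplied rgw_data_sync_status. */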
class RGWReadDataSyncStatusCoroutine : public RGWCoroutine {
  RGWDataSyncEnv *sync_env;
  rgw_data_sync_status *sync_status;

public:
  RGWReadDataSyncStatusCoroutine(RGWDataSyncEnv *_sync_env,
                                 rgw_data_sync_status *_status)
    : RGWCoroutine(_sync_env->cct), sync_env(_sync_env), sync_status(_status)
  {}
  int operate() override;
};

int RGWReadDataSyncStatusCoroutine::operate()
{
  reenter(this) {
    // read sync info
    using ReadInfoCR = RGWSimpleRadosReadCR<rgw_data_sync_info>;
    yield {
      bool empty_on_enoent = false; // fail on ENOENT
      call(new ReadInfoCR(sync_env->async_rados, sync_env->store,
                          rgw_raw_obj(sync_env->store->get_zone_params().log_pool, RGWDataSyncStatusManager::sync_status_oid(sync_env->source_zone)),
                          &sync_status->sync_info, empty_on_enoent));
    }
    if (retcode < 0) {
      ldout(sync_env->cct, 4) << "failed to read sync status info with "
          << cpp_strerror(retcode) << dendl;
      return set_cr_error(retcode);
    }
    // read shard markers
    using ReadMarkersCR = RGWReadDataSyncStatusMarkersCR;
    yield call(new ReadMarkersCR(sync_env, sync_status->sync_info.num_shards,
                                 sync_status->sync_markers));
    if (retcode < 0) {
      ldout(sync_env->cct, 4) << "failed to read sync status markers with "
          << cpp_strerror(retcode) << dendl;
      return set_cr_error(retcode);
    }
    return set_cr_done();
  }
  return 0;
}
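
/* Fetches the current marker info for a single remote datalog shard through
 * the source zone's /admin/log REST endpoint. */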
class RGWReadRemoteDataLogShardInfoCR : public RGWCoroutine {
  RGWDataSyncEnv *sync_env;

  RGWRESTReadResource *http_op;

  int shard_id;
  RGWDataChangesLogInfo *shard_info;

public:
  RGWReadRemoteDataLogShardInfoCR(RGWDataSyncEnv *_sync_env,
                                  int _shard_id, RGWDataChangesLogInfo *_shard_info) : RGWCoroutine(_sync_env->cct),
                                                                                       sync_env(_sync_env),
                                                                                       http_op(NULL),
                                                                                       shard_id(_shard_id),
                                                                                       shard_info(_shard_info) {
  }

  ~RGWReadRemoteDataLogShardInfoCR() override {
    if (http_op) {
      http_op->put();
    }
  }

  int operate() override {
    reenter(this) {
      yield {
        char buf[16];
        snprintf(buf, sizeof(buf), "%d", shard_id);
        rgw_http_param_pair pairs[] = { { "type" , "data" },
                                        { "id", buf },
                                        { "info" , NULL },
                                        { NULL, NULL } };

        string p = "/admin/log/";

        http_op = new RGWRESTReadResource(sync_env->conn, p, pairs, NULL, sync_env->http_manager);

        http_op->set_user_info((void *)stack);

        int ret = http_op->aio_read();
        if (ret < 0) {
          ldout(sync_env->cct, 0) << "ERROR: failed to read from " << p << dendl;
          log_error() << "failed to send http operation: " << http_op->to_str() << " ret=" << ret << std::endl;
          return set_cr_error(ret);
        }

        return io_block(0);
      }
      yield {
        int ret = http_op->wait(shard_info);
        if (ret < 0) {
          return set_cr_error(ret);
        }
        return set_cr_done();
      }
    }
    return 0;
  }
};
struct read_remote_data_log_response {
  string marker;
  bool truncated;
  list<rgw_data_change_log_entry> entries;

  read_remote_data_log_response() : truncated(false) {}

  void decode_json(JSONObj *obj) {
    JSONDecoder::decode_json("marker", marker, obj);
    JSONDecoder::decode_json("truncated", truncated, obj);
    JSONDecoder::decode_json("entries", entries, obj);
  }
};
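
/* Reads a batch of datalog entries for one remote shard, returning the new
 * marker and whether the listing was truncated. */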
class RGWReadRemoteDataLogShardCR : public RGWCoroutine {
  RGWDataSyncEnv *sync_env;

  RGWRESTReadResource *http_op;

  int shard_id;
  string *pmarker;
  list<rgw_data_change_log_entry> *entries;
  bool *truncated;

  read_remote_data_log_response response;

public:
  RGWReadRemoteDataLogShardCR(RGWDataSyncEnv *_sync_env,
                              int _shard_id, string *_pmarker, list<rgw_data_change_log_entry> *_entries, bool *_truncated) : RGWCoroutine(_sync_env->cct),
                                                                                       sync_env(_sync_env),
                                                                                       http_op(NULL),
                                                                                       shard_id(_shard_id),
                                                                                       pmarker(_pmarker),
                                                                                       entries(_entries),
                                                                                       truncated(_truncated) {
  }
  ~RGWReadRemoteDataLogShardCR() override {
    if (http_op) {
      http_op->put();
    }
  }

  int operate() override {
    reenter(this) {
      yield {
        char buf[16];
        snprintf(buf, sizeof(buf), "%d", shard_id);
        rgw_http_param_pair pairs[] = { { "type" , "data" },
                                        { "id", buf },
                                        { "marker", pmarker->c_str() },
                                        { "extra-info", "true" },
                                        { NULL, NULL } };

        string p = "/admin/log/";

        http_op = new RGWRESTReadResource(sync_env->conn, p, pairs, NULL, sync_env->http_manager);

        http_op->set_user_info((void *)stack);

        int ret = http_op->aio_read();
        if (ret < 0) {
          ldout(sync_env->cct, 0) << "ERROR: failed to read from " << p << dendl;
          log_error() << "failed to send http operation: " << http_op->to_str() << " ret=" << ret << std::endl;
          return set_cr_error(ret);
        }

        return io_block(0);
      }
      yield {
        int ret = http_op->wait(&response);
        if (ret < 0) {
          return set_cr_error(ret);
        }
        entries->swap(response.entries);
        *pmarker = response.marker;
        *truncated = response.truncated;
        return set_cr_done();
      }
    }
    return 0;
  }
};
class RGWReadRemoteDataLogInfoCR : public RGWShardCollectCR {
  RGWDataSyncEnv *sync_env;

  int num_shards;
  map<int, RGWDataChangesLogInfo> *datalog_info;

  int shard_id;
#define READ_DATALOG_MAX_CONCURRENT 10

public:
  RGWReadRemoteDataLogInfoCR(RGWDataSyncEnv *_sync_env,
                             int _num_shards,
                             map<int, RGWDataChangesLogInfo> *_datalog_info) : RGWShardCollectCR(_sync_env->cct, READ_DATALOG_MAX_CONCURRENT),
                                                                               sync_env(_sync_env), num_shards(_num_shards),
                                                                               datalog_info(_datalog_info), shard_id(0) {}
  bool spawn_next() override;
};

bool RGWReadRemoteDataLogInfoCR::spawn_next() {
  if (shard_id >= num_shards) {
    return false;
  }
  spawn(new RGWReadRemoteDataLogShardInfoCR(sync_env, shard_id, &(*datalog_info)[shard_id]), false);
  shard_id++;
  return true;
}
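
/* Lists entries of a single remote datalog shard starting at a given marker,
 * bounded by max_entries. */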
class RGWListRemoteDataLogShardCR : public RGWSimpleCoroutine {
  RGWDataSyncEnv *sync_env;
  RGWRESTReadResource *http_op;

  int shard_id;
  string marker;
  uint32_t max_entries;
  rgw_datalog_shard_data *result;

public:
  RGWListRemoteDataLogShardCR(RGWDataSyncEnv *env, int _shard_id,
                              const string& _marker, uint32_t _max_entries,
                              rgw_datalog_shard_data *_result)
    : RGWSimpleCoroutine(env->store->ctx()), sync_env(env), http_op(NULL),
      shard_id(_shard_id), marker(_marker), max_entries(_max_entries), result(_result) {}

  int send_request() override {
    RGWRESTConn *conn = sync_env->conn;
    RGWRados *store = sync_env->store;

    char buf[32];
    snprintf(buf, sizeof(buf), "%d", shard_id);

    char max_entries_buf[32];
    snprintf(max_entries_buf, sizeof(max_entries_buf), "%d", (int)max_entries);

    const char *marker_key = (marker.empty() ? "" : "marker");

    rgw_http_param_pair pairs[] = { { "type", "data" },
                                    { "id", buf },
                                    { "max-entries", max_entries_buf },
                                    { marker_key, marker.c_str() },
                                    { NULL, NULL } };

    string p = "/admin/log/";

    http_op = new RGWRESTReadResource(conn, p, pairs, NULL, sync_env->http_manager);
    http_op->set_user_info((void *)stack);

    int ret = http_op->aio_read();
    if (ret < 0) {
      ldout(store->ctx(), 0) << "ERROR: failed to read from " << p << dendl;
      log_error() << "failed to send http operation: " << http_op->to_str() << " ret=" << ret << std::endl;
      http_op->put();
      return ret;
    }

    return 0;
  }

  int request_complete() override {
    int ret = http_op->wait(result);
    http_op->put();
    if (ret < 0 && ret != -ENOENT) {
      ldout(sync_env->store->ctx(), 0) << "ERROR: failed to list remote datalog shard, ret=" << ret << dendl;
      return ret;
    }
    return 0;
  }
};
class RGWListRemoteDataLogCR : public RGWShardCollectCR {
  RGWDataSyncEnv *sync_env;

  map<int, string> shards;
  int max_entries_per_shard;
  map<int, rgw_datalog_shard_data> *result;

  map<int, string>::iterator iter;
#define READ_DATALOG_MAX_CONCURRENT 10

public:
  RGWListRemoteDataLogCR(RGWDataSyncEnv *_sync_env,
                         map<int, string>& _shards,
                         int _max_entries_per_shard,
                         map<int, rgw_datalog_shard_data> *_result) : RGWShardCollectCR(_sync_env->cct, READ_DATALOG_MAX_CONCURRENT),
                                                                      sync_env(_sync_env), max_entries_per_shard(_max_entries_per_shard),
                                                                      result(_result) {
    shards.swap(_shards);
    iter = shards.begin();
  }
  bool spawn_next() override;
};

bool RGWListRemoteDataLogCR::spawn_next() {
  if (iter == shards.end()) {
    return false;
  }

  spawn(new RGWListRemoteDataLogShardCR(sync_env, iter->first, iter->second, max_entries_per_shard, &(*result)[iter->first]), false);
  ++iter;
  return true;
}
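
/* Initializes the data sync status objects: takes a lock on the status
 * object, writes the initial sync info, records the current remote shard
 * markers, and moves the state to StateBuildingFullSyncMaps. */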
class RGWInitDataSyncStatusCoroutine : public RGWCoroutine {
  static constexpr uint32_t lock_duration = 30;
  RGWDataSyncEnv *sync_env;
  RGWRados *store;
  const rgw_pool& pool;
  const uint32_t num_shards;

  string sync_status_oid;

  string lock_name;
  string cookie;
  rgw_data_sync_status *status;
  map<int, RGWDataChangesLogInfo> shards_info;
public:
  RGWInitDataSyncStatusCoroutine(RGWDataSyncEnv *_sync_env, uint32_t num_shards,
                                 uint64_t instance_id,
                                 rgw_data_sync_status *status)
    : RGWCoroutine(_sync_env->cct), sync_env(_sync_env), store(sync_env->store),
      pool(store->get_zone_params().log_pool),
      num_shards(num_shards), status(status) {
    lock_name = "sync_lock";

    status->sync_info.instance_id = instance_id;

#define COOKIE_LEN 16
    char buf[COOKIE_LEN + 1];

    gen_rand_alphanumeric(cct, buf, sizeof(buf) - 1);
    cookie = buf;

    sync_status_oid = RGWDataSyncStatusManager::sync_status_oid(sync_env->source_zone);
  }

  int operate() override {
    int ret;
    reenter(this) {
      using LockCR = RGWSimpleRadosLockCR;
      yield call(new LockCR(sync_env->async_rados, store,
                            rgw_raw_obj{pool, sync_status_oid},
                            lock_name, cookie, lock_duration));
      if (retcode < 0) {
        ldout(cct, 0) << "ERROR: failed to take a lock on " << sync_status_oid << dendl;
        return set_cr_error(retcode);
      }
      using WriteInfoCR = RGWSimpleRadosWriteCR<rgw_data_sync_info>;
      yield call(new WriteInfoCR(sync_env->async_rados, store,
                                 rgw_raw_obj{pool, sync_status_oid},
                                 status->sync_info));
      if (retcode < 0) {
        ldout(cct, 0) << "ERROR: failed to write sync status info with " << retcode << dendl;
        return set_cr_error(retcode);
      }

      /* take lock again, we just recreated the object */
      yield call(new LockCR(sync_env->async_rados, store,
                            rgw_raw_obj{pool, sync_status_oid},
                            lock_name, cookie, lock_duration));
      if (retcode < 0) {
        ldout(cct, 0) << "ERROR: failed to take a lock on " << sync_status_oid << dendl;
        return set_cr_error(retcode);
      }

      /* fetch current position in logs */
      yield {
        RGWRESTConn *conn = store->get_zone_conn_by_id(sync_env->source_zone);
        if (!conn) {
          ldout(cct, 0) << "ERROR: connection to zone " << sync_env->source_zone << " does not exist!" << dendl;
          return set_cr_error(-EIO);
        }
        for (uint32_t i = 0; i < num_shards; i++) {
          spawn(new RGWReadRemoteDataLogShardInfoCR(sync_env, i, &shards_info[i]), true);
        }
      }
      while (collect(&ret, NULL)) {
        if (ret < 0) {
          ldout(cct, 0) << "ERROR: failed to read remote data log shards" << dendl;
          return set_state(RGWCoroutine_Error);
        }
        yield;
      }
      yield {
        for (uint32_t i = 0; i < num_shards; i++) {
          RGWDataChangesLogInfo& info = shards_info[i];
          auto& marker = status->sync_markers[i];
          marker.next_step_marker = info.marker;
          marker.timestamp = info.last_update;
          const auto& oid = RGWDataSyncStatusManager::shard_obj_name(sync_env->source_zone, i);
          using WriteMarkerCR = RGWSimpleRadosWriteCR<rgw_data_sync_marker>;
          spawn(new WriteMarkerCR(sync_env->async_rados, store,
                                  rgw_raw_obj{pool, oid}, marker), true);
        }
      }
      while (collect(&ret, NULL)) {
        if (ret < 0) {
          ldout(cct, 0) << "ERROR: failed to write data sync status markers" << dendl;
          return set_state(RGWCoroutine_Error);
        }
        yield;
      }

      status->sync_info.state = rgw_data_sync_info::StateBuildingFullSyncMaps;
      yield call(new WriteInfoCR(sync_env->async_rados, store,
                                 rgw_raw_obj{pool, sync_status_oid},
                                 status->sync_info));
      if (retcode < 0) {
        ldout(cct, 0) << "ERROR: failed to write sync status info with " << retcode << dendl;
        return set_cr_error(retcode);
      }
      yield call(new RGWSimpleRadosUnlockCR(sync_env->async_rados, store,
                                            rgw_raw_obj{pool, sync_status_oid},
                                            lock_name, cookie));
      return set_cr_done();
    }
    return 0;
  }
};
int RGWRemoteDataLog::read_log_info(rgw_datalog_info *log_info)
{
  rgw_http_param_pair pairs[] = { { "type", "data" },
                                  { NULL, NULL } };

  int ret = sync_env.conn->get_json_resource("/admin/log", pairs, *log_info);
  if (ret < 0) {
    ldout(store->ctx(), 0) << "ERROR: failed to fetch datalog info" << dendl;
    return ret;
  }

  ldout(store->ctx(), 20) << "remote datalog, num_shards=" << log_info->num_shards << dendl;

  return 0;
}

int RGWRemoteDataLog::read_source_log_shards_info(map<int, RGWDataChangesLogInfo> *shards_info)
{
  rgw_datalog_info log_info;
  int ret = read_log_info(&log_info);
  if (ret < 0) {
    return ret;
  }

  return run(new RGWReadRemoteDataLogInfoCR(&sync_env, log_info.num_shards, shards_info));
}

int RGWRemoteDataLog::read_source_log_shards_next(map<int, string> shard_markers, map<int, rgw_datalog_shard_data> *result)
{
  if (store->is_meta_master()) {
    return 0;
  }

  return run(new RGWListRemoteDataLogCR(&sync_env, shard_markers, 1, result));
}

int RGWRemoteDataLog::init(const string& _source_zone, RGWRESTConn *_conn, RGWSyncErrorLogger *_error_logger, RGWSyncModuleInstanceRef& _sync_module)
{
  sync_env.init(store->ctx(), store, _conn, async_rados, &http_manager, _error_logger, _source_zone, _sync_module);

  if (initialized) {
    return 0;
  }

  int ret = http_manager.set_threaded();
  if (ret < 0) {
    ldout(store->ctx(), 0) << "failed in http_manager.set_threaded() ret=" << ret << dendl;
    return ret;
  }

  initialized = true;

  return 0;
}

void RGWRemoteDataLog::finish()
{
  stop();
}
int RGWRemoteDataLog::get_shard_info(int shard_id)
{
  char buf[32];
  snprintf(buf, sizeof(buf), "%d", shard_id);

  rgw_http_param_pair pairs[] = { { "type", "data" },
                                  { "id", buf },
                                  { "info", NULL },
                                  { NULL, NULL } };

  RGWDataChangesLogInfo info;
  int ret = sync_env.conn->get_json_resource("/admin/log", pairs, info);
  if (ret < 0) {
    ldout(store->ctx(), 0) << "ERROR: failed to fetch datalog info" << dendl;
    return ret;
  }

  ldout(store->ctx(), 20) << "remote datalog, shard_id=" << shard_id << " marker=" << info.marker << dendl;

  return 0;
}
int RGWRemoteDataLog::read_sync_status(rgw_data_sync_status *sync_status)
{
  // cannot run concurrently with run_sync(), so run in a separate manager
  RGWCoroutinesManager crs(store->ctx(), store->get_cr_registry());
  RGWHTTPManager http_manager(store->ctx(), crs.get_completion_mgr());
  int ret = http_manager.set_threaded();
  if (ret < 0) {
    ldout(store->ctx(), 0) << "failed in http_manager.set_threaded() ret=" << ret << dendl;
    return ret;
  }
  RGWDataSyncEnv sync_env_local = sync_env;
  sync_env_local.http_manager = &http_manager;
  ret = crs.run(new RGWReadDataSyncStatusCoroutine(&sync_env_local, sync_status));
  http_manager.stop();
  return ret;
}
int RGWRemoteDataLog::init_sync_status(int num_shards)
{
  rgw_data_sync_status sync_status;
  RGWCoroutinesManager crs(store->ctx(), store->get_cr_registry());
  RGWHTTPManager http_manager(store->ctx(), crs.get_completion_mgr());
  int ret = http_manager.set_threaded();
  if (ret < 0) {
    ldout(store->ctx(), 0) << "failed in http_manager.set_threaded() ret=" << ret << dendl;
    return ret;
  }
  RGWDataSyncEnv sync_env_local = sync_env;
  sync_env_local.http_manager = &http_manager;
  uint64_t instance_id;
  get_random_bytes((char *)&instance_id, sizeof(instance_id));
  ret = crs.run(new RGWInitDataSyncStatusCoroutine(&sync_env_local, num_shards, instance_id, &sync_status));
  http_manager.stop();
  return ret;
}
static string full_data_sync_index_shard_oid(const string& source_zone, int shard_id)
{
  char buf[datalog_sync_full_sync_index_prefix.size() + 1 + source_zone.size() + 1 + 16];
  snprintf(buf, sizeof(buf), "%s.%s.%d", datalog_sync_full_sync_index_prefix.c_str(), source_zone.c_str(), shard_id);
  return string(buf);
}
struct bucket_instance_meta_info {
  string key;
  obj_version ver;
  utime_t mtime;
  RGWBucketInstanceMetadataObject data;

  bucket_instance_meta_info() {}

  void decode_json(JSONObj *obj) {
    JSONDecoder::decode_json("key", key, obj);
    JSONDecoder::decode_json("ver", ver, obj);
    JSONDecoder::decode_json("mtime", mtime, obj);
    JSONDecoder::decode_json("data", data, obj);
  }
};
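
/* Builds the full-sync index: lists all bucket.instance metadata entries on
 * the source zone and appends each bucket shard to the sharded omap index
 * that full sync later replays. */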
class RGWListBucketIndexesCR : public RGWCoroutine {
  RGWDataSyncEnv *sync_env;

  RGWRados *store;

  rgw_data_sync_status *sync_status;
  int num_shards;

  int req_ret;
  int ret;

  list<string> result;
  list<string>::iterator iter;

  RGWShardedOmapCRManager *entries_index;

  string oid_prefix;

  string path;
  bucket_instance_meta_info meta_info;
  string key;
  string s;
  int i;

  bool failed;

public:
  RGWListBucketIndexesCR(RGWDataSyncEnv *_sync_env,
                         rgw_data_sync_status *_sync_status) : RGWCoroutine(_sync_env->cct), sync_env(_sync_env),
                                                               store(sync_env->store), sync_status(_sync_status),
                                                               req_ret(0), ret(0), entries_index(NULL), i(0), failed(false) {
    oid_prefix = datalog_sync_full_sync_index_prefix + "." + sync_env->source_zone;
    path = "/admin/metadata/bucket.instance";
    num_shards = sync_status->sync_info.num_shards;
  }
  ~RGWListBucketIndexesCR() override {
    delete entries_index;
  }

  int operate() override {
    reenter(this) {
      entries_index = new RGWShardedOmapCRManager(sync_env->async_rados, store, this, num_shards,
                                                  store->get_zone_params().log_pool,
                                                  oid_prefix);
      yield {
        string entrypoint = string("/admin/metadata/bucket.instance");
        /* FIXME: need a better scaling solution here, requires streaming output */
        call(new RGWReadRESTResourceCR<list<string> >(store->ctx(), sync_env->conn, sync_env->http_manager,
                                                      entrypoint, NULL, &result));
      }
      if (get_ret_status() < 0) {
        ldout(sync_env->cct, 0) << "ERROR: failed to fetch metadata for section bucket.index" << dendl;
        return set_state(RGWCoroutine_Error);
      }
      for (iter = result.begin(); iter != result.end(); ++iter) {
        ldout(sync_env->cct, 20) << "list metadata: section=bucket.index key=" << *iter << dendl;

        key = *iter;

        yield {
          rgw_http_param_pair pairs[] = { { "key", key.c_str() },
                                          { NULL, NULL } };

          call(new RGWReadRESTResourceCR<bucket_instance_meta_info>(store->ctx(), sync_env->conn, sync_env->http_manager, path, pairs, &meta_info));
        }

        num_shards = meta_info.data.get_bucket_info().num_shards;
        if (num_shards > 0) {
          for (i = 0; i < num_shards; i++) {
            char buf[16];
            snprintf(buf, sizeof(buf), ":%d", i);
            s = key + buf;
            yield entries_index->append(s, store->data_log->get_log_shard_id(meta_info.data.get_bucket_info().bucket, i));
          }
        } else {
          yield entries_index->append(key, store->data_log->get_log_shard_id(meta_info.data.get_bucket_info().bucket, -1));
        }
      }
      yield {
        if (!entries_index->finish()) {
          failed = true;
        }
      }
      if (!failed) {
        for (map<uint32_t, rgw_data_sync_marker>::iterator iter = sync_status->sync_markers.begin(); iter != sync_status->sync_markers.end(); ++iter) {
          int shard_id = (int)iter->first;
          rgw_data_sync_marker& marker = iter->second;
          marker.total_entries = entries_index->get_total_entries(shard_id);
          spawn(new RGWSimpleRadosWriteCR<rgw_data_sync_marker>(sync_env->async_rados, store,
                                                                rgw_raw_obj(store->get_zone_params().log_pool, RGWDataSyncStatusManager::shard_obj_name(sync_env->source_zone, shard_id)),
                                                                marker), true);
        }
      } else {
        yield call(sync_env->error_logger->log_error_cr(sync_env->conn->get_remote_id(), "data.init", "",
                                                        EIO, string("failed to build bucket instances map")));
      }
      while (collect(&ret, NULL)) {
        if (ret < 0) {
          yield call(sync_env->error_logger->log_error_cr(sync_env->conn->get_remote_id(), "data.init", "",
                                                          -ret, string("failed to store sync status: ") + cpp_strerror(-ret)));
          req_ret = ret;
        }
        yield;
      }
      drain_all();
      if (req_ret < 0) {
        yield return set_cr_error(req_ret);
      }
      yield return set_cr_done();
    }
    return 0;
  }
};
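
/* Tracks the high-water marker for a datalog shard and persists it to the
 * shard's status object; also maps keys to markers so only one sync runs per
 * bucket shard at a time. */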
#define DATA_SYNC_UPDATE_MARKER_WINDOW 1

class RGWDataSyncShardMarkerTrack : public RGWSyncShardMarkerTrack<string, string> {
  RGWDataSyncEnv *sync_env;

  string marker_oid;
  rgw_data_sync_marker sync_marker;

  map<string, string> key_to_marker;
  map<string, string> marker_to_key;

  void handle_finish(const string& marker) override {
    map<string, string>::iterator iter = marker_to_key.find(marker);
    if (iter == marker_to_key.end()) {
      return;
    }
    key_to_marker.erase(iter->second);
    reset_need_retry(iter->second);
    marker_to_key.erase(iter);
  }

public:
  RGWDataSyncShardMarkerTrack(RGWDataSyncEnv *_sync_env,
                              const string& _marker_oid,
                              const rgw_data_sync_marker& _marker) : RGWSyncShardMarkerTrack(DATA_SYNC_UPDATE_MARKER_WINDOW),
                                                                     sync_env(_sync_env),
                                                                     marker_oid(_marker_oid),
                                                                     sync_marker(_marker) {}

  RGWCoroutine *store_marker(const string& new_marker, uint64_t index_pos, const real_time& timestamp) override {
    sync_marker.marker = new_marker;
    sync_marker.pos = index_pos;

    ldout(sync_env->cct, 20) << __func__ << "(): updating marker marker_oid=" << marker_oid << " marker=" << new_marker << dendl;
    RGWRados *store = sync_env->store;

    return new RGWSimpleRadosWriteCR<rgw_data_sync_marker>(sync_env->async_rados, store,
                                                           rgw_raw_obj(store->get_zone_params().log_pool, marker_oid),
                                                           sync_marker);
  }

  /*
   * create index from key -> marker, and from marker -> key
   * this is useful so that we can ensure that we only have one
   * entry for any key that is used. This is needed when doing
   * incremental sync of data, and we don't want to run multiple
   * concurrent sync operations for the same bucket shard
   */
  bool index_key_to_marker(const string& key, const string& marker) {
    if (key_to_marker.find(key) != key_to_marker.end()) {
      set_need_retry(key);
      return false;
    }
    key_to_marker[key] = marker;
    marker_to_key[marker] = key;
    return true;
  }
};
// ostream wrappers to print buckets without copying strings
struct bucket_str {
  const rgw_bucket& b;
  bucket_str(const rgw_bucket& b) : b(b) {}
};
std::ostream& operator<<(std::ostream& out, const bucket_str& rhs) {
  auto& b = rhs.b;
  if (!b.tenant.empty()) {
    out << b.tenant << '/';
  }
  out << b.name;
  if (!b.bucket_id.empty()) {
    out << ':' << b.bucket_id;
  }
  return out;
}

struct bucket_shard_str {
  const rgw_bucket_shard& bs;
  bucket_shard_str(const rgw_bucket_shard& bs) : bs(bs) {}
};
std::ostream& operator<<(std::ostream& out, const bucket_shard_str& rhs) {
  auto& bs = rhs.bs;
  out << bucket_str{bs.bucket};
  if (bs.shard_id >= 0) {
    out << ':' << bs.shard_id;
  }
  return out;
}
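
/* Runs a full or incremental sync pass for a single bucket shard under a
 * continuous lease. */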
class RGWRunBucketSyncCoroutine : public RGWCoroutine {
  RGWDataSyncEnv *sync_env;
  rgw_bucket_shard bs;
  RGWBucketInfo bucket_info;
  rgw_bucket_shard_sync_info sync_status;
  RGWMetaSyncEnv meta_sync_env;

  RGWDataSyncDebugLogger logger;
  const std::string status_oid;

  boost::intrusive_ptr<RGWContinuousLeaseCR> lease_cr;
  boost::intrusive_ptr<RGWCoroutinesStack> lease_stack;

public:
  RGWRunBucketSyncCoroutine(RGWDataSyncEnv *_sync_env, const rgw_bucket_shard& bs)
    : RGWCoroutine(_sync_env->cct), sync_env(_sync_env), bs(bs),
      status_oid(RGWBucketSyncStatusManager::status_oid(sync_env->source_zone, bs)) {
    logger.init(sync_env, "Bucket", bs.get_key());
  }
  ~RGWRunBucketSyncCoroutine() override {
    if (lease_cr) {
      lease_cr->abort();
    }
  }

  int operate() override;
};
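
/* Syncs one datalog entry (a bucket shard): runs bucket sync, logs failures
 * to the error logger, and adds or removes the key in the error repo. */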
class RGWDataSyncSingleEntryCR : public RGWCoroutine {
  RGWDataSyncEnv *sync_env;

  string raw_key;
  string entry_marker;

  rgw_bucket_shard bs;

  int sync_status;

  RGWDataSyncShardMarkerTrack *marker_tracker;

  boost::intrusive_ptr<RGWOmapAppend> error_repo;
  bool remove_from_repo;

  set<string> keys;

public:
  RGWDataSyncSingleEntryCR(RGWDataSyncEnv *_sync_env,
                           const string& _raw_key, const string& _entry_marker, RGWDataSyncShardMarkerTrack *_marker_tracker,
                           RGWOmapAppend *_error_repo, bool _remove_from_repo) : RGWCoroutine(_sync_env->cct),
                                                                                 sync_env(_sync_env),
                                                                                 raw_key(_raw_key), entry_marker(_entry_marker),
                                                                                 sync_status(0),
                                                                                 marker_tracker(_marker_tracker),
                                                                                 error_repo(_error_repo), remove_from_repo(_remove_from_repo) {
    set_description() << "data sync single entry (source_zone=" << sync_env->source_zone << ") key=" <<_raw_key << " entry=" << entry_marker;
  }

  int operate() override {
    reenter(this) {
      do {
        yield {
          int ret = rgw_bucket_parse_bucket_key(sync_env->cct, raw_key,
                                                &bs.bucket, &bs.shard_id);
          if (ret < 0) {
            return set_cr_error(-EIO);
          }
          if (marker_tracker) {
            marker_tracker->reset_need_retry(raw_key);
          }
          call(new RGWRunBucketSyncCoroutine(sync_env, bs));
        }
      } while (marker_tracker && marker_tracker->need_retry(raw_key));

      sync_status = retcode;

      if (sync_status == -ENOENT) {
        // this was added when 'tenant/' was added to datalog entries, because
        // preexisting tenant buckets could never sync and would stay in the
        // error_repo forever
        ldout(sync_env->store->ctx(), 0) << "WARNING: skipping data log entry "
            "for missing bucket " << raw_key << dendl;
        sync_status = 0;
      }

      if (sync_status < 0) {
        yield call(sync_env->error_logger->log_error_cr(sync_env->conn->get_remote_id(), "data", raw_key,
                                                        -sync_status, string("failed to sync bucket instance: ") + cpp_strerror(-sync_status)));
        if (retcode < 0) {
          ldout(sync_env->store->ctx(), 0) << "ERROR: failed to log sync failure: retcode=" << retcode << dendl;
        }
        if (error_repo && !error_repo->append(raw_key)) {
          ldout(sync_env->store->ctx(), 0) << "ERROR: failed to log sync failure in error repo: retcode=" << retcode << dendl;
        }
      } else if (error_repo && remove_from_repo) {
        keys = {raw_key};
        yield call(new RGWRadosRemoveOmapKeysCR(sync_env->store, error_repo->get_obj(), keys));
        if (retcode < 0) {
          ldout(sync_env->store->ctx(), 0) << "ERROR: failed to remove omap key from error repo ("
              << error_repo->get_obj() << " retcode=" << retcode << dendl;
        }
      }
      /* FIXME: what to do in case of error */
      if (marker_tracker && !entry_marker.empty()) {
        /* update marker */
        yield call(marker_tracker->finish(entry_marker));
      }
      if (sync_status == 0) {
        sync_status = retcode;
      }
      if (sync_status < 0) {
        return set_cr_error(sync_status);
      }
      return set_cr_done();
    }
    return 0;
  }
};

#define BUCKET_SHARD_SYNC_SPAWN_WINDOW 20
#define DATA_SYNC_MAX_ERR_ENTRIES 10
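
/* Per-shard data sync state machine: full sync replays the omap index built
 * by RGWListBucketIndexesCR, incremental sync follows the remote datalog,
 * retries entries from the error repo with backoff, and throttles spawned
 * bucket syncs to the spawn window. */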
class RGWDataSyncShardCR : public RGWCoroutine {
  RGWDataSyncEnv *sync_env;

  rgw_pool pool;

  uint32_t shard_id;
  rgw_data_sync_marker sync_marker;

  map<string, bufferlist> entries;
  map<string, bufferlist>::iterator iter;

  string oid;

  RGWDataSyncShardMarkerTrack *marker_tracker;

  list<rgw_data_change_log_entry> log_entries;
  list<rgw_data_change_log_entry>::iterator log_iter;
  bool truncated;

  RGWDataChangesLogInfo shard_info;
  string datalog_marker;

  Mutex inc_lock;

  boost::asio::coroutine incremental_cr;
  boost::asio::coroutine full_cr;

  set<string> modified_shards;
  set<string> current_modified;

  set<string>::iterator modified_iter;

  int total_entries;
  int spawn_window;

  bool *reset_backoff;

  set<string> spawned_keys;

  boost::intrusive_ptr<RGWContinuousLeaseCR> lease_cr;
  boost::intrusive_ptr<RGWCoroutinesStack> lease_stack;

  string status_oid;

  string error_oid;
  RGWOmapAppend *error_repo;
  map<string, bufferlist> error_entries;
  string error_marker;
  int max_error_entries;

  ceph::real_time error_retry_time;

#define RETRY_BACKOFF_SECS_MIN 60
#define RETRY_BACKOFF_SECS_DEFAULT 60
#define RETRY_BACKOFF_SECS_MAX 600
  uint32_t retry_backoff_secs;

  RGWDataSyncDebugLogger logger;
public:
  RGWDataSyncShardCR(RGWDataSyncEnv *_sync_env,
                     rgw_pool& _pool,
                     uint32_t _shard_id, rgw_data_sync_marker& _marker, bool *_reset_backoff) : RGWCoroutine(_sync_env->cct),
                                                      sync_env(_sync_env),
                                                      pool(_pool),
                                                      shard_id(_shard_id),
                                                      sync_marker(_marker),
                                                      marker_tracker(NULL), truncated(false), inc_lock("RGWDataSyncShardCR::inc_lock"),
                                                      total_entries(0), spawn_window(BUCKET_SHARD_SYNC_SPAWN_WINDOW), reset_backoff(NULL),
                                                      lease_cr(nullptr), lease_stack(nullptr), error_repo(nullptr), max_error_entries(DATA_SYNC_MAX_ERR_ENTRIES),
                                                      retry_backoff_secs(RETRY_BACKOFF_SECS_DEFAULT) {
    set_description() << "data sync shard source_zone=" << sync_env->source_zone << " shard_id=" << shard_id;
    status_oid = RGWDataSyncStatusManager::shard_obj_name(sync_env->source_zone, shard_id);
    error_oid = status_oid + ".retry";

    logger.init(sync_env, "DataShard", status_oid);
  }

  ~RGWDataSyncShardCR() override {
    delete marker_tracker;
    if (lease_cr) {
      lease_cr->abort();
    }
    if (error_repo) {
      error_repo->put();
    }
  }

  void append_modified_shards(set<string>& keys) {
    Mutex::Locker l(inc_lock);
    modified_shards.insert(keys.begin(), keys.end());
  }

  void set_marker_tracker(RGWDataSyncShardMarkerTrack *mt) {
    delete marker_tracker;
    marker_tracker = mt;
  }

  int operate() override {
    int r;
    while (true) {
      switch (sync_marker.state) {
      case rgw_data_sync_marker::FullSync:
        r = full_sync();
        if (r < 0) {
          ldout(cct, 10) << "sync: full_sync: shard_id=" << shard_id << " r=" << r << dendl;
          return set_cr_error(r);
        }
        return 0;
      case rgw_data_sync_marker::IncrementalSync:
        r = incremental_sync();
        if (r < 0) {
          ldout(cct, 10) << "sync: incremental_sync: shard_id=" << shard_id << " r=" << r << dendl;
          return set_cr_error(r);
        }
        return 0;
      default:
        return set_cr_error(-EIO);
      }
    }
    return 0;
  }

  void init_lease_cr() {
    set_status("acquiring sync lock");
    uint32_t lock_duration = cct->_conf->rgw_sync_lease_period;
    string lock_name = "sync_lock";
    if (lease_cr) {
      lease_cr->abort();
    }
    RGWRados *store = sync_env->store;
    lease_cr.reset(new RGWContinuousLeaseCR(sync_env->async_rados, store,
                                            rgw_raw_obj(store->get_zone_params().log_pool, status_oid),
                                            lock_name, lock_duration, this));
    lease_stack.reset(spawn(lease_cr.get(), false));
  }

  int full_sync() {
#define OMAP_GET_MAX_ENTRIES 100
    int max_entries = OMAP_GET_MAX_ENTRIES;
    reenter(&full_cr) {
      yield init_lease_cr();
      while (!lease_cr->is_locked()) {
        if (lease_cr->is_done()) {
          ldout(cct, 5) << "lease cr failed, done early " << dendl;
          set_status("lease lock failed, early abort");
          return set_cr_error(lease_cr->get_ret_status());
        }
        set_sleeping(true);
        yield;
      }
      logger.log("full sync");
      oid = full_data_sync_index_shard_oid(sync_env->source_zone, shard_id);
      set_marker_tracker(new RGWDataSyncShardMarkerTrack(sync_env, status_oid, sync_marker));
      total_entries = sync_marker.pos;
      do {
        yield call(new RGWRadosGetOmapKeysCR(sync_env->store, rgw_raw_obj(pool, oid), sync_marker.marker, &entries, max_entries));
        if (retcode < 0) {
          ldout(sync_env->cct, 0) << "ERROR: " << __func__ << "(): RGWRadosGetOmapKeysCR() returned ret=" << retcode << dendl;
          lease_cr->go_down();
          drain_all();
          return set_cr_error(retcode);
        }
        iter = entries.begin();
        for (; iter != entries.end(); ++iter) {
          ldout(sync_env->cct, 20) << __func__ << ": full sync: " << iter->first << dendl;
          total_entries++;
          if (!marker_tracker->start(iter->first, total_entries, real_time())) {
            ldout(sync_env->cct, 0) << "ERROR: cannot start syncing " << iter->first << ". Duplicate entry?" << dendl;
          } else {
            // fetch remote and write locally
            yield spawn(new RGWDataSyncSingleEntryCR(sync_env, iter->first, iter->first, marker_tracker, error_repo, false), false);
            if (retcode < 0) {
              lease_cr->go_down();
              drain_all();
              return set_cr_error(retcode);
            }
          }
          sync_marker.marker = iter->first;
        }
      } while ((int)entries.size() == max_entries);

      lease_cr->go_down();
      drain_all();

      yield {
        /* update marker to reflect we're done with full sync */
        sync_marker.state = rgw_data_sync_marker::IncrementalSync;
        sync_marker.marker = sync_marker.next_step_marker;
        sync_marker.next_step_marker.clear();
        RGWRados *store = sync_env->store;
        call(new RGWSimpleRadosWriteCR<rgw_data_sync_marker>(sync_env->async_rados, store,
                                                             rgw_raw_obj(store->get_zone_params().log_pool, status_oid),
                                                             sync_marker));
      }
      if (retcode < 0) {
        ldout(sync_env->cct, 0) << "ERROR: failed to set sync marker: retcode=" << retcode << dendl;
        lease_cr->go_down();
        return set_cr_error(retcode);
      }
    }
    return 0;
  }

  int incremental_sync() {
    reenter(&incremental_cr) {
      yield init_lease_cr();
      while (!lease_cr->is_locked()) {
        if (lease_cr->is_done()) {
          ldout(cct, 5) << "lease cr failed, done early " << dendl;
          set_status("lease lock failed, early abort");
          return set_cr_error(lease_cr->get_ret_status());
        }
        set_sleeping(true);
        yield;
      }
      set_status("lease acquired");
      error_repo = new RGWOmapAppend(sync_env->async_rados, sync_env->store,
                                     rgw_raw_obj(pool, error_oid),
                                     1 /* no buffer */);
      error_repo->get();
      spawn(error_repo, false);
      logger.log("inc sync");
      set_marker_tracker(new RGWDataSyncShardMarkerTrack(sync_env, status_oid, sync_marker));
      do {
        current_modified.clear();
        inc_lock.Lock();
        current_modified.swap(modified_shards);
        inc_lock.Unlock();

        /* process out of band updates */
        for (modified_iter = current_modified.begin(); modified_iter != current_modified.end(); ++modified_iter) {
          yield {
            ldout(sync_env->cct, 20) << __func__ << "(): async update notification: " << *modified_iter << dendl;
            spawn(new RGWDataSyncSingleEntryCR(sync_env, *modified_iter, string(), marker_tracker, error_repo, false), false);
          }
        }

        /* process bucket shards that previously failed */
        yield call(new RGWRadosGetOmapKeysCR(sync_env->store, rgw_raw_obj(pool, error_oid),
                                             error_marker, &error_entries,
                                             max_error_entries));
        ldout(sync_env->cct, 20) << __func__ << "(): read error repo, got " << error_entries.size() << " entries" << dendl;
        iter = error_entries.begin();
        for (; iter != error_entries.end(); ++iter) {
          ldout(sync_env->cct, 20) << __func__ << "(): handle error entry: " << iter->first << dendl;
          spawn(new RGWDataSyncSingleEntryCR(sync_env, iter->first, iter->first, nullptr /* no marker tracker */, error_repo, true), false);
          error_marker = iter->first;
        }
        if ((int)error_entries.size() != max_error_entries) {
          if (error_marker.empty() && error_entries.empty()) {
            /* the retry repo is empty, we back off a bit before calling it again */
            retry_backoff_secs *= 2;
            if (retry_backoff_secs > RETRY_BACKOFF_SECS_MAX) {
              retry_backoff_secs = RETRY_BACKOFF_SECS_MAX;
            }
          } else {
            retry_backoff_secs = RETRY_BACKOFF_SECS_DEFAULT;
          }
          error_retry_time = ceph::real_clock::now() + make_timespan(retry_backoff_secs);
          error_marker.clear();
        }

        yield call(new RGWReadRemoteDataLogShardInfoCR(sync_env, shard_id, &shard_info));
        if (retcode < 0) {
          ldout(sync_env->cct, 0) << "ERROR: failed to fetch remote data log info: ret=" << retcode << dendl;
          stop_spawned_services();
          drain_all();
          return set_cr_error(retcode);
        }
        datalog_marker = shard_info.marker;
#define INCREMENTAL_MAX_ENTRIES 100
        ldout(sync_env->cct, 20) << __func__ << ":" << __LINE__ << ": shard_id=" << shard_id << " datalog_marker=" << datalog_marker << " sync_marker.marker=" << sync_marker.marker << dendl;
        if (datalog_marker > sync_marker.marker) {
          spawned_keys.clear();
          yield call(new RGWReadRemoteDataLogShardCR(sync_env, shard_id, &sync_marker.marker, &log_entries, &truncated));
          if (retcode < 0) {
            ldout(sync_env->cct, 0) << "ERROR: failed to read remote data log info: ret=" << retcode << dendl;
            stop_spawned_services();
            drain_all();
            return set_cr_error(retcode);
          }
          for (log_iter = log_entries.begin(); log_iter != log_entries.end(); ++log_iter) {
            ldout(sync_env->cct, 20) << __func__ << ":" << __LINE__ << ": shard_id=" << shard_id << " log_entry: " << log_iter->log_id << ":" << log_iter->log_timestamp << ":" << log_iter->entry.key << dendl;
            if (!marker_tracker->index_key_to_marker(log_iter->entry.key, log_iter->log_id)) {
              ldout(sync_env->cct, 20) << __func__ << ": skipping sync of entry: " << log_iter->log_id << ":" << log_iter->entry.key << " sync already in progress for bucket shard" << dendl;
              marker_tracker->try_update_high_marker(log_iter->log_id, 0, log_iter->log_timestamp);
              continue;
            }
            if (!marker_tracker->start(log_iter->log_id, 0, log_iter->log_timestamp)) {
              ldout(sync_env->cct, 0) << "ERROR: cannot start syncing " << log_iter->log_id << ". Duplicate entry?" << dendl;
            } else {
              /*
               * don't spawn the same key more than once. We can do that as long as we don't yield
               */
              if (spawned_keys.find(log_iter->entry.key) == spawned_keys.end()) {
                spawned_keys.insert(log_iter->entry.key);
                spawn(new RGWDataSyncSingleEntryCR(sync_env, log_iter->entry.key, log_iter->log_id, marker_tracker, error_repo, false), false);
                if (retcode < 0) {
                  stop_spawned_services();
                  drain_all();
                  return set_cr_error(retcode);
                }
              }
            }
          }
          while ((int)num_spawned() > spawn_window) {
            set_status() << "num_spawned() > spawn_window";
            yield wait_for_child();
            int ret;
            while (collect(&ret, lease_stack.get())) {
              if (ret < 0) {
                ldout(sync_env->cct, 0) << "ERROR: a sync operation returned error" << dendl;
                /* we have reported this error */
              }
              /* not waiting for child here */
            }
          }
        }
        ldout(sync_env->cct, 20) << __func__ << ":" << __LINE__ << ": shard_id=" << shard_id << " datalog_marker=" << datalog_marker << " sync_marker.marker=" << sync_marker.marker << dendl;
        if (datalog_marker == sync_marker.marker) {
#define INCREMENTAL_INTERVAL 20
          yield wait(utime_t(INCREMENTAL_INTERVAL, 0));
        }
      } while (true);
    }
    return 0;
  }

  void stop_spawned_services() {
    lease_cr->go_down();
    if (error_repo) {
      error_repo->finish();
      error_repo->put();
      error_repo = NULL;
    }
  }
};
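
/* Backoff wrapper around RGWDataSyncShardCR; re-allocates the shard coroutine
 * as needed and forwards out-of-band shard notifications to it. */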
class RGWDataSyncShardControlCR : public RGWBackoffControlCR {
  RGWDataSyncEnv *sync_env;

  rgw_pool pool;

  uint32_t shard_id;
  rgw_data_sync_marker sync_marker;

public:
  RGWDataSyncShardControlCR(RGWDataSyncEnv *_sync_env, rgw_pool& _pool,
                            uint32_t _shard_id, rgw_data_sync_marker& _marker) : RGWBackoffControlCR(_sync_env->cct, false),
                                                                                 sync_env(_sync_env),
                                                                                 pool(_pool),
                                                                                 shard_id(_shard_id),
                                                                                 sync_marker(_marker) {
  }

  RGWCoroutine *alloc_cr() override {
    return new RGWDataSyncShardCR(sync_env, pool, shard_id, sync_marker, backoff_ptr());
  }

  RGWCoroutine *alloc_finisher_cr() override {
    RGWRados *store = sync_env->store;
    return new RGWSimpleRadosReadCR<rgw_data_sync_marker>(sync_env->async_rados, store,
                                                          rgw_raw_obj(store->get_zone_params().log_pool, RGWDataSyncStatusManager::shard_obj_name(sync_env->source_zone, shard_id)),
                                                          &sync_marker);
  }

  void append_modified_shards(set<string>& keys) {
    Mutex::Locker l(cr_lock());

    RGWDataSyncShardCR *cr = static_cast<RGWDataSyncShardCR *>(get_cr());
    if (!cr) {
      return;
    }

    cr->append_modified_shards(keys);
  }
};
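
/* Top-level data sync coroutine: reads (or initializes) the sync status,
 * builds the full sync maps if needed, then spawns one control coroutine per
 * datalog shard. */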
class RGWDataSyncCR : public RGWCoroutine {
  RGWDataSyncEnv *sync_env;
  uint32_t num_shards;

  rgw_data_sync_status sync_status;

  RGWDataSyncShardMarkerTrack *marker_tracker;

  Mutex shard_crs_lock;
  map<int, RGWDataSyncShardControlCR *> shard_crs;

  bool *reset_backoff;

  RGWDataSyncDebugLogger logger;

  RGWDataSyncModule *data_sync_module{nullptr};
public:
  RGWDataSyncCR(RGWDataSyncEnv *_sync_env, uint32_t _num_shards, bool *_reset_backoff) : RGWCoroutine(_sync_env->cct),
                                                      sync_env(_sync_env),
                                                      num_shards(_num_shards),
                                                      marker_tracker(NULL),
                                                      shard_crs_lock("RGWDataSyncCR::shard_crs_lock"),
                                                      reset_backoff(_reset_backoff), logger(sync_env, "Data", "all") {
  }

  ~RGWDataSyncCR() override {
    for (auto iter : shard_crs) {
      iter.second->put();
    }
  }

  int operate() override {
    reenter(this) {

      /* read sync status */
      yield call(new RGWReadDataSyncStatusCoroutine(sync_env, &sync_status));

      data_sync_module = sync_env->sync_module->get_data_handler();

      if (retcode == -ENOENT) {
        sync_status.sync_info.num_shards = num_shards;
      } else if (retcode < 0 && retcode != -ENOENT) {
        ldout(sync_env->cct, 0) << "ERROR: failed to fetch sync status, retcode=" << retcode << dendl;
        return set_cr_error(retcode);
      }

      /* state: init status */
      if ((rgw_data_sync_info::SyncState)sync_status.sync_info.state == rgw_data_sync_info::StateInit) {
        ldout(sync_env->cct, 20) << __func__ << "(): init" << dendl;
        uint64_t instance_id;
        get_random_bytes((char *)&instance_id, sizeof(instance_id));
        yield call(new RGWInitDataSyncStatusCoroutine(sync_env, num_shards, instance_id, &sync_status));
        if (retcode < 0) {
          ldout(sync_env->cct, 0) << "ERROR: failed to init sync, retcode=" << retcode << dendl;
          return set_cr_error(retcode);
        }
        // sets state = StateBuildingFullSyncMaps

        *reset_backoff = true;
      }

      data_sync_module->init(sync_env, sync_status.sync_info.instance_id);

      if ((rgw_data_sync_info::SyncState)sync_status.sync_info.state == rgw_data_sync_info::StateBuildingFullSyncMaps) {
        /* call sync module init here */
        yield call(data_sync_module->init_sync(sync_env));
        if (retcode < 0) {
          ldout(sync_env->cct, 0) << "ERROR: sync module init_sync() failed, retcode=" << retcode << dendl;
          return set_cr_error(retcode);
        }
        /* state: building full sync maps */
        ldout(sync_env->cct, 20) << __func__ << "(): building full sync maps" << dendl;
        yield call(new RGWListBucketIndexesCR(sync_env, &sync_status));
        if (retcode < 0) {
          ldout(sync_env->cct, 0) << "ERROR: failed to build full sync maps, retcode=" << retcode << dendl;
          return set_cr_error(retcode);
        }
        sync_status.sync_info.state = rgw_data_sync_info::StateSync;

        /* update new state */
        yield call(set_sync_info_cr());
        if (retcode < 0) {
          ldout(sync_env->cct, 0) << "ERROR: failed to write sync status, retcode=" << retcode << dendl;
          return set_cr_error(retcode);
        }

        *reset_backoff = true;
      }

      if ((rgw_data_sync_info::SyncState)sync_status.sync_info.state == rgw_data_sync_info::StateSync) {
        for (map<uint32_t, rgw_data_sync_marker>::iterator iter = sync_status.sync_markers.begin();
             iter != sync_status.sync_markers.end(); ++iter) {
          RGWDataSyncShardControlCR *cr = new RGWDataSyncShardControlCR(sync_env, sync_env->store->get_zone_params().log_pool,
                                                                        iter->first, iter->second);
          cr->get();
          shard_crs_lock.Lock();
          shard_crs[iter->first] = cr;
          shard_crs_lock.Unlock();
          spawn(cr, true);
        }
      }

      return set_cr_done();
    }
    return 0;
  }

  RGWCoroutine *set_sync_info_cr() {
    RGWRados *store = sync_env->store;
    return new RGWSimpleRadosWriteCR<rgw_data_sync_info>(sync_env->async_rados, store,
                                                         rgw_raw_obj(store->get_zone_params().log_pool, RGWDataSyncStatusManager::sync_status_oid(sync_env->source_zone)),
                                                         sync_status.sync_info);
  }

  void wakeup(int shard_id, set<string>& keys) {
    Mutex::Locker l(shard_crs_lock);
    map<int, RGWDataSyncShardControlCR *>::iterator iter = shard_crs.find(shard_id);
    if (iter == shard_crs.end()) {
      return;
    }
    iter->second->append_modified_shards(keys);
    iter->second->wakeup();
  }
};
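
/* Default sync module: fetches and removes objects on the local zone using
 * the generic fetch-remote-object and remove-object coroutines. */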
class RGWDefaultDataSyncModule : public RGWDataSyncModule {
public:
  RGWDefaultDataSyncModule() {}

  RGWCoroutine *sync_object(RGWDataSyncEnv *sync_env, RGWBucketInfo& bucket_info, rgw_obj_key& key, uint64_t versioned_epoch, rgw_zone_set *zones_trace) override;
  RGWCoroutine *remove_object(RGWDataSyncEnv *sync_env, RGWBucketInfo& bucket_info, rgw_obj_key& key, real_time& mtime, bool versioned, uint64_t versioned_epoch, rgw_zone_set *zones_trace) override;
  RGWCoroutine *create_delete_marker(RGWDataSyncEnv *sync_env, RGWBucketInfo& bucket_info, rgw_obj_key& key, real_time& mtime,
                                     rgw_bucket_entry_owner& owner, bool versioned, uint64_t versioned_epoch, rgw_zone_set *zones_trace) override;
};

class RGWDefaultSyncModuleInstance : public RGWSyncModuleInstance {
  RGWDefaultDataSyncModule data_handler;
public:
  RGWDefaultSyncModuleInstance() {}
  RGWDataSyncModule *get_data_handler() override {
    return &data_handler;
  }
};

int RGWDefaultSyncModule::create_instance(CephContext *cct, map<string, string, ltstr_nocase>& config, RGWSyncModuleInstanceRef *instance)
{
  instance->reset(new RGWDefaultSyncModuleInstance());
  return 0;
}

RGWCoroutine *RGWDefaultDataSyncModule::sync_object(RGWDataSyncEnv *sync_env, RGWBucketInfo& bucket_info, rgw_obj_key& key, uint64_t versioned_epoch, rgw_zone_set *zones_trace)
{
  return new RGWFetchRemoteObjCR(sync_env->async_rados, sync_env->store, sync_env->source_zone, bucket_info,
                                 key, versioned_epoch,
                                 true, zones_trace);
}

RGWCoroutine *RGWDefaultDataSyncModule::remove_object(RGWDataSyncEnv *sync_env, RGWBucketInfo& bucket_info, rgw_obj_key& key,
                                                      real_time& mtime, bool versioned, uint64_t versioned_epoch, rgw_zone_set *zones_trace)
{
  return new RGWRemoveObjCR(sync_env->async_rados, sync_env->store, sync_env->source_zone,
                            bucket_info, key, versioned, versioned_epoch,
                            NULL, NULL, false, &mtime, zones_trace);
}

RGWCoroutine *RGWDefaultDataSyncModule::create_delete_marker(RGWDataSyncEnv *sync_env, RGWBucketInfo& bucket_info, rgw_obj_key& key, real_time& mtime,
                                                             rgw_bucket_entry_owner& owner, bool versioned, uint64_t versioned_epoch, rgw_zone_set *zones_trace)
{
  return new RGWRemoveObjCR(sync_env->async_rados, sync_env->store, sync_env->source_zone,
                            bucket_info, key, versioned, versioned_epoch,
                            &owner.id, &owner.display_name, true, &mtime, zones_trace);
}
class RGWDataSyncControlCR : public RGWBackoffControlCR
{
  RGWDataSyncEnv *sync_env;
  uint32_t num_shards;

public:
  RGWDataSyncControlCR(RGWDataSyncEnv *_sync_env, uint32_t _num_shards) : RGWBackoffControlCR(_sync_env->cct, true),
                                                                          sync_env(_sync_env), num_shards(_num_shards) {
  }

  RGWCoroutine *alloc_cr() override {
    return new RGWDataSyncCR(sync_env, num_shards, backoff_ptr());
  }

  void wakeup(int shard_id, set<string>& keys) {
    Mutex& m = cr_lock();

    m.Lock();
    RGWDataSyncCR *cr = static_cast<RGWDataSyncCR *>(get_cr());
    if (!cr) {
      m.Unlock();
      return;
    }

    cr->get();
    m.Unlock();

    cr->wakeup(shard_id, keys);

    cr->put();
  }
};
void RGWRemoteDataLog::wakeup(int shard_id, set<string>& keys) {
  RWLock::RLocker rl(lock);
  if (!data_sync_cr) {
    return;
  }
  data_sync_cr->wakeup(shard_id, keys);
}

int RGWRemoteDataLog::run_sync(int num_shards)
{
  lock.get_write();
  data_sync_cr = new RGWDataSyncControlCR(&sync_env, num_shards);
  data_sync_cr->get(); // run() will drop a ref, so take another
  lock.unlock();

  int r = run(data_sync_cr);

  lock.get_write();
  data_sync_cr->put();
  data_sync_cr = NULL;
  lock.unlock();

  if (r < 0) {
    ldout(store->ctx(), 0) << "ERROR: failed to run sync" << dendl;
    return r;
  }
  return 0;
}
int RGWDataSyncStatusManager::init()
{
  auto zone_def_iter = store->zone_by_id.find(source_zone);
  if (zone_def_iter == store->zone_by_id.end()) {
    ldout(store->ctx(), 0) << "ERROR: failed to find zone config info for zone=" << source_zone << dendl;
    return -EIO;
  }

  auto& zone_def = zone_def_iter->second;

  if (!store->get_sync_modules_manager()->supports_data_export(zone_def.tier_type)) {
    return -ENOTSUP;
  }

  RGWZoneParams& zone_params = store->get_zone_params();

  sync_module = store->get_sync_module();

  conn = store->get_zone_conn_by_id(source_zone);
  if (!conn) {
    ldout(store->ctx(), 0) << "connection object to zone " << source_zone << " does not exist" << dendl;
    return -EINVAL;
  }

  error_logger = new RGWSyncErrorLogger(store, RGW_SYNC_ERROR_LOG_SHARD_PREFIX, ERROR_LOGGER_SHARDS);

  int r = source_log.init(source_zone, conn, error_logger, sync_module);
  if (r < 0) {
    lderr(store->ctx()) << "ERROR: failed to init remote log, r=" << r << dendl;
    finalize();
    return r;
  }

  rgw_datalog_info datalog_info;
  r = source_log.read_log_info(&datalog_info);
  if (r < 0) {
    ldout(store->ctx(), 5) << "ERROR: master.read_log_info() returned r=" << r << dendl;
    finalize();
    return r;
  }

  num_shards = datalog_info.num_shards;

  for (int i = 0; i < num_shards; i++) {
    shard_objs[i] = rgw_raw_obj(zone_params.log_pool, shard_obj_name(source_zone, i));
  }

  return 0;
}
void RGWDataSyncStatusManager::finalize()
{
  delete error_logger;
  error_logger = nullptr;
}

string RGWDataSyncStatusManager::sync_status_oid(const string& source_zone)
{
  char buf[datalog_sync_status_oid_prefix.size() + source_zone.size() + 16];
  snprintf(buf, sizeof(buf), "%s.%s", datalog_sync_status_oid_prefix.c_str(), source_zone.c_str());

  return string(buf);
}

string RGWDataSyncStatusManager::shard_obj_name(const string& source_zone, int shard_id)
{
  char buf[datalog_sync_status_shard_prefix.size() + source_zone.size() + 16];
  snprintf(buf, sizeof(buf), "%s.%s.%d", datalog_sync_status_shard_prefix.c_str(), source_zone.c_str(), shard_id);

  return string(buf);
}
int RGWRemoteBucketLog::init(const string& _source_zone, RGWRESTConn *_conn,
                             const rgw_bucket& bucket, int shard_id,
                             RGWSyncErrorLogger *_error_logger,
                             RGWSyncModuleInstanceRef& _sync_module)
{
  conn = _conn;
  source_zone = _source_zone;
  bs.bucket = bucket;
  bs.shard_id = shard_id;

  sync_env.init(store->ctx(), store, conn, async_rados, http_manager, _error_logger, source_zone, _sync_module);

  return 0;
}
struct bucket_index_marker_info {
  string bucket_ver;
  string master_ver;
  string max_marker;

  void decode_json(JSONObj *obj) {
    JSONDecoder::decode_json("bucket_ver", bucket_ver, obj);
    JSONDecoder::decode_json("master_ver", master_ver, obj);
    JSONDecoder::decode_json("max_marker", max_marker, obj);
  }
};
class RGWReadRemoteBucketIndexLogInfoCR : public RGWCoroutine {
  RGWDataSyncEnv *sync_env;
  const string instance_key;

  bucket_index_marker_info *info;

public:
  RGWReadRemoteBucketIndexLogInfoCR(RGWDataSyncEnv *_sync_env,
                                    const rgw_bucket_shard& bs,
                                    bucket_index_marker_info *_info)
    : RGWCoroutine(_sync_env->cct), sync_env(_sync_env),
      instance_key(bs.get_key()), info(_info) {}

  int operate() override {
    reenter(this) {
      yield {
        rgw_http_param_pair pairs[] = { { "type" , "bucket-index" },
                                        { "bucket-instance", instance_key.c_str() },
                                        { "info" , NULL },
                                        { NULL, NULL } };

        string p = "/admin/log/";
        call(new RGWReadRESTResourceCR<bucket_index_marker_info>(sync_env->cct, sync_env->conn, sync_env->http_manager, p, pairs, info));
      }
      if (retcode < 0) {
        return set_cr_error(retcode);
      }
      return set_cr_done();
    }
    return 0;
  }
};
class RGWInitBucketShardSyncStatusCoroutine : public RGWCoroutine {
  RGWDataSyncEnv *sync_env;

  rgw_bucket_shard bs;
  const string sync_status_oid;

  rgw_bucket_shard_sync_info& status;

  bucket_index_marker_info info;
public:
  RGWInitBucketShardSyncStatusCoroutine(RGWDataSyncEnv *_sync_env,
                                        const rgw_bucket_shard& bs,
                                        rgw_bucket_shard_sync_info& _status)
    : RGWCoroutine(_sync_env->cct), sync_env(_sync_env), bs(bs),
      sync_status_oid(RGWBucketSyncStatusManager::status_oid(sync_env->source_zone, bs)),
      status(_status)
  {}

  int operate() override {
    reenter(this) {
      /* fetch current position in logs */
      yield call(new RGWReadRemoteBucketIndexLogInfoCR(sync_env, bs, &info));
      if (retcode < 0 && retcode != -ENOENT) {
        ldout(cct, 0) << "ERROR: failed to fetch bucket index status" << dendl;
        return set_cr_error(retcode);
      }
      yield {
        status.state = rgw_bucket_shard_sync_info::StateFullSync;
        status.inc_marker.position = info.max_marker;
        map<string, bufferlist> attrs;
        status.encode_all_attrs(attrs);
        auto store = sync_env->store;
        call(new RGWSimpleRadosWriteAttrsCR(sync_env->async_rados, store,
                                            rgw_raw_obj(store->get_zone_params().log_pool, sync_status_oid),
                                            attrs));
      }
      return set_cr_done();
    }
    return 0;
  }
};

RGWCoroutine *RGWRemoteBucketLog::init_sync_status_cr()
{
  return new RGWInitBucketShardSyncStatusCoroutine(&sync_env, bs, init_status);
}
template <class T>
static void decode_attr(CephContext *cct, map<string, bufferlist>& attrs, const string& attr_name, T *val)
{
  map<string, bufferlist>::iterator iter = attrs.find(attr_name);
  if (iter == attrs.end()) {
    *val = T();
    return;
  }

  bufferlist::iterator biter = iter->second.begin();
  try {
    ::decode(*val, biter);
  } catch (buffer::error& err) {
    ldout(cct, 0) << "ERROR: failed to decode attribute: " << attr_name << dendl;
  }
}

void rgw_bucket_shard_sync_info::decode_from_attrs(CephContext *cct, map<string, bufferlist>& attrs)
{
  decode_attr(cct, attrs, "state", &state);
  decode_attr(cct, attrs, "full_marker", &full_marker);
  decode_attr(cct, attrs, "inc_marker", &inc_marker);
}

void rgw_bucket_shard_sync_info::encode_all_attrs(map<string, bufferlist>& attrs)
{
  encode_state_attr(attrs);
  full_marker.encode_attr(attrs);
  inc_marker.encode_attr(attrs);
}

void rgw_bucket_shard_sync_info::encode_state_attr(map<string, bufferlist>& attrs)
{
  ::encode(state, attrs["state"]);
}

void rgw_bucket_shard_full_sync_marker::encode_attr(map<string, bufferlist>& attrs)
{
  ::encode(*this, attrs["full_marker"]);
}

void rgw_bucket_shard_inc_sync_marker::encode_attr(map<string, bufferlist>& attrs)
{
  ::encode(*this, attrs["inc_marker"]);
}
class RGWReadBucketSyncStatusCoroutine : public RGWCoroutine {
  RGWDataSyncEnv *sync_env;
  string oid;
  rgw_bucket_shard_sync_info *status;

  map<string, bufferlist> attrs;
public:
  RGWReadBucketSyncStatusCoroutine(RGWDataSyncEnv *_sync_env,
                                   const rgw_bucket_shard& bs,
                                   rgw_bucket_shard_sync_info *_status)
    : RGWCoroutine(_sync_env->cct), sync_env(_sync_env),
      oid(RGWBucketSyncStatusManager::status_oid(sync_env->source_zone, bs)),
      status(_status) {}
  int operate() override;
};

int RGWReadBucketSyncStatusCoroutine::operate()
{
  reenter(this) {
    yield call(new RGWSimpleRadosReadAttrsCR(sync_env->async_rados, sync_env->store,
                                             rgw_raw_obj(sync_env->store->get_zone_params().log_pool, oid),
                                             &attrs));
    if (retcode == -ENOENT) {
      *status = rgw_bucket_shard_sync_info();
      return set_cr_done();
    }
    if (retcode < 0) {
      ldout(sync_env->cct, 0) << "ERROR: failed to call fetch bucket shard info oid=" << oid << " ret=" << retcode << dendl;
      return set_cr_error(retcode);
    }
    status->decode_from_attrs(sync_env->cct, attrs);
    return set_cr_done();
  }
  return 0;
}

RGWCoroutine *RGWRemoteBucketLog::read_sync_status_cr(rgw_bucket_shard_sync_info *sync_status)
{
  return new RGWReadBucketSyncStatusCoroutine(&sync_env, bs, sync_status);
}
RGWBucketSyncStatusManager::~RGWBucketSyncStatusManager() {
  for (map<int, RGWRemoteBucketLog *>::iterator iter = source_logs.begin(); iter != source_logs.end(); ++iter) {
    delete iter->second;
  }
  delete error_logger;
}
void rgw_bucket_entry_owner::decode_json(JSONObj *obj)
{
  JSONDecoder::decode_json("ID", id, obj);
  JSONDecoder::decode_json("DisplayName", display_name, obj);
}

struct bucket_list_entry {
  bool delete_marker;
  rgw_obj_key key;
  bool is_latest;
  real_time mtime;
  string etag;
  uint64_t size;
  string storage_class;
  rgw_bucket_entry_owner owner;
  uint64_t versioned_epoch;
  string rgw_tag;

  bucket_list_entry() : delete_marker(false), is_latest(false), size(0), versioned_epoch(0) {}

  void decode_json(JSONObj *obj) {
    JSONDecoder::decode_json("IsDeleteMarker", delete_marker, obj);
    JSONDecoder::decode_json("Key", key.name, obj);
    JSONDecoder::decode_json("VersionId", key.instance, obj);
    JSONDecoder::decode_json("IsLatest", is_latest, obj);
    string mtime_str;
    JSONDecoder::decode_json("RgwxMtime", mtime_str, obj);

    struct tm t;
    uint32_t nsec;
    if (parse_iso8601(mtime_str.c_str(), &t, &nsec)) {
      ceph_timespec ts;
      ts.tv_sec = (uint64_t)internal_timegm(&t);
      ts.tv_nsec = nsec;
      mtime = real_clock::from_ceph_timespec(ts);
    }
    JSONDecoder::decode_json("ETag", etag, obj);
    JSONDecoder::decode_json("Size", size, obj);
    JSONDecoder::decode_json("StorageClass", storage_class, obj);
    JSONDecoder::decode_json("Owner", owner, obj);
    JSONDecoder::decode_json("VersionedEpoch", versioned_epoch, obj);
    JSONDecoder::decode_json("RgwxTag", rgw_tag, obj);
  }
};
1984 struct bucket_list_result
{
1988 string version_id_marker
;
1991 list
<bucket_list_entry
> entries
;
1993 bucket_list_result() : max_keys(0), is_truncated(false) {}
1995 void decode_json(JSONObj
*obj
) {
1996 JSONDecoder::decode_json("Name", name
, obj
);
1997 JSONDecoder::decode_json("Prefix", prefix
, obj
);
1998 JSONDecoder::decode_json("KeyMarker", key_marker
, obj
);
1999 JSONDecoder::decode_json("VersionIdMarker", version_id_marker
, obj
);
2000 JSONDecoder::decode_json("MaxKeys", max_keys
, obj
);
2001 JSONDecoder::decode_json("IsTruncated", is_truncated
, obj
);
2002 JSONDecoder::decode_json("Entries", entries
, obj
);
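/*
 * Note: these two structs mirror the JSON document returned by the remote
 * zone's object-versions listing requested below by RGWListBucketShardCR
 * (including the rgwx extension fields such as RgwxMtime/RgwxTag and
 * VersionedEpoch), which is why the member names track the S3-style listing
 * keys rather than internal RGW field names.
 */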
class RGWListBucketShardCR : public RGWCoroutine {
  RGWDataSyncEnv *sync_env;
  const rgw_bucket_shard& bs;
  const string instance_key;
  rgw_obj_key marker_position;

  bucket_list_result *result;

public:
  RGWListBucketShardCR(RGWDataSyncEnv *_sync_env, const rgw_bucket_shard& bs,
                       rgw_obj_key& _marker_position, bucket_list_result *_result)
    : RGWCoroutine(_sync_env->cct), sync_env(_sync_env), bs(bs),
      instance_key(bs.get_key()), marker_position(_marker_position),
      result(_result) {}

  int operate() override {
    reenter(this) {
      yield {
        rgw_http_param_pair pairs[] = { { "rgwx-bucket-instance", instance_key.c_str() },
                                        { "versions", NULL },
                                        { "format", "json" },
                                        { "objs-container", "true" },
                                        { "key-marker", marker_position.name.c_str() },
                                        { "version-id-marker", marker_position.instance.c_str() },
                                        { NULL, NULL } };
        // don't include tenant in the url, it's already part of instance_key
        string p = string("/") + bs.bucket.name;
        call(new RGWReadRESTResourceCR<bucket_list_result>(sync_env->cct, sync_env->conn, sync_env->http_manager, p, pairs, result));
      }
      if (retcode < 0) {
        return set_cr_error(retcode);
      }
      return set_cr_done();
    }
    return 0;
  }
};
class RGWListBucketIndexLogCR : public RGWCoroutine {
  RGWDataSyncEnv *sync_env;
  const string instance_key;
  string marker;

  list<rgw_bi_log_entry> *result;

public:
  RGWListBucketIndexLogCR(RGWDataSyncEnv *_sync_env, const rgw_bucket_shard& bs,
                          string& _marker, list<rgw_bi_log_entry> *_result)
    : RGWCoroutine(_sync_env->cct), sync_env(_sync_env),
      instance_key(bs.get_key()), marker(_marker), result(_result) {}

  int operate() override {
    reenter(this) {
      yield {
        rgw_http_param_pair pairs[] = { { "bucket-instance", instance_key.c_str() },
                                        { "format", "json" },
                                        { "marker", marker.c_str() },
                                        { "type", "bucket-index" },
                                        { NULL, NULL } };
        call(new RGWReadRESTResourceCR<list<rgw_bi_log_entry> >(sync_env->cct, sync_env->conn, sync_env->http_manager,
                                                                "/admin/log", pairs, result));
      }
      if (retcode < 0) {
        return set_cr_error(retcode);
      }
      return set_cr_done();
    }
    return 0;
  }
};
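/*
 * The two listing coroutines above feed the two sync phases: full sync pages
 * through the remote shard's current object versions via the bucket REST
 * endpoint, while incremental sync replays bucket index log (bilog) entries
 * fetched from /admin/log starting at a marker.
 */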
#define BUCKET_SYNC_UPDATE_MARKER_WINDOW 10

class RGWBucketFullSyncShardMarkerTrack : public RGWSyncShardMarkerTrack<rgw_obj_key, rgw_obj_key> {
  RGWDataSyncEnv *sync_env;

  string marker_oid;
  rgw_bucket_shard_full_sync_marker sync_marker;

public:
  RGWBucketFullSyncShardMarkerTrack(RGWDataSyncEnv *_sync_env,
                                    const string& _marker_oid,
                                    const rgw_bucket_shard_full_sync_marker& _marker)
    : RGWSyncShardMarkerTrack(BUCKET_SYNC_UPDATE_MARKER_WINDOW),
      sync_env(_sync_env),
      marker_oid(_marker_oid),
      sync_marker(_marker) {}

  RGWCoroutine *store_marker(const rgw_obj_key& new_marker, uint64_t index_pos, const real_time& timestamp) override {
    sync_marker.position = new_marker;
    sync_marker.count = index_pos;

    map<string, bufferlist> attrs;
    sync_marker.encode_attr(attrs);

    RGWRados *store = sync_env->store;

    ldout(sync_env->cct, 20) << __func__ << "(): updating marker marker_oid=" << marker_oid << " marker=" << new_marker << dendl;
    return new RGWSimpleRadosWriteAttrsCR(sync_env->async_rados, store,
                                          rgw_raw_obj(store->get_zone_params().log_pool, marker_oid),
                                          attrs);
  }
};
class RGWBucketIncSyncShardMarkerTrack : public RGWSyncShardMarkerTrack<string, rgw_obj_key> {
  RGWDataSyncEnv *sync_env;

  string marker_oid;
  rgw_bucket_shard_inc_sync_marker sync_marker;

  map<rgw_obj_key, string> key_to_marker;
  map<string, rgw_obj_key> marker_to_key;

  void handle_finish(const string& marker) override {
    map<string, rgw_obj_key>::iterator iter = marker_to_key.find(marker);
    if (iter == marker_to_key.end()) {
      return;
    }
    key_to_marker.erase(iter->second);
    reset_need_retry(iter->second);
    marker_to_key.erase(iter);
  }

public:
  RGWBucketIncSyncShardMarkerTrack(RGWDataSyncEnv *_sync_env,
                                   const string& _marker_oid,
                                   const rgw_bucket_shard_inc_sync_marker& _marker)
    : RGWSyncShardMarkerTrack(BUCKET_SYNC_UPDATE_MARKER_WINDOW),
      sync_env(_sync_env),
      marker_oid(_marker_oid),
      sync_marker(_marker) {}

  RGWCoroutine *store_marker(const string& new_marker, uint64_t index_pos, const real_time& timestamp) override {
    sync_marker.position = new_marker;

    map<string, bufferlist> attrs;
    sync_marker.encode_attr(attrs);

    RGWRados *store = sync_env->store;

    ldout(sync_env->cct, 20) << __func__ << "(): updating marker marker_oid=" << marker_oid << " marker=" << new_marker << dendl;
    return new RGWSimpleRadosWriteAttrsCR(sync_env->async_rados, store,
                                          rgw_raw_obj(store->get_zone_params().log_pool, marker_oid),
                                          attrs);
  }

  /*
   * Create an index from key -> marker, and from marker -> key. This is
   * useful so that we can ensure that we only have one entry for any key
   * that is in use. This is needed when doing incremental sync of data, and
   * we don't want to run multiple concurrent sync operations for the same
   * bucket shard object. Also, we should make sure that we don't run
   * concurrent operations on the same key with different ops.
   */
  bool index_key_to_marker(const rgw_obj_key& key, const string& marker) {
    if (key_to_marker.find(key) != key_to_marker.end()) {
      set_need_retry(key);
      return false;
    }
    key_to_marker[key] = marker;
    marker_to_key[marker] = key;
    return true;
  }

  bool can_do_op(const rgw_obj_key& key) {
    return (key_to_marker.find(key) == key_to_marker.end());
  }
};
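/*
 * Both marker trackers batch their position updates: RGWSyncShardMarkerTrack
 * only calls store_marker() after a window of completed entries
 * (BUCKET_SYNC_UPDATE_MARKER_WINDOW), writing the encoded marker back as an
 * attr on the shard status object. The incremental tracker additionally keeps
 * the key<->marker maps above so that only one in-flight operation per object
 * key is allowed; a conflicting entry is flagged with set_need_retry() and is
 * replayed once the current operation on that key finishes.
 */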
template <class T, class K>
class RGWBucketSyncSingleEntryCR : public RGWCoroutine {
  RGWDataSyncEnv *sync_env;

  RGWBucketInfo *bucket_info;
  const rgw_bucket_shard& bs;

  rgw_obj_key key;
  bool versioned;
  uint64_t versioned_epoch;
  rgw_bucket_entry_owner owner;
  real_time timestamp;
  RGWModifyOp op;
  RGWPendingState op_state;

  T entry_marker;
  RGWSyncShardMarkerTrack<T, K> *marker_tracker;

  int sync_status;

  stringstream error_ss;

  RGWDataSyncDebugLogger logger;

  bool error_injection;

  RGWDataSyncModule *data_sync_module;

  rgw_zone_set zones_trace;

public:
  RGWBucketSyncSingleEntryCR(RGWDataSyncEnv *_sync_env,
                             RGWBucketInfo *_bucket_info,
                             const rgw_bucket_shard& bs,
                             const rgw_obj_key& _key, bool _versioned, uint64_t _versioned_epoch,
                             real_time& _timestamp,
                             const rgw_bucket_entry_owner& _owner,
                             RGWModifyOp _op, RGWPendingState _op_state,
                             const T& _entry_marker, RGWSyncShardMarkerTrack<T, K> *_marker_tracker, rgw_zone_set& _zones_trace)
    : RGWCoroutine(_sync_env->cct),
      sync_env(_sync_env),
      bucket_info(_bucket_info), bs(bs),
      key(_key), versioned(_versioned), versioned_epoch(_versioned_epoch),
      owner(_owner),
      timestamp(_timestamp), op(_op),
      op_state(_op_state),
      entry_marker(_entry_marker),
      marker_tracker(_marker_tracker),
      sync_status(0) {
    stringstream ss;
    ss << bucket_shard_str{bs} << "/" << key << "[" << versioned_epoch << "]";
    set_description() << "bucket sync single entry (source_zone=" << sync_env->source_zone << ") b=" << ss.str() << " log_entry=" << entry_marker << " op=" << (int)op << " op_state=" << (int)op_state;
    ldout(sync_env->cct, 20) << "bucket sync single entry (source_zone=" << sync_env->source_zone << ") b=" << ss.str() << " log_entry=" << entry_marker << " op=" << (int)op << " op_state=" << (int)op_state << dendl;

    logger.init(sync_env, "Object", ss.str());

    error_injection = (sync_env->cct->_conf->rgw_sync_data_inject_err_probability > 0);

    data_sync_module = sync_env->sync_module->get_data_handler();

    zones_trace = _zones_trace;
    zones_trace.insert(sync_env->store->get_zone().id);
  }

  int operate() override {
    reenter(this) {
      /* skip entries that are not complete */
      if (op_state != CLS_RGW_STATE_COMPLETE) {
        goto done;
      }
      do {
        yield {
          marker_tracker->reset_need_retry(key);
          if (key.name.empty()) {
            /* shouldn't happen */
            set_status("skipping empty entry");
            ldout(sync_env->cct, 0) << "ERROR: " << __func__ << "(): entry with empty obj name, skipping" << dendl;
            goto done;
          }
          if (error_injection &&
              rand() % 10000 < cct->_conf->rgw_sync_data_inject_err_probability * 10000.0) {
            ldout(sync_env->cct, 0) << __func__ << ": injecting data sync error on key=" << key.name << dendl;
            retcode = -EIO;
          } else if (op == CLS_RGW_OP_ADD ||
                     op == CLS_RGW_OP_LINK_OLH) {
            if (op == CLS_RGW_OP_ADD && !key.instance.empty() && key.instance != "null") {
              set_status("skipping entry");
              ldout(sync_env->cct, 10) << "bucket skipping sync obj: " << sync_env->source_zone << "/" << bucket_info->bucket << "/" << key << "[" << versioned_epoch << "]: versioned object will be synced on link_olh" << dendl;
              goto done;
            }
            set_status("syncing obj");
            ldout(sync_env->cct, 5) << "bucket sync: sync obj: " << sync_env->source_zone << "/" << bucket_info->bucket << "/" << key << "[" << versioned_epoch << "]" << dendl;
            logger.log("fetch");
            call(data_sync_module->sync_object(sync_env, *bucket_info, key, versioned_epoch, &zones_trace));
          } else if (op == CLS_RGW_OP_DEL || op == CLS_RGW_OP_UNLINK_INSTANCE) {
            set_status("removing obj");
            if (op == CLS_RGW_OP_UNLINK_INSTANCE) {
              versioned = true;
            }
            logger.log("remove");
            call(data_sync_module->remove_object(sync_env, *bucket_info, key, timestamp, versioned, versioned_epoch, &zones_trace));
          } else if (op == CLS_RGW_OP_LINK_OLH_DM) {
            logger.log("creating delete marker");
            set_status("creating delete marker");
            ldout(sync_env->cct, 10) << "creating delete marker: obj: " << sync_env->source_zone << "/" << bucket_info->bucket << "/" << key << "[" << versioned_epoch << "]" << dendl;
            call(data_sync_module->create_delete_marker(sync_env, *bucket_info, key, timestamp, owner, versioned, versioned_epoch, &zones_trace));
          }
        }
      } while (marker_tracker->need_retry(key));
      {
        stringstream ss;
        ss << "done, retcode=" << retcode;
        logger.log(ss.str());
      }

      if (retcode < 0 && retcode != -ENOENT) {
        set_status() << "failed to sync obj; retcode=" << retcode;
        ldout(sync_env->cct, 0) << "ERROR: failed to sync object: "
            << bucket_shard_str{bs} << "/" << key.name << dendl;
        error_ss << bucket_shard_str{bs} << "/" << key.name;
        sync_status = retcode;
      }
      if (!error_ss.str().empty()) {
        yield call(sync_env->error_logger->log_error_cr(sync_env->conn->get_remote_id(), "data", error_ss.str(), -retcode, "failed to sync object"));
      }
done:
      if (sync_status == 0) {
        set_status() << "calling marker_tracker->finish(" << entry_marker << ")";
        yield call(marker_tracker->finish(entry_marker));
        sync_status = retcode;
      }
      if (sync_status < 0) {
        return set_cr_error(sync_status);
      }
      return set_cr_done();
    }
    return 0;
  }
};
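/*
 * Dispatch summary for the entry above: CLS_RGW_OP_ADD / CLS_RGW_OP_LINK_OLH
 * map to data_sync_module->sync_object(), CLS_RGW_OP_DEL /
 * CLS_RGW_OP_UNLINK_INSTANCE to remove_object(), and CLS_RGW_OP_LINK_OLH_DM to
 * create_delete_marker(), so a non-default sync module can override how each
 * bilog operation is applied. The zones_trace set carries the zones that have
 * already handled this change and is extended with the local zone id so the
 * change does not loop back to us.
 */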
#define BUCKET_SYNC_SPAWN_WINDOW 20

class RGWBucketShardFullSyncCR : public RGWCoroutine {
  RGWDataSyncEnv *sync_env;
  const rgw_bucket_shard& bs;
  RGWBucketInfo *bucket_info;
  boost::intrusive_ptr<RGWContinuousLeaseCR> lease_cr;
  bucket_list_result list_result;
  list<bucket_list_entry>::iterator entries_iter;
  rgw_bucket_shard_full_sync_marker& full_marker;
  RGWBucketFullSyncShardMarkerTrack marker_tracker;
  rgw_obj_key list_marker;
  bucket_list_entry *entry{nullptr};
  RGWModifyOp op{CLS_RGW_OP_ADD};

  int total_entries{0};

  int sync_status{0};

  const string& status_oid;

  RGWDataSyncDebugLogger logger;
  rgw_zone_set zones_trace;

public:
  RGWBucketShardFullSyncCR(RGWDataSyncEnv *_sync_env, const rgw_bucket_shard& bs,
                           RGWBucketInfo *_bucket_info,
                           const std::string& status_oid,
                           RGWContinuousLeaseCR *lease_cr,
                           rgw_bucket_shard_full_sync_marker& _full_marker)
    : RGWCoroutine(_sync_env->cct), sync_env(_sync_env), bs(bs),
      bucket_info(_bucket_info), lease_cr(lease_cr), full_marker(_full_marker),
      marker_tracker(sync_env, status_oid, full_marker),
      status_oid(status_oid) {
    logger.init(sync_env, "BucketFull", bs.get_key());
    zones_trace.insert(sync_env->source_zone);
  }

  int operate() override;
};

int RGWBucketShardFullSyncCR::operate()
{
  int ret;
  reenter(this) {
    list_marker = full_marker.position;

    total_entries = full_marker.count;
    do {
      if (!lease_cr->is_locked()) {
        return set_cr_error(-ECANCELED);
      }
      set_status("listing remote bucket");
      ldout(sync_env->cct, 20) << __func__ << "(): listing bucket for full sync" << dendl;
      yield call(new RGWListBucketShardCR(sync_env, bs, list_marker,
                                          &list_result));
      if (retcode < 0 && retcode != -ENOENT) {
        set_status("failed bucket listing, going down");
        return set_cr_error(retcode);
      }
      entries_iter = list_result.entries.begin();
      for (; entries_iter != list_result.entries.end(); ++entries_iter) {
        if (!lease_cr->is_locked()) {
          return set_cr_error(-ECANCELED);
        }
        ldout(sync_env->cct, 20) << "[full sync] syncing object: "
            << bucket_shard_str{bs} << "/" << entries_iter->key << dendl;
        entry = &(*entries_iter);
        total_entries++;
        list_marker = entries_iter->key;
        if (!marker_tracker.start(entry->key, total_entries, real_time())) {
          ldout(sync_env->cct, 0) << "ERROR: cannot start syncing " << entry->key << ". Duplicate entry?" << dendl;
        } else {
          op = (entry->key.instance.empty() || entry->key.instance == "null" ? CLS_RGW_OP_ADD : CLS_RGW_OP_LINK_OLH);
          using SyncCR = RGWBucketSyncSingleEntryCR<rgw_obj_key, rgw_obj_key>;
          yield spawn(new SyncCR(sync_env, bucket_info, bs, entry->key,
                                 false, /* versioned, only matters for object removal */
                                 entry->versioned_epoch, entry->mtime,
                                 entry->owner, op, CLS_RGW_STATE_COMPLETE,
                                 entry->key, &marker_tracker, zones_trace),
                      false);
        }
        while (num_spawned() > BUCKET_SYNC_SPAWN_WINDOW) {
          yield wait_for_child();
          bool again = true;
          while (again) {
            again = collect(&ret, nullptr);
            if (ret < 0) {
              ldout(sync_env->cct, 0) << "ERROR: a sync operation returned error" << dendl;
              sync_status = ret;
              /* we have reported this error */
            }
          }
        }
      }
    } while (list_result.is_truncated && sync_status == 0);
    set_status("done iterating over all objects");
    /* wait for all operations to complete */
    while (num_spawned()) {
      yield wait_for_child();
      bool again = true;
      while (again) {
        again = collect(&ret, nullptr);
        if (ret < 0) {
          ldout(sync_env->cct, 0) << "ERROR: a sync operation returned error" << dendl;
          sync_status = ret;
          /* we have reported this error */
        }
      }
    }
    if (!lease_cr->is_locked()) {
      return set_cr_error(-ECANCELED);
    }
    /* update sync state to incremental */
    if (sync_status == 0) {
      yield {
        rgw_bucket_shard_sync_info sync_status;
        sync_status.state = rgw_bucket_shard_sync_info::StateIncrementalSync;
        map<string, bufferlist> attrs;
        sync_status.encode_state_attr(attrs);
        RGWRados *store = sync_env->store;
        call(new RGWSimpleRadosWriteAttrsCR(sync_env->async_rados, store,
                                            rgw_raw_obj(store->get_zone_params().log_pool, status_oid),
                                            attrs));
      }
    } else {
      ldout(sync_env->cct, 0) << "ERROR: failure in sync, backing out (sync_status=" << sync_status << ")" << dendl;
    }
    if (retcode < 0 && sync_status == 0) { /* actually tried to set incremental state and failed */
      ldout(sync_env->cct, 0) << "ERROR: failed to set sync state on bucket "
          << bucket_shard_str{bs} << " retcode=" << retcode << dendl;
      return set_cr_error(retcode);
    }
    if (sync_status < 0) {
      return set_cr_error(sync_status);
    }
    return set_cr_done();
  }
  return 0;
}
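/*
 * Full sync keeps at most BUCKET_SYNC_SPAWN_WINDOW single-entry coroutines in
 * flight, collecting children as the window fills, and records progress
 * through marker_tracker as <last listed key, entry count>. Once the listing
 * is exhausted without errors, the shard's status object is flipped to
 * StateIncrementalSync so the next run replays bilog entries instead of
 * re-listing the bucket.
 */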
class RGWBucketShardIncrementalSyncCR : public RGWCoroutine {
  RGWDataSyncEnv *sync_env;
  const rgw_bucket_shard& bs;
  RGWBucketInfo *bucket_info;
  boost::intrusive_ptr<RGWContinuousLeaseCR> lease_cr;
  list<rgw_bi_log_entry> list_result;
  list<rgw_bi_log_entry>::iterator entries_iter;
  map<pair<string, string>, pair<real_time, RGWModifyOp> > squash_map;
  rgw_bucket_shard_inc_sync_marker& inc_marker;
  rgw_obj_key key;
  rgw_bi_log_entry *entry{nullptr};
  RGWBucketIncSyncShardMarkerTrack marker_tracker;
  bool updated_status{false};
  const string& status_oid;
  const string& zone_id;

  string cur_id;

  int sync_status{0};

  RGWDataSyncDebugLogger logger;

public:
  RGWBucketShardIncrementalSyncCR(RGWDataSyncEnv *_sync_env,
                                  const rgw_bucket_shard& bs,
                                  RGWBucketInfo *_bucket_info,
                                  const std::string& status_oid,
                                  RGWContinuousLeaseCR *lease_cr,
                                  rgw_bucket_shard_inc_sync_marker& _inc_marker)
    : RGWCoroutine(_sync_env->cct), sync_env(_sync_env), bs(bs),
      bucket_info(_bucket_info), lease_cr(lease_cr), inc_marker(_inc_marker),
      marker_tracker(sync_env, status_oid, inc_marker), status_oid(status_oid), zone_id(_sync_env->store->get_zone().id) {
    set_description() << "bucket shard incremental sync bucket="
        << bucket_shard_str{bs};
    logger.init(sync_env, "BucketInc", bs.get_key());
  }

  int operate() override;
};

int RGWBucketShardIncrementalSyncCR::operate()
{
  int ret;
  reenter(this) {
    do {
      if (!lease_cr->is_locked()) {
        return set_cr_error(-ECANCELED);
      }
      ldout(sync_env->cct, 20) << __func__ << "(): listing bilog for incremental sync" << dendl;
      set_status() << "listing bilog; position=" << inc_marker.position;
      yield call(new RGWListBucketIndexLogCR(sync_env, bs, inc_marker.position,
                                             &list_result));
      if (retcode < 0 && retcode != -ENOENT) {
        /* wait for all operations to complete */
        drain_all();
        return set_cr_error(retcode);
      }
      for (auto& e : list_result) {
        if (e.state != CLS_RGW_STATE_COMPLETE) {
          continue;
        }
        if (e.zones_trace.find(zone_id) != e.zones_trace.end()) {
          continue;
        }
        auto& squash_entry = squash_map[make_pair(e.object, e.instance)];
        if (squash_entry.first <= e.timestamp) {
          squash_entry = make_pair<>(e.timestamp, e.op);
        }
      }
      entries_iter = list_result.begin();
      for (; entries_iter != list_result.end(); ++entries_iter) {
        if (!lease_cr->is_locked()) {
          return set_cr_error(-ECANCELED);
        }
        entry = &(*entries_iter);
        {
          ssize_t p = entry->id.find('#'); /* entries might have explicit shard info in them, e.g., 6#00000000004.94.3 */
          if (p < 0) {
            cur_id = entry->id;
          } else {
            cur_id = entry->id.substr(p + 1);
          }
        }
        inc_marker.position = cur_id;

        if (!key.set(rgw_obj_index_key{entry->object, entry->instance})) {
          set_status() << "parse_raw_oid() on " << entry->object << " returned false, skipping entry";
          ldout(sync_env->cct, 20) << "parse_raw_oid() on " << entry->object << " returned false, skipping entry" << dendl;
          marker_tracker.try_update_high_marker(cur_id, 0, entry->timestamp);
          continue;
        }

        ldout(sync_env->cct, 20) << "parsed entry: id=" << cur_id << " iter->object=" << entry->object << " iter->instance=" << entry->instance << " name=" << key.name << " instance=" << key.instance << " ns=" << key.ns << dendl;

        if (!key.ns.empty()) {
          set_status() << "skipping entry in namespace: " << entry->object;
          ldout(sync_env->cct, 20) << "skipping entry in namespace: " << entry->object << dendl;
          marker_tracker.try_update_high_marker(cur_id, 0, entry->timestamp);
          continue;
        }

        set_status() << "got entry.id=" << cur_id << " key=" << key << " op=" << (int)entry->op;
        if (entry->op == CLS_RGW_OP_CANCEL) {
          set_status() << "canceled operation, skipping";
          ldout(sync_env->cct, 20) << "[inc sync] skipping object: "
              << bucket_shard_str{bs} << "/" << key << ": canceled operation" << dendl;
          marker_tracker.try_update_high_marker(cur_id, 0, entry->timestamp);
          continue;
        }
        if (entry->state != CLS_RGW_STATE_COMPLETE) {
          set_status() << "non-complete operation, skipping";
          ldout(sync_env->cct, 20) << "[inc sync] skipping object: "
              << bucket_shard_str{bs} << "/" << key << ": non-complete operation" << dendl;
          marker_tracker.try_update_high_marker(cur_id, 0, entry->timestamp);
          continue;
        }
        if (entry->zones_trace.find(zone_id) != entry->zones_trace.end()) {
          set_status() << "redundant operation, skipping";
          ldout(sync_env->cct, 20) << "[inc sync] skipping object: "
              << bucket_shard_str{bs} << "/" << key << ": redundant operation" << dendl;
          marker_tracker.try_update_high_marker(cur_id, 0, entry->timestamp);
          continue;
        }
        if (make_pair<>(entry->timestamp, entry->op) != squash_map[make_pair(entry->object, entry->instance)]) {
          set_status() << "squashed operation, skipping";
          ldout(sync_env->cct, 20) << "[inc sync] skipping object: "
              << bucket_shard_str{bs} << "/" << key << ": squashed operation" << dendl;
          /* not updating high marker though */
          continue;
        }
        ldout(sync_env->cct, 20) << "[inc sync] syncing object: "
            << bucket_shard_str{bs} << "/" << key << dendl;
        updated_status = false;
        while (!marker_tracker.can_do_op(key)) {
          if (!updated_status) {
            set_status() << "can't do op, conflicting inflight operation";
            updated_status = true;
          }
          ldout(sync_env->cct, 5) << *this << ": [inc sync] can't do op on key=" << key << " need to wait for conflicting operation to complete" << dendl;
          yield wait_for_child();
          bool again = true;
          while (again) {
            again = collect(&ret, nullptr);
            if (ret < 0) {
              ldout(sync_env->cct, 0) << "ERROR: a child operation returned error (ret=" << ret << ")" << dendl;
              sync_status = ret;
              /* we have reported this error */
            }
          }
        }
        if (!marker_tracker.index_key_to_marker(key, cur_id)) {
          set_status() << "can't do op, sync already in progress for object";
          ldout(sync_env->cct, 20) << __func__ << ": skipping sync of entry: " << cur_id << ":" << key << " sync already in progress for object" << dendl;
          marker_tracker.try_update_high_marker(cur_id, 0, entry->timestamp);
          continue;
        }
        set_status() << "start object sync";
        if (!marker_tracker.start(cur_id, 0, entry->timestamp)) {
          ldout(sync_env->cct, 0) << "ERROR: cannot start syncing " << cur_id << ". Duplicate entry?" << dendl;
        } else {
          uint64_t versioned_epoch = 0;
          rgw_bucket_entry_owner owner(entry->owner, entry->owner_display_name);
          if (entry->ver.pool < 0) {
            versioned_epoch = entry->ver.epoch;
          }
          ldout(sync_env->cct, 20) << __func__ << "(): entry->timestamp=" << entry->timestamp << dendl;
          using SyncCR = RGWBucketSyncSingleEntryCR<string, rgw_obj_key>;
          spawn(new SyncCR(sync_env, bucket_info, bs, key,
                           entry->is_versioned(), versioned_epoch,
                           entry->timestamp, owner, entry->op, entry->state,
                           cur_id, &marker_tracker, entry->zones_trace),
                false);
        }
        while (num_spawned() > BUCKET_SYNC_SPAWN_WINDOW) {
          set_status() << "num_spawned() > spawn_window";
          yield wait_for_child();
          bool again = true;
          while (again) {
            again = collect(&ret, nullptr);
            if (ret < 0) {
              ldout(sync_env->cct, 0) << "ERROR: a sync operation returned error" << dendl;
              sync_status = ret;
              /* we have reported this error */
            }
            /* not waiting for child here */
          }
        }
      }
    } while (!list_result.empty() && sync_status == 0);

    while (num_spawned()) {
      yield wait_for_child();
      bool again = true;
      while (again) {
        again = collect(&ret, nullptr);
        if (ret < 0) {
          ldout(sync_env->cct, 0) << "ERROR: a sync operation returned error" << dendl;
          sync_status = ret;
          /* we have reported this error */
        }
        /* not waiting for child here */
      }
    }

    yield call(marker_tracker.flush());
    if (retcode < 0) {
      ldout(sync_env->cct, 0) << "ERROR: marker_tracker.flush() returned retcode=" << retcode << dendl;
      return set_cr_error(retcode);
    }
    if (sync_status < 0) {
      ldout(sync_env->cct, 0) << "ERROR: failure in sync, backing out (sync_status=" << sync_status << ")" << dendl;
    }

    /* wait for all operations to complete */
    drain_all();

    if (sync_status < 0) {
      return set_cr_error(sync_status);
    }

    return set_cr_done();
  }
  return 0;
}
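/*
 * The squash_map built at the top of each listing pass keeps, per
 * (object, instance), the timestamp and op of the newest complete bilog entry
 * that did not originate from this zone. Any older entry for the same object
 * in the batch is then skipped as "squashed", so only the latest change per
 * object is replayed; entries that already carry this zone in zones_trace are
 * skipped as redundant to avoid echoing our own writes back.
 */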
int RGWRunBucketSyncCoroutine::operate()
{
  reenter(this) {
    yield {
      set_status("acquiring sync lock");
      auto store = sync_env->store;
      lease_cr.reset(new RGWContinuousLeaseCR(sync_env->async_rados, store,
                                              rgw_raw_obj(store->get_zone_params().log_pool, status_oid),
                                              "sync_lock",
                                              cct->_conf->rgw_sync_lease_period,
                                              this));
      lease_stack.reset(spawn(lease_cr.get(), false));
    }
    while (!lease_cr->is_locked()) {
      if (lease_cr->is_done()) {
        ldout(cct, 5) << "lease cr failed, done early" << dendl;
        set_status("lease lock failed, early abort");
        return set_cr_error(lease_cr->get_ret_status());
      }
      set_sleeping(true);
      yield;
    }

    yield call(new RGWReadBucketSyncStatusCoroutine(sync_env, bs, &sync_status));
    if (retcode < 0 && retcode != -ENOENT) {
      ldout(sync_env->cct, 0) << "ERROR: failed to read sync status for bucket="
          << bucket_shard_str{bs} << dendl;
      lease_cr->go_down();
      drain_all();
      return set_cr_error(retcode);
    }

    ldout(sync_env->cct, 20) << __func__ << "(): sync status for bucket "
        << bucket_shard_str{bs} << ": " << sync_status.state << dendl;

    yield call(new RGWGetBucketInstanceInfoCR(sync_env->async_rados, sync_env->store, bs.bucket, &bucket_info));
    if (retcode == -ENOENT) {
      /* bucket instance info has not been synced in yet, fetch it now */
      yield {
        ldout(sync_env->cct, 10) << "no local info for bucket "
            << bucket_str{bs.bucket} << ": fetching metadata" << dendl;
        string raw_key = string("bucket.instance:") + bs.bucket.get_key();

        meta_sync_env.init(cct, sync_env->store, sync_env->store->rest_master_conn, sync_env->async_rados, sync_env->http_manager, sync_env->error_logger);

        call(new RGWMetaSyncSingleEntryCR(&meta_sync_env, raw_key,
                                          string() /* no marker */,
                                          MDLOG_STATUS_COMPLETE,
                                          NULL /* no marker tracker */));
      }
      if (retcode < 0) {
        ldout(sync_env->cct, 0) << "ERROR: failed to fetch bucket instance info for " << bucket_str{bs.bucket} << dendl;
        lease_cr->go_down();
        drain_all();
        return set_cr_error(retcode);
      }

      yield call(new RGWGetBucketInstanceInfoCR(sync_env->async_rados, sync_env->store, bs.bucket, &bucket_info));
    }
    if (retcode < 0) {
      ldout(sync_env->cct, 0) << "ERROR: failed to retrieve bucket info for bucket=" << bucket_str{bs.bucket} << dendl;
      lease_cr->go_down();
      drain_all();
      return set_cr_error(retcode);
    }

    if (sync_status.state == rgw_bucket_shard_sync_info::StateInit) {
      yield call(new RGWInitBucketShardSyncStatusCoroutine(sync_env, bs, sync_status));
      if (retcode < 0) {
        ldout(sync_env->cct, 0) << "ERROR: init sync on " << bucket_shard_str{bs}
            << " failed, retcode=" << retcode << dendl;
        lease_cr->go_down();
        drain_all();
        return set_cr_error(retcode);
      }
    }

    if (sync_status.state == rgw_bucket_shard_sync_info::StateFullSync) {
      yield call(new RGWBucketShardFullSyncCR(sync_env, bs, &bucket_info,
                                              status_oid, lease_cr.get(),
                                              sync_status.full_marker));
      if (retcode < 0) {
        ldout(sync_env->cct, 5) << "full sync on " << bucket_shard_str{bs}
            << " failed, retcode=" << retcode << dendl;
        lease_cr->go_down();
        drain_all();
        return set_cr_error(retcode);
      }
      sync_status.state = rgw_bucket_shard_sync_info::StateIncrementalSync;
    }

    if (sync_status.state == rgw_bucket_shard_sync_info::StateIncrementalSync) {
      yield call(new RGWBucketShardIncrementalSyncCR(sync_env, bs, &bucket_info,
                                                     status_oid, lease_cr.get(),
                                                     sync_status.inc_marker));
      if (retcode < 0) {
        ldout(sync_env->cct, 5) << "incremental sync on " << bucket_shard_str{bs}
            << " failed, retcode=" << retcode << dendl;
        lease_cr->go_down();
        drain_all();
        return set_cr_error(retcode);
      }
    }

    lease_cr->go_down();
    drain_all();
    return set_cr_done();
  }

  return 0;
}

RGWCoroutine *RGWRemoteBucketLog::run_sync_cr()
{
  return new RGWRunBucketSyncCoroutine(&sync_env, bs);
}
int RGWBucketSyncStatusManager::init()
{
  conn = store->get_zone_conn_by_id(source_zone);
  if (!conn) {
    ldout(store->ctx(), 0) << "connection object to zone " << source_zone << " does not exist" << dendl;
    return -EINVAL;
  }

  int ret = http_manager.set_threaded();
  if (ret < 0) {
    ldout(store->ctx(), 0) << "failed in http_manager.set_threaded() ret=" << ret << dendl;
    return ret;
  }

  const string key = bucket.get_key();

  rgw_http_param_pair pairs[] = { { "key", key.c_str() },
                                  { NULL, NULL } };

  string path = string("/admin/metadata/bucket.instance");

  bucket_instance_meta_info result;
  ret = cr_mgr.run(new RGWReadRESTResourceCR<bucket_instance_meta_info>(store->ctx(), conn, &http_manager, path, pairs, &result));
  if (ret < 0) {
    ldout(store->ctx(), 0) << "ERROR: failed to fetch bucket metadata info from zone=" << source_zone << " path=" << path << " key=" << key << " ret=" << ret << dendl;
    return ret;
  }

  RGWBucketInfo& bi = result.data.get_bucket_info();
  num_shards = bi.num_shards;

  error_logger = new RGWSyncErrorLogger(store, RGW_SYNC_ERROR_LOG_SHARD_PREFIX, ERROR_LOGGER_SHARDS);

  sync_module.reset(new RGWDefaultSyncModuleInstance());

  int effective_num_shards = (num_shards ? num_shards : 1);

  auto async_rados = store->get_async_rados();

  for (int i = 0; i < effective_num_shards; i++) {
    RGWRemoteBucketLog *l = new RGWRemoteBucketLog(store, this, async_rados, &http_manager);
    ret = l->init(source_zone, conn, bucket, (num_shards ? i : -1), error_logger, sync_module);
    if (ret < 0) {
      ldout(store->ctx(), 0) << "ERROR: failed to initialize RGWRemoteBucketLog object" << dendl;
      return ret;
    }
    source_logs[i] = l;
  }

  return 0;
}
int RGWBucketSyncStatusManager::init_sync_status()
{
  list<RGWCoroutinesStack *> stacks;

  for (map<int, RGWRemoteBucketLog *>::iterator iter = source_logs.begin(); iter != source_logs.end(); ++iter) {
    RGWCoroutinesStack *stack = new RGWCoroutinesStack(store->ctx(), &cr_mgr);
    RGWRemoteBucketLog *l = iter->second;
    stack->call(l->init_sync_status_cr());

    stacks.push_back(stack);
  }

  return cr_mgr.run(stacks);
}

int RGWBucketSyncStatusManager::read_sync_status()
{
  list<RGWCoroutinesStack *> stacks;

  for (map<int, RGWRemoteBucketLog *>::iterator iter = source_logs.begin(); iter != source_logs.end(); ++iter) {
    RGWCoroutinesStack *stack = new RGWCoroutinesStack(store->ctx(), &cr_mgr);
    RGWRemoteBucketLog *l = iter->second;
    stack->call(l->read_sync_status_cr(&sync_status[iter->first]));

    stacks.push_back(stack);
  }

  int ret = cr_mgr.run(stacks);
  if (ret < 0) {
    ldout(store->ctx(), 0) << "ERROR: failed to read sync status for "
        << bucket_str{bucket} << dendl;
    return ret;
  }

  return 0;
}
int RGWBucketSyncStatusManager::run()
{
  list<RGWCoroutinesStack *> stacks;

  for (map<int, RGWRemoteBucketLog *>::iterator iter = source_logs.begin(); iter != source_logs.end(); ++iter) {
    RGWCoroutinesStack *stack = new RGWCoroutinesStack(store->ctx(), &cr_mgr);
    RGWRemoteBucketLog *l = iter->second;
    stack->call(l->run_sync_cr());

    stacks.push_back(stack);
  }

  int ret = cr_mgr.run(stacks);
  if (ret < 0) {
    ldout(store->ctx(), 0) << "ERROR: failed to read sync status for "
        << bucket_str{bucket} << dendl;
    return ret;
  }

  return 0;
}

string RGWBucketSyncStatusManager::status_oid(const string& source_zone,
                                              const rgw_bucket_shard& bs)
{
  return bucket_status_oid_prefix + "." + source_zone + ":" + bs.get_key();
}
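/*
 * Note: this oid names the same per-shard status object that
 * RGWRunBucketSyncCoroutine takes its sync lease on and that the
 * full/incremental sync coroutines update through their marker trackers, so
 * a single object in the log pool carries the lease, the sync state and both
 * markers for a given <source zone, bucket shard> pair.
 */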
// TODO: move into rgw_data_sync_trim.cc
#undef dout_prefix
#define dout_prefix (*_dout << "data trim: ")

namespace {

/// return the marker that it's safe to trim up to
const std::string& get_stable_marker(const rgw_data_sync_marker& m)
{
  return m.state == m.FullSync ? m.next_step_marker : m.marker;
}

/// comparison operator for take_min_markers()
bool operator<(const rgw_data_sync_marker& lhs,
               const rgw_data_sync_marker& rhs)
{
  // sort by stable marker
  return get_stable_marker(lhs) < get_stable_marker(rhs);
}

/// populate the container starting with 'dest' with the minimum stable marker
/// of each shard for all of the peers in [first, last)
template <typename IterIn, typename IterOut>
void take_min_markers(IterIn first, IterIn last, IterOut dest)
{
  if (first == last) {
    return;
  }
  // initialize markers with the first peer's
  auto m = dest;
  for (auto &shard : first->sync_markers) {
    *m = std::move(shard.second);
    ++m;
  }
  // for remaining peers, replace with smaller markers
  for (auto p = first + 1; p != last; ++p) {
    m = dest;
    for (auto &shard : p->sync_markers) {
      if (shard.second < *m) {
        *m = std::move(shard.second);
      }
      ++m;
    }
  }
}

} // anonymous namespace
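/*
 * Worked example (hypothetical markers): if peer A reports shard 0 at
 * "00000000010.10.5" and peer B reports shard 0 at "00000000007.8.2", the
 * minimum stable marker for shard 0 is peer B's, so DataLogTrimCR below may
 * only trim shard 0 up to "00000000007.8.2"; trimming past the slowest peer
 * would drop log entries that peer still needs to replay.
 */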
class DataLogTrimCR : public RGWCoroutine {
  RGWRados *store;
  RGWHTTPManager *http;
  const int num_shards;
  const std::string& zone_id; //< my zone id
  std::vector<rgw_data_sync_status> peer_status; //< sync status for each peer
  std::vector<rgw_data_sync_marker> min_shard_markers; //< min marker per shard
  std::vector<std::string>& last_trim; //< last trimmed marker per shard
  int ret{0};

 public:
  DataLogTrimCR(RGWRados *store, RGWHTTPManager *http,
                int num_shards, std::vector<std::string>& last_trim)
    : RGWCoroutine(store->ctx()), store(store), http(http),
      num_shards(num_shards),
      zone_id(store->get_zone().id),
      peer_status(store->zone_conn_map.size()),
      min_shard_markers(num_shards),
      last_trim(last_trim)
  {}

  int operate() override;
};

int DataLogTrimCR::operate()
{
  reenter(this) {
    ldout(cct, 10) << "fetching sync status for zone " << zone_id << dendl;
    set_status("fetching sync status");
    yield {
      // query data sync status from each sync peer
      rgw_http_param_pair params[] = {
        { "status", nullptr },
        { "source-zone", zone_id.c_str() },
        { nullptr, nullptr }
      };

      auto p = peer_status.begin();
      for (auto& c : store->zone_conn_map) {
        ldout(cct, 20) << "query sync status from " << c.first << dendl;
        using StatusCR = RGWReadRESTResourceCR<rgw_data_sync_status>;
        spawn(new StatusCR(cct, c.second, http, "/admin/log/", params, &*p),
              false);
        ++p;
      }
    }

    // must get a successful reply from all peers to consider trimming
    while (ret == 0 && num_spawned() > 0) {
      yield wait_for_child();
      collect_next(&ret);
    }

    if (ret < 0) {
      ldout(cct, 4) << "failed to fetch sync status from all peers" << dendl;
      return set_cr_error(ret);
    }

    ldout(cct, 10) << "trimming log shards" << dendl;
    set_status("trimming log shards");
    yield {
      // determine the minimum marker for each shard
      take_min_markers(peer_status.begin(), peer_status.end(),
                       min_shard_markers.begin());

      for (int i = 0; i < num_shards; i++) {
        const auto& m = min_shard_markers[i];
        auto& stable = get_stable_marker(m);
        if (stable <= last_trim[i]) {
          continue;
        }
        ldout(cct, 10) << "trimming log shard " << i
            << " at marker=" << stable
            << " last_trim=" << last_trim[i] << dendl;
        using TrimCR = RGWSyncLogTrimCR;
        spawn(new TrimCR(store, store->data_log->get_oid(i),
                         stable, &last_trim[i]),
              true);
      }
    }
    return set_cr_done();
  }
  return 0;
}
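/*
 * DataLogTrimCR runs once per poll: it queries the data sync status from
 * every peer in zone_conn_map via /admin/log, and only if all replies succeed
 * does it compute the per-shard minimum stable marker and spawn
 * RGWSyncLogTrimCR for the shards whose minimum has advanced past last_trim.
 * A single failed peer aborts the pass, since trimming without knowing that
 * peer's position would not be safe.
 */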
class DataLogTrimPollCR : public RGWCoroutine {
  RGWRados *store;
  RGWHTTPManager *http;
  const int num_shards;
  const utime_t interval; //< polling interval
  const std::string lock_oid; //< use first data log shard for lock
  const std::string lock_cookie;
  std::vector<std::string> last_trim; //< last trimmed marker per shard

 public:
  DataLogTrimPollCR(RGWRados *store, RGWHTTPManager *http,
                    int num_shards, utime_t interval)
    : RGWCoroutine(store->ctx()), store(store), http(http),
      num_shards(num_shards), interval(interval),
      lock_oid(store->data_log->get_oid(0)),
      lock_cookie(RGWSimpleRadosLockCR::gen_random_cookie(cct)),
      last_trim(num_shards)
  {}

  int operate() override;
};

int DataLogTrimPollCR::operate()
{
  reenter(this) {
    for (;;) {
      set_status("sleeping");
      yield wait(interval);

      // request a 'data_trim' lock that covers the entire wait interval to
      // prevent other gateways from attempting to trim for the duration
      set_status("acquiring trim lock");
      yield call(new RGWSimpleRadosLockCR(store->get_async_rados(), store,
                                          rgw_raw_obj(store->get_zone_params().log_pool, lock_oid),
                                          "data_trim", lock_cookie,
                                          interval.sec()));
      if (retcode < 0) {
        // if the lock is already held, go back to sleep and try again later
        ldout(cct, 4) << "failed to lock " << lock_oid << ", trying again in "
            << interval.sec() << "s" << dendl;
        continue;
      }

      set_status("trimming");
      yield call(new DataLogTrimCR(store, http, num_shards, last_trim));

      // note that the lock is not released. this is intentional, as it avoids
      // duplicating this work in other gateways
    }
  }
  return 0;
}

RGWCoroutine* create_data_log_trim_cr(RGWRados *store,
                                      RGWHTTPManager *http,
                                      int num_shards, utime_t interval)
{
  return new DataLogTrimPollCR(store, http, num_shards, interval);
}