// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
-// vim: ts=8 sw=2 smarttab
+// vim: ts=8 sw=2 smarttab ft=cpp
#include <boost/optional.hpp>
#include "rgw_zone.h"
#include "rgw_sync.h"
#include "rgw_metadata.h"
+#include "rgw_mdlog_types.h"
#include "rgw_rest_conn.h"
#include "rgw_tools.h"
#include "rgw_cr_rados.h"
#include "cls/lock/cls_lock_client.h"
#include "services/svc_zone.h"
+#include "services/svc_mdlog.h"
+#include "services/svc_meta.h"
+#include "services/svc_cls.h"
#include <boost/asio/yield.hpp>
static string mdlog_sync_status_shard_prefix = "mdlog.sync-status.shard";
static string mdlog_sync_full_sync_index_prefix = "meta.full-sync.index";
-RGWSyncErrorLogger::RGWSyncErrorLogger(RGWRados *_store, const string &oid_prefix, int _num_shards) : store(_store), num_shards(_num_shards) {
+RGWSyncErrorLogger::RGWSyncErrorLogger(rgw::sal::RGWRadosStore *_store, const string &oid_prefix, int _num_shards) : store(_store), num_shards(_num_shards) {
for (int i = 0; i < num_shards; i++) {
oids.push_back(get_shard_oid(oid_prefix, i));
}
rgw_sync_error_info info(source_zone, error_code, message);
bufferlist bl;
encode(info, bl);
- store->time_log_prepare_entry(entry, real_clock::now(), section, name, bl);
+ store->svc()->cls->timelog.prepare_entry(entry, real_clock::now(), section, name, bl);
uint32_t shard_id = ++counter % num_shards;
// retry the operation until it succeeds
while (true) {
yield {
- Mutex::Locker l(lock);
+ std::lock_guard l{lock};
cr = alloc_cr();
cr->get();
call(cr);
}
{
- Mutex::Locker l(lock);
+ std::lock_guard l{lock};
cr->put();
cr = NULL;
}
int RGWRemoteMetaLog::read_master_log_shards_info(const string &master_period, map<int, RGWMetadataLogInfo> *shards_info)
{
- if (store->svc.zone->is_meta_master()) {
+ if (store->svc()->zone->is_meta_master()) {
return 0;
}
int RGWRemoteMetaLog::read_master_log_shards_next(const string& period, map<int, string> shard_markers, map<int, rgw_mdlog_shard_data> *result)
{
- if (store->svc.zone->is_meta_master()) {
+ if (store->svc()->zone->is_meta_master()) {
return 0;
}
int RGWRemoteMetaLog::init()
{
- conn = store->svc.zone->get_master_conn();
+ conn = store->svc()->zone->get_master_conn();
int ret = http_manager.start();
if (ret < 0) {
int RGWMetaSyncStatusManager::init()
{
- if (store->svc.zone->is_meta_master()) {
+ if (store->svc()->zone->is_meta_master()) {
return 0;
}
- if (!store->svc.zone->get_master_conn()) {
+ if (!store->svc()->zone->get_master_conn()) {
lderr(store->ctx()) << "no REST connection to master zone" << dendl;
return -EIO;
}
- int r = rgw_init_ioctx(store->get_rados_handle(), store->svc.zone->get_zone_params().log_pool, ioctx, true);
+ int r = rgw_init_ioctx(store->getRados()->get_rados_handle(), store->svc()->zone->get_zone_params().log_pool, ioctx, true);
if (r < 0) {
- lderr(store->ctx()) << "ERROR: failed to open log pool (" << store->svc.zone->get_zone_params().log_pool << " ret=" << r << dendl;
+ lderr(store->ctx()) << "ERROR: failed to open log pool (" << store->svc()->zone->get_zone_params().log_pool << " ret=" << r << dendl;
return r;
}
int num_shards = sync_status.sync_info.num_shards;
for (int i = 0; i < num_shards; i++) {
- shard_objs[i] = rgw_raw_obj(store->svc.zone->get_zone_params().log_pool, sync_env.shard_obj_name(i));
+ shard_objs[i] = rgw_raw_obj(store->svc()->zone->get_zone_params().log_pool, sync_env.shard_obj_name(i));
}
- RWLock::WLocker wl(ts_to_shard_lock);
+ std::unique_lock wl{ts_to_shard_lock};
for (int i = 0; i < num_shards; i++) {
clone_markers.push_back(string());
utime_shard ut;
return out << "meta sync: ";
}
-void RGWMetaSyncEnv::init(const DoutPrefixProvider *_dpp, CephContext *_cct, RGWRados *_store, RGWRESTConn *_conn,
+void RGWMetaSyncEnv::init(const DoutPrefixProvider *_dpp, CephContext *_cct, rgw::sal::RGWRadosStore *_store, RGWRESTConn *_conn,
RGWAsyncRadosProcessor *_async_rados, RGWHTTPManager *_http_manager,
RGWSyncErrorLogger *_error_logger, RGWSyncTraceManager *_sync_tracer) {
dpp = _dpp;
}
class RGWAsyncReadMDLogEntries : public RGWAsyncRadosRequest {
- RGWRados *store;
+ rgw::sal::RGWRadosStore *store;
RGWMetadataLog *mdlog;
int shard_id;
string *marker;
return ret;
}
public:
- RGWAsyncReadMDLogEntries(RGWCoroutine *caller, RGWAioCompletionNotifier *cn, RGWRados *_store,
+ RGWAsyncReadMDLogEntries(RGWCoroutine *caller, RGWAioCompletionNotifier *cn, rgw::sal::RGWRadosStore *_store,
RGWMetadataLog* mdlog, int _shard_id,
string* _marker, int _max_entries,
list<cls_log_entry> *_entries, bool *_truncated)
int operate() override {
auto store = env->store;
- RGWRESTConn *conn = store->svc.zone->get_master_conn();
+ RGWRESTConn *conn = store->svc()->zone->get_master_conn();
reenter(this) {
yield {
char buf[16];
return io_block(0);
}
yield {
- int ret = http_op->wait(shard_info);
+ int ret = http_op->wait(shard_info, null_yield);
http_op->put();
if (ret < 0) {
return set_cr_error(ret);
}
};
+// Factory helper: allocates a coroutine that fetches a single remote mdlog
+// shard's info (position marker / timestamps) from the master zone for the
+// given period. Exposed as a free function so callers outside this file can
+// spawn the CR without needing the RGWReadRemoteMDLogShardInfoCR definition.
+RGWCoroutine* create_read_remote_mdlog_shard_info_cr(RGWMetaSyncEnv *env,
+                                                     const std::string& period,
+                                                     int shard_id,
+                                                     RGWMetadataLogInfo* info)
+{
+  return new RGWReadRemoteMDLogShardInfoCR(env, period, shard_id, info);
+}
+
class RGWListRemoteMDLogShardCR : public RGWSimpleCoroutine {
RGWMetaSyncEnv *sync_env;
RGWRESTReadResource *http_op;
}
int request_complete() override {
- int ret = http_op->wait(result);
+ int ret = http_op->wait(result, null_yield);
http_op->put();
if (ret < 0 && ret != -ENOENT) {
ldpp_dout(sync_env->dpp, 0) << "ERROR: failed to list remote mdlog shard, ret=" << ret << dendl;
}
};
+// Factory helper: allocates a coroutine that lists up to max_entries log
+// entries of one remote mdlog shard, starting at 'marker', for the given
+// period; results land in *result. Exposed as a free function so callers
+// outside this file can spawn the CR without seeing the class definition.
+RGWCoroutine* create_list_remote_mdlog_shard_cr(RGWMetaSyncEnv *env,
+                                                const std::string& period,
+                                                int shard_id,
+                                                const std::string& marker,
+                                                uint32_t max_entries,
+                                                rgw_mdlog_shard_data *result)
+{
+  return new RGWListRemoteMDLogShardCR(env, period, shard_id, marker,
+                                       max_entries, result);
+}
+
bool RGWReadRemoteMDLogInfoCR::spawn_next() {
if (shard_id >= num_shards) {
return false;
set_status("acquiring sync lock");
uint32_t lock_duration = cct->_conf->rgw_sync_lease_period;
string lock_name = "sync_lock";
- RGWRados *store = sync_env->store;
+ rgw::sal::RGWRadosStore *store = sync_env->store;
lease_cr.reset(new RGWContinuousLeaseCR(sync_env->async_rados, store,
- rgw_raw_obj(store->svc.zone->get_zone_params().log_pool, sync_env->status_oid()),
+ rgw_raw_obj(store->svc()->zone->get_zone_params().log_pool, sync_env->status_oid()),
lock_name, lock_duration, this));
lease_stack.reset(spawn(lease_cr.get(), false));
}
}
yield {
set_status("writing sync status");
- RGWRados *store = sync_env->store;
- call(new RGWSimpleRadosWriteCR<rgw_meta_sync_info>(sync_env->async_rados, store->svc.sysobj,
- rgw_raw_obj(store->svc.zone->get_zone_params().log_pool, sync_env->status_oid()),
+ rgw::sal::RGWRadosStore *store = sync_env->store;
+ call(new RGWSimpleRadosWriteCR<rgw_meta_sync_info>(sync_env->async_rados, store->svc()->sysobj,
+ rgw_raw_obj(store->svc()->zone->get_zone_params().log_pool, sync_env->status_oid()),
status));
}
RGWMetadataLogInfo& info = shards_info[i];
marker.next_step_marker = info.marker;
marker.timestamp = info.last_update;
- RGWRados *store = sync_env->store;
+ rgw::sal::RGWRadosStore *store = sync_env->store;
spawn(new RGWSimpleRadosWriteCR<rgw_meta_sync_marker>(sync_env->async_rados,
- store->svc.sysobj,
- rgw_raw_obj(store->svc.zone->get_zone_params().log_pool, sync_env->shard_obj_name(i)),
+ store->svc()->sysobj,
+ rgw_raw_obj(store->svc()->zone->get_zone_params().log_pool, sync_env->shard_obj_name(i)),
marker), true);
}
}
yield {
set_status("changing sync state: build full sync maps");
status.state = rgw_meta_sync_info::StateBuildingFullSyncMaps;
- RGWRados *store = sync_env->store;
- call(new RGWSimpleRadosWriteCR<rgw_meta_sync_info>(sync_env->async_rados, store->svc.sysobj,
- rgw_raw_obj(store->svc.zone->get_zone_params().log_pool, sync_env->status_oid()),
+ rgw::sal::RGWRadosStore *store = sync_env->store;
+ call(new RGWSimpleRadosWriteCR<rgw_meta_sync_info>(sync_env->async_rados, store->svc()->sysobj,
+ rgw_raw_obj(store->svc()->zone->get_zone_params().log_pool, sync_env->status_oid()),
status));
}
set_status("drop lock lease");
return false;
}
using CR = RGWSimpleRadosReadCR<rgw_meta_sync_marker>;
- rgw_raw_obj obj{env->store->svc.zone->get_zone_params().log_pool,
+ rgw_raw_obj obj{env->store->svc()->zone->get_zone_params().log_pool,
env->shard_obj_name(shard_id)};
- spawn(new CR(env->async_rados, env->store->svc.sysobj, obj, &markers[shard_id]), false);
+ spawn(new CR(env->async_rados, env->store->svc()->sysobj, obj, &markers[shard_id]), false);
shard_id++;
return true;
}
using ReadInfoCR = RGWSimpleRadosReadCR<rgw_meta_sync_info>;
yield {
bool empty_on_enoent = false; // fail on ENOENT
- rgw_raw_obj obj{sync_env->store->svc.zone->get_zone_params().log_pool,
+ rgw_raw_obj obj{sync_env->store->svc()->zone->get_zone_params().log_pool,
sync_env->status_oid()};
- call(new ReadInfoCR(sync_env->async_rados, sync_env->store->svc.sysobj, obj,
+ call(new ReadInfoCR(sync_env->async_rados, sync_env->store->svc()->sysobj, obj,
&sync_status->sync_info, empty_on_enoent));
}
if (retcode < 0) {
string lock_name = "sync_lock";
lease_cr.reset(new RGWContinuousLeaseCR(sync_env->async_rados,
sync_env->store,
- rgw_raw_obj(sync_env->store->svc.zone->get_zone_params().log_pool, sync_env->status_oid()),
+ rgw_raw_obj(sync_env->store->svc()->zone->get_zone_params().log_pool, sync_env->status_oid()),
lock_name, lock_duration, this));
lease_stack.reset(spawn(lease_cr.get(), false));
}
yield;
}
entries_index.reset(new RGWShardedOmapCRManager(sync_env->async_rados, sync_env->store, this, num_shards,
- sync_env->store->svc.zone->get_zone_params().log_pool,
+ sync_env->store->svc()->zone->get_zone_params().log_pool,
mdlog_sync_full_sync_index_prefix));
yield {
call(new RGWReadRESTResourceCR<list<string> >(cct, conn, sync_env->http_manager,
tn->log(20, SSTR("list metadata: section=" << *sections_iter << " key=" << *iter));
string s = *sections_iter + ":" + *iter;
int shard_id;
- RGWRados *store = sync_env->store;
- int ret = store->meta_mgr->get_log_shard_id(*sections_iter, *iter, &shard_id);
+ rgw::sal::RGWRadosStore *store = sync_env->store;
+ int ret = store->ctl()->meta.mgr->get_shard_id(*sections_iter, *iter, &shard_id);
if (ret < 0) {
tn->log(0, SSTR("ERROR: could not determine shard id for " << *sections_iter << ":" << *iter));
ret_status = ret;
int shard_id = (int)iter->first;
rgw_meta_sync_marker& marker = iter->second;
marker.total_entries = entries_index->get_total_entries(shard_id);
- spawn(new RGWSimpleRadosWriteCR<rgw_meta_sync_marker>(sync_env->async_rados, sync_env->store->svc.sysobj,
- rgw_raw_obj(sync_env->store->svc.zone->get_zone_params().log_pool, sync_env->shard_obj_name(shard_id)),
+ spawn(new RGWSimpleRadosWriteCR<rgw_meta_sync_marker>(sync_env->async_rados, sync_env->store->svc()->sysobj,
+ rgw_raw_obj(sync_env->store->svc()->zone->get_zone_params().log_pool, sync_env->shard_obj_name(shard_id)),
marker), true);
}
}
return io_block(0);
}
yield {
- int ret = http_op->wait(pbl);
+ int ret = http_op->wait(pbl, null_yield);
http_op->put();
if (ret < 0) {
return set_cr_error(ret);
};
class RGWAsyncMetaStoreEntry : public RGWAsyncRadosRequest {
- RGWRados *store;
+ rgw::sal::RGWRadosStore *store;
string raw_key;
bufferlist bl;
protected:
int _send_request() override {
- int ret = store->meta_mgr->put(raw_key, bl, RGWMetadataHandler::APPLY_ALWAYS);
+ int ret = store->ctl()->meta.mgr->put(raw_key, bl, null_yield, RGWMDLogSyncType::APPLY_ALWAYS);
if (ret < 0) {
ldout(store->ctx(), 0) << "ERROR: can't store key: " << raw_key << " ret=" << ret << dendl;
return ret;
return 0;
}
public:
- RGWAsyncMetaStoreEntry(RGWCoroutine *caller, RGWAioCompletionNotifier *cn, RGWRados *_store,
+ RGWAsyncMetaStoreEntry(RGWCoroutine *caller, RGWAioCompletionNotifier *cn, rgw::sal::RGWRadosStore *_store,
const string& _raw_key,
bufferlist& _bl) : RGWAsyncRadosRequest(caller, cn), store(_store),
raw_key(_raw_key), bl(_bl) {}
};
class RGWAsyncMetaRemoveEntry : public RGWAsyncRadosRequest {
- RGWRados *store;
+ rgw::sal::RGWRadosStore *store;
string raw_key;
protected:
int _send_request() override {
- int ret = store->meta_mgr->remove(raw_key);
+ int ret = store->ctl()->meta.mgr->remove(raw_key, null_yield);
if (ret < 0) {
ldout(store->ctx(), 0) << "ERROR: can't remove key: " << raw_key << " ret=" << ret << dendl;
return ret;
return 0;
}
public:
- RGWAsyncMetaRemoveEntry(RGWCoroutine *caller, RGWAioCompletionNotifier *cn, RGWRados *_store,
+ RGWAsyncMetaRemoveEntry(RGWCoroutine *caller, RGWAioCompletionNotifier *cn, rgw::sal::RGWRadosStore *_store,
const string& _raw_key) : RGWAsyncRadosRequest(caller, cn), store(_store),
raw_key(_raw_key) {}
};
ldpp_dout(sync_env->dpp, 20) << __func__ << "(): updating marker marker_oid=" << marker_oid << " marker=" << new_marker << " realm_epoch=" << sync_marker.realm_epoch << dendl;
tn->log(20, SSTR("new marker=" << new_marker));
- RGWRados *store = sync_env->store;
+ rgw::sal::RGWRadosStore *store = sync_env->store;
return new RGWSimpleRadosWriteCR<rgw_meta_sync_marker>(sync_env->async_rados,
- store->svc.sysobj,
- rgw_raw_obj(store->svc.zone->get_zone_params().log_pool, marker_oid),
+ store->svc()->sysobj,
+ rgw_raw_obj(store->svc()->zone->get_zone_params().log_pool, marker_oid),
sync_marker);
}
if (error_injection &&
rand() % 10000 < cct->_conf->rgw_sync_meta_inject_err_probability * 10000.0) {
- ldpp_dout(sync_env->dpp, 0) << __FILE__ << ":" << __LINE__ << ": injecting meta sync error on key=" << raw_key << dendl;
return set_cr_error(-EIO);
}
string raw_key;
rgw_mdlog_entry mdlog_entry;
- Mutex inc_lock;
- Cond inc_cond;
+ ceph::mutex inc_lock = ceph::make_mutex("RGWMetaSyncShardCR::inc_lock");
+ ceph::condition_variable inc_cond;
boost::asio::coroutine incremental_cr;
boost::asio::coroutine full_cr;
: RGWCoroutine(_sync_env->cct), sync_env(_sync_env), pool(_pool),
period(period), realm_epoch(realm_epoch), mdlog(mdlog),
shard_id(_shard_id), sync_marker(_marker),
- period_marker(period_marker), inc_lock("RGWMetaSyncShardCR::inc_lock"),
+ period_marker(period_marker),
reset_backoff(_reset_backoff), tn(_tn) {
*reset_backoff = false;
}
yield {
uint32_t lock_duration = cct->_conf->rgw_sync_lease_period;
string lock_name = "sync_lock";
- RGWRados *store = sync_env->store;
+ rgw::sal::RGWRadosStore *store = sync_env->store;
lease_cr.reset(new RGWContinuousLeaseCR(sync_env->async_rados, store,
rgw_raw_obj(pool, sync_env->shard_obj_name(shard_id)),
lock_name, lock_duration, this));
ldpp_dout(sync_env->dpp, 4) << *this << ": saving marker pos=" << temp_marker->marker << " realm_epoch=" << realm_epoch << dendl;
using WriteMarkerCR = RGWSimpleRadosWriteCR<rgw_meta_sync_marker>;
- yield call(new WriteMarkerCR(sync_env->async_rados, sync_env->store->svc.sysobj,
+ yield call(new WriteMarkerCR(sync_env->async_rados, sync_env->store->svc()->sysobj,
rgw_raw_obj(pool, sync_env->shard_obj_name(shard_id)),
*temp_marker));
}
yield {
uint32_t lock_duration = cct->_conf->rgw_sync_lease_period;
string lock_name = "sync_lock";
- RGWRados *store = sync_env->store;
+ rgw::sal::RGWRadosStore *store = sync_env->store;
lease_cr.reset( new RGWContinuousLeaseCR(sync_env->async_rados, store,
rgw_raw_obj(pool, sync_env->shard_obj_name(shard_id)),
lock_name, lock_duration, this));
}
RGWCoroutine *alloc_finisher_cr() override {
- RGWRados *store = sync_env->store;
- return new RGWSimpleRadosReadCR<rgw_meta_sync_marker>(sync_env->async_rados, store->svc.sysobj,
+ rgw::sal::RGWRadosStore *store = sync_env->store;
+ return new RGWSimpleRadosReadCR<rgw_meta_sync_marker>(sync_env->async_rados, store->svc()->sysobj,
rgw_raw_obj(pool, sync_env->shard_obj_name(shard_id)),
&sync_marker);
}
RGWMetaSyncCR(RGWMetaSyncEnv *_sync_env, const RGWPeriodHistory::Cursor &cursor,
const rgw_meta_sync_status& _sync_status, RGWSyncTraceNodeRef& _tn)
: RGWCoroutine(_sync_env->cct), sync_env(_sync_env),
- pool(sync_env->store->svc.zone->get_zone_params().log_pool),
+ pool(sync_env->store->svc()->zone->get_zone_params().log_pool),
cursor(cursor), sync_status(_sync_status), tn(_tn) {}
~RGWMetaSyncCR() {
// loop through one period at a time
tn->log(1, "start");
for (;;) {
- if (cursor == sync_env->store->period_history->get_current()) {
+ if (cursor == sync_env->store->svc()->mdlog->get_period_history()->get_current()) {
next = RGWPeriodHistory::Cursor{};
if (cursor) {
ldpp_dout(sync_env->dpp, 10) << "RGWMetaSyncCR on current period="
// get the mdlog for the current period (may be empty)
auto& period_id = sync_status.sync_info.period;
auto realm_epoch = sync_status.sync_info.realm_epoch;
- auto mdlog = sync_env->store->meta_mgr->get_log(period_id);
+ auto mdlog = sync_env->store->svc()->mdlog->get_log(period_id);
tn->log(1, SSTR("realm epoch=" << realm_epoch << " period id=" << period_id));
sync_status.sync_info.period = cursor.get_period().get_id();
sync_status.sync_info.realm_epoch = cursor.get_epoch();
yield call(new RGWSimpleRadosWriteCR<rgw_meta_sync_info>(sync_env->async_rados,
- sync_env->store->svc.sysobj,
+ sync_env->store->svc()->sysobj,
rgw_raw_obj(pool, sync_env->status_oid()),
sync_status.sync_info));
}
env->async_rados = async_rados;
env->http_manager = &http_manager;
env->error_logger = error_logger;
- env->sync_tracer = store->get_sync_tracer();
+ env->sync_tracer = store->getRados()->get_sync_tracer();
}
int RGWRemoteMetaLog::read_sync_status(rgw_meta_sync_status *sync_status)
{
- if (store->svc.zone->is_meta_master()) {
+ if (store->svc()->zone->is_meta_master()) {
return 0;
}
// cannot run concurrently with run_sync(), so run in a separate manager
- RGWCoroutinesManager crs(store->ctx(), store->get_cr_registry());
+ RGWCoroutinesManager crs(store->ctx(), store->getRados()->get_cr_registry());
RGWHTTPManager http_manager(store->ctx(), crs.get_completion_mgr());
int ret = http_manager.start();
if (ret < 0) {
int RGWRemoteMetaLog::init_sync_status()
{
- if (store->svc.zone->is_meta_master()) {
+ if (store->svc()->zone->is_meta_master()) {
return 0;
}
rgw_meta_sync_info sync_info;
sync_info.num_shards = mdlog_info.num_shards;
- auto cursor = store->period_history->get_current();
+ auto cursor = store->svc()->mdlog->get_period_history()->get_current();
if (cursor) {
sync_info.period = cursor.get_period().get_id();
sync_info.realm_epoch = cursor.get_epoch();
int RGWRemoteMetaLog::store_sync_info(const rgw_meta_sync_info& sync_info)
{
tn->log(20, "store sync info");
- return run(new RGWSimpleRadosWriteCR<rgw_meta_sync_info>(async_rados, store->svc.sysobj,
- rgw_raw_obj(store->svc.zone->get_zone_params().log_pool, sync_env.status_oid()),
+ return run(new RGWSimpleRadosWriteCR<rgw_meta_sync_info>(async_rados, store->svc()->sysobj,
+ rgw_raw_obj(store->svc()->zone->get_zone_params().log_pool, sync_env.status_oid()),
sync_info));
}
// return a cursor to the period at our sync position
-static RGWPeriodHistory::Cursor get_period_at(RGWRados* store,
+static RGWPeriodHistory::Cursor get_period_at(rgw::sal::RGWRadosStore* store,
const rgw_meta_sync_info& info)
{
if (info.period.empty()) {
}
// look for an existing period in our history
- auto cursor = store->period_history->lookup(info.realm_epoch);
+ auto cursor = store->svc()->mdlog->get_period_history()->lookup(info.realm_epoch);
if (cursor) {
// verify that the period ids match
auto& existing = cursor.get_period().get_id();
// read the period from rados or pull it from the master
RGWPeriod period;
- int r = store->period_puller->pull(info.period, period);
+ int r = store->svc()->mdlog->pull_period(info.period, period);
if (r < 0) {
lderr(store->ctx()) << "ERROR: failed to read period id "
<< info.period << ": " << cpp_strerror(r) << dendl;
return RGWPeriodHistory::Cursor{r};
}
// attach the period to our history
- cursor = store->period_history->attach(std::move(period));
+ cursor = store->svc()->mdlog->get_period_history()->attach(std::move(period));
if (!cursor) {
r = cursor.get_error();
lderr(store->ctx()) << "ERROR: failed to read period history back to "
int RGWRemoteMetaLog::run_sync()
{
- if (store->svc.zone->is_meta_master()) {
+ if (store->svc()->zone->is_meta_master()) {
return 0;
}
if (sync_status.sync_info.state == rgw_meta_sync_info::StateInit) {
ldpp_dout(dpp, 20) << __func__ << "(): init" << dendl;
sync_status.sync_info.num_shards = mdlog_info.num_shards;
- auto cursor = store->period_history->get_current();
+ auto cursor = store->svc()->mdlog->get_period_history()->get_current();
if (cursor) {
// run full sync, then start incremental from the current period/epoch
sync_status.sync_info.period = cursor.get_period().get_id();
int RGWCloneMetaLogCoroutine::state_receive_rest_response()
{
- int ret = http_op->wait(&data);
+ int ret = http_op->wait(&data, null_yield);
if (ret < 0) {
error_stream << "http operation failed: " << http_op->to_str() << " status=" << http_op->get_http_status() << std::endl;
ldpp_dout(sync_env->dpp, 5) << "failed to wait for op, ret=" << ret << dendl;
{
return set_cr_done();
}
-
-
-// TODO: move into rgw_sync_trim.cc
-#undef dout_prefix
-#define dout_prefix (*_dout << "meta trim: ")
-
-/// purge all log shards for the given mdlog
-class PurgeLogShardsCR : public RGWShardCollectCR {
- RGWRados *const store;
- const RGWMetadataLog* mdlog;
- const int num_shards;
- rgw_raw_obj obj;
- int i{0};
-
- static constexpr int max_concurrent = 16;
-
- public:
- PurgeLogShardsCR(RGWRados *store, const RGWMetadataLog* mdlog,
- const rgw_pool& pool, int num_shards)
- : RGWShardCollectCR(store->ctx(), max_concurrent),
- store(store), mdlog(mdlog), num_shards(num_shards), obj(pool, "")
- {}
-
- bool spawn_next() override {
- if (i == num_shards) {
- return false;
- }
- mdlog->get_shard_oid(i++, obj.oid);
- spawn(new RGWRadosRemoveCR(store, obj), false);
- return true;
- }
-};
-
-using Cursor = RGWPeriodHistory::Cursor;
-
-/// purge mdlogs from the oldest up to (but not including) the given realm_epoch
-class PurgePeriodLogsCR : public RGWCoroutine {
- RGWRados *const store;
- RGWMetadataManager *const metadata;
- RGWObjVersionTracker objv;
- Cursor cursor;
- epoch_t realm_epoch;
- epoch_t *last_trim_epoch; //< update last trim on success
-
- public:
- PurgePeriodLogsCR(RGWRados *store, epoch_t realm_epoch, epoch_t *last_trim)
- : RGWCoroutine(store->ctx()), store(store), metadata(store->meta_mgr),
- realm_epoch(realm_epoch), last_trim_epoch(last_trim)
- {}
-
- int operate() override;
-};
-
-int PurgePeriodLogsCR::operate()
-{
- reenter(this) {
- // read our current oldest log period
- yield call(metadata->read_oldest_log_period_cr(&cursor, &objv));
- if (retcode < 0) {
- return set_cr_error(retcode);
- }
- ceph_assert(cursor);
- ldout(cct, 20) << "oldest log realm_epoch=" << cursor.get_epoch()
- << " period=" << cursor.get_period().get_id() << dendl;
-
- // trim -up to- the given realm_epoch
- while (cursor.get_epoch() < realm_epoch) {
- ldout(cct, 4) << "purging log shards for realm_epoch=" << cursor.get_epoch()
- << " period=" << cursor.get_period().get_id() << dendl;
- yield {
- const auto mdlog = metadata->get_log(cursor.get_period().get_id());
- const auto& pool = store->svc.zone->get_zone_params().log_pool;
- auto num_shards = cct->_conf->rgw_md_log_max_shards;
- call(new PurgeLogShardsCR(store, mdlog, pool, num_shards));
- }
- if (retcode < 0) {
- ldout(cct, 1) << "failed to remove log shards: "
- << cpp_strerror(retcode) << dendl;
- return set_cr_error(retcode);
- }
- ldout(cct, 10) << "removed log shards for realm_epoch=" << cursor.get_epoch()
- << " period=" << cursor.get_period().get_id() << dendl;
-
- // update our mdlog history
- yield call(metadata->trim_log_period_cr(cursor, &objv));
- if (retcode == -ENOENT) {
- // must have raced to update mdlog history. return success and allow the
- // winner to continue purging
- ldout(cct, 10) << "already removed log shards for realm_epoch=" << cursor.get_epoch()
- << " period=" << cursor.get_period().get_id() << dendl;
- return set_cr_done();
- } else if (retcode < 0) {
- ldout(cct, 1) << "failed to remove log shards for realm_epoch="
- << cursor.get_epoch() << " period=" << cursor.get_period().get_id()
- << " with: " << cpp_strerror(retcode) << dendl;
- return set_cr_error(retcode);
- }
-
- if (*last_trim_epoch < cursor.get_epoch()) {
- *last_trim_epoch = cursor.get_epoch();
- }
-
- ceph_assert(cursor.has_next()); // get_current() should always come after
- cursor.next();
- }
- return set_cr_done();
- }
- return 0;
-}
-
-namespace {
-
-using connection_map = std::map<std::string, std::unique_ptr<RGWRESTConn>>;
-
-/// construct a RGWRESTConn for each zone in the realm
-template <typename Zonegroups>
-connection_map make_peer_connections(RGWRados *store,
- const Zonegroups& zonegroups)
-{
- connection_map connections;
- for (auto& g : zonegroups) {
- for (auto& z : g.second.zones) {
- std::unique_ptr<RGWRESTConn> conn{
- new RGWRESTConn(store->ctx(), store->svc.zone, z.first, z.second.endpoints)};
- connections.emplace(z.first, std::move(conn));
- }
- }
- return connections;
-}
-
-/// return the marker that it's safe to trim up to
-const std::string& get_stable_marker(const rgw_meta_sync_marker& m)
-{
- return m.state == m.FullSync ? m.next_step_marker : m.marker;
-}
-
-/// comparison operator for take_min_status()
-bool operator<(const rgw_meta_sync_marker& lhs, const rgw_meta_sync_marker& rhs)
-{
- // sort by stable marker
- return get_stable_marker(lhs) < get_stable_marker(rhs);
-}
-
-/// populate the status with the minimum stable marker of each shard for any
-/// peer whose realm_epoch matches the minimum realm_epoch in the input
-template <typename Iter>
-int take_min_status(CephContext *cct, Iter first, Iter last,
- rgw_meta_sync_status *status)
-{
- if (first == last) {
- return -EINVAL;
- }
- const size_t num_shards = cct->_conf->rgw_md_log_max_shards;
-
- status->sync_info.realm_epoch = std::numeric_limits<epoch_t>::max();
- for (auto p = first; p != last; ++p) {
- // validate peer's shard count
- if (p->sync_markers.size() != num_shards) {
- ldout(cct, 1) << "take_min_status got peer status with "
- << p->sync_markers.size() << " shards, expected "
- << num_shards << dendl;
- return -EINVAL;
- }
- if (p->sync_info.realm_epoch < status->sync_info.realm_epoch) {
- // earlier epoch, take its entire status
- *status = std::move(*p);
- } else if (p->sync_info.realm_epoch == status->sync_info.realm_epoch) {
- // same epoch, take any earlier markers
- auto m = status->sync_markers.begin();
- for (auto& shard : p->sync_markers) {
- if (shard.second < m->second) {
- m->second = std::move(shard.second);
- }
- ++m;
- }
- }
- }
- return 0;
-}
-
-struct TrimEnv {
- const DoutPrefixProvider *dpp;
- RGWRados *const store;
- RGWHTTPManager *const http;
- int num_shards;
- const std::string& zone;
- Cursor current; //< cursor to current period
- epoch_t last_trim_epoch{0}; //< epoch of last mdlog that was purged
-
- TrimEnv(const DoutPrefixProvider *dpp, RGWRados *store, RGWHTTPManager *http, int num_shards)
- : dpp(dpp), store(store), http(http), num_shards(num_shards),
- zone(store->svc.zone->get_zone_params().get_id()),
- current(store->period_history->get_current())
- {}
-};
-
-struct MasterTrimEnv : public TrimEnv {
- connection_map connections; //< peer connections
- std::vector<rgw_meta_sync_status> peer_status; //< sync status for each peer
- /// last trim marker for each shard, only applies to current period's mdlog
- std::vector<std::string> last_trim_markers;
-
- MasterTrimEnv(const DoutPrefixProvider *dpp, RGWRados *store, RGWHTTPManager *http, int num_shards)
- : TrimEnv(dpp, store, http, num_shards),
- last_trim_markers(num_shards)
- {
- auto& period = current.get_period();
- connections = make_peer_connections(store, period.get_map().zonegroups);
- connections.erase(zone);
- peer_status.resize(connections.size());
- }
-};
-
-struct PeerTrimEnv : public TrimEnv {
- /// last trim timestamp for each shard, only applies to current period's mdlog
- std::vector<ceph::real_time> last_trim_timestamps;
-
- PeerTrimEnv(const DoutPrefixProvider *dpp, RGWRados *store, RGWHTTPManager *http, int num_shards)
- : TrimEnv(dpp, store, http, num_shards),
- last_trim_timestamps(num_shards)
- {}
-
- void set_num_shards(int num_shards) {
- this->num_shards = num_shards;
- last_trim_timestamps.resize(num_shards);
- }
-};
-
-} // anonymous namespace
-
-
-/// spawn a trim cr for each shard that needs it, while limiting the number
-/// of concurrent shards
-class MetaMasterTrimShardCollectCR : public RGWShardCollectCR {
- private:
- static constexpr int MAX_CONCURRENT_SHARDS = 16;
-
- MasterTrimEnv& env;
- RGWMetadataLog *mdlog;
- int shard_id{0};
- std::string oid;
- const rgw_meta_sync_status& sync_status;
-
- public:
- MetaMasterTrimShardCollectCR(MasterTrimEnv& env, RGWMetadataLog *mdlog,
- const rgw_meta_sync_status& sync_status)
- : RGWShardCollectCR(env.store->ctx(), MAX_CONCURRENT_SHARDS),
- env(env), mdlog(mdlog), sync_status(sync_status)
- {}
-
- bool spawn_next() override;
-};
-
-bool MetaMasterTrimShardCollectCR::spawn_next()
-{
- while (shard_id < env.num_shards) {
- auto m = sync_status.sync_markers.find(shard_id);
- if (m == sync_status.sync_markers.end()) {
- shard_id++;
- continue;
- }
- auto& stable = get_stable_marker(m->second);
- auto& last_trim = env.last_trim_markers[shard_id];
-
- if (stable <= last_trim) {
- // already trimmed
- ldout(cct, 20) << "skipping log shard " << shard_id
- << " at marker=" << stable
- << " last_trim=" << last_trim
- << " realm_epoch=" << sync_status.sync_info.realm_epoch << dendl;
- shard_id++;
- continue;
- }
-
- mdlog->get_shard_oid(shard_id, oid);
-
- ldout(cct, 10) << "trimming log shard " << shard_id
- << " at marker=" << stable
- << " last_trim=" << last_trim
- << " realm_epoch=" << sync_status.sync_info.realm_epoch << dendl;
- spawn(new RGWSyncLogTrimCR(env.store, oid, stable, &last_trim), false);
- shard_id++;
- return true;
- }
- return false;
-}
-
-/// spawn rest requests to read each peer's sync status
-class MetaMasterStatusCollectCR : public RGWShardCollectCR {
- static constexpr int MAX_CONCURRENT_SHARDS = 16;
-
- MasterTrimEnv& env;
- connection_map::iterator c;
- std::vector<rgw_meta_sync_status>::iterator s;
- public:
- explicit MetaMasterStatusCollectCR(MasterTrimEnv& env)
- : RGWShardCollectCR(env.store->ctx(), MAX_CONCURRENT_SHARDS),
- env(env), c(env.connections.begin()), s(env.peer_status.begin())
- {}
-
- bool spawn_next() override {
- if (c == env.connections.end()) {
- return false;
- }
- static rgw_http_param_pair params[] = {
- { "type", "metadata" },
- { "status", nullptr },
- { nullptr, nullptr }
- };
-
- ldout(cct, 20) << "query sync status from " << c->first << dendl;
- auto conn = c->second.get();
- using StatusCR = RGWReadRESTResourceCR<rgw_meta_sync_status>;
- spawn(new StatusCR(cct, conn, env.http, "/admin/log/", params, &*s),
- false);
- ++c;
- ++s;
- return true;
- }
-};
-
-class MetaMasterTrimCR : public RGWCoroutine {
- MasterTrimEnv& env;
- rgw_meta_sync_status min_status; //< minimum sync status of all peers
- int ret{0};
-
- public:
- explicit MetaMasterTrimCR(MasterTrimEnv& env)
- : RGWCoroutine(env.store->ctx()), env(env)
- {}
-
- int operate() override;
-};
-
-int MetaMasterTrimCR::operate()
-{
- reenter(this) {
- // TODO: detect this and fail before we spawn the trim thread?
- if (env.connections.empty()) {
- ldout(cct, 4) << "no peers, exiting" << dendl;
- return set_cr_done();
- }
-
- ldout(cct, 10) << "fetching sync status for zone " << env.zone << dendl;
- // query mdlog sync status from peers
- yield call(new MetaMasterStatusCollectCR(env));
-
- // must get a successful reply from all peers to consider trimming
- if (ret < 0) {
- ldout(cct, 4) << "failed to fetch sync status from all peers" << dendl;
- return set_cr_error(ret);
- }
-
- // determine the minimum epoch and markers
- ret = take_min_status(env.store->ctx(), env.peer_status.begin(),
- env.peer_status.end(), &min_status);
- if (ret < 0) {
- ldout(cct, 4) << "failed to calculate min sync status from peers" << dendl;
- return set_cr_error(ret);
- }
- yield {
- auto store = env.store;
- auto epoch = min_status.sync_info.realm_epoch;
- ldout(cct, 4) << "realm epoch min=" << epoch
- << " current=" << env.current.get_epoch()<< dendl;
- if (epoch > env.last_trim_epoch + 1) {
- // delete any prior mdlog periods
- spawn(new PurgePeriodLogsCR(store, epoch, &env.last_trim_epoch), true);
- } else {
- ldout(cct, 10) << "mdlogs already purged up to realm_epoch "
- << env.last_trim_epoch << dendl;
- }
-
- // if realm_epoch == current, trim mdlog based on markers
- if (epoch == env.current.get_epoch()) {
- auto mdlog = store->meta_mgr->get_log(env.current.get_period().get_id());
- spawn(new MetaMasterTrimShardCollectCR(env, mdlog, min_status), true);
- }
- }
- // ignore any errors during purge/trim because we want to hold the lock open
- return set_cr_done();
- }
- return 0;
-}
-
-
-/// read the first entry of the master's mdlog shard and trim to that position
-class MetaPeerTrimShardCR : public RGWCoroutine {
- RGWMetaSyncEnv& env;
- RGWMetadataLog *mdlog;
- const std::string& period_id;
- const int shard_id;
- RGWMetadataLogInfo info;
- ceph::real_time stable; //< safe timestamp to trim, according to master
- ceph::real_time *last_trim; //< last trimmed timestamp, updated on trim
- rgw_mdlog_shard_data result; //< result from master's mdlog listing
-
- public:
- MetaPeerTrimShardCR(RGWMetaSyncEnv& env, RGWMetadataLog *mdlog,
- const std::string& period_id, int shard_id,
- ceph::real_time *last_trim)
- : RGWCoroutine(env.store->ctx()), env(env), mdlog(mdlog),
- period_id(period_id), shard_id(shard_id), last_trim(last_trim)
- {}
-
- int operate() override;
-};
-
-int MetaPeerTrimShardCR::operate()
-{
- reenter(this) {
- // query master's first mdlog entry for this shard
- yield call(new RGWListRemoteMDLogShardCR(&env, period_id, shard_id,
- "", 1, &result));
- if (retcode < 0) {
- ldpp_dout(env.dpp, 5) << "failed to read first entry from master's mdlog shard "
- << shard_id << " for period " << period_id
- << ": " << cpp_strerror(retcode) << dendl;
- return set_cr_error(retcode);
- }
- if (result.entries.empty()) {
- // if there are no mdlog entries, we don't have a timestamp to compare. we
- // can't just trim everything, because there could be racing updates since
- // this empty reply. query the mdlog shard info to read its max timestamp,
- // then retry the listing to make sure it's still empty before trimming to
- // that
- ldpp_dout(env.dpp, 10) << "empty master mdlog shard " << shard_id
- << ", reading last timestamp from shard info" << dendl;
- // read the mdlog shard info for the last timestamp
- using ShardInfoCR = RGWReadRemoteMDLogShardInfoCR;
- yield call(new ShardInfoCR(&env, period_id, shard_id, &info));
- if (retcode < 0) {
- ldpp_dout(env.dpp, 5) << "failed to read info from master's mdlog shard "
- << shard_id << " for period " << period_id
- << ": " << cpp_strerror(retcode) << dendl;
- return set_cr_error(retcode);
- }
- if (ceph::real_clock::is_zero(info.last_update)) {
- return set_cr_done(); // nothing to trim
- }
- ldpp_dout(env.dpp, 10) << "got mdlog shard info with last update="
- << info.last_update << dendl;
- // re-read the master's first mdlog entry to make sure it hasn't changed
- yield call(new RGWListRemoteMDLogShardCR(&env, period_id, shard_id,
- "", 1, &result));
- if (retcode < 0) {
- ldpp_dout(env.dpp, 5) << "failed to read first entry from master's mdlog shard "
- << shard_id << " for period " << period_id
- << ": " << cpp_strerror(retcode) << dendl;
- return set_cr_error(retcode);
- }
- // if the mdlog is still empty, trim to max marker
- if (result.entries.empty()) {
- stable = info.last_update;
- } else {
- stable = result.entries.front().timestamp;
-
- // can only trim -up to- master's first timestamp, so subtract a second.
- // (this is why we use timestamps instead of markers for the peers)
- stable -= std::chrono::seconds(1);
- }
- } else {
- stable = result.entries.front().timestamp;
- stable -= std::chrono::seconds(1);
- }
-
- if (stable <= *last_trim) {
- ldpp_dout(env.dpp, 10) << "skipping log shard " << shard_id
- << " at timestamp=" << stable
- << " last_trim=" << *last_trim << dendl;
- return set_cr_done();
- }
-
- ldpp_dout(env.dpp, 10) << "trimming log shard " << shard_id
- << " at timestamp=" << stable
- << " last_trim=" << *last_trim << dendl;
- yield {
- std::string oid;
- mdlog->get_shard_oid(shard_id, oid);
- call(new RGWRadosTimelogTrimCR(env.store, oid, real_time{}, stable, "", ""));
- }
- if (retcode < 0 && retcode != -ENODATA) {
- ldpp_dout(env.dpp, 1) << "failed to trim mdlog shard " << shard_id
- << ": " << cpp_strerror(retcode) << dendl;
- return set_cr_error(retcode);
- }
- *last_trim = stable;
- return set_cr_done();
- }
- return 0;
-}
-
-class MetaPeerTrimShardCollectCR : public RGWShardCollectCR {
- static constexpr int MAX_CONCURRENT_SHARDS = 16;
-
- PeerTrimEnv& env;
- RGWMetadataLog *mdlog;
- const std::string& period_id;
- RGWMetaSyncEnv meta_env; //< for RGWListRemoteMDLogShardCR
- int shard_id{0};
-
- public:
- MetaPeerTrimShardCollectCR(PeerTrimEnv& env, RGWMetadataLog *mdlog)
- : RGWShardCollectCR(env.store->ctx(), MAX_CONCURRENT_SHARDS),
- env(env), mdlog(mdlog), period_id(env.current.get_period().get_id())
- {
- meta_env.init(env.dpp, cct, env.store, env.store->svc.zone->get_master_conn(),
- env.store->get_async_rados(), env.http, nullptr,
- env.store->get_sync_tracer());
- }
-
- bool spawn_next() override;
-};
-
-bool MetaPeerTrimShardCollectCR::spawn_next()
-{
- if (shard_id >= env.num_shards) {
- return false;
- }
- auto& last_trim = env.last_trim_timestamps[shard_id];
- spawn(new MetaPeerTrimShardCR(meta_env, mdlog, period_id, shard_id, &last_trim),
- false);
- shard_id++;
- return true;
-}
-
-class MetaPeerTrimCR : public RGWCoroutine {
- PeerTrimEnv& env;
- rgw_mdlog_info mdlog_info; //< master's mdlog info
-
- public:
- explicit MetaPeerTrimCR(PeerTrimEnv& env) : RGWCoroutine(env.store->ctx()), env(env) {}
-
- int operate() override;
-};
-
-int MetaPeerTrimCR::operate()
-{
- reenter(this) {
- ldout(cct, 10) << "fetching master mdlog info" << dendl;
- yield {
- // query mdlog_info from master for oldest_log_period
- rgw_http_param_pair params[] = {
- { "type", "metadata" },
- { nullptr, nullptr }
- };
-
- using LogInfoCR = RGWReadRESTResourceCR<rgw_mdlog_info>;
- call(new LogInfoCR(cct, env.store->svc.zone->get_master_conn(), env.http,
- "/admin/log/", params, &mdlog_info));
- }
- if (retcode < 0) {
- ldout(cct, 4) << "failed to read mdlog info from master" << dendl;
- return set_cr_error(retcode);
- }
- // use master's shard count instead
- env.set_num_shards(mdlog_info.num_shards);
-
- if (mdlog_info.realm_epoch > env.last_trim_epoch + 1) {
- // delete any prior mdlog periods
- yield call(new PurgePeriodLogsCR(env.store, mdlog_info.realm_epoch,
- &env.last_trim_epoch));
- } else {
- ldout(cct, 10) << "mdlogs already purged through realm_epoch "
- << env.last_trim_epoch << dendl;
- }
-
- // if realm_epoch == current, trim mdlog based on master's markers
- if (mdlog_info.realm_epoch == env.current.get_epoch()) {
- yield {
- auto meta_mgr = env.store->meta_mgr;
- auto mdlog = meta_mgr->get_log(env.current.get_period().get_id());
- call(new MetaPeerTrimShardCollectCR(env, mdlog));
- // ignore any errors during purge/trim because we want to hold the lock open
- }
- }
- return set_cr_done();
- }
- return 0;
-}
-
-class MetaTrimPollCR : public RGWCoroutine {
- RGWRados *const store;
- const utime_t interval; //< polling interval
- const rgw_raw_obj obj;
- const std::string name{"meta_trim"}; //< lock name
- const std::string cookie;
-
- protected:
- /// allocate the coroutine to run within the lease
- virtual RGWCoroutine* alloc_cr() = 0;
-
- public:
- MetaTrimPollCR(RGWRados *store, utime_t interval)
- : RGWCoroutine(store->ctx()), store(store), interval(interval),
- obj(store->svc.zone->get_zone_params().log_pool, RGWMetadataLogHistory::oid),
- cookie(RGWSimpleRadosLockCR::gen_random_cookie(cct))
- {}
-
- int operate() override;
-};
-
-int MetaTrimPollCR::operate()
-{
- reenter(this) {
- for (;;) {
- set_status("sleeping");
- wait(interval);
-
- // prevent others from trimming for our entire wait interval
- set_status("acquiring trim lock");
- yield call(new RGWSimpleRadosLockCR(store->get_async_rados(), store,
- obj, name, cookie, interval.sec()));
- if (retcode < 0) {
- ldout(cct, 4) << "failed to lock: " << cpp_strerror(retcode) << dendl;
- continue;
- }
-
- set_status("trimming");
- yield call(alloc_cr());
-
- if (retcode < 0) {
- // on errors, unlock so other gateways can try
- set_status("unlocking");
- yield call(new RGWSimpleRadosUnlockCR(store->get_async_rados(), store,
- obj, name, cookie));
- }
- }
- }
- return 0;
-}
-
-class MetaMasterTrimPollCR : public MetaTrimPollCR {
- MasterTrimEnv env; //< trim state to share between calls
- RGWCoroutine* alloc_cr() override {
- return new MetaMasterTrimCR(env);
- }
- public:
- MetaMasterTrimPollCR(const DoutPrefixProvider *dpp, RGWRados *store, RGWHTTPManager *http,
- int num_shards, utime_t interval)
- : MetaTrimPollCR(store, interval),
- env(dpp, store, http, num_shards)
- {}
-};
-
-class MetaPeerTrimPollCR : public MetaTrimPollCR {
- PeerTrimEnv env; //< trim state to share between calls
- RGWCoroutine* alloc_cr() override {
- return new MetaPeerTrimCR(env);
- }
- public:
- MetaPeerTrimPollCR(const DoutPrefixProvider *dpp, RGWRados *store, RGWHTTPManager *http,
- int num_shards, utime_t interval)
- : MetaTrimPollCR(store, interval),
- env(dpp, store, http, num_shards)
- {}
-};
-
-RGWCoroutine* create_meta_log_trim_cr(const DoutPrefixProvider *dpp, RGWRados *store, RGWHTTPManager *http,
- int num_shards, utime_t interval)
-{
- if (store->svc.zone->is_meta_master()) {
- return new MetaMasterTrimPollCR(dpp, store, http, num_shards, interval);
- }
- return new MetaPeerTrimPollCR(dpp, store, http, num_shards, interval);
-}
-
-
-struct MetaMasterAdminTrimCR : private MasterTrimEnv, public MetaMasterTrimCR {
- MetaMasterAdminTrimCR(const DoutPrefixProvider *dpp, RGWRados *store, RGWHTTPManager *http, int num_shards)
- : MasterTrimEnv(dpp, store, http, num_shards),
- MetaMasterTrimCR(*static_cast<MasterTrimEnv*>(this))
- {}
-};
-
-struct MetaPeerAdminTrimCR : private PeerTrimEnv, public MetaPeerTrimCR {
- MetaPeerAdminTrimCR(const DoutPrefixProvider *dpp, RGWRados *store, RGWHTTPManager *http, int num_shards)
- : PeerTrimEnv(dpp, store, http, num_shards),
- MetaPeerTrimCR(*static_cast<PeerTrimEnv*>(this))
- {}
-};
-
-RGWCoroutine* create_admin_meta_log_trim_cr(const DoutPrefixProvider *dpp, RGWRados *store,
- RGWHTTPManager *http,
- int num_shards)
-{
- if (store->svc.zone->is_meta_master()) {
- return new MetaMasterAdminTrimCR(dpp, store, http, num_shards);
- }
- return new MetaPeerAdminTrimCR(dpp, store, http, num_shards);
-}