1 // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
2 // vim: ts=8 sw=2 smarttab ft=cpp
7 #include "rgw_period_pusher.h"
8 #include "rgw_cr_rest.h"
12 #include "services/svc_zone.h"
14 #include "common/errno.h"
16 #include <boost/asio/yield.hpp>
18 #define dout_subsys ceph_subsys_rgw
21 #define dout_prefix (*_dout << "rgw period pusher: ")
/// A coroutine to post the period over the given connection.
/// Template parameters: RGWPeriod is the JSON-encoded request body type,
/// int is the (unused) response body type.
using PushCR = RGWPostRESTResourceCR<RGWPeriod, int>;
/// A coroutine that calls PushCR, and retries with backoff until success.
class PushAndRetryCR : public RGWCoroutine {
  const std::string& zone; //< name of the target zone (owned by caller)
  RGWRESTConn *const conn; //< REST connection to the target zone
  RGWHTTPManager *const http; //< http manager driving the REST call

  const std::string epoch; //< epoch string for params
  double timeout; //< current interval between retries
  const double timeout_max; //< maximum interval between retries
  uint32_t counter; //< number of failures since backoff increased

  // NOTE(review): a RGWPeriod& member is initialized below (period(period))
  // but its declaration is not visible here — the extraction appears to have
  // dropped it; confirm against the upstream file.
  PushAndRetryCR(CephContext* cct, const std::string& zone, RGWRESTConn* conn,
                 RGWHTTPManager* http, RGWPeriod& period)
    : RGWCoroutine(cct), zone(zone), conn(conn), http(http), period(period),
      epoch(std::to_string(period.get_epoch())),
      // initial and maximum retry intervals come from configuration
      timeout(cct->_conf->rgw_period_push_interval),
      timeout_max(cct->_conf->rgw_period_push_interval_max),
      // NOTE(review): the tail of this init list (presumably counter{0}) and
      // the constructor body are missing from this view — confirm upstream.

  int operate() override;
// Coroutine body: POST the period to the remote zone, retrying with
// exponential backoff (capped at timeout_max) until the push succeeds.
// NOTE(review): the reenter()/yield scaffolding, the retry loop, the
// `utime_t dur` declaration, the wait() call, and the return statements are
// missing from this view — only the statements below survived extraction.
int PushAndRetryCR::operate()
    ldout(cct, 10) << "pushing period " << period.get_id()
        << " to " << zone << dendl;
    // initialize the http params
    rgw_http_param_pair params[] = {
      { "period", period.get_id().c_str() },
      { "epoch", epoch.c_str() },
    // issue the POST to /admin/realm/period on the remote zone;
    // the response body is discarded (nullptr)
    call(new PushCR(cct, conn, http, "/admin/realm/period",
                    params, period, nullptr));
    // a zero status means the remote accepted the period
    if (get_ret_status() == 0) {
      ldout(cct, 10) << "push to " << zone << " succeeded" << dendl;
    // try each endpoint in the connection before waiting
    if (++counter < conn->get_endpoint_count())
    // wait with exponential backoff up to timeout_max
    dur.set_from_double(timeout);
    ldout(cct, 10) << "waiting " << dur << "s for retry.." << dendl;
    // clamp the (presumably doubled) interval to the configured maximum
    if (timeout > timeout_max)
      timeout = timeout_max;
/*
 * PushAllCR is a coroutine that sends the period over all of the given
 * connections, retrying until they are all marked as completed.
 */
class PushAllCR : public RGWCoroutine {
  RGWHTTPManager *const http; //< http manager shared by all pushes
  RGWPeriod period; //< period object to push
  std::map<std::string, RGWRESTConn> conns; //< zones that need the period

  // NOTE(review): the `public:` specifier, constructor body, and closing
  // brace are missing from this view — confirm against the upstream file.
  PushAllCR(CephContext* cct, RGWHTTPManager* http, RGWPeriod&& period,
            std::map<std::string, RGWRESTConn>&& conns)
    : RGWCoroutine(cct), http(http),
      // take ownership of the period and connection map by move
      period(std::move(period)),
      conns(std::move(conns))

  int operate() override;
// Coroutine body: spawn one PushAndRetryCR per connection and complete once
// all of them have finished.
// NOTE(review): the reenter()/yield scaffolding and the wait-for-children
// step between the spawn loop and set_cr_done() are missing from this view.
int PushAllCR::operate()
    // spawn a coroutine to push the period over each connection
    ldout(cct, 4) << "sending " << conns.size() << " periods" << dendl;
    // c.first is the zone name, c.second the RGWRESTConn; `false` means the
    // child is not waited on immediately
    for (auto& c : conns)
      spawn(new PushAndRetryCR(cct, c.first, &c.second, http, period), false);
    // wait for all to complete
    return set_cr_done();
/// A background thread to run the PushAllCR coroutine and exit.
class RGWPeriodPusher::CRThread {
  RGWCoroutinesManager coroutines; //< manager that drives push_all to completion
  // NOTE(review): the declarations of the `http` manager and `thread` members
  // referenced below are missing from this view — confirm upstream.
  boost::intrusive_ptr<PushAllCR> push_all; //< root coroutine for this thread

  CRThread(CephContext* cct, RGWPeriod&& period,
           std::map<std::string, RGWRESTConn>&& conns)
    : coroutines(cct, NULL),
      // the http manager reports completions back into the coroutine manager
      http(cct, coroutines.get_completion_mgr()),
      push_all(new PushAllCR(cct, &http, std::move(period), std::move(conns)))
    // must spawn the CR thread after start
    thread = std::thread([this] { coroutines.run(push_all.get()); });

    // NOTE(review): destructor header and the stop/join statements following
    // this joinable() check are missing from this view.
    if (thread.joinable())
// Constructor: if a realm is configured, load its current period and push it
// out to the other zones via handle_notify().
// NOTE(review): the local `RGWPeriod period` declaration, the `if (r < 0)`
// check around the lderr() below, the early returns, and the enclosing braces
// are missing from this view — confirm against the upstream file.
RGWPeriodPusher::RGWPeriodPusher(rgw::sal::RGWRadosStore* store)
  : cct(store->ctx()), store(store)
  const auto& realm = store->svc()->zone->get_realm();
  auto& realm_id = realm.get_id();
  if (realm_id.empty()) // no realm configuration

  // always send out the current period on startup
  int r = period.init(cct, store->svc()->sysobj, realm_id, realm.get_name());
    lderr(cct) << "failed to load period for realm " << realm_id << dendl;

  // serialize with concurrent handle_notify()/pause()/resume() callers
  std::lock_guard<std::mutex> lock(mutex);
  handle_notify(std::move(period));

// destructor is here because CRThread is incomplete in the header
RGWPeriodPusher::~RGWPeriodPusher() = default;
// Notification entry point: decode the period payload and either queue it
// (when paused, i.e. store == nullptr) or process it immediately.
// NOTE(review): the try { decode(info, p); } block preceding the catch below,
// and the return statements, are missing from this view — confirm upstream.
void RGWPeriodPusher::handle_notify(RGWRealmNotify type,
                                    bufferlist::const_iterator& p)
  RGWZonesNeedPeriod info;
  } catch (buffer::error& e) {
    lderr(cct) << "Failed to decode the period: " << e.what() << dendl;
  std::lock_guard<std::mutex> lock(mutex);
  // we can't process this notification without access to our current realm
  // configuration. queue it until resume()
  if (store == nullptr) {
    pending_periods.emplace_back(std::move(info));
  // store is available: process the update now (lock already held)
  handle_notify(std::move(info));
// expects the caller to hold a lock on mutex
// Decide whether the new period is newer than what we've already pushed; if
// so, and if this zone is its zonegroup's master, build the set of peer
// connections and spawn a CRThread to push the period to all of them.
// NOTE(review): the early `return`/`continue` statements after the discard
// checks and endpoint-empty checks, the `if (conns.empty())` guard before
// "No zones to update", and several closing braces are missing from this
// view — confirm against the upstream file.
void RGWPeriodPusher::handle_notify(RGWZonesNeedPeriod&& period)
  // discard updates from an older realm epoch
  if (period.get_realm_epoch() < realm_epoch) {
    ldout(cct, 10) << "period's realm epoch " << period.get_realm_epoch()
        << " is not newer than current realm epoch " << realm_epoch
        << ", discarding update" << dendl;
  // same realm epoch: discard unless the period epoch advanced
  if (period.get_realm_epoch() == realm_epoch &&
      period.get_epoch() <= period_epoch) {
    ldout(cct, 10) << "period epoch " << period.get_epoch() << " is not newer "
        "than current epoch " << period_epoch << ", discarding update" << dendl;
  // find our zonegroup in the new period
  auto& zonegroups = period.get_map().zonegroups;
  auto i = zonegroups.find(store->svc()->zone->get_zonegroup().get_id());
  if (i == zonegroups.end()) {
    lderr(cct) << "The new period does not contain my zonegroup!" << dendl;
  auto& my_zonegroup = i->second;
  // if we're not a master zone, we're not responsible for pushing any updates
  if (my_zonegroup.master_zone != store->svc()->zone->get_zone_params().get_id())
  // construct a map of the zones that need this period. the map uses the same
  // keys/ordering as the zone[group] map, so we can use a hint for insertions
  std::map<std::string, RGWRESTConn> conns;
  auto hint = conns.end();
  // are we the master zonegroup in this period?
  if (period.get_map().master_zonegroup ==
      store->svc()->zone->get_zonegroup().get_id()) {
    // update other zonegroup endpoints
    for (auto& zg : zonegroups) {
      auto& zonegroup = zg.second;
      // skip our own zonegroup and zonegroups with no endpoints to contact
      if (zonegroup.get_id() == store->svc()->zone->get_zonegroup().get_id())
      if (zonegroup.endpoints.empty())
      // construct the RGWRESTConn in place, keyed by zonegroup id
      hint = conns.emplace_hint(
          hint, std::piecewise_construct,
          std::forward_as_tuple(zonegroup.get_id()),
          std::forward_as_tuple(cct, store->svc()->zone,
                                zonegroup.get_id(), zonegroup.endpoints));
  // update other zone endpoints
  for (auto& z : my_zonegroup.zones) {
    auto& zone = z.second;
    // skip our own zone and zones with no endpoints to contact
    if (zone.id == store->svc()->zone->get_zone_params().get_id())
    if (zone.endpoints.empty())
    // construct the RGWRESTConn in place, keyed by zone id
    hint = conns.emplace_hint(
        hint, std::piecewise_construct,
        std::forward_as_tuple(zone.id),
        std::forward_as_tuple(cct, store->svc()->zone, zone.id, zone.endpoints));
    ldout(cct, 4) << "No zones to update" << dendl;
  // remember what we pushed so older notifications can be discarded above
  realm_epoch = period.get_realm_epoch();
  period_epoch = period.get_epoch();
  ldout(cct, 4) << "Zone master pushing period " << period.get_id()
      << " epoch " << period_epoch << " to "
      << conns.size() << " other zones" << dendl;
  // spawn a new coroutine thread, destroying the previous one
  cr_thread.reset(new CRThread(cct, std::move(period), std::move(conns)));
// Pause processing during a realm update; subsequent notifications are
// queued (see handle_notify's store == nullptr branch) until resume().
// NOTE(review): the statement that detaches the store (presumably
// `store = nullptr;`) is missing from this view — confirm upstream.
void RGWPeriodPusher::pause()
  ldout(cct, 4) << "paused for realm update" << dendl;
  std::lock_guard<std::mutex> lock(mutex);
// Resume after a realm update with a (possibly new) store, then drain the
// notifications that were queued while paused.
// NOTE(review): the assignment of the new store to the member (and any
// surrounding braces) is missing from this view — confirm upstream.
void RGWPeriodPusher::resume(rgw::sal::RGWRadosStore* store)
  std::lock_guard<std::mutex> lock(mutex);
  ldout(cct, 4) << "resume with " << pending_periods.size()
      << " periods pending" << dendl;
  // process notification queue
  for (auto& info : pending_periods) {
    handle_notify(std::move(info));
  pending_periods.clear();