// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab ft=cpp

#include "rgw_realm_reloader.h"
#include "rgw_rados.h"

#include "rgw_bucket.h"
#include "rgw_log.h"
#include "rgw_rest.h"
#include "rgw_user.h"
#include "rgw_sal.h"

#include "services/svc_zone.h"

#include "common/errno.h"

#define dout_subsys ceph_subsys_rgw

#undef dout_prefix
#define dout_prefix (*_dout << "rgw realm reloader: ")

// safe callbacks from SafeTimer are unnecessary. reload() can take a long
// time, so we don't want to hold the mutex and block handle_notify() for the
// duration
static constexpr bool USE_SAFE_TIMER_CALLBACKS = false;
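// with unsafe callbacks, the timer thread invokes C_Reload::finish() without
// holding `mutex`; a safe-callback timer would deadlock here, because it
// fires callbacks while holding the same `mutex` that reload() itself locks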
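// note: `store` is a reference to the caller's pointer (wired up in
// rgw_main.cc), so when reload() swaps in a new store below, the caller's
// pointer is updated in place before being handed to frontends via resume()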
RGWRealmReloader::RGWRealmReloader(rgw::sal::RGWRadosStore*& store, std::map<std::string, std::string>& service_map_meta,
                                   Pauser* frontends)
  : store(store),
    service_map_meta(service_map_meta),
    frontends(frontends),
    timer(store->ctx(), mutex, USE_SAFE_TIMER_CALLBACKS),
    mutex(ceph::make_mutex("RGWRealmReloader")),
    reload_scheduled(nullptr)
{
  timer.init();
}

RGWRealmReloader::~RGWRealmReloader()
{
  std::lock_guard lock{mutex};
  timer.shutdown();
}

class RGWRealmReloader::C_Reload : public Context {
  RGWRealmReloader* reloader;
 public:
  explicit C_Reload(RGWRealmReloader* reloader) : reloader(reloader) {}
  void finish(int r) override { reloader->reload(); }
};
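// SafeTimer takes ownership of a scheduled Context and deletes it once it
// has run (or been cancelled), which is why handle_notify() allocates a
// fresh C_Reload each time it schedules a reload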

void RGWRealmReloader::handle_notify(RGWRealmNotify type,
                                     bufferlist::const_iterator& p)
{
  if (!store) {
    /* we're in the middle of a reload */
    return;
  }

  CephContext *const cct = store->ctx();

  std::lock_guard lock{mutex};
  if (reload_scheduled) {
    ldout(cct, 4) << "Notification on realm, reconfiguration "
        "already scheduled" << dendl;
    return;
  }

  reload_scheduled = new C_Reload(this);
  cond.notify_one(); // wake reload() if it blocked on a bad configuration

  // schedule reload() without delay
  timer.add_event_after(0, reload_scheduled);

  ldout(cct, 4) << "Notification on realm, reconfiguration scheduled" << dendl;
}
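
// A sketch of how this fits together in rgw_main.cc (paraphrased from the
// Octopus tree, not verbatim): the realm watcher delivers
// RGWRealmNotify::Reload events to handle_notify() above.
//
//   RGWRealmReloader reloader(store, service_map_meta, &pauser);
//   RGWRealmWatcher realm_watcher(g_ceph_context,
//                                 store->svc()->zone->get_realm());
//   realm_watcher.add_watcher(RGWRealmNotify::Reload, reloader);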

void RGWRealmReloader::reload()
{
  CephContext *const cct = store->ctx();
  ldout(cct, 1) << "Pausing frontends for realm update..." << dendl;

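  // pause() is expected to quiesce all frontend request processing before it
  // returns (see RGWFrontendPauser in rgw_main.cc), so no request can touch
  // the store while it's destroyed and rebuilt below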
  frontends->pause();

  ldout(cct, 1) << "Frontends paused" << dendl;

  // TODO: make RGWRados responsible for rgw_log_usage lifetime
  rgw_log_usage_finalize();

  // destroy the existing store
  RGWStoreManager::close_storage(store);
  store = nullptr;

  ldout(cct, 1) << "Store closed" << dendl;
  {
    // allow a new notify to reschedule us. it's important that we do this
    // before we start loading the new realm, or we could miss some updates
    std::lock_guard lock{mutex};
    reload_scheduled = nullptr;
  }

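  // retry loop: each pass tries to construct a new store. if construction
  // fails, block until the next realm notification; if a notification
  // arrives while a store is being built, discard the fresh store and build
  // another against the newest configuration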
  while (!store) {
    // recreate and initialize a new store
    store =
      RGWStoreManager::get_storage(cct,
                                   cct->_conf->rgw_enable_gc_threads,
                                   cct->_conf->rgw_enable_lc_threads,
                                   cct->_conf->rgw_enable_quota_threads,
                                   cct->_conf->rgw_run_sync_thread,
                                   cct->_conf.get_val<bool>("rgw_dynamic_resharding"),
                                   cct->_conf->rgw_cache_enabled);
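    // get_storage() returns nullptr on failure (checked below); the flags
    // control which background threads (gc, lifecycle, quota, sync) run and
    // whether dynamic resharding and the metadata cache are enabled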

    ldout(cct, 1) << "Creating new store" << dendl;

    rgw::sal::RGWRadosStore* store_cleanup = nullptr;
    {
      std::unique_lock lock{mutex};

      // failure to recreate RGWRados is not a recoverable error, but we
      // don't want to assert or abort the entire cluster. instead, just
      // sleep until we get another notification, and retry until we get
      // a working configuration
      if (store == nullptr) {
        lderr(cct) << "Failed to reinitialize RGWRados after a realm "
            "configuration update. Waiting for a new update." << dendl;

        // sleep until another event is scheduled
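        // (the wait predicate is rechecked under the lock, so a spurious
        // wakeup, or a notify_one() that fired before we began waiting,
        // is handled correctly)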
        cond.wait(lock, [this] { return reload_scheduled; });
        ldout(cct, 1) << "Woke up with a new configuration, retrying "
            "RGWRados initialization." << dendl;
      }

      if (reload_scheduled) {
        // cancel the event; we'll handle it now
        timer.cancel_event(reload_scheduled);
        reload_scheduled = nullptr;

        // if we successfully created a store, clean it up outside of the lock,
        // then continue to loop and recreate another
        std::swap(store, store_cleanup);
      }
    }

    if (store_cleanup) {
      ldout(cct, 4) << "Got another notification, restarting RGWRados "
          "initialization." << dendl;

      RGWStoreManager::close_storage(store_cleanup);
    }
  }
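
  // at this point `store` is a fully constructed RGWRadosStore; any
  // notification from here on simply schedules another reload() call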

  int r = store->getRados()->register_to_service_map("rgw", service_map_meta);
  if (r < 0) {
    lderr(cct) << "ERROR: failed to register to service map: "
               << cpp_strerror(-r) << dendl;

    /* ignore error */
  }
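  // the service map only drives reporting to ceph-mgr (e.g. `ceph status`);
  // the gateway can still serve requests without it, so the error is ignored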

  ldout(cct, 1) << "Finishing initialization of new store" << dendl;
  // finish initializing the new store
  ldout(cct, 1) << " - REST subsystem init" << dendl;
  rgw_rest_init(cct, store->svc()->zone->get_zonegroup());
  ldout(cct, 1) << " - usage subsystem init" << dendl;
  rgw_log_usage_init(cct, store->getRados());
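  // rgw_rest_init() rebuilds REST state that depends on the new zonegroup
  // (e.g. its hostnames), and rgw_log_usage_init() reattaches the usage log
  // to the new store, reversing the rgw_log_usage_finalize() call above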

  ldout(cct, 1) << "Resuming frontends with new realm configuration." << dendl;

  frontends->resume(store);
}