]>
Commit | Line | Data |
---|---|---|
f67539c2 TL |
1 | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*- |
2 | // vim: ts=8 sw=2 smarttab | |
3 | ||
#include <utility>

#include "common/debug.h"
#include "common/errno.h"
#include "common/Timer.h"
#include "include/stringify.h"
#include "ServiceDaemon.h"
9 | ||
10 | #define dout_context g_ceph_context | |
11 | #define dout_subsys ceph_subsys_cephfs_mirror | |
12 | #undef dout_prefix | |
13 | #define dout_prefix *_dout << "cephfs::mirror::ServiceDaemon: " << this << " " \ | |
14 | << __func__ | |
15 | ||
16 | namespace cephfs { | |
17 | namespace mirror { | |
18 | ||
19 | namespace { | |
20 | ||
21 | struct AttributeDumpVisitor : public boost::static_visitor<void> { | |
22 | ceph::Formatter *f; | |
23 | std::string name; | |
24 | ||
25 | AttributeDumpVisitor(ceph::Formatter *f, std::string_view name) | |
26 | : f(f), name(name) { | |
27 | } | |
28 | ||
29 | void operator()(bool val) const { | |
30 | f->dump_bool(name.c_str(), val); | |
31 | } | |
32 | void operator()(uint64_t val) const { | |
33 | f->dump_unsigned(name.c_str(), val); | |
34 | } | |
35 | void operator()(const std::string &val) const { | |
36 | f->dump_string(name.c_str(), val); | |
37 | } | |
38 | }; | |
39 | ||
40 | } // anonymous namespace | |
41 | ||
42 | ServiceDaemon::ServiceDaemon(CephContext *cct, RadosRef rados) | |
43 | : m_cct(cct), | |
44 | m_rados(rados), | |
45 | m_timer(new SafeTimer(cct, m_timer_lock, true)) { | |
46 | m_timer->init(); | |
47 | } | |
48 | ||
// Tear down the timer: cancel any pending status-update event, then shut
// the timer down -- all under the timer lock so no callback can run
// concurrently with this destructor.
ServiceDaemon::~ServiceDaemon() {
  dout(10) << dendl;
  {
    std::scoped_lock timer_lock(m_timer_lock);
    if (m_timer_ctx != nullptr) {
      // A status update is still queued -- cancel it before shutdown.
      dout(5) << ": canceling timer task=" << m_timer_ctx << dendl;
      m_timer->cancel_event(m_timer_ctx);
    }
    m_timer->shutdown();
  }

  delete m_timer;  // allocated in the constructor
}
62 | ||
63 | int ServiceDaemon::init() { | |
64 | dout(20) << dendl; | |
65 | ||
66 | std::string id = m_cct->_conf->name.get_id(); | |
67 | if (id.find(CEPHFS_MIRROR_AUTH_ID_PREFIX) == 0) { | |
68 | id = id.substr(CEPHFS_MIRROR_AUTH_ID_PREFIX.size()); | |
69 | } | |
70 | std::string instance_id = stringify(m_rados->get_instance_id()); | |
71 | ||
72 | std::map<std::string, std::string> service_metadata = {{"id", id}, | |
73 | {"instance_id", instance_id}}; | |
74 | int r = m_rados->service_daemon_register("cephfs-mirror", instance_id, | |
75 | service_metadata); | |
76 | if (r < 0) { | |
77 | return r; | |
78 | } | |
79 | return 0; | |
80 | } | |
81 | ||
82 | void ServiceDaemon::add_filesystem(fs_cluster_id_t fscid, std::string_view fs_name) { | |
83 | dout(10) << ": fscid=" << fscid << ", fs_name=" << fs_name << dendl; | |
84 | ||
85 | { | |
86 | std::scoped_lock locker(m_lock); | |
87 | m_filesystems.emplace(fscid, Filesystem(fs_name)); | |
88 | } | |
89 | schedule_update_status(); | |
90 | } | |
91 | ||
92 | void ServiceDaemon::remove_filesystem(fs_cluster_id_t fscid) { | |
93 | dout(10) << ": fscid=" << fscid << dendl; | |
94 | ||
95 | { | |
96 | std::scoped_lock locker(m_lock); | |
97 | m_filesystems.erase(fscid); | |
98 | } | |
99 | schedule_update_status(); | |
100 | } | |
101 | ||
102 | void ServiceDaemon::add_peer(fs_cluster_id_t fscid, const Peer &peer) { | |
103 | dout(10) << ": peer=" << peer << dendl; | |
104 | ||
105 | { | |
106 | std::scoped_lock locker(m_lock); | |
107 | auto fs_it = m_filesystems.find(fscid); | |
108 | if (fs_it == m_filesystems.end()) { | |
109 | return; | |
110 | } | |
111 | fs_it->second.peer_attributes.emplace(peer, Attributes{}); | |
112 | } | |
113 | schedule_update_status(); | |
114 | } | |
115 | ||
116 | void ServiceDaemon::remove_peer(fs_cluster_id_t fscid, const Peer &peer) { | |
117 | dout(10) << ": peer=" << peer << dendl; | |
118 | ||
119 | { | |
120 | std::scoped_lock locker(m_lock); | |
121 | auto fs_it = m_filesystems.find(fscid); | |
122 | if (fs_it == m_filesystems.end()) { | |
123 | return; | |
124 | } | |
125 | fs_it->second.peer_attributes.erase(peer); | |
126 | } | |
127 | schedule_update_status(); | |
128 | } | |
129 | ||
130 | void ServiceDaemon::add_or_update_fs_attribute(fs_cluster_id_t fscid, std::string_view key, | |
131 | AttributeValue value) { | |
132 | dout(10) << ": fscid=" << fscid << dendl; | |
133 | ||
134 | { | |
135 | std::scoped_lock locker(m_lock); | |
136 | auto fs_it = m_filesystems.find(fscid); | |
137 | if (fs_it == m_filesystems.end()) { | |
138 | return; | |
139 | } | |
140 | ||
141 | fs_it->second.fs_attributes[std::string(key)] = value; | |
142 | } | |
143 | schedule_update_status(); | |
144 | } | |
145 | ||
146 | void ServiceDaemon::add_or_update_peer_attribute(fs_cluster_id_t fscid, const Peer &peer, | |
147 | std::string_view key, AttributeValue value) { | |
148 | dout(10) << ": fscid=" << fscid << dendl; | |
149 | ||
150 | { | |
151 | std::scoped_lock locker(m_lock); | |
152 | auto fs_it = m_filesystems.find(fscid); | |
153 | if (fs_it == m_filesystems.end()) { | |
154 | return; | |
155 | } | |
156 | ||
157 | auto peer_it = fs_it->second.peer_attributes.find(peer); | |
158 | if (peer_it == fs_it->second.peer_attributes.end()) { | |
159 | return; | |
160 | } | |
161 | ||
162 | peer_it->second[std::string(key)] = value; | |
163 | } | |
164 | schedule_update_status(); | |
165 | } | |
166 | ||
// Arrange for update_status() to run ~1 second from now. Multiple calls
// while an event is pending coalesce into the single scheduled update.
void ServiceDaemon::schedule_update_status() {
  dout(10) << dendl;

  std::scoped_lock timer_lock(m_timer_lock);
  if (m_timer_ctx != nullptr) {
    // An update is already queued; this request rides along with it.
    return;
  }

  // NOTE(review): SafeTimer was constructed with safe_callbacks=true, so the
  // callback presumably runs with m_timer_lock held -- which is what makes
  // clearing m_timer_ctx inside the lambda safe against a concurrent
  // schedule_update_status(). Confirm against SafeTimer's contract.
  m_timer_ctx = new LambdaContext([this] {
    m_timer_ctx = nullptr;
    update_status();
  });
  m_timer->add_event_after(1, m_timer_ctx);
}
181 | ||
182 | void ServiceDaemon::update_status() { | |
183 | dout(20) << ": " << m_filesystems.size() << " filesystem(s)" << dendl; | |
184 | ||
185 | ceph::JSONFormatter f; | |
186 | { | |
187 | std::scoped_lock locker(m_lock); | |
188 | f.open_object_section("filesystems"); | |
189 | for (auto &[fscid, filesystem] : m_filesystems) { | |
190 | f.open_object_section(stringify(fscid).c_str()); | |
191 | f.dump_string("name", filesystem.fs_name); | |
192 | for (auto &[attr_name, attr_value] : filesystem.fs_attributes) { | |
193 | AttributeDumpVisitor visitor(&f, attr_name); | |
194 | boost::apply_visitor(visitor, attr_value); | |
195 | } | |
196 | f.open_object_section("peers"); | |
197 | for (auto &[peer, attributes] : filesystem.peer_attributes) { | |
198 | f.open_object_section(peer.uuid); | |
199 | f.dump_object("remote", peer.remote); | |
200 | f.open_object_section("stats"); | |
201 | for (auto &[attr_name, attr_value] : attributes) { | |
202 | AttributeDumpVisitor visitor(&f, attr_name); | |
203 | boost::apply_visitor(visitor, attr_value); | |
204 | } | |
205 | f.close_section(); // stats | |
206 | f.close_section(); // peer.uuid | |
207 | } | |
208 | f.close_section(); // peers | |
209 | f.close_section(); // fscid | |
210 | } | |
211 | f.close_section(); // filesystems | |
212 | } | |
213 | ||
214 | std::stringstream ss; | |
215 | f.flush(ss); | |
216 | ||
217 | int r = m_rados->service_daemon_update_status({{"status_json", ss.str()}}); | |
218 | if (r < 0) { | |
219 | derr << ": failed to update service daemon status: " << cpp_strerror(r) | |
220 | << dendl; | |
221 | } | |
222 | } | |
223 | ||
224 | } // namespace mirror | |
225 | } // namespace cephfs |