// ceph/src/mgr/DaemonState.cc (quincy 17.1.0 import)
1 // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
2 // vim: ts=8 sw=2 smarttab
3 /*
4 * Ceph - scalable distributed file system
5 *
6 * Copyright (C) 2016 John Spray <john.spray@redhat.com>
7 *
8 * This is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU Lesser General Public
10 * License version 2.1, as published by the Free Software
11 * Foundation. See file COPYING.
12 */
13
14 #include "DaemonState.h"
15
16 #include <experimental/iterator>
17
18 #include "MgrSession.h"
19 #include "include/stringify.h"
20 #include "common/Formatter.h"
21
22 #define dout_context g_ceph_context
23 #define dout_subsys ceph_subsys_mgr
24 #undef dout_prefix
25 #define dout_prefix *_dout << "mgr " << __func__ << " "
26
27 using std::list;
28 using std::make_pair;
29 using std::map;
30 using std::ostream;
31 using std::ostringstream;
32 using std::string;
33 using std::stringstream;
34 using std::unique_ptr;
35
36 void DeviceState::set_metadata(map<string,string>&& m)
37 {
38 metadata = std::move(m);
39 auto p = metadata.find("life_expectancy_min");
40 if (p != metadata.end()) {
41 life_expectancy.first.parse(p->second);
42 }
43 p = metadata.find("life_expectancy_max");
44 if (p != metadata.end()) {
45 life_expectancy.second.parse(p->second);
46 }
47 p = metadata.find("life_expectancy_stamp");
48 if (p != metadata.end()) {
49 life_expectancy_stamp.parse(p->second);
50 }
51 p = metadata.find("wear_level");
52 if (p != metadata.end()) {
53 wear_level = atof(p->second.c_str());
54 }
55 }
56
57 void DeviceState::set_life_expectancy(utime_t from, utime_t to, utime_t now)
58 {
59 life_expectancy = make_pair(from, to);
60 life_expectancy_stamp = now;
61 if (from != utime_t()) {
62 metadata["life_expectancy_min"] = stringify(from);
63 } else {
64 metadata["life_expectancy_min"] = "";
65 }
66 if (to != utime_t()) {
67 metadata["life_expectancy_max"] = stringify(to);
68 } else {
69 metadata["life_expectancy_max"] = "";
70 }
71 if (now != utime_t()) {
72 metadata["life_expectancy_stamp"] = stringify(now);
73 } else {
74 metadata["life_expectancy_stamp"] = "";
75 }
76 }
77
78 void DeviceState::rm_life_expectancy()
79 {
80 life_expectancy = make_pair(utime_t(), utime_t());
81 life_expectancy_stamp = utime_t();
82 metadata.erase("life_expectancy_min");
83 metadata.erase("life_expectancy_max");
84 metadata.erase("life_expectancy_stamp");
85 }
86
87 void DeviceState::set_wear_level(float wear)
88 {
89 wear_level = wear;
90 if (wear >= 0) {
91 metadata["wear_level"] = stringify(wear);
92 } else {
93 metadata.erase("wear_level");
94 }
95 }
96
97 string DeviceState::get_life_expectancy_str(utime_t now) const
98 {
99 if (life_expectancy.first == utime_t()) {
100 return string();
101 }
102 if (now >= life_expectancy.first) {
103 return "now";
104 }
105 utime_t min = life_expectancy.first - now;
106 utime_t max = life_expectancy.second - now;
107 if (life_expectancy.second == utime_t()) {
108 return string(">") + timespan_str(make_timespan(min));
109 }
110 string a = timespan_str(make_timespan(min));
111 string b = timespan_str(make_timespan(max));
112 if (a == b) {
113 return a;
114 }
115 return a + " to " + b;
116 }
117
118 void DeviceState::dump(Formatter *f) const
119 {
120 f->dump_string("devid", devid);
121 f->open_array_section("location");
122 for (auto& i : attachments) {
123 f->open_object_section("attachment");
124 f->dump_string("host", std::get<0>(i));
125 f->dump_string("dev", std::get<1>(i));
126 f->dump_string("path", std::get<2>(i));
127 f->close_section();
128 }
129 f->close_section();
130 f->open_array_section("daemons");
131 for (auto& i : daemons) {
132 f->dump_stream("daemon") << i;
133 }
134 f->close_section();
135 if (life_expectancy.first != utime_t()) {
136 f->dump_stream("life_expectancy_min") << life_expectancy.first;
137 f->dump_stream("life_expectancy_max") << life_expectancy.second;
138 f->dump_stream("life_expectancy_stamp")
139 << life_expectancy_stamp;
140 }
141 if (wear_level >= 0) {
142 f->dump_float("wear_level", wear_level);
143 }
144 }
145
146 void DeviceState::print(ostream& out) const
147 {
148 out << "device " << devid << "\n";
149 for (auto& i : attachments) {
150 out << "attachment " << std::get<0>(i) << " " << std::get<1>(i) << " "
151 << std::get<2>(i) << "\n";
152 out << "\n";
153 }
154 std::copy(std::begin(daemons), std::end(daemons),
155 std::experimental::make_ostream_joiner(out, ","));
156 out << '\n';
157 if (life_expectancy.first != utime_t()) {
158 out << "life_expectancy " << life_expectancy.first << " to "
159 << life_expectancy.second
160 << " (as of " << life_expectancy_stamp << ")\n";
161 }
162 if (wear_level >= 0) {
163 out << "wear_level " << wear_level << "\n";
164 }
165 }
166
167 void DaemonState::set_metadata(const std::map<std::string,std::string>& m)
168 {
169 devices.clear();
170 devices_bypath.clear();
171 metadata = m;
172 if (auto found = m.find("device_ids"); found != m.end()) {
173 auto& device_ids = found->second;
174 std::map<std::string,std::string> paths; // devname -> id or path
175 if (auto found = m.find("device_paths"); found != m.end()) {
176 get_str_map(found->second, &paths, ",; ");
177 }
178 for_each_pair(
179 device_ids, ",; ",
180 [&paths, this](std::string_view devname, std::string_view id) {
181 // skip blank ids
182 if (id.empty()) {
183 return;
184 }
185 // id -> devname
186 devices.emplace(id, devname);
187 if (auto path = paths.find(std::string(id)); path != paths.end()) {
188 // id -> path
189 devices_bypath.emplace(id, path->second);
190 }
191 });
192 }
193 if (auto found = m.find("hostname"); found != m.end()) {
194 hostname = found->second;
195 }
196 }
197
// Lazily decode the daemon's default config key/value map from the raw
// bufferlist.  The decode is re-attempted on each call while the cached
// map is still empty and the bufferlist is non-empty; a decode failure
// is deliberately swallowed (best-effort), leaving the map empty.
const std::map<std::string,std::string>& DaemonState::_get_config_defaults()
{
  if (config_defaults.empty() &&
      config_defaults_bl.length()) {
    auto p = config_defaults_bl.cbegin();
    try {
      decode(config_defaults, p);
    } catch (buffer::error& e) {
      // ignore: corrupt or unknown encoding just yields no defaults
    }
  }
  return config_defaults;
}
210
211 void DaemonStateIndex::insert(DaemonStatePtr dm)
212 {
213 std::unique_lock l{lock};
214 _insert(dm);
215 }
216
217 void DaemonStateIndex::_insert(DaemonStatePtr dm)
218 {
219 if (all.count(dm->key)) {
220 _erase(dm->key);
221 }
222
223 by_server[dm->hostname][dm->key] = dm;
224 all[dm->key] = dm;
225
226 for (auto& i : dm->devices) {
227 auto d = _get_or_create_device(i.first);
228 d->daemons.insert(dm->key);
229 auto p = dm->devices_bypath.find(i.first);
230 if (p != dm->devices_bypath.end()) {
231 d->attachments.insert(std::make_tuple(dm->hostname, i.second, p->second));
232 } else {
233 d->attachments.insert(std::make_tuple(dm->hostname, i.second,
234 std::string()));
235 }
236 }
237 }
238
// Remove a daemon from the index: unlink it from every device it was
// attached to (dropping devices that become unreferenced), from the
// per-server map (dropping the host entry when it becomes empty), and
// finally from the master map.  Caller must hold the write lock.
void DaemonStateIndex::_erase(const DaemonKey& dmk)
{
  ceph_assert(ceph_mutex_is_wlocked(lock));

  const auto to_erase = all.find(dmk);
  ceph_assert(to_erase != all.end());
  const auto dm = to_erase->second;

  for (auto& i : dm->devices) {
    // i.first is the device id, i.second the device name
    auto d = _get_or_create_device(i.first);
    ceph_assert(d->daemons.count(dmk));
    d->daemons.erase(dmk);
    // erase the exact attachment tuple that _insert() added
    auto p = dm->devices_bypath.find(i.first);
    if (p != dm->devices_bypath.end()) {
      d->attachments.erase(make_tuple(dm->hostname, i.second, p->second));
    } else {
      d->attachments.erase(make_tuple(dm->hostname, i.second, std::string()));
    }
    if (d->empty()) {
      // no daemon references this device any more
      _erase_device(d);
    }
  }

  auto &server_collection = by_server[dm->hostname];
  server_collection.erase(dm->key);
  if (server_collection.empty()) {
    by_server.erase(dm->hostname);
  }

  all.erase(to_erase);
}
270
271 DaemonStateCollection DaemonStateIndex::get_by_service(
272 const std::string& svc) const
273 {
274 std::shared_lock l{lock};
275
276 DaemonStateCollection result;
277
278 for (const auto& [key, state] : all) {
279 if (key.type == svc) {
280 result[key] = state;
281 }
282 }
283
284 return result;
285 }
286
287 DaemonStateCollection DaemonStateIndex::get_by_server(
288 const std::string &hostname) const
289 {
290 std::shared_lock l{lock};
291
292 if (auto found = by_server.find(hostname); found != by_server.end()) {
293 return found->second;
294 } else {
295 return {};
296 }
297 }
298
299 bool DaemonStateIndex::exists(const DaemonKey &key) const
300 {
301 std::shared_lock l{lock};
302
303 return all.count(key) > 0;
304 }
305
306 DaemonStatePtr DaemonStateIndex::get(const DaemonKey &key)
307 {
308 std::shared_lock l{lock};
309
310 auto iter = all.find(key);
311 if (iter != all.end()) {
312 return iter->second;
313 } else {
314 return nullptr;
315 }
316 }
317
318 void DaemonStateIndex::rm(const DaemonKey &key)
319 {
320 std::unique_lock l{lock};
321 _rm(key);
322 }
323
324 void DaemonStateIndex::_rm(const DaemonKey &key)
325 {
326 if (all.count(key)) {
327 _erase(key);
328 }
329 }
330
331 void DaemonStateIndex::cull(const std::string& svc_name,
332 const std::set<std::string>& names_exist)
333 {
334 std::vector<string> victims;
335
336 std::unique_lock l{lock};
337 auto begin = all.lower_bound({svc_name, ""});
338 auto end = all.end();
339 for (auto &i = begin; i != end; ++i) {
340 const auto& daemon_key = i->first;
341 if (daemon_key.type != svc_name)
342 break;
343 if (names_exist.count(daemon_key.name) == 0) {
344 victims.push_back(daemon_key.name);
345 }
346 }
347
348 for (auto &i : victims) {
349 DaemonKey daemon_key{svc_name, i};
350 dout(4) << "Removing data for " << daemon_key << dendl;
351 _erase(daemon_key);
352 }
353 }
354
355 void DaemonStateIndex::cull_services(const std::set<std::string>& types_exist)
356 {
357 std::set<DaemonKey> victims;
358
359 std::unique_lock l{lock};
360 for (auto it = all.begin(); it != all.end(); ++it) {
361 const auto& daemon_key = it->first;
362 if (it->second->service_daemon &&
363 types_exist.count(daemon_key.type) == 0) {
364 victims.insert(daemon_key);
365 }
366 }
367
368 for (auto &i : victims) {
369 dout(4) << "Removing data for " << i << dendl;
370 _erase(i);
371 }
372 }
373
// Merge one MMgrReport from a daemon into our perf-counter state:
//  - register newly declared counter types (also remembered on the
//    session, so we know which types this daemon will send),
//  - drop undeclared types from the session,
//  - decode the packed values, one entry per type, in the iteration
//    order of the session's declared-type set.
void DaemonPerfCounters::update(const MMgrReport& report)
{
  dout(20) << "loading " << report.declare_types.size() << " new types, "
	   << report.undeclare_types.size() << " old types, had "
	   << types.size() << " types, got "
	   << report.packed.length() << " bytes of data" << dendl;

  // Retrieve session state
  auto priv = report.get_connection()->get_priv();
  auto session = static_cast<MgrSession*>(priv.get());

  // Load any newly declared types
  for (const auto &t : report.declare_types) {
    types.insert(std::make_pair(t.path, t));
    session->declared_types.insert(t.path);
  }
  // Remove any old types
  for (const auto &t : report.undeclare_types) {
    session->declared_types.erase(t);
  }

  const auto now = ceph_clock_now();

  // Parse packed data according to declared set of types
  auto p = report.packed.cbegin();
  DECODE_START(1, p);
  for (const auto &t_path : session->declared_types) {
    const auto &t = types.at(t_path);
    auto instances_it = instances.find(t_path);
    // Always check the instance exists, as we don't prevent yet
    // multiple sessions from daemons with the same name, and one
    // session clearing stats created by another on open.
    if (instances_it == instances.end()) {
      instances_it = instances.insert({t_path, t.type}).first;
    }
    uint64_t val = 0;
    uint64_t avgcount = 0;
    uint64_t avgcount2 = 0;

    // LONGRUNAVG counters carry three fields on the wire; avgcount2 is
    // decoded to advance the cursor but is not stored.
    decode(val, p);
    if (t.type & PERFCOUNTER_LONGRUNAVG) {
      decode(avgcount, p);
      decode(avgcount2, p);
      instances_it->second.push_avg(now, val, avgcount);
    } else {
      instances_it->second.push(now, val);
    }
  }
  DECODE_FINISH(p);
}
424
// Append a plain counter sample (timestamp + value) to the sample
// buffer.
void PerfCounterInstance::push(utime_t t, uint64_t const &v)
{
  buffer.push_back({t, v});
}
429
// Append a long-run-average sample: timestamp plus the (value, count)
// pair as received from the daemon (see DaemonPerfCounters::update).
void PerfCounterInstance::push_avg(utime_t t, uint64_t const &s,
				   uint64_t const &c)
{
  avg_buffer.push_back({t, s, c});
}