]>
Commit | Line | Data |
---|---|---|
7c673cae FG |
1 | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*- |
2 | // vim: ts=8 sw=2 smarttab | |
3 | /* | |
4 | * Ceph - scalable distributed file system | |
5 | * | |
6 | * Copyright (C) 2004-2006 Sage Weil <sage@newdream.net> | |
7 | * | |
8 | * This is free software; you can redistribute it and/or | |
9 | * modify it under the terms of the GNU Lesser General Public | |
10 | * License version 2.1, as published by the Free Software | |
11 | * Foundation. See file COPYING. | |
12 | * | |
13 | */ | |
14 | ||
15 | #include <sstream> | |
16 | #include <boost/utility.hpp> | |
224ce89b | 17 | #include <boost/regex.hpp> |
7c673cae FG |
18 | |
19 | #include "MDSMonitor.h" | |
20 | #include "FSCommands.h" | |
21 | #include "Monitor.h" | |
22 | #include "MonitorDBStore.h" | |
23 | #include "OSDMonitor.h" | |
24 | #include "PGMonitor.h" | |
25 | ||
26 | #include "common/strtol.h" | |
27 | #include "common/perf_counters.h" | |
28 | #include "common/config.h" | |
29 | #include "common/cmdparse.h" | |
30 | #include "messages/MMDSMap.h" | |
31 | #include "messages/MFSMap.h" | |
32 | #include "messages/MFSMapUser.h" | |
33 | #include "messages/MMDSLoadTargets.h" | |
34 | #include "messages/MMonCommand.h" | |
35 | #include "messages/MGenericMessage.h" | |
36 | ||
37 | #include "include/assert.h" | |
38 | #include "include/str_list.h" | |
39 | #include "include/stringify.h" | |
40 | #include "mds/mdstypes.h" | |
41 | #include "Session.h" | |
42 | ||
43 | #define dout_subsys ceph_subsys_mon | |
44 | #undef dout_prefix | |
45 | #define dout_prefix _prefix(_dout, mon, fsmap) | |
46 | static ostream& _prefix(std::ostream *_dout, Monitor *mon, FSMap const& fsmap) { | |
47 | return *_dout << "mon." << mon->name << "@" << mon->rank | |
48 | << "(" << mon->get_state_name() | |
49 | << ").mds e" << fsmap.get_epoch() << " "; | |
50 | } | |
51 | ||
52 | /* | |
53 | * Specialized implementation of cmd_getval to allow us to parse | |
54 | * out strongly-typedef'd types | |
55 | */ | |
56 | template<> bool cmd_getval(CephContext *cct, const cmdmap_t& cmdmap, | |
31f18b77 | 57 | const std::string& k, mds_gid_t &val) |
7c673cae FG |
58 | { |
59 | return cmd_getval(cct, cmdmap, k, (int64_t&)val); | |
60 | } | |
61 | ||
62 | template<> bool cmd_getval(CephContext *cct, const cmdmap_t& cmdmap, | |
31f18b77 | 63 | const std::string& k, mds_rank_t &val) |
7c673cae FG |
64 | { |
65 | return cmd_getval(cct, cmdmap, k, (int64_t&)val); | |
66 | } | |
67 | ||
68 | template<> bool cmd_getval(CephContext *cct, const cmdmap_t& cmdmap, | |
31f18b77 | 69 | const std::string& k, MDSMap::DaemonState &val) |
7c673cae FG |
70 | { |
71 | return cmd_getval(cct, cmdmap, k, (int64_t&)val); | |
72 | } | |
73 | ||
74 | static const string MDS_METADATA_PREFIX("mds_metadata"); | |
75 | ||
76 | ||
77 | // my methods | |
78 | ||
79 | void MDSMonitor::print_map(FSMap &m, int dbl) | |
80 | { | |
81 | dout(dbl) << "print_map\n"; | |
82 | m.print(*_dout); | |
83 | *_dout << dendl; | |
84 | } | |
85 | ||
86 | // service methods | |
87 | void MDSMonitor::create_initial() | |
88 | { | |
89 | dout(10) << "create_initial" << dendl; | |
90 | } | |
91 | ||
92 | ||
93 | void MDSMonitor::update_from_paxos(bool *need_bootstrap) | |
94 | { | |
95 | version_t version = get_last_committed(); | |
96 | if (version == fsmap.epoch) | |
97 | return; | |
98 | ||
99 | dout(10) << __func__ << " version " << version | |
100 | << ", my e " << fsmap.epoch << dendl; | |
101 | assert(version > fsmap.epoch); | |
102 | ||
224ce89b WB |
103 | load_health(); |
104 | ||
7c673cae FG |
105 | // read and decode |
106 | bufferlist fsmap_bl; | |
107 | fsmap_bl.clear(); | |
108 | int err = get_version(version, fsmap_bl); | |
109 | assert(err == 0); | |
110 | ||
111 | assert(fsmap_bl.length() > 0); | |
112 | dout(10) << __func__ << " got " << version << dendl; | |
113 | fsmap.decode(fsmap_bl); | |
114 | ||
115 | // new map | |
116 | dout(4) << "new map" << dendl; | |
117 | print_map(fsmap, 0); | |
118 | if (!g_conf->mon_mds_skip_sanity) { | |
119 | fsmap.sanity(); | |
120 | } | |
121 | ||
122 | check_subs(); | |
123 | update_logger(); | |
124 | } | |
125 | ||
126 | void MDSMonitor::init() | |
127 | { | |
128 | (void)load_metadata(pending_metadata); | |
129 | } | |
130 | ||
131 | void MDSMonitor::create_pending() | |
132 | { | |
133 | pending_fsmap = fsmap; | |
134 | pending_fsmap.epoch++; | |
135 | ||
136 | dout(10) << "create_pending e" << pending_fsmap.epoch << dendl; | |
137 | } | |
138 | ||
139 | void MDSMonitor::encode_pending(MonitorDBStore::TransactionRef t) | |
140 | { | |
141 | dout(10) << "encode_pending e" << pending_fsmap.epoch << dendl; | |
142 | ||
143 | ||
144 | // print map iff 'debug mon = 30' or higher | |
145 | print_map(pending_fsmap, 30); | |
146 | if (!g_conf->mon_mds_skip_sanity) { | |
147 | pending_fsmap.sanity(); | |
148 | } | |
149 | ||
150 | // Set 'modified' on maps modified this epoch | |
151 | for (auto &i : fsmap.filesystems) { | |
152 | if (i.second->mds_map.epoch == fsmap.epoch) { | |
153 | i.second->mds_map.modified = ceph_clock_now(); | |
154 | } | |
155 | } | |
156 | ||
157 | // apply to paxos | |
158 | assert(get_last_committed() + 1 == pending_fsmap.epoch); | |
159 | bufferlist fsmap_bl; | |
160 | pending_fsmap.encode(fsmap_bl, mon->get_quorum_con_features()); | |
161 | ||
162 | /* put everything in the transaction */ | |
163 | put_version(t, pending_fsmap.epoch, fsmap_bl); | |
164 | put_last_committed(t, pending_fsmap.epoch); | |
165 | ||
166 | // Encode MDSHealth data | |
167 | for (std::map<uint64_t, MDSHealth>::iterator i = pending_daemon_health.begin(); | |
168 | i != pending_daemon_health.end(); ++i) { | |
169 | bufferlist bl; | |
170 | i->second.encode(bl); | |
171 | t->put(MDS_HEALTH_PREFIX, stringify(i->first), bl); | |
172 | } | |
173 | ||
174 | for (std::set<uint64_t>::iterator i = pending_daemon_health_rm.begin(); | |
175 | i != pending_daemon_health_rm.end(); ++i) { | |
176 | t->erase(MDS_HEALTH_PREFIX, stringify(*i)); | |
177 | } | |
178 | pending_daemon_health_rm.clear(); | |
179 | remove_from_metadata(t); | |
224ce89b WB |
180 | |
181 | // health | |
182 | health_check_map_t new_checks; | |
183 | const auto info_map = pending_fsmap.get_mds_info(); | |
184 | for (const auto &i : info_map) { | |
185 | const auto &gid = i.first; | |
186 | const auto &info = i.second; | |
187 | if (pending_daemon_health_rm.count(gid)) { | |
188 | continue; | |
189 | } | |
190 | MDSHealth health; | |
191 | auto p = pending_daemon_health.find(gid); | |
192 | if (p != pending_daemon_health.end()) { | |
193 | health = p->second; | |
194 | } else { | |
195 | bufferlist bl; | |
196 | mon->store->get(MDS_HEALTH_PREFIX, stringify(gid), bl); | |
197 | if (!bl.length()) { | |
198 | derr << "Missing health data for MDS " << gid << dendl; | |
199 | continue; | |
200 | } | |
201 | bufferlist::iterator bl_i = bl.begin(); | |
202 | health.decode(bl_i); | |
203 | } | |
204 | for (const auto &metric : health.metrics) { | |
205 | int const rank = info.rank; | |
206 | health_check_t *check = &new_checks.get_or_add( | |
207 | mds_metric_name(metric.type), | |
208 | metric.sev, | |
209 | mds_metric_summary(metric.type)); | |
210 | ostringstream ss; | |
211 | ss << "mds" << info.name << "(mds." << rank << "): " << metric.message; | |
212 | for (auto p = metric.metadata.begin(); | |
213 | p != metric.metadata.end(); | |
214 | ++p) { | |
215 | if (p != metric.metadata.begin()) { | |
216 | ss << ", "; | |
217 | } | |
218 | ss << p->first << ": " << p->second; | |
219 | } | |
220 | check->detail.push_back(ss.str()); | |
221 | } | |
222 | } | |
223 | pending_fsmap.get_health_checks(&new_checks); | |
224 | for (auto& p : new_checks.checks) { | |
225 | p.second.summary = boost::regex_replace( | |
226 | p.second.summary, | |
227 | boost::regex("%num%"), | |
228 | stringify(p.second.detail.size())); | |
229 | p.second.summary = boost::regex_replace( | |
230 | p.second.summary, | |
231 | boost::regex("%plurals%"), | |
232 | p.second.detail.size() > 1 ? "s" : ""); | |
233 | p.second.summary = boost::regex_replace( | |
234 | p.second.summary, | |
235 | boost::regex("%isorare%"), | |
236 | p.second.detail.size() > 1 ? "are" : "is"); | |
237 | } | |
238 | encode_health(new_checks, t); | |
7c673cae FG |
239 | } |
240 | ||
241 | version_t MDSMonitor::get_trim_to() | |
242 | { | |
243 | version_t floor = 0; | |
244 | if (g_conf->mon_mds_force_trim_to > 0 && | |
245 | g_conf->mon_mds_force_trim_to < (int)get_last_committed()) { | |
246 | floor = g_conf->mon_mds_force_trim_to; | |
247 | dout(10) << __func__ << " explicit mon_mds_force_trim_to = " | |
248 | << floor << dendl; | |
249 | } | |
250 | ||
251 | unsigned max = g_conf->mon_max_mdsmap_epochs; | |
252 | version_t last = get_last_committed(); | |
253 | ||
254 | if (last - get_first_committed() > max && floor < last - max) | |
255 | return last - max; | |
256 | return floor; | |
257 | } | |
258 | ||
259 | void MDSMonitor::update_logger() | |
260 | { | |
261 | dout(10) << "update_logger" << dendl; | |
262 | ||
263 | uint64_t up = 0; | |
264 | uint64_t in = 0; | |
265 | uint64_t failed = 0; | |
266 | for (const auto &i : fsmap.filesystems) { | |
267 | const MDSMap &mds_map = i.second->mds_map; | |
268 | ||
269 | up += mds_map.get_num_up_mds(); | |
270 | in += mds_map.get_num_in_mds(); | |
271 | failed += mds_map.get_num_failed_mds(); | |
272 | } | |
273 | mon->cluster_logger->set(l_cluster_num_mds_up, up); | |
274 | mon->cluster_logger->set(l_cluster_num_mds_in, in); | |
275 | mon->cluster_logger->set(l_cluster_num_mds_failed, failed); | |
276 | mon->cluster_logger->set(l_cluster_mds_epoch, fsmap.get_epoch()); | |
277 | } | |
278 | ||
279 | bool MDSMonitor::preprocess_query(MonOpRequestRef op) | |
280 | { | |
281 | op->mark_mdsmon_event(__func__); | |
282 | PaxosServiceMessage *m = static_cast<PaxosServiceMessage*>(op->get_req()); | |
283 | dout(10) << "preprocess_query " << *m << " from " << m->get_orig_source_inst() << dendl; | |
284 | ||
285 | switch (m->get_type()) { | |
286 | ||
287 | case MSG_MDS_BEACON: | |
288 | return preprocess_beacon(op); | |
289 | ||
290 | case MSG_MON_COMMAND: | |
291 | return preprocess_command(op); | |
292 | ||
293 | case MSG_MDS_OFFLOAD_TARGETS: | |
294 | return preprocess_offload_targets(op); | |
295 | ||
296 | default: | |
297 | ceph_abort(); | |
298 | return true; | |
299 | } | |
300 | } | |
301 | ||
302 | void MDSMonitor::_note_beacon(MMDSBeacon *m) | |
303 | { | |
304 | mds_gid_t gid = mds_gid_t(m->get_global_id()); | |
305 | version_t seq = m->get_seq(); | |
306 | ||
307 | dout(15) << "_note_beacon " << *m << " noting time" << dendl; | |
308 | last_beacon[gid].stamp = ceph_clock_now(); | |
309 | last_beacon[gid].seq = seq; | |
310 | } | |
311 | ||
312 | bool MDSMonitor::preprocess_beacon(MonOpRequestRef op) | |
313 | { | |
314 | op->mark_mdsmon_event(__func__); | |
315 | MMDSBeacon *m = static_cast<MMDSBeacon*>(op->get_req()); | |
316 | MDSMap::DaemonState state = m->get_state(); | |
317 | mds_gid_t gid = m->get_global_id(); | |
318 | version_t seq = m->get_seq(); | |
319 | MDSMap::mds_info_t info; | |
320 | epoch_t effective_epoch = 0; | |
321 | ||
322 | // check privileges, ignore if fails | |
323 | MonSession *session = m->get_session(); | |
324 | assert(session); | |
325 | if (!session->is_capable("mds", MON_CAP_X)) { | |
326 | dout(0) << "preprocess_beacon got MMDSBeacon from entity with insufficient privileges " | |
327 | << session->caps << dendl; | |
328 | goto ignore; | |
329 | } | |
330 | ||
331 | if (m->get_fsid() != mon->monmap->fsid) { | |
332 | dout(0) << "preprocess_beacon on fsid " << m->get_fsid() << " != " << mon->monmap->fsid << dendl; | |
333 | goto ignore; | |
334 | } | |
335 | ||
336 | dout(12) << "preprocess_beacon " << *m | |
337 | << " from " << m->get_orig_source_inst() | |
338 | << " " << m->get_compat() | |
339 | << dendl; | |
340 | ||
341 | // make sure the address has a port | |
342 | if (m->get_orig_source_addr().get_port() == 0) { | |
343 | dout(1) << " ignoring boot message without a port" << dendl; | |
344 | goto ignore; | |
345 | } | |
346 | ||
347 | // check compat | |
348 | if (!m->get_compat().writeable(fsmap.compat)) { | |
349 | dout(1) << " mds " << m->get_source_inst() << " can't write to fsmap " << fsmap.compat << dendl; | |
350 | goto ignore; | |
351 | } | |
352 | ||
353 | // fw to leader? | |
354 | if (!mon->is_leader()) | |
355 | return false; | |
356 | ||
357 | // booted, but not in map? | |
358 | if (!pending_fsmap.gid_exists(gid)) { | |
359 | if (state != MDSMap::STATE_BOOT) { | |
360 | dout(7) << "mds_beacon " << *m << " is not in fsmap (state " | |
361 | << ceph_mds_state_name(state) << ")" << dendl; | |
362 | ||
363 | MDSMap null_map; | |
364 | null_map.epoch = fsmap.epoch; | |
365 | null_map.compat = fsmap.compat; | |
366 | mon->send_reply(op, new MMDSMap(mon->monmap->fsid, &null_map)); | |
367 | return true; | |
368 | } else { | |
369 | return false; // not booted yet. | |
370 | } | |
371 | } | |
372 | dout(10) << __func__ << ": GID exists in map: " << gid << dendl; | |
373 | info = pending_fsmap.get_info_gid(gid); | |
374 | ||
375 | // old seq? | |
376 | if (info.state_seq > seq) { | |
377 | dout(7) << "mds_beacon " << *m << " has old seq, ignoring" << dendl; | |
378 | goto ignore; | |
379 | } | |
380 | ||
381 | // Work out the latest epoch that this daemon should have seen | |
382 | { | |
383 | fs_cluster_id_t fscid = pending_fsmap.mds_roles.at(gid); | |
384 | if (fscid == FS_CLUSTER_ID_NONE) { | |
385 | effective_epoch = pending_fsmap.standby_epochs.at(gid); | |
386 | } else { | |
387 | effective_epoch = pending_fsmap.get_filesystem(fscid)->mds_map.epoch; | |
388 | } | |
389 | if (effective_epoch != m->get_last_epoch_seen()) { | |
390 | dout(10) << "mds_beacon " << *m | |
391 | << " ignoring requested state, because mds hasn't seen latest map" << dendl; | |
392 | goto reply; | |
393 | } | |
394 | } | |
395 | ||
396 | if (info.laggy()) { | |
397 | _note_beacon(m); | |
398 | return false; // no longer laggy, need to update map. | |
399 | } | |
400 | if (state == MDSMap::STATE_BOOT) { | |
401 | // ignore, already booted. | |
402 | goto ignore; | |
403 | } | |
404 | // is there a state change here? | |
405 | if (info.state != state) { | |
406 | // legal state change? | |
407 | if ((info.state == MDSMap::STATE_STANDBY || | |
408 | info.state == MDSMap::STATE_STANDBY_REPLAY) && state > 0) { | |
409 | dout(10) << "mds_beacon mds can't activate itself (" << ceph_mds_state_name(info.state) | |
410 | << " -> " << ceph_mds_state_name(state) << ")" << dendl; | |
411 | goto reply; | |
412 | } | |
413 | ||
414 | if ((state == MDSMap::STATE_STANDBY || state == MDSMap::STATE_STANDBY_REPLAY) | |
415 | && info.rank != MDS_RANK_NONE) | |
416 | { | |
417 | dout(4) << "mds_beacon MDS can't go back into standby after taking rank: " | |
418 | "held rank " << info.rank << " while requesting state " | |
419 | << ceph_mds_state_name(state) << dendl; | |
420 | goto reply; | |
421 | } | |
422 | ||
423 | _note_beacon(m); | |
424 | return false; | |
425 | } | |
426 | ||
427 | // Comparing known daemon health with m->get_health() | |
428 | // and return false (i.e. require proposal) if they | |
429 | // do not match, to update our stored | |
430 | if (!(pending_daemon_health[gid] == m->get_health())) { | |
431 | dout(20) << __func__ << " health metrics for gid " << gid << " were updated" << dendl; | |
432 | _note_beacon(m); | |
433 | return false; | |
434 | } | |
435 | ||
436 | reply: | |
437 | // note time and reply | |
438 | assert(effective_epoch > 0); | |
439 | _note_beacon(m); | |
440 | mon->send_reply(op, | |
441 | new MMDSBeacon(mon->monmap->fsid, m->get_global_id(), m->get_name(), | |
442 | effective_epoch, state, seq, | |
443 | CEPH_FEATURES_SUPPORTED_DEFAULT)); | |
444 | return true; | |
445 | ||
446 | ignore: | |
447 | // I won't reply this beacon, drop it. | |
448 | mon->no_reply(op); | |
449 | return true; | |
450 | } | |
451 | ||
452 | bool MDSMonitor::preprocess_offload_targets(MonOpRequestRef op) | |
453 | { | |
454 | op->mark_mdsmon_event(__func__); | |
455 | MMDSLoadTargets *m = static_cast<MMDSLoadTargets*>(op->get_req()); | |
456 | dout(10) << "preprocess_offload_targets " << *m << " from " << m->get_orig_source() << dendl; | |
457 | ||
458 | // check privileges, ignore message if fails | |
459 | MonSession *session = m->get_session(); | |
460 | if (!session) | |
461 | goto done; | |
462 | if (!session->is_capable("mds", MON_CAP_X)) { | |
463 | dout(0) << "preprocess_offload_targets got MMDSLoadTargets from entity with insufficient caps " | |
464 | << session->caps << dendl; | |
465 | goto done; | |
466 | } | |
467 | ||
468 | if (fsmap.gid_exists(m->global_id) && | |
469 | m->targets == fsmap.get_info_gid(m->global_id).export_targets) | |
470 | goto done; | |
471 | ||
472 | return false; | |
473 | ||
474 | done: | |
475 | return true; | |
476 | } | |
477 | ||
478 | ||
479 | bool MDSMonitor::prepare_update(MonOpRequestRef op) | |
480 | { | |
481 | op->mark_mdsmon_event(__func__); | |
482 | PaxosServiceMessage *m = static_cast<PaxosServiceMessage*>(op->get_req()); | |
483 | dout(7) << "prepare_update " << *m << dendl; | |
484 | ||
485 | switch (m->get_type()) { | |
486 | ||
487 | case MSG_MDS_BEACON: | |
488 | return prepare_beacon(op); | |
489 | ||
490 | case MSG_MON_COMMAND: | |
491 | return prepare_command(op); | |
492 | ||
493 | case MSG_MDS_OFFLOAD_TARGETS: | |
494 | return prepare_offload_targets(op); | |
495 | ||
496 | default: | |
497 | ceph_abort(); | |
498 | } | |
499 | ||
500 | return true; | |
501 | } | |
502 | ||
503 | bool MDSMonitor::prepare_beacon(MonOpRequestRef op) | |
504 | { | |
505 | op->mark_mdsmon_event(__func__); | |
506 | MMDSBeacon *m = static_cast<MMDSBeacon*>(op->get_req()); | |
507 | // -- this is an update -- | |
508 | dout(12) << "prepare_beacon " << *m << " from " << m->get_orig_source_inst() << dendl; | |
509 | entity_addr_t addr = m->get_orig_source_inst().addr; | |
510 | mds_gid_t gid = m->get_global_id(); | |
511 | MDSMap::DaemonState state = m->get_state(); | |
512 | version_t seq = m->get_seq(); | |
513 | ||
514 | dout(20) << __func__ << " got health from gid " << gid << " with " << m->get_health().metrics.size() << " metrics." << dendl; | |
515 | ||
516 | // Calculate deltas of health metrics created and removed | |
517 | // Do this by type rather than MDSHealthMetric equality, because messages can | |
518 | // change a lot when they include e.g. a number of items. | |
519 | const auto &old_health = pending_daemon_health[gid].metrics; | |
520 | const auto &new_health = m->get_health().metrics; | |
521 | ||
522 | std::set<mds_metric_t> old_types; | |
523 | for (const auto &i : old_health) { | |
524 | old_types.insert(i.type); | |
525 | } | |
526 | ||
527 | std::set<mds_metric_t> new_types; | |
528 | for (const auto &i : new_health) { | |
529 | new_types.insert(i.type); | |
530 | } | |
531 | ||
532 | for (const auto &new_metric: new_health) { | |
533 | if (old_types.count(new_metric.type) == 0) { | |
534 | std::stringstream msg; | |
535 | msg << "MDS health message (" << m->get_orig_source_inst().name << "): " | |
536 | << new_metric.message; | |
537 | if (new_metric.sev == HEALTH_ERR) { | |
538 | mon->clog->error() << msg.str(); | |
539 | } else if (new_metric.sev == HEALTH_WARN) { | |
540 | mon->clog->warn() << msg.str(); | |
541 | } else { | |
542 | mon->clog->info() << msg.str(); | |
543 | } | |
544 | } | |
545 | } | |
546 | ||
547 | // Log the disappearance of health messages at INFO | |
548 | for (const auto &old_metric : old_health) { | |
549 | if (new_types.count(old_metric.type) == 0) { | |
550 | mon->clog->info() << "MDS health message cleared (" | |
551 | << m->get_orig_source_inst().name << "): " << old_metric.message; | |
552 | } | |
553 | } | |
554 | ||
555 | // Store health | |
556 | pending_daemon_health[gid] = m->get_health(); | |
557 | ||
558 | // boot? | |
559 | if (state == MDSMap::STATE_BOOT) { | |
560 | // zap previous instance of this name? | |
561 | if (g_conf->mds_enforce_unique_name) { | |
562 | bool failed_mds = false; | |
563 | while (mds_gid_t existing = pending_fsmap.find_mds_gid_by_name(m->get_name())) { | |
564 | if (!mon->osdmon()->is_writeable()) { | |
565 | mon->osdmon()->wait_for_writeable(op, new C_RetryMessage(this, op)); | |
566 | return false; | |
567 | } | |
31f18b77 | 568 | mon->clog->info() << "MDS daemon '" << m->get_name() << "' restarted"; |
7c673cae FG |
569 | fail_mds_gid(existing); |
570 | failed_mds = true; | |
571 | } | |
572 | if (failed_mds) { | |
573 | assert(mon->osdmon()->is_writeable()); | |
574 | request_proposal(mon->osdmon()); | |
575 | } | |
576 | } | |
577 | ||
578 | // Add this daemon to the map | |
579 | if (pending_fsmap.mds_roles.count(gid) == 0) { | |
580 | MDSMap::mds_info_t new_info; | |
581 | new_info.global_id = gid; | |
582 | new_info.name = m->get_name(); | |
583 | new_info.addr = addr; | |
584 | new_info.mds_features = m->get_mds_features(); | |
585 | new_info.state = MDSMap::STATE_STANDBY; | |
586 | new_info.state_seq = seq; | |
587 | new_info.standby_for_rank = m->get_standby_for_rank(); | |
588 | new_info.standby_for_name = m->get_standby_for_name(); | |
589 | new_info.standby_for_fscid = m->get_standby_for_fscid(); | |
590 | new_info.standby_replay = m->get_standby_replay(); | |
591 | pending_fsmap.insert(new_info); | |
592 | } | |
593 | ||
594 | // Resolve standby_for_name to a rank | |
595 | const MDSMap::mds_info_t &info = pending_fsmap.get_info_gid(gid); | |
596 | if (!info.standby_for_name.empty()) { | |
597 | const MDSMap::mds_info_t *leaderinfo = fsmap.find_by_name( | |
598 | info.standby_for_name); | |
599 | if (leaderinfo && (leaderinfo->rank >= 0)) { | |
600 | auto fscid = pending_fsmap.mds_roles.at(leaderinfo->global_id); | |
601 | auto fs = pending_fsmap.get_filesystem(fscid); | |
602 | bool followable = fs->mds_map.is_followable(leaderinfo->rank); | |
603 | ||
604 | pending_fsmap.modify_daemon(gid, [fscid, leaderinfo, followable]( | |
605 | MDSMap::mds_info_t *info) { | |
606 | info->standby_for_rank = leaderinfo->rank; | |
607 | info->standby_for_fscid = fscid; | |
608 | }); | |
609 | } | |
610 | } | |
611 | ||
612 | // initialize the beacon timer | |
613 | last_beacon[gid].stamp = ceph_clock_now(); | |
614 | last_beacon[gid].seq = seq; | |
615 | ||
616 | // new incompat? | |
617 | if (!pending_fsmap.compat.writeable(m->get_compat())) { | |
618 | dout(10) << " fsmap " << pending_fsmap.compat | |
619 | << " can't write to new mds' " << m->get_compat() | |
620 | << ", updating fsmap and killing old mds's" | |
621 | << dendl; | |
622 | pending_fsmap.update_compat(m->get_compat()); | |
623 | } | |
624 | ||
625 | update_metadata(m->get_global_id(), m->get_sys_info()); | |
626 | } else { | |
627 | // state update | |
628 | const MDSMap::mds_info_t &info = pending_fsmap.get_info_gid(gid); | |
629 | // Old MDS daemons don't mention that they're standby replay until | |
630 | // after they've sent their boot beacon, so update this field. | |
631 | if (info.standby_replay != m->get_standby_replay()) { | |
632 | pending_fsmap.modify_daemon(info.global_id, [&m]( | |
633 | MDSMap::mds_info_t *i) | |
634 | { | |
635 | i->standby_replay = m->get_standby_replay(); | |
636 | }); | |
637 | } | |
638 | ||
639 | if (info.state == MDSMap::STATE_STOPPING && state != MDSMap::STATE_STOPPED ) { | |
640 | // we can't transition to any other states from STOPPING | |
641 | dout(0) << "got beacon for MDS in STATE_STOPPING, ignoring requested state change" | |
642 | << dendl; | |
643 | _note_beacon(m); | |
644 | return true; | |
645 | } | |
646 | ||
647 | if (info.laggy()) { | |
648 | dout(10) << "prepare_beacon clearing laggy flag on " << addr << dendl; | |
649 | pending_fsmap.modify_daemon(info.global_id, [](MDSMap::mds_info_t *info) | |
650 | { | |
651 | info->clear_laggy(); | |
652 | } | |
653 | ); | |
654 | } | |
655 | ||
656 | dout(10) << "prepare_beacon mds." << info.rank | |
657 | << " " << ceph_mds_state_name(info.state) | |
658 | << " -> " << ceph_mds_state_name(state) | |
659 | << " standby_for_rank=" << m->get_standby_for_rank() | |
660 | << dendl; | |
661 | if (state == MDSMap::STATE_STOPPED) { | |
662 | auto erased = pending_fsmap.stop(gid); | |
663 | erased.push_back(gid); | |
664 | ||
665 | for (const auto &erased_gid : erased) { | |
666 | last_beacon.erase(erased_gid); | |
667 | if (pending_daemon_health.count(erased_gid)) { | |
668 | pending_daemon_health.erase(erased_gid); | |
669 | pending_daemon_health_rm.insert(erased_gid); | |
670 | } | |
671 | } | |
672 | } else if (state == MDSMap::STATE_DAMAGED) { | |
673 | if (!mon->osdmon()->is_writeable()) { | |
674 | dout(4) << __func__ << ": DAMAGED from rank " << info.rank | |
675 | << " waiting for osdmon writeable to blacklist it" << dendl; | |
676 | mon->osdmon()->wait_for_writeable(op, new C_RetryMessage(this, op)); | |
677 | return false; | |
678 | } | |
679 | ||
680 | // Record this MDS rank as damaged, so that other daemons | |
681 | // won't try to run it. | |
682 | dout(4) << __func__ << ": marking rank " | |
683 | << info.rank << " damaged" << dendl; | |
684 | ||
685 | utime_t until = ceph_clock_now(); | |
686 | until += g_conf->mds_blacklist_interval; | |
687 | const auto blacklist_epoch = mon->osdmon()->blacklist(info.addr, until); | |
688 | request_proposal(mon->osdmon()); | |
689 | pending_fsmap.damaged(gid, blacklist_epoch); | |
690 | last_beacon.erase(gid); | |
691 | ||
692 | // Respond to MDS, so that it knows it can continue to shut down | |
693 | mon->send_reply(op, | |
694 | new MMDSBeacon( | |
695 | mon->monmap->fsid, m->get_global_id(), | |
696 | m->get_name(), fsmap.get_epoch(), state, seq, | |
697 | CEPH_FEATURES_SUPPORTED_DEFAULT)); | |
698 | } else if (state == MDSMap::STATE_DNE) { | |
699 | if (!mon->osdmon()->is_writeable()) { | |
700 | dout(4) << __func__ << ": DNE from rank " << info.rank | |
701 | << " waiting for osdmon writeable to blacklist it" << dendl; | |
702 | mon->osdmon()->wait_for_writeable(op, new C_RetryMessage(this, op)); | |
703 | return false; | |
704 | } | |
705 | ||
706 | fail_mds_gid(gid); | |
707 | assert(mon->osdmon()->is_writeable()); | |
708 | request_proposal(mon->osdmon()); | |
709 | ||
710 | // Respond to MDS, so that it knows it can continue to shut down | |
711 | mon->send_reply(op, | |
712 | new MMDSBeacon( | |
713 | mon->monmap->fsid, m->get_global_id(), | |
714 | m->get_name(), fsmap.get_epoch(), state, seq, | |
715 | CEPH_FEATURES_SUPPORTED_DEFAULT)); | |
716 | } else if (info.state == MDSMap::STATE_STANDBY && state != info.state) { | |
717 | // Standby daemons should never modify their own | |
718 | // state. Reject any attempts to do so. | |
719 | derr << "standby " << gid << " attempted to change state to " | |
720 | << ceph_mds_state_name(state) << ", rejecting" << dendl; | |
721 | return true; | |
722 | } else if (info.state != MDSMap::STATE_STANDBY && state != info.state && | |
723 | !MDSMap::state_transition_valid(info.state, state)) { | |
724 | // Validate state transitions for daemons that hold a rank | |
725 | derr << "daemon " << gid << " (rank " << info.rank << ") " | |
726 | << "reported invalid state transition " | |
727 | << ceph_mds_state_name(info.state) << " -> " | |
728 | << ceph_mds_state_name(state) << dendl; | |
729 | return true; | |
730 | } else { | |
731 | // Made it through special cases and validations, record the | |
732 | // daemon's reported state to the FSMap. | |
733 | pending_fsmap.modify_daemon(gid, [state, seq](MDSMap::mds_info_t *info) { | |
734 | info->state = state; | |
735 | info->state_seq = seq; | |
736 | }); | |
737 | } | |
738 | } | |
739 | ||
740 | dout(7) << "prepare_beacon pending map now:" << dendl; | |
741 | print_map(pending_fsmap); | |
742 | ||
743 | wait_for_finished_proposal(op, new FunctionContext([op, this](int r){ | |
744 | if (r >= 0) | |
745 | _updated(op); // success | |
746 | else if (r == -ECANCELED) { | |
747 | mon->no_reply(op); | |
748 | } else { | |
749 | dispatch(op); // try again | |
750 | } | |
751 | })); | |
752 | ||
753 | return true; | |
754 | } | |
755 | ||
756 | bool MDSMonitor::prepare_offload_targets(MonOpRequestRef op) | |
757 | { | |
758 | op->mark_mdsmon_event(__func__); | |
759 | MMDSLoadTargets *m = static_cast<MMDSLoadTargets*>(op->get_req()); | |
760 | mds_gid_t gid = m->global_id; | |
761 | if (pending_fsmap.gid_has_rank(gid)) { | |
762 | dout(10) << "prepare_offload_targets " << gid << " " << m->targets << dendl; | |
763 | pending_fsmap.update_export_targets(gid, m->targets); | |
764 | } else { | |
765 | dout(10) << "prepare_offload_targets " << gid << " not in map" << dendl; | |
766 | } | |
767 | return true; | |
768 | } | |
769 | ||
770 | bool MDSMonitor::should_propose(double& delay) | |
771 | { | |
772 | // delegate to PaxosService to assess whether we should propose | |
773 | return PaxosService::should_propose(delay); | |
774 | } | |
775 | ||
776 | void MDSMonitor::_updated(MonOpRequestRef op) | |
777 | { | |
778 | op->mark_mdsmon_event(__func__); | |
779 | MMDSBeacon *m = static_cast<MMDSBeacon*>(op->get_req()); | |
780 | dout(10) << "_updated " << m->get_orig_source() << " " << *m << dendl; | |
781 | mon->clog->info() << m->get_orig_source_inst() << " " | |
782 | << ceph_mds_state_name(m->get_state()); | |
783 | ||
784 | if (m->get_state() == MDSMap::STATE_STOPPED) { | |
785 | // send the map manually (they're out of the map, so they won't get it automatic) | |
786 | MDSMap null_map; | |
787 | null_map.epoch = fsmap.epoch; | |
788 | null_map.compat = fsmap.compat; | |
789 | mon->send_reply(op, new MMDSMap(mon->monmap->fsid, &null_map)); | |
790 | } else { | |
791 | mon->send_reply(op, new MMDSBeacon(mon->monmap->fsid, | |
792 | m->get_global_id(), | |
793 | m->get_name(), | |
794 | fsmap.get_epoch(), | |
795 | m->get_state(), | |
796 | m->get_seq(), | |
797 | CEPH_FEATURES_SUPPORTED_DEFAULT)); | |
798 | } | |
799 | } | |
800 | ||
801 | void MDSMonitor::on_active() | |
802 | { | |
803 | tick(); | |
804 | update_logger(); | |
805 | ||
224ce89b WB |
806 | if (mon->is_leader()) { |
807 | mon->clog->debug() << "fsmap " << fsmap; | |
808 | } | |
7c673cae FG |
809 | } |
810 | ||
// Populate `summary` (and optionally `detail`) with health entries: first
// the map-derived issues from FSMap::get_health(), then the per-daemon
// MDSHealth metrics that beacons have persisted in the mon store under
// MDS_HEALTH_PREFIX.
void MDSMonitor::get_health(list<pair<health_status_t, string> >& summary,
			    list<pair<health_status_t, string> > *detail,
			    CephContext* cct) const
{
  fsmap.get_health(summary, detail);

  // For each MDS GID...
  const auto info_map = fsmap.get_mds_info();
  for (const auto &i : info_map) {
    const auto &gid = i.first;
    const auto &info = i.second;

    // Decode MDSHealth from the mon store; a daemon that has not yet sent
    // a beacon (or whose record was trimmed) simply has no entry.
    bufferlist bl;
    mon->store->get(MDS_HEALTH_PREFIX, stringify(gid), bl);
    if (!bl.length()) {
      derr << "Missing health data for MDS " << gid << dendl;
      continue;
    }
    MDSHealth health;
    bufferlist::iterator bl_i = bl.begin();
    health.decode(bl_i);

    // One summary entry per metric, prefixed with the daemon's rank.
    for (const auto &metric : health.metrics) {
      int const rank = info.rank;
      std::ostringstream message;
      message << "mds" << rank << ": " << metric.message;
      summary.push_back(std::make_pair(metric.sev, message.str()));

      if (detail) {
        // There is no way for us to cleanly associate detail entries with
        // summary entries (#7192), so we duplicate the summary message in
        // the detail string and tag the metadata on.
        std::ostringstream detail_message;
        detail_message << message.str();
        if (metric.metadata.size()) {
          // Append "(key: value, key: value)" for any metric metadata.
          detail_message << "(";
          auto k = metric.metadata.begin();
          while (k != metric.metadata.end()) {
            detail_message << k->first << ": " << k->second;
            if (boost::next(k) != metric.metadata.end()) {
              detail_message << ", ";
            }
            ++k;
          }
          detail_message << ")";
        }
        detail->push_back(std::make_pair(metric.sev, detail_message.str()));
      }
    }
  }
}
862 | ||
863 | void MDSMonitor::dump_info(Formatter *f) | |
864 | { | |
865 | f->open_object_section("fsmap"); | |
866 | fsmap.dump(f); | |
867 | f->close_section(); | |
868 | ||
869 | f->dump_unsigned("mdsmap_first_committed", get_first_committed()); | |
870 | f->dump_unsigned("mdsmap_last_committed", get_last_committed()); | |
871 | } | |
872 | ||
873 | bool MDSMonitor::preprocess_command(MonOpRequestRef op) | |
874 | { | |
875 | op->mark_mdsmon_event(__func__); | |
876 | MMonCommand *m = static_cast<MMonCommand*>(op->get_req()); | |
877 | int r = -1; | |
878 | bufferlist rdata; | |
879 | stringstream ss, ds; | |
880 | ||
881 | map<string, cmd_vartype> cmdmap; | |
882 | if (!cmdmap_from_json(m->cmd, &cmdmap, ss)) { | |
883 | // ss has reason for failure | |
884 | string rs = ss.str(); | |
885 | mon->reply_command(op, -EINVAL, rs, rdata, get_last_committed()); | |
886 | return true; | |
887 | } | |
888 | ||
889 | string prefix; | |
890 | cmd_getval(g_ceph_context, cmdmap, "prefix", prefix); | |
891 | string format; | |
892 | cmd_getval(g_ceph_context, cmdmap, "format", format, string("plain")); | |
893 | boost::scoped_ptr<Formatter> f(Formatter::create(format)); | |
894 | ||
895 | MonSession *session = m->get_session(); | |
896 | if (!session) { | |
897 | mon->reply_command(op, -EACCES, "access denied", rdata, get_last_committed()); | |
898 | return true; | |
899 | } | |
900 | ||
901 | if (prefix == "mds stat") { | |
902 | if (f) { | |
903 | f->open_object_section("mds_stat"); | |
904 | dump_info(f.get()); | |
905 | f->close_section(); | |
906 | f->flush(ds); | |
907 | } else { | |
908 | ds << fsmap; | |
909 | } | |
910 | r = 0; | |
911 | } else if (prefix == "mds dump") { | |
912 | int64_t epocharg; | |
913 | epoch_t epoch; | |
914 | ||
915 | FSMap *p = &fsmap; | |
916 | if (cmd_getval(g_ceph_context, cmdmap, "epoch", epocharg)) { | |
917 | epoch = epocharg; | |
918 | bufferlist b; | |
919 | int err = get_version(epoch, b); | |
920 | if (err == -ENOENT) { | |
921 | p = 0; | |
922 | r = -ENOENT; | |
923 | } else { | |
924 | assert(err == 0); | |
925 | assert(b.length()); | |
926 | p = new FSMap; | |
927 | p->decode(b); | |
928 | } | |
929 | } | |
930 | if (p) { | |
931 | stringstream ds; | |
932 | const MDSMap *mdsmap = nullptr; | |
933 | MDSMap blank; | |
934 | blank.epoch = fsmap.epoch; | |
935 | if (fsmap.legacy_client_fscid != FS_CLUSTER_ID_NONE) { | |
936 | mdsmap = &(fsmap.filesystems[fsmap.legacy_client_fscid]->mds_map); | |
937 | } else { | |
938 | mdsmap = ␣ | |
939 | } | |
940 | if (f != NULL) { | |
941 | f->open_object_section("mdsmap"); | |
942 | mdsmap->dump(f.get()); | |
943 | f->close_section(); | |
944 | f->flush(ds); | |
945 | r = 0; | |
946 | } else { | |
947 | mdsmap->print(ds); | |
948 | r = 0; | |
949 | } | |
950 | if (r == 0) { | |
951 | rdata.append(ds); | |
952 | ss << "dumped fsmap epoch " << p->get_epoch(); | |
953 | } | |
954 | if (p != &fsmap) { | |
955 | delete p; | |
956 | } | |
957 | } | |
958 | } else if (prefix == "fs dump") { | |
959 | int64_t epocharg; | |
960 | epoch_t epoch; | |
961 | ||
962 | FSMap *p = &fsmap; | |
963 | if (cmd_getval(g_ceph_context, cmdmap, "epoch", epocharg)) { | |
964 | epoch = epocharg; | |
965 | bufferlist b; | |
966 | int err = get_version(epoch, b); | |
967 | if (err == -ENOENT) { | |
968 | p = 0; | |
969 | r = -ENOENT; | |
970 | } else { | |
971 | assert(err == 0); | |
972 | assert(b.length()); | |
973 | p = new FSMap; | |
974 | p->decode(b); | |
975 | } | |
976 | } | |
977 | if (p) { | |
978 | stringstream ds; | |
979 | if (f != NULL) { | |
980 | f->open_object_section("fsmap"); | |
981 | p->dump(f.get()); | |
982 | f->close_section(); | |
983 | f->flush(ds); | |
984 | r = 0; | |
985 | } else { | |
986 | p->print(ds); | |
987 | r = 0; | |
988 | } | |
989 | if (r == 0) { | |
990 | rdata.append(ds); | |
991 | ss << "dumped fsmap epoch " << p->get_epoch(); | |
992 | } | |
993 | if (p != &fsmap) | |
994 | delete p; | |
995 | } | |
996 | } else if (prefix == "mds metadata") { | |
997 | if (!f) | |
998 | f.reset(Formatter::create("json-pretty")); | |
999 | ||
1000 | string who; | |
1001 | bool all = !cmd_getval(g_ceph_context, cmdmap, "who", who); | |
1002 | dout(1) << "all = " << all << dendl; | |
1003 | if (all) { | |
1004 | r = 0; | |
1005 | // Dump all MDSs' metadata | |
1006 | const auto all_info = fsmap.get_mds_info(); | |
1007 | ||
1008 | f->open_array_section("mds_metadata"); | |
1009 | for(const auto &i : all_info) { | |
1010 | const auto &info = i.second; | |
1011 | ||
1012 | f->open_object_section("mds"); | |
1013 | f->dump_string("name", info.name); | |
1014 | std::ostringstream get_err; | |
1015 | r = dump_metadata(info.name, f.get(), get_err); | |
1016 | if (r == -EINVAL || r == -ENOENT) { | |
1017 | // Drop error, list what metadata we do have | |
1018 | dout(1) << get_err.str() << dendl; | |
1019 | r = 0; | |
1020 | } else if (r != 0) { | |
1021 | derr << "Unexpected error reading metadata: " << cpp_strerror(r) | |
1022 | << dendl; | |
1023 | ss << get_err.str(); | |
1024 | break; | |
1025 | } | |
1026 | f->close_section(); | |
1027 | } | |
1028 | f->close_section(); | |
1029 | } else { | |
1030 | // Dump a single daemon's metadata | |
1031 | f->open_object_section("mds_metadata"); | |
1032 | r = dump_metadata(who, f.get(), ss); | |
1033 | f->close_section(); | |
1034 | } | |
1035 | f->flush(ds); | |
31f18b77 FG |
1036 | } else if (prefix == "mds versions") { |
1037 | if (!f) | |
1038 | f.reset(Formatter::create("json-pretty")); | |
1039 | count_metadata("ceph_version", f.get()); | |
1040 | f->flush(ds); | |
1041 | r = 0; | |
1042 | } else if (prefix == "mds count-metadata") { | |
1043 | if (!f) | |
1044 | f.reset(Formatter::create("json-pretty")); | |
1045 | string field; | |
1046 | cmd_getval(g_ceph_context, cmdmap, "property", field); | |
1047 | count_metadata(field, f.get()); | |
1048 | f->flush(ds); | |
1049 | r = 0; | |
7c673cae FG |
1050 | } else if (prefix == "mds getmap") { |
1051 | epoch_t e; | |
1052 | int64_t epocharg; | |
1053 | bufferlist b; | |
1054 | if (cmd_getval(g_ceph_context, cmdmap, "epoch", epocharg)) { | |
1055 | e = epocharg; | |
1056 | int err = get_version(e, b); | |
1057 | if (err == -ENOENT) { | |
1058 | r = -ENOENT; | |
1059 | } else { | |
1060 | assert(err == 0); | |
1061 | assert(b.length()); | |
1062 | FSMap mm; | |
1063 | mm.decode(b); | |
1064 | mm.encode(rdata, m->get_connection()->get_features()); | |
1065 | ss << "got fsmap epoch " << mm.get_epoch(); | |
1066 | r = 0; | |
1067 | } | |
1068 | } else { | |
1069 | fsmap.encode(rdata, m->get_connection()->get_features()); | |
1070 | ss << "got fsmap epoch " << fsmap.get_epoch(); | |
1071 | r = 0; | |
1072 | } | |
1073 | } else if (prefix == "mds compat show") { | |
1074 | if (f) { | |
1075 | f->open_object_section("mds_compat"); | |
1076 | fsmap.compat.dump(f.get()); | |
1077 | f->close_section(); | |
1078 | f->flush(ds); | |
1079 | } else { | |
1080 | ds << fsmap.compat; | |
1081 | } | |
1082 | r = 0; | |
1083 | } else if (prefix == "fs get") { | |
1084 | string fs_name; | |
1085 | cmd_getval(g_ceph_context, cmdmap, "fs_name", fs_name); | |
1086 | auto fs = fsmap.get_filesystem(fs_name); | |
1087 | if (fs == nullptr) { | |
1088 | ss << "filesystem '" << fs_name << "' not found"; | |
1089 | r = -ENOENT; | |
1090 | } else { | |
1091 | if (f != nullptr) { | |
1092 | f->open_object_section("filesystem"); | |
1093 | fs->dump(f.get()); | |
1094 | f->close_section(); | |
1095 | f->flush(ds); | |
1096 | r = 0; | |
1097 | } else { | |
1098 | fs->print(ds); | |
1099 | r = 0; | |
1100 | } | |
1101 | } | |
1102 | } else if (prefix == "fs ls") { | |
1103 | if (f) { | |
1104 | f->open_array_section("filesystems"); | |
1105 | { | |
1106 | for (const auto i : fsmap.filesystems) { | |
1107 | const auto fs = i.second; | |
1108 | f->open_object_section("filesystem"); | |
1109 | { | |
1110 | const MDSMap &mds_map = fs->mds_map; | |
1111 | f->dump_string("name", mds_map.fs_name); | |
1112 | /* Output both the names and IDs of pools, for use by | |
1113 | * humans and machines respectively */ | |
1114 | f->dump_string("metadata_pool", mon->osdmon()->osdmap.get_pool_name( | |
1115 | mds_map.metadata_pool)); | |
1116 | f->dump_int("metadata_pool_id", mds_map.metadata_pool); | |
1117 | f->open_array_section("data_pool_ids"); | |
1118 | { | |
1119 | for (auto dpi = mds_map.data_pools.begin(); | |
1120 | dpi != mds_map.data_pools.end(); ++dpi) { | |
1121 | f->dump_int("data_pool_id", *dpi); | |
1122 | } | |
1123 | } | |
1124 | f->close_section(); | |
1125 | ||
1126 | f->open_array_section("data_pools"); | |
1127 | { | |
1128 | for (auto dpi = mds_map.data_pools.begin(); | |
1129 | dpi != mds_map.data_pools.end(); ++dpi) { | |
1130 | const auto &name = mon->osdmon()->osdmap.get_pool_name( | |
1131 | *dpi); | |
1132 | f->dump_string("data_pool", name); | |
1133 | } | |
1134 | } | |
1135 | ||
1136 | f->close_section(); | |
1137 | } | |
1138 | f->close_section(); | |
1139 | } | |
1140 | } | |
1141 | f->close_section(); | |
1142 | f->flush(ds); | |
1143 | } else { | |
1144 | for (const auto i : fsmap.filesystems) { | |
1145 | const auto fs = i.second; | |
1146 | const MDSMap &mds_map = fs->mds_map; | |
1147 | const string &md_pool_name = mon->osdmon()->osdmap.get_pool_name( | |
1148 | mds_map.metadata_pool); | |
1149 | ||
1150 | ds << "name: " << mds_map.fs_name << ", metadata pool: " | |
1151 | << md_pool_name << ", data pools: ["; | |
31f18b77 FG |
1152 | for (auto dpi : mds_map.data_pools) { |
1153 | const string &pool_name = mon->osdmon()->osdmap.get_pool_name(dpi); | |
7c673cae FG |
1154 | ds << pool_name << " "; |
1155 | } | |
1156 | ds << "]" << std::endl; | |
1157 | } | |
1158 | ||
1159 | if (fsmap.filesystems.empty()) { | |
1160 | ds << "No filesystems enabled" << std::endl; | |
1161 | } | |
1162 | } | |
1163 | r = 0; | |
1164 | } | |
1165 | ||
1166 | if (r != -1) { | |
1167 | rdata.append(ds); | |
1168 | string rs; | |
1169 | getline(ss, rs); | |
1170 | mon->reply_command(op, r, rs, rdata, get_last_committed()); | |
1171 | return true; | |
1172 | } else | |
1173 | return false; | |
1174 | } | |
1175 | ||
// Remove the daemon identified by `gid` from the pending fsmap, first
// blacklisting its address in the osdmap when it holds an active rank so a
// lingering instance cannot keep issuing capabilities.  Returns true iff a
// blacklist entry was created (i.e. the caller must also propose an osdmap
// update).
bool MDSMonitor::fail_mds_gid(mds_gid_t gid)
{
  const MDSMap::mds_info_t info = pending_fsmap.get_info_gid(gid);
  dout(10) << "fail_mds_gid " << gid << " mds." << info.name << " role " << info.rank << dendl;

  epoch_t blacklist_epoch = 0;
  // Only daemons that hold a rank (and are not standby-replay) get
  // blacklisted; standbys have no client-visible state to fence.
  if (info.rank >= 0 && info.state != MDSMap::STATE_STANDBY_REPLAY) {
    utime_t until = ceph_clock_now();
    until += g_conf->mds_blacklist_interval;
    blacklist_epoch = mon->osdmon()->blacklist(info.addr, until);
  }

  // Erase from the pending map (recording the blacklist epoch, if any) and
  // drop our beacon/health bookkeeping so the gid is not re-processed.
  pending_fsmap.erase(gid, blacklist_epoch);
  last_beacon.erase(gid);
  if (pending_daemon_health.count(gid)) {
    pending_daemon_health.erase(gid);
    pending_daemon_health_rm.insert(gid);
  }

  return blacklist_epoch != 0;
}
1197 | ||
// Resolve a user-supplied daemon reference to a GID.  `arg` may be, tried
// in this order: a role ("<fs>:<rank>" or bare rank), a daemon name, or a
// numeric GID.  Returns MDS_GID_NONE (writing a message to `ss`) when
// nothing matches.
mds_gid_t MDSMonitor::gid_from_arg(const std::string& arg, std::ostream &ss)
{
  // Leaders resolve against the pending (uncommitted) map so back-to-back
  // commands in one proposal window see each other's effects.
  const FSMap *relevant_fsmap = mon->is_leader() ? &pending_fsmap : &fsmap;

  // Try parsing as a role
  mds_role_t role;
  std::ostringstream ignore_err;  // Don't spam 'ss' with parse_role errors
  int r = parse_role(arg, &role, ignore_err);
  if (r == 0) {
    // See if a GID is assigned to this role
    auto fs = relevant_fsmap->get_filesystem(role.fscid);
    assert(fs != nullptr);  // parse_role ensures it exists
    if (fs->mds_map.is_up(role.rank)) {
      dout(10) << __func__ << ": validated rank/GID " << role
               << " as a rank" << dendl;
      return fs->mds_map.get_mds_info(role.rank).global_id;
    }
  }

  // Try parsing as a gid
  std::string err;
  unsigned long long maybe_gid = strict_strtoll(arg.c_str(), 10, &err);
  if (!err.empty()) {
    // Not a role or a GID, try as a daemon name
    const MDSMap::mds_info_t *mds_info = relevant_fsmap->find_by_name(arg);
    if (!mds_info) {
      ss << "MDS named '" << arg
         << "' does not exist, or is not up";
      return MDS_GID_NONE;
    }
    dout(10) << __func__ << ": resolved MDS name '" << arg
             << "' to GID " << mds_info->global_id << dendl;
    return mds_info->global_id;
  } else {
    // Not a role, but parses as a an integer, might be a GID
    dout(10) << __func__ << ": treating MDS reference '" << arg
             << "' as an integer " << maybe_gid << dendl;

    if (relevant_fsmap->gid_exists(mds_gid_t(maybe_gid))) {
      return mds_gid_t(maybe_gid);
    }
  }

  // Every interpretation failed.
  dout(1) << __func__ << ": rank/GID " << arg
          << " not a existent rank or GID" << dendl;
  return MDS_GID_NONE;
}
1245 | ||
// Fail the daemon referenced by `arg` (role, name, or GID).  Returns 0 on
// success AND when the reference does not resolve (gid_from_arg has
// already written the explanation into `ss`); returns -EAGAIN when the
// OSD monitor is not writeable (blacklisting needs an osdmap update) so
// the caller can requeue.
int MDSMonitor::fail_mds(std::ostream &ss, const std::string &arg)
{
  mds_gid_t gid = gid_from_arg(arg, ss);
  if (gid == MDS_GID_NONE) {
    return 0;
  }
  if (!mon->osdmon()->is_writeable()) {
    return -EAGAIN;
  }
  fail_mds_gid(gid);
  ss << "failed mds gid " << gid;
  // fail_mds_gid may have added a blacklist entry; push the osdmap change.
  assert(mon->osdmon()->is_writeable());
  request_proposal(mon->osdmon());
  return 0;
}
1261 | ||
// Handle a write command against the pending fsmap.  Dispatch order:
// registered FileSystemCommandHandlers, then filesystem_command(), then
// (when a legacy filesystem is set) legacy_filesystem_command().  Returns
// true when a reply has been sent or queued, false when the op was
// requeued for retry.
bool MDSMonitor::prepare_command(MonOpRequestRef op)
{
  op->mark_mdsmon_event(__func__);
  MMonCommand *m = static_cast<MMonCommand*>(op->get_req());
  int r = -EINVAL;
  stringstream ss;
  bufferlist rdata;

  map<string, cmd_vartype> cmdmap;
  if (!cmdmap_from_json(m->cmd, &cmdmap, ss)) {
    // ss has reason for failure
    string rs = ss.str();
    mon->reply_command(op, -EINVAL, rs, rdata, get_last_committed());
    return true;
  }

  string prefix;
  cmd_getval(g_ceph_context, cmdmap, "prefix", prefix);

  /* Refuse access if message not associated with a valid session */
  MonSession *session = m->get_session();
  if (!session) {
    mon->reply_command(op, -EACCES, "access denied", rdata, get_last_committed());
    return true;
  }

  // First: the table-driven handlers (fs new, fs set, etc.).
  for (auto h : handlers) {
    if (h->can_handle(prefix)) {
      r = h->handle(mon, pending_fsmap, op, cmdmap, ss);
      if (r == -EAGAIN) {
        // message has been enqueued for retry; return.
        dout(4) << __func__ << " enqueue for retry by prepare_command" << dendl;
        return false;
      } else {
        if (r == 0) {
          // On successful updates, print the updated map
          print_map(pending_fsmap);
        }
        // Successful or not, we're done: respond.
        goto out;
      }
    }
  }

  // Second: the hand-rolled commands (mds fail, mds rm, ...).
  r = filesystem_command(op, prefix, cmdmap, ss);
  if (r >= 0) {
    goto out;
  } else if (r == -EAGAIN) {
    // Do not reply, the message has been enqueued for retry
    dout(4) << __func__ << " enqueue for retry by filesystem_command" << dendl;
    return false;
  } else if (r != -ENOSYS) {
    goto out;
  }

  // Only handle legacy commands if there is a filesystem configured
  if (pending_fsmap.legacy_client_fscid == FS_CLUSTER_ID_NONE) {
    if (pending_fsmap.filesystems.size() == 0) {
      ss << "No filesystem configured: use `ceph fs new` to create a filesystem";
    } else {
      ss << "No filesystem set for use with legacy commands";
    }
    r = -EINVAL;
    goto out;
  }

  r = legacy_filesystem_command(op, prefix, cmdmap, ss);

  if (r == -ENOSYS && ss.str().empty()) {
    ss << "unrecognized command";
  }

out:
  dout(4) << __func__ << " done, r=" << r << dendl;
  /* Compose response */
  string rs;
  getline(ss, rs);

  if (r >= 0) {
    // success.. delay reply until the pending map change is committed
    wait_for_finished_proposal(op, new Monitor::C_Command(mon, op, r, rs,
					      get_last_committed() + 1));
    return true;
  } else {
    // reply immediately
    mon->reply_command(op, r, rs, rdata, get_last_committed());
    return false;
  }
}
1350 | ||
1351 | ||
1352 | /** | |
1353 | * Given one of the following forms: | |
1354 | * <fs name>:<rank> | |
1355 | * <fs id>:<rank> | |
1356 | * <rank> | |
1357 | * | |
1358 | * Parse into a mds_role_t. The rank-only form is only valid | |
1359 | * if legacy_client_ns is set. | |
1360 | */ | |
1361 | int MDSMonitor::parse_role( | |
1362 | const std::string &role_str, | |
1363 | mds_role_t *role, | |
1364 | std::ostream &ss) | |
1365 | { | |
1366 | const FSMap *relevant_fsmap = &fsmap; | |
1367 | if (mon->is_leader()) { | |
1368 | relevant_fsmap = &pending_fsmap; | |
1369 | } | |
1370 | return relevant_fsmap->parse_role(role_str, role, ss); | |
1371 | } | |
1372 | ||
1373 | int MDSMonitor::filesystem_command( | |
1374 | MonOpRequestRef op, | |
1375 | std::string const &prefix, | |
1376 | map<string, cmd_vartype> &cmdmap, | |
1377 | std::stringstream &ss) | |
1378 | { | |
1379 | dout(4) << __func__ << " prefix='" << prefix << "'" << dendl; | |
1380 | op->mark_mdsmon_event(__func__); | |
1381 | int r = 0; | |
1382 | string whostr; | |
1383 | cmd_getval(g_ceph_context, cmdmap, "who", whostr); | |
1384 | ||
1385 | if (prefix == "mds stop" || | |
1386 | prefix == "mds deactivate") { | |
1387 | ||
1388 | mds_role_t role; | |
1389 | r = parse_role(whostr, &role, ss); | |
1390 | if (r < 0 ) { | |
1391 | return r; | |
1392 | } | |
1393 | auto fs = pending_fsmap.get_filesystem(role.fscid); | |
1394 | ||
1395 | if (!fs->mds_map.is_active(role.rank)) { | |
1396 | r = -EEXIST; | |
1397 | ss << "mds." << role << " not active (" | |
1398 | << ceph_mds_state_name(fs->mds_map.get_state(role.rank)) << ")"; | |
1399 | } else if (fs->mds_map.get_root() == role.rank || | |
1400 | fs->mds_map.get_tableserver() == role.rank) { | |
1401 | r = -EINVAL; | |
1402 | ss << "can't tell the root (" << fs->mds_map.get_root() | |
1403 | << ") or tableserver (" << fs->mds_map.get_tableserver() | |
1404 | << ") to deactivate"; | |
31f18b77 FG |
1405 | } else if (role.rank != fs->mds_map.get_last_in_mds()) { |
1406 | r = -EINVAL; | |
1407 | ss << "mds." << role << " doesn't have the max rank (" | |
1408 | << fs->mds_map.get_last_in_mds() << ")"; | |
7c673cae FG |
1409 | } else if (fs->mds_map.get_num_in_mds() <= size_t(fs->mds_map.get_max_mds())) { |
1410 | r = -EBUSY; | |
1411 | ss << "must decrease max_mds or else MDS will immediately reactivate"; | |
1412 | } else { | |
1413 | r = 0; | |
1414 | mds_gid_t gid = fs->mds_map.up.at(role.rank); | |
1415 | ss << "telling mds." << role << " " | |
1416 | << pending_fsmap.get_info_gid(gid).addr << " to deactivate"; | |
1417 | ||
1418 | pending_fsmap.modify_daemon(gid, [](MDSMap::mds_info_t *info) { | |
1419 | info->state = MDSMap::STATE_STOPPING; | |
1420 | }); | |
1421 | } | |
1422 | } else if (prefix == "mds set_state") { | |
1423 | mds_gid_t gid; | |
1424 | if (!cmd_getval(g_ceph_context, cmdmap, "gid", gid)) { | |
1425 | ss << "error parsing 'gid' value '" | |
1426 | << cmd_vartype_stringify(cmdmap["gid"]) << "'"; | |
1427 | return -EINVAL; | |
1428 | } | |
1429 | MDSMap::DaemonState state; | |
1430 | if (!cmd_getval(g_ceph_context, cmdmap, "state", state)) { | |
1431 | ss << "error parsing 'state' string value '" | |
1432 | << cmd_vartype_stringify(cmdmap["state"]) << "'"; | |
1433 | return -EINVAL; | |
1434 | } | |
1435 | if (pending_fsmap.gid_exists(gid)) { | |
1436 | pending_fsmap.modify_daemon(gid, [state](MDSMap::mds_info_t *info) { | |
1437 | info->state = state; | |
1438 | }); | |
1439 | ss << "set mds gid " << gid << " to state " << state << " " | |
1440 | << ceph_mds_state_name(state); | |
1441 | return 0; | |
1442 | } | |
1443 | } else if (prefix == "mds fail") { | |
1444 | string who; | |
1445 | cmd_getval(g_ceph_context, cmdmap, "who", who); | |
1446 | r = fail_mds(ss, who); | |
1447 | if (r < 0 && r == -EAGAIN) { | |
1448 | mon->osdmon()->wait_for_writeable(op, new C_RetryMessage(this, op)); | |
1449 | return -EAGAIN; // don't propose yet; wait for message to be retried | |
1450 | } | |
1451 | } else if (prefix == "mds rm") { | |
1452 | mds_gid_t gid; | |
1453 | if (!cmd_getval(g_ceph_context, cmdmap, "gid", gid)) { | |
1454 | ss << "error parsing 'gid' value '" | |
1455 | << cmd_vartype_stringify(cmdmap["gid"]) << "'"; | |
1456 | return -EINVAL; | |
1457 | } | |
1458 | if (!pending_fsmap.gid_exists(gid)) { | |
1459 | ss << "mds gid " << gid << " dne"; | |
1460 | r = 0; | |
1461 | } else { | |
1462 | MDSMap::DaemonState state = pending_fsmap.get_info_gid(gid).state; | |
1463 | if (state > 0) { | |
1464 | ss << "cannot remove active mds." << pending_fsmap.get_info_gid(gid).name | |
1465 | << " rank " << pending_fsmap.get_info_gid(gid).rank; | |
1466 | return -EBUSY; | |
1467 | } else { | |
1468 | pending_fsmap.erase(gid, {}); | |
1469 | ss << "removed mds gid " << gid; | |
1470 | return 0; | |
1471 | } | |
1472 | } | |
1473 | } else if (prefix == "mds rmfailed") { | |
1474 | string confirm; | |
1475 | if (!cmd_getval(g_ceph_context, cmdmap, "confirm", confirm) || | |
1476 | confirm != "--yes-i-really-mean-it") { | |
1477 | ss << "WARNING: this can make your filesystem inaccessible! " | |
1478 | "Add --yes-i-really-mean-it if you are sure you wish to continue."; | |
1479 | return -EPERM; | |
1480 | } | |
1481 | ||
1482 | std::string role_str; | |
1483 | cmd_getval(g_ceph_context, cmdmap, "who", role_str); | |
1484 | mds_role_t role; | |
1485 | int r = parse_role(role_str, &role, ss); | |
1486 | if (r < 0) { | |
1487 | ss << "invalid role '" << role_str << "'"; | |
1488 | return -EINVAL; | |
1489 | } | |
1490 | ||
1491 | pending_fsmap.modify_filesystem( | |
1492 | role.fscid, | |
1493 | [role](std::shared_ptr<Filesystem> fs) | |
1494 | { | |
1495 | fs->mds_map.failed.erase(role.rank); | |
1496 | }); | |
1497 | ||
1498 | ss << "removed failed mds." << role; | |
1499 | return 0; | |
1500 | } else if (prefix == "mds compat rm_compat") { | |
1501 | int64_t f; | |
1502 | if (!cmd_getval(g_ceph_context, cmdmap, "feature", f)) { | |
1503 | ss << "error parsing feature value '" | |
1504 | << cmd_vartype_stringify(cmdmap["feature"]) << "'"; | |
1505 | return -EINVAL; | |
1506 | } | |
1507 | if (pending_fsmap.compat.compat.contains(f)) { | |
1508 | ss << "removing compat feature " << f; | |
1509 | CompatSet modified = pending_fsmap.compat; | |
1510 | modified.compat.remove(f); | |
1511 | pending_fsmap.update_compat(modified); | |
1512 | } else { | |
1513 | ss << "compat feature " << f << " not present in " << pending_fsmap.compat; | |
1514 | } | |
1515 | r = 0; | |
1516 | } else if (prefix == "mds compat rm_incompat") { | |
1517 | int64_t f; | |
1518 | if (!cmd_getval(g_ceph_context, cmdmap, "feature", f)) { | |
1519 | ss << "error parsing feature value '" | |
1520 | << cmd_vartype_stringify(cmdmap["feature"]) << "'"; | |
1521 | return -EINVAL; | |
1522 | } | |
1523 | if (pending_fsmap.compat.incompat.contains(f)) { | |
1524 | ss << "removing incompat feature " << f; | |
1525 | CompatSet modified = pending_fsmap.compat; | |
1526 | modified.incompat.remove(f); | |
1527 | pending_fsmap.update_compat(modified); | |
1528 | } else { | |
1529 | ss << "incompat feature " << f << " not present in " << pending_fsmap.compat; | |
1530 | } | |
1531 | r = 0; | |
1532 | } else if (prefix == "mds repaired") { | |
1533 | std::string role_str; | |
1534 | cmd_getval(g_ceph_context, cmdmap, "rank", role_str); | |
1535 | mds_role_t role; | |
1536 | r = parse_role(role_str, &role, ss); | |
1537 | if (r < 0) { | |
1538 | return r; | |
1539 | } | |
1540 | ||
1541 | bool modified = pending_fsmap.undamaged(role.fscid, role.rank); | |
1542 | if (modified) { | |
1543 | dout(4) << "repaired: restoring rank " << role << dendl; | |
1544 | } else { | |
1545 | dout(4) << "repaired: no-op on rank " << role << dendl; | |
1546 | } | |
1547 | ||
1548 | r = 0; | |
1549 | } else { | |
1550 | return -ENOSYS; | |
1551 | } | |
1552 | ||
1553 | return r; | |
1554 | } | |
1555 | ||
1556 | /** | |
1557 | * Helper to legacy_filesystem_command | |
1558 | */ | |
1559 | void MDSMonitor::modify_legacy_filesystem( | |
1560 | std::function<void(std::shared_ptr<Filesystem> )> fn) | |
1561 | { | |
1562 | pending_fsmap.modify_filesystem( | |
1563 | pending_fsmap.legacy_client_fscid, | |
1564 | fn | |
1565 | ); | |
1566 | } | |
1567 | ||
1568 | ||
1569 | ||
1570 | /** | |
1571 | * Handle a command that affects the filesystem (i.e. a filesystem | |
1572 | * must exist for the command to act upon). | |
1573 | * | |
1574 | * @retval 0 Command was successfully handled and has side effects | |
1575 | * @retval -EAGAIN Messages has been requeued for retry | |
1576 | * @retval -ENOSYS Unknown command | |
1577 | * @retval < 0 An error has occurred; **ss** may have been set. | |
1578 | */ | |
1579 | int MDSMonitor::legacy_filesystem_command( | |
1580 | MonOpRequestRef op, | |
1581 | std::string const &prefix, | |
1582 | map<string, cmd_vartype> &cmdmap, | |
1583 | std::stringstream &ss) | |
1584 | { | |
1585 | dout(4) << __func__ << " prefix='" << prefix << "'" << dendl; | |
1586 | op->mark_mdsmon_event(__func__); | |
1587 | int r = 0; | |
1588 | string whostr; | |
1589 | cmd_getval(g_ceph_context, cmdmap, "who", whostr); | |
1590 | ||
1591 | assert (pending_fsmap.legacy_client_fscid != FS_CLUSTER_ID_NONE); | |
1592 | ||
1593 | if (prefix == "mds set_max_mds") { | |
1594 | // NOTE: deprecated by "fs set max_mds" | |
1595 | int64_t maxmds; | |
1596 | if (!cmd_getval(g_ceph_context, cmdmap, "maxmds", maxmds) || maxmds <= 0) { | |
1597 | return -EINVAL; | |
1598 | } | |
1599 | ||
1600 | const MDSMap& mdsmap = | |
1601 | pending_fsmap.filesystems.at(pending_fsmap.legacy_client_fscid)->mds_map; | |
1602 | ||
1603 | if (!mdsmap.allows_multimds() && | |
1604 | maxmds > mdsmap.get_max_mds() && | |
1605 | maxmds > 1) { | |
1606 | ss << "multi-MDS clusters are not enabled; set 'allow_multimds' to enable"; | |
1607 | return -EINVAL; | |
1608 | } | |
1609 | ||
1610 | if (maxmds > MAX_MDS) { | |
1611 | ss << "may not have more than " << MAX_MDS << " MDS ranks"; | |
1612 | return -EINVAL; | |
1613 | } | |
1614 | ||
1615 | modify_legacy_filesystem( | |
1616 | [maxmds](std::shared_ptr<Filesystem> fs) | |
1617 | { | |
1618 | fs->mds_map.set_max_mds(maxmds); | |
1619 | }); | |
1620 | ||
1621 | r = 0; | |
1622 | ss << "max_mds = " << maxmds; | |
1623 | } else if (prefix == "mds cluster_down") { | |
1624 | // NOTE: deprecated by "fs set cluster_down" | |
1625 | modify_legacy_filesystem( | |
1626 | [](std::shared_ptr<Filesystem> fs) | |
1627 | { | |
1628 | fs->mds_map.set_flag(CEPH_MDSMAP_DOWN); | |
1629 | }); | |
1630 | ss << "marked fsmap DOWN"; | |
1631 | r = 0; | |
1632 | } else if (prefix == "mds cluster_up") { | |
1633 | // NOTE: deprecated by "fs set cluster_up" | |
1634 | modify_legacy_filesystem( | |
1635 | [](std::shared_ptr<Filesystem> fs) | |
1636 | { | |
1637 | fs->mds_map.clear_flag(CEPH_MDSMAP_DOWN); | |
1638 | }); | |
1639 | ss << "unmarked fsmap DOWN"; | |
1640 | r = 0; | |
1641 | } else { | |
1642 | return -ENOSYS; | |
1643 | } | |
1644 | ||
1645 | return r; | |
1646 | } | |
1647 | ||
1648 | ||
1649 | void MDSMonitor::check_subs() | |
1650 | { | |
1651 | std::list<std::string> types; | |
1652 | ||
1653 | // Subscriptions may be to "mdsmap" (MDS and legacy clients), | |
1654 | // "mdsmap.<namespace>", or to "fsmap" for the full state of all | |
1655 | // filesystems. Build a list of all the types we service | |
1656 | // subscriptions for. | |
1657 | types.push_back("fsmap"); | |
1658 | types.push_back("fsmap.user"); | |
1659 | types.push_back("mdsmap"); | |
1660 | for (const auto &i : fsmap.filesystems) { | |
1661 | auto fscid = i.first; | |
1662 | std::ostringstream oss; | |
1663 | oss << "mdsmap." << fscid; | |
1664 | types.push_back(oss.str()); | |
1665 | } | |
1666 | ||
1667 | for (const auto &type : types) { | |
1668 | if (mon->session_map.subs.count(type) == 0) | |
1669 | continue; | |
1670 | xlist<Subscription*>::iterator p = mon->session_map.subs[type]->begin(); | |
1671 | while (!p.end()) { | |
1672 | Subscription *sub = *p; | |
1673 | ++p; | |
1674 | check_sub(sub); | |
1675 | } | |
1676 | } | |
1677 | } | |
1678 | ||
1679 | ||
1680 | void MDSMonitor::check_sub(Subscription *sub) | |
1681 | { | |
1682 | dout(20) << __func__ << ": " << sub->type << dendl; | |
1683 | ||
1684 | if (sub->type == "fsmap") { | |
1685 | if (sub->next <= fsmap.get_epoch()) { | |
1686 | sub->session->con->send_message(new MFSMap(mon->monmap->fsid, fsmap)); | |
1687 | if (sub->onetime) { | |
1688 | mon->session_map.remove_sub(sub); | |
1689 | } else { | |
1690 | sub->next = fsmap.get_epoch() + 1; | |
1691 | } | |
1692 | } | |
1693 | } else if (sub->type == "fsmap.user") { | |
1694 | if (sub->next <= fsmap.get_epoch()) { | |
1695 | FSMapUser fsmap_u; | |
1696 | fsmap_u.epoch = fsmap.get_epoch(); | |
1697 | fsmap_u.legacy_client_fscid = fsmap.legacy_client_fscid; | |
1698 | for (auto p = fsmap.filesystems.begin(); | |
1699 | p != fsmap.filesystems.end(); | |
1700 | ++p) { | |
1701 | FSMapUser::fs_info_t& fs_info = fsmap_u.filesystems[p->first]; | |
1702 | fs_info.cid = p->first; | |
1703 | fs_info.name= p->second->mds_map.fs_name; | |
1704 | } | |
1705 | sub->session->con->send_message(new MFSMapUser(mon->monmap->fsid, fsmap_u)); | |
1706 | if (sub->onetime) { | |
1707 | mon->session_map.remove_sub(sub); | |
1708 | } else { | |
1709 | sub->next = fsmap.get_epoch() + 1; | |
1710 | } | |
1711 | } | |
1712 | } else if (sub->type.compare(0, 6, "mdsmap") == 0) { | |
1713 | if (sub->next > fsmap.get_epoch()) { | |
1714 | return; | |
1715 | } | |
1716 | ||
1717 | const bool is_mds = sub->session->inst.name.is_mds(); | |
1718 | mds_gid_t mds_gid = MDS_GID_NONE; | |
1719 | fs_cluster_id_t fscid = FS_CLUSTER_ID_NONE; | |
1720 | if (is_mds) { | |
1721 | // What (if any) namespace are you assigned to? | |
1722 | auto mds_info = fsmap.get_mds_info(); | |
1723 | for (const auto &i : mds_info) { | |
1724 | if (i.second.addr == sub->session->inst.addr) { | |
1725 | mds_gid = i.first; | |
1726 | fscid = fsmap.mds_roles.at(mds_gid); | |
1727 | } | |
1728 | } | |
1729 | } else { | |
1730 | // You're a client. Did you request a particular | |
1731 | // namespace? | |
1732 | if (sub->type.find("mdsmap.") == 0) { | |
1733 | auto namespace_id_str = sub->type.substr(std::string("mdsmap.").size()); | |
1734 | dout(10) << __func__ << ": namespace_id " << namespace_id_str << dendl; | |
1735 | std::string err; | |
1736 | fscid = strict_strtoll(namespace_id_str.c_str(), 10, &err); | |
1737 | if (!err.empty()) { | |
1738 | // Client asked for a non-existent namespace, send them nothing | |
1739 | dout(1) << "Invalid client subscription '" << sub->type | |
1740 | << "'" << dendl; | |
1741 | return; | |
1742 | } | |
1743 | if (fsmap.filesystems.count(fscid) == 0) { | |
1744 | // Client asked for a non-existent namespace, send them nothing | |
1745 | // TODO: something more graceful for when a client has a filesystem | |
1746 | // mounted, and the fileysstem is deleted. Add a "shut down you fool" | |
1747 | // flag to MMDSMap? | |
1748 | dout(1) << "Client subscribed to non-existent namespace '" << | |
1749 | fscid << "'" << dendl; | |
1750 | return; | |
1751 | } | |
1752 | } else { | |
1753 | // Unqualified request for "mdsmap": give it the one marked | |
1754 | // for use by legacy clients. | |
1755 | if (fsmap.legacy_client_fscid != FS_CLUSTER_ID_NONE) { | |
1756 | fscid = fsmap.legacy_client_fscid; | |
1757 | } else { | |
1758 | dout(1) << "Client subscribed for legacy filesystem but " | |
1759 | "none is configured" << dendl; | |
1760 | return; | |
1761 | } | |
1762 | } | |
1763 | } | |
1764 | dout(10) << __func__ << ": is_mds=" << is_mds << ", fscid= " << fscid << dendl; | |
1765 | ||
1766 | // Work out the effective latest epoch | |
1767 | MDSMap *mds_map = nullptr; | |
1768 | MDSMap null_map; | |
1769 | null_map.compat = fsmap.compat; | |
1770 | if (fscid == FS_CLUSTER_ID_NONE) { | |
1771 | // For a client, we should have already dropped out | |
1772 | assert(is_mds); | |
1773 | ||
1774 | if (fsmap.standby_daemons.count(mds_gid)) { | |
1775 | // For an MDS, we need to feed it an MDSMap with its own state in | |
1776 | null_map.mds_info[mds_gid] = fsmap.standby_daemons[mds_gid]; | |
1777 | null_map.epoch = fsmap.standby_epochs[mds_gid]; | |
1778 | } else { | |
1779 | null_map.epoch = fsmap.epoch; | |
1780 | } | |
1781 | mds_map = &null_map; | |
1782 | } else { | |
1783 | // Check the effective epoch | |
1784 | mds_map = &(fsmap.filesystems.at(fscid)->mds_map); | |
1785 | } | |
1786 | ||
1787 | assert(mds_map != nullptr); | |
1788 | dout(10) << __func__ << " selected MDS map epoch " << | |
1789 | mds_map->epoch << " for namespace " << fscid << " for subscriber " | |
1790 | << sub->session->inst.name << " who wants epoch " << sub->next << dendl; | |
1791 | ||
1792 | if (sub->next > mds_map->epoch) { | |
1793 | return; | |
1794 | } | |
1795 | auto msg = new MMDSMap(mon->monmap->fsid, mds_map); | |
1796 | ||
1797 | sub->session->con->send_message(msg); | |
1798 | if (sub->onetime) { | |
1799 | mon->session_map.remove_sub(sub); | |
1800 | } else { | |
1801 | sub->next = mds_map->get_epoch() + 1; | |
1802 | } | |
1803 | } | |
1804 | } | |
1805 | ||
1806 | ||
1807 | void MDSMonitor::update_metadata(mds_gid_t gid, | |
1808 | const map<string, string>& metadata) | |
1809 | { | |
1810 | if (metadata.empty()) { | |
1811 | return; | |
1812 | } | |
1813 | pending_metadata[gid] = metadata; | |
1814 | ||
1815 | MonitorDBStore::TransactionRef t = paxos->get_pending_transaction(); | |
1816 | bufferlist bl; | |
1817 | ::encode(pending_metadata, bl); | |
1818 | t->put(MDS_METADATA_PREFIX, "last_metadata", bl); | |
1819 | paxos->trigger_propose(); | |
1820 | } | |
1821 | ||
1822 | void MDSMonitor::remove_from_metadata(MonitorDBStore::TransactionRef t) | |
1823 | { | |
1824 | bool update = false; | |
1825 | for (map<mds_gid_t, Metadata>::iterator i = pending_metadata.begin(); | |
1826 | i != pending_metadata.end(); ) { | |
1827 | if (!pending_fsmap.gid_exists(i->first)) { | |
1828 | pending_metadata.erase(i++); | |
1829 | update = true; | |
1830 | } else { | |
1831 | ++i; | |
1832 | } | |
1833 | } | |
1834 | if (!update) | |
1835 | return; | |
1836 | bufferlist bl; | |
1837 | ::encode(pending_metadata, bl); | |
1838 | t->put(MDS_METADATA_PREFIX, "last_metadata", bl); | |
1839 | } | |
1840 | ||
1841 | int MDSMonitor::load_metadata(map<mds_gid_t, Metadata>& m) | |
1842 | { | |
1843 | bufferlist bl; | |
1844 | int r = mon->store->get(MDS_METADATA_PREFIX, "last_metadata", bl); | |
1845 | if (r) { | |
1846 | dout(1) << "Unable to load 'last_metadata'" << dendl; | |
1847 | return r; | |
1848 | } | |
1849 | ||
1850 | bufferlist::iterator it = bl.begin(); | |
1851 | ::decode(m, it); | |
1852 | return 0; | |
1853 | } | |
1854 | ||
31f18b77 FG |
1855 | void MDSMonitor::count_metadata(const string& field, Formatter *f) |
1856 | { | |
1857 | map<string,int> by_val; | |
1858 | map<mds_gid_t,Metadata> meta; | |
1859 | load_metadata(meta); | |
1860 | for (auto& p : meta) { | |
1861 | auto q = p.second.find(field); | |
1862 | if (q == p.second.end()) { | |
1863 | by_val["unknown"]++; | |
1864 | } else { | |
1865 | by_val[q->second]++; | |
1866 | } | |
1867 | } | |
1868 | f->open_object_section(field.c_str()); | |
1869 | for (auto& p : by_val) { | |
1870 | f->dump_int(p.first.c_str(), p.second); | |
1871 | } | |
1872 | f->close_section(); | |
1873 | } | |
1874 | ||
7c673cae FG |
1875 | int MDSMonitor::dump_metadata(const std::string &who, Formatter *f, ostream& err) |
1876 | { | |
1877 | assert(f); | |
1878 | ||
1879 | mds_gid_t gid = gid_from_arg(who, err); | |
1880 | if (gid == MDS_GID_NONE) { | |
1881 | return -EINVAL; | |
1882 | } | |
1883 | ||
1884 | map<mds_gid_t, Metadata> metadata; | |
1885 | if (int r = load_metadata(metadata)) { | |
1886 | err << "Unable to load 'last_metadata'"; | |
1887 | return r; | |
1888 | } | |
1889 | ||
1890 | if (!metadata.count(gid)) { | |
1891 | return -ENOENT; | |
1892 | } | |
1893 | const Metadata& m = metadata[gid]; | |
1894 | for (Metadata::const_iterator p = m.begin(); p != m.end(); ++p) { | |
1895 | f->dump_string(p->first.c_str(), p->second); | |
1896 | } | |
1897 | return 0; | |
1898 | } | |
1899 | ||
1900 | int MDSMonitor::print_nodes(Formatter *f) | |
1901 | { | |
1902 | assert(f); | |
1903 | ||
1904 | map<mds_gid_t, Metadata> metadata; | |
1905 | if (int r = load_metadata(metadata)) { | |
1906 | return r; | |
1907 | } | |
1908 | ||
1909 | map<string, list<int> > mdses; // hostname => rank | |
1910 | for (map<mds_gid_t, Metadata>::iterator it = metadata.begin(); | |
1911 | it != metadata.end(); ++it) { | |
1912 | const Metadata& m = it->second; | |
1913 | Metadata::const_iterator hostname = m.find("hostname"); | |
1914 | if (hostname == m.end()) { | |
1915 | // not likely though | |
1916 | continue; | |
1917 | } | |
1918 | const mds_gid_t gid = it->first; | |
1919 | if (!fsmap.gid_exists(gid)) { | |
1920 | dout(5) << __func__ << ": GID " << gid << " not existent" << dendl; | |
1921 | continue; | |
1922 | } | |
1923 | const MDSMap::mds_info_t& mds_info = fsmap.get_info_gid(gid); | |
1924 | // FIXME: include filesystem name with rank here | |
1925 | mdses[hostname->second].push_back(mds_info.rank); | |
1926 | } | |
1927 | ||
1928 | dump_services(f, mdses, "mds"); | |
1929 | return 0; | |
1930 | } | |
1931 | ||
1932 | /** | |
1933 | * If a cluster is undersized (with respect to max_mds), then | |
1934 | * attempt to find daemons to grow it. | |
1935 | */ | |
1936 | bool MDSMonitor::maybe_expand_cluster(std::shared_ptr<Filesystem> fs) | |
1937 | { | |
1938 | bool do_propose = false; | |
1939 | ||
1940 | if (fs->mds_map.test_flag(CEPH_MDSMAP_DOWN)) { | |
1941 | return do_propose; | |
1942 | } | |
1943 | ||
1944 | while (fs->mds_map.get_num_in_mds() < size_t(fs->mds_map.get_max_mds()) && | |
1945 | !fs->mds_map.is_degraded()) { | |
1946 | mds_rank_t mds = mds_rank_t(0); | |
1947 | string name; | |
1948 | while (fs->mds_map.is_in(mds)) { | |
1949 | mds++; | |
1950 | } | |
1951 | mds_gid_t newgid = pending_fsmap.find_replacement_for({fs->fscid, mds}, | |
1952 | name, g_conf->mon_force_standby_active); | |
1953 | if (newgid == MDS_GID_NONE) { | |
1954 | break; | |
1955 | } | |
1956 | ||
1957 | dout(1) << "adding standby " << pending_fsmap.get_info_gid(newgid).addr | |
1958 | << " as mds." << mds << dendl; | |
1959 | pending_fsmap.promote(newgid, fs, mds); | |
1960 | do_propose = true; | |
1961 | } | |
1962 | ||
1963 | return do_propose; | |
1964 | } | |
1965 | ||
1966 | ||
1967 | /** | |
1968 | * If a daemon is laggy, and a suitable replacement | |
1969 | * is available, fail this daemon (remove from map) and pass its | |
1970 | * role to another daemon. | |
1971 | */ | |
1972 | void MDSMonitor::maybe_replace_gid(mds_gid_t gid, | |
1973 | const beacon_info_t &beacon, | |
1974 | bool *mds_propose, bool *osd_propose) | |
1975 | { | |
1976 | assert(mds_propose != nullptr); | |
1977 | assert(osd_propose != nullptr); | |
1978 | ||
1979 | const MDSMap::mds_info_t info = pending_fsmap.get_info_gid(gid); | |
1980 | const auto fscid = pending_fsmap.mds_roles.at(gid); | |
1981 | ||
1982 | dout(10) << "no beacon from " << gid << " " << info.addr << " mds." | |
1983 | << info.rank << "." << info.inc | |
1984 | << " " << ceph_mds_state_name(info.state) | |
1985 | << " since " << beacon.stamp << dendl; | |
1986 | ||
31f18b77 FG |
1987 | // We will only take decisive action (replacing/removing a daemon) |
1988 | // if we have some indicating that some other daemon(s) are successfully | |
1989 | // getting beacons through recently. | |
1990 | utime_t latest_beacon; | |
1991 | for (const auto & i : last_beacon) { | |
1992 | latest_beacon = MAX(i.second.stamp, latest_beacon); | |
1993 | } | |
1994 | const bool may_replace = latest_beacon > | |
1995 | (ceph_clock_now() - | |
1996 | MAX(g_conf->mds_beacon_interval, g_conf->mds_beacon_grace * 0.5)); | |
1997 | ||
7c673cae FG |
1998 | // are we in? |
1999 | // and is there a non-laggy standby that can take over for us? | |
2000 | mds_gid_t sgid; | |
2001 | if (info.rank >= 0 && | |
2002 | info.state != MDSMap::STATE_STANDBY && | |
2003 | info.state != MDSMap::STATE_STANDBY_REPLAY && | |
31f18b77 | 2004 | may_replace && |
7c673cae FG |
2005 | !pending_fsmap.get_filesystem(fscid)->mds_map.test_flag(CEPH_MDSMAP_DOWN) && |
2006 | (sgid = pending_fsmap.find_replacement_for({fscid, info.rank}, info.name, | |
2007 | g_conf->mon_force_standby_active)) != MDS_GID_NONE) | |
2008 | { | |
2009 | ||
2010 | MDSMap::mds_info_t si = pending_fsmap.get_info_gid(sgid); | |
2011 | dout(10) << " replacing " << gid << " " << info.addr << " mds." | |
2012 | << info.rank << "." << info.inc | |
2013 | << " " << ceph_mds_state_name(info.state) | |
2014 | << " with " << sgid << "/" << si.name << " " << si.addr << dendl; | |
2015 | ||
31f18b77 FG |
2016 | mon->clog->warn() << "MDS daemon '" << info.name << "'" |
2017 | << " is not responding, replacing it " | |
2018 | << "as rank " << info.rank | |
2019 | << " with standby '" << si.name << "'"; | |
2020 | ||
7c673cae FG |
2021 | // Remember what NS the old one was in |
2022 | const fs_cluster_id_t fscid = pending_fsmap.mds_roles.at(gid); | |
2023 | ||
2024 | // Remove the old one | |
2025 | *osd_propose |= fail_mds_gid(gid); | |
2026 | ||
2027 | // Promote the replacement | |
2028 | auto fs = pending_fsmap.filesystems.at(fscid); | |
2029 | pending_fsmap.promote(sgid, fs, info.rank); | |
2030 | ||
2031 | *mds_propose = true; | |
31f18b77 FG |
2032 | } else if ((info.state == MDSMap::STATE_STANDBY_REPLAY || |
2033 | info.state == MDSMap::STATE_STANDBY) && may_replace) { | |
7c673cae FG |
2034 | dout(10) << " failing and removing " << gid << " " << info.addr << " mds." << info.rank |
2035 | << "." << info.inc << " " << ceph_mds_state_name(info.state) | |
2036 | << dendl; | |
31f18b77 FG |
2037 | mon->clog->info() << "MDS standby '" << info.name |
2038 | << "' is not responding, removing it from the set of " | |
2039 | << "standbys"; | |
7c673cae FG |
2040 | fail_mds_gid(gid); |
2041 | *mds_propose = true; | |
2042 | } else if (!info.laggy()) { | |
2043 | dout(10) << " marking " << gid << " " << info.addr << " mds." << info.rank << "." << info.inc | |
2044 | << " " << ceph_mds_state_name(info.state) | |
2045 | << " laggy" << dendl; | |
2046 | pending_fsmap.modify_daemon(info.global_id, [](MDSMap::mds_info_t *info) { | |
2047 | info->laggy_since = ceph_clock_now(); | |
2048 | }); | |
2049 | *mds_propose = true; | |
2050 | } | |
2051 | } | |
2052 | ||
2053 | bool MDSMonitor::maybe_promote_standby(std::shared_ptr<Filesystem> fs) | |
2054 | { | |
2055 | assert(!fs->mds_map.test_flag(CEPH_MDSMAP_DOWN)); | |
2056 | ||
2057 | bool do_propose = false; | |
2058 | ||
2059 | // have a standby take over? | |
2060 | set<mds_rank_t> failed; | |
2061 | fs->mds_map.get_failed_mds_set(failed); | |
2062 | if (!failed.empty()) { | |
2063 | set<mds_rank_t>::iterator p = failed.begin(); | |
2064 | while (p != failed.end()) { | |
2065 | mds_rank_t f = *p++; | |
2066 | mds_gid_t sgid = pending_fsmap.find_replacement_for({fs->fscid, f}, {}, | |
2067 | g_conf->mon_force_standby_active); | |
2068 | if (sgid) { | |
2069 | const MDSMap::mds_info_t si = pending_fsmap.get_info_gid(sgid); | |
2070 | dout(0) << " taking over failed mds." << f << " with " << sgid | |
2071 | << "/" << si.name << " " << si.addr << dendl; | |
2072 | pending_fsmap.promote(sgid, fs, f); | |
2073 | do_propose = true; | |
2074 | } | |
2075 | } | |
2076 | } else { | |
2077 | // There were no failures to replace, so try using any available standbys | |
2078 | // as standby-replay daemons. | |
2079 | ||
2080 | // Take a copy of the standby GIDs so that we can iterate over | |
2081 | // them while perhaps-modifying standby_daemons during the loop | |
2082 | // (if we promote anyone they are removed from standby_daemons) | |
2083 | std::vector<mds_gid_t> standby_gids; | |
2084 | for (const auto &j : pending_fsmap.standby_daemons) { | |
2085 | standby_gids.push_back(j.first); | |
2086 | } | |
2087 | ||
2088 | for (const auto &gid : standby_gids) { | |
2089 | const auto &info = pending_fsmap.standby_daemons.at(gid); | |
2090 | assert(info.state == MDSMap::STATE_STANDBY); | |
2091 | ||
2092 | if (!info.standby_replay) { | |
2093 | continue; | |
2094 | } | |
2095 | ||
2096 | /* | |
2097 | * This mds is standby but has no rank assigned. | |
2098 | * See if we can find it somebody to shadow | |
2099 | */ | |
2100 | dout(20) << "gid " << gid << " is standby and following nobody" << dendl; | |
2101 | ||
2102 | // standby for someone specific? | |
2103 | if (info.standby_for_rank >= 0) { | |
2104 | // The mds_info_t may or may not tell us exactly which filesystem | |
2105 | // the standby_for_rank refers to: lookup via legacy_client_fscid | |
2106 | mds_role_t target_role = { | |
2107 | info.standby_for_fscid == FS_CLUSTER_ID_NONE ? | |
2108 | pending_fsmap.legacy_client_fscid : info.standby_for_fscid, | |
2109 | info.standby_for_rank}; | |
2110 | ||
2111 | // It is possible that the map contains a standby_for_fscid | |
2112 | // that doesn't correspond to an existing filesystem, especially | |
2113 | // if we loaded from a version with a bug (#17466) | |
2114 | if (info.standby_for_fscid != FS_CLUSTER_ID_NONE | |
2115 | && !pending_fsmap.filesystem_exists(info.standby_for_fscid)) { | |
2116 | derr << "gid " << gid << " has invalid standby_for_fscid " | |
2117 | << info.standby_for_fscid << dendl; | |
2118 | continue; | |
2119 | } | |
2120 | ||
2121 | // If we managed to resolve a full target role | |
2122 | if (target_role.fscid != FS_CLUSTER_ID_NONE) { | |
2123 | auto fs = pending_fsmap.get_filesystem(target_role.fscid); | |
2124 | if (fs->mds_map.is_followable(target_role.rank)) { | |
2125 | do_propose |= try_standby_replay( | |
2126 | info, | |
2127 | *fs, | |
2128 | fs->mds_map.get_info(target_role.rank)); | |
2129 | } | |
2130 | } | |
2131 | ||
2132 | continue; | |
2133 | } | |
2134 | ||
2135 | // check everyone | |
2136 | for (auto fs_i : pending_fsmap.filesystems) { | |
2137 | const MDSMap &mds_map = fs_i.second->mds_map; | |
2138 | for (auto mds_i : mds_map.mds_info) { | |
2139 | MDSMap::mds_info_t &cand_info = mds_i.second; | |
2140 | if (cand_info.rank >= 0 && mds_map.is_followable(cand_info.rank)) { | |
2141 | if ((info.standby_for_name.length() && info.standby_for_name != cand_info.name) || | |
2142 | info.standby_for_rank != MDS_RANK_NONE) { | |
2143 | continue; // we're supposed to follow someone else | |
2144 | } | |
2145 | ||
2146 | if (try_standby_replay(info, *(fs_i.second), cand_info)) { | |
2147 | do_propose = true; | |
2148 | break; | |
2149 | } | |
2150 | continue; | |
2151 | } | |
2152 | } | |
2153 | } | |
2154 | } | |
2155 | } | |
2156 | ||
2157 | return do_propose; | |
2158 | } | |
2159 | ||
2160 | void MDSMonitor::tick() | |
2161 | { | |
2162 | // make sure mds's are still alive | |
2163 | // ...if i am an active leader | |
2164 | if (!is_active()) return; | |
2165 | ||
2166 | dout(10) << fsmap << dendl; | |
2167 | ||
2168 | bool do_propose = false; | |
2169 | ||
2170 | if (!mon->is_leader()) return; | |
2171 | ||
2172 | do_propose |= pending_fsmap.check_health(); | |
2173 | ||
2174 | // expand mds cluster (add new nodes to @in)? | |
2175 | for (auto i : pending_fsmap.filesystems) { | |
2176 | do_propose |= maybe_expand_cluster(i.second); | |
2177 | } | |
2178 | ||
2179 | const auto now = ceph_clock_now(); | |
2180 | if (last_tick.is_zero()) { | |
2181 | last_tick = now; | |
2182 | } | |
2183 | ||
2184 | if (now - last_tick > (g_conf->mds_beacon_grace - g_conf->mds_beacon_interval)) { | |
2185 | // This case handles either local slowness (calls being delayed | |
2186 | // for whatever reason) or cluster election slowness (a long gap | |
2187 | // between calls while an election happened) | |
2188 | dout(4) << __func__ << ": resetting beacon timeouts due to mon delay " | |
2189 | "(slow election?) of " << now - last_tick << " seconds" << dendl; | |
2190 | for (auto &i : last_beacon) { | |
2191 | i.second.stamp = now; | |
2192 | } | |
2193 | } | |
2194 | ||
2195 | last_tick = now; | |
2196 | ||
2197 | // check beacon timestamps | |
2198 | utime_t cutoff = now; | |
2199 | cutoff -= g_conf->mds_beacon_grace; | |
2200 | ||
2201 | // make sure last_beacon is fully populated | |
2202 | for (const auto &p : pending_fsmap.mds_roles) { | |
2203 | auto &gid = p.first; | |
2204 | if (last_beacon.count(gid) == 0) { | |
2205 | last_beacon[gid].stamp = now; | |
2206 | last_beacon[gid].seq = 0; | |
2207 | } | |
2208 | } | |
2209 | ||
2210 | // If the OSDMap is writeable, we can blacklist things, so we can | |
2211 | // try failing any laggy MDS daemons. Consider each one for failure. | |
2212 | if (mon->osdmon()->is_writeable()) { | |
2213 | bool propose_osdmap = false; | |
2214 | ||
2215 | map<mds_gid_t, beacon_info_t>::iterator p = last_beacon.begin(); | |
2216 | while (p != last_beacon.end()) { | |
2217 | mds_gid_t gid = p->first; | |
2218 | auto beacon_info = p->second; | |
2219 | ++p; | |
2220 | ||
2221 | if (!pending_fsmap.gid_exists(gid)) { | |
2222 | // clean it out | |
2223 | last_beacon.erase(gid); | |
2224 | continue; | |
2225 | } | |
2226 | ||
2227 | if (beacon_info.stamp < cutoff) { | |
2228 | maybe_replace_gid(gid, beacon_info, &do_propose, &propose_osdmap); | |
2229 | } | |
2230 | } | |
2231 | ||
2232 | if (propose_osdmap) { | |
2233 | request_proposal(mon->osdmon()); | |
2234 | } | |
2235 | } | |
2236 | ||
2237 | for (auto i : pending_fsmap.filesystems) { | |
2238 | auto fs = i.second; | |
2239 | if (!fs->mds_map.test_flag(CEPH_MDSMAP_DOWN)) { | |
2240 | do_propose |= maybe_promote_standby(fs); | |
2241 | } | |
2242 | } | |
2243 | ||
2244 | if (do_propose) { | |
2245 | propose_pending(); | |
2246 | } | |
2247 | } | |
2248 | ||
2249 | /** | |
2250 | * finfo: the would-be follower | |
2251 | * leader_fs: the Filesystem containing the would-be leader | |
2252 | * ainfo: the would-be leader | |
2253 | */ | |
2254 | bool MDSMonitor::try_standby_replay( | |
2255 | const MDSMap::mds_info_t& finfo, | |
2256 | const Filesystem &leader_fs, | |
2257 | const MDSMap::mds_info_t& ainfo) | |
2258 | { | |
2259 | // someone else already following? | |
2260 | if (leader_fs.has_standby_replay(ainfo.global_id)) { | |
2261 | dout(20) << " mds." << ainfo.rank << " already has a follower" << dendl; | |
2262 | return false; | |
2263 | } else { | |
2264 | // Assign the new role to the standby | |
2265 | dout(10) << " setting to follow mds rank " << ainfo.rank << dendl; | |
2266 | pending_fsmap.assign_standby_replay(finfo.global_id, leader_fs.fscid, ainfo.rank); | |
2267 | return true; | |
2268 | } | |
2269 | } | |
2270 | ||
// Construct the MDS paxos service and load the table of handlers for
// filesystem ("ceph fs ...") commands.
MDSMonitor::MDSMonitor(Monitor *mn, Paxos *p, string service_name)
  : PaxosService(mn, p, service_name)
{
  handlers = FileSystemCommandHandler::load();
}
2276 | ||
// Reset beacon-tracking state when this service restarts, so that
// stale timestamps are not mistaken for missing beacons later
// (tick() repopulates last_beacon and last_tick as needed).
void MDSMonitor::on_restart()
{
  // Clear out the leader-specific state.
  last_tick = utime_t();
  last_beacon.clear();
}
2283 |