// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
 * Ceph - scalable distributed file system
 *
 * Copyright (C) 2004-2006 Sage Weil <sage@newdream.net>
 *
 * This is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License version 2.1, as published by the Free Software
 * Foundation. See file COPYING.
 *
 */

#include <unistd.h>

#include "include/compat.h"
#include "global/signal_handler.h"

#include "include/types.h"
#include "include/str_list.h"
#include "common/entity_name.h"
#include "common/Clock.h"
#include "common/signal.h"
#include "common/ceph_argparse.h"
#include "common/errno.h"

#include "msg/Messenger.h"
#include "mon/MonClient.h"

#include "osdc/Objecter.h"

#include "MDSMap.h"

#include "MDSDaemon.h"
#include "Server.h"
#include "Locker.h"

#include "SnapServer.h"
#include "SnapClient.h"

#include "common/HeartbeatMap.h"

#include "common/perf_counters.h"

#include "common/Timer.h"

#include "events/ESession.h"
#include "events/ESubtreeMap.h"

#include "messages/MMDSMap.h"

#include "messages/MGenericMessage.h"

#include "messages/MMonCommand.h"
#include "messages/MCommand.h"
#include "messages/MCommandReply.h"

#include "auth/AuthAuthorizeHandler.h"
#include "auth/RotatingKeyRing.h"
#include "auth/KeyRing.h"

#include "common/config.h"

#include "perfglue/cpu_profiler.h"
#include "perfglue/heap_profiler.h"

#define dout_context g_ceph_context
#define dout_subsys ceph_subsys_mds
#undef dout_prefix
#define dout_prefix *_dout << "mds." << name << ' '


class MDSDaemon::C_MDS_Tick : public Context {
protected:
  MDSDaemon *mds_daemon;
public:
  explicit C_MDS_Tick(MDSDaemon *m) : mds_daemon(m) {}
  void finish(int r) override {
    assert(mds_daemon->mds_lock.is_locked_by_me());

    mds_daemon->tick_event = 0;
    mds_daemon->tick();
  }
};

// cons/des
MDSDaemon::MDSDaemon(const std::string &n, Messenger *m, MonClient *mc) :
  Dispatcher(m->cct),
  mds_lock("MDSDaemon::mds_lock"),
  stopping(false),
  timer(m->cct, mds_lock),
  beacon(m->cct, mc, n),
  authorize_handler_cluster_registry(new AuthAuthorizeHandlerRegistry(m->cct,
      m->cct->_conf->auth_supported.empty() ?
      m->cct->_conf->auth_cluster_required :
      m->cct->_conf->auth_supported)),
  authorize_handler_service_registry(new AuthAuthorizeHandlerRegistry(m->cct,
      m->cct->_conf->auth_supported.empty() ?
      m->cct->_conf->auth_service_required :
      m->cct->_conf->auth_supported)),
  name(n),
  messenger(m),
  monc(mc),
  mgrc(m->cct, m),
  log_client(m->cct, messenger, &mc->monmap, LogClient::NO_FLAGS),
  mds_rank(NULL),
  tick_event(0),
  asok_hook(NULL)
{
  orig_argc = 0;
  orig_argv = NULL;

  clog = log_client.create_channel();

  monc->set_messenger(messenger);

  mdsmap = new MDSMap;
}

MDSDaemon::~MDSDaemon() {
  Mutex::Locker lock(mds_lock);

  delete mds_rank;
  mds_rank = NULL;
  delete mdsmap;
  mdsmap = NULL;

  delete authorize_handler_service_registry;
  delete authorize_handler_cluster_registry;
}

class MDSSocketHook : public AdminSocketHook {
  MDSDaemon *mds;
public:
  explicit MDSSocketHook(MDSDaemon *m) : mds(m) {}
  bool call(std::string command, cmdmap_t& cmdmap, std::string format,
            bufferlist& out) override {
    stringstream ss;
    bool r = mds->asok_command(command, cmdmap, format, ss);
    out.append(ss);
    return r;
  }
};

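// Admin socket command entry point: "status" is answered by the daemon
// itself, everything else is delegated to the active MDSRank (if any).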
bool MDSDaemon::asok_command(string command, cmdmap_t& cmdmap, string format,
                             ostream& ss)
{
  dout(1) << "asok_command: " << command << " (starting...)" << dendl;

  Formatter *f = Formatter::create(format, "json-pretty", "json-pretty");
  bool handled = false;
  if (command == "status") {
    dump_status(f);
    handled = true;
  } else {
    if (mds_rank == NULL) {
      dout(1) << "Can't run that command on an inactive MDS!" << dendl;
      f->dump_string("error", "mds_not_active");
    } else {
      handled = mds_rank->handle_asok_command(command, cmdmap, f, ss);
    }
  }
  f->flush(ss);
  delete f;

  dout(1) << "asok_command: " << command << " (complete)" << dendl;

  return handled;
}

void MDSDaemon::dump_status(Formatter *f)
{
  f->open_object_section("status");
  f->dump_stream("cluster_fsid") << monc->get_fsid();
  if (mds_rank) {
    f->dump_int("whoami", mds_rank->get_nodeid());
  } else {
    f->dump_int("whoami", MDS_RANK_NONE);
  }

  f->dump_int("id", monc->get_global_id());
  f->dump_string("want_state", ceph_mds_state_name(beacon.get_want_state()));
  f->dump_string("state", ceph_mds_state_name(mdsmap->get_state_gid(mds_gid_t(
          monc->get_global_id()))));
  if (mds_rank) {
    Mutex::Locker l(mds_lock);
    mds_rank->dump_status(f);
  }

  f->dump_unsigned("mdsmap_epoch", mdsmap->get_epoch());
  if (mds_rank) {
    f->dump_unsigned("osdmap_epoch", mds_rank->get_osd_epoch());
    f->dump_unsigned("osdmap_epoch_barrier", mds_rank->get_osd_epoch_barrier());
  } else {
    f->dump_unsigned("osdmap_epoch", 0);
    f->dump_unsigned("osdmap_epoch_barrier", 0);
  }
  f->close_section(); // status
}

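// Register the daemon's admin socket commands; each one is routed through
// MDSSocketHook::call() back into asok_command().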
void MDSDaemon::set_up_admin_socket()
{
  int r;
  AdminSocket *admin_socket = g_ceph_context->get_admin_socket();
  assert(asok_hook == nullptr);
  asok_hook = new MDSSocketHook(this);
  r = admin_socket->register_command("status", "status", asok_hook,
                                     "high-level status of MDS");
  assert(r == 0);
  r = admin_socket->register_command("dump_ops_in_flight",
                                     "dump_ops_in_flight", asok_hook,
                                     "show the ops currently in flight");
  assert(r == 0);
  r = admin_socket->register_command("ops",
                                     "ops", asok_hook,
                                     "show the ops currently in flight");
  assert(r == 0);
  r = admin_socket->register_command("dump_blocked_ops", "dump_blocked_ops",
                                     asok_hook,
                                     "show the blocked ops currently in flight");
  assert(r == 0);
  r = admin_socket->register_command("dump_historic_ops", "dump_historic_ops",
                                     asok_hook,
                                     "show slowest recent ops");
  assert(r == 0);
  r = admin_socket->register_command("dump_historic_ops_by_duration", "dump_historic_ops_by_duration",
                                     asok_hook,
                                     "show slowest recent ops, sorted by op duration");
  assert(r == 0);
  r = admin_socket->register_command("scrub_path",
                                     "scrub_path name=path,type=CephString "
                                     "name=scrubops,type=CephChoices,"
                                     "strings=force|recursive|repair,n=N,req=false",
                                     asok_hook,
                                     "scrub an inode and output results");
  assert(r == 0);
  r = admin_socket->register_command("tag path",
                                     "tag path name=path,type=CephString"
                                     " name=tag,type=CephString",
                                     asok_hook,
                                     "Apply scrub tag recursively");
  assert(r == 0);
  r = admin_socket->register_command("flush_path",
                                     "flush_path name=path,type=CephString",
                                     asok_hook,
                                     "flush an inode (and its dirfrags)");
  assert(r == 0);
  r = admin_socket->register_command("export dir",
                                     "export dir "
                                     "name=path,type=CephString "
                                     "name=rank,type=CephInt",
                                     asok_hook,
                                     "migrate a subtree to named MDS");
  assert(r == 0);
  r = admin_socket->register_command("dump cache",
                                     "dump cache name=path,type=CephString,req=false",
                                     asok_hook,
                                     "dump metadata cache (optionally to a file)");
  assert(r == 0);
  r = admin_socket->register_command("dump tree",
                                     "dump tree "
                                     "name=root,type=CephString,req=true "
                                     "name=depth,type=CephInt,req=false ",
                                     asok_hook,
                                     "dump metadata cache for subtree");
  assert(r == 0);
  r = admin_socket->register_command("session evict",
                                     "session evict name=client_id,type=CephString",
                                     asok_hook,
                                     "Evict a CephFS client");
  assert(r == 0);
  r = admin_socket->register_command("osdmap barrier",
                                     "osdmap barrier name=target_epoch,type=CephInt",
                                     asok_hook,
                                     "Wait until the MDS has this OSD map epoch");
  assert(r == 0);
  r = admin_socket->register_command("session ls",
                                     "session ls",
                                     asok_hook,
                                     "Enumerate connected CephFS clients");
  assert(r == 0);
  r = admin_socket->register_command("flush journal",
                                     "flush journal",
                                     asok_hook,
                                     "Flush the journal to the backing store");
  assert(r == 0);
  r = admin_socket->register_command("force_readonly",
                                     "force_readonly",
                                     asok_hook,
                                     "Force MDS to read-only mode");
  assert(r == 0);
  r = admin_socket->register_command("get subtrees",
                                     "get subtrees",
                                     asok_hook,
                                     "Return the subtree map");
  assert(r == 0);
  r = admin_socket->register_command("dirfrag split",
                                     "dirfrag split "
                                     "name=path,type=CephString,req=true "
                                     "name=frag,type=CephString,req=true "
                                     "name=bits,type=CephInt,req=true ",
                                     asok_hook,
                                     "Fragment directory by path");
  assert(r == 0);
  r = admin_socket->register_command("dirfrag merge",
                                     "dirfrag merge "
                                     "name=path,type=CephString,req=true "
                                     "name=frag,type=CephString,req=true",
                                     asok_hook,
                                     "De-fragment directory by path");
  assert(r == 0);
  r = admin_socket->register_command("dirfrag ls",
                                     "dirfrag ls "
                                     "name=path,type=CephString,req=true",
                                     asok_hook,
                                     "List fragments in directory");
  assert(r == 0);
}

void MDSDaemon::clean_up_admin_socket()
{
  AdminSocket *admin_socket = g_ceph_context->get_admin_socket();
  admin_socket->unregister_command("status");
  admin_socket->unregister_command("dump_ops_in_flight");
  admin_socket->unregister_command("ops");
  admin_socket->unregister_command("dump_blocked_ops");
  admin_socket->unregister_command("dump_historic_ops");
  admin_socket->unregister_command("dump_historic_ops_by_duration");
  admin_socket->unregister_command("scrub_path");
  admin_socket->unregister_command("tag path");
  admin_socket->unregister_command("flush_path");
  admin_socket->unregister_command("export dir");
  admin_socket->unregister_command("dump cache");
  admin_socket->unregister_command("dump tree");
  admin_socket->unregister_command("session evict");
  admin_socket->unregister_command("osdmap barrier");
  admin_socket->unregister_command("session ls");
  admin_socket->unregister_command("flush journal");
  admin_socket->unregister_command("force_readonly");
  admin_socket->unregister_command("get subtrees");
  admin_socket->unregister_command("dirfrag split");
  admin_socket->unregister_command("dirfrag merge");
  admin_socket->unregister_command("dirfrag ls");
  delete asok_hook;
  asok_hook = NULL;
}

const char** MDSDaemon::get_tracked_conf_keys() const
{
  static const char* KEYS[] = {
    "mds_op_complaint_time", "mds_op_log_threshold",
    "mds_op_history_size", "mds_op_history_duration",
    "mds_enable_op_tracker",
    "mds_log_pause",
    // clog & admin clog
    "clog_to_monitors",
    "clog_to_syslog",
    "clog_to_syslog_facility",
    "clog_to_syslog_level",
    // PurgeQueue
    "mds_max_purge_ops",
    "mds_max_purge_ops_per_pg",
    "mds_max_purge_files",
    "clog_to_graylog",
    "clog_to_graylog_host",
    "clog_to_graylog_port",
    "host",
    "fsid",
    NULL
  };
  return KEYS;
}

void MDSDaemon::handle_conf_change(const struct md_config_t *conf,
                                   const std::set <std::string> &changed)
{
  // We may be called within mds_lock (via `tell`) or outwith the
  // lock (via admin socket `config set`), so handle either case.
  const bool initially_locked = mds_lock.is_locked_by_me();
  if (!initially_locked) {
    mds_lock.Lock();
  }

  if (changed.count("mds_op_complaint_time") ||
      changed.count("mds_op_log_threshold")) {
    if (mds_rank) {
      mds_rank->op_tracker.set_complaint_and_threshold(conf->mds_op_complaint_time,
                                                       conf->mds_op_log_threshold);
    }
  }
  if (changed.count("mds_op_history_size") ||
      changed.count("mds_op_history_duration")) {
    if (mds_rank) {
      mds_rank->op_tracker.set_history_size_and_duration(conf->mds_op_history_size,
                                                         conf->mds_op_history_duration);
    }
  }
  if (changed.count("mds_enable_op_tracker")) {
    if (mds_rank) {
      mds_rank->op_tracker.set_tracking(conf->mds_enable_op_tracker);
    }
  }
  if (changed.count("clog_to_monitors") ||
      changed.count("clog_to_syslog") ||
      changed.count("clog_to_syslog_level") ||
      changed.count("clog_to_syslog_facility") ||
      changed.count("clog_to_graylog") ||
      changed.count("clog_to_graylog_host") ||
      changed.count("clog_to_graylog_port") ||
      changed.count("host") ||
      changed.count("fsid")) {
    if (mds_rank) {
      mds_rank->update_log_config();
    }
  }

  if (!g_conf->mds_log_pause && changed.count("mds_log_pause")) {
    if (mds_rank) {
      mds_rank->mdlog->kick_submitter();
    }
  }

  if (mds_rank) {
    mds_rank->handle_conf_change(conf, changed);
  }

  if (!initially_locked) {
    mds_lock.Unlock();
  }
}

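// Daemon start-up: register dispatchers, connect and authenticate to the
// monitors, fetch rotating service keys, start the mgr client, subscribe to
// mdsmap/mgrmap updates, then start the beacon and tick timer.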
int MDSDaemon::init()
{
  dout(10) << sizeof(MDSCacheObject) << "\tMDSCacheObject" << dendl;
  dout(10) << sizeof(CInode) << "\tCInode" << dendl;
  dout(10) << sizeof(elist<void*>::item) << "\t elist<>::item *7=" << 7*sizeof(elist<void*>::item) << dendl;
  dout(10) << sizeof(inode_t) << "\t inode_t " << dendl;
  dout(10) << sizeof(nest_info_t) << "\t nest_info_t " << dendl;
  dout(10) << sizeof(frag_info_t) << "\t frag_info_t " << dendl;
  dout(10) << sizeof(SimpleLock) << "\t SimpleLock *5=" << 5*sizeof(SimpleLock) << dendl;
  dout(10) << sizeof(ScatterLock) << "\t ScatterLock *3=" << 3*sizeof(ScatterLock) << dendl;
  dout(10) << sizeof(CDentry) << "\tCDentry" << dendl;
  dout(10) << sizeof(elist<void*>::item) << "\t elist<>::item" << dendl;
  dout(10) << sizeof(SimpleLock) << "\t SimpleLock" << dendl;
  dout(10) << sizeof(CDir) << "\tCDir " << dendl;
  dout(10) << sizeof(elist<void*>::item) << "\t elist<>::item *2=" << 2*sizeof(elist<void*>::item) << dendl;
  dout(10) << sizeof(fnode_t) << "\t fnode_t " << dendl;
  dout(10) << sizeof(nest_info_t) << "\t nest_info_t *2" << dendl;
  dout(10) << sizeof(frag_info_t) << "\t frag_info_t *2" << dendl;
  dout(10) << sizeof(Capability) << "\tCapability " << dendl;
  dout(10) << sizeof(xlist<void*>::item) << "\t xlist<>::item *2=" << 2*sizeof(xlist<void*>::item) << dendl;

  messenger->add_dispatcher_tail(&beacon);
  messenger->add_dispatcher_tail(this);

  // get monmap
  monc->set_messenger(messenger);

  monc->set_want_keys(CEPH_ENTITY_TYPE_MON | CEPH_ENTITY_TYPE_OSD |
                      CEPH_ENTITY_TYPE_MDS | CEPH_ENTITY_TYPE_MGR);
  int r = 0;
  r = monc->init();
  if (r < 0) {
    derr << "ERROR: failed to get monmap: " << cpp_strerror(-r) << dendl;
    mds_lock.Lock();
    suicide();
    mds_lock.Unlock();
    return r;
  }

  // tell monc about log_client so it will know about mon session resets
  monc->set_log_client(&log_client);

  r = monc->authenticate();
  if (r < 0) {
    derr << "ERROR: failed to authenticate: " << cpp_strerror(-r) << dendl;
    mds_lock.Lock();
    suicide();
    mds_lock.Unlock();
    return r;
  }

  int rotating_auth_attempts = 0;
  while (monc->wait_auth_rotating(30.0) < 0) {
    if (++rotating_auth_attempts <= g_conf->max_rotating_auth_attempts) {
      derr << "unable to obtain rotating service keys; retrying" << dendl;
      continue;
    }
    derr << "ERROR: failed to refresh rotating keys, "
         << "maximum retry time reached." << dendl;
    mds_lock.Lock();
    suicide();
    mds_lock.Unlock();
    return -ETIMEDOUT;
  }

  mgrc.init();
  messenger->add_dispatcher_head(&mgrc);

  mds_lock.Lock();
  if (beacon.get_want_state() == CEPH_MDS_STATE_DNE) {
    dout(4) << __func__ << ": terminated already, dropping out" << dendl;
    mds_lock.Unlock();
    return 0;
  }

  monc->sub_want("mdsmap", 0, 0);
  monc->sub_want("mgrmap", 0, 0);
  monc->renew_subs();

  mds_lock.Unlock();

  // Set up admin socket before taking mds_lock, so that ordering
  // is consistent (later we take mds_lock within asok callbacks)
  set_up_admin_socket();
  g_conf->add_observer(this);
  mds_lock.Lock();
  if (beacon.get_want_state() == MDSMap::STATE_DNE) {
    suicide(); // we could do something more graceful here
    dout(4) << __func__ << ": terminated already, dropping out" << dendl;
    mds_lock.Unlock();
    return 0;
  }

  timer.init();

  beacon.init(mdsmap);
  messenger->set_myname(entity_name_t::MDS(MDS_RANK_NONE));

  // schedule tick
  reset_tick();
  mds_lock.Unlock();

  return 0;
}

void MDSDaemon::reset_tick()
{
  // cancel old
  if (tick_event) timer.cancel_event(tick_event);

  // schedule
  tick_event = new C_MDS_Tick(this);
  timer.add_event_after(g_conf->mds_tick_interval, tick_event);
}

void MDSDaemon::tick()
{
  tick_event = 0;

  // reschedule
  reset_tick();

  // Call through to subsystems' tick functions
  if (mds_rank) {
    mds_rank->tick();
  }
}

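// Send the reply for an MCommand.  If the issuing session exists only to
// carry this command (e.g. the ceph CLI), mark its connection disposable so
// it is torn down once the reply is out.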
void MDSDaemon::send_command_reply(MCommand *m, MDSRank *mds_rank,
                                   int r, bufferlist outbl,
                                   const std::string& outs)
{
  Session *session = static_cast<Session *>(m->get_connection()->get_priv());
  assert(session != NULL);
  // If someone is using a closed session for sending commands (e.g.
  // the ceph CLI) then we should feel free to clean up this connection
  // as soon as we've sent them a response.
  const bool live_session = mds_rank &&
    mds_rank->sessionmap.get_session(session->info.inst.name) != nullptr
    && session->get_state_seq() > 0;

  if (!live_session) {
    // This session only existed to issue commands, so terminate it
    // as soon as we can.
    assert(session->is_closed());
    session->connection->mark_disposable();
    session->put();
  }

  MCommandReply *reply = new MCommandReply(r, outs);
  reply->set_tid(m->get_tid());
  reply->set_data(outbl);
  m->get_connection()->send_message(reply);
}

/* This function DOES put the passed message before returning */
void MDSDaemon::handle_command(MCommand *m)
{
  Session *session = static_cast<Session *>(m->get_connection()->get_priv());
  assert(session != NULL);

  int r = 0;
  cmdmap_t cmdmap;
  std::stringstream ss;
  std::string outs;
  bufferlist outbl;
  Context *run_after = NULL;
  bool need_reply = true;

  if (!session->auth_caps.allow_all()) {
    dout(1) << __func__
            << ": received command from client without `tell` capability: "
            << m->get_connection()->peer_addr << dendl;

    ss << "permission denied";
    r = -EPERM;
  } else if (m->cmd.empty()) {
    r = -EINVAL;
    ss << "no command given";
    outs = ss.str();
  } else if (!cmdmap_from_json(m->cmd, &cmdmap, ss)) {
    r = -EINVAL;
    outs = ss.str();
  } else {
    r = _handle_command(cmdmap, m, &outbl, &outs, &run_after, &need_reply);
  }

  if (need_reply) {
    send_command_reply(m, mds_rank, r, outbl, outs);
  }

  if (run_after) {
    run_after->complete(0);
  }

  m->put();
}


struct MDSCommand {
  string cmdstring;
  string helpstring;
  string module;
  string perm;
  string availability;
} mds_commands[] = {

#define COMMAND(parsesig, helptext, module, perm, availability) \
  {parsesig, helptext, module, perm, availability},

COMMAND("injectargs " \
        "name=injected_args,type=CephString,n=N",
        "inject configuration arguments into running MDS",
        "mds", "*", "cli,rest")
COMMAND("exit",
        "Terminate this MDS",
        "mds", "*", "cli,rest")
COMMAND("respawn",
        "Restart this MDS",
        "mds", "*", "cli,rest")
COMMAND("session kill " \
        "name=session_id,type=CephInt",
        "End a client session",
        "mds", "*", "cli,rest")
COMMAND("cpu_profiler " \
        "name=arg,type=CephChoices,strings=status|flush",
        "run cpu profiling on daemon", "mds", "rw", "cli,rest")
COMMAND("session ls " \
        "name=filters,type=CephString,n=N,req=false",
        "List client sessions", "mds", "r", "cli,rest")
COMMAND("session evict " \
        "name=filters,type=CephString,n=N,req=false",
        "Evict client session(s)", "mds", "rw", "cli,rest")
COMMAND("damage ls",
        "List detected metadata damage", "mds", "r", "cli,rest")
COMMAND("damage rm name=damage_id,type=CephInt",
        "Remove a damage table entry", "mds", "rw", "cli,rest")
COMMAND("heap " \
        "name=heapcmd,type=CephChoices,strings=dump|start_profiler|stop_profiler|release|stats", \
        "show heap usage info (available only if compiled with tcmalloc)", \
        "mds", "*", "cli,rest")
};

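// Dispatch a parsed tell/command.  Commands the daemon itself understands
// (injectargs, exit, respawn, session kill, heap, cpu_profiler, ...) are
// handled here; anything else is offered to the MDSRank, and unknown
// commands return -EINVAL.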
int MDSDaemon::_handle_command(
  const cmdmap_t &cmdmap,
  MCommand *m,
  bufferlist *outbl,
  std::string *outs,
  Context **run_later,
  bool *need_reply)
{
  assert(outbl != NULL);
  assert(outs != NULL);

  class SuicideLater : public Context
  {
    MDSDaemon *mds;

  public:
    explicit SuicideLater(MDSDaemon *mds_) : mds(mds_) {}
    void finish(int r) override {
      // Wait a little to improve chances of caller getting
      // our response before seeing us disappear from mdsmap
      sleep(1);

      mds->suicide();
    }
  };


  class RespawnLater : public Context
  {
    MDSDaemon *mds;

  public:

    explicit RespawnLater(MDSDaemon *mds_) : mds(mds_) {}
    void finish(int r) override {
      // Wait a little to improve chances of caller getting
      // our response before seeing us disappear from mdsmap
      sleep(1);

      mds->respawn();
    }
  };

  std::stringstream ds;
  std::stringstream ss;
  std::string prefix;
  cmd_getval(cct, cmdmap, "prefix", prefix);

  int r = 0;

  if (prefix == "get_command_descriptions") {
    int cmdnum = 0;
    JSONFormatter *f = new JSONFormatter();
    f->open_object_section("command_descriptions");
    for (MDSCommand *cp = mds_commands;
         cp < &mds_commands[ARRAY_SIZE(mds_commands)]; cp++) {

      ostringstream secname;
      secname << "cmd" << setfill('0') << std::setw(3) << cmdnum;
      dump_cmddesc_to_json(f, secname.str(), cp->cmdstring, cp->helpstring,
                           cp->module, cp->perm, cp->availability, 0);
      cmdnum++;
    }
    f->close_section(); // command_descriptions

    f->flush(ds);
    delete f;
  } else if (prefix == "injectargs") {
    vector<string> argsvec;
    cmd_getval(cct, cmdmap, "injected_args", argsvec);

    if (argsvec.empty()) {
      r = -EINVAL;
      ss << "ignoring empty injectargs";
      goto out;
    }
    string args = argsvec.front();
    for (vector<string>::iterator a = ++argsvec.begin(); a != argsvec.end(); ++a)
      args += " " + *a;
    r = cct->_conf->injectargs(args, &ss);
  } else if (prefix == "exit") {
    // We will send response before executing
    ss << "Exiting...";
    *run_later = new SuicideLater(this);
  } else if (prefix == "respawn") {
    // We will send response before executing
    ss << "Respawning...";
    *run_later = new RespawnLater(this);
  } else if (prefix == "session kill") {
    if (mds_rank == NULL) {
      r = -EINVAL;
      ss << "MDS not active";
      goto out;
    }
    // FIXME harmonize `session kill` with admin socket session evict
    int64_t session_id = 0;
    bool got = cmd_getval(cct, cmdmap, "session_id", session_id);
    assert(got);
    bool killed = mds_rank->kill_session(session_id, false, ss);
    if (!killed)
      r = -ENOENT;
  } else if (prefix == "heap") {
    if (!ceph_using_tcmalloc()) {
      r = -EOPNOTSUPP;
      ss << "could not issue heap profiler command -- not using tcmalloc!";
    } else {
      string heapcmd;
      cmd_getval(cct, cmdmap, "heapcmd", heapcmd);
      vector<string> heapcmd_vec;
      get_str_vec(heapcmd, heapcmd_vec);
      ceph_heap_profiler_handle_command(heapcmd_vec, ds);
    }
  } else if (prefix == "cpu_profiler") {
    string arg;
    cmd_getval(cct, cmdmap, "arg", arg);
    vector<string> argvec;
    get_str_vec(arg, argvec);
    cpu_profiler_handle_command(argvec, ds);
  } else {
    // Give MDSRank a shot at the command
    if (mds_rank) {
      bool handled = mds_rank->handle_command(cmdmap, m, &r, &ds, &ss,
                                              need_reply);
      if (handled) {
        goto out;
      }
    }

    // Neither MDSDaemon nor MDSRank know this command
    ss << "unrecognized command! " << prefix;
    r = -EINVAL;
  }

out:
  *outs = ss.str();
  outbl->append(ds);
  return r;
}

/* This function deletes the passed message before returning. */

void MDSDaemon::handle_mds_map(MMDSMap *m)
{
  version_t epoch = m->get_epoch();
  dout(5) << "handle_mds_map epoch " << epoch << " from " << m->get_source() << dendl;

  // is it new?
  if (epoch <= mdsmap->get_epoch()) {
    dout(5) << " old map epoch " << epoch << " <= " << mdsmap->get_epoch()
            << ", discarding" << dendl;
    m->put();
    return;
  }

  entity_addr_t addr;

  // keep old map, for a moment
  MDSMap *oldmap = mdsmap;

  // decode and process
  mdsmap = new MDSMap;
  mdsmap->decode(m->get_encoded());
  const MDSMap::DaemonState new_state = mdsmap->get_state_gid(mds_gid_t(monc->get_global_id()));
  const int incarnation = mdsmap->get_inc_gid(mds_gid_t(monc->get_global_id()));

  monc->sub_got("mdsmap", mdsmap->get_epoch());

  // Calculate my effective rank (either my owned rank or my
  // standby_for_rank if in standby replay)
  mds_rank_t whoami = mdsmap->get_rank_gid(mds_gid_t(monc->get_global_id()));

  // verify compatset
  CompatSet mdsmap_compat(get_mdsmap_compat_set_all());
  dout(10) << " my compat " << mdsmap_compat << dendl;
  dout(10) << " mdsmap compat " << mdsmap->compat << dendl;
  if (!mdsmap_compat.writeable(mdsmap->compat)) {
    dout(0) << "handle_mds_map mdsmap compatset " << mdsmap->compat
            << " not writeable with daemon features " << mdsmap_compat
            << ", killing myself" << dendl;
    suicide();
    goto out;
  }

  // mark down any failed peers
  for (map<mds_gid_t,MDSMap::mds_info_t>::const_iterator p = oldmap->get_mds_info().begin();
       p != oldmap->get_mds_info().end();
       ++p) {
    if (mdsmap->get_mds_info().count(p->first) == 0) {
      dout(10) << " peer mds gid " << p->first << " removed from map" << dendl;
      messenger->mark_down(p->second.addr);
    }
  }

  if (whoami == MDS_RANK_NONE &&
      new_state == MDSMap::STATE_STANDBY_REPLAY) {
    whoami = mdsmap->get_mds_info_gid(mds_gid_t(monc->get_global_id())).standby_for_rank;
  }

  // see who i am
  addr = messenger->get_myaddr();
  dout(10) << "map says i am " << addr << " mds." << whoami << "." << incarnation
           << " state " << ceph_mds_state_name(new_state) << dendl;

  if (whoami == MDS_RANK_NONE) {
    if (mds_rank != NULL) {
      // We have entered a rank-holding state, we shouldn't be back
      // here!
      if (g_conf->mds_enforce_unique_name) {
        if (mds_gid_t existing = mdsmap->find_mds_gid_by_name(name)) {
          const MDSMap::mds_info_t& i = mdsmap->get_info_gid(existing);
          if (i.global_id > monc->get_global_id()) {
            dout(1) << "handle_mds_map i (" << addr
                    << ") dne in the mdsmap, new instance has larger gid " << i.global_id
                    << ", suicide" << dendl;
            // Call suicide() rather than respawn() because if someone else
            // has taken our ID, we don't want to keep restarting and
            // fighting them for the ID.
            suicide();
            m->put();
            return;
          }
        }
      }

      dout(1) << "handle_mds_map i (" << addr
              << ") dne in the mdsmap, respawning myself" << dendl;
      respawn();
    }
    // MDSRank not active: process the map here to see if we have
    // been assigned a rank.
    dout(10) << __func__ << ": handling map in rankless mode" << dendl;
    _handle_mds_map(oldmap);
  } else {

    // Did we already hold a different rank? MDSMonitor shouldn't try
    // to change that out from under me!
    if (mds_rank && whoami != mds_rank->get_nodeid()) {
      derr << "Invalid rank transition " << mds_rank->get_nodeid() << "->"
           << whoami << dendl;
      respawn();
    }

    // Did I previously not hold a rank? Initialize!
    if (mds_rank == NULL) {
      mds_rank = new MDSRankDispatcher(whoami, mds_lock, clog,
                                       timer, beacon, mdsmap, messenger, monc,
                                       new FunctionContext([this](int r){respawn();}),
                                       new FunctionContext([this](int r){suicide();}));
      dout(10) << __func__ << ": initializing MDS rank "
               << mds_rank->get_nodeid() << dendl;
      mds_rank->init();
    }

    // MDSRank is active: let him process the map, we have no say.
    dout(10) << __func__ << ": handling map as rank "
             << mds_rank->get_nodeid() << dendl;
    mds_rank->handle_mds_map(m, oldmap);
  }

out:
  beacon.notify_mdsmap(mdsmap);
  m->put();
  delete oldmap;
}

void MDSDaemon::_handle_mds_map(MDSMap *oldmap)
{
  MDSMap::DaemonState new_state = mdsmap->get_state_gid(mds_gid_t(monc->get_global_id()));

  // Normal rankless case, we're marked as standby
  if (new_state == MDSMap::STATE_STANDBY) {
    beacon.set_want_state(mdsmap, new_state);
    dout(1) << "handle_mds_map standby" << dendl;

    return;
  }

  // Case where we thought we were standby, but MDSMap disagrees
  if (beacon.get_want_state() == MDSMap::STATE_STANDBY) {
    dout(10) << "dropped out of mdsmap, try to re-add myself" << dendl;
    new_state = MDSMap::STATE_BOOT;
    beacon.set_want_state(mdsmap, new_state);
    return;
  }

  // Case where we have sent a boot beacon that isn't reflected yet
  if (beacon.get_want_state() == MDSMap::STATE_BOOT) {
    dout(10) << "not in map yet" << dendl;
  }
}

void MDSDaemon::handle_signal(int signum)
{
  assert(signum == SIGINT || signum == SIGTERM);
  derr << "*** got signal " << sig_str(signum) << " ***" << dendl;
  {
    Mutex::Locker l(mds_lock);
    if (stopping) {
      return;
    }
    suicide();
  }
}

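// Orderly in-place shutdown: stop the tick timer, unregister the config
// observer and admin socket commands, tell the MDSMonitor we are going away
// (want_state DNE), then shut down the beacon, mgr client and either the
// MDSRank or, if we never held a rank, the timer/monc/messenger directly.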
void MDSDaemon::suicide()
{
  assert(mds_lock.is_locked());

  // make sure we don't suicide twice
  assert(stopping == false);
  stopping = true;

  dout(1) << "suicide. wanted state "
          << ceph_mds_state_name(beacon.get_want_state()) << dendl;

  if (tick_event) {
    timer.cancel_event(tick_event);
    tick_event = 0;
  }

  // add_observer is called after set_up_admin_socket, so asok_hook tells us
  // whether the observer was registered and lets us avoid asserting in
  // remove_observer.
  if (asok_hook != NULL)
    g_conf->remove_observer(this);

  clean_up_admin_socket();

  // Inform the MDSMonitor we are going away, then shut down the beacon
  beacon.set_want_state(mdsmap, MDSMap::STATE_DNE);
  if (!mdsmap->is_dne_gid(mds_gid_t(monc->get_global_id()))) {
    // Notify the MDSMonitor that we're dying, so that it doesn't have to
    // wait for us to go laggy. Only do this if we're actually in the
    // MDSMap, because otherwise the MDSMonitor will drop our message.
    beacon.send_and_wait(1);
  }
  beacon.shutdown();

  mgrc.shutdown();

  if (mds_rank) {
    mds_rank->shutdown();
  } else {
    timer.shutdown();

    monc->shutdown();
    messenger->shutdown();
  }
}

void MDSDaemon::respawn()
{
  dout(1) << "respawn" << dendl;

  char *new_argv[orig_argc+1];
  dout(1) << " e: '" << orig_argv[0] << "'" << dendl;
  for (int i=0; i<orig_argc; i++) {
    new_argv[i] = (char *)orig_argv[i];
    dout(1) << " " << i << ": '" << orig_argv[i] << "'" << dendl;
  }
  new_argv[orig_argc] = NULL;

  /* Determine the path to our executable, test if Linux /proc/self/exe exists.
   * This allows us to exec the same executable even if it has since been
   * unlinked.
   */
  char exe_path[PATH_MAX] = "";
  if (readlink(PROCPREFIX "/proc/self/exe", exe_path, PATH_MAX-1) == -1) {
    /* Print CWD for the user's interest */
    char buf[PATH_MAX];
    char *cwd = getcwd(buf, sizeof(buf));
    assert(cwd);
    dout(1) << " cwd " << cwd << dendl;

    /* Fall back to a best-effort: just running in our CWD */
    strncpy(exe_path, orig_argv[0], PATH_MAX-1);
  } else {
    dout(1) << "respawning with exe " << exe_path << dendl;
    strcpy(exe_path, PROCPREFIX "/proc/self/exe");
  }

  dout(1) << " exe_path " << exe_path << dendl;

  unblock_all_signals(NULL);
  execv(exe_path, new_argv);

  dout(0) << "respawn execv " << orig_argv[0]
          << " failed with " << cpp_strerror(errno) << dendl;

  // We have to assert out here, because suicide() returns, and callers
  // to respawn expect it never to return.
  ceph_abort();
}

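// Top-level message dispatch: core daemon messages (maps, commands) are
// handled here, everything else is passed on to the MDSRank if present.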
bool MDSDaemon::ms_dispatch(Message *m)
{
  Mutex::Locker l(mds_lock);
  if (stopping) {
    return false;
  }

  // Drop out early if shutting down
  if (beacon.get_want_state() == CEPH_MDS_STATE_DNE) {
    dout(10) << " stopping, discarding " << *m << dendl;
    m->put();
    return true;
  }

  // First see if it's a daemon message
  const bool handled_core = handle_core_message(m);
  if (handled_core) {
    return true;
  }

  // Not core, try it as a rank message
  if (mds_rank) {
    return mds_rank->ms_dispatch(m);
  } else {
    return false;
  }
}

bool MDSDaemon::ms_get_authorizer(int dest_type, AuthAuthorizer **authorizer, bool force_new)
{
  dout(10) << "MDSDaemon::ms_get_authorizer type="
           << ceph_entity_type_name(dest_type) << dendl;

  /* monitor authorization is being handled on different layer */
  if (dest_type == CEPH_ENTITY_TYPE_MON)
    return true;

  if (force_new) {
    if (monc->wait_auth_rotating(10) < 0)
      return false;
  }

  *authorizer = monc->build_authorizer(dest_type);
  return *authorizer != NULL;
}


/*
 * high priority messages we always process
 */
bool MDSDaemon::handle_core_message(Message *m)
{
  switch (m->get_type()) {
  case CEPH_MSG_MON_MAP:
    ALLOW_MESSAGES_FROM(CEPH_ENTITY_TYPE_MON);
    m->put();
    break;

    // MDS
  case CEPH_MSG_MDS_MAP:
    ALLOW_MESSAGES_FROM(CEPH_ENTITY_TYPE_MON | CEPH_ENTITY_TYPE_MDS);
    handle_mds_map(static_cast<MMDSMap*>(m));
    break;

    // OSD
  case MSG_COMMAND:
    handle_command(static_cast<MCommand*>(m));
    break;
  case CEPH_MSG_OSD_MAP:
    ALLOW_MESSAGES_FROM(CEPH_ENTITY_TYPE_MON | CEPH_ENTITY_TYPE_OSD);

    if (mds_rank) {
      mds_rank->handle_osd_map();
    }
    m->put();
    break;

  case MSG_MON_COMMAND:
    ALLOW_MESSAGES_FROM(CEPH_ENTITY_TYPE_MON);
    clog->warn() << "dropping `mds tell` command from legacy monitor";
    m->put();
    break;

  default:
    return false;
  }
  return true;
}

void MDSDaemon::ms_handle_connect(Connection *con)
{
}

bool MDSDaemon::ms_handle_reset(Connection *con)
{
  if (con->get_peer_type() != CEPH_ENTITY_TYPE_CLIENT)
    return false;

  Mutex::Locker l(mds_lock);
  if (stopping) {
    return false;
  }
  dout(5) << "ms_handle_reset on " << con->get_peer_addr() << dendl;
  if (beacon.get_want_state() == CEPH_MDS_STATE_DNE)
    return false;

  Session *session = static_cast<Session *>(con->get_priv());
  if (session) {
    if (session->is_closed()) {
      dout(3) << "ms_handle_reset closing connection for session " << session->info.inst << dendl;
      con->mark_down();
      con->set_priv(NULL);
    }
    session->put();
  } else {
    con->mark_down();
  }
  return false;
}


void MDSDaemon::ms_handle_remote_reset(Connection *con)
{
  if (con->get_peer_type() != CEPH_ENTITY_TYPE_CLIENT)
    return;

  Mutex::Locker l(mds_lock);
  if (stopping) {
    return;
  }

  dout(5) << "ms_handle_remote_reset on " << con->get_peer_addr() << dendl;
  if (beacon.get_want_state() == CEPH_MDS_STATE_DNE)
    return;

  Session *session = static_cast<Session *>(con->get_priv());
  if (session) {
    if (session->is_closed()) {
      dout(3) << "ms_handle_remote_reset closing connection for session " << session->info.inst << dendl;
      con->mark_down();
      con->set_priv(NULL);
    }
    session->put();
  }
}

bool MDSDaemon::ms_handle_refused(Connection *con)
{
  // do nothing for now
  return false;
}

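// Verify an incoming connection's authorizer and attach a Session to the
// connection.  Sessions are created even before we hold a rank, so that
// clients with "allow *" caps can run `tell` commands against a rankless
// daemon.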
bool MDSDaemon::ms_verify_authorizer(Connection *con, int peer_type,
                                     int protocol, bufferlist& authorizer_data, bufferlist& authorizer_reply,
                                     bool& is_valid, CryptoKey& session_key)
{
  Mutex::Locker l(mds_lock);
  if (stopping) {
    return false;
  }
  if (beacon.get_want_state() == CEPH_MDS_STATE_DNE)
    return false;

  AuthAuthorizeHandler *authorize_handler = 0;
  switch (peer_type) {
  case CEPH_ENTITY_TYPE_MDS:
    authorize_handler = authorize_handler_cluster_registry->get_handler(protocol);
    break;
  default:
    authorize_handler = authorize_handler_service_registry->get_handler(protocol);
  }
  if (!authorize_handler) {
    dout(0) << "No AuthAuthorizeHandler found for protocol " << protocol << dendl;
    is_valid = false;
    return true;
  }

  AuthCapsInfo caps_info;
  EntityName name;
  uint64_t global_id;

  is_valid = authorize_handler->verify_authorizer(
    cct, monc->rotating_secrets.get(),
    authorizer_data, authorizer_reply, name, global_id, caps_info, session_key);

  if (is_valid) {
    entity_name_t n(con->get_peer_type(), global_id);

    // We allow connections and assign Session instances to connections
    // even if we have not been assigned a rank, because clients with
    // "allow *" are allowed to connect and do 'tell' operations before
    // we have a rank.
    Session *s = NULL;
    if (mds_rank) {
      // If we do hold a rank, see if this is an existing client establishing
      // a new connection, rather than a new client
      s = mds_rank->sessionmap.get_session(n);
    }

    // Wire up a Session* to this connection
    // It doesn't go into a SessionMap instance until it sends an explicit
    // request to open a session (initial state of Session is `closed`)
    if (!s) {
      s = new Session;
      s->info.auth_name = name;
      s->info.inst.addr = con->get_peer_addr();
      s->info.inst.name = n;
      dout(10) << " new session " << s << " for " << s->info.inst << " con " << con << dendl;
      con->set_priv(s);
      s->connection = con;
    } else {
      dout(10) << " existing session " << s << " for " << s->info.inst << " existing con " << s->connection
               << ", new/authorizing con " << con << dendl;
      con->set_priv(s->get());

      // Wait until we fully accept the connection before setting
      // s->connection. In particular, if there are multiple incoming
      // connection attempts, they will all get their authorizer
      // validated, but some of them may "lose the race" and get
      // dropped. We only want to consider the winner(s). See
      // ms_handle_accept(). This is important for Sessions we replay
      // from the journal on recovery that don't have established
      // messenger state; we want the con from only the winning
      // connect attempt(s). (Normal reconnects that don't follow MDS
      // recovery are reconnected to the existing con by the
      // messenger.)
    }

    if (caps_info.allow_all) {
      // Flag for auth providers that don't provide cap strings
      s->auth_caps.set_allow_all();
    }

    bufferlist::iterator p = caps_info.caps.begin();
    string auth_cap_str;
    try {
      ::decode(auth_cap_str, p);

      dout(10) << __func__ << ": parsing auth_cap_str='" << auth_cap_str << "'" << dendl;
      std::ostringstream errstr;
      if (!s->auth_caps.parse(g_ceph_context, auth_cap_str, &errstr)) {
        dout(1) << __func__ << ": auth cap parse error: " << errstr.str()
                << " parsing '" << auth_cap_str << "'" << dendl;
        clog->warn() << name << " mds cap '" << auth_cap_str
                     << "' does not parse: " << errstr.str();
      }
    } catch (buffer::error& e) {
      // Assume legacy auth, defaults to:
      //  * permit all filesystem ops
      //  * permit no `tell` ops
      dout(1) << __func__ << ": cannot decode auth caps bl of length " << caps_info.caps.length() << dendl;
    }
  }

  return true;  // we made a decision (see is_valid)
}


void MDSDaemon::ms_handle_accept(Connection *con)
{
  Mutex::Locker l(mds_lock);
  if (stopping) {
    return;
  }

  Session *s = static_cast<Session *>(con->get_priv());
  dout(10) << "ms_handle_accept " << con->get_peer_addr() << " con " << con << " session " << s << dendl;
  if (s) {
    if (s->connection != con) {
      dout(10) << " session connection " << s->connection << " -> " << con << dendl;
      s->connection = con;

      // send out any queued messages
      while (!s->preopen_out_queue.empty()) {
        con->send_message(s->preopen_out_queue.front());
        s->preopen_out_queue.pop_front();
      }
    }
    s->put();
  }
}

bool MDSDaemon::is_clean_shutdown()
{
  if (mds_rank) {
    return mds_rank->is_stopped();
  } else {
    return true;
  }
}