]> git.proxmox.com Git - ceph.git/blob - ceph/src/mds/MDSDaemon.cc
update source to 12.2.11
[ceph.git] / ceph / src / mds / MDSDaemon.cc
1 // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
2 // vim: ts=8 sw=2 smarttab
3 /*
4 * Ceph - scalable distributed file system
5 *
6 * Copyright (C) 2004-2006 Sage Weil <sage@newdream.net>
7 *
8 * This is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU Lesser General Public
10 * License version 2.1, as published by the Free Software
11 * Foundation. See file COPYING.
12 *
13 */
14
#include <unistd.h>

#include <memory>

#include "include/compat.h"
#include "include/types.h"
#include "include/str_list.h"

#include "common/Clock.h"
#include "common/HeartbeatMap.h"
#include "common/Timer.h"
#include "common/backport14.h"
#include "common/ceph_argparse.h"
#include "common/config.h"
#include "common/entity_name.h"
#include "common/errno.h"
#include "common/perf_counters.h"
#include "common/signal.h"
#include "common/version.h"

#include "global/signal_handler.h"

#include "msg/Messenger.h"
#include "mon/MonClient.h"

#include "osdc/Objecter.h"

#include "MDSMap.h"

#include "MDSDaemon.h"
#include "Server.h"
#include "Locker.h"

#include "SnapServer.h"
#include "SnapClient.h"

#include "events/ESession.h"
#include "events/ESubtreeMap.h"

#include "messages/MMDSMap.h"

#include "messages/MGenericMessage.h"

#include "messages/MMonCommand.h"
#include "messages/MCommand.h"
#include "messages/MCommandReply.h"

#include "auth/AuthAuthorizeHandler.h"
#include "auth/RotatingKeyRing.h"
#include "auth/KeyRing.h"

#include "perfglue/cpu_profiler.h"
#include "perfglue/heap_profiler.h"
66
67 #define dout_context g_ceph_context
68 #define dout_subsys ceph_subsys_mds
69 #undef dout_prefix
70 #define dout_prefix *_dout << "mds." << name << ' '
71
72 // cons/des
// Construct the daemon shell.  No rank is held yet: mds_rank stays NULL
// until handle_mds_map() assigns us one.  `n` is the daemon name ("mds.<n>"),
// `m` the cluster messenger, `mc` the monitor client.
MDSDaemon::MDSDaemon(boost::string_view n, Messenger *m, MonClient *mc) :
  Dispatcher(m->cct),
  mds_lock("MDSDaemon::mds_lock"),
  stopping(false),
  timer(m->cct, mds_lock),
  beacon(m->cct, mc, n),
  // Auth registries: auth_supported overrides the per-direction settings;
  // otherwise cluster- and service-facing handlers are configured separately.
  authorize_handler_cluster_registry(new AuthAuthorizeHandlerRegistry(m->cct,
								      m->cct->_conf->auth_supported.empty() ?
								      m->cct->_conf->auth_cluster_required :
								      m->cct->_conf->auth_supported)),
  authorize_handler_service_registry(new AuthAuthorizeHandlerRegistry(m->cct,
								      m->cct->_conf->auth_supported.empty() ?
								      m->cct->_conf->auth_service_required :
								      m->cct->_conf->auth_supported)),
  name(n),
  messenger(m),
  monc(mc),
  mgrc(m->cct, m),
  log_client(m->cct, messenger, &mc->monmap, LogClient::NO_FLAGS),
  mds_rank(NULL),
  asok_hook(NULL),
  starttime(mono_clock::now())
{
  orig_argc = 0;
  orig_argv = NULL;

  clog = log_client.create_channel();

  monc->set_messenger(messenger);

  // Start from an empty map; replaced on each newer epoch in
  // handle_mds_map() and freed in ~MDSDaemon().
  mdsmap = new MDSMap;
}
105
106 MDSDaemon::~MDSDaemon() {
107 Mutex::Locker lock(mds_lock);
108
109 delete mds_rank;
110 mds_rank = NULL;
111 delete mdsmap;
112 mdsmap = NULL;
113
114 delete authorize_handler_service_registry;
115 delete authorize_handler_cluster_registry;
116 }
117
// Adapter that forwards admin-socket commands to the owning MDSDaemon.
class MDSSocketHook : public AdminSocketHook {
  MDSDaemon *mds;  // back-pointer to the daemon; not owned
public:
  explicit MDSSocketHook(MDSDaemon *m) : mds(m) {}
  // Dispatch one admin-socket command; collects the daemon's textual
  // output into `out`.  Returns whatever asok_command() reports.
  bool call(std::string command, cmdmap_t& cmdmap, std::string format,
	    bufferlist& out) override {
    stringstream ss;
    bool r = mds->asok_command(command, cmdmap, format, ss);
    out.append(ss);
    return r;
  }
};
130
131 bool MDSDaemon::asok_command(string command, cmdmap_t& cmdmap, string format,
132 ostream& ss)
133 {
134 dout(1) << "asok_command: " << command << " (starting...)" << dendl;
135
136 Formatter *f = Formatter::create(format, "json-pretty", "json-pretty");
137 bool handled = false;
138 if (command == "status") {
139 dump_status(f);
140 handled = true;
141 } else {
142 if (mds_rank == NULL) {
143 dout(1) << "Can't run that command on an inactive MDS!" << dendl;
144 f->dump_string("error", "mds_not_active");
145 } else {
146 handled = mds_rank->handle_asok_command(command, cmdmap, f, ss);
147 }
148 }
149 f->flush(ss);
150 delete f;
151
152 dout(1) << "asok_command: " << command << " (complete)" << dendl;
153
154 return handled;
155 }
156
157 void MDSDaemon::dump_status(Formatter *f)
158 {
159 f->open_object_section("status");
160 f->dump_stream("cluster_fsid") << monc->get_fsid();
161 if (mds_rank) {
162 f->dump_int("whoami", mds_rank->get_nodeid());
163 } else {
164 f->dump_int("whoami", MDS_RANK_NONE);
165 }
166
167 f->dump_int("id", monc->get_global_id());
168 f->dump_string("want_state", ceph_mds_state_name(beacon.get_want_state()));
169 f->dump_string("state", ceph_mds_state_name(mdsmap->get_state_gid(mds_gid_t(
170 monc->get_global_id()))));
171 if (mds_rank) {
172 Mutex::Locker l(mds_lock);
173 mds_rank->dump_status(f);
174 }
175
176 f->dump_unsigned("mdsmap_epoch", mdsmap->get_epoch());
177 if (mds_rank) {
178 f->dump_unsigned("osdmap_epoch", mds_rank->get_osd_epoch());
179 f->dump_unsigned("osdmap_epoch_barrier", mds_rank->get_osd_epoch_barrier());
180 } else {
181 f->dump_unsigned("osdmap_epoch", 0);
182 f->dump_unsigned("osdmap_epoch_barrier", 0);
183 }
184
185 f->dump_float("uptime", get_uptime().count());
186
187 f->close_section(); // status
188 }
189
190 void MDSDaemon::set_up_admin_socket()
191 {
192 int r;
193 AdminSocket *admin_socket = g_ceph_context->get_admin_socket();
194 assert(asok_hook == nullptr);
195 asok_hook = new MDSSocketHook(this);
196 r = admin_socket->register_command("status", "status", asok_hook,
197 "high-level status of MDS");
198 assert(r == 0);
199 r = admin_socket->register_command("dump_ops_in_flight",
200 "dump_ops_in_flight", asok_hook,
201 "show the ops currently in flight");
202 assert(r == 0);
203 r = admin_socket->register_command("ops",
204 "ops", asok_hook,
205 "show the ops currently in flight");
206 assert(r == 0);
207 r = admin_socket->register_command("dump_blocked_ops", "dump_blocked_ops",
208 asok_hook,
209 "show the blocked ops currently in flight");
210 assert(r == 0);
211 r = admin_socket->register_command("dump_historic_ops", "dump_historic_ops",
212 asok_hook,
213 "show slowest recent ops");
214 assert(r == 0);
215 r = admin_socket->register_command("dump_historic_ops_by_duration", "dump_historic_ops_by_duration",
216 asok_hook,
217 "show slowest recent ops, sorted by op duration");
218 assert(r == 0);
219 r = admin_socket->register_command("scrub_path",
220 "scrub_path name=path,type=CephString "
221 "name=scrubops,type=CephChoices,"
222 "strings=force|recursive|repair,n=N,req=false",
223 asok_hook,
224 "scrub an inode and output results");
225 assert(r == 0);
226 r = admin_socket->register_command("tag path",
227 "tag path name=path,type=CephString"
228 " name=tag,type=CephString",
229 asok_hook,
230 "Apply scrub tag recursively");
231 assert(r == 0);
232 r = admin_socket->register_command("flush_path",
233 "flush_path name=path,type=CephString",
234 asok_hook,
235 "flush an inode (and its dirfrags)");
236 assert(r == 0);
237 r = admin_socket->register_command("export dir",
238 "export dir "
239 "name=path,type=CephString "
240 "name=rank,type=CephInt",
241 asok_hook,
242 "migrate a subtree to named MDS");
243 assert(r == 0);
244 r = admin_socket->register_command("dump cache",
245 "dump cache name=path,type=CephString,req=false",
246 asok_hook,
247 "dump metadata cache (optionally to a file)");
248 assert(r == 0);
249 r = admin_socket->register_command("cache status",
250 "cache status",
251 asok_hook,
252 "show cache status");
253 assert(r == 0);
254 r = admin_socket->register_command("cache drop",
255 "cache drop name=timeout,type=CephInt,range=0,req=false",
256 asok_hook,
257 "drop cache");
258 assert(r == 0);
259 r = admin_socket->register_command("dump tree",
260 "dump tree "
261 "name=root,type=CephString,req=true "
262 "name=depth,type=CephInt,req=false ",
263 asok_hook,
264 "dump metadata cache for subtree");
265 assert(r == 0);
266 r = admin_socket->register_command("dump loads",
267 "dump loads",
268 asok_hook,
269 "dump metadata loads");
270 assert(r == 0);
271 r = admin_socket->register_command("session evict",
272 "session evict name=client_id,type=CephString",
273 asok_hook,
274 "Evict a CephFS client");
275 assert(r == 0);
276 r = admin_socket->register_command("osdmap barrier",
277 "osdmap barrier name=target_epoch,type=CephInt",
278 asok_hook,
279 "Wait until the MDS has this OSD map epoch");
280 assert(r == 0);
281 r = admin_socket->register_command("session ls",
282 "session ls",
283 asok_hook,
284 "Enumerate connected CephFS clients");
285 assert(r == 0);
286 r = admin_socket->register_command("flush journal",
287 "flush journal",
288 asok_hook,
289 "Flush the journal to the backing store");
290 assert(r == 0);
291 r = admin_socket->register_command("force_readonly",
292 "force_readonly",
293 asok_hook,
294 "Force MDS to read-only mode");
295 assert(r == 0);
296 r = admin_socket->register_command("get subtrees",
297 "get subtrees",
298 asok_hook,
299 "Return the subtree map");
300 assert(r == 0);
301 r = admin_socket->register_command("dirfrag split",
302 "dirfrag split "
303 "name=path,type=CephString,req=true "
304 "name=frag,type=CephString,req=true "
305 "name=bits,type=CephInt,req=true ",
306 asok_hook,
307 "Fragment directory by path");
308 assert(r == 0);
309 r = admin_socket->register_command("dirfrag merge",
310 "dirfrag merge "
311 "name=path,type=CephString,req=true "
312 "name=frag,type=CephString,req=true",
313 asok_hook,
314 "De-fragment directory by path");
315 assert(r == 0);
316 r = admin_socket->register_command("dirfrag ls",
317 "dirfrag ls "
318 "name=path,type=CephString,req=true",
319 asok_hook,
320 "List fragments in directory");
321 assert(r == 0);
322 }
323
324 void MDSDaemon::clean_up_admin_socket()
325 {
326 AdminSocket *admin_socket = g_ceph_context->get_admin_socket();
327 admin_socket->unregister_command("status");
328 admin_socket->unregister_command("dump_ops_in_flight");
329 admin_socket->unregister_command("ops");
330 admin_socket->unregister_command("dump_blocked_ops");
331 admin_socket->unregister_command("dump_historic_ops");
332 admin_socket->unregister_command("dump_historic_ops_by_duration");
333 admin_socket->unregister_command("scrub_path");
334 admin_socket->unregister_command("tag path");
335 admin_socket->unregister_command("flush_path");
336 admin_socket->unregister_command("export dir");
337 admin_socket->unregister_command("dump cache");
338 admin_socket->unregister_command("cache status");
339 admin_socket->unregister_command("dump tree");
340 admin_socket->unregister_command("dump loads");
341 admin_socket->unregister_command("session evict");
342 admin_socket->unregister_command("osdmap barrier");
343 admin_socket->unregister_command("session ls");
344 admin_socket->unregister_command("flush journal");
345 admin_socket->unregister_command("force_readonly");
346 admin_socket->unregister_command("get subtrees");
347 admin_socket->unregister_command("dirfrag split");
348 admin_socket->unregister_command("dirfrag merge");
349 admin_socket->unregister_command("dirfrag ls");
350 delete asok_hook;
351 asok_hook = NULL;
352 }
353
354 const char** MDSDaemon::get_tracked_conf_keys() const
355 {
356 static const char* KEYS[] = {
357 "mds_op_complaint_time", "mds_op_log_threshold",
358 "mds_op_history_size", "mds_op_history_duration",
359 "mds_enable_op_tracker",
360 "mds_log_pause",
361 // clog & admin clog
362 "clog_to_monitors",
363 "clog_to_syslog",
364 "clog_to_syslog_facility",
365 "clog_to_syslog_level",
366 "clog_to_graylog",
367 "clog_to_graylog_host",
368 "clog_to_graylog_port",
369 // MDCache
370 "mds_cache_size",
371 "mds_cache_memory_limit",
372 "mds_cache_reservation",
373 "mds_health_cache_threshold",
374 "mds_cache_mid",
375 "mds_dump_cache_threshold_formatter",
376 "mds_dump_cache_threshold_file",
377 // MDBalancer
378 "mds_bal_fragment_interval",
379 // PurgeQueue
380 "mds_max_purge_ops",
381 "mds_max_purge_ops_per_pg",
382 "mds_max_purge_files",
383 // Migrator
384 "mds_max_export_size",
385 "mds_inject_migrator_session_race",
386 "mds_inject_migrator_message_loss",
387 "host",
388 "fsid",
389 "mds_request_load_average_decay_rate",
390 "mds_cap_revoke_eviction_timeout",
391 NULL
392 };
393 return KEYS;
394 }
395
// React to runtime config changes for the keys listed in
// get_tracked_conf_keys().  Most changes are simply forwarded to the
// rank's subsystems when a rank is held; rankless daemons ignore them.
void MDSDaemon::handle_conf_change(const struct md_config_t *conf,
				   const std::set <std::string> &changed)
{
  // We may be called within mds_lock (via `tell`) or outwith the
  // lock (via admin socket `config set`), so handle either case.
  const bool initially_locked = mds_lock.is_locked_by_me();
  if (!initially_locked) {
    mds_lock.Lock();
  }

  // Op tracker thresholds.
  if (changed.count("mds_op_complaint_time") ||
      changed.count("mds_op_log_threshold")) {
    if (mds_rank) {
      mds_rank->op_tracker.set_complaint_and_threshold(conf->mds_op_complaint_time,
						       conf->mds_op_log_threshold);
    }
  }
  // Op history retention.
  if (changed.count("mds_op_history_size") ||
      changed.count("mds_op_history_duration")) {
    if (mds_rank) {
      mds_rank->op_tracker.set_history_size_and_duration(conf->mds_op_history_size,
							 conf->mds_op_history_duration);
    }
  }
  if (changed.count("mds_enable_op_tracker")) {
    if (mds_rank) {
      mds_rank->op_tracker.set_tracking(conf->mds_enable_op_tracker);
    }
  }
  // Any cluster-log routing change (or identity change) requires the rank
  // to rebuild its log channel configuration.
  if (changed.count("clog_to_monitors") ||
      changed.count("clog_to_syslog") ||
      changed.count("clog_to_syslog_level") ||
      changed.count("clog_to_syslog_facility") ||
      changed.count("clog_to_graylog") ||
      changed.count("clog_to_graylog_host") ||
      changed.count("clog_to_graylog_port") ||
      changed.count("host") ||
      changed.count("fsid")) {
    if (mds_rank) {
      mds_rank->update_log_config();
    }
  }

  // If mds_log_pause just transitioned to false, wake up any journal
  // submitters that were blocked while it was paused.
  if (!g_conf->mds_log_pause && changed.count("mds_log_pause")) {
    if (mds_rank) {
      mds_rank->mdlog->kick_submitter();
    }
  }

  // Give the rank (and its subsystems) a chance at the remaining keys.
  if (mds_rank) {
    mds_rank->handle_conf_change(conf, changed);
  }

  if (!initially_locked) {
    mds_lock.Unlock();
  }
}
453
454
// Bring the daemon up: connect/authenticate to the monitors, start the
// mgr client, set up the admin socket and config observer, and arm the
// tick timer.  Returns 0 on success or a negative errno; on failure the
// daemon suicides before returning.
int MDSDaemon::init()
{
  // Debug dump of in-memory metadata object sizes (useful when tuning
  // cache memory accounting).
  dout(10) << sizeof(MDSCacheObject) << "\tMDSCacheObject" << dendl;
  dout(10) << sizeof(CInode) << "\tCInode" << dendl;
  dout(10) << sizeof(elist<void*>::item) << "\t elist<>::item *7=" << 7*sizeof(elist<void*>::item) << dendl;
  dout(10) << sizeof(CInode::mempool_inode) << "\t inode " << dendl;
  dout(10) << sizeof(CInode::mempool_old_inode) << "\t old_inode " << dendl;
  dout(10) << sizeof(nest_info_t) << "\t nest_info_t " << dendl;
  dout(10) << sizeof(frag_info_t) << "\t frag_info_t " << dendl;
  dout(10) << sizeof(SimpleLock) << "\t SimpleLock *5=" << 5*sizeof(SimpleLock) << dendl;
  dout(10) << sizeof(ScatterLock) << "\t ScatterLock *3=" << 3*sizeof(ScatterLock) << dendl;
  dout(10) << sizeof(CDentry) << "\tCDentry" << dendl;
  dout(10) << sizeof(elist<void*>::item) << "\t elist<>::item" << dendl;
  dout(10) << sizeof(SimpleLock) << "\t SimpleLock" << dendl;
  dout(10) << sizeof(CDir) << "\tCDir " << dendl;
  dout(10) << sizeof(elist<void*>::item) << "\t elist<>::item *2=" << 2*sizeof(elist<void*>::item) << dendl;
  dout(10) << sizeof(fnode_t) << "\t fnode_t " << dendl;
  dout(10) << sizeof(nest_info_t) << "\t nest_info_t *2" << dendl;
  dout(10) << sizeof(frag_info_t) << "\t frag_info_t *2" << dendl;
  dout(10) << sizeof(Capability) << "\tCapability " << dendl;
  dout(10) << sizeof(xlist<void*>::item) << "\t xlist<>::item *2=" << 2*sizeof(xlist<void*>::item) << dendl;

  // Beacon gets messages before the daemon itself.
  messenger->add_dispatcher_tail(&beacon);
  messenger->add_dispatcher_tail(this);

  // get monmap
  monc->set_messenger(messenger);

  monc->set_want_keys(CEPH_ENTITY_TYPE_MON | CEPH_ENTITY_TYPE_OSD |
                      CEPH_ENTITY_TYPE_MDS | CEPH_ENTITY_TYPE_MGR);
  int r = 0;
  r = monc->init();
  if (r < 0) {
    derr << "ERROR: failed to get monmap: " << cpp_strerror(-r) << dendl;
    // suicide() asserts mds_lock is held.
    mds_lock.Lock();
    suicide();
    mds_lock.Unlock();
    return r;
  }

  // tell monc about log_client so it will know about mon session resets
  monc->set_log_client(&log_client);

  r = monc->authenticate();
  if (r < 0) {
    derr << "ERROR: failed to authenticate: " << cpp_strerror(-r) << dendl;
    mds_lock.Lock();
    suicide();
    mds_lock.Unlock();
    return r;
  }

  // Keep retrying for rotating service keys up to the configured number of
  // 30s attempts, then give up and die.
  int rotating_auth_attempts = 0;
  while (monc->wait_auth_rotating(30.0) < 0) {
    if (++rotating_auth_attempts <= g_conf->max_rotating_auth_attempts) {
      derr << "unable to obtain rotating service keys; retrying" << dendl;
      continue;
    }
    derr << "ERROR: failed to refresh rotating keys, "
         << "maximum retry time reached." << dendl;
    mds_lock.Lock();
    suicide();
    mds_lock.Unlock();
    return -ETIMEDOUT;
  }

  mgrc.init();
  messenger->add_dispatcher_head(&mgrc);

  // Someone (e.g. a signal) may have asked us to shut down while we were
  // authenticating; check before subscribing.
  mds_lock.Lock();
  if (beacon.get_want_state() == CEPH_MDS_STATE_DNE) {
    dout(4) << __func__ << ": terminated already, dropping out" << dendl;
    mds_lock.Unlock();
    return 0;
  }

  monc->sub_want("mdsmap", 0, 0);
  monc->sub_want("mgrmap", 0, 0);
  monc->renew_subs();

  mds_lock.Unlock();

  // Set up admin socket before taking mds_lock, so that ordering
  // is consistent (later we take mds_lock within asok callbacks)
  set_up_admin_socket();
  g_conf->add_observer(this);
  mds_lock.Lock();
  // Re-check for shutdown requested while we were unlocked.
  if (beacon.get_want_state() == MDSMap::STATE_DNE) {
    suicide();  // we could do something more graceful here
    dout(4) << __func__ << ": terminated already, dropping out" << dendl;
    mds_lock.Unlock();
    return 0;
  }

  timer.init();

  beacon.init(mdsmap);
  // We don't have a rank yet; announce ourselves as rankless.
  messenger->set_myname(entity_name_t::MDS(MDS_RANK_NONE));

  // schedule tick
  reset_tick();
  mds_lock.Unlock();

  return 0;
}
560
561 void MDSDaemon::reset_tick()
562 {
563 // cancel old
564 if (tick_event) timer.cancel_event(tick_event);
565
566 // schedule
567 tick_event = timer.add_event_after(
568 g_conf->mds_tick_interval,
569 new FunctionContext([this](int) {
570 assert(mds_lock.is_locked_by_me());
571 tick();
572 }));
573 }
574
575 void MDSDaemon::tick()
576 {
577 // reschedule
578 reset_tick();
579
580 // Call through to subsystems' tick functions
581 if (mds_rank) {
582 mds_rank->tick();
583 }
584 }
585
// Send an MCommandReply for `m` back over its connection, carrying return
// code `r`, data payload `outbl` and status string `outs`.  If the issuing
// session is not a live client session, the connection is marked disposable
// so it is torn down after the reply.
// Does NOT consume `m`; caller keeps its reference.
void MDSDaemon::send_command_reply(MCommand *m, MDSRank *mds_rank,
				   int r, bufferlist outbl,
				   boost::string_view outs)
{
  // get_priv() bumps the session refcount; balanced by put() below.
  Session *session = static_cast<Session *>(m->get_connection()->get_priv());
  assert(session != NULL);
  // If someone is using a closed session for sending commands (e.g.
  // the ceph CLI) then we should feel free to clean up this connection
  // as soon as we've sent them a response.
  const bool live_session =
    session->get_state_seq() > 0 &&
    mds_rank &&
    mds_rank->sessionmap.get_session(session->info.inst.name);

  if (!live_session) {
    // This session only existed to issue commands, so terminate it
    // as soon as we can.
    assert(session->is_closed());
    session->connection->mark_disposable();
  }
  session->put();

  MCommandReply *reply = new MCommandReply(r, outs);
  reply->set_tid(m->get_tid());
  reply->set_data(outbl);
  m->get_connection()->send_message(reply);
}
613
614 /* This function DOES put the passed message before returning*/
615 void MDSDaemon::handle_command(MCommand *m)
616 {
617 Session *session = static_cast<Session *>(m->get_connection()->get_priv());
618 assert(session != NULL);
619
620 int r = 0;
621 cmdmap_t cmdmap;
622 std::stringstream ss;
623 std::string outs;
624 bufferlist outbl;
625 Context *run_after = NULL;
626 bool need_reply = true;
627
628 if (!session->auth_caps.allow_all()) {
629 dout(1) << __func__
630 << ": received command from client without `tell` capability: "
631 << m->get_connection()->peer_addr << dendl;
632
633 ss << "permission denied";
634 r = -EPERM;
635 } else if (m->cmd.empty()) {
636 r = -EINVAL;
637 ss << "no command given";
638 outs = ss.str();
639 } else if (!cmdmap_from_json(m->cmd, &cmdmap, ss)) {
640 r = -EINVAL;
641 outs = ss.str();
642 } else {
643 r = _handle_command(cmdmap, m, &outbl, &outs, &run_after, &need_reply);
644 }
645 session->put();
646
647 if (need_reply) {
648 send_command_reply(m, mds_rank, r, outbl, outs);
649 }
650
651 if (run_after) {
652 run_after->complete(0);
653 }
654
655 m->put();
656 }
657
// Descriptor for one `ceph tell mds.<id> ...` command.  The table below is
// consumed by _handle_command() ("get_command_descriptions") to advertise
// the command set to clients.
struct MDSCommand {
  string cmdstring;     // command signature in cmdmap grammar
  string helpstring;    // human-readable help text
  string module;        // owning module (always "mds" here)
  string perm;          // required permission: "r", "rw" or "*"
  string availability;  // where the command is exposed, e.g. "cli,rest"
} mds_commands[] = {

#define COMMAND(parsesig, helptext, module, perm, availability) \
  {parsesig, helptext, module, perm, availability},

COMMAND("injectargs " \
	"name=injected_args,type=CephString,n=N",
	"inject configuration arguments into running MDS",
	"mds", "*", "cli,rest")
COMMAND("config set " \
	"name=key,type=CephString name=value,type=CephString",
	"Set a configuration option at runtime (not persistent)",
	"mds", "*", "cli,rest")
COMMAND("exit",
	"Terminate this MDS",
	"mds", "*", "cli,rest")
COMMAND("respawn",
	"Restart this MDS",
	"mds", "*", "cli,rest")
COMMAND("session kill " \
	"name=session_id,type=CephInt",
	"End a client session",
	"mds", "*", "cli,rest")
COMMAND("cpu_profiler " \
	"name=arg,type=CephChoices,strings=status|flush",
	"run cpu profiling on daemon", "mds", "rw", "cli,rest")
COMMAND("session ls " \
	"name=filters,type=CephString,n=N,req=false",
	"List client sessions", "mds", "r", "cli,rest")
COMMAND("client ls " \
	"name=filters,type=CephString,n=N,req=false",
	"List client sessions", "mds", "r", "cli,rest")
COMMAND("session evict " \
	"name=filters,type=CephString,n=N,req=false",
	"Evict client session(s)", "mds", "rw", "cli,rest")
COMMAND("client evict " \
	"name=filters,type=CephString,n=N,req=false",
	"Evict client session(s)", "mds", "rw", "cli,rest")
COMMAND("damage ls",
	"List detected metadata damage", "mds", "r", "cli,rest")
COMMAND("damage rm name=damage_id,type=CephInt",
	"Remove a damage table entry", "mds", "rw", "cli,rest")
COMMAND("version", "report version of MDS", "mds", "r", "cli,rest")
COMMAND("heap " \
	"name=heapcmd,type=CephChoices,strings=dump|start_profiler|stop_profiler|release|stats", \
	"show heap usage info (available only if compiled with tcmalloc)", \
	"mds", "*", "cli,rest")
COMMAND("cache drop name=timeout,type=CephInt,range=0,req=false", "trim cache and optionally "
	"request client to release all caps and flush the journal", "mds",
	"r", "cli,rest")
};
715
716
// Dispatch one parsed `tell` command.  Outputs go to *outbl (data) and
// *outs (status text); *run_later receives a deferred context for commands
// that must reply before acting (exit/respawn); *need_reply may be cleared
// by MDSRank handlers that reply themselves.  Returns 0 or negative errno.
int MDSDaemon::_handle_command(
  const cmdmap_t &cmdmap,
  MCommand *m,
  bufferlist *outbl,
  std::string *outs,
  Context **run_later,
  bool *need_reply)
{
  assert(outbl != NULL);
  assert(outs != NULL);

  // Deferred suicide so the client gets our reply before we drop out of
  // the mdsmap.
  class SuicideLater : public Context
  {
    MDSDaemon *mds;

  public:
    explicit SuicideLater(MDSDaemon *mds_) : mds(mds_) {}
    void finish(int r) override {
      // Wait a little to improve chances of caller getting
      // our response before seeing us disappear from mdsmap
      sleep(1);

      mds->suicide();
    }
  };


  // Deferred respawn, for the same reason as SuicideLater.
  class RespawnLater : public Context
  {
    MDSDaemon *mds;

  public:

    explicit RespawnLater(MDSDaemon *mds_) : mds(mds_) {}
    void finish(int r) override {
      // Wait a little to improve chances of caller getting
      // our response before seeing us disappear from mdsmap
      sleep(1);

      mds->respawn();
    }
  };

  std::stringstream ds;   // data output
  std::stringstream ss;   // status output
  std::string prefix;
  std::string format;
  // NOTE(review): `f` is created from `format` while it is still empty —
  // "format" is only read from cmdmap further down.  Formatter::create("")
  // presumably yields a null formatter, so the `if (f)` branch under
  // "version" never honors the requested format; confirm against
  // Formatter::create before relying on formatted output here.
  std::unique_ptr<Formatter> f(Formatter::create(format));
  cmd_getval(cct, cmdmap, "prefix", prefix);

  int r = 0;

  if (prefix == "get_command_descriptions") {
    int cmdnum = 0;
    // Shadows the outer `f` deliberately: descriptions are always JSON.
    std::unique_ptr<JSONFormatter> f(ceph::make_unique<JSONFormatter>());
    f->open_object_section("command_descriptions");
    for (MDSCommand *cp = mds_commands;
	 cp < &mds_commands[ARRAY_SIZE(mds_commands)]; cp++) {

      ostringstream secname;
      secname << "cmd" << setfill('0') << std::setw(3) << cmdnum;
      dump_cmddesc_to_json(f.get(), secname.str(), cp->cmdstring, cp->helpstring,
			   cp->module, cp->perm, cp->availability, 0);
      cmdnum++;
    }
    f->close_section();	// command_descriptions

    f->flush(ds);
    goto out;
  }

  cmd_getval(cct, cmdmap, "format", format);
  if (prefix == "version") {
    if (f) {
      f->open_object_section("version");
      f->dump_string("version", pretty_version_to_str());
      f->close_section();
      f->flush(ds);
    } else {
      ds << pretty_version_to_str();
    }
  } else if (prefix == "injectargs") {
    vector<string> argsvec;
    cmd_getval(cct, cmdmap, "injected_args", argsvec);

    if (argsvec.empty()) {
      r = -EINVAL;
      ss << "ignoring empty injectargs";
      goto out;
    }
    // Re-join the argument vector into one space-separated string.
    string args = argsvec.front();
    for (vector<string>::iterator a = ++argsvec.begin(); a != argsvec.end(); ++a)
      args += " " + *a;
    r = cct->_conf->injectargs(args, &ss);
  } else if (prefix == "config set") {
    std::string key;
    cmd_getval(cct, cmdmap, "key", key);
    std::string val;
    cmd_getval(cct, cmdmap, "value", val);
    r = cct->_conf->set_val(key, val, true, &ss);
    if (r == 0) {
      cct->_conf->apply_changes(nullptr);
    }
  } else if (prefix == "exit") {
    // We will send response before executing
    ss << "Exiting...";
    *run_later = new SuicideLater(this);
  } else if (prefix == "respawn") {
    // We will send response before executing
    ss << "Respawning...";
    *run_later = new RespawnLater(this);
  } else if (prefix == "session kill") {
    if (mds_rank == NULL) {
      r = -EINVAL;
      ss << "MDS not active";
      goto out;
    }
    // FIXME harmonize `session kill` with admin socket session evict
    int64_t session_id = 0;
    bool got = cmd_getval(cct, cmdmap, "session_id", session_id);
    assert(got);
    bool killed = mds_rank->evict_client(session_id, false,
                                         g_conf->mds_session_blacklist_on_evict,
                                         ss);
    if (!killed)
      r = -ENOENT;
  } else if (prefix == "heap") {
    if (!ceph_using_tcmalloc()) {
      r = -EOPNOTSUPP;
      ss << "could not issue heap profiler command -- not using tcmalloc!";
    } else {
      string heapcmd;
      cmd_getval(cct, cmdmap, "heapcmd", heapcmd);
      vector<string> heapcmd_vec;
      get_str_vec(heapcmd, heapcmd_vec);
      ceph_heap_profiler_handle_command(heapcmd_vec, ds);
    }
  } else if (prefix == "cpu_profiler") {
    string arg;
    cmd_getval(cct, cmdmap, "arg", arg);
    vector<string> argvec;
    get_str_vec(arg, argvec);
    cpu_profiler_handle_command(argvec, ds);
  } else {
    // Give MDSRank a shot at the command
    if (!mds_rank) {
      ss << "MDS not active";
      r = -EINVAL;
    }
    else {
      bool handled = mds_rank->handle_command(cmdmap, m, &r, &ds, &ss,
					      run_later, need_reply);
      if (!handled) {
	// MDSDaemon doesn't know this command
	ss << "unrecognized command! " << prefix;
	r = -EINVAL;
      }
    }
  }

out:
  *outs = ss.str();
  outbl->append(ds);
  return r;
}
882
883 /* This function deletes the passed message before returning. */
884
// Process an incoming MDSMap update: swap in the new map, verify
// compatibility, mark down removed peers, and either process the map
// ranklessly (_handle_mds_map) or hand it to the rank.  May call
// suicide() or respawn() on fatal transitions.
// This function deletes the passed message before returning.
void MDSDaemon::handle_mds_map(MMDSMap *m)
{
  version_t epoch = m->get_epoch();

  // is it new?
  if (epoch <= mdsmap->get_epoch()) {
    dout(5) << "handle_mds_map old map epoch " << epoch << " <= "
            << mdsmap->get_epoch() << ", discarding" << dendl;
    m->put();
    return;
  }

  dout(1) << "Updating MDS map to version " << epoch << " from " << m->get_source() << dendl;

  entity_addr_t addr;

  // keep old map, for a moment
  MDSMap *oldmap = mdsmap;

  // decode and process
  mdsmap = new MDSMap;
  mdsmap->decode(m->get_encoded());
  const MDSMap::DaemonState new_state = mdsmap->get_state_gid(mds_gid_t(monc->get_global_id()));
  const int incarnation = mdsmap->get_inc_gid(mds_gid_t(monc->get_global_id()));

  monc->sub_got("mdsmap", mdsmap->get_epoch());

  // Calculate my effective rank (either my owned rank or my
  // standby_for_rank if in standby replay)
  mds_rank_t whoami = mdsmap->get_rank_gid(mds_gid_t(monc->get_global_id()));

  // verify compatset
  CompatSet mdsmap_compat(MDSMap::get_compat_set_all());
  dout(10) << "     my compat " << mdsmap_compat << dendl;
  dout(10) << " mdsmap compat " << mdsmap->compat << dendl;
  if (!mdsmap_compat.writeable(mdsmap->compat)) {
    // The cluster requires features we don't have: we can't safely
    // participate, so terminate.
    dout(0) << "handle_mds_map mdsmap compatset " << mdsmap->compat
	    << " not writeable with daemon features " << mdsmap_compat
	    << ", killing myself" << dendl;
    suicide();
    goto out;
  }

  // mark down any failed peers
  for (map<mds_gid_t,MDSMap::mds_info_t>::const_iterator p = oldmap->get_mds_info().begin();
       p != oldmap->get_mds_info().end();
       ++p) {
    if (mdsmap->get_mds_info().count(p->first) == 0) {
      dout(10) << " peer mds gid " << p->first << " removed from map" << dendl;
      messenger->mark_down(p->second.addr);
    }
  }

  // Standby-replay daemons report the rank they are following.
  if (whoami == MDS_RANK_NONE &&
      new_state == MDSMap::STATE_STANDBY_REPLAY) {
    whoami = mdsmap->get_mds_info_gid(mds_gid_t(monc->get_global_id())).standby_for_rank;
  }

  // see who i am
  addr = messenger->get_myaddr();
  dout(10) << "map says I am " << addr << " mds." << whoami << "." << incarnation
	   << " state " << ceph_mds_state_name(new_state) << dendl;

  if (whoami == MDS_RANK_NONE) {
    if (mds_rank != NULL) {
      const auto myid = monc->get_global_id();
      // We have entered a rank-holding state, we shouldn't be back
      // here!
      if (g_conf->mds_enforce_unique_name) {
	if (mds_gid_t existing = mdsmap->find_mds_gid_by_name(name)) {
	  const MDSMap::mds_info_t& i = mdsmap->get_info_gid(existing);
	  if (i.global_id > myid) {
	    dout(1) << "Map replaced me with another mds." << whoami
		    << " with gid (" << i.global_id << ") larger than myself ("
		    << myid << "); quitting!" << dendl;
	    // Call suicide() rather than respawn() because if someone else
	    // has taken our ID, we don't want to keep restarting and
	    // fighting them for the ID.
	    suicide();
	    m->put();
	    return;
	  }
	}
      }

      dout(1) << "Map removed me (mds." << whoami << " gid:"
	      << myid << ") from cluster due to lost contact; respawning" << dendl;
      respawn();
    }
    // MDSRank not active: process the map here to see if we have
    // been assigned a rank.
    dout(10) << __func__ << ": handling map in rankless mode" << dendl;
    _handle_mds_map(oldmap);
  } else {

    // Did we already hold a different rank?  MDSMonitor shouldn't try
    // to change that out from under me!
    if (mds_rank && whoami != mds_rank->get_nodeid()) {
      derr << "Invalid rank transition " << mds_rank->get_nodeid() << "->"
           << whoami << dendl;
      respawn();
    }

    // Did I previously not hold a rank?  Initialize!
    if (mds_rank == NULL) {
      mds_rank = new MDSRankDispatcher(whoami, mds_lock, clog,
          timer, beacon, mdsmap, messenger, monc,
          new FunctionContext([this](int r){respawn();}),
          new FunctionContext([this](int r){suicide();}));
      dout(10) <<  __func__ << ": initializing MDS rank "
               << mds_rank->get_nodeid() << dendl;
      mds_rank->init();
    }

    // MDSRank is active: let him process the map, we have no say.
    dout(10) <<  __func__ << ": handling map as rank "
             << mds_rank->get_nodeid() << dendl;
    mds_rank->handle_mds_map(m, oldmap);
  }

out:
  beacon.notify_mdsmap(mdsmap);
  m->put();
  delete oldmap;
}
1010
1011 void MDSDaemon::_handle_mds_map(MDSMap *oldmap)
1012 {
1013 MDSMap::DaemonState new_state = mdsmap->get_state_gid(mds_gid_t(monc->get_global_id()));
1014
1015 // Normal rankless case, we're marked as standby
1016 if (new_state == MDSMap::STATE_STANDBY) {
1017 beacon.set_want_state(mdsmap, new_state);
1018 dout(1) << "Map has assigned me to become a standby" << dendl;
1019
1020 return;
1021 }
1022
1023 // Case where we thought we were standby, but MDSMap disagrees
1024 if (beacon.get_want_state() == MDSMap::STATE_STANDBY) {
1025 dout(10) << "dropped out of mdsmap, try to re-add myself" << dendl;
1026 new_state = MDSMap::STATE_BOOT;
1027 beacon.set_want_state(mdsmap, new_state);
1028 return;
1029 }
1030
1031 // Case where we have sent a boot beacon that isn't reflected yet
1032 if (beacon.get_want_state() == MDSMap::STATE_BOOT) {
1033 dout(10) << "not in map yet" << dendl;
1034 }
1035 }
1036
1037 void MDSDaemon::handle_signal(int signum)
1038 {
1039 assert(signum == SIGINT || signum == SIGTERM);
1040 derr << "*** got signal " << sig_str(signum) << " ***" << dendl;
1041 {
1042 Mutex::Locker l(mds_lock);
1043 if (stopping) {
1044 return;
1045 }
1046 suicide();
1047 }
1048 }
1049
void MDSDaemon::suicide()
{
  // Tear the daemon down in place (no re-exec).  Caller must hold mds_lock,
  // which we briefly drop below to unregister the config observer.
  assert(mds_lock.is_locked());

  // make sure we don't suicide twice
  assert(stopping == false);
  stopping = true;

  dout(1) << "suicide! Wanted state "
	  << ceph_mds_state_name(beacon.get_want_state()) << dendl;

  // Stop the periodic tick before dismantling the subsystems it touches.
  if (tick_event) {
    timer.cancel_event(tick_event);
    tick_event = 0;
  }

  //because add_observer is called after set_up_admin_socket
  //so we can use asok_hook to avoid assert in the remove_observer
  // NOTE(review): mds_lock is released around remove_observer(); presumably
  // this avoids a lock-order conflict with in-flight observer callbacks —
  // verify against the config observer implementation.
  if (asok_hook != NULL) {
    mds_lock.Unlock();
    g_conf->remove_observer(this);
    mds_lock.Lock();
  }

  clean_up_admin_socket();

  // Inform MDS we are going away, then shut down beacon
  beacon.set_want_state(mdsmap, MDSMap::STATE_DNE);
  if (!mdsmap->is_dne_gid(mds_gid_t(monc->get_global_id()))) {
    // Notify the MDSMonitor that we're dying, so that it doesn't have to
    // wait for us to go laggy. Only do this if we're actually in the
    // MDSMap, because otherwise the MDSMonitor will drop our message.
    beacon.send_and_wait(1);
  }
  beacon.shutdown();

  mgrc.shutdown();

  if (mds_rank) {
    // When a rank exists, it owns the shutdown of timer/monc/messenger.
    mds_rank->shutdown();
  } else {
    timer.shutdown();

    monc->shutdown();
    messenger->shutdown();
  }
}
1097
void MDSDaemon::respawn()
{
  // Re-exec the current process image with its original command line so a
  // fresh daemon can rejoin the cluster.  Never returns: on exec failure we
  // abort, because callers rely on respawn() not returning.
  dout(1) << "respawn!" << dendl;

  /* Dump recent in case the MDS was stuck doing something which caused it to
   * be removed from the MDSMap leading to respawn. */
  g_ceph_context->_log->dump_recent();

  // Rebuild a NULL-terminated argv from the saved original arguments.
  char *new_argv[orig_argc+1];
  dout(1) << " e: '" << orig_argv[0] << "'" << dendl;
  for (int i=0; i<orig_argc; i++) {
    new_argv[i] = (char *)orig_argv[i];
    dout(1) << " " << i << ": '" << orig_argv[i] << "'" << dendl;
  }
  new_argv[orig_argc] = NULL;

  /* Determine the path to our executable, test if Linux /proc/self/exe exists.
   * This allows us to exec the same executable even if it has since been
   * unlinked.
   */
  char exe_path[PATH_MAX] = "";
  if (readlink(PROCPREFIX "/proc/self/exe", exe_path, PATH_MAX-1) == -1) {
    /* Print CWD for the user's interest */
    char buf[PATH_MAX];
    char *cwd = getcwd(buf, sizeof(buf));
    assert(cwd);
    dout(1) << " cwd " << cwd << dendl;

    /* Fall back to a best-effort: just running in our CWD */
    // exe_path was zero-initialized, so strncpy truncation still leaves a
    // NUL-terminated string.
    strncpy(exe_path, orig_argv[0], PATH_MAX-1);
  } else {
    dout(1) << "respawning with exe " << exe_path << dendl;
    // Exec via the /proc link itself (not the resolved target) so the
    // kernel resolves it at exec time.
    strcpy(exe_path, PROCPREFIX "/proc/self/exe");
  }

  dout(1) << " exe_path " << exe_path << dendl;

  // Unblock signals so the replacement image starts with a sane signal mask.
  unblock_all_signals(NULL);
  execv(exe_path, new_argv);

  // Only reached if execv() failed.
  dout(0) << "respawn execv " << orig_argv[0]
	  << " failed with " << cpp_strerror(errno) << dendl;

  // We have to assert out here, because suicide() returns, and callers
  // to respawn expect it never to return.
  ceph_abort();
}
1145
1146
1147
1148 bool MDSDaemon::ms_dispatch(Message *m)
1149 {
1150 Mutex::Locker l(mds_lock);
1151 if (stopping) {
1152 return false;
1153 }
1154
1155 // Drop out early if shutting down
1156 if (beacon.get_want_state() == CEPH_MDS_STATE_DNE) {
1157 dout(10) << " stopping, discarding " << *m << dendl;
1158 m->put();
1159 return true;
1160 }
1161
1162 // First see if it's a daemon message
1163 const bool handled_core = handle_core_message(m);
1164 if (handled_core) {
1165 return true;
1166 }
1167
1168 // Not core, try it as a rank message
1169 if (mds_rank) {
1170 return mds_rank->ms_dispatch(m);
1171 } else {
1172 return false;
1173 }
1174 }
1175
1176 bool MDSDaemon::ms_get_authorizer(int dest_type, AuthAuthorizer **authorizer, bool force_new)
1177 {
1178 dout(10) << "MDSDaemon::ms_get_authorizer type="
1179 << ceph_entity_type_name(dest_type) << dendl;
1180
1181 /* monitor authorization is being handled on different layer */
1182 if (dest_type == CEPH_ENTITY_TYPE_MON)
1183 return true;
1184
1185 if (force_new) {
1186 if (monc->wait_auth_rotating(10) < 0)
1187 return false;
1188 }
1189
1190 *authorizer = monc->build_authorizer(dest_type);
1191 return *authorizer != NULL;
1192 }
1193
1194
/*
 * high priority messages we always process
 */
bool MDSDaemon::handle_core_message(Message *m)
{
  // Dispatch daemon-level messages.  Returns true when the message was
  // consumed here (including its reference), false when the caller should
  // try another dispatcher (e.g. the rank).
  switch (m->get_type()) {
  case CEPH_MSG_MON_MAP:
    // NOTE(review): ALLOW_MESSAGES_FROM filters on the sender's entity type;
    // presumably it drops/short-circuits on mismatch — verify against the
    // macro definition before reordering anything around it.
    ALLOW_MESSAGES_FROM(CEPH_ENTITY_TYPE_MON);
    m->put();
    break;

    // MDS
  case CEPH_MSG_MDS_MAP:
    ALLOW_MESSAGES_FROM(CEPH_ENTITY_TYPE_MON | CEPH_ENTITY_TYPE_MDS);
    handle_mds_map(static_cast<MMDSMap*>(m));
    break;

    // OSD
  case MSG_COMMAND:
    // handle_command takes over the message (no m->put() here).
    handle_command(static_cast<MCommand*>(m));
    break;
  case CEPH_MSG_OSD_MAP:
    ALLOW_MESSAGES_FROM(CEPH_ENTITY_TYPE_MON | CEPH_ENTITY_TYPE_OSD);

    // Only a rank cares about OSD map updates; rankless daemons just drop it.
    if (mds_rank) {
      mds_rank->handle_osd_map();
    }
    m->put();
    break;

  case MSG_MON_COMMAND:
    ALLOW_MESSAGES_FROM(CEPH_ENTITY_TYPE_MON);
    clog->warn() << "dropping `mds tell` command from legacy monitor";
    m->put();
    break;

  default:
    return false;
  }
  return true;
}
1236
// Intentionally a no-op: the MDS daemon takes no action when an outgoing
// connection completes.
void MDSDaemon::ms_handle_connect(Connection *con)
{
}
1240
1241 bool MDSDaemon::ms_handle_reset(Connection *con)
1242 {
1243 if (con->get_peer_type() != CEPH_ENTITY_TYPE_CLIENT)
1244 return false;
1245
1246 Mutex::Locker l(mds_lock);
1247 if (stopping) {
1248 return false;
1249 }
1250 dout(5) << "ms_handle_reset on " << con->get_peer_addr() << dendl;
1251 if (beacon.get_want_state() == CEPH_MDS_STATE_DNE)
1252 return false;
1253
1254 Session *session = static_cast<Session *>(con->get_priv());
1255 if (session) {
1256 if (session->is_closed()) {
1257 dout(3) << "ms_handle_reset closing connection for session " << session->info.inst << dendl;
1258 con->mark_down();
1259 con->set_priv(NULL);
1260 }
1261 session->put();
1262 } else {
1263 con->mark_down();
1264 }
1265 return false;
1266 }
1267
1268
1269 void MDSDaemon::ms_handle_remote_reset(Connection *con)
1270 {
1271 if (con->get_peer_type() != CEPH_ENTITY_TYPE_CLIENT)
1272 return;
1273
1274 Mutex::Locker l(mds_lock);
1275 if (stopping) {
1276 return;
1277 }
1278
1279 dout(5) << "ms_handle_remote_reset on " << con->get_peer_addr() << dendl;
1280 if (beacon.get_want_state() == CEPH_MDS_STATE_DNE)
1281 return;
1282
1283 Session *session = static_cast<Session *>(con->get_priv());
1284 if (session) {
1285 if (session->is_closed()) {
1286 dout(3) << "ms_handle_remote_reset closing connection for session " << session->info.inst << dendl;
1287 con->mark_down();
1288 con->set_priv(NULL);
1289 }
1290 session->put();
1291 }
1292 }
1293
// Connection refusals are not acted upon by the MDS daemon.
bool MDSDaemon::ms_handle_refused(Connection *con)
{
  // do nothing for now
  return false;
}
1299
/**
 * Verify an incoming connection's authorizer and, when it validates, attach
 * a Session to the connection (reusing an existing one if we hold a rank)
 * and parse the peer's auth caps into it.
 *
 * Returns true whenever a decision was made; the verdict itself is reported
 * through @p is_valid.
 */
bool MDSDaemon::ms_verify_authorizer(Connection *con, int peer_type,
			       int protocol, bufferlist& authorizer_data, bufferlist& authorizer_reply,
			       bool& is_valid, CryptoKey& session_key,
			       std::unique_ptr<AuthAuthorizerChallenge> *challenge)
{
  Mutex::Locker l(mds_lock);
  if (stopping) {
    return false;
  }
  if (beacon.get_want_state() == CEPH_MDS_STATE_DNE)
    return false;

  // Intra-cluster MDS peers use the cluster registry; every other entity
  // type goes through the service registry.
  AuthAuthorizeHandler *authorize_handler = 0;
  switch (peer_type) {
  case CEPH_ENTITY_TYPE_MDS:
    authorize_handler = authorize_handler_cluster_registry->get_handler(protocol);
    break;
  default:
    authorize_handler = authorize_handler_service_registry->get_handler(protocol);
  }
  if (!authorize_handler) {
    dout(0) << "No AuthAuthorizeHandler found for protocol " << protocol << dendl;
    is_valid = false;
    return true;
  }

  AuthCapsInfo caps_info;
  EntityName name;
  uint64_t global_id;

  // Without rotating keys we cannot verify anything yet: deny for now.
  RotatingKeyRing *keys = monc->rotating_secrets.get();
  if (keys) {
    is_valid = authorize_handler->verify_authorizer(
      cct, keys,
      authorizer_data, authorizer_reply, name, global_id, caps_info,
      session_key, nullptr, challenge);
  } else {
    dout(10) << __func__ << " no rotating_keys (yet), denied" << dendl;
    is_valid = false;
  }

  if (is_valid) {
    entity_name_t n(con->get_peer_type(), global_id);

    // We allow connections and assign Session instances to connections
    // even if we have not been assigned a rank, because clients with
    // "allow *" are allowed to connect and do 'tell' operations before
    // we have a rank.
    Session *s = NULL;
    if (mds_rank) {
      // If we do hold a rank, see if this is an existing client establishing
      // a new connection, rather than a new client
      s = mds_rank->sessionmap.get_session(n);
    }

    // Wire up a Session* to this connection
    // It doesn't go into a SessionMap instance until it sends an explicit
    // request to open a session (initial state of Session is `closed`)
    if (!s) {
      s = new Session;
      s->info.auth_name = name;
      s->info.inst.addr = con->get_peer_addr();
      s->info.inst.name = n;
      dout(10) << " new session " << s << " for " << s->info.inst << " con " << con << dendl;
      con->set_priv(s);
      s->connection = con;
      if (mds_rank) {
        // Wake anyone blocked waiting for a client connection to appear.
        mds_rank->kick_waiters_for_any_client_connection();
      }
    } else {
      dout(10) << " existing session " << s << " for " << s->info.inst << " existing con " << s->connection
	       << ", new/authorizing con " << con << dendl;
      con->set_priv(s->get());

      // Wait until we fully accept the connection before setting
      // s->connection. In particular, if there are multiple incoming
      // connection attempts, they will all get their authorizer
      // validated, but some of them may "lose the race" and get
      // dropped. We only want to consider the winner(s). See
      // ms_handle_accept(). This is important for Sessions we replay
      // from the journal on recovery that don't have established
      // messenger state; we want the con from only the winning
      // connect attempt(s). (Normal reconnects that don't follow MDS
      // recovery are reconnected to the existing con by the
      // messenger.)
    }

    if (caps_info.allow_all) {
      // Flag for auth providers that don't provide cap strings
      s->auth_caps.set_allow_all();
    } else {
      // Decode and parse the textual cap string; a parse failure rejects
      // the authorizer.
      bufferlist::iterator p = caps_info.caps.begin();
      string auth_cap_str;
      try {
        ::decode(auth_cap_str, p);

        dout(10) << __func__ << ": parsing auth_cap_str='" << auth_cap_str << "'" << dendl;
        std::ostringstream errstr;
        if (!s->auth_caps.parse(g_ceph_context, auth_cap_str, &errstr)) {
          dout(1) << __func__ << ": auth cap parse error: " << errstr.str()
		  << " parsing '" << auth_cap_str << "'" << dendl;
          clog->warn() << name << " mds cap '" << auth_cap_str
		       << "' does not parse: " << errstr.str();
          is_valid = false;
        }
      } catch (buffer::error& e) {
        // Assume legacy auth, defaults to:
        //  * permit all filesystem ops
        //  * permit no `tell` ops
        dout(1) << __func__ << ": cannot decode auth caps bl of length " << caps_info.caps.length() << dendl;
        is_valid = false;
      }
    }
  }

  return true; // we made a decision (see is_valid)
}
1419
1420
1421 void MDSDaemon::ms_handle_accept(Connection *con)
1422 {
1423 Mutex::Locker l(mds_lock);
1424 if (stopping) {
1425 return;
1426 }
1427
1428 Session *s = static_cast<Session *>(con->get_priv());
1429 dout(10) << "ms_handle_accept " << con->get_peer_addr() << " con " << con << " session " << s << dendl;
1430 if (s) {
1431 if (s->connection != con) {
1432 dout(10) << " session connection " << s->connection << " -> " << con << dendl;
1433 s->connection = con;
1434
1435 // send out any queued messages
1436 while (!s->preopen_out_queue.empty()) {
1437 con->send_message(s->preopen_out_queue.front());
1438 s->preopen_out_queue.pop_front();
1439 }
1440 }
1441 s->put();
1442 }
1443 }
1444
1445 bool MDSDaemon::is_clean_shutdown()
1446 {
1447 if (mds_rank) {
1448 return mds_rank->is_stopped();
1449 } else {
1450 return true;
1451 }
1452 }