1 // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
2 // vim: ts=8 sw=2 smarttab
3 /*
4 * Ceph - scalable distributed file system
5 *
6 * Copyright (C) 2015 Red Hat
7 *
8 * This is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU Lesser General Public
10 * License version 2.1, as published by the Free Software
11 * Foundation. See file COPYING.
12 *
13 */
14
15 #include "common/debug.h"
16 #include "common/errno.h"
17
18 #include "messages/MClientRequestForward.h"
19 #include "messages/MMDSLoadTargets.h"
20 #include "messages/MMDSMap.h"
21 #include "messages/MMDSTableRequest.h"
22 #include "messages/MCommand.h"
23 #include "messages/MCommandReply.h"
24
25 #include "MDSDaemon.h"
26 #include "MDSMap.h"
27 #include "SnapClient.h"
28 #include "SnapServer.h"
29 #include "MDBalancer.h"
30 #include "Locker.h"
31 #include "Server.h"
32 #include "InoTable.h"
33 #include "mon/MonClient.h"
34 #include "common/HeartbeatMap.h"
35 #include "ScrubStack.h"
36
37
38 #include "MDSRank.h"
39
40 #define dout_context g_ceph_context
41 #define dout_subsys ceph_subsys_mds
42 #undef dout_prefix
43 #define dout_prefix *_dout << "mds." << whoami << '.' << incarnation << ' '
44
45 MDSRank::MDSRank(
46 mds_rank_t whoami_,
47 Mutex &mds_lock_,
48 LogChannelRef &clog_,
49 SafeTimer &timer_,
50 Beacon &beacon_,
51 MDSMap *& mdsmap_,
52 Messenger *msgr,
53 MonClient *monc_,
54 Context *respawn_hook_,
55 Context *suicide_hook_)
56 :
57 whoami(whoami_), incarnation(0),
58 mds_lock(mds_lock_), clog(clog_), timer(timer_),
59 mdsmap(mdsmap_),
60 objecter(new Objecter(g_ceph_context, msgr, monc_, nullptr, 0, 0)),
61 server(NULL), mdcache(NULL), locker(NULL), mdlog(NULL),
62 balancer(NULL), scrubstack(NULL),
63 damage_table(whoami_),
64 inotable(NULL), snapserver(NULL), snapclient(NULL),
65 sessionmap(this), logger(NULL), mlogger(NULL),
66 op_tracker(g_ceph_context, g_conf->mds_enable_op_tracker,
67 g_conf->osd_num_op_tracker_shard),
68 last_state(MDSMap::STATE_BOOT),
69 state(MDSMap::STATE_BOOT),
70 cluster_degraded(false), stopping(false),
71 purge_queue(g_ceph_context, whoami_,
72 mdsmap_->get_metadata_pool(), objecter,
73 new FunctionContext(
74 [this](int r){
75 // Purge Queue operates inside mds_lock when we're calling into
76 // it, and outside when in background, so must handle both cases.
77 if (mds_lock.is_locked_by_me()) {
78 damaged();
79 } else {
80 damaged_unlocked();
81 }
82 }
83 )
84 ),
85 progress_thread(this), dispatch_depth(0),
86 hb(NULL), last_tid(0), osd_epoch_barrier(0), beacon(beacon_),
87 mds_slow_req_count(0),
88 last_client_mdsmap_bcast(0),
89 messenger(msgr), monc(monc_),
90 respawn_hook(respawn_hook_),
91 suicide_hook(suicide_hook_),
92 standby_replaying(false)
93 {
94 hb = g_ceph_context->get_heartbeat_map()->add_worker("MDSRank", pthread_self());
95
96 purge_queue.update_op_limit(*mdsmap);
97
98 objecter->unset_honor_osdmap_full();
99
100 finisher = new Finisher(msgr->cct);
101
102 mdcache = new MDCache(this, purge_queue);
103 mdlog = new MDLog(this);
104 balancer = new MDBalancer(this, messenger, monc);
105
106 scrubstack = new ScrubStack(mdcache, finisher);
107
108 inotable = new InoTable(this);
109 snapserver = new SnapServer(this, monc);
110 snapclient = new SnapClient(this);
111
112 server = new Server(this);
113 locker = new Locker(this, mdcache);
114
115 op_tracker.set_complaint_and_threshold(msgr->cct->_conf->mds_op_complaint_time,
116 msgr->cct->_conf->mds_op_log_threshold);
117 op_tracker.set_history_size_and_duration(msgr->cct->_conf->mds_op_history_size,
118 msgr->cct->_conf->mds_op_history_duration);
119 }
120
121 MDSRank::~MDSRank()
122 {
123 if (hb) {
124 g_ceph_context->get_heartbeat_map()->remove_worker(hb);
125 }
126
127 if (scrubstack) { delete scrubstack; scrubstack = NULL; }
128 if (mdcache) { delete mdcache; mdcache = NULL; }
129 if (mdlog) { delete mdlog; mdlog = NULL; }
130 if (balancer) { delete balancer; balancer = NULL; }
131 if (inotable) { delete inotable; inotable = NULL; }
132 if (snapserver) { delete snapserver; snapserver = NULL; }
133 if (snapclient) { delete snapclient; snapclient = NULL; }
134 if (mdsmap) { delete mdsmap; mdsmap = 0; }
135
136 if (server) { delete server; server = 0; }
137 if (locker) { delete locker; locker = 0; }
138
139 if (logger) {
140 g_ceph_context->get_perfcounters_collection()->remove(logger);
141 delete logger;
142 logger = 0;
143 }
144 if (mlogger) {
145 g_ceph_context->get_perfcounters_collection()->remove(mlogger);
146 delete mlogger;
147 mlogger = 0;
148 }
149
150 delete finisher;
151 finisher = NULL;
152
153 delete suicide_hook;
154 suicide_hook = NULL;
155
156 delete respawn_hook;
157 respawn_hook = NULL;
158
159 delete objecter;
160 objecter = nullptr;
161 }
162
163 void MDSRankDispatcher::init()
164 {
165 objecter->init();
166 messenger->add_dispatcher_head(objecter);
167
168 objecter->start();
169
170 update_log_config();
171 create_logger();
172
173 // Expose the OSDMap (already populated during MDS::init) to anyone
174 // who is interested in it.
175 handle_osd_map();
176
177 progress_thread.create("mds_rank_progr");
178
179 purge_queue.init();
180
181 finisher->start();
182 }
183
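// Reconcile the locally tracked (decaying) export_targets with the set the
// MDSMap already records for this rank; if a target has decayed away or the
// map holds stale entries, send an MMDSLoadTargets update to the monitor.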
184 void MDSRank::update_targets(utime_t now)
185 {
186 // get MonMap's idea of my export_targets
187 const set<mds_rank_t>& map_targets = mdsmap->get_mds_info(get_nodeid()).export_targets;
188
189 dout(20) << "updating export targets, currently " << map_targets.size() << " ranks are targets" << dendl;
190
191 bool send = false;
192 set<mds_rank_t> new_map_targets;
193
194 auto it = export_targets.begin();
195 while (it != export_targets.end()) {
196 mds_rank_t rank = it->first;
197 double val = it->second.get(now);
198 dout(20) << "export target mds." << rank << " value is " << val << " @ " << now << dendl;
199
200 if (val <= 0.01) {
201 dout(15) << "export target mds." << rank << " is no longer an export target" << dendl;
202 export_targets.erase(it++);
203 send = true;
204 continue;
205 }
206 if (!map_targets.count(rank)) {
207 dout(15) << "export target mds." << rank << " not in map's export_targets" << dendl;
208 send = true;
209 }
210 new_map_targets.insert(rank);
211 it++;
212 }
213 if (new_map_targets.size() < map_targets.size()) {
214 dout(15) << "export target map holds stale targets, sending update" << dendl;
215 send = true;
216 }
217
218 if (send) {
219 dout(15) << "updating export_targets, now " << new_map_targets.size() << " ranks are targets" << dendl;
220 MMDSLoadTargets* m = new MMDSLoadTargets(mds_gid_t(monc->get_global_id()), new_map_targets);
221 monc->send_mon_message(m);
222 }
223 }
224
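// Record activity against an export target.  A negative 'amount' is treated
// as "keep this target alive": it is replaced with a default hit sized
// against mds_bal_target_decay so the DecayCounter stays above the 0.01
// expiry threshold used by update_targets() for a while.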
225 void MDSRank::hit_export_target(utime_t now, mds_rank_t rank, double amount)
226 {
227 double rate = g_conf->mds_bal_target_decay;
228 if (amount < 0.0) {
229 amount = 100.0/g_conf->mds_bal_target_decay; /* a good default for "i am trying to keep this export_target active" */
230 }
231 auto em = export_targets.emplace(std::piecewise_construct, std::forward_as_tuple(rank), std::forward_as_tuple(now, DecayRate(rate)));
232 if (em.second) {
233 dout(15) << "hit export target (new) " << amount << " @ " << now << dendl;
234 } else {
235 dout(15) << "hit export target " << amount << " @ " << now << dendl;
236 }
237 em.first->second.hit(now, amount);
238 }
239
240 void MDSRankDispatcher::tick()
241 {
242 heartbeat_reset();
243
244 if (beacon.is_laggy()) {
245 dout(5) << "tick bailing out since we seem laggy" << dendl;
246 return;
247 }
248
249 check_ops_in_flight();
250
251 // Wake up thread in case we used to be laggy and have waiting_for_nolaggy
252 // messages to progress.
253 progress_thread.signal();
254
255 // make sure mds log flushes, trims periodically
256 mdlog->flush();
257
258 if (is_active() || is_stopping()) {
259 mdcache->trim();
260 mdcache->trim_client_leases();
261 mdcache->check_memory_usage();
262 mdlog->trim(); // NOT during recovery!
263 }
264
265 // log
266 mds_load_t load = balancer->get_load(ceph_clock_now());
267
268 if (logger) {
269 logger->set(l_mds_load_cent, 100 * load.mds_load());
270 logger->set(l_mds_dispatch_queue_len, messenger->get_dispatch_queue_len());
271 logger->set(l_mds_subtrees, mdcache->num_subtrees());
272
273 mdcache->log_stat();
274 }
275
276 // ...
277 if (is_clientreplay() || is_active() || is_stopping()) {
278 server->find_idle_sessions();
279 locker->tick();
280 }
281
282 if (is_reconnect())
283 server->reconnect_tick();
284
285 if (is_active()) {
286 balancer->tick();
287 mdcache->find_stale_fragment_freeze();
288 mdcache->migrator->find_stale_export_freeze();
289 if (snapserver)
290 snapserver->check_osd_map(false);
291 }
292
293 if (is_active() || is_stopping()) {
294 update_targets(ceph_clock_now());
295 }
296
297 // shut down?
298 if (is_stopping()) {
299 mdlog->trim();
300 if (mdcache->shutdown_pass()) {
301 uint64_t pq_progress = 0;
302 uint64_t pq_total = 0;
303 size_t pq_in_flight = 0;
304 if (!purge_queue.drain(&pq_progress, &pq_total, &pq_in_flight)) {
305 dout(7) << "shutdown_pass=true, but still waiting for purge queue"
306 << dendl;
307 // This takes unbounded time, so we must indicate progress
308 // to the administrator: we do it in a slightly imperfect way
309 // by sending periodic (tick frequency) clog messages while
310 // in this state.
311 clog->info() << "MDS rank " << whoami << " waiting for purge queue ("
312 << std::dec << pq_progress << "/" << pq_total << " " << pq_in_flight
313 << " files purging" << ")";
314 } else {
315 dout(7) << "shutdown_pass=true, finished w/ shutdown, moving to "
316 "down:stopped" << dendl;
317 stopping_done();
318 }
319 }
320 else {
321 dout(7) << "shutdown_pass=false" << dendl;
322 }
323 }
324
325 // Expose ourselves to Beacon to update health indicators
326 beacon.notify_health(this);
327 }
328
329 void MDSRankDispatcher::shutdown()
330 {
331 // It should never be possible for shutdown to get called twice, because
332 // anyone picking up mds_lock checks if stopping is true and drops
333 // out if it is.
334 assert(stopping == false);
335 stopping = true;
336
337 dout(1) << __func__ << ": shutting down rank " << whoami << dendl;
338
339 timer.shutdown();
340
341 // MDLog has to shut down before the finisher, because some of its
342 // threads block on IOs that require finisher to complete.
343 mdlog->shutdown();
344
345 // shut down cache
346 mdcache->shutdown();
347
348 purge_queue.shutdown();
349
350 mds_lock.Unlock();
351 finisher->stop(); // no flushing
352 mds_lock.Lock();
353
354 if (objecter->initialized.read())
355 objecter->shutdown();
356
357 monc->shutdown();
358
359 op_tracker.on_shutdown();
360
361 progress_thread.shutdown();
362
363 // release mds_lock for finisher/messenger threads (e.g.
364 // MDSDaemon::ms_handle_reset called from Messenger).
365 mds_lock.Unlock();
366
367 // shut down messenger
368 messenger->shutdown();
369
370 mds_lock.Lock();
371
372 // Workaround unclean shutdown: HeartbeatMap will assert if
373 // worker is not removed (as we do in ~MDS), but ~MDS is not
374 // always called after suicide.
375 if (hb) {
376 g_ceph_context->get_heartbeat_map()->remove_worker(hb);
377 hb = NULL;
378 }
379 }
380
381 /**
382 * Helper for simple callbacks that call a void fn with no args.
383 */
384 class C_MDS_VoidFn : public MDSInternalContext
385 {
386 typedef void (MDSRank::*fn_ptr)();
387 protected:
388 fn_ptr fn;
389 public:
390 C_MDS_VoidFn(MDSRank *mds_, fn_ptr fn_)
391 : MDSInternalContext(mds_), fn(fn_)
392 {
393 assert(mds_);
394 assert(fn_);
395 }
396
397 void finish(int r) override
398 {
399 (mds->*fn)();
400 }
401 };
402
403 int64_t MDSRank::get_metadata_pool()
404 {
405 return mdsmap->get_metadata_pool();
406 }
407
408 MDSTableClient *MDSRank::get_table_client(int t)
409 {
410 switch (t) {
411 case TABLE_ANCHOR: return NULL;
412 case TABLE_SNAP: return snapclient;
413 default: ceph_abort();
414 }
415 }
416
417 MDSTableServer *MDSRank::get_table_server(int t)
418 {
419 switch (t) {
420 case TABLE_ANCHOR: return NULL;
421 case TABLE_SNAP: return snapserver;
422 default: ceph_abort();
423 }
424 }
425
426 void MDSRank::suicide()
427 {
428 if (suicide_hook) {
429 suicide_hook->complete(0);
430 suicide_hook = NULL;
431 }
432 }
433
434 void MDSRank::respawn()
435 {
436 if (respawn_hook) {
437 respawn_hook->complete(0);
438 respawn_hook = NULL;
439 }
440 }
441
442 void MDSRank::damaged()
443 {
444 assert(whoami != MDS_RANK_NONE);
445 assert(mds_lock.is_locked_by_me());
446
447 beacon.set_want_state(mdsmap, MDSMap::STATE_DAMAGED);
448 monc->flush_log(); // Flush any clog error from before we were called
449 beacon.notify_health(this); // Include latest status in our swan song
450 beacon.send_and_wait(g_conf->mds_mon_shutdown_timeout);
451
452 // It's okay if we timed out and the mon didn't get our beacon, because
453 // another daemon (or ourselves after respawn) will eventually take the
454 // rank and report DAMAGED again when it hits the same problem we did.
455
456 respawn(); // Respawn into standby in case mon has other work for us
457 }
458
459 void MDSRank::damaged_unlocked()
460 {
461 Mutex::Locker l(mds_lock);
462 damaged();
463 }
464
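// Policy, per mds_action_on_write_error: blacklisted -> respawn;
// >= 2 -> respawn; == 1 -> force the cache read-only; otherwise log and ignore.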
465 void MDSRank::handle_write_error(int err)
466 {
467 if (err == -EBLACKLISTED) {
468 derr << "we have been blacklisted (fenced), respawning..." << dendl;
469 respawn();
470 return;
471 }
472
473 if (g_conf->mds_action_on_write_error >= 2) {
474 derr << "unhandled write error " << cpp_strerror(err) << ", suicide..." << dendl;
475 respawn();
476 } else if (g_conf->mds_action_on_write_error == 1) {
477 derr << "unhandled write error " << cpp_strerror(err) << ", force readonly..." << dendl;
478 mdcache->force_readonly();
479 } else {
480 // ignore;
481 derr << "unhandled write error " << cpp_strerror(err) << ", ignore..." << dendl;
482 }
483 }
484
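// Runs under mds_lock; sleeps on 'cond' until signalled (e.g. from tick() or
// shutdown()) and there is work in finished_queue or deliverable
// waiting_for_nolaggy messages, then drains them via _advance_queues().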
485 void *MDSRank::ProgressThread::entry()
486 {
487 Mutex::Locker l(mds->mds_lock);
488 while (true) {
489 while (!mds->stopping &&
490 mds->finished_queue.empty() &&
491 (mds->waiting_for_nolaggy.empty() || mds->beacon.is_laggy())) {
492 cond.Wait(mds->mds_lock);
493 }
494
495 if (mds->stopping) {
496 break;
497 }
498
499 mds->_advance_queues();
500 }
501
502 return NULL;
503 }
504
505
506 void MDSRank::ProgressThread::shutdown()
507 {
508 assert(mds->mds_lock.is_locked_by_me());
509 assert(mds->stopping);
510
511 if (am_self()) {
512 // Stopping is set, we will fall out of our main loop naturally
513 } else {
514 // Kick the thread to notice mds->stopping, and join it
515 cond.Signal();
516 mds->mds_lock.Unlock();
517 if (is_started())
518 join();
519 mds->mds_lock.Lock();
520 }
521 }
522
523 bool MDSRankDispatcher::ms_dispatch(Message *m)
524 {
525 bool ret;
526 inc_dispatch_depth();
527 ret = _dispatch(m, true);
528 dec_dispatch_depth();
529 return ret;
530 }
531
532 /* If this function returns true, it recognizes the message and has taken the
533 * reference. If it returns false, it has done neither. */
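// Note: messages are deferred onto waiting_for_nolaggy both while the beacon
// is laggy and whenever older deferred messages are still queued, so that
// ordering is preserved; _advance_queues() replays them later.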
534 bool MDSRank::_dispatch(Message *m, bool new_msg)
535 {
536 if (is_stale_message(m)) {
537 m->put();
538 return true;
539 }
540
541 if (beacon.is_laggy()) {
542 dout(10) << " laggy, deferring " << *m << dendl;
543 waiting_for_nolaggy.push_back(m);
544 } else if (new_msg && !waiting_for_nolaggy.empty()) {
545 dout(10) << " there are deferred messages, deferring " << *m << dendl;
546 waiting_for_nolaggy.push_back(m);
547 } else {
548 if (!handle_deferrable_message(m)) {
549 dout(0) << "unrecognized message " << *m << dendl;
550 return false;
551 }
552
553 heartbeat_reset();
554 }
555
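// If we are nested inside another dispatch (e.g. via retry_dispatch), skip
// the housekeeping below and let the outermost frame do it.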
556 if (dispatch_depth > 1)
557 return true;
558
559 // finish any triggered contexts
560 _advance_queues();
561
562 if (beacon.is_laggy()) {
563 // We've gone laggy during dispatch, don't do any
564 // more housekeeping
565 return true;
566 }
567
568 // done with all client replayed requests?
569 if (is_clientreplay() &&
570 mdcache->is_open() &&
571 replay_queue.empty() &&
572 beacon.get_want_state() == MDSMap::STATE_CLIENTREPLAY) {
573 int num_requests = mdcache->get_num_client_requests();
574 dout(10) << " still have " << num_requests << " active replay requests" << dendl;
575 if (num_requests == 0)
576 clientreplay_done();
577 }
578
579 // hack: thrash exports
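// (debug/testing aid: this loop is a no-op unless mds_thrash_exports is set
// to a non-zero value in the configuration)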
580 static utime_t start;
581 utime_t now = ceph_clock_now();
582 if (start == utime_t())
583 start = now;
584 /*double el = now - start;
585 if (el > 30.0 &&
586 el < 60.0)*/
587 for (int i=0; i<g_conf->mds_thrash_exports; i++) {
588 set<mds_rank_t> s;
589 if (!is_active()) break;
590 mdsmap->get_mds_set(s, MDSMap::STATE_ACTIVE);
591 if (s.size() < 2 || CInode::count() < 10)
592 break; // need peers for this to work.
593 if (mdcache->migrator->get_num_exporting() > g_conf->mds_thrash_exports * 5 ||
594 mdcache->migrator->get_export_queue_size() > g_conf->mds_thrash_exports * 10)
595 break;
596
597 dout(7) << "mds thrashing exports pass " << (i+1) << "/" << g_conf->mds_thrash_exports << dendl;
598
599 // pick a random dir inode
600 CInode *in = mdcache->hack_pick_random_inode();
601
602 list<CDir*> ls;
603 in->get_dirfrags(ls);
604 if (!ls.empty()) { // must be an open dir.
605 list<CDir*>::iterator p = ls.begin();
606 int n = rand() % ls.size();
607 while (n--)
608 ++p;
609 CDir *dir = *p;
610 if (!dir->get_parent_dir()) continue; // must be linked.
611 if (!dir->is_auth()) continue; // must be auth.
612
613 mds_rank_t dest;
614 do {
615 int k = rand() % s.size();
616 set<mds_rank_t>::iterator p = s.begin();
617 while (k--) ++p;
618 dest = *p;
619 } while (dest == whoami);
620 mdcache->migrator->export_dir_nicely(dir,dest);
621 }
622 }
623 // hack: thrash fragments
624 for (int i=0; i<g_conf->mds_thrash_fragments; i++) {
625 if (!is_active()) break;
626 if (mdcache->get_num_fragmenting_dirs() > 5 * g_conf->mds_thrash_fragments) break;
627 dout(7) << "mds thrashing fragments pass " << (i+1) << "/" << g_conf->mds_thrash_fragments << dendl;
628
629 // pick a random dir inode
630 CInode *in = mdcache->hack_pick_random_inode();
631
632 list<CDir*> ls;
633 in->get_dirfrags(ls);
634 if (ls.empty()) continue; // must be an open dir.
635 CDir *dir = ls.front();
636 if (!dir->get_parent_dir()) continue; // must be linked.
637 if (!dir->is_auth()) continue; // must be auth.
638 frag_t fg = dir->get_frag();
639 if (mdsmap->allows_dirfrags()) {
640 if ((fg == frag_t() || (rand() % (1 << fg.bits()) == 0))) {
641 mdcache->split_dir(dir, 1);
642 } else {
643 balancer->queue_merge(dir);
644 }
645 }
646 }
647
648 // hack: force hash root?
649 /*
650 if (false &&
651 mdcache->get_root() &&
652 mdcache->get_root()->dir &&
653 !(mdcache->get_root()->dir->is_hashed() ||
654 mdcache->get_root()->dir->is_hashing())) {
655 dout(0) << "hashing root" << dendl;
656 mdcache->migrator->hash_dir(mdcache->get_root()->dir);
657 }
658 */
659
660 if (mlogger) {
661 mlogger->set(l_mdm_ino, CInode::count());
662 mlogger->set(l_mdm_dir, CDir::count());
663 mlogger->set(l_mdm_dn, CDentry::count());
664 mlogger->set(l_mdm_cap, Capability::count());
665
666 mlogger->set(l_mdm_inoa, CInode::increments());
667 mlogger->set(l_mdm_inos, CInode::decrements());
668 mlogger->set(l_mdm_dira, CDir::increments());
669 mlogger->set(l_mdm_dirs, CDir::decrements());
670 mlogger->set(l_mdm_dna, CDentry::increments());
671 mlogger->set(l_mdm_dns, CDentry::decrements());
672 mlogger->set(l_mdm_capa, Capability::increments());
673 mlogger->set(l_mdm_caps, Capability::decrements());
674
675 mlogger->set(l_mdm_buf, buffer::get_total_alloc());
676 }
677
678 return true;
679 }
680
681 /*
682 * lower priority messages we defer if we seem laggy
683 */
684 bool MDSRank::handle_deferrable_message(Message *m)
685 {
686 int port = m->get_type() & 0xff00;
687
688 switch (port) {
689 case MDS_PORT_CACHE:
690 ALLOW_MESSAGES_FROM(CEPH_ENTITY_TYPE_MDS);
691 mdcache->dispatch(m);
692 break;
693
694 case MDS_PORT_MIGRATOR:
695 ALLOW_MESSAGES_FROM(CEPH_ENTITY_TYPE_MDS);
696 mdcache->migrator->dispatch(m);
697 break;
698
699 default:
700 switch (m->get_type()) {
701 // SERVER
702 case CEPH_MSG_CLIENT_SESSION:
703 case CEPH_MSG_CLIENT_RECONNECT:
704 ALLOW_MESSAGES_FROM(CEPH_ENTITY_TYPE_CLIENT);
705 // fall-thru
706 case CEPH_MSG_CLIENT_REQUEST:
707 server->dispatch(m);
708 break;
709 case MSG_MDS_SLAVE_REQUEST:
710 ALLOW_MESSAGES_FROM(CEPH_ENTITY_TYPE_MDS);
711 server->dispatch(m);
712 break;
713
714 case MSG_MDS_HEARTBEAT:
715 ALLOW_MESSAGES_FROM(CEPH_ENTITY_TYPE_MDS);
716 balancer->proc_message(m);
717 break;
718
719 case MSG_MDS_TABLE_REQUEST:
720 ALLOW_MESSAGES_FROM(CEPH_ENTITY_TYPE_MDS);
721 {
722 MMDSTableRequest *req = static_cast<MMDSTableRequest*>(m);
723 if (req->op < 0) {
724 MDSTableClient *client = get_table_client(req->table);
725 client->handle_request(req);
726 } else {
727 MDSTableServer *server = get_table_server(req->table);
728 server->handle_request(req);
729 }
730 }
731 break;
732
733 case MSG_MDS_LOCK:
734 case MSG_MDS_INODEFILECAPS:
735 ALLOW_MESSAGES_FROM(CEPH_ENTITY_TYPE_MDS);
736 locker->dispatch(m);
737 break;
738
739 case CEPH_MSG_CLIENT_CAPS:
740 case CEPH_MSG_CLIENT_CAPRELEASE:
741 case CEPH_MSG_CLIENT_LEASE:
742 ALLOW_MESSAGES_FROM(CEPH_ENTITY_TYPE_CLIENT);
743 locker->dispatch(m);
744 break;
745
746 default:
747 return false;
748 }
749 }
750
751 return true;
752 }
753
754 /**
755 * Advance finished_queue and waiting_for_nolaggy.
756 *
757 * Usually drain both queues, but may not drain waiting_for_nolaggy
758 * if beacon is currently laggy.
759 */
760 void MDSRank::_advance_queues()
761 {
762 assert(mds_lock.is_locked_by_me());
763
764 while (!finished_queue.empty()) {
765 dout(7) << "mds has " << finished_queue.size() << " queued contexts" << dendl;
766 dout(10) << finished_queue << dendl;
767 list<MDSInternalContextBase*> ls;
768 ls.swap(finished_queue);
769 while (!ls.empty()) {
770 dout(10) << " finish " << ls.front() << dendl;
771 ls.front()->complete(0);
772 ls.pop_front();
773
774 heartbeat_reset();
775 }
776 }
777
778 while (!waiting_for_nolaggy.empty()) {
779 // stop if we're laggy now!
780 if (beacon.is_laggy())
781 break;
782
783 Message *old = waiting_for_nolaggy.front();
784 waiting_for_nolaggy.pop_front();
785
786 if (is_stale_message(old)) {
787 old->put();
788 } else {
789 dout(7) << " processing laggy deferred " << *old << dendl;
790 if (!handle_deferrable_message(old)) {
791 dout(0) << "unrecognized message " << *old << dendl;
792 old->put();
793 }
794 }
795
796 heartbeat_reset();
797 }
798 }
799
800 /**
801 * Call this when you take mds_lock, or periodically if you're going to
802 * hold the lock for a long time (e.g. iterating over clients/inodes)
803 */
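// For example, the loops in _advance_queues() above call heartbeat_reset()
// once per completed context/message while holding mds_lock.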
804 void MDSRank::heartbeat_reset()
805 {
806 // Any thread might jump into mds_lock and call us immediately
807 // after a call to suicide() completes, in which case MDSRank::hb
808 // has been freed and we are a no-op.
809 if (!hb) {
810 assert(stopping);
811 return;
812 }
813
814 // NB not enabling suicide grace, because the mon takes care of killing us
815 // (by blacklisting us) when we fail to send beacons, and it's simpler to
816 // only have one way of dying.
817 g_ceph_context->get_heartbeat_map()->reset_timeout(hb, g_conf->mds_beacon_grace, 0);
818 }
819
820 bool MDSRank::is_stale_message(Message *m) const
821 {
822 // from bad mds?
823 if (m->get_source().is_mds()) {
824 mds_rank_t from = mds_rank_t(m->get_source().num());
825 if (!mdsmap->have_inst(from) ||
826 mdsmap->get_inst(from) != m->get_source_inst() ||
827 mdsmap->is_down(from)) {
828 // bogus mds?
829 if (m->get_type() == CEPH_MSG_MDS_MAP) {
830 dout(5) << "got " << *m << " from old/bad/imposter mds " << m->get_source()
831 << ", but it's an mdsmap, looking at it" << dendl;
832 } else if (m->get_type() == MSG_MDS_CACHEEXPIRE &&
833 mdsmap->get_inst(from) == m->get_source_inst()) {
834 dout(5) << "got " << *m << " from down mds " << m->get_source()
835 << ", but it's a cache_expire, looking at it" << dendl;
836 } else {
837 dout(5) << "got " << *m << " from down/old/bad/imposter mds " << m->get_source()
838 << ", dropping" << dendl;
839 return true;
840 }
841 }
842 }
843 return false;
844 }
845
846
847 void MDSRank::send_message(Message *m, Connection *c)
848 {
849 assert(c);
850 c->send_message(m);
851 }
852
853
854 void MDSRank::send_message_mds(Message *m, mds_rank_t mds)
855 {
856 if (!mdsmap->is_up(mds)) {
857 dout(10) << "send_message_mds mds." << mds << " not up, dropping " << *m << dendl;
858 m->put();
859 return;
860 }
861
862 // send mdsmap first?
863 if (mds != whoami && peer_mdsmap_epoch[mds] < mdsmap->get_epoch()) {
864 messenger->send_message(new MMDSMap(monc->get_fsid(), mdsmap),
865 mdsmap->get_inst(mds));
866 peer_mdsmap_epoch[mds] = mdsmap->get_epoch();
867 }
868
869 // send message
870 messenger->send_message(m, mdsmap->get_inst(mds));
871 }
872
873 void MDSRank::forward_message_mds(Message *m, mds_rank_t mds)
874 {
875 assert(mds != whoami);
876
877 // client request?
878 if (m->get_type() == CEPH_MSG_CLIENT_REQUEST &&
879 (static_cast<MClientRequest*>(m))->get_source().is_client()) {
880 MClientRequest *creq = static_cast<MClientRequest*>(m);
881 creq->inc_num_fwd(); // inc forward counter
882
883 /*
884 * don't actually forward if non-idempotent!
885 * client has to do it. although the MDS will ignore duplicate requests,
886 * the affected metadata may migrate, in which case the new authority
887 * won't have the metareq_id in the completed request map.
888 */
889 // NEW: always make the client resend!
890 bool client_must_resend = true; //!creq->can_forward();
891
892 // tell the client where it should go
893 messenger->send_message(new MClientRequestForward(creq->get_tid(), mds, creq->get_num_fwd(),
894 client_must_resend),
895 creq->get_source_inst());
896
897 if (client_must_resend) {
898 m->put();
899 return;
900 }
901 }
902
903 // these are the only types of messages we should be 'forwarding'; they
904 // explicitly encode their source mds, which gets clobbered when we resend
905 // them here.
906 assert(m->get_type() == MSG_MDS_DIRUPDATE ||
907 m->get_type() == MSG_MDS_EXPORTDIRDISCOVER);
908
909 // send mdsmap first?
910 if (peer_mdsmap_epoch[mds] < mdsmap->get_epoch()) {
911 messenger->send_message(new MMDSMap(monc->get_fsid(), mdsmap),
912 mdsmap->get_inst(mds));
913 peer_mdsmap_epoch[mds] = mdsmap->get_epoch();
914 }
915
916 messenger->send_message(m, mdsmap->get_inst(mds));
917 }
918
919
920
921 void MDSRank::send_message_client_counted(Message *m, client_t client)
922 {
923 Session *session = sessionmap.get_session(entity_name_t::CLIENT(client.v));
924 if (session) {
925 send_message_client_counted(m, session);
926 } else {
927 dout(10) << "send_message_client_counted no session for client." << client << " " << *m << dendl;
928 }
929 }
930
931 void MDSRank::send_message_client_counted(Message *m, Connection *connection)
932 {
933 Session *session = static_cast<Session *>(connection->get_priv());
934 if (session) {
935 session->put(); // do not carry ref
936 send_message_client_counted(m, session);
937 } else {
938 dout(10) << "send_message_client_counted has no session for " << m->get_source_inst() << dendl;
939 // another Connection took over the Session
940 }
941 }
942
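// Unlike send_message_client(), this variant bumps the session's push
// sequence (inc_push_seq) before sending; if the connection is not yet open
// the message is parked on preopen_out_queue.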
943 void MDSRank::send_message_client_counted(Message *m, Session *session)
944 {
945 version_t seq = session->inc_push_seq();
946 dout(10) << "send_message_client_counted " << session->info.inst.name << " seq "
947 << seq << " " << *m << dendl;
948 if (session->connection) {
949 session->connection->send_message(m);
950 } else {
951 session->preopen_out_queue.push_back(m);
952 }
953 }
954
955 void MDSRank::send_message_client(Message *m, Session *session)
956 {
957 dout(10) << "send_message_client " << session->info.inst << " " << *m << dendl;
958 if (session->connection) {
959 session->connection->send_message(m);
960 } else {
961 session->preopen_out_queue.push_back(m);
962 }
963 }
964
965 /**
966 * This is used whenever a RADOS operation has been cancelled
967 * or a RADOS client has been blacklisted, to cause the MDS and
968 * any clients to wait for this OSD epoch before using any new caps.
969 *
970 * See doc/cephfs/eviction
971 */
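// The barrier is raised from the "osdmap barrier" asok command and when this
// rank goes active (see handle_mds_map), using the Objecter's current OSDMap
// epoch at that point.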
972 void MDSRank::set_osd_epoch_barrier(epoch_t e)
973 {
974 dout(4) << __func__ << ": epoch=" << e << dendl;
975 osd_epoch_barrier = e;
976 }
977
978 /**
979 * FIXME ugly call up to MDS daemon until the dispatching is separated out
980 */
981 void MDSRank::retry_dispatch(Message *m)
982 {
983 inc_dispatch_depth();
984 _dispatch(m, false);
985 dec_dispatch_depth();
986 }
987
988 utime_t MDSRank::get_laggy_until() const
989 {
990 return beacon.get_laggy_until();
991 }
992
993 bool MDSRank::is_daemon_stopping() const
994 {
995 return stopping;
996 }
997
998 void MDSRank::request_state(MDSMap::DaemonState s)
999 {
1000 dout(3) << "request_state " << ceph_mds_state_name(s) << dendl;
1001 beacon.set_want_state(mdsmap, s);
1002 beacon.send();
1003 }
1004
1005
1006 class C_MDS_BootStart : public MDSInternalContext {
1007 MDSRank::BootStep nextstep;
1008 public:
1009 C_MDS_BootStart(MDSRank *m, MDSRank::BootStep n)
1010 : MDSInternalContext(m), nextstep(n) {}
1011 void finish(int r) override {
1012 mds->boot_start(nextstep, r);
1013 }
1014 };
1015
1016
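// Boot/replay state machine: MDS_BOOT_INITIAL (load inotable, sessionmap,
// mds log and, on the tableserver, the snap table) -> MDS_BOOT_OPEN_ROOT
// (base inodes and purge queue) -> MDS_BOOT_PREPARE_LOG (replay or append)
// -> MDS_BOOT_REPLAY_DONE.  Each step's gather re-enters boot_start() via
// C_MDS_BootStart with the next step.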
1017 void MDSRank::boot_start(BootStep step, int r)
1018 {
1019 // Handle errors from previous step
1020 if (r < 0) {
1021 if (is_standby_replay() && (r == -EAGAIN)) {
1022 dout(0) << "boot_start encountered an error EAGAIN"
1023 << ", respawning since we fell behind journal" << dendl;
1024 respawn();
1025 } else if (r == -EINVAL || r == -ENOENT) {
1026 // Invalid or absent data, indicates damaged on-disk structures
1027 clog->error() << "Error loading MDS rank " << whoami << ": "
1028 << cpp_strerror(r);
1029 damaged();
1030 assert(r == 0); // Unreachable, damaged() calls respawn()
1031 } else {
1032 // Completely unexpected error, give up and die
1033 dout(0) << "boot_start encountered an error, failing" << dendl;
1034 suicide();
1035 return;
1036 }
1037 }
1038
1039 assert(is_starting() || is_any_replay());
1040
1041 switch(step) {
1042 case MDS_BOOT_INITIAL:
1043 {
1044 mdcache->init_layouts();
1045
1046 MDSGatherBuilder gather(g_ceph_context,
1047 new C_MDS_BootStart(this, MDS_BOOT_OPEN_ROOT));
1048 dout(2) << "boot_start " << step << ": opening inotable" << dendl;
1049 inotable->set_rank(whoami);
1050 inotable->load(gather.new_sub());
1051
1052 dout(2) << "boot_start " << step << ": opening sessionmap" << dendl;
1053 sessionmap.set_rank(whoami);
1054 sessionmap.load(gather.new_sub());
1055
1056 dout(2) << "boot_start " << step << ": opening mds log" << dendl;
1057 mdlog->open(gather.new_sub());
1058
1059 if (mdsmap->get_tableserver() == whoami) {
1060 dout(2) << "boot_start " << step << ": opening snap table" << dendl;
1061 snapserver->set_rank(whoami);
1062 snapserver->load(gather.new_sub());
1063 }
1064
1065 gather.activate();
1066 }
1067 break;
1068 case MDS_BOOT_OPEN_ROOT:
1069 {
1070 dout(2) << "boot_start " << step << ": loading/discovering base inodes" << dendl;
1071
1072 MDSGatherBuilder gather(g_ceph_context,
1073 new C_MDS_BootStart(this, MDS_BOOT_PREPARE_LOG));
1074
1075 mdcache->open_mydir_inode(gather.new_sub());
1076
1077 purge_queue.open(new C_IO_Wrapper(this, gather.new_sub()));
1078
1079 if (is_starting() ||
1080 whoami == mdsmap->get_root()) { // load root inode off disk if we are auth
1081 mdcache->open_root_inode(gather.new_sub());
1082 } else {
1083 // replay. make up fake root inode to start with
1084 (void)mdcache->create_root_inode();
1085 }
1086 gather.activate();
1087 }
1088 break;
1089 case MDS_BOOT_PREPARE_LOG:
1090 if (is_any_replay()) {
1091 dout(2) << "boot_start " << step << ": replaying mds log" << dendl;
1092 mdlog->replay(new C_MDS_BootStart(this, MDS_BOOT_REPLAY_DONE));
1093 } else {
1094 dout(2) << "boot_start " << step << ": positioning at end of old mds log" << dendl;
1095 mdlog->append();
1096 starting_done();
1097 }
1098 break;
1099 case MDS_BOOT_REPLAY_DONE:
1100 assert(is_any_replay());
1101
1102 // Sessiontable and inotable should be in sync after replay, validate
1103 // that they are consistent.
1104 validate_sessions();
1105
1106 replay_done();
1107 break;
1108 }
1109 }
1110
1111 void MDSRank::validate_sessions()
1112 {
1113 assert(mds_lock.is_locked_by_me());
1114 std::vector<Session*> victims;
1115
1116 // Identify any sessions which have state inconsistent with other,
1117 // after they have been loaded from rados during startup.
1118 // Mitigate bugs like: http://tracker.ceph.com/issues/16842
1119 const auto &sessions = sessionmap.get_sessions();
1120 for (const auto &i : sessions) {
1121 Session *session = i.second;
1122 interval_set<inodeno_t> badones;
1123 if (inotable->intersects_free(session->info.prealloc_inos, &badones)) {
1124 clog->error() << "Client session loaded with invalid preallocated "
1125 "inodes, evicting session " << *session;
1126
1127 // Make the session consistent with inotable so that it can
1128 // be cleanly torn down
1129 session->info.prealloc_inos.subtract(badones);
1130
1131 victims.push_back(session);
1132 }
1133 }
1134
1135 for (const auto &session: victims) {
1136 server->kill_session(session, nullptr);
1137 }
1138 }
1139
1140 void MDSRank::starting_done()
1141 {
1142 dout(3) << "starting_done" << dendl;
1143 assert(is_starting());
1144 request_state(MDSMap::STATE_ACTIVE);
1145
1146 mdcache->open_root();
1147
1148 // start new segment
1149 mdlog->start_new_segment();
1150 }
1151
1152
1153 void MDSRank::calc_recovery_set()
1154 {
1155 // initialize gather sets
1156 set<mds_rank_t> rs;
1157 mdsmap->get_recovery_mds_set(rs);
1158 rs.erase(whoami);
1159 mdcache->set_recovery_set(rs);
1160
1161 dout(1) << " recovery set is " << rs << dendl;
1162 }
1163
1164
1165 void MDSRank::replay_start()
1166 {
1167 dout(1) << "replay_start" << dendl;
1168
1169 if (is_standby_replay())
1170 standby_replaying = true;
1171
1172 calc_recovery_set();
1173
1174 // Check if we need to wait for a newer OSD map before starting
1175 Context *fin = new C_IO_Wrapper(this, new C_MDS_BootStart(this, MDS_BOOT_INITIAL));
1176 bool const ready = objecter->wait_for_map(
1177 mdsmap->get_last_failure_osd_epoch(),
1178 fin);
1179
1180 if (ready) {
1181 delete fin;
1182 boot_start();
1183 } else {
1184 dout(1) << " waiting for osdmap " << mdsmap->get_last_failure_osd_epoch()
1185 << " (which blacklists prior instance)" << dendl;
1186 }
1187 }
1188
1189
1190 class MDSRank::C_MDS_StandbyReplayRestartFinish : public MDSIOContext {
1191 uint64_t old_read_pos;
1192 public:
1193 C_MDS_StandbyReplayRestartFinish(MDSRank *mds_, uint64_t old_read_pos_) :
1194 MDSIOContext(mds_), old_read_pos(old_read_pos_) {}
1195 void finish(int r) override {
1196 mds->_standby_replay_restart_finish(r, old_read_pos);
1197 }
1198 };
1199
1200 void MDSRank::_standby_replay_restart_finish(int r, uint64_t old_read_pos)
1201 {
1202 if (old_read_pos < mdlog->get_journaler()->get_trimmed_pos()) {
1203 dout(0) << "standby MDS fell behind active MDS journal's expire_pos, restarting" << dendl;
1204 respawn(); /* we're too far back, and this is easier than
1205 trying to reset everything in the cache, etc */
1206 } else {
1207 mdlog->standby_trim_segments();
1208 boot_start(MDS_BOOT_PREPARE_LOG, r);
1209 }
1210 }
1211
1212 inline void MDSRank::standby_replay_restart()
1213 {
1214 if (standby_replaying) {
1215 /* Go around for another pass of replaying in standby */
1216 dout(4) << "standby_replay_restart (as standby)" << dendl;
1217 mdlog->get_journaler()->reread_head_and_probe(
1218 new C_MDS_StandbyReplayRestartFinish(
1219 this,
1220 mdlog->get_journaler()->get_read_pos()));
1221 } else {
1222 /* We are transitioning out of standby: wait for OSD map update
1223 before making final pass */
1224 dout(1) << "standby_replay_restart (final takeover pass)" << dendl;
1225 Context *fin = new C_IO_Wrapper(this, new C_MDS_BootStart(this, MDS_BOOT_PREPARE_LOG));
1226 bool const ready =
1227 objecter->wait_for_map(mdsmap->get_last_failure_osd_epoch(), fin);
1228 if (ready) {
1229 delete fin;
1230 mdlog->get_journaler()->reread_head_and_probe(
1231 new C_MDS_StandbyReplayRestartFinish(
1232 this,
1233 mdlog->get_journaler()->get_read_pos()));
1234 } else {
1235 dout(1) << " waiting for osdmap " << mdsmap->get_last_failure_osd_epoch()
1236 << " (which blacklists prior instance)" << dendl;
1237 }
1238 }
1239 }
1240
1241 class MDSRank::C_MDS_StandbyReplayRestart : public MDSInternalContext {
1242 public:
1243 explicit C_MDS_StandbyReplayRestart(MDSRank *m) : MDSInternalContext(m) {}
1244 void finish(int r) override {
1245 assert(!r);
1246 mds->standby_replay_restart();
1247 }
1248 };
1249
1250 void MDSRank::replay_done()
1251 {
1252 dout(1) << "replay_done" << (standby_replaying ? " (as standby)" : "") << dendl;
1253
1254 if (is_standby_replay()) {
1255 // The replay was done in standby state, and we are still in that state
1256 assert(standby_replaying);
1257 dout(10) << "setting replay timer" << dendl;
1258 timer.add_event_after(g_conf->mds_replay_interval,
1259 new C_MDS_StandbyReplayRestart(this));
1260 return;
1261 } else if (standby_replaying) {
1262 // The replay was done in standby state, we have now _left_ that state
1263 dout(10) << " last replay pass was as a standby; making final pass" << dendl;
1264 standby_replaying = false;
1265 standby_replay_restart();
1266 return;
1267 } else {
1268 // Replay is complete, journal read should be up to date
1269 assert(mdlog->get_journaler()->get_read_pos() == mdlog->get_journaler()->get_write_pos());
1270 assert(!is_standby_replay());
1271
1272 // Reformat and come back here
1273 if (mdlog->get_journaler()->get_stream_format() < g_conf->mds_journal_format) {
1274 dout(4) << "reformatting journal on standbyreplay->replay transition" << dendl;
1275 mdlog->reopen(new C_MDS_BootStart(this, MDS_BOOT_REPLAY_DONE));
1276 return;
1277 }
1278 }
1279
1280 dout(1) << "making mds journal writeable" << dendl;
1281 mdlog->get_journaler()->set_writeable();
1282 mdlog->get_journaler()->trim_tail();
1283
1284 if (g_conf->mds_wipe_sessions) {
1285 dout(1) << "wiping out client sessions" << dendl;
1286 sessionmap.wipe();
1287 sessionmap.save(new C_MDSInternalNoop);
1288 }
1289 if (g_conf->mds_wipe_ino_prealloc) {
1290 dout(1) << "wiping out ino prealloc from sessions" << dendl;
1291 sessionmap.wipe_ino_prealloc();
1292 sessionmap.save(new C_MDSInternalNoop);
1293 }
1294 if (g_conf->mds_skip_ino) {
1295 inodeno_t i = g_conf->mds_skip_ino;
1296 dout(1) << "skipping " << i << " inodes" << dendl;
1297 inotable->skip_inos(i);
1298 inotable->save(new C_MDSInternalNoop);
1299 }
1300
1301 if (mdsmap->get_num_in_mds() == 1 &&
1302 mdsmap->get_num_failed_mds() == 0) { // just me!
1303 dout(2) << "i am alone, moving to state reconnect" << dendl;
1304 request_state(MDSMap::STATE_RECONNECT);
1305 } else {
1306 dout(2) << "i am not alone, moving to state resolve" << dendl;
1307 request_state(MDSMap::STATE_RESOLVE);
1308 }
1309 }
1310
1311 void MDSRank::reopen_log()
1312 {
1313 dout(1) << "reopen_log" << dendl;
1314 mdcache->rollback_uncommitted_fragments();
1315 }
1316
1317
1318 void MDSRank::resolve_start()
1319 {
1320 dout(1) << "resolve_start" << dendl;
1321
1322 reopen_log();
1323
1324 mdcache->resolve_start(new C_MDS_VoidFn(this, &MDSRank::resolve_done));
1325 finish_contexts(g_ceph_context, waiting_for_resolve);
1326 }
1327 void MDSRank::resolve_done()
1328 {
1329 dout(1) << "resolve_done" << dendl;
1330 request_state(MDSMap::STATE_RECONNECT);
1331 }
1332
1333 void MDSRank::reconnect_start()
1334 {
1335 dout(1) << "reconnect_start" << dendl;
1336
1337 if (last_state == MDSMap::STATE_REPLAY) {
1338 reopen_log();
1339 }
1340
1341 server->reconnect_clients(new C_MDS_VoidFn(this, &MDSRank::reconnect_done));
1342 finish_contexts(g_ceph_context, waiting_for_reconnect);
1343 }
1344 void MDSRank::reconnect_done()
1345 {
1346 dout(1) << "reconnect_done" << dendl;
1347 request_state(MDSMap::STATE_REJOIN); // move to rejoin state
1348 }
1349
1350 void MDSRank::rejoin_joint_start()
1351 {
1352 dout(1) << "rejoin_joint_start" << dendl;
1353 mdcache->rejoin_send_rejoins();
1354 }
1355 void MDSRank::rejoin_start()
1356 {
1357 dout(1) << "rejoin_start" << dendl;
1358 mdcache->rejoin_start(new C_MDS_VoidFn(this, &MDSRank::rejoin_done));
1359 }
1360 void MDSRank::rejoin_done()
1361 {
1362 dout(1) << "rejoin_done" << dendl;
1363 mdcache->show_subtrees();
1364 mdcache->show_cache();
1365
1366 // funny case: is our cache empty? no subtrees?
1367 if (!mdcache->is_subtrees()) {
1368 if (whoami == 0) {
1369 // The root should always have a subtree!
1370 clog->error() << "No subtrees found for root MDS rank!";
1371 damaged();
1372 assert(mdcache->is_subtrees());
1373 } else {
1374 dout(1) << " empty cache, no subtrees, leaving cluster" << dendl;
1375 request_state(MDSMap::STATE_STOPPED);
1376 }
1377 return;
1378 }
1379
1380 if (replay_queue.empty())
1381 request_state(MDSMap::STATE_ACTIVE);
1382 else
1383 request_state(MDSMap::STATE_CLIENTREPLAY);
1384 }
1385
1386 void MDSRank::clientreplay_start()
1387 {
1388 dout(1) << "clientreplay_start" << dendl;
1389 finish_contexts(g_ceph_context, waiting_for_replay); // kick waiters
1390 mdcache->start_files_to_recover();
1391 queue_one_replay();
1392 }
1393
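// Returns true if a replayed client request was queued; returns false when
// the replay queue is empty, in which case we wait for the log to be safe
// and then move on via clientreplay_done().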
1394 bool MDSRank::queue_one_replay()
1395 {
1396 if (replay_queue.empty()) {
1397 mdlog->wait_for_safe(new C_MDS_VoidFn(this, &MDSRank::clientreplay_done));
1398 return false;
1399 }
1400 queue_waiter(replay_queue.front());
1401 replay_queue.pop_front();
1402 return true;
1403 }
1404
1405 void MDSRank::clientreplay_done()
1406 {
1407 dout(1) << "clientreplay_done" << dendl;
1408 request_state(MDSMap::STATE_ACTIVE);
1409 }
1410
1411 void MDSRank::active_start()
1412 {
1413 dout(1) << "active_start" << dendl;
1414
1415 if (last_state == MDSMap::STATE_CREATING) {
1416 mdcache->open_root();
1417 }
1418
1419 mdcache->clean_open_file_lists();
1420 mdcache->export_remaining_imported_caps();
1421 finish_contexts(g_ceph_context, waiting_for_replay); // kick waiters
1422 mdcache->start_files_to_recover();
1423
1424 mdcache->reissue_all_caps();
1425 mdcache->activate_stray_manager();
1426
1427 finish_contexts(g_ceph_context, waiting_for_active); // kick waiters
1428 }
1429
1430 void MDSRank::recovery_done(int oldstate)
1431 {
1432 dout(1) << "recovery_done -- successful recovery!" << dendl;
1433 assert(is_clientreplay() || is_active());
1434
1435 // kick snaptable (resend AGREEs)
1436 if (mdsmap->get_tableserver() == whoami) {
1437 set<mds_rank_t> active;
1438 mdsmap->get_clientreplay_or_active_or_stopping_mds_set(active);
1439 snapserver->finish_recovery(active);
1440 }
1441
1442 if (oldstate == MDSMap::STATE_CREATING)
1443 return;
1444
1445 mdcache->start_recovered_truncates();
1446 mdcache->do_file_recover();
1447
1448 // tell connected clients
1449 //bcast_mds_map(); // not anymore, they get this from the monitor
1450
1451 mdcache->populate_mydir();
1452 }
1453
1454 void MDSRank::creating_done()
1455 {
1456 dout(1) << "creating_done" << dendl;
1457 request_state(MDSMap::STATE_ACTIVE);
1458 }
1459
1460 void MDSRank::boot_create()
1461 {
1462 dout(3) << "boot_create" << dendl;
1463
1464 MDSGatherBuilder fin(g_ceph_context, new C_MDS_VoidFn(this, &MDSRank::creating_done));
1465
1466 mdcache->init_layouts();
1467
1468 snapserver->set_rank(whoami);
1469 inotable->set_rank(whoami);
1470 sessionmap.set_rank(whoami);
1471
1472 // start with a fresh journal
1473 dout(10) << "boot_create creating fresh journal" << dendl;
1474 mdlog->create(fin.new_sub());
1475
1476 // open new journal segment, but do not journal subtree map (yet)
1477 mdlog->prepare_new_segment();
1478
1479 if (whoami == mdsmap->get_root()) {
1480 dout(3) << "boot_create creating fresh hierarchy" << dendl;
1481 mdcache->create_empty_hierarchy(fin.get());
1482 }
1483
1484 dout(3) << "boot_create creating mydir hierarchy" << dendl;
1485 mdcache->create_mydir_hierarchy(fin.get());
1486
1487 // fixme: fake out inotable (reset, pretend loaded)
1488 dout(10) << "boot_create creating fresh inotable table" << dendl;
1489 inotable->reset();
1490 inotable->save(fin.new_sub());
1491
1492 // write empty sessionmap
1493 sessionmap.save(fin.new_sub());
1494
1495 // Create empty purge queue
1496 purge_queue.create(new C_IO_Wrapper(this, fin.new_sub()));
1497
1498 // initialize tables
1499 if (mdsmap->get_tableserver() == whoami) {
1500 dout(10) << "boot_create creating fresh snaptable" << dendl;
1501 snapserver->reset();
1502 snapserver->save(fin.new_sub());
1503 }
1504
1505 assert(g_conf->mds_kill_create_at != 1);
1506
1507 // ok now journal it
1508 mdlog->journal_segment_subtree_map(fin.new_sub());
1509 mdlog->flush();
1510
1511 fin.activate();
1512 }
1513
1514 void MDSRank::stopping_start()
1515 {
1516 dout(2) << "stopping_start" << dendl;
1517
1518 if (mdsmap->get_num_in_mds() == 1 && !sessionmap.empty()) {
1519 // we're the only mds up!
1520 dout(0) << "we are the last MDS, and have mounted clients: we cannot flush our journal. suicide!" << dendl;
1521 suicide();
1522 }
1523
1524 mdcache->shutdown_start();
1525 }
1526
1527 void MDSRank::stopping_done()
1528 {
1529 dout(2) << "stopping_done" << dendl;
1530
1531 // tell monitor we shut down cleanly.
1532 request_state(MDSMap::STATE_STOPPED);
1533 }
1534
1535 void MDSRankDispatcher::handle_mds_map(
1536 MMDSMap *m,
1537 MDSMap *oldmap)
1538 {
1539 // I am only to be passed MDSMaps in which I hold a rank
1540 assert(whoami != MDS_RANK_NONE);
1541
1542 MDSMap::DaemonState oldstate = state;
1543 mds_gid_t mds_gid = mds_gid_t(monc->get_global_id());
1544 state = mdsmap->get_state_gid(mds_gid);
1545 if (state != oldstate) {
1546 last_state = oldstate;
1547 incarnation = mdsmap->get_inc_gid(mds_gid);
1548 }
1549
1550 version_t epoch = m->get_epoch();
1551
1552 // note source's map version
1553 if (m->get_source().is_mds() &&
1554 peer_mdsmap_epoch[mds_rank_t(m->get_source().num())] < epoch) {
1555 dout(15) << " peer " << m->get_source()
1556 << " has mdsmap epoch >= " << epoch
1557 << dendl;
1558 peer_mdsmap_epoch[mds_rank_t(m->get_source().num())] = epoch;
1559 }
1560
1561 // Validate state transitions while I hold a rank
1562 if (!MDSMap::state_transition_valid(oldstate, state)) {
1563 derr << "Invalid state transition " << ceph_mds_state_name(oldstate)
1564 << "->" << ceph_mds_state_name(state) << dendl;
1565 respawn();
1566 }
1567
1568 if (oldstate != state) {
1569 // update messenger.
1570 if (state == MDSMap::STATE_STANDBY_REPLAY) {
1571 dout(1) << "handle_mds_map i am now mds." << mds_gid << "." << incarnation
1572 << " replaying mds." << whoami << "." << incarnation << dendl;
1573 messenger->set_myname(entity_name_t::MDS(mds_gid));
1574 } else {
1575 dout(1) << "handle_mds_map i am now mds." << whoami << "." << incarnation << dendl;
1576 messenger->set_myname(entity_name_t::MDS(whoami));
1577 }
1578 }
1579
1580 // tell objecter my incarnation
1581 if (objecter->get_client_incarnation() != incarnation)
1582 objecter->set_client_incarnation(incarnation);
1583
1584 // for debug
1585 if (g_conf->mds_dump_cache_on_map)
1586 mdcache->dump_cache();
1587
1588 // did it change?
1589 if (oldstate != state) {
1590 dout(1) << "handle_mds_map state change "
1591 << ceph_mds_state_name(oldstate) << " --> "
1592 << ceph_mds_state_name(state) << dendl;
1593 beacon.set_want_state(mdsmap, state);
1594
1595 if (oldstate == MDSMap::STATE_STANDBY_REPLAY) {
1596 dout(10) << "Monitor activated us! Deactivating replay loop" << dendl;
1597 assert (state == MDSMap::STATE_REPLAY);
1598 } else {
1599 // did i just recover?
1600 if ((is_active() || is_clientreplay()) &&
1601 (oldstate == MDSMap::STATE_CREATING ||
1602 oldstate == MDSMap::STATE_REJOIN ||
1603 oldstate == MDSMap::STATE_RECONNECT))
1604 recovery_done(oldstate);
1605
1606 if (is_active()) {
1607 active_start();
1608 } else if (is_any_replay()) {
1609 replay_start();
1610 } else if (is_resolve()) {
1611 resolve_start();
1612 } else if (is_reconnect()) {
1613 reconnect_start();
1614 } else if (is_rejoin()) {
1615 rejoin_start();
1616 } else if (is_clientreplay()) {
1617 clientreplay_start();
1618 } else if (is_creating()) {
1619 boot_create();
1620 } else if (is_starting()) {
1621 boot_start();
1622 } else if (is_stopping()) {
1623 assert(oldstate == MDSMap::STATE_ACTIVE);
1624 stopping_start();
1625 }
1626 }
1627 }
1628
1629 // RESOLVE
1630 // is someone else newly resolving?
1631 if (is_resolve() || is_reconnect() || is_rejoin() ||
1632 is_clientreplay() || is_active() || is_stopping()) {
1633 if (!oldmap->is_resolving() && mdsmap->is_resolving()) {
1634 set<mds_rank_t> resolve;
1635 mdsmap->get_mds_set(resolve, MDSMap::STATE_RESOLVE);
1636 dout(10) << " resolve set is " << resolve << dendl;
1637 calc_recovery_set();
1638 mdcache->send_resolves();
1639 }
1640 }
1641
1642 // REJOIN
1643 // is everybody finally rejoining?
1644 if (is_rejoin() || is_clientreplay() || is_active() || is_stopping()) {
1645 // did we start?
1646 if (!oldmap->is_rejoining() && mdsmap->is_rejoining())
1647 rejoin_joint_start();
1648
1649 // did we finish?
1650 if (g_conf->mds_dump_cache_after_rejoin &&
1651 oldmap->is_rejoining() && !mdsmap->is_rejoining())
1652 mdcache->dump_cache(); // for DEBUG only
1653
1654 if (oldstate >= MDSMap::STATE_REJOIN) {
1655 // ACTIVE|CLIENTREPLAY|REJOIN => we can discover from them.
1656 set<mds_rank_t> olddis, dis;
1657 oldmap->get_mds_set(olddis, MDSMap::STATE_ACTIVE);
1658 oldmap->get_mds_set(olddis, MDSMap::STATE_CLIENTREPLAY);
1659 oldmap->get_mds_set(olddis, MDSMap::STATE_REJOIN);
1660 mdsmap->get_mds_set(dis, MDSMap::STATE_ACTIVE);
1661 mdsmap->get_mds_set(dis, MDSMap::STATE_CLIENTREPLAY);
1662 mdsmap->get_mds_set(dis, MDSMap::STATE_REJOIN);
1663 for (set<mds_rank_t>::iterator p = dis.begin(); p != dis.end(); ++p)
1664 if (*p != whoami && // not me
1665 olddis.count(*p) == 0) { // newly so?
1666 mdcache->kick_discovers(*p);
1667 mdcache->kick_open_ino_peers(*p);
1668 }
1669 }
1670 }
1671
1672 cluster_degraded = mdsmap->is_degraded();
1673 if (oldmap->is_degraded() && !cluster_degraded && state >= MDSMap::STATE_ACTIVE) {
1674 dout(1) << "cluster recovered." << dendl;
1675 auto it = waiting_for_active_peer.find(MDS_RANK_NONE);
1676 if (it != waiting_for_active_peer.end()) {
1677 queue_waiters(it->second);
1678 waiting_for_active_peer.erase(it);
1679 }
1680 }
1681
1682 // did someone go active?
1683 if (oldstate >= MDSMap::STATE_CLIENTREPLAY &&
1684 (is_clientreplay() || is_active() || is_stopping())) {
1685 set<mds_rank_t> oldactive, active;
1686 oldmap->get_mds_set(oldactive, MDSMap::STATE_ACTIVE);
1687 oldmap->get_mds_set(oldactive, MDSMap::STATE_CLIENTREPLAY);
1688 mdsmap->get_mds_set(active, MDSMap::STATE_ACTIVE);
1689 mdsmap->get_mds_set(active, MDSMap::STATE_CLIENTREPLAY);
1690 for (set<mds_rank_t>::iterator p = active.begin(); p != active.end(); ++p)
1691 if (*p != whoami && // not me
1692 oldactive.count(*p) == 0) // newly so?
1693 handle_mds_recovery(*p);
1694 }
1695
1696 // did someone fail?
1697 // new down?
1698 {
1699 set<mds_rank_t> olddown, down;
1700 oldmap->get_down_mds_set(&olddown);
1701 mdsmap->get_down_mds_set(&down);
1702 for (set<mds_rank_t>::iterator p = down.begin(); p != down.end(); ++p) {
1703 if (oldmap->have_inst(*p) && olddown.count(*p) == 0) {
1704 messenger->mark_down(oldmap->get_inst(*p).addr);
1705 handle_mds_failure(*p);
1706 }
1707 }
1708 }
1709
1710 // did someone fail?
1711 // did their addr/inst change?
1712 {
1713 set<mds_rank_t> up;
1714 mdsmap->get_up_mds_set(up);
1715 for (set<mds_rank_t>::iterator p = up.begin(); p != up.end(); ++p) {
1716 if (oldmap->have_inst(*p) &&
1717 oldmap->get_inst(*p) != mdsmap->get_inst(*p)) {
1718 messenger->mark_down(oldmap->get_inst(*p).addr);
1719 handle_mds_failure(*p);
1720 }
1721 }
1722 }
1723
1724 if (is_clientreplay() || is_active() || is_stopping()) {
1725 // did anyone stop?
1726 set<mds_rank_t> oldstopped, stopped;
1727 oldmap->get_stopped_mds_set(oldstopped);
1728 mdsmap->get_stopped_mds_set(stopped);
1729 for (set<mds_rank_t>::iterator p = stopped.begin(); p != stopped.end(); ++p)
1730 if (oldstopped.count(*p) == 0) // newly so?
1731 mdcache->migrator->handle_mds_failure_or_stop(*p);
1732 }
1733
1734 {
1735 map<epoch_t,list<MDSInternalContextBase*> >::iterator p = waiting_for_mdsmap.begin();
1736 while (p != waiting_for_mdsmap.end() && p->first <= mdsmap->get_epoch()) {
1737 list<MDSInternalContextBase*> ls;
1738 ls.swap(p->second);
1739 waiting_for_mdsmap.erase(p++);
1740 finish_contexts(g_ceph_context, ls);
1741 }
1742 }
1743
1744 if (is_active()) {
1745 // Before going active, set OSD epoch barrier to latest (so that
1746 // we don't risk handing out caps to clients with old OSD maps that
1747 // might not include barriers from the previous incarnation of this MDS)
1748 set_osd_epoch_barrier(objecter->with_osdmap(
1749 std::mem_fn(&OSDMap::get_epoch)));
1750 }
1751
1752 if (is_active()) {
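// Decide whether journal writes should carry the FADVISE_DONTNEED hint:
// keep the data cacheable only if some daemon is in standby-replay
// following this rank (by rank or by name).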
1753 bool found = false;
1754 MDSMap::mds_info_t info = mdsmap->get_info(whoami);
1755
1756 for (map<mds_gid_t,MDSMap::mds_info_t>::const_iterator p = mdsmap->get_mds_info().begin();
1757 p != mdsmap->get_mds_info().end();
1758 ++p) {
1759 if (p->second.state == MDSMap::STATE_STANDBY_REPLAY &&
1760 (p->second.standby_for_rank == whoami ||(info.name.length() && p->second.standby_for_name == info.name))) {
1761 found = true;
1762 break;
1763 }
1764 }
1765 if (found)
1766 mdlog->set_write_iohint(0);
1767 else
1768 mdlog->set_write_iohint(CEPH_OSD_OP_FLAG_FADVISE_DONTNEED);
1769 }
1770
1771 if (oldmap->get_max_mds() != mdsmap->get_max_mds()) {
1772 purge_queue.update_op_limit(*mdsmap);
1773 }
1774 }
1775
1776 void MDSRank::handle_mds_recovery(mds_rank_t who)
1777 {
1778 dout(5) << "handle_mds_recovery mds." << who << dendl;
1779
1780 mdcache->handle_mds_recovery(who);
1781
1782 if (mdsmap->get_tableserver() == whoami) {
1783 snapserver->handle_mds_recovery(who);
1784 }
1785
1786 queue_waiters(waiting_for_active_peer[who]);
1787 waiting_for_active_peer.erase(who);
1788 }
1789
1790 void MDSRank::handle_mds_failure(mds_rank_t who)
1791 {
1792 if (who == whoami) {
1793 dout(5) << "handle_mds_failure for myself; not doing anything" << dendl;
1794 return;
1795 }
1796 dout(5) << "handle_mds_failure mds." << who << dendl;
1797
1798 mdcache->handle_mds_failure(who);
1799
1800 snapclient->handle_mds_failure(who);
1801 }
1802
1803 bool MDSRankDispatcher::handle_asok_command(
1804 std::string command, cmdmap_t& cmdmap, Formatter *f,
1805 std::ostream& ss)
1806 {
1807 if (command == "dump_ops_in_flight" ||
1808 command == "ops") {
1809 if (!op_tracker.dump_ops_in_flight(f)) {
1810 ss << "op_tracker tracking is not enabled now, so no ops are tracked currently, even those get stuck. \
1811 please enable \"osd_enable_op_tracker\", and the tracker will start to track new ops received afterwards.";
1812 }
1813 } else if (command == "dump_blocked_ops") {
1814 if (!op_tracker.dump_ops_in_flight(f, true)) {
1815 ss << "op_tracker tracking is not enabled now, so no ops are tracked currently, even those get stuck. \
1816 Please enable \"osd_enable_op_tracker\", and the tracker will start to track new ops received afterwards.";
1817 }
1818 } else if (command == "dump_historic_ops") {
1819 if (!op_tracker.dump_historic_ops(f)) {
1820 ss << "op_tracker tracking is not enabled now, so no ops are tracked currently, even those get stuck. \
1821 please enable \"osd_enable_op_tracker\", and the tracker will start to track new ops received afterwards.";
1822 }
1823 } else if (command == "dump_historic_ops_by_duration") {
1824 if (!op_tracker.dump_historic_ops(f, true)) {
1825 ss << "op_tracker tracking is not enabled now, so no ops are tracked currently, even those get stuck. \
1826 please enable \"osd_enable_op_tracker\", and the tracker will start to track new ops received afterwards.";
1827 }
1828 } else if (command == "osdmap barrier") {
1829 int64_t target_epoch = 0;
1830 bool got_val = cmd_getval(g_ceph_context, cmdmap, "target_epoch", target_epoch);
1831
1832 if (!got_val) {
1833 ss << "no target epoch given";
1834 return true;
1835 }
1836
1837 mds_lock.Lock();
1838 set_osd_epoch_barrier(target_epoch);
1839 mds_lock.Unlock();
1840
1841 C_SaferCond cond;
1842 bool already_got = objecter->wait_for_map(target_epoch, &cond);
1843 if (!already_got) {
1844 dout(4) << __func__ << ": waiting for OSD epoch " << target_epoch << dendl;
1845 cond.wait();
1846 }
1847 } else if (command == "session ls") {
1848 Mutex::Locker l(mds_lock);
1849
1850 heartbeat_reset();
1851
1852 dump_sessions(SessionFilter(), f);
1853 } else if (command == "session evict") {
1854 std::string client_id;
1855 const bool got_arg = cmd_getval(g_ceph_context, cmdmap, "client_id", client_id);
1856 if(!got_arg) {
1857 ss << "Invalid client_id specified";
1858 return true;
1859 }
1860
1861 mds_lock.Lock();
1862 stringstream dss;
1863 bool killed = kill_session(strtol(client_id.c_str(), 0, 10), true, dss);
1864 if (!killed) {
1865 dout(15) << dss.str() << dendl;
1866 ss << dss.str();
1867 }
1868 mds_lock.Unlock();
1869 } else if (command == "scrub_path") {
1870 string path;
1871 vector<string> scrubop_vec;
1872 cmd_getval(g_ceph_context, cmdmap, "scrubops", scrubop_vec);
1873 cmd_getval(g_ceph_context, cmdmap, "path", path);
1874 command_scrub_path(f, path, scrubop_vec);
1875 } else if (command == "tag path") {
1876 string path;
1877 cmd_getval(g_ceph_context, cmdmap, "path", path);
1878 string tag;
1879 cmd_getval(g_ceph_context, cmdmap, "tag", tag);
1880 command_tag_path(f, path, tag);
1881 } else if (command == "flush_path") {
1882 string path;
1883 cmd_getval(g_ceph_context, cmdmap, "path", path);
1884 command_flush_path(f, path);
1885 } else if (command == "flush journal") {
1886 command_flush_journal(f);
1887 } else if (command == "get subtrees") {
1888 command_get_subtrees(f);
1889 } else if (command == "export dir") {
1890 string path;
1891 if(!cmd_getval(g_ceph_context, cmdmap, "path", path)) {
1892 ss << "malformed path";
1893 return true;
1894 }
1895 int64_t rank;
1896 if(!cmd_getval(g_ceph_context, cmdmap, "rank", rank)) {
1897 ss << "malformed rank";
1898 return true;
1899 }
1900 command_export_dir(f, path, (mds_rank_t)rank);
1901 } else if (command == "dump cache") {
1902 Mutex::Locker l(mds_lock);
1903 string path;
1904 if(!cmd_getval(g_ceph_context, cmdmap, "path", path)) {
1905 mdcache->dump_cache(f);
1906 } else {
1907 mdcache->dump_cache(path);
1908 }
1909 } else if (command == "dump tree") {
1910 string root;
1911 int64_t depth;
1912 cmd_getval(g_ceph_context, cmdmap, "root", root);
1913 if (!cmd_getval(g_ceph_context, cmdmap, "depth", depth))
1914 depth = -1;
1915 {
1916 Mutex::Locker l(mds_lock);
1917 mdcache->dump_cache(root, depth, f);
1918 }
1919 } else if (command == "force_readonly") {
1920 Mutex::Locker l(mds_lock);
1921 mdcache->force_readonly();
1922 } else if (command == "dirfrag split") {
1923 command_dirfrag_split(cmdmap, ss);
1924 } else if (command == "dirfrag merge") {
1925 command_dirfrag_merge(cmdmap, ss);
1926 } else if (command == "dirfrag ls") {
1927 command_dirfrag_ls(cmdmap, ss, f);
1928 } else {
1929 return false;
1930 }
1931
1932 return true;
1933 }
1934
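// Completion context used to reply to an MCommand once an asynchronous
// operation finishes.  The constructor takes a reference on the message and
// send() drops it after the reply has gone out, so the MCommand stays valid
// for the lifetime of this context.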
1935 class C_MDS_Send_Command_Reply : public MDSInternalContext
1936 {
1937 protected:
1938 MCommand *m;
1939 public:
1940 C_MDS_Send_Command_Reply(MDSRank *_mds, MCommand *_m) :
1941 MDSInternalContext(_mds), m(_m) { m->get(); }
1942 void send (int r, const std::string& out_str) {
1943 bufferlist bl;
1944 MDSDaemon::send_command_reply(m, mds, r, bl, out_str);
1945 m->put();
1946 }
1947 void finish (int r) override {
1948 send(r, "");
1949 }
1950 };
1951
1952 /**
1953 * This function drops the mds_lock, so don't do anything with
1954 * MDSRank after calling it (we could have gone into shutdown): just
1955 * send your result back to the calling client and finish.
1956 */
1957 void MDSRankDispatcher::evict_sessions(const SessionFilter &filter, MCommand *m)
1958 {
1959 C_MDS_Send_Command_Reply *reply = new C_MDS_Send_Command_Reply(this, m);
1960
1961 if (is_any_replay()) {
1962 reply->send(-EAGAIN, "MDS is replaying log");
1963 delete reply;
1964 return;
1965 }
1966
1967 std::list<Session*> victims;
1968 const auto sessions = sessionmap.get_sessions();
1969 for (const auto p : sessions) {
1970 if (!p.first.is_client()) {
1971 continue;
1972 }
1973
1974 Session *s = p.second;
1975
1976 if (filter.match(*s, std::bind(&Server::waiting_for_reconnect, server, std::placeholders::_1))) {
1977 victims.push_back(s);
1978 }
1979 }
1980
1981 dout(20) << __func__ << " matched " << victims.size() << " sessions" << dendl;
1982
1983 if (victims.empty()) {
1984 reply->send(0, "");
1985 delete reply;
1986 return;
1987 }
1988
1989 C_GatherBuilder gather(g_ceph_context, reply);
1990 for (const auto s : victims) {
1991 server->kill_session(s, gather.new_sub());
1992 }
1993 gather.activate();
1994 }
1995
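// Emits a JSON array of client sessions; each entry has roughly the shape
// below (values are illustrative, not taken from a real cluster):
//   {"id": 4235, "num_leases": 0, "num_caps": 1, "state": "open",
//    "replay_requests": 0, "completed_requests": 0, "reconnecting": false,
//    "inst": "...", "client_metadata": {...}}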
1996 void MDSRankDispatcher::dump_sessions(const SessionFilter &filter, Formatter *f) const
1997 {
1998 // Dump sessions, decorated with recovery/replay status
1999 f->open_array_section("sessions");
2000 const ceph::unordered_map<entity_name_t, Session*> &session_map = sessionmap.get_sessions();
2001 for (ceph::unordered_map<entity_name_t,Session*>::const_iterator p = session_map.begin();
2002 p != session_map.end();
2003 ++p) {
2004 if (!p->first.is_client()) {
2005 continue;
2006 }
2007
2008 Session *s = p->second;
2009
2010 if (!filter.match(*s, std::bind(&Server::waiting_for_reconnect, server, std::placeholders::_1))) {
2011 continue;
2012 }
2013
2014 f->open_object_section("session");
2015 f->dump_int("id", p->first.num());
2016
2017 f->dump_int("num_leases", s->leases.size());
2018 f->dump_int("num_caps", s->caps.size());
2019
2020 f->dump_string("state", s->get_state_name());
2021 f->dump_int("replay_requests", is_clientreplay() ? s->get_request_count() : 0);
2022 f->dump_unsigned("completed_requests", s->get_num_completed_requests());
2023 f->dump_bool("reconnecting", server->waiting_for_reconnect(p->first.num()));
2024 f->dump_stream("inst") << s->info.inst;
2025 f->open_object_section("client_metadata");
2026 for (map<string, string>::const_iterator i = s->info.client_metadata.begin();
2027 i != s->info.client_metadata.end(); ++i) {
2028 f->dump_string(i->first.c_str(), i->second);
2029 }
2030 f->close_section(); // client_metadata
2031 f->close_section(); //session
2032 }
2033 f->close_section(); //sessions
2034 }
2035
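// Start a scrub of `path`.  scrubop_vec may carry any of the flags "force",
// "recursive" and "repair" (anything else is ignored).  Illustrative asok
// invocation (daemon name and path are assumptions):
//   ceph daemon mds.a scrub_path /some/dir recursive repair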
2036 void MDSRank::command_scrub_path(Formatter *f, const string& path, vector<string>& scrubop_vec)
2037 {
2038 bool force = false;
2039 bool recursive = false;
2040 bool repair = false;
2041 for (vector<string>::iterator i = scrubop_vec.begin() ; i != scrubop_vec.end(); ++i) {
2042 if (*i == "force")
2043 force = true;
2044 else if (*i == "recursive")
2045 recursive = true;
2046 else if (*i == "repair")
2047 repair = true;
2048 }
2049 C_SaferCond scond;
2050 {
2051 Mutex::Locker l(mds_lock);
2052 mdcache->enqueue_scrub(path, "", force, recursive, repair, f, &scond);
2053 }
2054 scond.wait();
2055 // enqueue_scrub() finishers will dump the data for us; we're done!
2056 }
2057
2058 void MDSRank::command_tag_path(Formatter *f,
2059 const string& path, const std::string &tag)
2060 {
2061 C_SaferCond scond;
2062 {
2063 Mutex::Locker l(mds_lock);
2064 mdcache->enqueue_scrub(path, tag, true, true, false, f, &scond);
2065 }
2066 scond.wait();
2067 }
2068
2069 void MDSRank::command_flush_path(Formatter *f, const string& path)
2070 {
2071 C_SaferCond scond;
2072 {
2073 Mutex::Locker l(mds_lock);
2074 mdcache->flush_dentry(path, &scond);
2075 }
2076 int r = scond.wait();
2077 f->open_object_section("results");
2078 f->dump_int("return_code", r);
2079 f->close_section(); // results
2080 }
2081
2082 /**
2083 * Wrapper around _command_flush_journal that
2084 * handles serialization of result
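 *
 * The result is a small JSON object of the form (shape only):
 *   { "message": "<human readable status>", "return_code": <int> }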
2085 */
2086 void MDSRank::command_flush_journal(Formatter *f)
2087 {
2088 assert(f != NULL);
2089
2090 std::stringstream ss;
2091 const int r = _command_flush_journal(&ss);
2092 f->open_object_section("result");
2093 f->dump_string("message", ss.str());
2094 f->dump_int("return_code", r);
2095 f->close_section();
2096 }
2097
2098 /**
2099 * Implementation of "flush journal" asok command.
2100 *
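 * The flush proceeds in stages: seal the current log segment, wait (twice)
 * for the journal to be safe, trim and expire all older segments, then
 * rewrite the journal head so that readers start after the flushed region.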
2101 * @param ss
2102 * Populated with a human-readable string describing the
2103 * reason for any unexpected return status.
2104 */
2105 int MDSRank::_command_flush_journal(std::stringstream *ss)
2106 {
2107 assert(ss != NULL);
2108
2109 Mutex::Locker l(mds_lock);
2110
2111 if (mdcache->is_readonly()) {
2112 dout(5) << __func__ << ": read-only FS" << dendl;
2113 return -EROFS;
2114 }
2115
2116 if (!is_active()) {
2117 dout(5) << __func__ << ": MDS not active, no-op" << dendl;
2118 return 0;
2119 }
2120
2121 // I need to seal off the current segment, and then mark all previous segments
2122 // for expiry
2123 mdlog->start_new_segment();
2124 int r = 0;
2125
2126 // Flush initially so that all the segments older than our new one
2127 // will be eligible for expiry
2128 {
2129 C_SaferCond mdlog_flushed;
2130 mdlog->flush();
2131 mdlog->wait_for_safe(new MDSInternalContextWrapper(this, &mdlog_flushed));
2132 mds_lock.Unlock();
2133 r = mdlog_flushed.wait();
2134 mds_lock.Lock();
2135 if (r != 0) {
2136 *ss << "Error " << r << " (" << cpp_strerror(r) << ") while flushing journal";
2137 return r;
2138 }
2139 }
2140
2141 // Because we may not be the last wait_for_safe context on MDLog, and
2142 // subsequent contexts might wake up in the middle of our later trim_all
2143 // and interfere with expiry (by e.g. marking dirs/dentries dirty
2144 // on previous log segments), we run a second wait_for_safe here.
2145 // See #10368
2146 {
2147 C_SaferCond mdlog_cleared;
2148 mdlog->wait_for_safe(new MDSInternalContextWrapper(this, &mdlog_cleared));
2149 mds_lock.Unlock();
2150 r = mdlog_cleared.wait();
2151 mds_lock.Lock();
2152 if (r != 0) {
2153 *ss << "Error " << r << " (" << cpp_strerror(r) << ") while flushing journal";
2154 return r;
2155 }
2156 }
2157
2158 // Put all the old log segments into expiring or expired state
2159 dout(5) << __func__ << ": beginning segment expiry" << dendl;
2160 r = mdlog->trim_all();
2161 if (r != 0) {
2162 *ss << "Error " << r << " (" << cpp_strerror(r) << ") while trimming log";
2163 return r;
2164 }
2165
2166 // Attach contexts to wait for all expiring segments to expire
2167 MDSGatherBuilder expiry_gather(g_ceph_context);
2168
2169 const std::set<LogSegment*> &expiring_segments = mdlog->get_expiring_segments();
2170 for (std::set<LogSegment*>::const_iterator i = expiring_segments.begin();
2171 i != expiring_segments.end(); ++i) {
2172 (*i)->wait_for_expiry(expiry_gather.new_sub());
2173 }
2174 dout(5) << __func__ << ": waiting for " << expiry_gather.num_subs_created()
2175 << " segments to expire" << dendl;
2176
2177 if (expiry_gather.has_subs()) {
2178 C_SaferCond cond;
2179 expiry_gather.set_finisher(new MDSInternalContextWrapper(this, &cond));
2180 expiry_gather.activate();
2181
2182 // Drop mds_lock to allow progress until expiry is complete
2183 mds_lock.Unlock();
2184 int r = cond.wait();
2185 mds_lock.Lock();
2186
2187 assert(r == 0); // MDLog is not allowed to raise errors via wait_for_expiry
2188 }
2189
2190 dout(5) << __func__ << ": expiry complete, expire_pos/trim_pos is now " << std::hex <<
2191 mdlog->get_journaler()->get_expire_pos() << "/" <<
2192 mdlog->get_journaler()->get_trimmed_pos() << dendl;
2193
2194 // Now everyone I'm interested in is expired
2195 mdlog->trim_expired_segments();
2196
2197 dout(5) << __func__ << ": trim complete, expire_pos/trim_pos is now " << std::hex <<
2198 mdlog->get_journaler()->get_expire_pos() << "/" <<
2199 mdlog->get_journaler()->get_trimmed_pos() << dendl;
2200
2201 // Flush the journal header so that readers will start from after the flushed region
2202 C_SaferCond wrote_head;
2203 mdlog->get_journaler()->write_head(&wrote_head);
2204 mds_lock.Unlock(); // Drop lock to allow messenger dispatch progress
2205 r = wrote_head.wait();
2206 mds_lock.Lock();
2207 if (r != 0) {
2208 *ss << "Error " << r << " (" << cpp_strerror(r) << ") while writing header";
2209 return r;
2210 }
2211
2212 dout(5) << __func__ << ": write_head complete, all done!" << dendl;
2213
2214 return 0;
2215 }
2216
2217
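// Dump the subtree roots currently in this rank's cache as JSON, e.g.
// (shape only):
//   {"subtrees": [{"is_auth": true, "auth_first": 0, "auth_second": -2,
//                  "dir": {...}}, ...]}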
2218 void MDSRank::command_get_subtrees(Formatter *f)
2219 {
2220 assert(f != NULL);
2221 Mutex::Locker l(mds_lock);
2222
2223 std::list<CDir*> subtrees;
2224 mdcache->list_subtrees(subtrees);
2225
2226 f->open_array_section("subtrees");
2227 for (std::list<CDir*>::iterator i = subtrees.begin(); i != subtrees.end(); ++i) {
2228 const CDir *dir = *i;
2229
2230 f->open_object_section("subtree");
2231 {
2232 f->dump_bool("is_auth", dir->is_auth());
2233 f->dump_int("auth_first", dir->get_dir_auth().first);
2234 f->dump_int("auth_second", dir->get_dir_auth().second);
2235 f->open_object_section("dir");
2236 dir->dump(f);
2237 f->close_section();
2238 }
2239 f->close_section();
2240 }
2241 f->close_section();
2242 }
2243
2244
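// "export dir": migrate the subtree rooted at `path` to another active rank.
// Illustrative asok invocation (daemon name, path and rank are assumptions):
//   ceph daemon mds.a export dir /some/dir 1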
2245 void MDSRank::command_export_dir(Formatter *f,
2246 const std::string &path,
2247 mds_rank_t target)
2248 {
2249 int r = _command_export_dir(path, target);
2250 f->open_object_section("results");
2251 f->dump_int("return_code", r);
2252 f->close_section(); // results
2253 }
2254
2255 int MDSRank::_command_export_dir(
2256 const std::string &path,
2257 mds_rank_t target)
2258 {
2259 Mutex::Locker l(mds_lock);
2260 filepath fp(path.c_str());
2261
2262 if (target == whoami || !mdsmap->is_up(target) || !mdsmap->is_in(target)) {
2263 derr << "bad MDS target " << target << dendl;
2264 return -ENOENT;
2265 }
2266
2267 CInode *in = mdcache->cache_traverse(fp);
2268 if (!in) {
2269 derr << "Bad path '" << path << "'" << dendl;
2270 return -ENOENT;
2271 }
2272 CDir *dir = in->get_dirfrag(frag_t());
2273 if (!dir || !(dir->is_auth())) {
2274 derr << "bad export_dir path: dirfrag frag_t() not in cache or not auth" << dendl;
2275 return -EINVAL;
2276 }
2277
2278 mdcache->migrator->export_dir(dir, target);
2279 return 0;
2280 }
2281
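// Resolve the "path"/"frag" arguments shared by the dirfrag commands into a
// cached, auth CDir*, or write a human-readable reason to `ss` and return
// NULL.  The frag argument uses the same "value/bits" form that `dirfrag ls`
// prints, e.g. "0/0" (illustrative).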
2282 CDir *MDSRank::_command_dirfrag_get(
2283 const cmdmap_t &cmdmap,
2284 std::ostream &ss)
2285 {
2286 std::string path;
2287 bool got = cmd_getval(g_ceph_context, cmdmap, "path", path);
2288 if (!got) {
2289 ss << "missing path argument";
2290 return NULL;
2291 }
2292
2293 std::string frag_str;
2294 if (!cmd_getval(g_ceph_context, cmdmap, "frag", frag_str)) {
2295 ss << "missing frag argument";
2296 return NULL;
2297 }
2298
2299 CInode *in = mdcache->cache_traverse(filepath(path.c_str()));
2300 if (!in) {
2301 // TODO really we should load something in if it's not in cache,
2302 // but the infrastructure is harder, and we might still be unable
2303 // to act on it if someone else is auth.
2304 ss << "directory '" << path << "' inode not in cache";
2305 return NULL;
2306 }
2307
2308 frag_t fg;
2309
2310 if (!fg.parse(frag_str.c_str())) {
2311 ss << "frag " << frag_str << " failed to parse";
2312 return NULL;
2313 }
2314
2315 CDir *dir = in->get_dirfrag(fg);
2316 if (!dir) {
2317 ss << "frag 0x" << std::hex << in->ino() << "/" << fg << " not in cache ("
2318 "use `dirfrag ls` to see if it should exist)";
2319 return NULL;
2320 }
2321
2322 if (!dir->is_auth()) {
2323 ss << "frag " << dir->dirfrag() << " not auth (auth = "
2324 << dir->authority() << ")";
2325 return NULL;
2326 }
2327
2328 return dir;
2329 }
2330
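// Manually split a cached dirfrag by `bits` additional bits.  Illustrative
// asok invocation (daemon name, path, frag and argument order are
// assumptions):
//   ceph daemon mds.a dirfrag split /some/dir 0/0 2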
2331 bool MDSRank::command_dirfrag_split(
2332 cmdmap_t cmdmap,
2333 std::ostream &ss)
2334 {
2335 Mutex::Locker l(mds_lock);
2336 if (!mdsmap->allows_dirfrags()) {
2337 ss << "dirfrags are disallowed by the mds map!";
2338 return false;
2339 }
2340
2341 int64_t by = 0;
2342 if (!cmd_getval(g_ceph_context, cmdmap, "bits", by)) {
2343 ss << "missing bits argument";
2344 return false;
2345 }
2346
2347 if (by <= 0) {
2348 ss << "must split by >0 bits";
2349 return false;
2350 }
2351
2352 CDir *dir = _command_dirfrag_get(cmdmap, ss);
2353 if (!dir) {
2354 return false;
2355 }
2356
2357 mdcache->split_dir(dir, by);
2358
2359 return true;
2360 }
2361
2362 bool MDSRank::command_dirfrag_merge(
2363 cmdmap_t cmdmap,
2364 std::ostream &ss)
2365 {
2366 Mutex::Locker l(mds_lock);
2367 std::string path;
2368 bool got = cmd_getval(g_ceph_context, cmdmap, "path", path);
2369 if (!got) {
2370 ss << "missing path argument";
2371 return false;
2372 }
2373
2374 std::string frag_str;
2375 if (!cmd_getval(g_ceph_context, cmdmap, "frag", frag_str)) {
2376 ss << "missing frag argument";
2377 return false;
2378 }
2379
2380 CInode *in = mdcache->cache_traverse(filepath(path.c_str()));
2381 if (!in) {
2382 ss << "directory '" << path << "' inode not in cache";
2383 return false;
2384 }
2385
2386 frag_t fg;
2387 if (!fg.parse(frag_str.c_str())) {
2388 ss << "frag " << frag_str << " failed to parse";
2389 return false;
2390 }
2391
2392 mdcache->merge_dir(in, fg);
2393
2394 return true;
2395 }
2396
2397 bool MDSRank::command_dirfrag_ls(
2398 cmdmap_t cmdmap,
2399 std::ostream &ss,
2400 Formatter *f)
2401 {
2402 Mutex::Locker l(mds_lock);
2403 std::string path;
2404 bool got = cmd_getval(g_ceph_context, cmdmap, "path", path);
2405 if (!got) {
2406 ss << "missing path argument";
2407 return false;
2408 }
2409
2410 CInode *in = mdcache->cache_traverse(filepath(path.c_str()));
2411 if (!in) {
2412 ss << "directory inode not in cache";
2413 return false;
2414 }
2415
2416 f->open_array_section("frags");
2417 std::list<frag_t> frags;
2418 // NB using get_leaves_under instead of get_dirfrags to give
2419 // you the list of what dirfrags may exist, not which are in cache
2420 in->dirfragtree.get_leaves_under(frag_t(), frags);
2421 for (std::list<frag_t>::iterator i = frags.begin();
2422 i != frags.end(); ++i) {
2423 f->open_object_section("frag");
2424 f->dump_int("value", i->value());
2425 f->dump_int("bits", i->bits());
2426 std::ostringstream frag_str;
2427 frag_str << std::hex << i->value() << "/" << std::dec << i->bits();
2428 f->dump_string("str", frag_str.str());
2429 f->close_section();
2430 }
2431 f->close_section();
2432
2433 return true;
2434 }
2435
2436 void MDSRank::dump_status(Formatter *f) const
2437 {
2438 if (state == MDSMap::STATE_REPLAY ||
2439 state == MDSMap::STATE_STANDBY_REPLAY) {
2440 mdlog->dump_replay_status(f);
2441 } else if (state == MDSMap::STATE_RESOLVE) {
2442 mdcache->dump_resolve_status(f);
2443 } else if (state == MDSMap::STATE_RECONNECT) {
2444 server->dump_reconnect_status(f);
2445 } else if (state == MDSMap::STATE_REJOIN) {
2446 mdcache->dump_rejoin_status(f);
2447 } else if (state == MDSMap::STATE_CLIENTREPLAY) {
2448 dump_clientreplay_status(f);
2449 }
2450 }
2451
2452 void MDSRank::dump_clientreplay_status(Formatter *f) const
2453 {
2454 f->open_object_section("clientreplay_status");
2455 f->dump_unsigned("clientreplay_queue", replay_queue.size());
2456 f->dump_unsigned("active_replay", mdcache->get_num_client_requests());
2457 f->close_section();
2458 }
2459
2460 void MDSRankDispatcher::update_log_config()
2461 {
2462 map<string,string> log_to_monitors;
2463 map<string,string> log_to_syslog;
2464 map<string,string> log_channel;
2465 map<string,string> log_prio;
2466 map<string,string> log_to_graylog;
2467 map<string,string> log_to_graylog_host;
2468 map<string,string> log_to_graylog_port;
2469 uuid_d fsid;
2470 string host;
2471
2472 if (parse_log_client_options(g_ceph_context, log_to_monitors, log_to_syslog,
2473 log_channel, log_prio, log_to_graylog,
2474 log_to_graylog_host, log_to_graylog_port,
2475 fsid, host) == 0)
2476 clog->update_config(log_to_monitors, log_to_syslog,
2477 log_channel, log_prio, log_to_graylog,
2478 log_to_graylog_host, log_to_graylog_port,
2479 fsid, host);
2480 dout(10) << __func__ << " log_to_monitors " << log_to_monitors << dendl;
2481 }
2482
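// Register the "mds" and "mds_mem" perf counter collections.  Their current
// values can be read back at runtime via the admin socket, e.g.
// `ceph daemon mds.a perf dump` (daemon name is an assumption).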
2483 void MDSRank::create_logger()
2484 {
2485 dout(10) << "create_logger" << dendl;
2486 {
2487 PerfCountersBuilder mds_plb(g_ceph_context, "mds", l_mds_first, l_mds_last);
2488
2489 mds_plb.add_u64_counter(
2490 l_mds_request, "request", "Requests", "req",
2491 PerfCountersBuilder::PRIO_CRITICAL);
2492 mds_plb.add_u64_counter(l_mds_reply, "reply", "Replies");
2493 mds_plb.add_time_avg(
2494 l_mds_reply_latency, "reply_latency", "Reply latency", "rlat",
2495 PerfCountersBuilder::PRIO_CRITICAL);
2496 mds_plb.add_u64_counter(
2497 l_mds_forward, "forward", "Forwarding request", "fwd",
2498 PerfCountersBuilder::PRIO_INTERESTING);
2499 mds_plb.add_u64_counter(l_mds_dir_fetch, "dir_fetch", "Directory fetch");
2500 mds_plb.add_u64_counter(l_mds_dir_commit, "dir_commit", "Directory commit");
2501 mds_plb.add_u64_counter(l_mds_dir_split, "dir_split", "Directory split");
2502 mds_plb.add_u64_counter(l_mds_dir_merge, "dir_merge", "Directory merge");
2503
2504 mds_plb.add_u64(l_mds_inode_max, "inode_max", "Max inodes, cache size");
2505 mds_plb.add_u64(l_mds_inodes, "inodes", "Inodes", "inos",
2506 PerfCountersBuilder::PRIO_CRITICAL);
2507 mds_plb.add_u64(l_mds_inodes_top, "inodes_top", "Inodes on top");
2508 mds_plb.add_u64(l_mds_inodes_bottom, "inodes_bottom", "Inodes on bottom");
2509 mds_plb.add_u64(
2510 l_mds_inodes_pin_tail, "inodes_pin_tail", "Inodes on pin tail");
2511 mds_plb.add_u64(l_mds_inodes_pinned, "inodes_pinned", "Inodes pinned");
2512 mds_plb.add_u64(l_mds_inodes_expired, "inodes_expired", "Inodes expired");
2513 mds_plb.add_u64(
2514 l_mds_inodes_with_caps, "inodes_with_caps", "Inodes with capabilities");
2515 mds_plb.add_u64(l_mds_caps, "caps", "Capabilities", "caps",
2516 PerfCountersBuilder::PRIO_INTERESTING);
2517 mds_plb.add_u64(l_mds_subtrees, "subtrees", "Subtrees");
2518
2519 mds_plb.add_u64_counter(l_mds_traverse, "traverse", "Traverses");
2520 mds_plb.add_u64_counter(l_mds_traverse_hit, "traverse_hit", "Traverse hits");
2521 mds_plb.add_u64_counter(l_mds_traverse_forward, "traverse_forward",
2522 "Traverse forwards");
2523 mds_plb.add_u64_counter(l_mds_traverse_discover, "traverse_discover",
2524 "Traverse directory discovers");
2525 mds_plb.add_u64_counter(l_mds_traverse_dir_fetch, "traverse_dir_fetch",
2526 "Traverse incomplete directory content fetchings");
2527 mds_plb.add_u64_counter(l_mds_traverse_remote_ino, "traverse_remote_ino",
2528 "Traverse remote dentries");
2529 mds_plb.add_u64_counter(l_mds_traverse_lock, "traverse_lock",
2530 "Traverse locks");
2531
2532 mds_plb.add_u64(l_mds_load_cent, "load_cent", "Load per cent");
2533 mds_plb.add_u64(l_mds_dispatch_queue_len, "q", "Dispatch queue length");
2534
2535 mds_plb.add_u64_counter(l_mds_exported, "exported", "Exports");
2536 mds_plb.add_u64_counter(
2537 l_mds_exported_inodes, "exported_inodes", "Exported inodes", "exi",
2538 PerfCountersBuilder::PRIO_INTERESTING);
2539 mds_plb.add_u64_counter(l_mds_imported, "imported", "Imports");
2540 mds_plb.add_u64_counter(
2541 l_mds_imported_inodes, "imported_inodes", "Imported inodes", "imi",
2542 PerfCountersBuilder::PRIO_INTERESTING);
2543 logger = mds_plb.create_perf_counters();
2544 g_ceph_context->get_perfcounters_collection()->add(logger);
2545 }
2546
2547 {
2548 PerfCountersBuilder mdm_plb(g_ceph_context, "mds_mem", l_mdm_first, l_mdm_last);
2549 mdm_plb.add_u64(l_mdm_ino, "ino", "Inodes");
2550 mdm_plb.add_u64_counter(l_mdm_inoa, "ino+", "Inodes opened");
2551 mdm_plb.add_u64_counter(l_mdm_inos, "ino-", "Inodes closed");
2552 mdm_plb.add_u64(l_mdm_dir, "dir", "Directories");
2553 mdm_plb.add_u64_counter(l_mdm_dira, "dir+", "Directories opened");
2554 mdm_plb.add_u64_counter(l_mdm_dirs, "dir-", "Directories closed");
2555 mdm_plb.add_u64(l_mdm_dn, "dn", "Dentries");
2556 mdm_plb.add_u64_counter(l_mdm_dna, "dn+", "Dentries opened");
2557 mdm_plb.add_u64_counter(l_mdm_dns, "dn-", "Dentries closed");
2558 mdm_plb.add_u64(l_mdm_cap, "cap", "Capabilities");
2559 mdm_plb.add_u64_counter(l_mdm_capa, "cap+", "Capabilities added");
2560 mdm_plb.add_u64_counter(l_mdm_caps, "cap-", "Capabilities removed");
2561 mdm_plb.add_u64(l_mdm_rss, "rss", "RSS");
2562 mdm_plb.add_u64(l_mdm_heap, "heap", "Heap size");
2563 mdm_plb.add_u64(l_mdm_buf, "buf", "Buffer size");
2564 mlogger = mdm_plb.create_perf_counters();
2565 g_ceph_context->get_perfcounters_collection()->add(mlogger);
2566 }
2567
2568 mdlog->create_logger();
2569 server->create_logger();
2570 purge_queue.create_logger();
2571 sessionmap.register_perfcounters();
2572 mdcache->register_perfcounters();
2573 }
2574
2575 void MDSRank::check_ops_in_flight()
2576 {
2577 vector<string> warnings;
2578 int slow = 0;
2579 if (op_tracker.check_ops_in_flight(warnings, &slow)) {
2580 for (vector<string>::iterator i = warnings.begin();
2581 i != warnings.end();
2582 ++i) {
2583 clog->warn() << *i;
2584 }
2585 }
2586
2587 // set mds slow request count
2588 mds_slow_req_count = slow;
2589 return;
2590 }
2591
2592 void MDSRankDispatcher::handle_osd_map()
2593 {
2594 if (is_active() && snapserver) {
2595 snapserver->check_osd_map(true);
2596 }
2597
2598 server->handle_osd_map();
2599
2600 purge_queue.update_op_limit(*mdsmap);
2601
2602 // By default the objecter only requests OSDMap updates on use,
2603 // we would like to always receive the latest maps in order to
2604 // apply policy based on the FULL flag.
2605 objecter->maybe_request_map();
2606 }
2607
2608 bool MDSRankDispatcher::kill_session(int64_t session_id, bool wait, std::stringstream& err_ss)
2609 {
2610 if (is_any_replay()) {
2611 err_ss << "MDS is replaying log";
2612 return false;
2613 }
2614
2615 Session *session = sessionmap.get_session(entity_name_t(CEPH_ENTITY_TYPE_CLIENT, session_id));
2616 if (!session) {
2617 err_ss << "session " << session_id << " not in sessionmap!";
2618 return false;
2619 }
2620 if (wait) {
2621 C_SaferCond on_safe;
2622 server->kill_session(session, &on_safe);
2623
2624 mds_lock.Unlock();
2625 on_safe.wait();
2626 mds_lock.Lock();
2627 } else {
2628 server->kill_session(session, NULL);
2629 }
2630 return true;
2631 }
2632
2633 void MDSRank::bcast_mds_map()
2634 {
2635 dout(7) << "bcast_mds_map " << mdsmap->get_epoch() << dendl;
2636
2637 // share the map with mounted clients
2638 set<Session*> clients;
2639 sessionmap.get_client_session_set(clients);
2640 for (set<Session*>::const_iterator p = clients.begin();
2641 p != clients.end();
2642 ++p)
2643 (*p)->connection->send_message(new MMDSMap(monc->get_fsid(), mdsmap));
2644 last_client_mdsmap_bcast = mdsmap->get_epoch();
2645 }
2646
2647 MDSRankDispatcher::MDSRankDispatcher(
2648 mds_rank_t whoami_,
2649 Mutex &mds_lock_,
2650 LogChannelRef &clog_,
2651 SafeTimer &timer_,
2652 Beacon &beacon_,
2653 MDSMap *& mdsmap_,
2654 Messenger *msgr,
2655 MonClient *monc_,
2656 Context *respawn_hook_,
2657 Context *suicide_hook_)
2658 : MDSRank(whoami_, mds_lock_, clog_, timer_, beacon_, mdsmap_,
2659 msgr, monc_, respawn_hook_, suicide_hook_)
2660 {}
2661
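// Handle commands delivered as MCommand messages, i.e. the
// "ceph tell mds.<id> ..." path rather than the local admin socket path
// above.  Illustrative invocation (daemon id is an assumption):
//   ceph tell mds.a session ls
// Returns false when the prefix is not recognised here.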
2662 bool MDSRankDispatcher::handle_command(
2663 const cmdmap_t &cmdmap,
2664 MCommand *m,
2665 int *r,
2666 std::stringstream *ds,
2667 std::stringstream *ss,
2668 bool *need_reply)
2669 {
2670 assert(r != nullptr);
2671 assert(ds != nullptr);
2672 assert(ss != nullptr);
2673
2674 *need_reply = true;
2675
2676 std::string prefix;
2677 cmd_getval(g_ceph_context, cmdmap, "prefix", prefix);
2678
2679 if (prefix == "session ls") {
2680 std::vector<std::string> filter_args;
2681 cmd_getval(g_ceph_context, cmdmap, "filters", filter_args);
2682
2683 SessionFilter filter;
2684 *r = filter.parse(filter_args, ss);
2685 if (*r != 0) {
2686 return true;
2687 }
2688
2689 Formatter *f = new JSONFormatter(true);
2690 dump_sessions(filter, f);
2691 f->flush(*ds);
2692 delete f;
2693 return true;
2694 } else if (prefix == "session evict") {
2695 std::vector<std::string> filter_args;
2696 cmd_getval(g_ceph_context, cmdmap, "filters", filter_args);
2697
2698 SessionFilter filter;
2699 *r = filter.parse(filter_args, ss);
2700 if (*r != 0) {
2701 return true;
2702 }
2703
2704 evict_sessions(filter, m);
2705
2706 *need_reply = false;
2707 return true;
2708 } else if (prefix == "damage ls") {
2709 Formatter *f = new JSONFormatter(true);
2710 damage_table.dump(f);
2711 f->flush(*ds);
2712 delete f;
2713 return true;
2714 } else if (prefix == "damage rm") {
2715 damage_entry_id_t id = 0;
2716 bool got = cmd_getval(g_ceph_context, cmdmap, "damage_id", (int64_t&)id);
2717 if (!got) {
2718 *r = -EINVAL;
2719 return true;
2720 }
2721
2722 damage_table.erase(id);
2723 return true;
2724 } else {
2725 return false;
2726 }
2727 }
2728
2729 epoch_t MDSRank::get_osd_epoch() const
2730 {
2731 return objecter->with_osdmap(std::mem_fn(&OSDMap::get_epoch));
2732 }
2733