1// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
2// vim: ts=8 sw=2 smarttab
3/*
4 * Ceph - scalable distributed file system
5 *
6 * Copyright (C) 2015 Red Hat
7 *
8 * This is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU Lesser General Public
10 * License version 2.1, as published by the Free Software
11 * Foundation. See file COPYING.
12 *
13 */
14
15#include <string_view>
16
17#include "common/debug.h"
18#include "common/errno.h"
19
20#include "messages/MClientRequestForward.h"
21#include "messages/MMDSLoadTargets.h"
22#include "messages/MMDSTableRequest.h"
23
24#include "MDSDaemon.h"
25#include "MDSMap.h"
26#include "SnapClient.h"
27#include "SnapServer.h"
28#include "MDBalancer.h"
29#include "Migrator.h"
30#include "Locker.h"
31#include "InoTable.h"
32#include "mon/MonClient.h"
33#include "common/HeartbeatMap.h"
34#include "ScrubStack.h"
35
36
37#include "MDSRank.h"
38
39#define dout_context g_ceph_context
40#define dout_subsys ceph_subsys_mds
41#undef dout_prefix
42#define dout_prefix *_dout << "mds." << whoami << '.' << incarnation << ' '
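// Note: with this prefix every log line from this rank is tagged
// "mds.<rank>.<incarnation> ", e.g. "mds.0.12 ...". The helper contexts
// below cache whoami/incarnation so that their dout() output carries the
// same prefix.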
43
44class C_Flush_Journal : public MDSInternalContext {
45public:
46 C_Flush_Journal(MDCache *mdcache, MDLog *mdlog, MDSRank *mds,
47 std::ostream *ss, Context *on_finish)
48 : MDSInternalContext(mds),
49 mdcache(mdcache), mdlog(mdlog), ss(ss), on_finish(on_finish),
50 whoami(mds->whoami), incarnation(mds->incarnation) {
51 }
52
53 void send() {
54 assert(mds->mds_lock.is_locked());
55
56 dout(20) << __func__ << dendl;
57
58 if (mdcache->is_readonly()) {
59 dout(5) << __func__ << ": read-only FS" << dendl;
60 complete(-EROFS);
61 return;
62 }
63
64 if (!mds->is_active()) {
65 dout(5) << __func__ << ": MDS not active, no-op" << dendl;
66 complete(0);
67 return;
68 }
69
70 flush_mdlog();
71 }
72
73private:
74
75 void flush_mdlog() {
76 dout(20) << __func__ << dendl;
77
78 // I need to seal off the current segment, and then mark all
79 // previous segments for expiry
80 mdlog->start_new_segment();
81
82 Context *ctx = new FunctionContext([this](int r) {
83 handle_flush_mdlog(r);
84 });
85
86 // Flush initially so that all the segments older than our new one
87 // will be eligible for expiry
88 mdlog->flush();
89 mdlog->wait_for_safe(new MDSInternalContextWrapper(mds, ctx));
90 }
91
92 void handle_flush_mdlog(int r) {
93 dout(20) << __func__ << ": r=" << r << dendl;
94
95 if (r != 0) {
96 *ss << "Error " << r << " (" << cpp_strerror(r) << ") while flushing journal";
97 complete(r);
98 return;
99 }
100
101 clear_mdlog();
102 }
103
104 void clear_mdlog() {
105 dout(20) << __func__ << dendl;
106
107 Context *ctx = new FunctionContext([this](int r) {
108 handle_clear_mdlog(r);
109 });
110
111 // Because we may not be the last wait_for_safe context on MDLog,
112 // and subsequent contexts might wake up in the middle of our
113 // later trim_all and interfere with expiry (by e.g. marking
114 // dirs/dentries dirty on previous log segments), we run a second
115 // wait_for_safe here. See #10368
116 mdlog->wait_for_safe(new MDSInternalContextWrapper(mds, ctx));
117 }
118
119 void handle_clear_mdlog(int r) {
120 dout(20) << __func__ << ": r=" << r << dendl;
121
122 if (r != 0) {
123 *ss << "Error " << r << " (" << cpp_strerror(r) << ") while flushing journal";
124 complete(r);
125 return;
126 }
127
128 trim_mdlog();
129 }
130
131 void trim_mdlog() {
132 // Put all the old log segments into expiring or expired state
133 dout(5) << __func__ << ": beginning segment expiry" << dendl;
134
135 int ret = mdlog->trim_all();
136 if (ret != 0) {
137 *ss << "Error " << ret << " (" << cpp_strerror(ret) << ") while trimming log";
138 complete(ret);
139 return;
140 }
141
142 expire_segments();
143 }
144
145 void expire_segments() {
146 dout(20) << __func__ << dendl;
147
148 // Attach contexts to wait for all expiring segments to expire
149 MDSGatherBuilder *expiry_gather = new MDSGatherBuilder(g_ceph_context);
150
151 const auto &expiring_segments = mdlog->get_expiring_segments();
152 for (auto p : expiring_segments) {
153 p->wait_for_expiry(expiry_gather->new_sub());
154 }
155 dout(5) << __func__ << ": waiting for " << expiry_gather->num_subs_created()
156 << " segments to expire" << dendl;
157
158 if (!expiry_gather->has_subs()) {
159 trim_segments();
160 delete expiry_gather;
161 return;
162 }
163
164 Context *ctx = new FunctionContext([this](int r) {
165 handle_expire_segments(r);
166 });
167 expiry_gather->set_finisher(new MDSInternalContextWrapper(mds, ctx));
168 expiry_gather->activate();
169 }
170
171 void handle_expire_segments(int r) {
172 dout(20) << __func__ << ": r=" << r << dendl;
173
174 ceph_assert(r == 0); // MDLog is not allowed to raise errors via
175 // wait_for_expiry
176 trim_segments();
177 }
178
179 void trim_segments() {
180 dout(20) << __func__ << dendl;
181
182 Context *ctx = new C_OnFinisher(new FunctionContext([this](int _) {
183 std::lock_guard locker(mds->mds_lock);
184 trim_expired_segments();
185 }), mds->finisher);
186 ctx->complete(0);
187 }
188
189 void trim_expired_segments() {
190 dout(5) << __func__ << ": expiry complete, expire_pos/trim_pos is now "
191 << std::hex << mdlog->get_journaler()->get_expire_pos() << "/"
192 << mdlog->get_journaler()->get_trimmed_pos() << dendl;
193
194 // Now everyone I'm interested in is expired
195 mdlog->trim_expired_segments();
196
197 dout(5) << __func__ << ": trim complete, expire_pos/trim_pos is now "
198 << std::hex << mdlog->get_journaler()->get_expire_pos() << "/"
199 << mdlog->get_journaler()->get_trimmed_pos() << dendl;
200
201 write_journal_head();
202 }
203
204 void write_journal_head() {
205 dout(20) << __func__ << dendl;
206
207 Context *ctx = new FunctionContext([this](int r) {
208 std::lock_guard locker(mds->mds_lock);
209 handle_write_head(r);
210 });
211 // Flush the journal header so that readers will start from after
212 // the flushed region
213 mdlog->get_journaler()->write_head(ctx);
214 }
215
216 void handle_write_head(int r) {
217 if (r != 0) {
218 *ss << "Error " << r << " (" << cpp_strerror(r) << ") while writing header";
219 } else {
220 dout(5) << __func__ << ": write_head complete, all done!" << dendl;
221 }
222
223 complete(r);
224 }
225
226 void finish(int r) override {
227 dout(20) << __func__ << ": r=" << r << dendl;
228 on_finish->complete(r);
229 }
230
231 MDCache *mdcache;
232 MDLog *mdlog;
233 std::ostream *ss;
234 Context *on_finish;
235
236 // so as to use dout
237 mds_rank_t whoami;
238 int incarnation;
239};
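// Usage sketch (for orientation only; it mirrors C_Drop_Cache::flush_journal()
// further down and is not additional upstream logic): the caller constructs the
// context and drives it with send() while holding mds_lock; error text goes to
// the supplied ostream and the result reaches the on_finish callback.
//
//   C_Flush_Journal *fj = new C_Flush_Journal(mdcache, mdlog, mds, &ss, ctx);
//   fj->send();  // flush -> expire -> trim -> write_head, then ctx->complete(r)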
240
241class C_Drop_Cache : public MDSInternalContext {
242public:
243 C_Drop_Cache(Server *server, MDCache *mdcache, MDLog *mdlog,
244 MDSRank *mds, uint64_t recall_timeout,
245 Formatter *f, Context *on_finish)
246 : MDSInternalContext(mds),
247 server(server), mdcache(mdcache), mdlog(mdlog),
248 recall_timeout(recall_timeout), recall_start(mono_clock::now()),
249 f(f), on_finish(on_finish),
250 whoami(mds->whoami), incarnation(mds->incarnation) {
251 }
252
253 void send() {
254 // not really a hard requirement here, but let's ensure this in
255 // case we change the logic here.
256 assert(mds->mds_lock.is_locked());
257
258 dout(20) << __func__ << dendl;
259 f->open_object_section("result");
260 recall_client_state();
261 }
262
263private:
264 // context which completes itself (with -ETIMEDOUT) after a specified
265 // timeout or when explicitly completed, whichever comes first. Note
266 // that the context does not destroy itself after completion -- it
267 // needs to be explicitly freed.
268 class C_ContextTimeout : public MDSInternalContext {
269 public:
270 C_ContextTimeout(MDSRank *mds, uint64_t timeout, Context *on_finish)
271 : MDSInternalContext(mds),
272 timeout(timeout),
273 lock("mds::context::timeout", false, true),
274 on_finish(on_finish) {
275 }
276 ~C_ContextTimeout() {
277 ceph_assert(timer_task == nullptr);
278 }
279
280 void start_timer() {
281 if (!timeout) {
282 return;
283 }
284
285 timer_task = new FunctionContext([this](int _) {
286 timer_task = nullptr;
287 complete(-ETIMEDOUT);
288 });
289 mds->timer.add_event_after(timeout, timer_task);
290 }
291
292 void finish(int r) override {
293 Context *ctx = nullptr;
294 {
295 std::lock_guard locker(lock);
296 std::swap(on_finish, ctx);
297 }
298 if (ctx != nullptr) {
299 ctx->complete(r);
300 }
301 }
302 void complete(int r) override {
303 if (timer_task != nullptr) {
304 mds->timer.cancel_event(timer_task);
305 }
306
307 finish(r);
308 }
309
310 uint64_t timeout;
311 Mutex lock;
312 Context *on_finish = nullptr;
313 Context *timer_task = nullptr;
314 };
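// Usage sketch (mirrors recall_client_state() below): the timeout context is
// installed as the gather finisher so that a stalled recall cannot block
// forever; `on_done` is a placeholder for the caller's continuation.
//
//   C_ContextTimeout *ctx = new C_ContextTimeout(mds, timeout, on_done);
//   ctx->start_timer();                     // arms the -ETIMEDOUT path
//   gather->set_finisher(new MDSInternalContextWrapper(mds, ctx));
//   gather->activate();                     // whichever completes first wins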
315
316 auto do_trim() {
317 auto [throttled, count] = mdcache->trim(UINT64_MAX);
318 dout(10) << __func__
319 << (throttled ? " (throttled)" : "")
320 << " trimmed " << count << " dentries" << dendl;
321 dentries_trimmed += count;
322 return std::make_pair(throttled, count);
323 }
324
325 void recall_client_state() {
326 dout(20) << __func__ << dendl;
327 auto now = mono_clock::now();
328 auto duration = std::chrono::duration<double>(now-recall_start).count();
329
330 MDSGatherBuilder *gather = new MDSGatherBuilder(g_ceph_context);
331 auto flags = Server::RecallFlags::STEADY|Server::RecallFlags::TRIM;
332 auto [throttled, count] = server->recall_client_state(gather, flags);
333 dout(10) << __func__
334 << (throttled ? " (throttled)" : "")
335 << " recalled " << count << " caps" << dendl;
336
337 caps_recalled += count;
338 if ((throttled || count > 0) && (recall_timeout == 0 || duration < recall_timeout)) {
339 C_ContextTimeout *ctx = new C_ContextTimeout(
340 mds, 1, new FunctionContext([this](int r) {
341 recall_client_state();
342 }));
343 ctx->start_timer();
344 gather->set_finisher(new MDSInternalContextWrapper(mds, ctx));
345 gather->activate();
346 mdlog->flush(); /* use down-time to incrementally flush log */
347 do_trim(); /* use down-time to incrementally trim cache */
348 } else {
349 if (!gather->has_subs()) {
350 delete gather;
351 return handle_recall_client_state(0);
352 } else if (recall_timeout > 0 && duration > recall_timeout) {
353 gather->set_finisher(new C_MDSInternalNoop);
354 gather->activate();
355 return handle_recall_client_state(-ETIMEDOUT);
356 } else {
357 uint64_t remaining = (recall_timeout == 0 ? 0 : recall_timeout-duration);
358 C_ContextTimeout *ctx = new C_ContextTimeout(
359 mds, remaining, new FunctionContext([this](int r) {
360 handle_recall_client_state(r);
361 }));
362
363 ctx->start_timer();
364 gather->set_finisher(new MDSInternalContextWrapper(mds, ctx));
365 gather->activate();
366 }
367 }
368 }
369
370 void handle_recall_client_state(int r) {
371 dout(20) << __func__ << ": r=" << r << dendl;
372
373 // client recall section
374 f->open_object_section("client_recall");
375 f->dump_int("return_code", r);
376 f->dump_string("message", cpp_strerror(r));
377 f->dump_int("recalled", caps_recalled);
378 f->close_section();
379
380 // we can still continue after recall timeout
381 flush_journal();
382 }
383
384 void flush_journal() {
385 dout(20) << __func__ << dendl;
386
387 Context *ctx = new FunctionContext([this](int r) {
388 handle_flush_journal(r);
389 });
390
391 C_Flush_Journal *flush_journal = new C_Flush_Journal(mdcache, mdlog, mds, &ss, ctx);
392 flush_journal->send();
393 }
394
395 void handle_flush_journal(int r) {
396 dout(20) << __func__ << ": r=" << r << dendl;
397
398 if (r != 0) {
399 cmd_err(f, ss.str());
400 complete(r);
401 return;
402 }
403
404 // journal flush section
405 f->open_object_section("flush_journal");
406 f->dump_int("return_code", r);
407 f->dump_string("message", ss.str());
408 f->close_section();
409
410 trim_cache();
411 }
412
413 void trim_cache() {
414 dout(20) << __func__ << dendl;
415
416 auto [throttled, count] = do_trim();
417 if (throttled && count > 0) {
418 auto timer = new FunctionContext([this](int _) {
419 trim_cache();
420 });
421 mds->timer.add_event_after(1.0, timer);
422 } else {
423 cache_status();
424 }
425 }
426
427 void cache_status() {
428 dout(20) << __func__ << dendl;
429
430 f->open_object_section("trim_cache");
431 f->dump_int("trimmed", dentries_trimmed);
432 f->close_section();
433
434 // cache status section
435 mdcache->cache_status(f);
436
437 complete(0);
438 }
439
440 void finish(int r) override {
441 dout(20) << __func__ << ": r=" << r << dendl;
442
443 auto d = std::chrono::duration<double>(mono_clock::now()-recall_start);
444 f->dump_float("duration", d.count());
445
446 f->close_section();
447 on_finish->complete(r);
448 }
449
450 Server *server;
451 MDCache *mdcache;
452 MDLog *mdlog;
453 uint64_t recall_timeout;
454 mono_time recall_start;
455 Formatter *f;
456 Context *on_finish;
457
458 int retval = 0;
459 std::stringstream ss;
460 uint64_t caps_recalled = 0;
461 uint64_t dentries_trimmed = 0;
462
463 // so as to use dout
464 mds_rank_t whoami;
465 int incarnation;
466
467 void cmd_err(Formatter *f, std::string_view err) {
468 f->reset();
469 f->open_object_section("result");
470 f->dump_string("error", err);
471 f->close_section();
472 }
473};
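// Overview: C_Drop_Cache chains recall_client_state() -> flush_journal() ->
// trim_cache() -> cache_status(), appending a section to the Formatter at each
// stage ("client_recall", "flush_journal", "trim_cache", the cache status and a
// final "duration" field) before completing on_finish. A recall timeout is
// reported in the "client_recall" section but does not abort the later steps.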
474
475MDSRank::MDSRank(
476 mds_rank_t whoami_,
477 Mutex &mds_lock_,
478 LogChannelRef &clog_,
479 SafeTimer &timer_,
480 Beacon &beacon_,
481 std::unique_ptr<MDSMap>& mdsmap_,
482 Messenger *msgr,
483 MonClient *monc_,
484 Context *respawn_hook_,
485 Context *suicide_hook_)
486 :
487 whoami(whoami_), incarnation(0),
488 mds_lock(mds_lock_), cct(msgr->cct), clog(clog_), timer(timer_),
489 mdsmap(mdsmap_),
490 objecter(new Objecter(g_ceph_context, msgr, monc_, nullptr, 0, 0)),
491 server(NULL), mdcache(NULL), locker(NULL), mdlog(NULL),
492 balancer(NULL), scrubstack(NULL),
493 damage_table(whoami_),
494 inotable(NULL), snapserver(NULL), snapclient(NULL),
495 sessionmap(this), logger(NULL), mlogger(NULL),
496 op_tracker(g_ceph_context, g_conf()->mds_enable_op_tracker,
497 g_conf()->osd_num_op_tracker_shard),
498 last_state(MDSMap::STATE_BOOT),
499 state(MDSMap::STATE_BOOT),
500 cluster_degraded(false), stopping(false),
501 purge_queue(g_ceph_context, whoami_,
502 mdsmap_->get_metadata_pool(), objecter,
503 new FunctionContext(
504 [this](int r){
505 // Purge Queue operates inside mds_lock when we're calling into
506 // it, and outside when in background, so must handle both cases.
507 if (mds_lock.is_locked_by_me()) {
508 handle_write_error(r);
509 } else {
510 std::lock_guard l(mds_lock);
511 handle_write_error(r);
512 }
513 }
514 )
515 ),
516 progress_thread(this), dispatch_depth(0),
517 hb(NULL), last_tid(0), osd_epoch_barrier(0), beacon(beacon_),
518 mds_slow_req_count(0),
519 last_client_mdsmap_bcast(0),
520 messenger(msgr), monc(monc_),
521 respawn_hook(respawn_hook_),
522 suicide_hook(suicide_hook_),
523 standby_replaying(false),
524 starttime(mono_clock::now())
525{
526 hb = g_ceph_context->get_heartbeat_map()->add_worker("MDSRank", pthread_self());
527
528 purge_queue.update_op_limit(*mdsmap);
529
530 objecter->unset_honor_osdmap_full();
531
532 finisher = new Finisher(cct);
533
534 mdcache = new MDCache(this, purge_queue);
535 mdlog = new MDLog(this);
536 balancer = new MDBalancer(this, messenger, monc);
537
538 scrubstack = new ScrubStack(mdcache, finisher);
539
540 inotable = new InoTable(this);
541 snapserver = new SnapServer(this, monc);
542 snapclient = new SnapClient(this);
543
544 server = new Server(this);
545 locker = new Locker(this, mdcache);
546
547 op_tracker.set_complaint_and_threshold(cct->_conf->mds_op_complaint_time,
548 cct->_conf->mds_op_log_threshold);
549 op_tracker.set_history_size_and_duration(cct->_conf->mds_op_history_size,
550 cct->_conf->mds_op_history_duration);
551}
552
553MDSRank::~MDSRank()
554{
555 if (hb) {
556 g_ceph_context->get_heartbeat_map()->remove_worker(hb);
557 }
558
559 if (scrubstack) { delete scrubstack; scrubstack = NULL; }
560 if (mdcache) { delete mdcache; mdcache = NULL; }
561 if (mdlog) { delete mdlog; mdlog = NULL; }
562 if (balancer) { delete balancer; balancer = NULL; }
563 if (inotable) { delete inotable; inotable = NULL; }
564 if (snapserver) { delete snapserver; snapserver = NULL; }
565 if (snapclient) { delete snapclient; snapclient = NULL; }
566
567 if (server) { delete server; server = 0; }
568 if (locker) { delete locker; locker = 0; }
569
570 if (logger) {
571 g_ceph_context->get_perfcounters_collection()->remove(logger);
572 delete logger;
573 logger = 0;
574 }
575 if (mlogger) {
576 g_ceph_context->get_perfcounters_collection()->remove(mlogger);
577 delete mlogger;
578 mlogger = 0;
579 }
580
581 delete finisher;
582 finisher = NULL;
583
584 delete suicide_hook;
585 suicide_hook = NULL;
586
587 delete respawn_hook;
588 respawn_hook = NULL;
589
590 delete objecter;
591 objecter = nullptr;
592}
593
594void MDSRankDispatcher::init()
595{
596 objecter->init();
597 messenger->add_dispatcher_head(objecter);
598
599 objecter->start();
600
601 update_log_config();
602 create_logger();
603
604 // Expose the OSDMap (already populated during MDS::init) to anyone
605 // who is interested in it.
606 handle_osd_map();
607
608 progress_thread.create("mds_rank_progr");
609
610 purge_queue.init();
611
612 finisher->start();
613}
614
615void MDSRank::update_targets()
616{
617 // get MonMap's idea of my export_targets
618 const set<mds_rank_t>& map_targets = mdsmap->get_mds_info(get_nodeid()).export_targets;
619
620 dout(20) << "updating export targets, currently " << map_targets.size() << " ranks are targets" << dendl;
621
622 bool send = false;
623 set<mds_rank_t> new_map_targets;
624
625 auto it = export_targets.begin();
626 while (it != export_targets.end()) {
627 mds_rank_t rank = it->first;
628 auto &counter = it->second;
629 dout(20) << "export target mds." << rank << " is " << counter << dendl;
630
631 double val = counter.get();
632 if (val <= 0.01) {
633 dout(15) << "export target mds." << rank << " is no longer an export target" << dendl;
634 export_targets.erase(it++);
635 send = true;
636 continue;
637 }
638 if (!map_targets.count(rank)) {
639 dout(15) << "export target mds." << rank << " not in map's export_targets" << dendl;
640 send = true;
641 }
642 new_map_targets.insert(rank);
643 it++;
644 }
645 if (new_map_targets.size() < map_targets.size()) {
646 dout(15) << "export target map holds stale targets, sending update" << dendl;
647 send = true;
648 }
649
650 if (send) {
651 dout(15) << "updating export_targets, now " << new_map_targets.size() << " ranks are targets" << dendl;
652 auto m = MMDSLoadTargets::create(mds_gid_t(monc->get_global_id()), new_map_targets);
653 monc->send_mon_message(m.detach());
654 }
655}
656
657void MDSRank::hit_export_target(mds_rank_t rank, double amount)
658{
659 double rate = g_conf()->mds_bal_target_decay;
660 if (amount < 0.0) {
661 amount = 100.0/g_conf()->mds_bal_target_decay; /* a good default for "i am trying to keep this export_target active" */
662 }
663 auto em = export_targets.emplace(std::piecewise_construct, std::forward_as_tuple(rank), std::forward_as_tuple(DecayRate(rate)));
664 auto &counter = em.first->second;
665 counter.hit(amount);
666 if (em.second) {
667 dout(15) << "hit export target (new) is " << counter << dendl;
668 } else {
669 dout(15) << "hit export target is " << counter << dendl;
670 }
671}
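// Note: each export target is tracked by a decaying counter seeded with
// mds_bal_target_decay; update_targets() above drops a rank from the target
// set once its counter decays to <= 0.01.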
672
673class C_MDS_MonCommand : public MDSInternalContext {
674 std::string cmd;
675public:
676 std::string outs;
677 C_MDS_MonCommand(MDSRank *m, std::string_view c)
678 : MDSInternalContext(m), cmd(c) {}
679 void finish(int r) override {
680 mds->_mon_command_finish(r, cmd, outs);
681 }
682};
683
684void MDSRank::_mon_command_finish(int r, std::string_view cmd, std::string_view outs)
685{
686 if (r < 0) {
687 dout(0) << __func__ << ": mon command " << cmd << " failed with errno " << r
688 << " (" << outs << ")" << dendl;
689 } else {
690 dout(1) << __func__ << ": mon command " << cmd << " succeeded" << dendl;
691 }
692}
693
694void MDSRank::set_mdsmap_multimds_snaps_allowed()
695{
696 static bool already_sent = false;
697 if (already_sent)
698 return;
699
700 stringstream ss;
701 ss << "{\"prefix\":\"fs set\", \"fs_name\":\"" << mdsmap->get_fs_name() << "\", ";
702 ss << "\"var\":\"allow_multimds_snaps\", \"val\":\"true\", ";
703 ss << "\"confirm\":\"--yes-i-am-really-a-mds\"}";
704 std::vector<std::string> cmd = {ss.str()};
705
706 dout(0) << __func__ << ": sending mon command: " << cmd[0] << dendl;
707
708 C_MDS_MonCommand *fin = new C_MDS_MonCommand(this, cmd[0]);
709 monc->start_mon_command(cmd, {}, nullptr, &fin->outs, new C_IO_Wrapper(this, fin));
710
711 already_sent = true;
712}
713
714void MDSRank::mark_base_recursively_scrubbed(inodeno_t ino)
715{
716 if (mdsmap->get_tableserver() == whoami)
717 snapserver->mark_base_recursively_scrubbed(ino);
718}
719
720void MDSRankDispatcher::tick()
721{
722 heartbeat_reset();
723
724 if (beacon.is_laggy()) {
725 dout(1) << "skipping upkeep work because connection to Monitors appears laggy" << dendl;
726 return;
727 }
728
729 check_ops_in_flight();
730
731 // Wake up thread in case we used to be laggy and have waiting_for_nolaggy
732 // messages to progress.
733 progress_thread.signal();
734
735 // make sure mds log flushes, trims periodically
736 mdlog->flush();
737
738 // update average session uptime
739 sessionmap.update_average_session_age();
740
741 if (is_active() || is_stopping()) {
742 mdlog->trim(); // NOT during recovery!
743 }
744
745 // ...
746 if (is_cache_trimmable()) {
747 server->find_idle_sessions();
748 server->evict_cap_revoke_non_responders();
749 locker->tick();
750 }
751
752 // log
753 if (logger) {
754 logger->set(l_mds_subtrees, mdcache->num_subtrees());
755 mdcache->log_stat();
756 }
757
758 if (is_reconnect())
759 server->reconnect_tick();
760
761 if (is_active()) {
762 balancer->tick();
763 mdcache->find_stale_fragment_freeze();
764 mdcache->migrator->find_stale_export_freeze();
765
766 if (mdsmap->get_tableserver() == whoami) {
767 snapserver->check_osd_map(false);
768 // Filesystem was created by pre-mimic mds. Allow multi-active mds after
769 // all old snapshots are deleted.
770 if (!mdsmap->allows_multimds_snaps() &&
771 snapserver->can_allow_multimds_snaps()) {
772 set_mdsmap_multimds_snaps_allowed();
773 }
774 }
775 }
776
777 if (is_active() || is_stopping()) {
778 update_targets();
779 }
780
781 // shut down?
782 if (is_stopping()) {
783 mdlog->trim();
784 if (mdcache->shutdown_pass()) {
785 uint64_t pq_progress = 0 ;
786 uint64_t pq_total = 0;
787 size_t pq_in_flight = 0;
788 if (!purge_queue.drain(&pq_progress, &pq_total, &pq_in_flight)) {
789 dout(7) << "shutdown_pass=true, but still waiting for purge queue"
790 << dendl;
791 // This takes unbounded time, so we must indicate progress
792 // to the administrator: we do it in a slightly imperfect way
793 // by sending periodic (tick frequency) clog messages while
794 // in this state.
795 clog->info() << "MDS rank " << whoami << " waiting for purge queue ("
796 << std::dec << pq_progress << "/" << pq_total << " " << pq_in_flight
797 << " files purging" << ")";
798 } else {
799 dout(7) << "shutdown_pass=true, finished w/ shutdown, moving to "
800 "down:stopped" << dendl;
801 stopping_done();
802 }
803 }
804 else {
805 dout(7) << "shutdown_pass=false" << dendl;
806 }
807 }
808
809 // Expose ourselves to Beacon to update health indicators
810 beacon.notify_health(this);
811}
812
813void MDSRankDispatcher::shutdown()
814{
815 // It should never be possible for shutdown to get called twice, because
816 // anyone picking up mds_lock checks if stopping is true and drops
817 // out if it is.
818 ceph_assert(stopping == false);
819 stopping = true;
820
821 dout(1) << __func__ << ": shutting down rank " << whoami << dendl;
822
823 g_conf().remove_observer(this);
824
825 timer.shutdown();
826
827 // MDLog has to shut down before the finisher, because some of its
828 // threads block on IOs that require finisher to complete.
829 mdlog->shutdown();
830
831 // shut down cache
832 mdcache->shutdown();
833
834 purge_queue.shutdown();
835
836 mds_lock.Unlock();
837 finisher->stop(); // no flushing
838 mds_lock.Lock();
839
840 if (objecter->initialized)
841 objecter->shutdown();
842
843 monc->shutdown();
844
845 op_tracker.on_shutdown();
846
847 progress_thread.shutdown();
848
849 // release mds_lock for finisher/messenger threads (e.g.
850 // MDSDaemon::ms_handle_reset called from Messenger).
851 mds_lock.Unlock();
852
853 // shut down messenger
854 messenger->shutdown();
855
856 mds_lock.Lock();
857
858 // Workaround unclean shutdown: HeartbeatMap will assert if
859 // worker is not removed (as we do in ~MDS), but ~MDS is not
860 // always called after suicide.
861 if (hb) {
862 g_ceph_context->get_heartbeat_map()->remove_worker(hb);
863 hb = NULL;
864 }
865}
866
867/**
868 * Helper for simple callbacks that call a void fn with no args.
869 */
870class C_MDS_VoidFn : public MDSInternalContext
871{
872 typedef void (MDSRank::*fn_ptr)();
873 protected:
874 fn_ptr fn;
875 public:
876 C_MDS_VoidFn(MDSRank *mds_, fn_ptr fn_)
877 : MDSInternalContext(mds_), fn(fn_)
878 {
879 ceph_assert(mds_);
880 ceph_assert(fn_);
881 }
882
883 void finish(int r) override
884 {
885 (mds->*fn)();
886 }
887};
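// Usage sketch (mirrors resolve_start() further down): schedules a void member
// function as a completion callback.
//
//   mdcache->resolve_start(new C_MDS_VoidFn(this, &MDSRank::resolve_done));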
888
889int64_t MDSRank::get_metadata_pool()
890{
891 return mdsmap->get_metadata_pool();
892}
893
894MDSTableClient *MDSRank::get_table_client(int t)
895{
896 switch (t) {
897 case TABLE_ANCHOR: return NULL;
898 case TABLE_SNAP: return snapclient;
899 default: ceph_abort();
900 }
901}
902
903MDSTableServer *MDSRank::get_table_server(int t)
904{
905 switch (t) {
906 case TABLE_ANCHOR: return NULL;
907 case TABLE_SNAP: return snapserver;
908 default: ceph_abort();
909 }
910}
911
912void MDSRank::suicide()
913{
914 if (suicide_hook) {
915 suicide_hook->complete(0);
916 suicide_hook = NULL;
917 }
918}
919
920void MDSRank::respawn()
921{
922 if (respawn_hook) {
923 respawn_hook->complete(0);
924 respawn_hook = NULL;
925 }
926}
927
928void MDSRank::damaged()
929{
930 ceph_assert(whoami != MDS_RANK_NONE);
931 ceph_assert(mds_lock.is_locked_by_me());
932
933 beacon.set_want_state(*mdsmap, MDSMap::STATE_DAMAGED);
934 monc->flush_log(); // Flush any clog error from before we were called
935 beacon.notify_health(this); // Include latest status in our swan song
936 beacon.send_and_wait(g_conf()->mds_mon_shutdown_timeout);
937
938 // It's okay if we timed out and the mon didn't get our beacon, because
939 // another daemon (or ourselves after respawn) will eventually take the
940 // rank and report DAMAGED again when it hits same problem we did.
941
942 respawn(); // Respawn into standby in case mon has other work for us
943}
944
945void MDSRank::damaged_unlocked()
946{
947 std::lock_guard l(mds_lock);
948 damaged();
949}
950
951void MDSRank::handle_write_error(int err)
952{
953 if (err == -EBLACKLISTED) {
954 derr << "we have been blacklisted (fenced), respawning..." << dendl;
955 respawn();
956 return;
957 }
958
959 if (g_conf()->mds_action_on_write_error >= 2) {
960 derr << "unhandled write error " << cpp_strerror(err) << ", suicide..." << dendl;
961 respawn();
962 } else if (g_conf()->mds_action_on_write_error == 1) {
963 derr << "unhandled write error " << cpp_strerror(err) << ", force readonly..." << dendl;
964 mdcache->force_readonly();
965 } else {
966 // ignore;
967 derr << "unhandled write error " << cpp_strerror(err) << ", ignore..." << dendl;
968 }
969}
970
971void *MDSRank::ProgressThread::entry()
972{
973 std::lock_guard l(mds->mds_lock);
974 while (true) {
975 while (!mds->stopping &&
976 mds->finished_queue.empty() &&
977 (mds->waiting_for_nolaggy.empty() || mds->beacon.is_laggy())) {
978 cond.Wait(mds->mds_lock);
979 }
980
981 if (mds->stopping) {
982 break;
983 }
984
985 mds->_advance_queues();
986 }
987
988 return NULL;
989}
990
991
992void MDSRank::ProgressThread::shutdown()
993{
994 ceph_assert(mds->mds_lock.is_locked_by_me());
995 ceph_assert(mds->stopping);
996
997 if (am_self()) {
998 // Stopping is set, we will fall out of our main loop naturally
999 } else {
1000 // Kick the thread to notice mds->stopping, and join it
1001 cond.Signal();
1002 mds->mds_lock.Unlock();
1003 if (is_started())
1004 join();
1005 mds->mds_lock.Lock();
1006 }
1007}
1008
1009bool MDSRankDispatcher::ms_dispatch(const Message::const_ref &m)
1010{
1011 if (m->get_source().is_client()) {
1012 Session *session = static_cast<Session*>(m->get_connection()->get_priv().get());
1013 if (session)
1014 session->last_seen = Session::clock::now();
1015 }
1016
1017 inc_dispatch_depth();
1018 bool ret = _dispatch(m, true);
1019 dec_dispatch_depth();
1020 return ret;
1021}
1022
1023bool MDSRank::_dispatch(const Message::const_ref &m, bool new_msg)
1024{
1025 if (is_stale_message(m)) {
1026 return true;
1027 }
1028
1029 if (beacon.is_laggy()) {
1030 dout(5) << " laggy, deferring " << *m << dendl;
1031 waiting_for_nolaggy.push_back(m);
1032 } else if (new_msg && !waiting_for_nolaggy.empty()) {
1033 dout(5) << " there are deferred messages, deferring " << *m << dendl;
1034 waiting_for_nolaggy.push_back(m);
1035 } else {
1036 if (!handle_deferrable_message(m)) {
1037 return false;
1038 }
1039
1040 heartbeat_reset();
1041 }
1042
1043 if (dispatch_depth > 1)
1044 return true;
1045
1046 // finish any triggered contexts
1047 _advance_queues();
1048
1049 if (beacon.is_laggy()) {
1050 // We've gone laggy during dispatch, don't do any
1051 // more housekeeping
1052 return true;
1053 }
1054
1055 // hack: thrash exports
1056 static utime_t start;
1057 utime_t now = ceph_clock_now();
1058 if (start == utime_t())
1059 start = now;
1060 /*double el = now - start;
1061 if (el > 30.0 &&
1062 el < 60.0)*/
1063 for (int i=0; i<g_conf()->mds_thrash_exports; i++) {
1064 set<mds_rank_t> s;
1065 if (!is_active()) break;
1066 mdsmap->get_mds_set(s, MDSMap::STATE_ACTIVE);
1067 if (s.size() < 2 || CInode::count() < 10)
1068 break; // need peers for this to work.
1069 if (mdcache->migrator->get_num_exporting() > g_conf()->mds_thrash_exports * 5 ||
1070 mdcache->migrator->get_export_queue_size() > g_conf()->mds_thrash_exports * 10)
1071 break;
1072
1073 dout(7) << "mds thrashing exports pass " << (i+1) << "/" << g_conf()->mds_thrash_exports << dendl;
1074
1075 // pick a random dir inode
1076 CInode *in = mdcache->hack_pick_random_inode();
1077
1078 list<CDir*> ls;
1079 in->get_dirfrags(ls);
1080 if (!ls.empty()) { // must be an open dir.
1081 list<CDir*>::iterator p = ls.begin();
1082 int n = rand() % ls.size();
1083 while (n--)
1084 ++p;
1085 CDir *dir = *p;
1086 if (!dir->get_parent_dir()) continue; // must be linked.
1087 if (!dir->is_auth()) continue; // must be auth.
1088
1089 mds_rank_t dest;
1090 do {
1091 int k = rand() % s.size();
1092 set<mds_rank_t>::iterator p = s.begin();
1093 while (k--) ++p;
1094 dest = *p;
1095 } while (dest == whoami);
1096 mdcache->migrator->export_dir_nicely(dir,dest);
1097 }
1098 }
1099 // hack: thrash fragments
1100 for (int i=0; i<g_conf()->mds_thrash_fragments; i++) {
1101 if (!is_active()) break;
1102 if (mdcache->get_num_fragmenting_dirs() > 5 * g_conf()->mds_thrash_fragments) break;
1103 dout(7) << "mds thrashing fragments pass " << (i+1) << "/" << g_conf()->mds_thrash_fragments << dendl;
1104
1105 // pick a random dir inode
1106 CInode *in = mdcache->hack_pick_random_inode();
1107
1108 list<CDir*> ls;
1109 in->get_dirfrags(ls);
1110 if (ls.empty()) continue; // must be an open dir.
1111 CDir *dir = ls.front();
1112 if (!dir->get_parent_dir()) continue; // must be linked.
1113 if (!dir->is_auth()) continue; // must be auth.
1114 frag_t fg = dir->get_frag();
1115 if ((fg == frag_t() || (rand() % (1 << fg.bits()) == 0))) {
1116 mdcache->split_dir(dir, 1);
1117 } else {
1118 balancer->queue_merge(dir);
1119 }
1120 }
1121
1122 // hack: force hash root?
1123 /*
1124 if (false &&
1125 mdcache->get_root() &&
1126 mdcache->get_root()->dir &&
1127 !(mdcache->get_root()->dir->is_hashed() ||
1128 mdcache->get_root()->dir->is_hashing())) {
1129 dout(0) << "hashing root" << dendl;
1130 mdcache->migrator->hash_dir(mdcache->get_root()->dir);
1131 }
1132 */
1133
1134 update_mlogger();
1135 return true;
1136}
1137
1138void MDSRank::update_mlogger()
1139{
1140 if (mlogger) {
1141 mlogger->set(l_mdm_ino, CInode::count());
1142 mlogger->set(l_mdm_dir, CDir::count());
1143 mlogger->set(l_mdm_dn, CDentry::count());
1144 mlogger->set(l_mdm_cap, Capability::count());
1145 mlogger->set(l_mdm_inoa, CInode::increments());
1146 mlogger->set(l_mdm_inos, CInode::decrements());
1147 mlogger->set(l_mdm_dira, CDir::increments());
1148 mlogger->set(l_mdm_dirs, CDir::decrements());
1149 mlogger->set(l_mdm_dna, CDentry::increments());
1150 mlogger->set(l_mdm_dns, CDentry::decrements());
1151 mlogger->set(l_mdm_capa, Capability::increments());
1152 mlogger->set(l_mdm_caps, Capability::decrements());
1153 }
1154}
1155
1156/*
1157 * lower priority messages we defer if we seem laggy
1158 */
1159bool MDSRank::handle_deferrable_message(const Message::const_ref &m)
1160{
1161 int port = m->get_type() & 0xff00;
1162
1163 switch (port) {
1164 case MDS_PORT_CACHE:
1165 ALLOW_MESSAGES_FROM(CEPH_ENTITY_TYPE_MDS);
1166 mdcache->dispatch(m);
1167 break;
1168
1169 case MDS_PORT_MIGRATOR:
1170 ALLOW_MESSAGES_FROM(CEPH_ENTITY_TYPE_MDS);
1171 mdcache->migrator->dispatch(m);
1172 break;
1173
1174 default:
1175 switch (m->get_type()) {
1176 // SERVER
1177 case CEPH_MSG_CLIENT_SESSION:
1178 case CEPH_MSG_CLIENT_RECONNECT:
1179 case CEPH_MSG_CLIENT_RECLAIM:
1180 ALLOW_MESSAGES_FROM(CEPH_ENTITY_TYPE_CLIENT);
1181 // fall-thru
1182 case CEPH_MSG_CLIENT_REQUEST:
1183 server->dispatch(m);
1184 break;
1185 case MSG_MDS_SLAVE_REQUEST:
1186 ALLOW_MESSAGES_FROM(CEPH_ENTITY_TYPE_MDS);
1187 server->dispatch(m);
1188 break;
1189
1190 case MSG_MDS_HEARTBEAT:
1191 ALLOW_MESSAGES_FROM(CEPH_ENTITY_TYPE_MDS);
1192 balancer->proc_message(m);
1193 break;
1194
1195 case MSG_MDS_TABLE_REQUEST:
1196 ALLOW_MESSAGES_FROM(CEPH_ENTITY_TYPE_MDS);
1197 {
1198 const MMDSTableRequest::const_ref &req = MMDSTableRequest::msgref_cast(m);
1199 if (req->op < 0) {
1200 MDSTableClient *client = get_table_client(req->table);
1201 client->handle_request(req);
1202 } else {
1203 MDSTableServer *server = get_table_server(req->table);
1204 server->handle_request(req);
1205 }
1206 }
1207 break;
1208
1209 case MSG_MDS_LOCK:
1210 case MSG_MDS_INODEFILECAPS:
1211 ALLOW_MESSAGES_FROM(CEPH_ENTITY_TYPE_MDS);
1212 locker->dispatch(m);
1213 break;
1214
1215 case CEPH_MSG_CLIENT_CAPS:
1216 case CEPH_MSG_CLIENT_CAPRELEASE:
1217 case CEPH_MSG_CLIENT_LEASE:
1218 ALLOW_MESSAGES_FROM(CEPH_ENTITY_TYPE_CLIENT);
1219 locker->dispatch(m);
1220 break;
1221
1222 default:
1223 return false;
1224 }
1225 }
1226
1227 return true;
1228}
1229
1230/**
1231 * Advance finished_queue and waiting_for_nolaggy.
1232 *
1233 * Usually drain both queues, but may not drain waiting_for_nolaggy
1234 * if beacon is currently laggy.
1235 */
1236void MDSRank::_advance_queues()
1237{
1238 ceph_assert(mds_lock.is_locked_by_me());
1239
1240 if (!finished_queue.empty()) {
1241 dout(7) << "mds has " << finished_queue.size() << " queued contexts" << dendl;
1242 while (!finished_queue.empty()) {
1243 auto fin = finished_queue.front();
1244 finished_queue.pop_front();
1245
1246 dout(10) << " finish " << fin << dendl;
1247 fin->complete(0);
1248
1249 heartbeat_reset();
1250 }
1251 }
1252
1253 while (!waiting_for_nolaggy.empty()) {
1254 // stop if we're laggy now!
1255 if (beacon.is_laggy())
1256 break;
1257
1258 Message::const_ref old = waiting_for_nolaggy.front();
1259 waiting_for_nolaggy.pop_front();
1260
1261 if (!is_stale_message(old)) {
1262 dout(7) << " processing laggy deferred " << *old << dendl;
1263 if (!handle_deferrable_message(old)) {
1264 dout(0) << "unrecognized message " << *old << dendl;
1265 }
1266 }
1267
1268 heartbeat_reset();
1269 }
1270}
1271
1272/**
1273 * Call this when you take mds_lock, or periodically if you're going to
1274 * hold the lock for a long time (e.g. iterating over clients/inodes)
1275 */
1276void MDSRank::heartbeat_reset()
1277{
1278 // Any thread might jump into mds_lock and call us immediately
1279 // after a call to suicide() completes, in which case MDSRank::hb
1280 // has been freed and we are a no-op.
1281 if (!hb) {
1282 ceph_assert(stopping);
1283 return;
1284 }
1285
1286 // NB not enabling suicide grace, because the mon takes care of killing us
1287 // (by blacklisting us) when we fail to send beacons, and it's simpler to
1288 // only have one way of dying.
1289 auto grace = g_conf().get_val<double>("mds_heartbeat_grace");
1290 g_ceph_context->get_heartbeat_map()->reset_timeout(hb, grace, 0);
1291}
1292
1293bool MDSRank::is_stale_message(const Message::const_ref &m) const
1294{
1295 // from bad mds?
1296 if (m->get_source().is_mds()) {
1297 mds_rank_t from = mds_rank_t(m->get_source().num());
11fdf7f2
TL
1298 bool bad = false;
1299 if (mdsmap->is_down(from)) {
1300 bad = true;
1301 } else {
1302 // FIXME: this is a convoluted check. we should be maintaining a nice
1303 // clean map of current ConnectionRefs for current mdses!!!
1304 auto c = messenger->connect_to(CEPH_ENTITY_TYPE_MDS,
1305 mdsmap->get_addrs(from));
1306 if (c != m->get_connection()) {
1307 bad = true;
1308 dout(5) << " mds." << from << " should be " << c << " "
1309 << c->get_peer_addrs() << " but this message is "
1310 << m->get_connection() << " " << m->get_source_addrs()
1311 << dendl;
1312 }
1313 }
1314 if (bad) {
1315 // bogus mds?
1316 if (m->get_type() == CEPH_MSG_MDS_MAP) {
1317 dout(5) << "got " << *m << " from old/bad/imposter mds " << m->get_source()
1318 << ", but it's an mdsmap, looking at it" << dendl;
1319 } else if (m->get_type() == MSG_MDS_CACHEEXPIRE &&
1320 mdsmap->get_addrs(from) == m->get_source_addrs()) {
1321 dout(5) << "got " << *m << " from down mds " << m->get_source()
1322 << ", but it's a cache_expire, looking at it" << dendl;
1323 } else {
1324 dout(5) << "got " << *m << " from down/old/bad/imposter mds " << m->get_source()
1325 << ", dropping" << dendl;
1326 return true;
1327 }
1328 }
1329 }
1330 return false;
1331}
1332
1333Session *MDSRank::get_session(const Message::const_ref &m)
1334{
1335 // do not carry ref
1336 auto session = static_cast<Session *>(m->get_connection()->get_priv().get());
1337 if (session) {
1338 dout(20) << "get_session have " << session << " " << session->info.inst
1339 << " state " << session->get_state_name() << dendl;
1340 // Check if we've imported an open session since (new sessions start closed)
1341 if (session->is_closed()) {
1342 Session *imported_session = sessionmap.get_session(session->info.inst.name);
1343 if (imported_session && imported_session != session) {
1344 dout(10) << __func__ << " replacing connection bootstrap session "
1345 << session << " with imported session " << imported_session
1346 << dendl;
1347 imported_session->info.auth_name = session->info.auth_name;
1348 //assert(session->info.auth_name == imported_session->info.auth_name);
1349 ceph_assert(session->info.inst == imported_session->info.inst);
1350 imported_session->set_connection(session->get_connection().get());
1351 // send out any queued messages
1352 while (!session->preopen_out_queue.empty()) {
1353 imported_session->get_connection()->send_message2(std::move(session->preopen_out_queue.front()));
1354 session->preopen_out_queue.pop_front();
1355 }
1356 imported_session->auth_caps = session->auth_caps;
1357 imported_session->last_seen = session->last_seen;
1358 ceph_assert(session->get_nref() == 1);
1359 imported_session->get_connection()->set_priv(imported_session->get());
1360 session = imported_session;
1361 }
1362 }
1363 } else {
1364 dout(20) << "get_session dne for " << m->get_source_inst() << dendl;
1365 }
1366 return session;
1367}
1368
1369void MDSRank::send_message(const Message::ref& m, const ConnectionRef& c)
1370{
1371 ceph_assert(c);
1372 c->send_message2(m);
1373}
1374
1375
1376void MDSRank::send_message_mds(const Message::ref& m, mds_rank_t mds)
1377{
1378 if (!mdsmap->is_up(mds)) {
1379 dout(10) << "send_message_mds mds." << mds << " not up, dropping " << *m << dendl;
1380 return;
1381 }
1382
1383 // send mdsmap first?
1384 if (mds != whoami && peer_mdsmap_epoch[mds] < mdsmap->get_epoch()) {
1385 auto _m = MMDSMap::create(monc->get_fsid(), *mdsmap);
1386 messenger->send_to_mds(_m.detach(), mdsmap->get_addrs(mds));
1387 peer_mdsmap_epoch[mds] = mdsmap->get_epoch();
1388 }
1389
1390 // send message
1391 messenger->send_to_mds(Message::ref(m).detach(), mdsmap->get_addrs(mds));
1392}
1393
1394void MDSRank::forward_message_mds(const MClientRequest::const_ref& m, mds_rank_t mds)
1395{
1396 ceph_assert(mds != whoami);
1397
1398 /*
1399 * don't actually forward if non-idempotent!
1400 * client has to do it. although the MDS will ignore duplicate requests,
1401 * the affected metadata may migrate, in which case the new authority
1402 * won't have the metareq_id in the completed request map.
1403 */
1404 // NEW: always make the client resend!
1405 bool client_must_resend = true; //!creq->can_forward();
1406
1407 // tell the client where it should go
1408 auto session = get_session(m);
1409 auto f = MClientRequestForward::create(m->get_tid(), mds, m->get_num_fwd()+1, client_must_resend);
1410 send_message_client(f, session);
1411}
1412
1413void MDSRank::send_message_client_counted(const Message::ref& m, client_t client)
1414{
1415 Session *session = sessionmap.get_session(entity_name_t::CLIENT(client.v));
1416 if (session) {
1417 send_message_client_counted(m, session);
1418 } else {
1419 dout(10) << "send_message_client_counted no session for client." << client << " " << *m << dendl;
1420 }
1421}
1422
1423void MDSRank::send_message_client_counted(const Message::ref& m, const ConnectionRef& connection)
1424{
1425 // do not carry ref
1426 auto session = static_cast<Session *>(connection->get_priv().get());
1427 if (session) {
1428 send_message_client_counted(m, session);
1429 } else {
1430 dout(10) << "send_message_client_counted has no session for " << m->get_source_inst() << dendl;
1431 // another Connection took over the Session
1432 }
1433}
1434
1435void MDSRank::send_message_client_counted(const Message::ref& m, Session* session)
1436{
1437 version_t seq = session->inc_push_seq();
1438 dout(10) << "send_message_client_counted " << session->info.inst.name << " seq "
1439 << seq << " " << *m << dendl;
1440 if (session->get_connection()) {
1441 session->get_connection()->send_message2(m);
1442 } else {
1443 session->preopen_out_queue.push_back(m);
1444 }
1445}
1446
1447void MDSRank::send_message_client(const Message::ref& m, Session* session)
1448{
1449 dout(10) << "send_message_client " << session->info.inst << " " << *m << dendl;
1450 if (session->get_connection()) {
1451 session->get_connection()->send_message2(m);
1452 } else {
1453 session->preopen_out_queue.push_back(m);
1454 }
1455}
1456
1457/**
1458 * This is used whenever a RADOS operation has been cancelled
1459 * or a RADOS client has been blacklisted, to cause the MDS and
1460 * any clients to wait for this OSD epoch before using any new caps.
1461 *
1462 * See doc/cephfs/eviction
1463 */
1464void MDSRank::set_osd_epoch_barrier(epoch_t e)
1465{
1466 dout(4) << __func__ << ": epoch=" << e << dendl;
1467 osd_epoch_barrier = e;
1468}
1469
1470void MDSRank::retry_dispatch(const Message::const_ref &m)
1471{
1472 inc_dispatch_depth();
1473 _dispatch(m, false);
1474 dec_dispatch_depth();
1475}
1476
1477double MDSRank::get_dispatch_queue_max_age(utime_t now) const
1478{
1479 return messenger->get_dispatch_queue_max_age(now);
1480}
1481
1482bool MDSRank::is_daemon_stopping() const
1483{
1484 return stopping;
1485}
1486
1487void MDSRank::request_state(MDSMap::DaemonState s)
1488{
1489 dout(3) << "request_state " << ceph_mds_state_name(s) << dendl;
1490 beacon.set_want_state(*mdsmap, s);
1491 beacon.send();
1492}
1493
1494
1495class C_MDS_BootStart : public MDSInternalContext {
1496 MDSRank::BootStep nextstep;
1497public:
1498 C_MDS_BootStart(MDSRank *m, MDSRank::BootStep n)
1499 : MDSInternalContext(m), nextstep(n) {}
1500 void finish(int r) override {
1501 mds->boot_start(nextstep, r);
1502 }
1503};
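// Overview: boot_start() below is a small state machine; each step builds an
// I/O gather and schedules the next step through C_MDS_BootStart once the
// gather completes:
//
//   MDS_BOOT_INITIAL -> MDS_BOOT_OPEN_ROOT -> MDS_BOOT_PREPARE_LOG ->
//   MDS_BOOT_REPLAY_DONE (replay case) or starting_done() (starting case)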
1504
1505
1506void MDSRank::boot_start(BootStep step, int r)
1507{
1508 // Handle errors from previous step
1509 if (r < 0) {
1510 if (is_standby_replay() && (r == -EAGAIN)) {
1511 dout(0) << "boot_start encountered an error EAGAIN"
1512 << ", respawning since we fell behind journal" << dendl;
1513 respawn();
1514 } else if (r == -EINVAL || r == -ENOENT) {
1515 // Invalid or absent data, indicates damaged on-disk structures
1516 clog->error() << "Error loading MDS rank " << whoami << ": "
1517 << cpp_strerror(r);
1518 damaged();
1519 ceph_assert(r == 0); // Unreachable, damaged() calls respawn()
1520 } else if (r == -EROFS) {
1521 dout(0) << "boot error forcing transition to read-only; MDS will try to continue" << dendl;
1522 } else {
1523 // Completely unexpected error, give up and die
1524 dout(0) << "boot_start encountered an error, failing" << dendl;
1525 suicide();
1526 return;
1527 }
1528 }
1529
1530 ceph_assert(is_starting() || is_any_replay());
1531
1532 switch(step) {
1533 case MDS_BOOT_INITIAL:
1534 {
1535 mdcache->init_layouts();
1536
1537 MDSGatherBuilder gather(g_ceph_context,
1538 new C_MDS_BootStart(this, MDS_BOOT_OPEN_ROOT));
1539 dout(2) << "Booting: " << step << ": opening inotable" << dendl;
1540 inotable->set_rank(whoami);
1541 inotable->load(gather.new_sub());
1542
1543 dout(2) << "Booting: " << step << ": opening sessionmap" << dendl;
1544 sessionmap.set_rank(whoami);
1545 sessionmap.load(gather.new_sub());
1546
1547 dout(2) << "Booting: " << step << ": opening mds log" << dendl;
1548 mdlog->open(gather.new_sub());
1549
1550 if (is_starting()) {
1551 dout(2) << "Booting: " << step << ": opening purge queue" << dendl;
1552 purge_queue.open(new C_IO_Wrapper(this, gather.new_sub()));
1553 } else if (!standby_replaying) {
1554 dout(2) << "Booting: " << step << ": opening purge queue (async)" << dendl;
1555 purge_queue.open(NULL);
1556 dout(2) << "Booting: " << step << ": loading open file table (async)" << dendl;
1557 mdcache->open_file_table.load(nullptr);
1558 }
1559
1560 if (mdsmap->get_tableserver() == whoami) {
1561 dout(2) << "Booting: " << step << ": opening snap table" << dendl;
1562 snapserver->set_rank(whoami);
1563 snapserver->load(gather.new_sub());
1564 }
1565
1566 gather.activate();
1567 }
1568 break;
1569 case MDS_BOOT_OPEN_ROOT:
1570 {
1571 dout(2) << "Booting: " << step << ": loading/discovering base inodes" << dendl;
1572
1573 MDSGatherBuilder gather(g_ceph_context,
1574 new C_MDS_BootStart(this, MDS_BOOT_PREPARE_LOG));
1575
1576 if (is_starting()) {
1577 // load mydir frag for the first log segment (creating subtree map)
1578 mdcache->open_mydir_frag(gather.new_sub());
1579 } else {
1580 mdcache->open_mydir_inode(gather.new_sub());
1581 }
1582
1583 mdcache->create_global_snaprealm();
1584
1585 if (whoami == mdsmap->get_root()) { // load root inode off disk if we are auth
1586 mdcache->open_root_inode(gather.new_sub());
1587 } else if (is_any_replay()) {
1588 // replay. make up fake root inode to start with
1589 mdcache->create_root_inode();
1590 }
1591 gather.activate();
1592 }
1593 break;
1594 case MDS_BOOT_PREPARE_LOG:
1595 if (is_any_replay()) {
1596 dout(2) << "Booting: " << step << ": replaying mds log" << dendl;
1597 MDSGatherBuilder gather(g_ceph_context,
1598 new C_MDS_BootStart(this, MDS_BOOT_REPLAY_DONE));
1599
1600 if (!standby_replaying) {
1601 dout(2) << "Booting: " << step << ": waiting for purge queue recovered" << dendl;
1602 purge_queue.wait_for_recovery(new C_IO_Wrapper(this, gather.new_sub()));
1603 }
1604
1605 mdlog->replay(gather.new_sub());
1606 gather.activate();
1607 } else {
1608 dout(2) << "Booting: " << step << ": positioning at end of old mds log" << dendl;
1609 mdlog->append();
1610 starting_done();
1611 }
1612 break;
1613 case MDS_BOOT_REPLAY_DONE:
1614 ceph_assert(is_any_replay());
1615
1616 // Sessiontable and inotable should be in sync after replay, validate
1617 // that they are consistent.
1618 validate_sessions();
1619
1620 replay_done();
1621 break;
1622 }
1623}
1624
1625void MDSRank::validate_sessions()
1626{
1627 ceph_assert(mds_lock.is_locked_by_me());
1628 bool valid = true;
1629
1630 // Identify any sessions whose state is inconsistent with other metadata,
1631 // after they have been loaded from rados during startup.
1632 // Mitigate bugs like: http://tracker.ceph.com/issues/16842
1633 for (const auto &i : sessionmap.get_sessions()) {
1634 Session *session = i.second;
1635 interval_set<inodeno_t> badones;
1636 if (inotable->intersects_free(session->info.prealloc_inos, &badones)) {
1637 clog->error() << "client " << *session
1638 << " loaded with preallocated inodes that are inconsistent with inotable";
1639 valid = false;
1640 }
1641 }
1642
1643 if (!valid) {
1644 damaged();
1645 ceph_assert(valid);
1646 }
1647}
1648
1649void MDSRank::starting_done()
1650{
1651 dout(3) << "starting_done" << dendl;
1652 ceph_assert(is_starting());
1653 request_state(MDSMap::STATE_ACTIVE);
1654
1655 mdlog->start_new_segment();
1656
1657 // sync snaptable cache
1658 snapclient->sync(new C_MDSInternalNoop);
1659}
1660
1661
1662void MDSRank::calc_recovery_set()
1663{
1664 // initialize gather sets
1665 set<mds_rank_t> rs;
1666 mdsmap->get_recovery_mds_set(rs);
1667 rs.erase(whoami);
1668 mdcache->set_recovery_set(rs);
1669
1670 dout(1) << " recovery set is " << rs << dendl;
1671}
1672
1673
1674void MDSRank::replay_start()
1675{
1676 dout(1) << "replay_start" << dendl;
1677
1678 if (is_standby_replay())
1679 standby_replaying = true;
1680
1681 calc_recovery_set();
1682
1683 // Check if we need to wait for a newer OSD map before starting
1684 Context *fin = new C_IO_Wrapper(this, new C_MDS_BootStart(this, MDS_BOOT_INITIAL));
1685 bool const ready = objecter->wait_for_map(
1686 mdsmap->get_last_failure_osd_epoch(),
1687 fin);
1688
1689 if (ready) {
1690 delete fin;
1691 boot_start();
1692 } else {
1693 dout(1) << " waiting for osdmap " << mdsmap->get_last_failure_osd_epoch()
1694 << " (which blacklists prior instance)" << dendl;
1695 }
1696}
1697
1698
1699class MDSRank::C_MDS_StandbyReplayRestartFinish : public MDSIOContext {
1700 uint64_t old_read_pos;
1701public:
1702 C_MDS_StandbyReplayRestartFinish(MDSRank *mds_, uint64_t old_read_pos_) :
1703 MDSIOContext(mds_), old_read_pos(old_read_pos_) {}
1704 void finish(int r) override {
1705 mds->_standby_replay_restart_finish(r, old_read_pos);
1706 }
1707 void print(ostream& out) const override {
1708 out << "standby_replay_restart";
1709 }
1710};
1711
1712void MDSRank::_standby_replay_restart_finish(int r, uint64_t old_read_pos)
1713{
1714 if (old_read_pos < mdlog->get_journaler()->get_trimmed_pos()) {
1715 dout(0) << "standby MDS fell behind active MDS journal's expire_pos, restarting" << dendl;
1716 respawn(); /* we're too far back, and this is easier than
1717 trying to reset everything in the cache, etc */
1718 } else {
1719 mdlog->standby_trim_segments();
1720 boot_start(MDS_BOOT_PREPARE_LOG, r);
1721 }
1722}
1723
1724class MDSRank::C_MDS_StandbyReplayRestart : public MDSInternalContext {
1725public:
1726 explicit C_MDS_StandbyReplayRestart(MDSRank *m) : MDSInternalContext(m) {}
1727 void finish(int r) override {
1728 ceph_assert(!r);
1729 mds->standby_replay_restart();
1730 }
1731};
1732
1733void MDSRank::standby_replay_restart()
1734{
1735 if (standby_replaying) {
1736 /* Go around for another pass of replaying in standby */
1737 dout(5) << "Restarting replay as standby-replay" << dendl;
1738 mdlog->get_journaler()->reread_head_and_probe(
1739 new C_MDS_StandbyReplayRestartFinish(
1740 this,
1741 mdlog->get_journaler()->get_read_pos()));
1742 } else {
1743 /* We are transitioning out of standby: wait for OSD map update
1744 before making final pass */
1745 dout(1) << "standby_replay_restart (final takeover pass)" << dendl;
1746 Context *fin = new C_IO_Wrapper(this, new C_MDS_StandbyReplayRestart(this));
1747 bool ready = objecter->wait_for_map(mdsmap->get_last_failure_osd_epoch(), fin);
1748 if (ready) {
1749 delete fin;
1750 mdlog->get_journaler()->reread_head_and_probe(
1751 new C_MDS_StandbyReplayRestartFinish(
1752 this,
1753 mdlog->get_journaler()->get_read_pos()));
1754
1755 dout(1) << " opening purge_queue (async)" << dendl;
1756 purge_queue.open(NULL);
1757 dout(1) << " opening open_file_table (async)" << dendl;
1758 mdcache->open_file_table.load(nullptr);
1759 } else {
1760 dout(1) << " waiting for osdmap " << mdsmap->get_last_failure_osd_epoch()
1761 << " (which blacklists prior instance)" << dendl;
1762 }
1763 }
1764}
1765
1766void MDSRank::replay_done()
1767{
1768 if (!standby_replaying) {
1769 dout(1) << "Finished replaying journal" << dendl;
1770 } else {
1771 dout(5) << "Finished replaying journal as standby-replay" << dendl;
1772 }
1773
1774 if (is_standby_replay()) {
1775 // The replay was done in standby state, and we are still in that state
1776 ceph_assert(standby_replaying);
1777 dout(10) << "setting replay timer" << dendl;
1778 timer.add_event_after(g_conf()->mds_replay_interval,
1779 new C_MDS_StandbyReplayRestart(this));
1780 return;
1781 } else if (standby_replaying) {
1782 // The replay was done in standby state, we have now _left_ that state
1783 dout(10) << " last replay pass was as a standby; making final pass" << dendl;
1784 standby_replaying = false;
1785 standby_replay_restart();
1786 return;
1787 } else {
1788 // Replay is complete, journal read should be up to date
11fdf7f2
TL
1789 ceph_assert(mdlog->get_journaler()->get_read_pos() == mdlog->get_journaler()->get_write_pos());
1790 ceph_assert(!is_standby_replay());
7c673cae
FG
1791
1792 // Reformat and come back here
11fdf7f2 1793 if (mdlog->get_journaler()->get_stream_format() < g_conf()->mds_journal_format) {
f64942e4 1794 dout(4) << "reformatting journal on standby-replay->replay transition" << dendl;
7c673cae
FG
1795 mdlog->reopen(new C_MDS_BootStart(this, MDS_BOOT_REPLAY_DONE));
1796 return;
1797 }
1798 }
1799
1800 dout(1) << "making mds journal writeable" << dendl;
1801 mdlog->get_journaler()->set_writeable();
1802 mdlog->get_journaler()->trim_tail();
1803
11fdf7f2
TL
1804 if (mdsmap->get_tableserver() == whoami &&
1805 snapserver->upgrade_format()) {
1806 dout(1) << "upgrading snaptable format" << dendl;
1807 snapserver->save(new C_MDSInternalNoop);
1808 }
1809
1810 if (g_conf()->mds_wipe_sessions) {
7c673cae
FG
1811 dout(1) << "wiping out client sessions" << dendl;
1812 sessionmap.wipe();
1813 sessionmap.save(new C_MDSInternalNoop);
1814 }
11fdf7f2 1815 if (g_conf()->mds_wipe_ino_prealloc) {
7c673cae
FG
1816 dout(1) << "wiping out ino prealloc from sessions" << dendl;
1817 sessionmap.wipe_ino_prealloc();
1818 sessionmap.save(new C_MDSInternalNoop);
1819 }
11fdf7f2
TL
1820 if (g_conf()->mds_skip_ino) {
1821 inodeno_t i = g_conf()->mds_skip_ino;
7c673cae
FG
1822 dout(1) << "skipping " << i << " inodes" << dendl;
1823 inotable->skip_inos(i);
1824 inotable->save(new C_MDSInternalNoop);
1825 }
1826
1827 if (mdsmap->get_num_in_mds() == 1 &&
1828 mdsmap->get_num_failed_mds() == 0) { // just me!
1829 dout(2) << "i am alone, moving to state reconnect" << dendl;
1830 request_state(MDSMap::STATE_RECONNECT);
11fdf7f2
TL
1831 // sync snaptable cache
1832 snapclient->sync(new C_MDSInternalNoop);
7c673cae
FG
1833 } else {
1834 dout(2) << "i am not alone, moving to state resolve" << dendl;
1835 request_state(MDSMap::STATE_RESOLVE);
1836 }
1837}
1838
1839void MDSRank::reopen_log()
1840{
1841 dout(1) << "reopen_log" << dendl;
1842 mdcache->rollback_uncommitted_fragments();
1843}
1844
7c673cae
FG
1845void MDSRank::resolve_start()
1846{
1847 dout(1) << "resolve_start" << dendl;
1848
1849 reopen_log();
1850
1851 mdcache->resolve_start(new C_MDS_VoidFn(this, &MDSRank::resolve_done));
1852 finish_contexts(g_ceph_context, waiting_for_resolve);
1853}
11fdf7f2 1854
7c673cae
FG
1855void MDSRank::resolve_done()
1856{
1857 dout(1) << "resolve_done" << dendl;
1858 request_state(MDSMap::STATE_RECONNECT);
11fdf7f2
TL
1859 // sync snaptable cache
1860 snapclient->sync(new C_MDSInternalNoop);
7c673cae
FG
1861}
1862
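// Entering RECONNECT: reopen the log if we came straight from replay, drop
// any already-blacklisted client sessions so we do not wait for them, then
// ask the Server to reconnect the remaining clients.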
1863void MDSRank::reconnect_start()
1864{
1865 dout(1) << "reconnect_start" << dendl;
1866
1867 if (last_state == MDSMap::STATE_REPLAY) {
1868 reopen_log();
1869 }
1870
31f18b77
FG
1871 // Drop any blacklisted clients from the SessionMap before going
1872 // into reconnect, so that we don't wait for them.
1873 objecter->enable_blacklist_events();
1874 std::set<entity_addr_t> blacklist;
1875 epoch_t epoch = 0;
11fdf7f2 1876 objecter->with_osdmap([&blacklist, &epoch](const OSDMap& o) {
31f18b77
FG
1877 o.get_blacklist(&blacklist);
1878 epoch = o.get_epoch();
1879 });
1880 auto killed = server->apply_blacklist(blacklist);
1881 dout(4) << "reconnect_start: killed " << killed << " blacklisted sessions ("
1882 << blacklist.size() << " blacklist entries, "
1883 << blacklist.size() << " blacklist entries, "
1883 << sessionmap.get_sessions().size() << " total sessions)" << dendl;
1884 if (killed) {
1885 set_osd_epoch_barrier(epoch);
1886 }
1887
7c673cae
FG
1888 server->reconnect_clients(new C_MDS_VoidFn(this, &MDSRank::reconnect_done));
1889 finish_contexts(g_ceph_context, waiting_for_reconnect);
1890}
1891void MDSRank::reconnect_done()
1892{
1893 dout(1) << "reconnect_done" << dendl;
1894 request_state(MDSMap::STATE_REJOIN); // move to rejoin state
1895}
1896
1897void MDSRank::rejoin_joint_start()
1898{
1899 dout(1) << "rejoin_joint_start" << dendl;
1900 mdcache->rejoin_send_rejoins();
1901}
1902void MDSRank::rejoin_start()
1903{
1904 dout(1) << "rejoin_start" << dendl;
1905 mdcache->rejoin_start(new C_MDS_VoidFn(this, &MDSRank::rejoin_done));
a8e16298 1906 finish_contexts(g_ceph_context, waiting_for_rejoin);
7c673cae
FG
1907}
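// Rejoin finished: if the cache holds no subtrees, rank 0 is damaged and any
// other rank simply stops; otherwise go ACTIVE, or CLIENTREPLAY if there are
// queued replayed requests or sessions still waiting to be reclaimed.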
1908void MDSRank::rejoin_done()
1909{
1910 dout(1) << "rejoin_done" << dendl;
1911 mdcache->show_subtrees();
1912 mdcache->show_cache();
1913
1914 // funny case: is our cache empty? no subtrees?
1915 if (!mdcache->is_subtrees()) {
1916 if (whoami == 0) {
1917 // The root should always have a subtree!
1918 clog->error() << "No subtrees found for root MDS rank!";
1919 damaged();
11fdf7f2 1920 ceph_assert(mdcache->is_subtrees());
7c673cae
FG
1921 } else {
1922 dout(1) << " empty cache, no subtrees, leaving cluster" << dendl;
1923 request_state(MDSMap::STATE_STOPPED);
1924 }
1925 return;
1926 }
1927
11fdf7f2 1928 if (replay_queue.empty() && !server->get_num_pending_reclaim()) {
7c673cae 1929 request_state(MDSMap::STATE_ACTIVE);
11fdf7f2
TL
1930 } else {
1931 replaying_requests_done = replay_queue.empty();
7c673cae 1932 request_state(MDSMap::STATE_CLIENTREPLAY);
11fdf7f2 1933 }
7c673cae
FG
1934}
1935
1936void MDSRank::clientreplay_start()
1937{
1938 dout(1) << "clientreplay_start" << dendl;
1939 finish_contexts(g_ceph_context, waiting_for_replay); // kick waiters
1940 mdcache->start_files_to_recover();
1941 queue_one_replay();
1942}
1943
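// Feed one queued replayed client request back into the dispatcher; once the
// queue is drained, flush the log and check whether clientreplay is complete.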
1944bool MDSRank::queue_one_replay()
1945{
11fdf7f2
TL
1946 if (!replay_queue.empty()) {
1947 queue_waiter(replay_queue.front());
1948 replay_queue.pop_front();
1949 return true;
1950 }
1951 if (!replaying_requests_done) {
1952 replaying_requests_done = true;
1953 mdlog->flush();
1954 }
1955 maybe_clientreplay_done();
1956 return false;
1957}
1958
1959void MDSRank::maybe_clientreplay_done()
1960{
1961 if (is_clientreplay() && get_want_state() == MDSMap::STATE_CLIENTREPLAY) {
1962
1963 // don't go to active if there are sessions waiting to be reclaimed
1964 if (replaying_requests_done && !server->get_num_pending_reclaim()) {
1965 mdlog->wait_for_safe(new C_MDS_VoidFn(this, &MDSRank::clientreplay_done));
1966 return;
1967 }
1968
1969 dout(1) << " still have " << replay_queue.size() + (int)!replaying_requests_done
1970 << " requests need to be replayed, " << server->get_num_pending_reclaim()
1971 << " sessions need to be reclaimed" << dendl;
7c673cae 1972 }
7c673cae
FG
1973}
1974
1975void MDSRank::clientreplay_done()
1976{
1977 dout(1) << "clientreplay_done" << dendl;
1978 request_state(MDSMap::STATE_ACTIVE);
1979}
1980
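// Entering ACTIVE: open the root (if we were just created/started), clean up
// open-file lists, hand back caps imported from other ranks, and kick any
// contexts waiting for replay or for this rank to become active.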
1981void MDSRank::active_start()
1982{
1983 dout(1) << "active_start" << dendl;
1984
28e407b8
AA
1985 if (last_state == MDSMap::STATE_CREATING ||
1986 last_state == MDSMap::STATE_STARTING) {
7c673cae
FG
1987 mdcache->open_root();
1988 }
1989
1990 mdcache->clean_open_file_lists();
1991 mdcache->export_remaining_imported_caps();
1992 finish_contexts(g_ceph_context, waiting_for_replay); // kick waiters
1993 mdcache->start_files_to_recover();
1994
1995 mdcache->reissue_all_caps();
7c673cae
FG
1996
1997 finish_contexts(g_ceph_context, waiting_for_active); // kick waiters
1998}
1999
2000void MDSRank::recovery_done(int oldstate)
2001{
2002 dout(1) << "recovery_done -- successful recovery!" << dendl;
11fdf7f2 2003 ceph_assert(is_clientreplay() || is_active());
7c673cae
FG
2004
2005 if (oldstate == MDSMap::STATE_CREATING)
2006 return;
2007
2008 mdcache->start_recovered_truncates();
2009 mdcache->do_file_recover();
2010
2011 // tell connected clients
2012 //bcast_mds_map(); // not anymore, they get this from the monitor
2013
2014 mdcache->populate_mydir();
2015}
2016
2017void MDSRank::creating_done()
2018{
2019 dout(1) << "creating_done" << dendl;
2020 request_state(MDSMap::STATE_ACTIVE);
11fdf7f2
TL
2021 // sync snaptable cache
2022 snapclient->sync(new C_MDSInternalNoop);
7c673cae
FG
2023}
2024
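// First boot of a brand-new rank: create a fresh journal, (on the root rank)
// the root hierarchy, our mydir hierarchy and the global snaprealm, then
// reset and persist the inotable, sessionmap, purge queue and, if we are the
// table server, the snaptable, before journaling the first subtree map.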
2025void MDSRank::boot_create()
2026{
2027 dout(3) << "boot_create" << dendl;
2028
2029 MDSGatherBuilder fin(g_ceph_context, new C_MDS_VoidFn(this, &MDSRank::creating_done));
2030
2031 mdcache->init_layouts();
2032
7c673cae
FG
2033 inotable->set_rank(whoami);
2034 sessionmap.set_rank(whoami);
2035
2036 // start with a fresh journal
2037 dout(10) << "boot_create creating fresh journal" << dendl;
2038 mdlog->create(fin.new_sub());
2039
2040 // open new journal segment, but do not journal subtree map (yet)
2041 mdlog->prepare_new_segment();
2042
2043 if (whoami == mdsmap->get_root()) {
2044 dout(3) << "boot_create creating fresh hierarchy" << dendl;
2045 mdcache->create_empty_hierarchy(fin.get());
2046 }
2047
2048 dout(3) << "boot_create creating mydir hierarchy" << dendl;
2049 mdcache->create_mydir_hierarchy(fin.get());
2050
11fdf7f2
TL
2051 dout(3) << "boot_create creating global snaprealm" << dendl;
2052 mdcache->create_global_snaprealm();
2053
7c673cae
FG
2054 // fixme: fake out inotable (reset, pretend loaded)
2055 dout(10) << "boot_create creating fresh inotable table" << dendl;
2056 inotable->reset();
2057 inotable->save(fin.new_sub());
2058
2059 // write empty sessionmap
2060 sessionmap.save(fin.new_sub());
2061
2062 // Create empty purge queue
2063 purge_queue.create(new C_IO_Wrapper(this, fin.new_sub()));
2064
2065 // initialize tables
2066 if (mdsmap->get_tableserver() == whoami) {
2067 dout(10) << "boot_create creating fresh snaptable" << dendl;
11fdf7f2 2068 snapserver->set_rank(whoami);
7c673cae
FG
2069 snapserver->reset();
2070 snapserver->save(fin.new_sub());
2071 }
2072
11fdf7f2 2073 ceph_assert(g_conf()->mds_kill_create_at != 1);
7c673cae
FG
2074
2075 // ok now journal it
2076 mdlog->journal_segment_subtree_map(fin.new_sub());
2077 mdlog->flush();
2078
31f18b77
FG
2079 // Usually we do this during reconnect, but creation skips that.
2080 objecter->enable_blacklist_events();
2081
7c673cae
FG
2082 fin.activate();
2083}
2084
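// Entering STOPPING: if we are the last active rank and client sessions
// remain, evict them all first, then hand off to mdcache->shutdown_start().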
2085void MDSRank::stopping_start()
2086{
a8e16298 2087 dout(2) << "Stopping..." << dendl;
7c673cae
FG
2088
2089 if (mdsmap->get_num_in_mds() == 1 && !sessionmap.empty()) {
11fdf7f2
TL
2090 std::vector<Session*> victims;
2091 const auto& sessions = sessionmap.get_sessions();
2092 for (const auto& p : sessions) {
2093 if (!p.first.is_client()) {
2094 continue;
2095 }
2096
2097 Session *s = p.second;
2098 victims.push_back(s);
2099 }
2100
2101 dout(20) << __func__ << " matched " << victims.size() << " sessions" << dendl;
2102 ceph_assert(!victims.empty());
2103
2104 C_GatherBuilder gather(g_ceph_context, new C_MDSInternalNoop);
2105 for (const auto &s : victims) {
2106 std::stringstream ss;
2107 evict_client(s->get_client().v, false,
2108 g_conf()->mds_session_blacklist_on_evict, ss, gather.new_sub());
2109 }
2110 gather.activate();
7c673cae
FG
2111 }
2112
2113 mdcache->shutdown_start();
2114}
2115
2116void MDSRank::stopping_done()
2117{
a8e16298 2118 dout(2) << "Finished stopping..." << dendl;
7c673cae
FG
2119
2120 // tell monitor we shut down cleanly.
2121 request_state(MDSMap::STATE_STOPPED);
2122}
2123
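// Drive the rank state machine from a new MDSMap: record our state change,
// notice peers that failed, restarted or stopped since the old map, and run
// the *_start handler that matches the state the monitors moved us into.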
2124void MDSRankDispatcher::handle_mds_map(
11fdf7f2
TL
2125 const MMDSMap::const_ref &m,
2126 const MDSMap &oldmap)
7c673cae
FG
2127{
2128 // I am only to be passed MDSMaps in which I hold a rank
11fdf7f2 2129 ceph_assert(whoami != MDS_RANK_NONE);
7c673cae
FG
2130
2131 MDSMap::DaemonState oldstate = state;
2132 mds_gid_t mds_gid = mds_gid_t(monc->get_global_id());
2133 state = mdsmap->get_state_gid(mds_gid);
2134 if (state != oldstate) {
2135 last_state = oldstate;
2136 incarnation = mdsmap->get_inc_gid(mds_gid);
2137 }
2138
2139 version_t epoch = m->get_epoch();
2140
2141 // note source's map version
2142 if (m->get_source().is_mds() &&
2143 peer_mdsmap_epoch[mds_rank_t(m->get_source().num())] < epoch) {
2144 dout(15) << " peer " << m->get_source()
2145 << " has mdsmap epoch >= " << epoch
2146 << dendl;
2147 peer_mdsmap_epoch[mds_rank_t(m->get_source().num())] = epoch;
2148 }
2149
2150 // Validate state transitions while I hold a rank
2151 if (!MDSMap::state_transition_valid(oldstate, state)) {
2152 derr << "Invalid state transition " << ceph_mds_state_name(oldstate)
2153 << "->" << ceph_mds_state_name(state) << dendl;
2154 respawn();
2155 }
2156
2157 if (oldstate != state) {
2158 // update messenger.
2159 if (state == MDSMap::STATE_STANDBY_REPLAY) {
2160 dout(1) << "handle_mds_map i am now mds." << mds_gid << "." << incarnation
2161 << " replaying mds." << whoami << "." << incarnation << dendl;
2162 messenger->set_myname(entity_name_t::MDS(mds_gid));
2163 } else {
2164 dout(1) << "handle_mds_map i am now mds." << whoami << "." << incarnation << dendl;
2165 messenger->set_myname(entity_name_t::MDS(whoami));
2166 }
2167 }
2168
2169 // tell objecter my incarnation
2170 if (objecter->get_client_incarnation() != incarnation)
2171 objecter->set_client_incarnation(incarnation);
2172
11fdf7f2
TL
2173 if (oldmap.get_min_compat_client() != mdsmap->get_min_compat_client())
2174 server->update_required_client_features();
2175
7c673cae 2176 // for debug
11fdf7f2 2177 if (g_conf()->mds_dump_cache_on_map)
7c673cae
FG
2178 mdcache->dump_cache();
2179
1adf2230
AA
2180 cluster_degraded = mdsmap->is_degraded();
2181
2182 // mdsmap and oldmap can be discontinuous. failover might happen in the missing mdsmap.
2183 // the 'restart' set tracks ranks that have restarted since the old mdsmap
2184 set<mds_rank_t> restart;
2185 // replaying mds does not communicate with other ranks
2186 if (state >= MDSMap::STATE_RESOLVE) {
2187 // did someone fail?
2188 // new down?
2189 set<mds_rank_t> olddown, down;
11fdf7f2 2190 oldmap.get_down_mds_set(&olddown);
1adf2230
AA
2191 mdsmap->get_down_mds_set(&down);
2192 for (const auto& r : down) {
11fdf7f2
TL
2193 if (oldmap.have_inst(r) && olddown.count(r) == 0) {
2194 messenger->mark_down_addrs(oldmap.get_addrs(r));
1adf2230
AA
2195 handle_mds_failure(r);
2196 }
2197 }
2198
2199 // did someone fail?
2200 // did their addr/inst change?
2201 set<mds_rank_t> up;
2202 mdsmap->get_up_mds_set(up);
2203 for (const auto& r : up) {
2204 auto& info = mdsmap->get_info(r);
11fdf7f2
TL
2205 if (oldmap.have_inst(r)) {
2206 auto& oldinfo = oldmap.get_info(r);
1adf2230 2207 if (info.inc != oldinfo.inc) {
11fdf7f2 2208 messenger->mark_down_addrs(oldinfo.get_addrs());
1adf2230
AA
2209 if (info.state == MDSMap::STATE_REPLAY ||
2210 info.state == MDSMap::STATE_RESOLVE) {
2211 restart.insert(r);
2212 handle_mds_failure(r);
2213 } else {
11fdf7f2 2214 ceph_assert(info.state == MDSMap::STATE_STARTING ||
1adf2230
AA
2215 info.state == MDSMap::STATE_ACTIVE);
2216 // -> stopped (missing) -> starting -> active
2217 restart.insert(r);
2218 mdcache->migrator->handle_mds_failure_or_stop(r);
11fdf7f2
TL
2219 if (mdsmap->get_tableserver() == whoami)
2220 snapserver->handle_mds_failure_or_stop(r);
1adf2230
AA
2221 }
2222 }
2223 } else {
2224 if (info.state == MDSMap::STATE_REPLAY ||
2225 info.state == MDSMap::STATE_RESOLVE) {
2226 // -> starting/creating (missing) -> active (missing) -> replay -> resolve
2227 restart.insert(r);
2228 handle_mds_failure(r);
2229 } else {
11fdf7f2 2230 ceph_assert(info.state == MDSMap::STATE_CREATING ||
1adf2230
AA
2231 info.state == MDSMap::STATE_STARTING ||
2232 info.state == MDSMap::STATE_ACTIVE);
2233 }
2234 }
2235 }
2236 }
2237
7c673cae
FG
2238 // did it change?
2239 if (oldstate != state) {
2240 dout(1) << "handle_mds_map state change "
2241 << ceph_mds_state_name(oldstate) << " --> "
2242 << ceph_mds_state_name(state) << dendl;
11fdf7f2 2243 beacon.set_want_state(*mdsmap, state);
7c673cae
FG
2244
2245 if (oldstate == MDSMap::STATE_STANDBY_REPLAY) {
2246 dout(10) << "Monitor activated us! Deactivating replay loop" << dendl;
2247 ceph_assert(state == MDSMap::STATE_REPLAY);
2248 } else {
2249 // did i just recover?
2250 if ((is_active() || is_clientreplay()) &&
2251 (oldstate == MDSMap::STATE_CREATING ||
2252 oldstate == MDSMap::STATE_REJOIN ||
2253 oldstate == MDSMap::STATE_RECONNECT))
2254 recovery_done(oldstate);
2255
2256 if (is_active()) {
2257 active_start();
2258 } else if (is_any_replay()) {
2259 replay_start();
2260 } else if (is_resolve()) {
2261 resolve_start();
2262 } else if (is_reconnect()) {
2263 reconnect_start();
2264 } else if (is_rejoin()) {
2265 rejoin_start();
2266 } else if (is_clientreplay()) {
2267 clientreplay_start();
2268 } else if (is_creating()) {
2269 boot_create();
2270 } else if (is_starting()) {
2271 boot_start();
2272 } else if (is_stopping()) {
11fdf7f2 2273 ceph_assert(oldstate == MDSMap::STATE_ACTIVE);
7c673cae
FG
2274 stopping_start();
2275 }
2276 }
2277 }
2278
2279 // RESOLVE
2280 // is someone else newly resolving?
1adf2230 2281 if (state >= MDSMap::STATE_RESOLVE) {
11fdf7f2
TL
2282 // recover snaptable
2283 if (mdsmap->get_tableserver() == whoami) {
2284 if (oldstate < MDSMap::STATE_RESOLVE) {
2285 set<mds_rank_t> s;
2286 mdsmap->get_mds_set_lower_bound(s, MDSMap::STATE_RESOLVE);
2287 snapserver->finish_recovery(s);
2288 } else {
2289 set<mds_rank_t> old_set, new_set;
2290 oldmap.get_mds_set_lower_bound(old_set, MDSMap::STATE_RESOLVE);
2291 mdsmap->get_mds_set_lower_bound(new_set, MDSMap::STATE_RESOLVE);
2292 for (const auto& r : new_set) {
2293 if (r == whoami)
2294 continue; // not me
2295 if (!old_set.count(r) || restart.count(r)) { // newly so?
2296 snapserver->handle_mds_recovery(r);
2297 }
2298 }
2299 }
2300 }
2301
2302 if ((!oldmap.is_resolving() || !restart.empty()) && mdsmap->is_resolving()) {
7c673cae
FG
2303 set<mds_rank_t> resolve;
2304 mdsmap->get_mds_set(resolve, MDSMap::STATE_RESOLVE);
2305 dout(10) << " resolve set is " << resolve << dendl;
2306 calc_recovery_set();
2307 mdcache->send_resolves();
2308 }
2309 }
2310
2311 // REJOIN
2312 // is everybody finally rejoining?
1adf2230 2313 if (state >= MDSMap::STATE_REJOIN) {
7c673cae 2314 // did we start?
11fdf7f2 2315 if (!oldmap.is_rejoining() && mdsmap->is_rejoining())
7c673cae
FG
2316 rejoin_joint_start();
2317
2318 // did we finish?
11fdf7f2
TL
2319 if (g_conf()->mds_dump_cache_after_rejoin &&
2320 oldmap.is_rejoining() && !mdsmap->is_rejoining())
7c673cae
FG
2321 mdcache->dump_cache(); // for DEBUG only
2322
d2e6a577
FG
2323 if (oldstate >= MDSMap::STATE_REJOIN ||
2324 oldstate == MDSMap::STATE_STARTING) {
7c673cae
FG
2325 // ACTIVE|CLIENTREPLAY|REJOIN => we can discover from them.
2326 set<mds_rank_t> olddis, dis;
11fdf7f2 2327 oldmap.get_mds_set_lower_bound(olddis, MDSMap::STATE_REJOIN);
1adf2230
AA
2328 mdsmap->get_mds_set_lower_bound(dis, MDSMap::STATE_REJOIN);
2329 for (const auto& r : dis) {
2330 if (r == whoami)
2331 continue; // not me
2332 if (!olddis.count(r) || restart.count(r)) { // newly so?
2333 mdcache->kick_discovers(r);
2334 mdcache->kick_open_ino_peers(r);
7c673cae 2335 }
1adf2230 2336 }
7c673cae
FG
2337 }
2338 }
2339
11fdf7f2 2340 if (oldmap.is_degraded() && !cluster_degraded && state >= MDSMap::STATE_ACTIVE) {
7c673cae
FG
2341 dout(1) << "cluster recovered." << dendl;
2342 auto it = waiting_for_active_peer.find(MDS_RANK_NONE);
2343 if (it != waiting_for_active_peer.end()) {
2344 queue_waiters(it->second);
2345 waiting_for_active_peer.erase(it);
2346 }
2347 }
2348
2349 // did someone go active?
1adf2230
AA
2350 if (state >= MDSMap::STATE_CLIENTREPLAY &&
2351 oldstate >= MDSMap::STATE_CLIENTREPLAY) {
7c673cae 2352 set<mds_rank_t> oldactive, active;
11fdf7f2 2353 oldmap.get_mds_set_lower_bound(oldactive, MDSMap::STATE_CLIENTREPLAY);
1adf2230
AA
2354 mdsmap->get_mds_set_lower_bound(active, MDSMap::STATE_CLIENTREPLAY);
2355 for (const auto& r : active) {
2356 if (r == whoami)
2357 continue; // not me
2358 if (!oldactive.count(r) || restart.count(r)) // newly so?
2359 handle_mds_recovery(r);
7c673cae
FG
2360 }
2361 }
2362
11fdf7f2 2363 if (is_clientreplay() || is_active() || is_stopping()) {
7c673cae
FG
2364 // did anyone stop?
2365 set<mds_rank_t> oldstopped, stopped;
11fdf7f2 2366 oldmap.get_stopped_mds_set(oldstopped);
7c673cae 2367 mdsmap->get_stopped_mds_set(stopped);
1adf2230 2368 for (const auto& r : stopped)
11fdf7f2 2369 if (oldstopped.count(r) == 0) { // newly so?
1adf2230 2370 mdcache->migrator->handle_mds_failure_or_stop(r);
11fdf7f2
TL
2371 if (mdsmap->get_tableserver() == whoami)
2372 snapserver->handle_mds_failure_or_stop(r);
2373 }
7c673cae
FG
2374 }
2375
2376 {
11fdf7f2 2377 map<epoch_t,MDSContext::vec >::iterator p = waiting_for_mdsmap.begin();
7c673cae 2378 while (p != waiting_for_mdsmap.end() && p->first <= mdsmap->get_epoch()) {
11fdf7f2 2379 MDSContext::vec ls;
7c673cae
FG
2380 ls.swap(p->second);
2381 waiting_for_mdsmap.erase(p++);
91327a77 2382 queue_waiters(ls);
7c673cae
FG
2383 }
2384 }
2385
2386 if (is_active()) {
2387 // Before going active, set OSD epoch barrier to latest (so that
2388 // we don't risk handing out caps to clients with old OSD maps that
2389 // might not include barriers from the previous incarnation of this MDS)
2390 set_osd_epoch_barrier(objecter->with_osdmap(
2391 std::mem_fn(&OSDMap::get_epoch)));
7c673cae 2392
11fdf7f2
TL
2393 /* Now check if we should hint to the OSD that a read may follow */
2394 if (mdsmap->has_standby_replay(whoami))
2395 mdlog->set_write_iohint(0);
2396 else
2397 mdlog->set_write_iohint(CEPH_OSD_OP_FLAG_FADVISE_DONTNEED);
7c673cae
FG
2398 }
2399
11fdf7f2 2400 if (oldmap.get_max_mds() != mdsmap->get_max_mds()) {
7c673cae
FG
2401 purge_queue.update_op_limit(*mdsmap);
2402 }
eafe8130 2403
92f5a8d4
TL
2404 if (scrubstack->is_scrubbing()) {
2405 if (mdsmap->get_max_mds() > 1) {
2406 auto c = new C_MDSInternalNoop;
2407 scrubstack->scrub_abort(c);
2408 }
2409 }
eafe8130 2410 mdcache->handle_mdsmap(*mdsmap);
7c673cae
FG
2411}
2412
2413void MDSRank::handle_mds_recovery(mds_rank_t who)
2414{
2415 dout(5) << "handle_mds_recovery mds." << who << dendl;
2416
2417 mdcache->handle_mds_recovery(who);
2418
7c673cae
FG
2419 queue_waiters(waiting_for_active_peer[who]);
2420 waiting_for_active_peer.erase(who);
2421}
2422
2423void MDSRank::handle_mds_failure(mds_rank_t who)
2424{
2425 if (who == whoami) {
2426 dout(5) << "handle_mds_failure for myself; not doing anything" << dendl;
2427 return;
2428 }
2429 dout(5) << "handle_mds_failure mds." << who << dendl;
2430
2431 mdcache->handle_mds_failure(who);
2432
11fdf7f2
TL
2433 if (mdsmap->get_tableserver() == whoami)
2434 snapserver->handle_mds_failure_or_stop(who);
2435
7c673cae
FG
2436 snapclient->handle_mds_failure(who);
2437}
2438
11fdf7f2
TL
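// Admin-socket command dispatcher, reached for example via
//   ceph daemon mds.<id> flush journal
// (daemon id illustrative). Returns true if the command was handled here,
// false to let the caller try the remaining command tables.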
2439bool MDSRankDispatcher::handle_asok_command(std::string_view command,
2440 const cmdmap_t& cmdmap,
2441 Formatter *f,
2442 std::ostream& ss)
7c673cae
FG
2443{
2444 if (command == "dump_ops_in_flight" ||
2445 command == "ops") {
2446 if (!op_tracker.dump_ops_in_flight(f)) {
2447 ss << "op_tracker tracking is not enabled now, so no ops are tracked currently, even those get stuck. \
11fdf7f2 2448 please enable \"mds_enable_op_tracker\", and the tracker will start to track new ops received afterwards.";
7c673cae
FG
2449 }
2450 } else if (command == "dump_blocked_ops") {
2451 if (!op_tracker.dump_ops_in_flight(f, true)) {
2452 ss << "op_tracker tracking is not enabled now, so no ops are tracked currently, even those get stuck. \
11fdf7f2 2453 Please enable \"mds_enable_op_tracker\", and the tracker will start to track new ops received afterwards.";
7c673cae
FG
2454 }
2455 } else if (command == "dump_historic_ops") {
2456 if (!op_tracker.dump_historic_ops(f)) {
2457 ss << "op_tracker tracking is not enabled now, so no ops are tracked currently, even those get stuck. \
11fdf7f2 2458 please enable \"mds_enable_op_tracker\", and the tracker will start to track new ops received afterwards.";
7c673cae
FG
2459 }
2460 } else if (command == "dump_historic_ops_by_duration") {
2461 if (!op_tracker.dump_historic_ops(f, true)) {
2462 ss << "op_tracker tracking is not enabled now, so no ops are tracked currently, even those get stuck. \
11fdf7f2 2463 please enable \"mds_enable_op_tracker\", and the tracker will start to track new ops received afterwards.";
7c673cae
FG
2464 }
2465 } else if (command == "osdmap barrier") {
2466 int64_t target_epoch = 0;
2467 bool got_val = cmd_getval(g_ceph_context, cmdmap, "target_epoch", target_epoch);
2468
2469 if (!got_val) {
2470 ss << "no target epoch given";
2471 return true;
2472 }
2473
2474 mds_lock.Lock();
2475 set_osd_epoch_barrier(target_epoch);
2476 mds_lock.Unlock();
2477
2478 C_SaferCond cond;
2479 bool already_got = objecter->wait_for_map(target_epoch, &cond);
2480 if (!already_got) {
2481 dout(4) << __func__ << ": waiting for OSD epoch " << target_epoch << dendl;
2482 cond.wait();
2483 }
2484 } else if (command == "session ls") {
11fdf7f2 2485 std::lock_guard l(mds_lock);
7c673cae
FG
2486
2487 heartbeat_reset();
2488
2489 dump_sessions(SessionFilter(), f);
2490 } else if (command == "session evict") {
2491 std::string client_id;
2492 const bool got_arg = cmd_getval(g_ceph_context, cmdmap, "client_id", client_id);
2493 if(!got_arg) {
2494 ss << "Invalid client_id specified";
2495 return true;
2496 }
2497
2498 mds_lock.Lock();
31f18b77
FG
2499 std::stringstream dss;
2500 bool evicted = evict_client(strtol(client_id.c_str(), 0, 10), true,
11fdf7f2 2501 g_conf()->mds_session_blacklist_on_evict, dss);
31f18b77 2502 if (!evicted) {
7c673cae
FG
2503 dout(15) << dss.str() << dendl;
2504 ss << dss.str();
2505 }
2506 mds_lock.Unlock();
92f5a8d4
TL
2507 } else if (command == "session config") {
2508 int64_t client_id;
2509 std::string option;
2510 std::string value;
2511
2512 cmd_getval(g_ceph_context, cmdmap, "client_id", client_id);
2513 cmd_getval(g_ceph_context, cmdmap, "option", option);
2514 bool got_value = cmd_getval(g_ceph_context, cmdmap, "value", value);
2515
2516 mds_lock.Lock();
2517 config_client(client_id, !got_value, option, value, ss);
2518 mds_lock.Unlock();
7c673cae
FG
2519 } else if (command == "scrub_path") {
2520 string path;
2521 vector<string> scrubop_vec;
2522 cmd_getval(g_ceph_context, cmdmap, "scrubops", scrubop_vec);
2523 cmd_getval(g_ceph_context, cmdmap, "path", path);
11fdf7f2 2524
92f5a8d4
TL
2525 /* Multiple MDS scrub is not currently supported. See also: https://tracker.ceph.com/issues/12274 */
2526 if (mdsmap->get_max_mds() > 1) {
2527 ss << "Scrub is not currently supported for multiple active MDS. Please reduce max_mds to 1 and then scrub.";
2528 return true;
2529 }
2530
11fdf7f2
TL
2531 C_SaferCond cond;
2532 command_scrub_start(f, path, "", scrubop_vec, &cond);
2533 cond.wait();
7c673cae
FG
2534 } else if (command == "tag path") {
2535 string path;
2536 cmd_getval(g_ceph_context, cmdmap, "path", path);
2537 string tag;
2538 cmd_getval(g_ceph_context, cmdmap, "tag", tag);
2539 command_tag_path(f, path, tag);
2540 } else if (command == "flush_path") {
2541 string path;
2542 cmd_getval(g_ceph_context, cmdmap, "path", path);
2543 command_flush_path(f, path);
2544 } else if (command == "flush journal") {
2545 command_flush_journal(f);
2546 } else if (command == "get subtrees") {
2547 command_get_subtrees(f);
2548 } else if (command == "export dir") {
2549 string path;
2550 if(!cmd_getval(g_ceph_context, cmdmap, "path", path)) {
2551 ss << "malformed path";
2552 return true;
2553 }
2554 int64_t rank;
2555 if(!cmd_getval(g_ceph_context, cmdmap, "rank", rank)) {
2556 ss << "malformed rank";
2557 return true;
2558 }
2559 command_export_dir(f, path, (mds_rank_t)rank);
2560 } else if (command == "dump cache") {
11fdf7f2 2561 std::lock_guard l(mds_lock);
7c673cae 2562 string path;
31f18b77 2563 int r;
7c673cae 2564 if(!cmd_getval(g_ceph_context, cmdmap, "path", path)) {
31f18b77 2565 r = mdcache->dump_cache(f);
7c673cae 2566 } else {
31f18b77
FG
2567 r = mdcache->dump_cache(path);
2568 }
2569
2570 if (r != 0) {
2571 ss << "Failed to dump cache: " << cpp_strerror(r);
181888fb
FG
2572 f->reset();
2573 }
2574 } else if (command == "cache status") {
11fdf7f2 2575 std::lock_guard l(mds_lock);
f64942e4 2576 mdcache->cache_status(f);
7c673cae 2577 } else if (command == "dump tree") {
11fdf7f2 2578 command_dump_tree(cmdmap, ss, f);
28e407b8 2579 } else if (command == "dump loads") {
11fdf7f2 2580 std::lock_guard l(mds_lock);
28e407b8
AA
2581 int r = balancer->dump_loads(f);
2582 if (r != 0) {
2583 ss << "Failed to dump loads: " << cpp_strerror(r);
2584 f->reset();
2585 }
11fdf7f2
TL
2586 } else if (command == "dump snaps") {
2587 std::lock_guard l(mds_lock);
2588 string server;
2589 cmd_getval(g_ceph_context, cmdmap, "server", server);
2590 if (server == "--server") {
2591 if (mdsmap->get_tableserver() == whoami) {
2592 snapserver->dump(f);
2593 } else {
2594 ss << "Not snapserver";
2595 }
2596 } else {
2597 int r = snapclient->dump_cache(f);
2598 if (r != 0) {
2599 ss << "Failed to dump snapclient: " << cpp_strerror(r);
2600 f->reset();
2601 }
2602 }
7c673cae 2603 } else if (command == "force_readonly") {
11fdf7f2 2604 std::lock_guard l(mds_lock);
7c673cae
FG
2605 mdcache->force_readonly();
2606 } else if (command == "dirfrag split") {
2607 command_dirfrag_split(cmdmap, ss);
2608 } else if (command == "dirfrag merge") {
2609 command_dirfrag_merge(cmdmap, ss);
2610 } else if (command == "dirfrag ls") {
2611 command_dirfrag_ls(cmdmap, ss, f);
11fdf7f2
TL
2612 } else if (command == "openfiles ls") {
2613 command_openfiles_ls(f);
2614 } else if (command == "dump inode") {
2615 command_dump_inode(f, cmdmap, ss);
7c673cae
FG
2616 } else {
2617 return false;
2618 }
2619
2620 return true;
2621}
2622
f64942e4 2623class C_MDS_Send_Command_Reply : public MDSInternalContext {
7c673cae 2624protected:
11fdf7f2 2625 MCommand::const_ref m;
7c673cae 2626public:
11fdf7f2
TL
2627 C_MDS_Send_Command_Reply(MDSRank *_mds, const MCommand::const_ref &_m) :
2628 MDSInternalContext(_mds), m(_m) {}
f64942e4 2629
11fdf7f2 2630 void send(int r, std::string_view ss) {
f64942e4
AA
2631 std::stringstream ds;
2632 send(r, ss, ds);
2633 }
2634
11fdf7f2 2635 void send(int r, std::string_view ss, std::stringstream &ds) {
7c673cae 2636 bufferlist bl;
f64942e4
AA
2637 bl.append(ds);
2638 MDSDaemon::send_command_reply(m, mds, r, bl, ss);
7c673cae 2639 }
f64942e4
AA
2640
2641 void finish(int r) override {
7c673cae
FG
2642 send(r, "");
2643 }
2644};
2645
11fdf7f2
TL
2646class C_ExecAndReply : public C_MDS_Send_Command_Reply {
2647public:
2648 C_ExecAndReply(MDSRank *mds, const MCommand::const_ref &m)
2649 : C_MDS_Send_Command_Reply(mds, m), f(true) {
2650 }
2651
2652 void finish(int r) override {
2653 std::stringstream ds;
2654 std::stringstream ss;
2655 if (r != 0) {
2656 f.flush(ss);
2657 } else {
2658 f.flush(ds);
2659 }
2660
2661 send(r, ss.str(), ds);
2662 }
2663
2664 virtual void exec() = 0;
2665
2666protected:
2667 JSONFormatter f;
2668};
2669
2670class C_CacheDropExecAndReply : public C_ExecAndReply {
2671public:
2672 C_CacheDropExecAndReply(MDSRank *mds, const MCommand::const_ref &m,
2673 uint64_t timeout)
2674 : C_ExecAndReply(mds, m), timeout(timeout) {
2675 }
2676
2677 void exec() override {
2678 mds->command_cache_drop(timeout, &f, this);
2679 }
2680
2681private:
2682 uint64_t timeout;
2683};
2684
2685class C_ScrubExecAndReply : public C_ExecAndReply {
2686public:
2687 C_ScrubExecAndReply(MDSRank *mds, const MCommand::const_ref &m,
2688 const std::string &path, const std::string &tag,
2689 const std::vector<std::string> &scrubop)
2690 : C_ExecAndReply(mds, m), path(path), tag(tag), scrubop(scrubop) {
2691 }
2692
2693 void exec() override {
2694 mds->command_scrub_start(&f, path, tag, scrubop, this);
2695 }
2696
2697private:
2698 std::string path;
2699 std::string tag;
2700 std::vector<std::string> scrubop;
2701};
2702
2703class C_ScrubControlExecAndReply : public C_ExecAndReply {
2704public:
2705 C_ScrubControlExecAndReply(MDSRank *mds, const MCommand::const_ref &m,
2706 const std::string &command)
2707 : C_ExecAndReply(mds, m), command(command) {
2708 }
2709
2710 void exec() override {
2711 if (command == "abort") {
2712 mds->command_scrub_abort(&f, this);
2713 } else if (command == "pause") {
2714 mds->command_scrub_pause(&f, this);
2715 } else {
2716 ceph_abort();
2717 }
2718 }
2719
2720 void finish(int r) override {
2721 f.open_object_section("result");
2722 f.dump_int("return_code", r);
2723 f.close_section();
2724 C_ExecAndReply::finish(r);
2725 }
2726
2727private:
2728 std::string command;
2729};
2730
7c673cae
FG
2731/**
2732 * This function drops the mds_lock, so don't do anything with
2733 * MDSRank after calling it (we could have gone into shutdown): just
2734 * send your result back to the calling client and finish.
2735 */
11fdf7f2 2736void MDSRankDispatcher::evict_clients(const SessionFilter &filter, const MCommand::const_ref &m)
7c673cae
FG
2737{
2738 C_MDS_Send_Command_Reply *reply = new C_MDS_Send_Command_Reply(this, m);
2739
2740 if (is_any_replay()) {
2741 reply->send(-EAGAIN, "MDS is replaying log");
2742 delete reply;
2743 return;
2744 }
2745
11fdf7f2
TL
2746 std::vector<Session*> victims;
2747 const auto& sessions = sessionmap.get_sessions();
2748 for (const auto& p : sessions) {
7c673cae
FG
2749 if (!p.first.is_client()) {
2750 continue;
2751 }
2752
2753 Session *s = p.second;
2754
2755 if (filter.match(*s, std::bind(&Server::waiting_for_reconnect, server, std::placeholders::_1))) {
2756 victims.push_back(s);
2757 }
2758 }
2759
2760 dout(20) << __func__ << " matched " << victims.size() << " sessions" << dendl;
2761
2762 if (victims.empty()) {
2763 reply->send(0, "");
2764 delete reply;
2765 return;
2766 }
2767
2768 C_GatherBuilder gather(g_ceph_context, reply);
2769 for (const auto s : victims) {
31f18b77 2770 std::stringstream ss;
11fdf7f2
TL
2771 evict_client(s->get_client().v, false,
2772 g_conf()->mds_session_blacklist_on_evict, ss, gather.new_sub());
7c673cae
FG
2773 }
2774 gather.activate();
2775}
2776
2777void MDSRankDispatcher::dump_sessions(const SessionFilter &filter, Formatter *f) const
2778{
2779 // Dump sessions, decorated with recovery/replay status
2780 f->open_array_section("sessions");
92f5a8d4
TL
2781 for (auto& [name, s] : sessionmap.get_sessions()) {
2782 if (!name.is_client()) {
7c673cae
FG
2783 continue;
2784 }
2785
7c673cae
FG
2786 if (!filter.match(*s, std::bind(&Server::waiting_for_reconnect, server, std::placeholders::_1))) {
2787 continue;
2788 }
2789
92f5a8d4 2790 f->dump_object("session", *s);
7c673cae 2791 }
92f5a8d4 2792 f->close_section(); // sessions
7c673cae
FG
2793}
2794
11fdf7f2
TL
2795void MDSRank::command_scrub_start(Formatter *f,
2796 std::string_view path, std::string_view tag,
2797 const vector<string>& scrubop_vec, Context *on_finish)
7c673cae
FG
2798{
2799 bool force = false;
2800 bool recursive = false;
2801 bool repair = false;
11fdf7f2
TL
2802 for (auto &op : scrubop_vec) {
2803 if (op == "force")
7c673cae 2804 force = true;
11fdf7f2 2805 else if (op == "recursive")
7c673cae 2806 recursive = true;
11fdf7f2 2807 else if (op == "repair")
7c673cae
FG
2808 repair = true;
2809 }
11fdf7f2
TL
2810
2811 std::lock_guard l(mds_lock);
2812 mdcache->enqueue_scrub(path, tag, force, recursive, repair, f, on_finish);
7c673cae
FG
2813 // enqueue_scrub() finishers will dump the data for us; we're done!
2814}
2815
2816void MDSRank::command_tag_path(Formatter *f,
11fdf7f2 2817 std::string_view path, std::string_view tag)
7c673cae
FG
2818{
2819 C_SaferCond scond;
2820 {
11fdf7f2 2821 std::lock_guard l(mds_lock);
7c673cae
FG
2822 mdcache->enqueue_scrub(path, tag, true, true, false, f, &scond);
2823 }
2824 scond.wait();
2825}
2826
11fdf7f2
TL
2827void MDSRank::command_scrub_abort(Formatter *f, Context *on_finish) {
2828 std::lock_guard l(mds_lock);
2829 scrubstack->scrub_abort(on_finish);
2830}
2831
2832void MDSRank::command_scrub_pause(Formatter *f, Context *on_finish) {
2833 std::lock_guard l(mds_lock);
2834 scrubstack->scrub_pause(on_finish);
2835}
2836
2837void MDSRank::command_scrub_resume(Formatter *f) {
2838 int r = scrubstack->scrub_resume();
2839
2840 f->open_object_section("result");
2841 f->dump_int("return_code", r);
2842 f->close_section();
2843}
2844
2845void MDSRank::command_scrub_status(Formatter *f) {
2846 scrubstack->scrub_status(f);
2847}
2848
2849void MDSRank::command_flush_path(Formatter *f, std::string_view path)
7c673cae
FG
2850{
2851 C_SaferCond scond;
2852 {
11fdf7f2 2853 std::lock_guard l(mds_lock);
7c673cae
FG
2854 mdcache->flush_dentry(path, &scond);
2855 }
2856 int r = scond.wait();
2857 f->open_object_section("results");
2858 f->dump_int("return_code", r);
2859 f->close_section(); // results
2860}
2861
f64942e4
AA
2862// synchronous wrapper around "journal flush" asynchronous context
2863// execution.
2864void MDSRank::command_flush_journal(Formatter *f) {
2865 ceph_assert(f != NULL);
7c673cae 2866
f64942e4 2867 C_SaferCond cond;
7c673cae 2868 std::stringstream ss;
7c673cae 2869 {
11fdf7f2 2870 std::lock_guard locker(mds_lock);
f64942e4
AA
2871 C_Flush_Journal *flush_journal = new C_Flush_Journal(mdcache, mdlog, this, &ss, &cond);
2872 flush_journal->send();
7c673cae 2873 }
f64942e4 2874 int r = cond.wait();
7c673cae 2875
f64942e4
AA
2876 f->open_object_section("result");
2877 f->dump_string("message", ss.str());
2878 f->dump_int("return_code", r);
2879 f->close_section();
7c673cae
FG
2880}
2881
7c673cae
FG
2882void MDSRank::command_get_subtrees(Formatter *f)
2883{
11fdf7f2
TL
2884 ceph_assert(f != NULL);
2885 std::lock_guard l(mds_lock);
7c673cae 2886
11fdf7f2
TL
2887 std::vector<CDir*> subtrees;
2888 mdcache->get_subtrees(subtrees);
7c673cae
FG
2889
2890 f->open_array_section("subtrees");
11fdf7f2 2891 for (const auto& dir : subtrees) {
7c673cae
FG
2892 f->open_object_section("subtree");
2893 {
2894 f->dump_bool("is_auth", dir->is_auth());
2895 f->dump_int("auth_first", dir->get_dir_auth().first);
2896 f->dump_int("auth_second", dir->get_dir_auth().second);
d2e6a577 2897 f->dump_int("export_pin", dir->inode->get_export_pin());
7c673cae
FG
2898 f->open_object_section("dir");
2899 dir->dump(f);
2900 f->close_section();
2901 }
2902 f->close_section();
2903 }
2904 f->close_section();
2905}
2906
2907
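// "export dir" asok command: validate the target rank and path via
// _command_export_dir() and report the return code through the formatter.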
2908void MDSRank::command_export_dir(Formatter *f,
11fdf7f2 2909 std::string_view path,
7c673cae
FG
2910 mds_rank_t target)
2911{
2912 int r = _command_export_dir(path, target);
2913 f->open_object_section("results");
2914 f->dump_int("return_code", r);
2915 f->close_section(); // results
2916}
2917
2918int MDSRank::_command_export_dir(
11fdf7f2 2919 std::string_view path,
7c673cae
FG
2920 mds_rank_t target)
2921{
11fdf7f2 2922 std::lock_guard l(mds_lock);
94b18763 2923 filepath fp(path);
7c673cae
FG
2924
2925 if (target == whoami || !mdsmap->is_up(target) || !mdsmap->is_in(target)) {
2926 derr << "bad MDS target " << target << dendl;
2927 return -ENOENT;
2928 }
2929
2930 CInode *in = mdcache->cache_traverse(fp);
2931 if (!in) {
2932 derr << "Bath path '" << path << "'" << dendl;
2933 return -ENOENT;
2934 }
2935 CDir *dir = in->get_dirfrag(frag_t());
2936 if (!dir || !(dir->is_auth())) {
2937 derr << "bad export_dir path dirfrag frag_t() or dir not auth" << dendl;
2938 return -EINVAL;
2939 }
2940
2941 mdcache->migrator->export_dir(dir, target);
2942 return 0;
2943}
2944
11fdf7f2
TL
2945void MDSRank::command_dump_tree(const cmdmap_t &cmdmap, std::ostream &ss, Formatter *f)
2946{
2947 std::string root;
2948 int64_t depth;
2949 cmd_getval(g_ceph_context, cmdmap, "root", root);
2950 if (!cmd_getval(g_ceph_context, cmdmap, "depth", depth))
2951 depth = -1;
2952 std::lock_guard l(mds_lock);
2953 CInode *in = mdcache->cache_traverse(filepath(root.c_str()));
2954 if (!in) {
2955 ss << "root inode is not in cache";
2956 return;
2957 }
2958 f->open_array_section("inodes");
2959 mdcache->dump_tree(in, 0, depth, f);
2960 f->close_section();
2961}
2962
7c673cae
FG
2963CDir *MDSRank::_command_dirfrag_get(
2964 const cmdmap_t &cmdmap,
2965 std::ostream &ss)
2966{
2967 std::string path;
2968 bool got = cmd_getval(g_ceph_context, cmdmap, "path", path);
2969 if (!got) {
2970 ss << "missing path argument";
2971 return NULL;
2972 }
2973
2974 std::string frag_str;
2975 if (!cmd_getval(g_ceph_context, cmdmap, "frag", frag_str)) {
2976 ss << "missing frag argument";
2977 return NULL;
2978 }
2979
2980 CInode *in = mdcache->cache_traverse(filepath(path.c_str()));
2981 if (!in) {
2982 // TODO really we should load something in if it's not in cache,
2983 // but the infrastructure is harder, and we might still be unable
2984 // to act on it if someone else is auth.
2985 ss << "directory '" << path << "' inode not in cache";
2986 return NULL;
2987 }
2988
2989 frag_t fg;
2990
2991 if (!fg.parse(frag_str.c_str())) {
2992 ss << "frag " << frag_str << " failed to parse";
2993 return NULL;
2994 }
2995
2996 CDir *dir = in->get_dirfrag(fg);
2997 if (!dir) {
11fdf7f2 2998 ss << "frag " << in->ino() << "/" << fg << " not in cache ("
7c673cae
FG
2999 "use `dirfrag ls` to see if it should exist)";
3000 return NULL;
3001 }
3002
3003 if (!dir->is_auth()) {
3004 ss << "frag " << dir->dirfrag() << " not auth (auth = "
3005 << dir->authority() << ")";
3006 return NULL;
3007 }
3008
3009 return dir;
3010}
3011
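// "dirfrag split" asok command: requires a positive "bits" argument and an
// auth dirfrag already in cache, then asks MDCache to split it.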
3012bool MDSRank::command_dirfrag_split(
3013 cmdmap_t cmdmap,
3014 std::ostream &ss)
3015{
11fdf7f2 3016 std::lock_guard l(mds_lock);
7c673cae
FG
3017 int64_t by = 0;
3018 if (!cmd_getval(g_ceph_context, cmdmap, "bits", by)) {
3019 ss << "missing bits argument";
3020 return false;
3021 }
3022
3023 if (by <= 0) {
3024 ss << "must split by >0 bits";
3025 return false;
3026 }
3027
3028 CDir *dir = _command_dirfrag_get(cmdmap, ss);
3029 if (!dir) {
3030 return false;
3031 }
3032
3033 mdcache->split_dir(dir, by);
3034
3035 return true;
3036}
3037
3038bool MDSRank::command_dirfrag_merge(
3039 cmdmap_t cmdmap,
3040 std::ostream &ss)
3041{
11fdf7f2 3042 std::lock_guard l(mds_lock);
7c673cae
FG
3043 std::string path;
3044 bool got = cmd_getval(g_ceph_context, cmdmap, "path", path);
3045 if (!got) {
3046 ss << "missing path argument";
3047 return false;
3048 }
3049
3050 std::string frag_str;
3051 if (!cmd_getval(g_ceph_context, cmdmap, "frag", frag_str)) {
3052 ss << "missing frag argument";
3053 return false;
3054 }
3055
3056 CInode *in = mdcache->cache_traverse(filepath(path.c_str()));
3057 if (!in) {
3058 ss << "directory '" << path << "' inode not in cache";
3059 return false;
3060 }
3061
3062 frag_t fg;
3063 if (!fg.parse(frag_str.c_str())) {
3064 ss << "frag " << frag_str << " failed to parse";
3065 return false;
3066 }
3067
3068 mdcache->merge_dir(in, fg);
3069
3070 return true;
3071}
3072
3073bool MDSRank::command_dirfrag_ls(
3074 cmdmap_t cmdmap,
3075 std::ostream &ss,
3076 Formatter *f)
3077{
11fdf7f2 3078 std::lock_guard l(mds_lock);
7c673cae
FG
3079 std::string path;
3080 bool got = cmd_getval(g_ceph_context, cmdmap, "path", path);
3081 if (!got) {
3082 ss << "missing path argument";
3083 return false;
3084 }
3085
3086 CInode *in = mdcache->cache_traverse(filepath(path.c_str()));
3087 if (!in) {
3088 ss << "directory inode not in cache";
3089 return false;
3090 }
3091
3092 f->open_array_section("frags");
11fdf7f2 3093 frag_vec_t leaves;
7c673cae
FG
3094 // NB using get_leaves_under instead of get_dirfrags to give
3095 // you the list of what dirfrags may exist, not which are in cache
11fdf7f2
TL
3096 in->dirfragtree.get_leaves_under(frag_t(), leaves);
3097 for (const auto& leaf : leaves) {
7c673cae 3098 f->open_object_section("frag");
11fdf7f2
TL
3099 f->dump_int("value", leaf.value());
3100 f->dump_int("bits", leaf.bits());
3101 CachedStackStringStream css;
3102 *css << std::hex << leaf.value() << "/" << std::dec << leaf.bits();
3103 f->dump_string("str", css->strv());
7c673cae
FG
3104 f->close_section();
3105 }
3106 f->close_section();
3107
3108 return true;
3109}
3110
11fdf7f2
TL
3111void MDSRank::command_openfiles_ls(Formatter *f)
3112{
3113 std::lock_guard l(mds_lock);
3114 mdcache->dump_openfiles(f);
3115}
3116
3117void MDSRank::command_dump_inode(Formatter *f, const cmdmap_t &cmdmap, std::ostream &ss)
3118{
3119 std::lock_guard l(mds_lock);
3120 int64_t number;
3121 bool got = cmd_getval(g_ceph_context, cmdmap, "number", number);
3122 if (!got) {
3123 ss << "missing inode number";
3124 return;
3125 }
3126
3127 bool success = mdcache->dump_inode(f, number);
3128 if (!success) {
3129 ss << "dump inode failed, wrong inode number or the inode is not cached";
3130 }
3131}
3132
7c673cae
FG
3133void MDSRank::dump_status(Formatter *f) const
3134{
3135 if (state == MDSMap::STATE_REPLAY ||
3136 state == MDSMap::STATE_STANDBY_REPLAY) {
3137 mdlog->dump_replay_status(f);
3138 } else if (state == MDSMap::STATE_RESOLVE) {
3139 mdcache->dump_resolve_status(f);
3140 } else if (state == MDSMap::STATE_RECONNECT) {
3141 server->dump_reconnect_status(f);
3142 } else if (state == MDSMap::STATE_REJOIN) {
3143 mdcache->dump_rejoin_status(f);
3144 } else if (state == MDSMap::STATE_CLIENTREPLAY) {
3145 dump_clientreplay_status(f);
3146 }
94b18763 3147 f->dump_float("rank_uptime", get_uptime().count());
7c673cae
FG
3148}
3149
3150void MDSRank::dump_clientreplay_status(Formatter *f) const
3151{
3152 f->open_object_section("clientreplay_status");
3153 f->dump_unsigned("clientreplay_queue", replay_queue.size());
3154 f->dump_unsigned("active_replay", mdcache->get_num_client_requests());
3155 f->close_section();
3156}
3157
3158void MDSRankDispatcher::update_log_config()
3159{
3160 map<string,string> log_to_monitors;
3161 map<string,string> log_to_syslog;
3162 map<string,string> log_channel;
3163 map<string,string> log_prio;
3164 map<string,string> log_to_graylog;
3165 map<string,string> log_to_graylog_host;
3166 map<string,string> log_to_graylog_port;
3167 uuid_d fsid;
3168 string host;
3169
3170 if (parse_log_client_options(g_ceph_context, log_to_monitors, log_to_syslog,
3171 log_channel, log_prio, log_to_graylog,
3172 log_to_graylog_host, log_to_graylog_port,
3173 fsid, host) == 0)
3174 clog->update_config(log_to_monitors, log_to_syslog,
3175 log_channel, log_prio, log_to_graylog,
3176 log_to_graylog_host, log_to_graylog_port,
3177 fsid, host);
3178 dout(10) << __func__ << " log_to_monitors " << log_to_monitors << dendl;
3179}
3180
3181void MDSRank::create_logger()
3182{
3183 dout(10) << "create_logger" << dendl;
3184 {
3185 PerfCountersBuilder mds_plb(g_ceph_context, "mds", l_mds_first, l_mds_last);
3186
91327a77
AA
3187 // super useful (high prio) perf stats
3188 mds_plb.add_u64_counter(l_mds_request, "request", "Requests", "req",
3189 PerfCountersBuilder::PRIO_CRITICAL);
3190 mds_plb.add_time_avg(l_mds_reply_latency, "reply_latency", "Reply latency", "rlat",
3191 PerfCountersBuilder::PRIO_CRITICAL);
3192 mds_plb.add_u64(l_mds_inodes, "inodes", "Inodes", "inos",
3193 PerfCountersBuilder::PRIO_CRITICAL);
3194 mds_plb.add_u64_counter(l_mds_forward, "forward", "Forwarding request", "fwd",
3195 PerfCountersBuilder::PRIO_INTERESTING);
3196 mds_plb.add_u64(l_mds_caps, "caps", "Capabilities", "caps",
3197 PerfCountersBuilder::PRIO_INTERESTING);
3198 mds_plb.add_u64_counter(l_mds_exported_inodes, "exported_inodes", "Exported inodes",
3199 "exi", PerfCountersBuilder::PRIO_INTERESTING);
3200 mds_plb.add_u64_counter(l_mds_imported_inodes, "imported_inodes", "Imported inodes",
3201 "imi", PerfCountersBuilder::PRIO_INTERESTING);
3202
3203 // useful dir/inode/subtree stats
3204 mds_plb.set_prio_default(PerfCountersBuilder::PRIO_USEFUL);
eafe8130
TL
3205 mds_plb.add_u64(l_mds_root_rfiles, "root_rfiles", "root inode rfiles");
3206 mds_plb.add_u64(l_mds_root_rbytes, "root_rbytes", "root inode rbytes");
3207 mds_plb.add_u64(l_mds_root_rsnaps, "root_rsnaps", "root inode rsnaps");
7c673cae
FG
3208 mds_plb.add_u64_counter(l_mds_dir_fetch, "dir_fetch", "Directory fetch");
3209 mds_plb.add_u64_counter(l_mds_dir_commit, "dir_commit", "Directory commit");
3210 mds_plb.add_u64_counter(l_mds_dir_split, "dir_split", "Directory split");
3211 mds_plb.add_u64_counter(l_mds_dir_merge, "dir_merge", "Directory merge");
7c673cae 3212 mds_plb.add_u64(l_mds_inode_max, "inode_max", "Max inodes, cache size");
7c673cae
FG
3213 mds_plb.add_u64(l_mds_inodes_pinned, "inodes_pinned", "Inodes pinned");
3214 mds_plb.add_u64(l_mds_inodes_expired, "inodes_expired", "Inodes expired");
91327a77
AA
3215 mds_plb.add_u64(l_mds_inodes_with_caps, "inodes_with_caps",
3216 "Inodes with capabilities");
7c673cae 3217 mds_plb.add_u64(l_mds_subtrees, "subtrees", "Subtrees");
91327a77 3218 mds_plb.add_u64(l_mds_load_cent, "load_cent", "Load per cent");
11fdf7f2
TL
3219 mds_plb.add_u64_counter(l_mds_openino_dir_fetch, "openino_dir_fetch",
3220 "OpenIno incomplete directory fetchings");
7c673cae 3221
91327a77
AA
3222 // low prio stats
3223 mds_plb.set_prio_default(PerfCountersBuilder::PRIO_DEBUGONLY);
3224 mds_plb.add_u64_counter(l_mds_reply, "reply", "Replies");
3225 mds_plb.add_u64(l_mds_inodes_top, "inodes_top", "Inodes on top");
3226 mds_plb.add_u64(l_mds_inodes_bottom, "inodes_bottom", "Inodes on bottom");
3227 mds_plb.add_u64(
3228 l_mds_inodes_pin_tail, "inodes_pin_tail", "Inodes on pin tail");
7c673cae
FG
3229 mds_plb.add_u64_counter(l_mds_traverse, "traverse", "Traverses");
3230 mds_plb.add_u64_counter(l_mds_traverse_hit, "traverse_hit", "Traverse hits");
3231 mds_plb.add_u64_counter(l_mds_traverse_forward, "traverse_forward",
91327a77 3232 "Traverse forwards");
7c673cae 3233 mds_plb.add_u64_counter(l_mds_traverse_discover, "traverse_discover",
91327a77 3234 "Traverse directory discovers");
7c673cae 3235 mds_plb.add_u64_counter(l_mds_traverse_dir_fetch, "traverse_dir_fetch",
91327a77 3236 "Traverse incomplete directory content fetchings");
7c673cae 3237 mds_plb.add_u64_counter(l_mds_traverse_remote_ino, "traverse_remote_ino",
91327a77 3238 "Traverse remote dentries");
7c673cae 3239 mds_plb.add_u64_counter(l_mds_traverse_lock, "traverse_lock",
91327a77 3240 "Traverse locks");
7c673cae 3241 mds_plb.add_u64(l_mds_dispatch_queue_len, "q", "Dispatch queue length");
7c673cae 3242 mds_plb.add_u64_counter(l_mds_exported, "exported", "Exports");
7c673cae 3243 mds_plb.add_u64_counter(l_mds_imported, "imported", "Imports");
11fdf7f2
TL
3244 mds_plb.add_u64_counter(l_mds_openino_backtrace_fetch, "openino_backtrace_fetch",
3245 "OpenIno backtrace fetchings");
3246 mds_plb.add_u64_counter(l_mds_openino_peer_discover, "openino_peer_discover",
3247 "OpenIno peer inode discovers");
91327a77 3248
7c673cae
FG
3249 logger = mds_plb.create_perf_counters();
3250 g_ceph_context->get_perfcounters_collection()->add(logger);
3251 }
3252
3253 {
3254 PerfCountersBuilder mdm_plb(g_ceph_context, "mds_mem", l_mdm_first, l_mdm_last);
b32b8144
FG
3255 mdm_plb.add_u64(l_mdm_ino, "ino", "Inodes", "ino",
3256 PerfCountersBuilder::PRIO_INTERESTING);
91327a77
AA
3257 mdm_plb.add_u64(l_mdm_dn, "dn", "Dentries", "dn",
3258 PerfCountersBuilder::PRIO_INTERESTING);
3259
3260 mdm_plb.set_prio_default(PerfCountersBuilder::PRIO_USEFUL);
7c673cae
FG
3261 mdm_plb.add_u64_counter(l_mdm_inoa, "ino+", "Inodes opened");
3262 mdm_plb.add_u64_counter(l_mdm_inos, "ino-", "Inodes closed");
3263 mdm_plb.add_u64(l_mdm_dir, "dir", "Directories");
3264 mdm_plb.add_u64_counter(l_mdm_dira, "dir+", "Directories opened");
3265 mdm_plb.add_u64_counter(l_mdm_dirs, "dir-", "Directories closed");
7c673cae
FG
3266 mdm_plb.add_u64_counter(l_mdm_dna, "dn+", "Dentries opened");
3267 mdm_plb.add_u64_counter(l_mdm_dns, "dn-", "Dentries closed");
3268 mdm_plb.add_u64(l_mdm_cap, "cap", "Capabilities");
3269 mdm_plb.add_u64_counter(l_mdm_capa, "cap+", "Capabilities added");
3270 mdm_plb.add_u64_counter(l_mdm_caps, "cap-", "Capabilities removed");
7c673cae 3271 mdm_plb.add_u64(l_mdm_heap, "heap", "Heap size");
91327a77
AA
3272
3273 mdm_plb.set_prio_default(PerfCountersBuilder::PRIO_DEBUGONLY);
3274 mdm_plb.add_u64(l_mdm_rss, "rss", "RSS");
3275
7c673cae
FG
3276 mlogger = mdm_plb.create_perf_counters();
3277 g_ceph_context->get_perfcounters_collection()->add(mlogger);
3278 }
3279
3280 mdlog->create_logger();
3281 server->create_logger();
3282 purge_queue.create_logger();
3283 sessionmap.register_perfcounters();
3284 mdcache->register_perfcounters();
3285}
3286
3287void MDSRank::check_ops_in_flight()
3288{
11fdf7f2 3289 string summary;
7c673cae
FG
3290 vector<string> warnings;
3291 int slow = 0;
11fdf7f2
TL
3292 if (op_tracker.check_ops_in_flight(&summary, warnings, &slow)) {
3293 clog->warn() << summary;
3294 for (const auto& warning : warnings) {
3295 clog->warn() << warning;
7c673cae
FG
3296 }
3297 }
3298
3299 // set mds slow request count
3300 mds_slow_req_count = slow;
3301 return;
3302}
3303
3304void MDSRankDispatcher::handle_osd_map()
3305{
11fdf7f2
TL
3306 if (is_active() &&
3307 mdsmap->get_tableserver() == whoami) {
7c673cae
FG
3308 snapserver->check_osd_map(true);
3309 }
3310
3311 server->handle_osd_map();
3312
3313 purge_queue.update_op_limit(*mdsmap);
3314
31f18b77
FG
3315 std::set<entity_addr_t> newly_blacklisted;
3316 objecter->consume_blacklist_events(&newly_blacklisted);
3317 auto epoch = objecter->with_osdmap([](const OSDMap &o){return o.get_epoch();});
3318 dout(4) << "handle_osd_map epoch " << epoch << ", "
3319 << newly_blacklisted.size() << " new blacklist entries" << dendl;
3320 auto victims = server->apply_blacklist(newly_blacklisted);
3321 if (victims) {
3322 set_osd_epoch_barrier(epoch);
3323 }
3324
3325
7c673cae
FG
3326 // By default the objecter only requests OSDMap updates on use,
3327 // we would like to always receive the latest maps in order to
3328 // apply policy based on the FULL flag.
3329 objecter->maybe_request_map();
3330}
3331
92f5a8d4
TL
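// Set or remove a per-session client metadata option; currently only
// "timeout" (a non-negative numeric string) is recognized.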
3332int MDSRank::config_client(int64_t session_id, bool remove,
3333 const std::string& option, const std::string& value,
3334 std::ostream& ss)
3335{
3336 Session *session = sessionmap.get_session(entity_name_t(CEPH_ENTITY_TYPE_CLIENT, session_id));
3337 if (!session) {
3338 ss << "session " << session_id << " not in sessionmap!";
3339 return -ENOENT;
3340 }
3341
3342 if (option == "timeout") {
3343 if (remove) {
3344 auto it = session->info.client_metadata.find("timeout");
3345 if (it == session->info.client_metadata.end()) {
3346 ss << "Nonexistent config: " << option;
3347 return -ENODATA;
3348 }
3349 session->info.client_metadata.erase(it);
3350 } else {
3351 char *end;
3352 strtoul(value.c_str(), &end, 0);
3353 if (*end) {
3354 ss << "Invalid config for timeout: " << value;
3355 return -EINVAL;
3356 }
3357 session->info.client_metadata[option] = value;
3358 }
3359 //sessionmap._mark_dirty(session, true);
3360 } else {
3361 ss << "Invalid config option: " << option;
3362 return -EINVAL;
3363 }
3364
3365 return 0;
3366}
3367
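// Evict a single client session, optionally blacklisting its address via an
// "osd blacklist add" mon command and raising the OSD epoch barrier. "wait"
// and "on_killed" are mutually exclusive; when waiting we may drop and
// retake mds_lock, so callers must revalidate state afterwards.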
31f18b77 3368bool MDSRank::evict_client(int64_t session_id,
11fdf7f2 3369 bool wait, bool blacklist, std::ostream& err_ss,
31f18b77 3370 Context *on_killed)
7c673cae 3371{
11fdf7f2 3372 ceph_assert(mds_lock.is_locked_by_me());
31f18b77
FG
3373
3374 // Mutually exclusive args
11fdf7f2 3375 ceph_assert(!(wait && on_killed != nullptr));
31f18b77 3376
7c673cae
FG
3377 if (is_any_replay()) {
3378 err_ss << "MDS is replaying log";
3379 return false;
3380 }
3381
31f18b77
FG
3382 Session *session = sessionmap.get_session(
3383 entity_name_t(CEPH_ENTITY_TYPE_CLIENT, session_id));
7c673cae
FG
3384 if (!session) {
3385 err_ss << "session " << session_id << " not in sessionmap!";
3386 return false;
3387 }
7c673cae 3388
a8e16298
TL
3389 auto& addr = session->info.inst.addr;
3390 {
11fdf7f2
TL
3391 CachedStackStringStream css;
3392 *css << "Evicting " << (blacklist ? "(and blacklisting) " : "")
3393 << "client session " << session_id << " (" << addr << ")";
3394 dout(1) << css->strv() << dendl;
3395 clog->info() << css->strv();
a8e16298
TL
3396 }
3397
31f18b77
FG
3398 dout(4) << "Preparing blacklist command... (wait=" << wait << ")" << dendl;
3399 stringstream ss;
3400 ss << "{\"prefix\":\"osd blacklist\", \"blacklistop\":\"add\",";
3401 ss << "\"addr\":\"";
a8e16298 3402 ss << addr;
31f18b77
FG
3403 ss << "\"}";
3404 std::string tmp = ss.str();
3405 std::vector<std::string> cmd = {tmp};
3406
91327a77 3407 auto kill_client_session = [this, session_id, wait, on_killed](){
11fdf7f2 3408 ceph_assert(mds_lock.is_locked_by_me());
31f18b77
FG
3409 Session *session = sessionmap.get_session(
3410 entity_name_t(CEPH_ENTITY_TYPE_CLIENT, session_id));
3411 if (session) {
91327a77 3412 if (on_killed || !wait) {
31f18b77
FG
3413 server->kill_session(session, on_killed);
3414 } else {
3415 C_SaferCond on_safe;
3416 server->kill_session(session, &on_safe);
3417
3418 mds_lock.Unlock();
3419 on_safe.wait();
3420 mds_lock.Lock();
3421 }
3422 } else {
3423 dout(1) << "session " << session_id << " was removed while we waited "
3424 "for blacklist" << dendl;
3425
3426 // Even though it wasn't us that removed it, kick our completion
3427 // as the session has been removed.
3428 if (on_killed) {
3429 on_killed->complete(0);
3430 }
3431 }
3432 };
3433
91327a77 3434 auto apply_blacklist = [this, cmd](std::function<void ()> fn){
11fdf7f2 3435 ceph_assert(mds_lock.is_locked_by_me());
31f18b77 3436
91327a77 3437 Context *on_blacklist_done = new FunctionContext([this, fn](int r) {
31f18b77
FG
3438 objecter->wait_for_latest_osdmap(
3439 new C_OnFinisher(
91327a77 3440 new FunctionContext([this, fn](int r) {
11fdf7f2 3441 std::lock_guard l(mds_lock);
31f18b77
FG
3442 auto epoch = objecter->with_osdmap([](const OSDMap &o){
3443 return o.get_epoch();
3444 });
3445
3446 set_osd_epoch_barrier(epoch);
3447
3448 fn();
3449 }), finisher)
3450 );
3451 });
3452
3453 dout(4) << "Sending mon blacklist command: " << cmd[0] << dendl;
3454 monc->start_mon_command(cmd, {}, nullptr, nullptr, on_blacklist_done);
3455 };
3456
31f18b77
FG
3457 if (wait) {
3458 if (blacklist) {
91327a77
AA
3459 C_SaferCond inline_ctx;
3460 apply_blacklist([&inline_ctx](){inline_ctx.complete(0);});
3461 mds_lock.Unlock();
3462 inline_ctx.wait();
3463 mds_lock.Lock();
31f18b77
FG
3464 }
3465
3466 // We dropped mds_lock, so check that session still exists
3467 session = sessionmap.get_session(entity_name_t(CEPH_ENTITY_TYPE_CLIENT,
11fdf7f2 3468 session_id));
31f18b77
FG
3469 if (!session) {
3470 dout(1) << "session " << session_id << " was removed while we waited "
3471 "for blacklist" << dendl;
3472 return true;
3473 }
91327a77 3474 kill_client_session();
7c673cae 3475 } else {
31f18b77 3476 if (blacklist) {
91327a77 3477 apply_blacklist(kill_client_session);
31f18b77 3478 } else {
91327a77 3479 kill_client_session();
31f18b77 3480 }
7c673cae 3481 }
31f18b77 3482
7c673cae
FG
3483 return true;
3484}
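/*
 * For reference, the blacklist request assembled above is a single JSON
 * command string handed to MonClient::start_mon_command().  With an
 * illustrative (made-up) client address it looks like:
 *
 *   {"prefix":"osd blacklist", "blacklistop":"add",
 *    "addr":"192.168.0.2:0/1234567890"}
 *
 * Once the monitor accepts it, apply_blacklist() waits for the latest
 * OSDMap and records its epoch via set_osd_epoch_barrier() before the
 * session is actually killed, so the eviction only proceeds after the
 * blacklist entry is visible in an OSDMap epoch this rank knows about.
 */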
3485
3486void MDSRank::bcast_mds_map()
3487{
3488 dout(7) << "bcast_mds_map " << mdsmap->get_epoch() << dendl;
3489
3490 // share the map with mounted clients
3491 set<Session*> clients;
3492 sessionmap.get_client_session_set(clients);
11fdf7f2
TL
3493 for (const auto &session : clients) {
3494 auto m = MMDSMap::create(monc->get_fsid(), *mdsmap);
3495 session->get_connection()->send_message2(std::move(m));
3496 }
7c673cae
FG
3497 last_client_mdsmap_bcast = mdsmap->get_epoch();
3498}
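// Mounted clients keep their own copy of the MDSMap to locate ranks and
// track their states, so every client session gets a fresh copy here;
// last_client_mdsmap_bcast simply remembers the epoch last pushed to clients.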
3499
11fdf7f2
TL
3500Context *MDSRank::create_async_exec_context(C_ExecAndReply *ctx) {
3501 return new C_OnFinisher(new FunctionContext([ctx](int _) {
3502 ctx->exec();
3503 }), finisher);
3504}
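// Commands that may block or need to retake mds_lock are not run in the
// dispatch path; the wrapped context is handed back through
// handle_command()'s run_later out-parameter, and ctx->exec() eventually
// executes on the finisher thread.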
3505
7c673cae
FG
3506MDSRankDispatcher::MDSRankDispatcher(
3507 mds_rank_t whoami_,
3508 Mutex &mds_lock_,
3509 LogChannelRef &clog_,
3510 SafeTimer &timer_,
3511 Beacon &beacon_,
11fdf7f2 3512 std::unique_ptr<MDSMap> &mdsmap_,
7c673cae
FG
3513 Messenger *msgr,
3514 MonClient *monc_,
3515 Context *respawn_hook_,
3516 Context *suicide_hook_)
3517 : MDSRank(whoami_, mds_lock_, clog_, timer_, beacon_, mdsmap_,
3518 msgr, monc_, respawn_hook_, suicide_hook_)
92f5a8d4
TL
3519{
3520 g_conf().add_observer(this);
3521}
7c673cae
FG
3522
3523bool MDSRankDispatcher::handle_command(
3524 const cmdmap_t &cmdmap,
11fdf7f2 3525 const MCommand::const_ref &m,
7c673cae
FG
3526 int *r,
3527 std::stringstream *ds,
3528 std::stringstream *ss,
f64942e4 3529 Context **run_later,
7c673cae
FG
3530 bool *need_reply)
3531{
11fdf7f2
TL
3532 ceph_assert(r != nullptr);
3533 ceph_assert(ds != nullptr);
3534 ceph_assert(ss != nullptr);
7c673cae
FG
3535
3536 *need_reply = true;
3537
3538 std::string prefix;
3539 cmd_getval(g_ceph_context, cmdmap, "prefix", prefix);
3540
31f18b77 3541 if (prefix == "session ls" || prefix == "client ls") {
7c673cae
FG
3542 std::vector<std::string> filter_args;
3543 cmd_getval(g_ceph_context, cmdmap, "filters", filter_args);
3544
3545 SessionFilter filter;
3546 *r = filter.parse(filter_args, ss);
3547 if (*r != 0) {
3548 return true;
3549 }
3550
f64942e4
AA
3551 JSONFormatter f(true);
3552 dump_sessions(filter, &f);
3553 f.flush(*ds);
7c673cae 3554 return true;
31f18b77 3555 } else if (prefix == "session evict" || prefix == "client evict") {
7c673cae
FG
3556 std::vector<std::string> filter_args;
3557 cmd_getval(g_ceph_context, cmdmap, "filters", filter_args);
3558
3559 SessionFilter filter;
3560 *r = filter.parse(filter_args, ss);
3561 if (*r != 0) {
3562 return true;
3563 }
3564
31f18b77 3565 evict_clients(filter, m);
7c673cae
FG
3566
3567 *need_reply = false;
3568 return true;
92f5a8d4
TL
3569 } else if (prefix == "session config" || prefix == "client config") {
3570 int64_t client_id;
3571 std::string option;
3572 std::string value;
3573
3574 cmd_getval(g_ceph_context, cmdmap, "client_id", client_id);
3575 cmd_getval(g_ceph_context, cmdmap, "option", option);
3576 bool got_value = cmd_getval(g_ceph_context, cmdmap, "value", value);
3577
3578 *r = config_client(client_id, !got_value, option, value, *ss);
3579 return true;
7c673cae 3580 } else if (prefix == "damage ls") {
f64942e4
AA
3581 JSONFormatter f(true);
3582 damage_table.dump(&f);
3583 f.flush(*ds);
7c673cae
FG
3584 return true;
3585 } else if (prefix == "damage rm") {
3586 damage_entry_id_t id = 0;
3587 bool got = cmd_getval(g_ceph_context, cmdmap, "damage_id", (int64_t&)id);
3588 if (!got) {
3589 *r = -EINVAL;
3590 return true;
3591 }
3592
3593 damage_table.erase(id);
f64942e4
AA
3594 return true;
3595 } else if (prefix == "cache drop") {
3596 int64_t timeout;
3597 if (!cmd_getval(g_ceph_context, cmdmap, "timeout", timeout)) {
3598 timeout = 0;
3599 }
3600
f64942e4 3601 *need_reply = false;
11fdf7f2
TL
3602 *run_later = create_async_exec_context(new C_CacheDropExecAndReply
3603 (this, m, (uint64_t)timeout));
3604 return true;
3605 } else if (prefix == "scrub start") {
3606 string path;
3607 string tag;
3608 vector<string> scrubop_vec;
3609 cmd_getval(g_ceph_context, cmdmap, "scrubops", scrubop_vec);
3610 cmd_getval(g_ceph_context, cmdmap, "path", path);
3611 cmd_getval(g_ceph_context, cmdmap, "tag", tag);
f64942e4 3612
92f5a8d4
TL
3613 /* Multiple MDS scrub is not currently supported. See also: https://tracker.ceph.com/issues/12274 */
3614 if (mdsmap->get_max_mds() > 1) {
3615 *ss << "Scrub is not currently supported for multiple active MDS. Please reduce max_mds to 1 and then scrub.";
3616    *r = -ENOTSUP; // negative errno, matching the other error paths above
3617 return true;
3618 }
3619
11fdf7f2
TL
3620 *need_reply = false;
3621 *run_later = create_async_exec_context(new C_ScrubExecAndReply
3622 (this, m, path, tag, scrubop_vec));
3623 return true;
3624 } else if (prefix == "scrub abort") {
3625 *need_reply = false;
3626 *run_later = create_async_exec_context(new C_ScrubControlExecAndReply
3627 (this, m, "abort"));
3628 return true;
3629 } else if (prefix == "scrub pause") {
3630 *need_reply = false;
3631 *run_later = create_async_exec_context(new C_ScrubControlExecAndReply
3632 (this, m, "pause"));
3633 return true;
3634 } else if (prefix == "scrub resume") {
3635 JSONFormatter f(true);
3636 command_scrub_resume(&f);
3637 f.flush(*ds);
3638 return true;
3639 } else if (prefix == "scrub status") {
3640 JSONFormatter f(true);
3641 command_scrub_status(&f);
3642 f.flush(*ds);
7c673cae
FG
3643 return true;
3644 } else {
3645 return false;
3646 }
3647}
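/*
 * A hedged cheat-sheet for the prefixes dispatched above, written as the
 * "ceph tell mds.<id> ..." invocations they typically correspond to (the
 * exact argument spellings are illustrative and may vary by release):
 *
 *   session ls [filters...]          -> dump_sessions()
 *   session evict [filters...]       -> evict_clients()
 *   session config <id> <opt> [val]  -> config_client()
 *   damage ls                        -> damage_table.dump()
 *   damage rm <damage_id>            -> damage_table.erase()
 *   cache drop [timeout]             -> C_CacheDropExecAndReply (async)
 *   scrub start <path> [ops] [tag]   -> C_ScrubExecAndReply (async)
 *   scrub abort | pause              -> C_ScrubControlExecAndReply (async)
 *   scrub resume | status            -> command_scrub_resume()/_status()
 *
 * The async variants clear *need_reply and return their context through
 * *run_later, so the reply is sent once the queued work completes.
 */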
3648
f64942e4
AA
3649void MDSRank::command_cache_drop(uint64_t timeout, Formatter *f, Context *on_finish) {
3650 dout(20) << __func__ << dendl;
3651
11fdf7f2 3652 std::lock_guard locker(mds_lock);
f64942e4
AA
3653 C_Drop_Cache *request = new C_Drop_Cache(server, mdcache, mdlog, this,
3654 timeout, f, on_finish);
3655 request->send();
3656}
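// The heavy lifting is delegated to C_Drop_Cache (defined near the top of
// this file alongside C_Flush_Journal); broadly it recalls client state,
// flushes the journal and trims the cache, then reports the result through
// the supplied Formatter once on_finish fires.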
3657
7c673cae
FG
3658epoch_t MDSRank::get_osd_epoch() const
3659{
3660 return objecter->with_osdmap(std::mem_fn(&OSDMap::get_epoch));
3661}
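// with_osdmap() runs the supplied callable against the Objecter's cached
// OSDMap under its lock; std::mem_fn(&OSDMap::get_epoch) is just a terse
// equivalent of the lambda form used in evict_client() above:
//
//   objecter->with_osdmap([](const OSDMap &o) { return o.get_epoch(); });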
3662
92f5a8d4
TL
3663const char** MDSRankDispatcher::get_tracked_conf_keys() const
3664{
3665 static const char* KEYS[] = {
3666 "clog_to_graylog",
3667 "clog_to_graylog_host",
3668 "clog_to_graylog_port",
3669 "clog_to_monitors",
3670 "clog_to_syslog",
3671 "clog_to_syslog_facility",
3672 "clog_to_syslog_level",
3673 "fsid",
3674 "host",
3675 "mds_bal_fragment_dirs",
3676 "mds_bal_fragment_interval",
3677 "mds_cache_memory_limit",
3678 "mds_cache_mid",
3679 "mds_cache_reservation",
3680 "mds_cache_size",
3681 "mds_cache_trim_decay_rate",
3682 "mds_cap_revoke_eviction_timeout",
3683 "mds_dump_cache_threshold_file",
3684 "mds_dump_cache_threshold_formatter",
3685 "mds_enable_op_tracker",
3686 "mds_health_cache_threshold",
3687 "mds_inject_migrator_session_race",
3688 "mds_log_pause",
3689 "mds_max_export_size",
3690 "mds_max_purge_files",
3691 "mds_max_purge_ops",
3692 "mds_max_purge_ops_per_pg",
3693 "mds_op_complaint_time",
3694 "mds_op_history_duration",
3695 "mds_op_history_size",
3696 "mds_op_log_threshold",
3697 "mds_recall_max_decay_rate",
3698 "mds_recall_warning_decay_rate",
3699 "mds_request_load_average_decay_rate",
3700 "mds_session_cache_liveness_decay_rate",
3701 "mds_replay_unsafe_with_closed_session",
3702 NULL
3703 };
3704 return KEYS;
3705}
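// Any key listed above can be changed at runtime; the new value is delivered
// to handle_conf_change() below.  An illustrative way to exercise this (the
// value is arbitrary):
//
//   ceph config set mds mds_cache_memory_limit 8589934592
//
// or, per daemon, "ceph daemon mds.<id> config set <key> <value>" via the
// admin socket.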
3706
3707void MDSRankDispatcher::handle_conf_change(const ConfigProxy& conf, const std::set<std::string>& changed)
3708{
3709  // Note: this may be invoked with or without mds_lock held; the updates
3710  // that require the lock are deferred to the finisher context below.
3710
3711 if (changed.count("mds_op_complaint_time") || changed.count("mds_op_log_threshold")) {
3712 op_tracker.set_complaint_and_threshold(conf->mds_op_complaint_time, conf->mds_op_log_threshold);
3713 }
3714 if (changed.count("mds_op_history_size") || changed.count("mds_op_history_duration")) {
3715 op_tracker.set_history_size_and_duration(conf->mds_op_history_size, conf->mds_op_history_duration);
3716 }
3717 if (changed.count("mds_enable_op_tracker")) {
3718 op_tracker.set_tracking(conf->mds_enable_op_tracker);
3719 }
3720 if (changed.count("clog_to_monitors") ||
3721 changed.count("clog_to_syslog") ||
3722 changed.count("clog_to_syslog_level") ||
3723 changed.count("clog_to_syslog_facility") ||
3724 changed.count("clog_to_graylog") ||
3725 changed.count("clog_to_graylog_host") ||
3726 changed.count("clog_to_graylog_port") ||
3727 changed.count("host") ||
3728 changed.count("fsid")) {
3729 update_log_config();
3730 }
3731
3732 finisher->queue(new FunctionContext([this, changed](int r) {
3733 std::scoped_lock lock(mds_lock);
3734
3735 if (changed.count("mds_log_pause") && !g_conf()->mds_log_pause) {
3736 mdlog->kick_submitter();
3737 }
3738 sessionmap.handle_conf_change(changed);
3739 server->handle_conf_change(changed);
3740 mdcache->handle_conf_change(changed, *mdsmap);
3741 purge_queue.handle_conf_change(changed, *mdsmap);
3742 }));
3743}
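/*
 * A minimal, self-contained sketch of the config-observer pattern that the
 * two methods above implement, assuming the md_config_obs_t interface from
 * common/config_obs.h; ExampleObserver and the chosen key are hypothetical,
 * and the block is compiled out.
 */
#if 0
#include <set>
#include <string>
#include "common/config_obs.h"

class ExampleObserver : public md_config_obs_t {
public:
  const char** get_tracked_conf_keys() const override {
    // NULL-terminated array, same convention as MDSRankDispatcher above
    static const char* KEYS[] = { "mds_op_history_size", NULL };
    return KEYS;
  }
  void handle_conf_change(const ConfigProxy& conf,
                          const std::set<std::string>& changed) override {
    if (changed.count("mds_op_history_size")) {
      // react to the new value, e.g. resize an in-memory history buffer
    }
  }
};

// Registration mirrors the MDSRankDispatcher constructor:
//   static ExampleObserver obs;
//   g_conf().add_observer(&obs);
// and should be paired with g_conf().remove_observer(&obs) on teardown.
#endif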