*
*/
-#include <boost/utility/string_view.hpp>
+#include <string_view>
#include "common/debug.h"
#include "common/errno.h"
#include "messages/MClientRequestForward.h"
#include "messages/MMDSLoadTargets.h"
-#include "messages/MMDSMap.h"
#include "messages/MMDSTableRequest.h"
-#include "messages/MCommand.h"
-#include "messages/MCommandReply.h"
#include "MDSDaemon.h"
#include "MDSMap.h"
dout(20) << __func__ << dendl;
Context *ctx = new C_OnFinisher(new FunctionContext([this](int _) {
- Mutex::Locker locker(mds->mds_lock);
+ std::lock_guard locker(mds->mds_lock);
trim_expired_segments();
}), mds->finisher);
ctx->complete(0);
dout(20) << __func__ << dendl;
Context *ctx = new FunctionContext([this](int r) {
- Mutex::Locker locker(mds->mds_lock);
+ std::lock_guard locker(mds->mds_lock);
handle_write_head(r);
});
// Flush the journal header so that readers will start from after
void finish(int r) override {
Context *ctx = nullptr;
{
- Mutex::Locker locker(lock);
+ std::lock_guard locker(lock);
std::swap(on_finish, ctx);
}
if (ctx != nullptr) {
Context *timer_task = nullptr;
};
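+ // helper: trim the cache as far as possible in one pass and accumulate how much was trimmed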
+ auto do_trim() {
+ auto [throttled, count] = mdcache->trim(UINT64_MAX);
+ dout(10) << __func__
+ << (throttled ? " (throttled)" : "")
+ << " trimmed " << count << " caps" << dendl;
+ dentries_trimmed += count;
+ return std::make_pair(throttled, count);
+ }
+
void recall_client_state() {
dout(20) << __func__ << dendl;
auto now = mono_clock::now();
auto duration = std::chrono::duration<double>(now-recall_start).count();
MDSGatherBuilder *gather = new MDSGatherBuilder(g_ceph_context);
- auto result = server->recall_client_state(gather, Server::RecallFlags::STEADY);
- auto& throttled = result.first;
- auto& count = result.second;
+ auto [throttled, count] = server->recall_client_state(gather, Server::RecallFlags::STEADY);
dout(10) << __func__
<< (throttled ? " (throttled)" : "")
<< " recalled " << count << " caps" << dendl;
caps_recalled += count;
if ((throttled || count > 0) && (recall_timeout == 0 || duration < recall_timeout)) {
- auto timer = new FunctionContext([this](int _) {
- recall_client_state();
- });
- mds->timer.add_event_after(1.0, timer);
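+ // still throttled or still recalling and within the timeout: retry shortly and use the wait to flush the log and trim the cache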
+ C_ContextTimeout *ctx = new C_ContextTimeout(
+ mds, 1, new FunctionContext([this](int r) {
+ recall_client_state();
+ }));
+ ctx->start_timer();
+ gather->set_finisher(new MDSInternalContextWrapper(mds, ctx));
+ gather->activate();
+ mdlog->flush(); /* use down-time to incrementally flush log */
+ do_trim(); /* use down-time to incrementally trim cache */
} else {
if (!gather->has_subs()) {
delete gather;
return handle_recall_client_state(0);
} else if (recall_timeout > 0 && duration > recall_timeout) {
- delete gather;
+ gather->set_finisher(new C_MDSInternalNoop);
+ gather->activate();
return handle_recall_client_state(-ETIMEDOUT);
} else {
uint64_t remaining = (recall_timeout == 0 ? 0 : recall_timeout-duration);
void trim_cache() {
dout(20) << __func__ << dendl;
- auto p = mdcache->trim(UINT64_MAX);
- auto& throttled = p.first;
- auto& count = p.second;
- dout(10) << __func__
- << (throttled ? " (throttled)" : "")
- << " trimmed " << count << " caps" << dendl;
- dentries_trimmed += count;
+ auto [throttled, count] = do_trim();
if (throttled && count > 0) {
auto timer = new FunctionContext([this](int _) {
trim_cache();
mds_rank_t whoami;
int incarnation;
- void cmd_err(Formatter *f, boost::string_view err) {
+ void cmd_err(Formatter *f, std::string_view err) {
f->reset();
f->open_object_section("result");
f->dump_string("error", err);
LogChannelRef &clog_,
SafeTimer &timer_,
Beacon &beacon_,
- MDSMap *& mdsmap_,
+ std::unique_ptr<MDSMap>& mdsmap_,
Messenger *msgr,
MonClient *monc_,
Context *respawn_hook_,
damage_table(whoami_),
inotable(NULL), snapserver(NULL), snapclient(NULL),
sessionmap(this), logger(NULL), mlogger(NULL),
- op_tracker(g_ceph_context, g_conf->mds_enable_op_tracker,
- g_conf->osd_num_op_tracker_shard),
+ op_tracker(g_ceph_context, g_conf()->mds_enable_op_tracker,
+ g_conf()->osd_num_op_tracker_shard),
last_state(MDSMap::STATE_BOOT),
state(MDSMap::STATE_BOOT),
cluster_degraded(false), stopping(false),
if (mds_lock.is_locked_by_me()) {
handle_write_error(r);
} else {
- Mutex::Locker l(mds_lock);
+ std::lock_guard l(mds_lock);
handle_write_error(r);
}
}
if (inotable) { delete inotable; inotable = NULL; }
if (snapserver) { delete snapserver; snapserver = NULL; }
if (snapclient) { delete snapclient; snapclient = NULL; }
- if (mdsmap) { delete mdsmap; mdsmap = 0; }
if (server) { delete server; server = 0; }
if (locker) { delete locker; locker = 0; }
finisher->start();
}
-void MDSRank::update_targets(utime_t now)
+void MDSRank::update_targets()
{
// get MonMap's idea of my export_targets
const set<mds_rank_t>& map_targets = mdsmap->get_mds_info(get_nodeid()).export_targets;
auto it = export_targets.begin();
while (it != export_targets.end()) {
mds_rank_t rank = it->first;
- double val = it->second.get(now);
- dout(20) << "export target mds." << rank << " value is " << val << " @ " << now << dendl;
+ auto &counter = it->second;
+ dout(20) << "export target mds." << rank << " is " << counter << dendl;
+ double val = counter.get();
if (val <= 0.01) {
dout(15) << "export target mds." << rank << " is no longer an export target" << dendl;
export_targets.erase(it++);
if (send) {
dout(15) << "updating export_targets, now " << new_map_targets.size() << " ranks are targets" << dendl;
- MMDSLoadTargets* m = new MMDSLoadTargets(mds_gid_t(monc->get_global_id()), new_map_targets);
- monc->send_mon_message(m);
+ auto m = MMDSLoadTargets::create(mds_gid_t(monc->get_global_id()), new_map_targets);
+ monc->send_mon_message(m.detach());
}
}
-void MDSRank::hit_export_target(utime_t now, mds_rank_t rank, double amount)
+void MDSRank::hit_export_target(mds_rank_t rank, double amount)
{
- double rate = g_conf->mds_bal_target_decay;
+ double rate = g_conf()->mds_bal_target_decay;
if (amount < 0.0) {
- amount = 100.0/g_conf->mds_bal_target_decay; /* a good default for "i am trying to keep this export_target active" */
+ amount = 100.0/g_conf()->mds_bal_target_decay; /* a good default for "i am trying to keep this export_target active" */
}
- auto em = export_targets.emplace(std::piecewise_construct, std::forward_as_tuple(rank), std::forward_as_tuple(now, DecayRate(rate)));
+ auto em = export_targets.emplace(std::piecewise_construct, std::forward_as_tuple(rank), std::forward_as_tuple(DecayRate(rate)));
+ auto &counter = em.first->second;
+ counter.hit(amount);
if (em.second) {
- dout(15) << "hit export target (new) " << amount << " @ " << now << dendl;
+ dout(15) << "hit export target (new) is " << counter << dendl;
+ } else {
+ dout(15) << "hit export target is " << counter << dendl;
+ }
+}
+
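+// completion context for an asynchronous mon command; reports the result back via _mon_command_finish()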
+class C_MDS_MonCommand : public MDSInternalContext {
+ std::string cmd;
+public:
+ std::string outs;
+ C_MDS_MonCommand(MDSRank *m, std::string_view c)
+ : MDSInternalContext(m), cmd(c) {}
+ void finish(int r) override {
+ mds->_mon_command_finish(r, cmd, outs);
+ }
+};
+
+void MDSRank::_mon_command_finish(int r, std::string_view cmd, std::string_view outs)
+{
+ if (r < 0) {
+ dout(0) << __func__ << ": mon command " << cmd << " failed with errno " << r
+ << " (" << outs << ")" << dendl;
} else {
- dout(15) << "hit export target " << amount << " @ " << now << dendl;
+ dout(1) << __func__ << ": mon command " << cmd << " succeeded" << dendl;
}
- em.first->second.hit(now, amount);
+}
+
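+// ask the monitors to set allow_multimds_snaps on our filesystem (sent at most once per daemon instance)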
+void MDSRank::set_mdsmap_multimds_snaps_allowed()
+{
+ static bool already_sent = false;
+ if (already_sent)
+ return;
+
+ stringstream ss;
+ ss << "{\"prefix\":\"fs set\", \"fs_name\":\"" << mdsmap->get_fs_name() << "\", ";
+ ss << "\"var\":\"allow_multimds_snaps\", \"val\":\"true\", ";
+ ss << "\"confirm\":\"--yes-i-am-really-a-mds\"}";
+ std::vector<std::string> cmd = {ss.str()};
+
+ dout(0) << __func__ << ": sending mon command: " << cmd[0] << dendl;
+
+ C_MDS_MonCommand *fin = new C_MDS_MonCommand(this, cmd[0]);
+ monc->start_mon_command(cmd, {}, nullptr, &fin->outs, new C_IO_Wrapper(this, fin));
+
+ already_sent = true;
+}
+
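+// forward the notification to the snap table server, which only lives on the tableserver rank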
+void MDSRank::mark_base_recursively_scrubbed(inodeno_t ino)
+{
+ if (mdsmap->get_tableserver() == whoami)
+ snapserver->mark_base_recursively_scrubbed(ino);
}
void MDSRankDispatcher::tick()
sessionmap.update_average_session_age();
if (is_active() || is_stopping()) {
- server->recall_client_state(nullptr, Server::RecallFlags::ENFORCE_MAX);
- mdcache->trim();
- mdcache->trim_client_leases();
- mdcache->check_memory_usage();
mdlog->trim(); // NOT during recovery!
}
- // log
- if (logger) {
- logger->set(l_mds_subtrees, mdcache->num_subtrees());
-
- mdcache->log_stat();
- }
-
// ...
if (is_clientreplay() || is_active() || is_stopping()) {
+ mdcache->trim_client_leases();
+ mdcache->trim();
+ mdcache->check_memory_usage();
+
+ server->recall_client_state(nullptr, Server::RecallFlags::ENFORCE_MAX);
+
server->find_idle_sessions();
server->evict_cap_revoke_non_responders();
locker->tick();
}
+ // log
+ if (logger) {
+ logger->set(l_mds_subtrees, mdcache->num_subtrees());
+ mdcache->log_stat();
+ }
+
if (is_reconnect())
server->reconnect_tick();
balancer->tick();
mdcache->find_stale_fragment_freeze();
mdcache->migrator->find_stale_export_freeze();
- if (snapserver)
+
+ if (mdsmap->get_tableserver() == whoami) {
snapserver->check_osd_map(false);
+ // Filesystem was created by a pre-mimic MDS. Allow multiple active MDS
+ // ranks after all old snapshots are deleted.
+ if (!mdsmap->allows_multimds_snaps() &&
+ snapserver->can_allow_multimds_snaps()) {
+ set_mdsmap_multimds_snaps_allowed();
+ }
+ }
}
if (is_active() || is_stopping()) {
- update_targets(ceph_clock_now());
+ update_targets();
}
// shut down?
// It should never be possible for shutdown to get called twice, because
// anyone picking up mds_lock checks if stopping is true and drops
// out if it is.
- assert(stopping == false);
+ ceph_assert(stopping == false);
stopping = true;
dout(1) << __func__ << ": shutting down rank " << whoami << dendl;
C_MDS_VoidFn(MDSRank *mds_, fn_ptr fn_)
: MDSInternalContext(mds_), fn(fn_)
{
- assert(mds_);
- assert(fn_);
+ ceph_assert(mds_);
+ ceph_assert(fn_);
}
void finish(int r) override
void MDSRank::damaged()
{
- assert(whoami != MDS_RANK_NONE);
- assert(mds_lock.is_locked_by_me());
+ ceph_assert(whoami != MDS_RANK_NONE);
+ ceph_assert(mds_lock.is_locked_by_me());
- beacon.set_want_state(mdsmap, MDSMap::STATE_DAMAGED);
+ beacon.set_want_state(*mdsmap, MDSMap::STATE_DAMAGED);
monc->flush_log(); // Flush any clog error from before we were called
beacon.notify_health(this); // Include latest status in our swan song
- beacon.send_and_wait(g_conf->mds_mon_shutdown_timeout);
+ beacon.send_and_wait(g_conf()->mds_mon_shutdown_timeout);
// It's okay if we timed out and the mon didn't get our beacon, because
// another daemon (or ourselves after respawn) will eventually take the
void MDSRank::damaged_unlocked()
{
- Mutex::Locker l(mds_lock);
+ std::lock_guard l(mds_lock);
damaged();
}
return;
}
- if (g_conf->mds_action_on_write_error >= 2) {
+ if (g_conf()->mds_action_on_write_error >= 2) {
derr << "unhandled write error " << cpp_strerror(err) << ", suicide..." << dendl;
respawn();
- } else if (g_conf->mds_action_on_write_error == 1) {
+ } else if (g_conf()->mds_action_on_write_error == 1) {
derr << "unhandled write error " << cpp_strerror(err) << ", force readonly..." << dendl;
mdcache->force_readonly();
} else {
void *MDSRank::ProgressThread::entry()
{
- Mutex::Locker l(mds->mds_lock);
+ std::lock_guard l(mds->mds_lock);
while (true) {
while (!mds->stopping &&
mds->finished_queue.empty() &&
void MDSRank::ProgressThread::shutdown()
{
- assert(mds->mds_lock.is_locked_by_me());
- assert(mds->stopping);
+ ceph_assert(mds->mds_lock.is_locked_by_me());
+ ceph_assert(mds->stopping);
if (am_self()) {
// Stopping is set, we will fall out of our main loop naturally
}
}
-bool MDSRankDispatcher::ms_dispatch(Message *m)
+bool MDSRankDispatcher::ms_dispatch(const Message::const_ref &m)
{
if (m->get_source().is_client()) {
- Session *session = static_cast<Session*>(m->get_connection()->get_priv());
+ Session *session = static_cast<Session*>(m->get_connection()->get_priv().get());
if (session)
session->last_seen = Session::clock::now();
}
return ret;
}
-/* If this function returns true, it recognizes the message and has taken the
- * reference. If it returns false, it has done neither. */
-bool MDSRank::_dispatch(Message *m, bool new_msg)
+bool MDSRank::_dispatch(const Message::const_ref &m, bool new_msg)
{
if (is_stale_message(m)) {
- m->put();
return true;
}
return true;
}
- // done with all client replayed requests?
- if (is_clientreplay() &&
- mdcache->is_open() &&
- replay_queue.empty() &&
- beacon.get_want_state() == MDSMap::STATE_CLIENTREPLAY) {
- int num_requests = mdcache->get_num_client_requests();
- dout(10) << " still have " << num_requests << " active replay requests" << dendl;
- if (num_requests == 0)
- clientreplay_done();
- }
-
// hack: thrash exports
static utime_t start;
utime_t now = ceph_clock_now();
/*double el = now - start;
if (el > 30.0 &&
el < 60.0)*/
- for (int i=0; i<g_conf->mds_thrash_exports; i++) {
+ for (int i=0; i<g_conf()->mds_thrash_exports; i++) {
set<mds_rank_t> s;
if (!is_active()) break;
mdsmap->get_mds_set(s, MDSMap::STATE_ACTIVE);
if (s.size() < 2 || CInode::count() < 10)
break; // need peers for this to work.
- if (mdcache->migrator->get_num_exporting() > g_conf->mds_thrash_exports * 5 ||
- mdcache->migrator->get_export_queue_size() > g_conf->mds_thrash_exports * 10)
+ if (mdcache->migrator->get_num_exporting() > g_conf()->mds_thrash_exports * 5 ||
+ mdcache->migrator->get_export_queue_size() > g_conf()->mds_thrash_exports * 10)
break;
- dout(7) << "mds thrashing exports pass " << (i+1) << "/" << g_conf->mds_thrash_exports << dendl;
+ dout(7) << "mds thrashing exports pass " << (i+1) << "/" << g_conf()->mds_thrash_exports << dendl;
// pick a random dir inode
CInode *in = mdcache->hack_pick_random_inode();
}
}
// hack: thrash fragments
- for (int i=0; i<g_conf->mds_thrash_fragments; i++) {
+ for (int i=0; i<g_conf()->mds_thrash_fragments; i++) {
if (!is_active()) break;
- if (mdcache->get_num_fragmenting_dirs() > 5 * g_conf->mds_thrash_fragments) break;
- dout(7) << "mds thrashing fragments pass " << (i+1) << "/" << g_conf->mds_thrash_fragments << dendl;
+ if (mdcache->get_num_fragmenting_dirs() > 5 * g_conf()->mds_thrash_fragments) break;
+ dout(7) << "mds thrashing fragments pass " << (i+1) << "/" << g_conf()->mds_thrash_fragments << dendl;
// pick a random dir inode
CInode *in = mdcache->hack_pick_random_inode();
if (!dir->get_parent_dir()) continue; // must be linked.
if (!dir->is_auth()) continue; // must be auth.
frag_t fg = dir->get_frag();
- if (mdsmap->allows_dirfrags()) {
- if ((fg == frag_t() || (rand() % (1 << fg.bits()) == 0))) {
- mdcache->split_dir(dir, 1);
- } else {
- balancer->queue_merge(dir);
- }
+ if ((fg == frag_t() || (rand() % (1 << fg.bits()) == 0))) {
+ mdcache->split_dir(dir, 1);
+ } else {
+ balancer->queue_merge(dir);
}
}
mlogger->set(l_mdm_dns, CDentry::decrements());
mlogger->set(l_mdm_capa, Capability::increments());
mlogger->set(l_mdm_caps, Capability::decrements());
- mlogger->set(l_mdm_buf, buffer::get_total_alloc());
}
}
/*
* lower priority messages we defer if we seem laggy
*/
-bool MDSRank::handle_deferrable_message(Message *m)
+bool MDSRank::handle_deferrable_message(const Message::const_ref &m)
{
int port = m->get_type() & 0xff00;
// SERVER
case CEPH_MSG_CLIENT_SESSION:
case CEPH_MSG_CLIENT_RECONNECT:
+ case CEPH_MSG_CLIENT_RECLAIM:
ALLOW_MESSAGES_FROM(CEPH_ENTITY_TYPE_CLIENT);
// fall-thru
case CEPH_MSG_CLIENT_REQUEST:
case MSG_MDS_TABLE_REQUEST:
ALLOW_MESSAGES_FROM(CEPH_ENTITY_TYPE_MDS);
{
- MMDSTableRequest *req = static_cast<MMDSTableRequest*>(m);
- if (req->op < 0) {
- MDSTableClient *client = get_table_client(req->table);
- client->handle_request(req);
- } else {
- MDSTableServer *server = get_table_server(req->table);
- server->handle_request(req);
- }
+ const MMDSTableRequest::const_ref &req = MMDSTableRequest::msgref_cast(m);
+ if (req->op < 0) {
+ MDSTableClient *client = get_table_client(req->table);
+ client->handle_request(req);
+ } else {
+ MDSTableServer *server = get_table_server(req->table);
+ server->handle_request(req);
+ }
}
break;
*/
void MDSRank::_advance_queues()
{
- assert(mds_lock.is_locked_by_me());
+ ceph_assert(mds_lock.is_locked_by_me());
- while (!finished_queue.empty()) {
+ if (!finished_queue.empty()) {
dout(7) << "mds has " << finished_queue.size() << " queued contexts" << dendl;
- dout(10) << finished_queue << dendl;
- list<MDSInternalContextBase*> ls;
- ls.swap(finished_queue);
- while (!ls.empty()) {
- dout(10) << " finish " << ls.front() << dendl;
- ls.front()->complete(0);
- ls.pop_front();
+ while (!finished_queue.empty()) {
+ auto fin = finished_queue.front();
+ finished_queue.pop_front();
+
+ dout(10) << " finish " << fin << dendl;
+ fin->complete(0);
heartbeat_reset();
}
if (beacon.is_laggy())
break;
- Message *old = waiting_for_nolaggy.front();
+ Message::const_ref old = waiting_for_nolaggy.front();
waiting_for_nolaggy.pop_front();
- if (is_stale_message(old)) {
- old->put();
- } else {
+ if (!is_stale_message(old)) {
dout(7) << " processing laggy deferred " << *old << dendl;
if (!handle_deferrable_message(old)) {
dout(0) << "unrecognized message " << *old << dendl;
- old->put();
}
}
// after a call to suicide() completes, in which case MDSRank::hb
// has been freed and we are a no-op.
if (!hb) {
- assert(stopping);
+ ceph_assert(stopping);
return;
}
// NB not enabling suicide grace, because the mon takes care of killing us
// (by blacklisting us) when we fail to send beacons, and it's simpler to
// only have one way of dying.
- auto grace = g_conf->get_val<double>("mds_heartbeat_grace");
+ auto grace = g_conf().get_val<double>("mds_heartbeat_grace");
g_ceph_context->get_heartbeat_map()->reset_timeout(hb, grace, 0);
}
-bool MDSRank::is_stale_message(Message *m) const
+bool MDSRank::is_stale_message(const Message::const_ref &m) const
{
// from bad mds?
if (m->get_source().is_mds()) {
mds_rank_t from = mds_rank_t(m->get_source().num());
- if (!mdsmap->have_inst(from) ||
- mdsmap->get_inst(from) != m->get_source_inst() ||
- mdsmap->is_down(from)) {
+ bool bad = false;
+ if (mdsmap->is_down(from)) {
+ bad = true;
+ } else {
+ // FIXME: this is a convoluted check. we should be maintaining a nice
+ // clean map of current ConnectionRefs for current mdses!!!
+ auto c = messenger->connect_to(CEPH_ENTITY_TYPE_MDS,
+ mdsmap->get_addrs(from));
+ if (c != m->get_connection()) {
+ bad = true;
+ dout(5) << " mds." << from << " should be " << c << " "
+ << c->get_peer_addrs() << " but this message is "
+ << m->get_connection() << " " << m->get_source_addrs()
+ << dendl;
+ }
+ }
+ if (bad) {
// bogus mds?
if (m->get_type() == CEPH_MSG_MDS_MAP) {
dout(5) << "got " << *m << " from old/bad/imposter mds " << m->get_source()
<< ", but it's an mdsmap, looking at it" << dendl;
} else if (m->get_type() == MSG_MDS_CACHEEXPIRE &&
- mdsmap->get_inst(from) == m->get_source_inst()) {
+ mdsmap->get_addrs(from) == m->get_source_addrs()) {
dout(5) << "got " << *m << " from down mds " << m->get_source()
<< ", but it's a cache_expire, looking at it" << dendl;
} else {
return false;
}
-Session *MDSRank::get_session(Message *m)
+Session *MDSRank::get_session(const Message::const_ref &m)
{
- Session *session = static_cast<Session *>(m->get_connection()->get_priv());
+ // do not carry ref
+ auto session = static_cast<Session *>(m->get_connection()->get_priv().get());
if (session) {
- session->put(); // do not carry ref
dout(20) << "get_session have " << session << " " << session->info.inst
<< " state " << session->get_state_name() << dendl;
// Check if we've imported an open session since (new sessions start closed)
if (session->is_closed()) {
Session *imported_session = sessionmap.get_session(session->info.inst.name);
if (imported_session && imported_session != session) {
- dout(10) << __func__ << " replacing connection bootstrap session " << session << " with imported session " << imported_session << dendl;
+ dout(10) << __func__ << " replacing connection bootstrap session "
+ << session << " with imported session " << imported_session
+ << dendl;
imported_session->info.auth_name = session->info.auth_name;
//assert(session->info.auth_name == imported_session->info.auth_name);
- assert(session->info.inst == imported_session->info.inst);
- imported_session->connection = session->connection;
+ ceph_assert(session->info.inst == imported_session->info.inst);
+ imported_session->set_connection(session->get_connection().get());
// send out any queued messages
while (!session->preopen_out_queue.empty()) {
- imported_session->connection->send_message(session->preopen_out_queue.front());
+ imported_session->get_connection()->send_message2(std::move(session->preopen_out_queue.front()));
session->preopen_out_queue.pop_front();
}
imported_session->auth_caps = session->auth_caps;
- assert(session->get_nref() == 1);
- imported_session->connection->set_priv(imported_session->get());
imported_session->last_seen = session->last_seen;
+ ceph_assert(session->get_nref() == 1);
+ imported_session->get_connection()->set_priv(imported_session->get());
session = imported_session;
}
}
return session;
}
-void MDSRank::send_message(Message *m, Connection *c)
+void MDSRank::send_message(const Message::ref& m, const ConnectionRef& c)
{
- assert(c);
- c->send_message(m);
+ ceph_assert(c);
+ c->send_message2(m);
}
-void MDSRank::send_message_mds(Message *m, mds_rank_t mds)
+void MDSRank::send_message_mds(const Message::ref& m, mds_rank_t mds)
{
if (!mdsmap->is_up(mds)) {
dout(10) << "send_message_mds mds." << mds << " not up, dropping " << *m << dendl;
- m->put();
return;
}
// send mdsmap first?
if (mds != whoami && peer_mdsmap_epoch[mds] < mdsmap->get_epoch()) {
- messenger->send_message(new MMDSMap(monc->get_fsid(), mdsmap),
- mdsmap->get_inst(mds));
+ auto _m = MMDSMap::create(monc->get_fsid(), *mdsmap);
+ messenger->send_to_mds(_m.detach(), mdsmap->get_addrs(mds));
peer_mdsmap_epoch[mds] = mdsmap->get_epoch();
}
// send message
- messenger->send_message(m, mdsmap->get_inst(mds));
+ messenger->send_to_mds(Message::ref(m).detach(), mdsmap->get_addrs(mds));
}
-void MDSRank::forward_message_mds(Message *m, mds_rank_t mds)
+void MDSRank::forward_message_mds(const MClientRequest::const_ref& m, mds_rank_t mds)
{
- assert(mds != whoami);
-
- // client request?
- if (m->get_type() == CEPH_MSG_CLIENT_REQUEST &&
- (static_cast<MClientRequest*>(m))->get_source().is_client()) {
- MClientRequest *creq = static_cast<MClientRequest*>(m);
- creq->inc_num_fwd(); // inc forward counter
-
- /*
- * don't actually forward if non-idempotent!
- * client has to do it. although the MDS will ignore duplicate requests,
- * the affected metadata may migrate, in which case the new authority
- * won't have the metareq_id in the completed request map.
- */
- // NEW: always make the client resend!
- bool client_must_resend = true; //!creq->can_forward();
-
- // tell the client where it should go
- messenger->send_message(new MClientRequestForward(creq->get_tid(), mds, creq->get_num_fwd(),
- client_must_resend),
- creq->get_source_inst());
+ ceph_assert(mds != whoami);
- if (client_must_resend) {
- m->put();
- return;
- }
- }
-
- // these are the only types of messages we should be 'forwarding'; they
- // explicitly encode their source mds, which gets clobbered when we resend
- // them here.
- assert(m->get_type() == MSG_MDS_DIRUPDATE ||
- m->get_type() == MSG_MDS_EXPORTDIRDISCOVER);
-
- // send mdsmap first?
- if (peer_mdsmap_epoch[mds] < mdsmap->get_epoch()) {
- messenger->send_message(new MMDSMap(monc->get_fsid(), mdsmap),
- mdsmap->get_inst(mds));
- peer_mdsmap_epoch[mds] = mdsmap->get_epoch();
- }
+ /*
+ * don't actually forward if non-idempotent!
+ * client has to do it. although the MDS will ignore duplicate requests,
+ * the affected metadata may migrate, in which case the new authority
+ * won't have the metareq_id in the completed request map.
+ */
+ // NEW: always make the client resend!
+ bool client_must_resend = true; //!creq->can_forward();
- messenger->send_message(m, mdsmap->get_inst(mds));
+ // tell the client where it should go
+ auto session = get_session(m);
+ auto f = MClientRequestForward::create(m->get_tid(), mds, m->get_num_fwd()+1, client_must_resend);
+ send_message_client(f, session);
}
-
-
-void MDSRank::send_message_client_counted(Message *m, client_t client)
+void MDSRank::send_message_client_counted(const Message::ref& m, client_t client)
{
- Session *session = sessionmap.get_session(entity_name_t::CLIENT(client.v));
+ Session *session = sessionmap.get_session(entity_name_t::CLIENT(client.v));
if (session) {
send_message_client_counted(m, session);
} else {
}
}
-void MDSRank::send_message_client_counted(Message *m, Connection *connection)
+void MDSRank::send_message_client_counted(const Message::ref& m, const ConnectionRef& connection)
{
- Session *session = static_cast<Session *>(connection->get_priv());
+ // do not carry ref
+ auto session = static_cast<Session *>(connection->get_priv().get());
if (session) {
- session->put(); // do not carry ref
send_message_client_counted(m, session);
} else {
dout(10) << "send_message_client_counted has no session for " << m->get_source_inst() << dendl;
}
}
-void MDSRank::send_message_client_counted(Message *m, Session *session)
+void MDSRank::send_message_client_counted(const Message::ref& m, Session* session)
{
version_t seq = session->inc_push_seq();
dout(10) << "send_message_client_counted " << session->info.inst.name << " seq "
<< seq << " " << *m << dendl;
- if (session->connection) {
- session->connection->send_message(m);
+ if (session->get_connection()) {
+ session->get_connection()->send_message2(m);
} else {
session->preopen_out_queue.push_back(m);
}
}
-void MDSRank::send_message_client(Message *m, Session *session)
+void MDSRank::send_message_client(const Message::ref& m, Session* session)
{
dout(10) << "send_message_client " << session->info.inst << " " << *m << dendl;
- if (session->connection) {
- session->connection->send_message(m);
+ if (session->get_connection()) {
+ session->get_connection()->send_message2(m);
} else {
session->preopen_out_queue.push_back(m);
}
osd_epoch_barrier = e;
}
-void MDSRank::retry_dispatch(Message *m)
+void MDSRank::retry_dispatch(const Message::const_ref &m)
{
inc_dispatch_depth();
_dispatch(m, false);
void MDSRank::request_state(MDSMap::DaemonState s)
{
dout(3) << "request_state " << ceph_mds_state_name(s) << dendl;
- beacon.set_want_state(mdsmap, s);
+ beacon.set_want_state(*mdsmap, s);
beacon.send();
}
clog->error() << "Error loading MDS rank " << whoami << ": "
<< cpp_strerror(r);
damaged();
- assert(r == 0); // Unreachable, damaged() calls respawn()
+ ceph_assert(r == 0); // Unreachable, damaged() calls respawn()
} else if (r == -EROFS) {
dout(0) << "boot error forcing transition to read-only; MDS will try to continue" << dendl;
} else {
}
}
- assert(is_starting() || is_any_replay());
+ ceph_assert(is_starting() || is_any_replay());
switch(step) {
case MDS_BOOT_INITIAL:
} else if (!standby_replaying) {
dout(2) << "Booting: " << step << ": opening purge queue (async)" << dendl;
purge_queue.open(NULL);
+ dout(2) << "Booting: " << step << ": loading open file table (async)" << dendl;
+ mdcache->open_file_table.load(nullptr);
}
if (mdsmap->get_tableserver() == whoami) {
mdcache->open_mydir_inode(gather.new_sub());
}
+ mdcache->create_global_snaprealm();
+
if (whoami == mdsmap->get_root()) { // load root inode off disk if we are auth
mdcache->open_root_inode(gather.new_sub());
} else if (is_any_replay()) {
}
break;
case MDS_BOOT_REPLAY_DONE:
- assert(is_any_replay());
+ ceph_assert(is_any_replay());
// Sessiontable and inotable should be in sync after replay, validate
// that they are consistent.
void MDSRank::validate_sessions()
{
- assert(mds_lock.is_locked_by_me());
+ ceph_assert(mds_lock.is_locked_by_me());
bool valid = true;
// Identify any sessions which have state inconsistent with other,
// after they have been loaded from rados during startup.
// Mitigate bugs like: http://tracker.ceph.com/issues/16842
- const auto &sessions = sessionmap.get_sessions();
- for (const auto &i : sessions) {
+ for (const auto &i : sessionmap.get_sessions()) {
Session *session = i.second;
interval_set<inodeno_t> badones;
if (inotable->intersects_free(session->info.prealloc_inos, &badones)) {
if (!valid) {
damaged();
- assert(valid);
+ ceph_assert(valid);
}
}
void MDSRank::starting_done()
{
dout(3) << "starting_done" << dendl;
- assert(is_starting());
+ ceph_assert(is_starting());
request_state(MDSMap::STATE_ACTIVE);
mdlog->start_new_segment();
+
+ // sync snaptable cache
+ snapclient->sync(new C_MDSInternalNoop);
}
public:
explicit C_MDS_StandbyReplayRestart(MDSRank *m) : MDSInternalContext(m) {}
void finish(int r) override {
- assert(!r);
+ ceph_assert(!r);
mds->standby_replay_restart();
}
};
this,
mdlog->get_journaler()->get_read_pos()));
- dout(1) << " opening purge queue (async)" << dendl;
+ dout(1) << " opening purge_queue (async)" << dendl;
purge_queue.open(NULL);
+ dout(1) << " opening open_file_table (async)" << dendl;
+ mdcache->open_file_table.load(nullptr);
} else {
dout(1) << " waiting for osdmap " << mdsmap->get_last_failure_osd_epoch()
<< " (which blacklists prior instance)" << dendl;
if (is_standby_replay()) {
// The replay was done in standby state, and we are still in that state
- assert(standby_replaying);
+ ceph_assert(standby_replaying);
dout(10) << "setting replay timer" << dendl;
- timer.add_event_after(g_conf->mds_replay_interval,
+ timer.add_event_after(g_conf()->mds_replay_interval,
new C_MDS_StandbyReplayRestart(this));
return;
} else if (standby_replaying) {
return;
} else {
// Replay is complete, journal read should be up to date
- assert(mdlog->get_journaler()->get_read_pos() == mdlog->get_journaler()->get_write_pos());
- assert(!is_standby_replay());
+ ceph_assert(mdlog->get_journaler()->get_read_pos() == mdlog->get_journaler()->get_write_pos());
+ ceph_assert(!is_standby_replay());
// Reformat and come back here
- if (mdlog->get_journaler()->get_stream_format() < g_conf->mds_journal_format) {
+ if (mdlog->get_journaler()->get_stream_format() < g_conf()->mds_journal_format) {
dout(4) << "reformatting journal on standby-replay->replay transition" << dendl;
mdlog->reopen(new C_MDS_BootStart(this, MDS_BOOT_REPLAY_DONE));
return;
mdlog->get_journaler()->set_writeable();
mdlog->get_journaler()->trim_tail();
- if (g_conf->mds_wipe_sessions) {
+ if (mdsmap->get_tableserver() == whoami &&
+ snapserver->upgrade_format()) {
+ dout(1) << "upgrading snaptable format" << dendl;
+ snapserver->save(new C_MDSInternalNoop);
+ }
+
+ if (g_conf()->mds_wipe_sessions) {
dout(1) << "wiping out client sessions" << dendl;
sessionmap.wipe();
sessionmap.save(new C_MDSInternalNoop);
}
- if (g_conf->mds_wipe_ino_prealloc) {
+ if (g_conf()->mds_wipe_ino_prealloc) {
dout(1) << "wiping out ino prealloc from sessions" << dendl;
sessionmap.wipe_ino_prealloc();
sessionmap.save(new C_MDSInternalNoop);
}
- if (g_conf->mds_skip_ino) {
- inodeno_t i = g_conf->mds_skip_ino;
+ if (g_conf()->mds_skip_ino) {
+ inodeno_t i = g_conf()->mds_skip_ino;
dout(1) << "skipping " << i << " inodes" << dendl;
inotable->skip_inos(i);
inotable->save(new C_MDSInternalNoop);
mdsmap->get_num_failed_mds() == 0) { // just me!
dout(2) << "i am alone, moving to state reconnect" << dendl;
request_state(MDSMap::STATE_RECONNECT);
+ // sync snaptable cache
+ snapclient->sync(new C_MDSInternalNoop);
} else {
dout(2) << "i am not alone, moving to state resolve" << dendl;
request_state(MDSMap::STATE_RESOLVE);
mdcache->rollback_uncommitted_fragments();
}
-
void MDSRank::resolve_start()
{
dout(1) << "resolve_start" << dendl;
mdcache->resolve_start(new C_MDS_VoidFn(this, &MDSRank::resolve_done));
finish_contexts(g_ceph_context, waiting_for_resolve);
}
+
void MDSRank::resolve_done()
{
dout(1) << "resolve_done" << dendl;
request_state(MDSMap::STATE_RECONNECT);
+ // sync snaptable cache
+ snapclient->sync(new C_MDSInternalNoop);
}
void MDSRank::reconnect_start()
objecter->enable_blacklist_events();
std::set<entity_addr_t> blacklist;
epoch_t epoch = 0;
- objecter->with_osdmap([this, &blacklist, &epoch](const OSDMap& o) {
+ objecter->with_osdmap([&blacklist, &epoch](const OSDMap& o) {
o.get_blacklist(&blacklist);
epoch = o.get_epoch();
});
// The root should always have a subtree!
clog->error() << "No subtrees found for root MDS rank!";
damaged();
- assert(mdcache->is_subtrees());
+ ceph_assert(mdcache->is_subtrees());
} else {
dout(1) << " empty cache, no subtrees, leaving cluster" << dendl;
request_state(MDSMap::STATE_STOPPED);
return;
}
- if (replay_queue.empty())
+ if (replay_queue.empty() && !server->get_num_pending_reclaim()) {
request_state(MDSMap::STATE_ACTIVE);
- else
+ } else {
+ replaying_requests_done = replay_queue.empty();
request_state(MDSMap::STATE_CLIENTREPLAY);
+ }
}
void MDSRank::clientreplay_start()
bool MDSRank::queue_one_replay()
{
- if (replay_queue.empty()) {
- mdlog->wait_for_safe(new C_MDS_VoidFn(this, &MDSRank::clientreplay_done));
- return false;
+ if (!replay_queue.empty()) {
+ queue_waiter(replay_queue.front());
+ replay_queue.pop_front();
+ return true;
+ }
+ if (!replaying_requests_done) {
+ replaying_requests_done = true;
+ mdlog->flush();
+ }
+ maybe_clientreplay_done();
+ return false;
+}
+
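+// move from clientreplay to active only after all queued replay requests are journaled and no sessions are waiting to be reclaimed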
+void MDSRank::maybe_clientreplay_done()
+{
+ if (is_clientreplay() && get_want_state() == MDSMap::STATE_CLIENTREPLAY) {
+
+ // don't go to active if there are sessions waiting to be reclaimed
+ if (replaying_requests_done && !server->get_num_pending_reclaim()) {
+ mdlog->wait_for_safe(new C_MDS_VoidFn(this, &MDSRank::clientreplay_done));
+ return;
+ }
+
+ dout(1) << " still have " << replay_queue.size() + (int)!replaying_requests_done
+ << " requests need to be replayed, " << server->get_num_pending_reclaim()
+ << " sessions need to be reclaimed" << dendl;
}
- queue_waiter(replay_queue.front());
- replay_queue.pop_front();
- return true;
}
void MDSRank::clientreplay_done()
mdcache->start_files_to_recover();
mdcache->reissue_all_caps();
- mdcache->activate_stray_manager();
finish_contexts(g_ceph_context, waiting_for_active); // kick waiters
}
void MDSRank::recovery_done(int oldstate)
{
dout(1) << "recovery_done -- successful recovery!" << dendl;
- assert(is_clientreplay() || is_active());
-
- // kick snaptable (resent AGREEs)
- if (mdsmap->get_tableserver() == whoami) {
- set<mds_rank_t> active;
- mdsmap->get_mds_set_lower_bound(active, MDSMap::STATE_CLIENTREPLAY);
- snapserver->finish_recovery(active);
- }
+ ceph_assert(is_clientreplay() || is_active());
if (oldstate == MDSMap::STATE_CREATING)
return;
{
dout(1)<< "creating_done" << dendl;
request_state(MDSMap::STATE_ACTIVE);
+ // sync snaptable cache
+ snapclient->sync(new C_MDSInternalNoop);
}
void MDSRank::boot_create()
mdcache->init_layouts();
- snapserver->set_rank(whoami);
inotable->set_rank(whoami);
sessionmap.set_rank(whoami);
dout(3) << "boot_create creating mydir hierarchy" << dendl;
mdcache->create_mydir_hierarchy(fin.get());
+ dout(3) << "boot_create creating global snaprealm" << dendl;
+ mdcache->create_global_snaprealm();
+
// fixme: fake out inotable (reset, pretend loaded)
dout(10) << "boot_create creating fresh inotable table" << dendl;
inotable->reset();
// initialize tables
if (mdsmap->get_tableserver() == whoami) {
dout(10) << "boot_create creating fresh snaptable" << dendl;
+ snapserver->set_rank(whoami);
snapserver->reset();
snapserver->save(fin.new_sub());
}
- assert(g_conf->mds_kill_create_at != 1);
+ ceph_assert(g_conf()->mds_kill_create_at != 1);
// ok now journal it
mdlog->journal_segment_subtree_map(fin.new_sub());
dout(2) << "Stopping..." << dendl;
if (mdsmap->get_num_in_mds() == 1 && !sessionmap.empty()) {
- // we're the only mds up!
- dout(0) << "we are the last MDS, and have mounted clients: we cannot flush our journal. suicide!" << dendl;
- suicide();
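+ // we are the only MDS and still have mounted clients: evict them all so shutdown can proceed instead of committing suicide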
+ std::vector<Session*> victims;
+ const auto& sessions = sessionmap.get_sessions();
+ for (const auto& p : sessions) {
+ if (!p.first.is_client()) {
+ continue;
+ }
+
+ Session *s = p.second;
+ victims.push_back(s);
+ }
+
+ dout(20) << __func__ << " matched " << victims.size() << " sessions" << dendl;
+ ceph_assert(!victims.empty());
+
+ C_GatherBuilder gather(g_ceph_context, new C_MDSInternalNoop);
+ for (const auto &s : victims) {
+ std::stringstream ss;
+ evict_client(s->get_client().v, false,
+ g_conf()->mds_session_blacklist_on_evict, ss, gather.new_sub());
+ }
+ gather.activate();
}
mdcache->shutdown_start();
}
void MDSRankDispatcher::handle_mds_map(
- MMDSMap *m,
- MDSMap *oldmap)
+ const MMDSMap::const_ref &m,
+ const MDSMap &oldmap)
{
// I am only to be passed MDSMaps in which I hold a rank
- assert(whoami != MDS_RANK_NONE);
+ ceph_assert(whoami != MDS_RANK_NONE);
MDSMap::DaemonState oldstate = state;
mds_gid_t mds_gid = mds_gid_t(monc->get_global_id());
if (objecter->get_client_incarnation() != incarnation)
objecter->set_client_incarnation(incarnation);
+ if (oldmap.get_min_compat_client() != mdsmap->get_min_compat_client())
+ server->update_required_client_features();
+
// for debug
- if (g_conf->mds_dump_cache_on_map)
+ if (g_conf()->mds_dump_cache_on_map)
mdcache->dump_cache();
cluster_degraded = mdsmap->is_degraded();
// did someone fail?
// new down?
set<mds_rank_t> olddown, down;
- oldmap->get_down_mds_set(&olddown);
+ oldmap.get_down_mds_set(&olddown);
mdsmap->get_down_mds_set(&down);
for (const auto& r : down) {
- if (oldmap->have_inst(r) && olddown.count(r) == 0) {
- messenger->mark_down(oldmap->get_inst(r).addr);
+ if (oldmap.have_inst(r) && olddown.count(r) == 0) {
+ messenger->mark_down_addrs(oldmap.get_addrs(r));
handle_mds_failure(r);
}
}
mdsmap->get_up_mds_set(up);
for (const auto& r : up) {
auto& info = mdsmap->get_info(r);
- if (oldmap->have_inst(r)) {
- auto& oldinfo = oldmap->get_info(r);
+ if (oldmap.have_inst(r)) {
+ auto& oldinfo = oldmap.get_info(r);
if (info.inc != oldinfo.inc) {
- messenger->mark_down(oldinfo.addr);
+ messenger->mark_down_addrs(oldinfo.get_addrs());
if (info.state == MDSMap::STATE_REPLAY ||
info.state == MDSMap::STATE_RESOLVE) {
restart.insert(r);
handle_mds_failure(r);
} else {
- assert(info.state == MDSMap::STATE_STARTING ||
+ ceph_assert(info.state == MDSMap::STATE_STARTING ||
info.state == MDSMap::STATE_ACTIVE);
// -> stopped (missing) -> starting -> active
restart.insert(r);
mdcache->migrator->handle_mds_failure_or_stop(r);
+ if (mdsmap->get_tableserver() == whoami)
+ snapserver->handle_mds_failure_or_stop(r);
}
}
} else {
restart.insert(r);
handle_mds_failure(r);
} else {
- assert(info.state == MDSMap::STATE_CREATING ||
+ ceph_assert(info.state == MDSMap::STATE_CREATING ||
info.state == MDSMap::STATE_STARTING ||
info.state == MDSMap::STATE_ACTIVE);
}
dout(1) << "handle_mds_map state change "
<< ceph_mds_state_name(oldstate) << " --> "
<< ceph_mds_state_name(state) << dendl;
- beacon.set_want_state(mdsmap, state);
+ beacon.set_want_state(*mdsmap, state);
if (oldstate == MDSMap::STATE_STANDBY_REPLAY) {
dout(10) << "Monitor activated us! Deactivating replay loop" << dendl;
} else if (is_starting()) {
boot_start();
} else if (is_stopping()) {
- assert(oldstate == MDSMap::STATE_ACTIVE);
+ ceph_assert(oldstate == MDSMap::STATE_ACTIVE);
stopping_start();
}
}
// RESOLVE
// is someone else newly resolving?
if (state >= MDSMap::STATE_RESOLVE) {
- if ((!oldmap->is_resolving() || !restart.empty()) && mdsmap->is_resolving()) {
+ // recover snaptable
+ if (mdsmap->get_tableserver() == whoami) {
+ if (oldstate < MDSMap::STATE_RESOLVE) {
+ set<mds_rank_t> s;
+ mdsmap->get_mds_set_lower_bound(s, MDSMap::STATE_RESOLVE);
+ snapserver->finish_recovery(s);
+ } else {
+ set<mds_rank_t> old_set, new_set;
+ oldmap.get_mds_set_lower_bound(old_set, MDSMap::STATE_RESOLVE);
+ mdsmap->get_mds_set_lower_bound(new_set, MDSMap::STATE_RESOLVE);
+ for (const auto& r : new_set) {
+ if (r == whoami)
+ continue; // not me
+ if (!old_set.count(r) || restart.count(r)) { // newly so?
+ snapserver->handle_mds_recovery(r);
+ }
+ }
+ }
+ }
+
+ if ((!oldmap.is_resolving() || !restart.empty()) && mdsmap->is_resolving()) {
set<mds_rank_t> resolve;
mdsmap->get_mds_set(resolve, MDSMap::STATE_RESOLVE);
dout(10) << " resolve set is " << resolve << dendl;
// is everybody finally rejoining?
if (state >= MDSMap::STATE_REJOIN) {
// did we start?
- if (!oldmap->is_rejoining() && mdsmap->is_rejoining())
+ if (!oldmap.is_rejoining() && mdsmap->is_rejoining())
rejoin_joint_start();
// did we finish?
- if (g_conf->mds_dump_cache_after_rejoin &&
- oldmap->is_rejoining() && !mdsmap->is_rejoining())
+ if (g_conf()->mds_dump_cache_after_rejoin &&
+ oldmap.is_rejoining() && !mdsmap->is_rejoining())
mdcache->dump_cache(); // for DEBUG only
if (oldstate >= MDSMap::STATE_REJOIN ||
oldstate == MDSMap::STATE_STARTING) {
// ACTIVE|CLIENTREPLAY|REJOIN => we can discover from them.
set<mds_rank_t> olddis, dis;
- oldmap->get_mds_set_lower_bound(olddis, MDSMap::STATE_REJOIN);
+ oldmap.get_mds_set_lower_bound(olddis, MDSMap::STATE_REJOIN);
mdsmap->get_mds_set_lower_bound(dis, MDSMap::STATE_REJOIN);
for (const auto& r : dis) {
if (r == whoami)
}
}
- if (oldmap->is_degraded() && !cluster_degraded && state >= MDSMap::STATE_ACTIVE) {
+ if (oldmap.is_degraded() && !cluster_degraded && state >= MDSMap::STATE_ACTIVE) {
dout(1) << "cluster recovered." << dendl;
auto it = waiting_for_active_peer.find(MDS_RANK_NONE);
if (it != waiting_for_active_peer.end()) {
if (state >= MDSMap::STATE_CLIENTREPLAY &&
oldstate >= MDSMap::STATE_CLIENTREPLAY) {
set<mds_rank_t> oldactive, active;
- oldmap->get_mds_set_lower_bound(oldactive, MDSMap::STATE_CLIENTREPLAY);
+ oldmap.get_mds_set_lower_bound(oldactive, MDSMap::STATE_CLIENTREPLAY);
mdsmap->get_mds_set_lower_bound(active, MDSMap::STATE_CLIENTREPLAY);
for (const auto& r : active) {
if (r == whoami)
}
}
- if (state >= MDSMap::STATE_CLIENTREPLAY) {
+ if (is_clientreplay() || is_active() || is_stopping()) {
// did anyone stop?
set<mds_rank_t> oldstopped, stopped;
- oldmap->get_stopped_mds_set(oldstopped);
+ oldmap.get_stopped_mds_set(oldstopped);
mdsmap->get_stopped_mds_set(stopped);
for (const auto& r : stopped)
- if (oldstopped.count(r) == 0) // newly so?
+ if (oldstopped.count(r) == 0) { // newly so?
mdcache->migrator->handle_mds_failure_or_stop(r);
+ if (mdsmap->get_tableserver() == whoami)
+ snapserver->handle_mds_failure_or_stop(r);
+ }
}
{
- map<epoch_t,list<MDSInternalContextBase*> >::iterator p = waiting_for_mdsmap.begin();
+ map<epoch_t,MDSContext::vec >::iterator p = waiting_for_mdsmap.begin();
while (p != waiting_for_mdsmap.end() && p->first <= mdsmap->get_epoch()) {
- list<MDSInternalContextBase*> ls;
+ MDSContext::vec ls;
ls.swap(p->second);
waiting_for_mdsmap.erase(p++);
queue_waiters(ls);
// might not include barriers from the previous incarnation of this MDS)
set_osd_epoch_barrier(objecter->with_osdmap(
std::mem_fn(&OSDMap::get_epoch)));
- }
- if (is_active()) {
- bool found = false;
- MDSMap::mds_info_t info = mdsmap->get_info(whoami);
-
- for (map<mds_gid_t,MDSMap::mds_info_t>::const_iterator p = mdsmap->get_mds_info().begin();
- p != mdsmap->get_mds_info().end();
- ++p) {
- if (p->second.state == MDSMap::STATE_STANDBY_REPLAY &&
- (p->second.standby_for_rank == whoami ||(info.name.length() && p->second.standby_for_name == info.name))) {
- found = true;
- break;
- }
- if (found)
- mdlog->set_write_iohint(0);
- else
- mdlog->set_write_iohint(CEPH_OSD_OP_FLAG_FADVISE_DONTNEED);
- }
+ /* Now check if we should hint to the OSD that a read may follow */
+ if (mdsmap->has_standby_replay(whoami))
+ mdlog->set_write_iohint(0);
+ else
+ mdlog->set_write_iohint(CEPH_OSD_OP_FLAG_FADVISE_DONTNEED);
}
- if (oldmap->get_max_mds() != mdsmap->get_max_mds()) {
+ if (oldmap.get_max_mds() != mdsmap->get_max_mds()) {
purge_queue.update_op_limit(*mdsmap);
}
}
mdcache->handle_mds_recovery(who);
- if (mdsmap->get_tableserver() == whoami) {
- snapserver->handle_mds_recovery(who);
- }
-
queue_waiters(waiting_for_active_peer[who]);
waiting_for_active_peer.erase(who);
}
mdcache->handle_mds_failure(who);
+ if (mdsmap->get_tableserver() == whoami)
+ snapserver->handle_mds_failure_or_stop(who);
+
snapclient->handle_mds_failure(who);
}
-bool MDSRankDispatcher::handle_asok_command(
- std::string command, cmdmap_t& cmdmap, Formatter *f,
- std::ostream& ss)
+bool MDSRankDispatcher::handle_asok_command(std::string_view command,
+ const cmdmap_t& cmdmap,
+ Formatter *f,
+ std::ostream& ss)
{
if (command == "dump_ops_in_flight" ||
command == "ops") {
if (!op_tracker.dump_ops_in_flight(f)) {
ss << "op_tracker tracking is not enabled now, so no ops are tracked currently, even those get stuck. \
- please enable \"osd_enable_op_tracker\", and the tracker will start to track new ops received afterwards.";
+ please enable \"mds_enable_op_tracker\", and the tracker will start to track new ops received afterwards.";
}
} else if (command == "dump_blocked_ops") {
if (!op_tracker.dump_ops_in_flight(f, true)) {
ss << "op_tracker tracking is not enabled now, so no ops are tracked currently, even those get stuck. \
- Please enable \"osd_enable_op_tracker\", and the tracker will start to track new ops received afterwards.";
+ Please enable \"mds_enable_op_tracker\", and the tracker will start to track new ops received afterwards.";
}
} else if (command == "dump_historic_ops") {
if (!op_tracker.dump_historic_ops(f)) {
ss << "op_tracker tracking is not enabled now, so no ops are tracked currently, even those get stuck. \
- please enable \"osd_enable_op_tracker\", and the tracker will start to track new ops received afterwards.";
+ please enable \"mds_enable_op_tracker\", and the tracker will start to track new ops received afterwards.";
}
} else if (command == "dump_historic_ops_by_duration") {
if (!op_tracker.dump_historic_ops(f, true)) {
ss << "op_tracker tracking is not enabled now, so no ops are tracked currently, even those get stuck. \
- please enable \"osd_enable_op_tracker\", and the tracker will start to track new ops received afterwards.";
+ please enable \"mds_enable_op_tracker\", and the tracker will start to track new ops received afterwards.";
}
} else if (command == "osdmap barrier") {
int64_t target_epoch = 0;
cond.wait();
}
} else if (command == "session ls") {
- Mutex::Locker l(mds_lock);
+ std::lock_guard l(mds_lock);
heartbeat_reset();
mds_lock.Lock();
std::stringstream dss;
bool evicted = evict_client(strtol(client_id.c_str(), 0, 10), true,
- g_conf->mds_session_blacklist_on_evict, dss);
+ g_conf()->mds_session_blacklist_on_evict, dss);
if (!evicted) {
dout(15) << dss.str() << dendl;
ss << dss.str();
vector<string> scrubop_vec;
cmd_getval(g_ceph_context, cmdmap, "scrubops", scrubop_vec);
cmd_getval(g_ceph_context, cmdmap, "path", path);
- command_scrub_path(f, path, scrubop_vec);
+
+ C_SaferCond cond;
+ command_scrub_start(f, path, "", scrubop_vec, &cond);
+ cond.wait();
} else if (command == "tag path") {
string path;
cmd_getval(g_ceph_context, cmdmap, "path", path);
}
command_export_dir(f, path, (mds_rank_t)rank);
} else if (command == "dump cache") {
- Mutex::Locker l(mds_lock);
+ std::lock_guard l(mds_lock);
string path;
int r;
if(!cmd_getval(g_ceph_context, cmdmap, "path", path)) {
f->reset();
}
} else if (command == "cache status") {
- Mutex::Locker l(mds_lock);
+ std::lock_guard l(mds_lock);
mdcache->cache_status(f);
- } else if (command == "cache drop") {
- int64_t timeout;
- if (!cmd_getval(g_ceph_context, cmdmap, "timeout", timeout)) {
- timeout = 0;
- }
-
- C_SaferCond cond;
- command_cache_drop((uint64_t)timeout, f, &cond);
- int r = cond.wait();
- if (r != 0) {
- f->flush(ss);
- }
} else if (command == "dump tree") {
- string root;
- int64_t depth;
- cmd_getval(g_ceph_context, cmdmap, "root", root);
- if (!cmd_getval(g_ceph_context, cmdmap, "depth", depth))
- depth = -1;
- {
- Mutex::Locker l(mds_lock);
- int r = mdcache->dump_cache(root, depth, f);
- if (r != 0) {
- ss << "Failed to dump tree: " << cpp_strerror(r);
- f->reset();
- }
- }
+ command_dump_tree(cmdmap, ss, f);
} else if (command == "dump loads") {
- Mutex::Locker l(mds_lock);
+ std::lock_guard l(mds_lock);
int r = balancer->dump_loads(f);
if (r != 0) {
ss << "Failed to dump loads: " << cpp_strerror(r);
f->reset();
}
+ } else if (command == "dump snaps") {
+ std::lock_guard l(mds_lock);
+ string server;
+ cmd_getval(g_ceph_context, cmdmap, "server", server);
+ if (server == "--server") {
+ if (mdsmap->get_tableserver() == whoami) {
+ snapserver->dump(f);
+ } else {
+ ss << "Not snapserver";
+ }
+ } else {
+ int r = snapclient->dump_cache(f);
+ if (r != 0) {
+ ss << "Failed to dump snapclient: " << cpp_strerror(r);
+ f->reset();
+ }
+ }
} else if (command == "force_readonly") {
- Mutex::Locker l(mds_lock);
+ std::lock_guard l(mds_lock);
mdcache->force_readonly();
} else if (command == "dirfrag split") {
command_dirfrag_split(cmdmap, ss);
command_dirfrag_merge(cmdmap, ss);
} else if (command == "dirfrag ls") {
command_dirfrag_ls(cmdmap, ss, f);
+ } else if (command == "openfiles ls") {
+ command_openfiles_ls(f);
+ } else if (command == "dump inode") {
+ command_dump_inode(f, cmdmap, ss);
} else {
return false;
}
class C_MDS_Send_Command_Reply : public MDSInternalContext {
protected:
- MCommand *m;
+ MCommand::const_ref m;
public:
- C_MDS_Send_Command_Reply(MDSRank *_mds, MCommand *_m) :
- MDSInternalContext(_mds), m(_m) { m->get(); }
+ C_MDS_Send_Command_Reply(MDSRank *_mds, const MCommand::const_ref &_m) :
+ MDSInternalContext(_mds), m(_m) {}
- void send(int r, boost::string_view ss) {
+ void send(int r, std::string_view ss) {
std::stringstream ds;
send(r, ss, ds);
}
- void send(int r, boost::string_view ss, std::stringstream &ds) {
+ void send(int r, std::string_view ss, std::stringstream &ds) {
bufferlist bl;
bl.append(ds);
MDSDaemon::send_command_reply(m, mds, r, bl, ss);
- m->put();
}
void finish(int r) override {
}
};
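+// base class for asok commands that complete asynchronously: buffers formatter output and sends the command reply when the operation finishes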
+class C_ExecAndReply : public C_MDS_Send_Command_Reply {
+public:
+ C_ExecAndReply(MDSRank *mds, const MCommand::const_ref &m)
+ : C_MDS_Send_Command_Reply(mds, m), f(true) {
+ }
+
+ void finish(int r) override {
+ std::stringstream ds;
+ std::stringstream ss;
+ if (r != 0) {
+ f.flush(ss);
+ } else {
+ f.flush(ds);
+ }
+
+ send(r, ss.str(), ds);
+ }
+
+ virtual void exec() = 0;
+
+protected:
+ JSONFormatter f;
+};
+
+class C_CacheDropExecAndReply : public C_ExecAndReply {
+public:
+ C_CacheDropExecAndReply(MDSRank *mds, const MCommand::const_ref &m,
+ uint64_t timeout)
+ : C_ExecAndReply(mds, m), timeout(timeout) {
+ }
+
+ void exec() override {
+ mds->command_cache_drop(timeout, &f, this);
+ }
+
+private:
+ uint64_t timeout;
+};
+
+class C_ScrubExecAndReply : public C_ExecAndReply {
+public:
+ C_ScrubExecAndReply(MDSRank *mds, const MCommand::const_ref &m,
+ const std::string &path, const std::string &tag,
+ const std::vector<std::string> &scrubop)
+ : C_ExecAndReply(mds, m), path(path), tag(tag), scrubop(scrubop) {
+ }
+
+ void exec() override {
+ mds->command_scrub_start(&f, path, tag, scrubop, this);
+ }
+
+private:
+ std::string path;
+ std::string tag;
+ std::vector<std::string> scrubop;
+};
+
+class C_ScrubControlExecAndReply : public C_ExecAndReply {
+public:
+ C_ScrubControlExecAndReply(MDSRank *mds, const MCommand::const_ref &m,
+ const std::string &command)
+ : C_ExecAndReply(mds, m), command(command) {
+ }
+
+ void exec() override {
+ if (command == "abort") {
+ mds->command_scrub_abort(&f, this);
+ } else if (command == "pause") {
+ mds->command_scrub_pause(&f, this);
+ } else {
+ ceph_abort();
+ }
+ }
+
+ void finish(int r) override {
+ f.open_object_section("result");
+ f.dump_int("return_code", r);
+ f.close_section();
+ C_ExecAndReply::finish(r);
+ }
+
+private:
+ std::string command;
+};
+
/**
* This function drops the mds_lock, so don't do anything with
* MDSRank after calling it (we could have gone into shutdown): just
* send your result back to the calling client and finish.
*/
-void MDSRankDispatcher::evict_clients(const SessionFilter &filter, MCommand *m)
+void MDSRankDispatcher::evict_clients(const SessionFilter &filter, const MCommand::const_ref &m)
{
C_MDS_Send_Command_Reply *reply = new C_MDS_Send_Command_Reply(this, m);
return;
}
- std::list<Session*> victims;
- const auto sessions = sessionmap.get_sessions();
- for (const auto p : sessions) {
+ std::vector<Session*> victims;
+ const auto& sessions = sessionmap.get_sessions();
+ for (const auto& p : sessions) {
if (!p.first.is_client()) {
continue;
}
C_GatherBuilder gather(g_ceph_context, reply);
for (const auto s : victims) {
std::stringstream ss;
- evict_client(s->info.inst.name.num(), false,
- g_conf->mds_session_blacklist_on_evict, ss, gather.new_sub());
+ evict_client(s->get_client().v, false,
+ g_conf()->mds_session_blacklist_on_evict, ss, gather.new_sub());
}
gather.activate();
}
// Dump sessions, decorated with recovery/replay status
f->open_array_section("sessions");
const ceph::unordered_map<entity_name_t, Session*> session_map = sessionmap.get_sessions();
- for (ceph::unordered_map<entity_name_t,Session*>::const_iterator p = session_map.begin();
- p != session_map.end();
- ++p) {
- if (!p->first.is_client()) {
+ for (auto& p : session_map) {
+ if (!p.first.is_client()) {
continue;
}
- Session *s = p->second;
+ Session *s = p.second;
if (!filter.match(*s, std::bind(&Server::waiting_for_reconnect, server, std::placeholders::_1))) {
continue;
}
f->open_object_section("session");
- f->dump_int("id", p->first.num());
+ f->dump_int("id", p.first.num());
f->dump_int("num_leases", s->leases.size());
f->dump_int("num_caps", s->caps.size());
f->dump_float("uptime", s->get_session_uptime());
f->dump_int("replay_requests", is_clientreplay() ? s->get_request_count() : 0);
f->dump_unsigned("completed_requests", s->get_num_completed_requests());
- f->dump_bool("reconnecting", server->waiting_for_reconnect(p->first.num()));
+ f->dump_bool("reconnecting", server->waiting_for_reconnect(p.first.num()));
f->dump_stream("inst") << s->info.inst;
f->open_object_section("client_metadata");
- for (map<string, string>::const_iterator i = s->info.client_metadata.begin();
- i != s->info.client_metadata.end(); ++i) {
- f->dump_string(i->first.c_str(), i->second);
- }
+ s->info.client_metadata.dump(f);
f->close_section(); // client_metadata
f->close_section(); //session
}
f->close_section(); //sessions
}
-void MDSRank::command_scrub_path(Formatter *f, boost::string_view path, vector<string>& scrubop_vec)
+void MDSRank::command_scrub_start(Formatter *f,
+ std::string_view path, std::string_view tag,
+ const vector<string>& scrubop_vec, Context *on_finish)
{
bool force = false;
bool recursive = false;
bool repair = false;
- for (vector<string>::iterator i = scrubop_vec.begin() ; i != scrubop_vec.end(); ++i) {
- if (*i == "force")
+ for (auto &op : scrubop_vec) {
+ if (op == "force")
force = true;
- else if (*i == "recursive")
+ else if (op == "recursive")
recursive = true;
- else if (*i == "repair")
+ else if (op == "repair")
repair = true;
}
- C_SaferCond scond;
- {
- Mutex::Locker l(mds_lock);
- mdcache->enqueue_scrub(path, "", force, recursive, repair, f, &scond);
- }
- scond.wait();
+
+ std::lock_guard l(mds_lock);
+ mdcache->enqueue_scrub(path, tag, force, recursive, repair, f, on_finish);
// scrub_dentry() finishers will dump the data for us; we're done!
}
void MDSRank::command_tag_path(Formatter *f,
- boost::string_view path, boost::string_view tag)
+ std::string_view path, std::string_view tag)
{
C_SaferCond scond;
{
- Mutex::Locker l(mds_lock);
+ std::lock_guard l(mds_lock);
mdcache->enqueue_scrub(path, tag, true, true, false, f, &scond);
}
scond.wait();
}
-void MDSRank::command_flush_path(Formatter *f, boost::string_view path)
+void MDSRank::command_scrub_abort(Formatter *f, Context *on_finish) {
+ std::lock_guard l(mds_lock);
+ scrubstack->scrub_abort(on_finish);
+}
+
+void MDSRank::command_scrub_pause(Formatter *f, Context *on_finish) {
+ std::lock_guard l(mds_lock);
+ scrubstack->scrub_pause(on_finish);
+}
+
+void MDSRank::command_scrub_resume(Formatter *f) {
+ int r = scrubstack->scrub_resume();
+
+ f->open_object_section("result");
+ f->dump_int("return_code", r);
+ f->close_section();
+}
+
+void MDSRank::command_scrub_status(Formatter *f) {
+ scrubstack->scrub_status(f);
+}
+
+void MDSRank::command_flush_path(Formatter *f, std::string_view path)
{
C_SaferCond scond;
{
- Mutex::Locker l(mds_lock);
+ std::lock_guard l(mds_lock);
mdcache->flush_dentry(path, &scond);
}
int r = scond.wait();
C_SaferCond cond;
std::stringstream ss;
-
{
- Mutex::Locker locker(mds_lock);
+ std::lock_guard locker(mds_lock);
C_Flush_Journal *flush_journal = new C_Flush_Journal(mdcache, mdlog, this, &ss, &cond);
flush_journal->send();
}
void MDSRank::command_get_subtrees(Formatter *f)
{
- assert(f != NULL);
- Mutex::Locker l(mds_lock);
+ ceph_assert(f != NULL);
+ std::lock_guard l(mds_lock);
- std::list<CDir*> subtrees;
- mdcache->list_subtrees(subtrees);
+ std::vector<CDir*> subtrees;
+ mdcache->get_subtrees(subtrees);
f->open_array_section("subtrees");
- for (std::list<CDir*>::iterator i = subtrees.begin(); i != subtrees.end(); ++i) {
- const CDir *dir = *i;
-
+ for (const auto& dir : subtrees) {
f->open_object_section("subtree");
{
f->dump_bool("is_auth", dir->is_auth());
void MDSRank::command_export_dir(Formatter *f,
- boost::string_view path,
+ std::string_view path,
mds_rank_t target)
{
int r = _command_export_dir(path, target);
}
int MDSRank::_command_export_dir(
- boost::string_view path,
+ std::string_view path,
mds_rank_t target)
{
- Mutex::Locker l(mds_lock);
+ std::lock_guard l(mds_lock);
filepath fp(path);
if (target == whoami || !mdsmap->is_up(target) || !mdsmap->is_in(target)) {
return 0;
}
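+// Dump the cached tree rooted at 'root' (up to 'depth' levels, -1 for unlimited)
+// into 'f' as an array of inodes.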
+void MDSRank::command_dump_tree(const cmdmap_t &cmdmap, std::ostream &ss, Formatter *f)
+{
+ std::string root;
+ int64_t depth;
+ cmd_getval(g_ceph_context, cmdmap, "root", root);
+ if (!cmd_getval(g_ceph_context, cmdmap, "depth", depth))
+ depth = -1;
+ std::lock_guard l(mds_lock);
+ CInode *in = mdcache->cache_traverse(filepath(root.c_str()));
+ if (!in) {
+ ss << "root inode is not in cache";
+ return;
+ }
+ f->open_array_section("inodes");
+ mdcache->dump_tree(in, 0, depth, f);
+ f->close_section();
+}
+
CDir *MDSRank::_command_dirfrag_get(
const cmdmap_t &cmdmap,
std::ostream &ss)
CDir *dir = in->get_dirfrag(fg);
if (!dir) {
- ss << "frag 0x" << std::hex << in->ino() << "/" << fg << " not in cache ("
+ ss << "frag " << in->ino() << "/" << fg << " not in cache ("
"use `dirfrag ls` to see if it should exist)";
return NULL;
}
cmdmap_t cmdmap,
std::ostream &ss)
{
- Mutex::Locker l(mds_lock);
- if (!mdsmap->allows_dirfrags()) {
- ss << "dirfrags are disallowed by the mds map!";
- return false;
- }
-
+ std::lock_guard l(mds_lock);
int64_t by = 0;
if (!cmd_getval(g_ceph_context, cmdmap, "bits", by)) {
ss << "missing bits argument";
cmdmap_t cmdmap,
std::ostream &ss)
{
- Mutex::Locker l(mds_lock);
+ std::lock_guard l(mds_lock);
std::string path;
bool got = cmd_getval(g_ceph_context, cmdmap, "path", path);
if (!got) {
std::ostream &ss,
Formatter *f)
{
- Mutex::Locker l(mds_lock);
+ std::lock_guard l(mds_lock);
std::string path;
bool got = cmd_getval(g_ceph_context, cmdmap, "path", path);
if (!got) {
}
f->open_array_section("frags");
- std::list<frag_t> frags;
+ frag_vec_t leaves;
// NB using get_leaves_under instead of get_dirfrags to give
// you the list of what dirfrags may exist, not which are in cache
- in->dirfragtree.get_leaves_under(frag_t(), frags);
- for (std::list<frag_t>::iterator i = frags.begin();
- i != frags.end(); ++i) {
+ in->dirfragtree.get_leaves_under(frag_t(), leaves);
+ for (const auto& leaf : leaves) {
f->open_object_section("frag");
- f->dump_int("value", i->value());
- f->dump_int("bits", i->bits());
- std::ostringstream frag_str;
- frag_str << std::hex << i->value() << "/" << std::dec << i->bits();
- f->dump_string("str", frag_str.str());
+ f->dump_int("value", leaf.value());
+ f->dump_int("bits", leaf.bits());
+ CachedStackStringStream css;
+ *css << std::hex << leaf.value() << "/" << std::dec << leaf.bits();
+ f->dump_string("str", css->strv());
f->close_section();
}
f->close_section();
return true;
}
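+// Dump the MDCache open file table into 'f'.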
+void MDSRank::command_openfiles_ls(Formatter *f)
+{
+ std::lock_guard l(mds_lock);
+ mdcache->dump_openfiles(f);
+}
+
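+// Dump a single cached inode, looked up by inode number; reports an error if the
+// inode is unknown or not in cache.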
+void MDSRank::command_dump_inode(Formatter *f, const cmdmap_t &cmdmap, std::ostream &ss)
+{
+ std::lock_guard l(mds_lock);
+ int64_t number;
+ bool got = cmd_getval(g_ceph_context, cmdmap, "number", number);
+ if (!got) {
+ ss << "missing inode number";
+ return;
+ }
+
+ bool success = mdcache->dump_inode(f, number);
+ if (!success) {
+ ss << "dump inode failed: wrong inode number or the inode is not cached";
+ }
+}
+
void MDSRank::dump_status(Formatter *f) const
{
if (state == MDSMap::STATE_REPLAY ||
"Inodes with capabilities");
mds_plb.add_u64(l_mds_subtrees, "subtrees", "Subtrees");
mds_plb.add_u64(l_mds_load_cent, "load_cent", "Load per cent");
+ mds_plb.add_u64_counter(l_mds_openino_dir_fetch, "openino_dir_fetch",
+ "OpenIno incomplete directory fetches");
// low prio stats
mds_plb.set_prio_default(PerfCountersBuilder::PRIO_DEBUGONLY);
mds_plb.add_u64(l_mds_dispatch_queue_len, "q", "Dispatch queue length");
mds_plb.add_u64_counter(l_mds_exported, "exported", "Exports");
mds_plb.add_u64_counter(l_mds_imported, "imported", "Imports");
+ mds_plb.add_u64_counter(l_mds_openino_backtrace_fetch, "openino_backtrace_fetch",
+ "OpenIno backtrace fetches");
+ mds_plb.add_u64_counter(l_mds_openino_peer_discover, "openino_peer_discover",
+ "OpenIno peer inode discoveries");
logger = mds_plb.create_perf_counters();
g_ceph_context->get_perfcounters_collection()->add(logger);
mdm_plb.add_u64_counter(l_mdm_capa, "cap+", "Capabilities added");
mdm_plb.add_u64_counter(l_mdm_caps, "cap-", "Capabilities removed");
mdm_plb.add_u64(l_mdm_heap, "heap", "Heap size");
- mdm_plb.add_u64(l_mdm_buf, "buf", "Buffer size");
mdm_plb.set_prio_default(PerfCountersBuilder::PRIO_DEBUGONLY);
mdm_plb.add_u64(l_mdm_rss, "rss", "RSS");
void MDSRank::check_ops_in_flight()
{
+ string summary;
vector<string> warnings;
int slow = 0;
- if (op_tracker.check_ops_in_flight(warnings, &slow)) {
- for (vector<string>::iterator i = warnings.begin();
- i != warnings.end();
- ++i) {
- clog->warn() << *i;
+ if (op_tracker.check_ops_in_flight(&summary, warnings, &slow)) {
+ clog->warn() << summary;
+ for (const auto& warning : warnings) {
+ clog->warn() << warning;
}
}
void MDSRankDispatcher::handle_osd_map()
{
- if (is_active() && snapserver) {
+ if (is_active() &&
+ mdsmap->get_tableserver() == whoami) {
snapserver->check_osd_map(true);
}
}
bool MDSRank::evict_client(int64_t session_id,
- bool wait, bool blacklist, std::stringstream& err_ss,
+ bool wait, bool blacklist, std::ostream& err_ss,
Context *on_killed)
{
- assert(mds_lock.is_locked_by_me());
+ ceph_assert(mds_lock.is_locked_by_me());
// Mutually exclusive args
- assert(!(wait && on_killed != nullptr));
+ ceph_assert(!(wait && on_killed != nullptr));
if (is_any_replay()) {
err_ss << "MDS is replaying log";
auto& addr = session->info.inst.addr;
{
- std::stringstream ss;
- ss << "Evicting " << (blacklist ? "(and blacklisting) " : "")
- << "client session " << session_id << " (" << addr << ")";
- dout(1) << ss.str() << dendl;
- clog->info() << ss.str();
+ CachedStackStringStream css;
+ *css << "Evicting " << (blacklist ? "(and blacklisting) " : "")
+ << "client session " << session_id << " (" << addr << ")";
+ dout(1) << css->strv() << dendl;
+ clog->info() << css->strv();
}
dout(4) << "Preparing blacklist command... (wait=" << wait << ")" << dendl;
std::vector<std::string> cmd = {tmp};
auto kill_client_session = [this, session_id, wait, on_killed](){
- assert(mds_lock.is_locked_by_me());
+ ceph_assert(mds_lock.is_locked_by_me());
Session *session = sessionmap.get_session(
entity_name_t(CEPH_ENTITY_TYPE_CLIENT, session_id));
if (session) {
};
auto apply_blacklist = [this, cmd](std::function<void ()> fn){
- assert(mds_lock.is_locked_by_me());
+ ceph_assert(mds_lock.is_locked_by_me());
Context *on_blacklist_done = new FunctionContext([this, fn](int r) {
objecter->wait_for_latest_osdmap(
new C_OnFinisher(
new FunctionContext([this, fn](int r) {
- Mutex::Locker l(mds_lock);
+ std::lock_guard l(mds_lock);
auto epoch = objecter->with_osdmap([](const OSDMap &o){
return o.get_epoch();
});
// We dropped mds_lock, so check that session still exists
session = sessionmap.get_session(entity_name_t(CEPH_ENTITY_TYPE_CLIENT,
- session_id));
+ session_id));
if (!session) {
dout(1) << "session " << session_id << " was removed while we waited "
"for blacklist" << dendl;
// share the map with mounted clients
set<Session*> clients;
sessionmap.get_client_session_set(clients);
- for (set<Session*>::const_iterator p = clients.begin();
- p != clients.end();
- ++p)
- (*p)->connection->send_message(new MMDSMap(monc->get_fsid(), mdsmap));
+ for (const auto &session : clients) {
+ auto m = MMDSMap::create(monc->get_fsid(), *mdsmap);
+ session->get_connection()->send_message2(std::move(m));
+ }
last_client_mdsmap_bcast = mdsmap->get_epoch();
}
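+// Defer 'ctx' to the MDS finisher so that ctx->exec() runs outside the caller's context.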
+Context *MDSRank::create_async_exec_context(C_ExecAndReply *ctx) {
+ return new C_OnFinisher(new FunctionContext([ctx](int _) {
+ ctx->exec();
+ }), finisher);
+}
+
MDSRankDispatcher::MDSRankDispatcher(
mds_rank_t whoami_,
Mutex &mds_lock_,
LogChannelRef &clog_,
SafeTimer &timer_,
Beacon &beacon_,
- MDSMap *& mdsmap_,
+ std::unique_ptr<MDSMap> &mdsmap_,
Messenger *msgr,
MonClient *monc_,
Context *respawn_hook_,
bool MDSRankDispatcher::handle_command(
const cmdmap_t &cmdmap,
- MCommand *m,
+ const MCommand::const_ref &m,
int *r,
std::stringstream *ds,
std::stringstream *ss,
Context **run_later,
bool *need_reply)
{
- assert(r != nullptr);
- assert(ds != nullptr);
- assert(ss != nullptr);
+ ceph_assert(r != nullptr);
+ ceph_assert(ds != nullptr);
+ ceph_assert(ss != nullptr);
*need_reply = true;
timeout = 0;
}
- JSONFormatter *f = new JSONFormatter(true);
- C_MDS_Send_Command_Reply *reply = new C_MDS_Send_Command_Reply(this, m);
- Context *on_finish = new FunctionContext([this, f, reply](int r) {
- cache_drop_send_reply(f, reply, r);
- delete f;
- delete reply;
- });
-
*need_reply = false;
- *run_later = new C_OnFinisher(
- new FunctionContext([this, timeout, f, on_finish](int _) {
- command_cache_drop((uint64_t)timeout, f, on_finish);
- }), finisher);
+ *run_later = create_async_exec_context(new C_CacheDropExecAndReply
+ (this, m, (uint64_t)timeout));
+ return true;
+ } else if (prefix == "scrub start") {
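+ // Asynchronous: defer the reply and run the scrub from the finisher via the
+ // ExecAndReply context.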
+ string path;
+ string tag;
+ vector<string> scrubop_vec;
+ cmd_getval(g_ceph_context, cmdmap, "scrubops", scrubop_vec);
+ cmd_getval(g_ceph_context, cmdmap, "path", path);
+ cmd_getval(g_ceph_context, cmdmap, "tag", tag);
+ *need_reply = false;
+ *run_later = create_async_exec_context(new C_ScrubExecAndReply
+ (this, m, path, tag, scrubop_vec));
+ return true;
+ } else if (prefix == "scrub abort") {
+ *need_reply = false;
+ *run_later = create_async_exec_context(new C_ScrubControlExecAndReply
+ (this, m, "abort"));
+ return true;
+ } else if (prefix == "scrub pause") {
+ *need_reply = false;
+ *run_later = create_async_exec_context(new C_ScrubControlExecAndReply
+ (this, m, "pause"));
+ return true;
+ } else if (prefix == "scrub resume") {
+ JSONFormatter f(true);
+ command_scrub_resume(&f);
+ f.flush(*ds);
+ return true;
+ } else if (prefix == "scrub status") {
+ JSONFormatter f(true);
+ command_scrub_status(&f);
+ f.flush(*ds);
return true;
} else {
return false;
}
}
-void MDSRank::cache_drop_send_reply(Formatter *f, C_MDS_Send_Command_Reply *reply, int r) {
- dout(20) << __func__ << ": r=" << r << dendl;
-
- std::stringstream ds;
- std::stringstream ss;
- if (r != 0) {
- f->flush(ss);
- } else {
- f->flush(ds);
- }
-
- reply->send(r, ss.str(), ds);
-}
-
void MDSRank::command_cache_drop(uint64_t timeout, Formatter *f, Context *on_finish) {
dout(20) << __func__ << dendl;
- Mutex::Locker locker(mds_lock);
+ std::lock_guard locker(mds_lock);
C_Drop_Cache *request = new C_Drop_Cache(server, mdcache, mdlog, this,
timeout, f, on_finish);
request->send();