// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
 * Ceph - scalable distributed file system
 *
 * Copyright (C) 2004-2006 Sage Weil <sage@newdream.net>
 *
 * This is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License version 2.1, as published by the Free Software
 * Foundation. See file COPYING.
 *
 */

#include <unistd.h>

#include "include/compat.h"
#include "global/signal_handler.h"

#include "include/types.h"
#include "include/str_list.h"
#include "common/entity_name.h"
#include "common/Clock.h"
#include "common/signal.h"
#include "common/ceph_argparse.h"
#include "common/errno.h"

#include "msg/Messenger.h"
#include "mon/MonClient.h"

#include "osdc/Objecter.h"

#include "MDSMap.h"

#include "MDSDaemon.h"
#include "Server.h"
#include "Locker.h"

#include "SnapServer.h"
#include "SnapClient.h"

#include "common/HeartbeatMap.h"

#include "common/perf_counters.h"

#include "common/Timer.h"

#include "events/ESession.h"
#include "events/ESubtreeMap.h"

#include "messages/MMDSMap.h"

#include "messages/MGenericMessage.h"

#include "messages/MMonCommand.h"
#include "messages/MCommand.h"
#include "messages/MCommandReply.h"

#include "auth/AuthAuthorizeHandler.h"
#include "auth/RotatingKeyRing.h"
#include "auth/KeyRing.h"

#include "common/config.h"

#include "perfglue/cpu_profiler.h"
#include "perfglue/heap_profiler.h"

#define dout_context g_ceph_context
#define dout_subsys ceph_subsys_mds
#undef dout_prefix
#define dout_prefix *_dout << "mds." << name << ' '


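// Periodic tick callback: scheduled by reset_tick() and run under mds_lock;
// it clears tick_event and calls back into MDSDaemon::tick().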
class MDSDaemon::C_MDS_Tick : public Context {
protected:
  MDSDaemon *mds_daemon;
public:
  explicit C_MDS_Tick(MDSDaemon *m) : mds_daemon(m) {}
  void finish(int r) override {
    assert(mds_daemon->mds_lock.is_locked_by_me());

    mds_daemon->tick_event = 0;
    mds_daemon->tick();
  }
};

// cons/des
MDSDaemon::MDSDaemon(const std::string &n, Messenger *m, MonClient *mc) :
  Dispatcher(m->cct),
  mds_lock("MDSDaemon::mds_lock"),
  stopping(false),
  timer(m->cct, mds_lock),
  beacon(m->cct, mc, n),
  authorize_handler_cluster_registry(new AuthAuthorizeHandlerRegistry(m->cct,
      m->cct->_conf->auth_supported.empty() ?
        m->cct->_conf->auth_cluster_required :
        m->cct->_conf->auth_supported)),
  authorize_handler_service_registry(new AuthAuthorizeHandlerRegistry(m->cct,
      m->cct->_conf->auth_supported.empty() ?
        m->cct->_conf->auth_service_required :
        m->cct->_conf->auth_supported)),
  name(n),
  messenger(m),
  monc(mc),
  mgrc(m->cct, m),
  log_client(m->cct, messenger, &mc->monmap, LogClient::NO_FLAGS),
  mds_rank(NULL),
  tick_event(0),
  asok_hook(NULL)
{
  orig_argc = 0;
  orig_argv = NULL;

  clog = log_client.create_channel();

  monc->set_messenger(messenger);

  mdsmap = new MDSMap;
}

MDSDaemon::~MDSDaemon() {
  Mutex::Locker lock(mds_lock);

  delete mds_rank;
  mds_rank = NULL;
  delete mdsmap;
  mdsmap = NULL;

  delete authorize_handler_service_registry;
  delete authorize_handler_cluster_registry;
}

class MDSSocketHook : public AdminSocketHook {
  MDSDaemon *mds;
public:
  explicit MDSSocketHook(MDSDaemon *m) : mds(m) {}
  bool call(std::string command, cmdmap_t& cmdmap, std::string format,
            bufferlist& out) override {
    stringstream ss;
    bool r = mds->asok_command(command, cmdmap, format, ss);
    out.append(ss);
    return r;
  }
};

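// Entry point for admin socket ("asok") commands: "status" is handled by the
// daemon itself, everything else is delegated to the active MDSRank.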
bool MDSDaemon::asok_command(string command, cmdmap_t& cmdmap, string format,
                             ostream& ss)
{
  dout(1) << "asok_command: " << command << " (starting...)" << dendl;

  Formatter *f = Formatter::create(format, "json-pretty", "json-pretty");
  bool handled = false;
  if (command == "status") {
    dump_status(f);
    handled = true;
  } else {
    if (mds_rank == NULL) {
      dout(1) << "Can't run that command on an inactive MDS!" << dendl;
      f->dump_string("error", "mds_not_active");
    } else {
      handled = mds_rank->handle_asok_command(command, cmdmap, f, ss);
    }
  }
  f->flush(ss);
  delete f;

  dout(1) << "asok_command: " << command << " (complete)" << dendl;

  return handled;
}

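// Dump a high-level summary of this daemon (identity, desired and actual MDS
// state, map epochs) for the "status" admin socket command.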
void MDSDaemon::dump_status(Formatter *f)
{
  f->open_object_section("status");
  f->dump_stream("cluster_fsid") << monc->get_fsid();
  if (mds_rank) {
    f->dump_int("whoami", mds_rank->get_nodeid());
  } else {
    f->dump_int("whoami", MDS_RANK_NONE);
  }

  f->dump_int("id", monc->get_global_id());
  f->dump_string("want_state", ceph_mds_state_name(beacon.get_want_state()));
  f->dump_string("state", ceph_mds_state_name(mdsmap->get_state_gid(mds_gid_t(
          monc->get_global_id()))));
  if (mds_rank) {
    Mutex::Locker l(mds_lock);
    mds_rank->dump_status(f);
  }

  f->dump_unsigned("mdsmap_epoch", mdsmap->get_epoch());
  if (mds_rank) {
    f->dump_unsigned("osdmap_epoch", mds_rank->get_osd_epoch());
    f->dump_unsigned("osdmap_epoch_barrier", mds_rank->get_osd_epoch_barrier());
  } else {
    f->dump_unsigned("osdmap_epoch", 0);
    f->dump_unsigned("osdmap_epoch_barrier", 0);
  }
  f->close_section(); // status
}

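// Register the MDS admin socket commands; clean_up_admin_socket() must
// unregister exactly the same set before the hook is deleted.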
void MDSDaemon::set_up_admin_socket()
{
  int r;
  AdminSocket *admin_socket = g_ceph_context->get_admin_socket();
  assert(asok_hook == nullptr);
  asok_hook = new MDSSocketHook(this);
  r = admin_socket->register_command("status", "status", asok_hook,
                                     "high-level status of MDS");
  assert(r == 0);
  r = admin_socket->register_command("dump_ops_in_flight",
                                     "dump_ops_in_flight", asok_hook,
                                     "show the ops currently in flight");
  assert(r == 0);
  r = admin_socket->register_command("ops",
                                     "ops", asok_hook,
                                     "show the ops currently in flight");
  assert(r == 0);
  r = admin_socket->register_command("dump_blocked_ops", "dump_blocked_ops",
                                     asok_hook,
                                     "show the blocked ops currently in flight");
  assert(r == 0);
  r = admin_socket->register_command("dump_historic_ops", "dump_historic_ops",
                                     asok_hook,
                                     "show slowest recent ops");
  assert(r == 0);
  r = admin_socket->register_command("dump_historic_ops_by_duration", "dump_historic_ops_by_duration",
                                     asok_hook,
                                     "show slowest recent ops, sorted by op duration");
  assert(r == 0);
  r = admin_socket->register_command("scrub_path",
                                     "scrub_path name=path,type=CephString "
                                     "name=scrubops,type=CephChoices,"
                                     "strings=force|recursive|repair,n=N,req=false",
                                     asok_hook,
                                     "scrub an inode and output results");
  assert(r == 0);
  r = admin_socket->register_command("tag path",
                                     "tag path name=path,type=CephString"
                                     " name=tag,type=CephString",
                                     asok_hook,
                                     "Apply scrub tag recursively");
  assert(r == 0);
  r = admin_socket->register_command("flush_path",
                                     "flush_path name=path,type=CephString",
                                     asok_hook,
                                     "flush an inode (and its dirfrags)");
  assert(r == 0);
  r = admin_socket->register_command("export dir",
                                     "export dir "
                                     "name=path,type=CephString "
                                     "name=rank,type=CephInt",
                                     asok_hook,
                                     "migrate a subtree to named MDS");
  assert(r == 0);
  r = admin_socket->register_command("dump cache",
                                     "dump cache name=path,type=CephString,req=false",
                                     asok_hook,
                                     "dump metadata cache (optionally to a file)");
  assert(r == 0);
  r = admin_socket->register_command("dump tree",
                                     "dump tree "
                                     "name=root,type=CephString,req=true "
                                     "name=depth,type=CephInt,req=false ",
                                     asok_hook,
                                     "dump metadata cache for subtree");
  assert(r == 0);
  r = admin_socket->register_command("session evict",
                                     "session evict name=client_id,type=CephString",
                                     asok_hook,
                                     "Evict a CephFS client");
  assert(r == 0);
  r = admin_socket->register_command("osdmap barrier",
                                     "osdmap barrier name=target_epoch,type=CephInt",
                                     asok_hook,
                                     "Wait until the MDS has this OSD map epoch");
  assert(r == 0);
  r = admin_socket->register_command("session ls",
                                     "session ls",
                                     asok_hook,
                                     "Enumerate connected CephFS clients");
  assert(r == 0);
  r = admin_socket->register_command("flush journal",
                                     "flush journal",
                                     asok_hook,
                                     "Flush the journal to the backing store");
  assert(r == 0);
  r = admin_socket->register_command("force_readonly",
                                     "force_readonly",
                                     asok_hook,
                                     "Force MDS to read-only mode");
  assert(r == 0);
  r = admin_socket->register_command("get subtrees",
                                     "get subtrees",
                                     asok_hook,
                                     "Return the subtree map");
  assert(r == 0);
  r = admin_socket->register_command("dirfrag split",
                                     "dirfrag split "
                                     "name=path,type=CephString,req=true "
                                     "name=frag,type=CephString,req=true "
                                     "name=bits,type=CephInt,req=true ",
                                     asok_hook,
                                     "Fragment directory by path");
  assert(r == 0);
  r = admin_socket->register_command("dirfrag merge",
                                     "dirfrag merge "
                                     "name=path,type=CephString,req=true "
                                     "name=frag,type=CephString,req=true",
                                     asok_hook,
                                     "De-fragment directory by path");
  assert(r == 0);
  r = admin_socket->register_command("dirfrag ls",
                                     "dirfrag ls "
                                     "name=path,type=CephString,req=true",
                                     asok_hook,
                                     "List fragments in directory");
  assert(r == 0);
}

void MDSDaemon::clean_up_admin_socket()
{
  AdminSocket *admin_socket = g_ceph_context->get_admin_socket();
  admin_socket->unregister_command("status");
  admin_socket->unregister_command("dump_ops_in_flight");
  admin_socket->unregister_command("ops");
  admin_socket->unregister_command("dump_blocked_ops");
  admin_socket->unregister_command("dump_historic_ops");
  admin_socket->unregister_command("dump_historic_ops_by_duration");
  admin_socket->unregister_command("scrub_path");
  admin_socket->unregister_command("tag path");
  admin_socket->unregister_command("flush_path");
  admin_socket->unregister_command("export dir");
  admin_socket->unregister_command("dump cache");
  admin_socket->unregister_command("dump tree");
  admin_socket->unregister_command("session evict");
  admin_socket->unregister_command("osdmap barrier");
  admin_socket->unregister_command("session ls");
  admin_socket->unregister_command("flush journal");
  admin_socket->unregister_command("force_readonly");
  admin_socket->unregister_command("get subtrees");
  admin_socket->unregister_command("dirfrag split");
  admin_socket->unregister_command("dirfrag merge");
  admin_socket->unregister_command("dirfrag ls");
  delete asok_hook;
  asok_hook = NULL;
}

const char** MDSDaemon::get_tracked_conf_keys() const
{
  static const char* KEYS[] = {
    "mds_op_complaint_time", "mds_op_log_threshold",
    "mds_op_history_size", "mds_op_history_duration",
    "mds_enable_op_tracker",
    "mds_log_pause",
    // clog & admin clog
    "clog_to_monitors",
    "clog_to_syslog",
    "clog_to_syslog_facility",
    "clog_to_syslog_level",
    // PurgeQueue
    "mds_max_purge_ops",
    "mds_max_purge_ops_per_pg",
    "mds_max_purge_files",
    "clog_to_graylog",
    "clog_to_graylog_host",
    "clog_to_graylog_port",
    "host",
    "fsid",
    NULL
  };
  return KEYS;
}

void MDSDaemon::handle_conf_change(const struct md_config_t *conf,
                                   const std::set <std::string> &changed)
{
  // We may be called within mds_lock (via `tell`) or outwith the
  // lock (via admin socket `config set`), so handle either case.
  const bool initially_locked = mds_lock.is_locked_by_me();
  if (!initially_locked) {
    mds_lock.Lock();
  }

  if (changed.count("mds_op_complaint_time") ||
      changed.count("mds_op_log_threshold")) {
    if (mds_rank) {
      mds_rank->op_tracker.set_complaint_and_threshold(conf->mds_op_complaint_time,
                                                       conf->mds_op_log_threshold);
    }
  }
  if (changed.count("mds_op_history_size") ||
      changed.count("mds_op_history_duration")) {
    if (mds_rank) {
      mds_rank->op_tracker.set_history_size_and_duration(conf->mds_op_history_size,
                                                         conf->mds_op_history_duration);
    }
  }
  if (changed.count("mds_enable_op_tracker")) {
    if (mds_rank) {
      mds_rank->op_tracker.set_tracking(conf->mds_enable_op_tracker);
    }
  }
  if (changed.count("clog_to_monitors") ||
      changed.count("clog_to_syslog") ||
      changed.count("clog_to_syslog_level") ||
      changed.count("clog_to_syslog_facility") ||
      changed.count("clog_to_graylog") ||
      changed.count("clog_to_graylog_host") ||
      changed.count("clog_to_graylog_port") ||
      changed.count("host") ||
      changed.count("fsid")) {
    if (mds_rank) {
      mds_rank->update_log_config();
    }
  }

  if (!g_conf->mds_log_pause && changed.count("mds_log_pause")) {
    if (mds_rank) {
      mds_rank->mdlog->kick_submitter();
    }
  }

  if (mds_rank) {
    mds_rank->handle_conf_change(conf, changed);
  }

  if (!initially_locked) {
    mds_lock.Unlock();
  }
}


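// Daemon startup: wire up dispatchers, initialize and authenticate the mon
// client, fetch rotating service keys, start the mgr client, subscribe to
// mdsmap/mgrmap updates, set up the admin socket and config observer, start
// the timer and beacon, and schedule the first tick.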
int MDSDaemon::init()
{
  dout(10) << sizeof(MDSCacheObject) << "\tMDSCacheObject" << dendl;
  dout(10) << sizeof(CInode) << "\tCInode" << dendl;
  dout(10) << sizeof(elist<void*>::item) << "\t elist<>::item *7=" << 7*sizeof(elist<void*>::item) << dendl;
  dout(10) << sizeof(inode_t) << "\t inode_t " << dendl;
  dout(10) << sizeof(nest_info_t) << "\t nest_info_t " << dendl;
  dout(10) << sizeof(frag_info_t) << "\t frag_info_t " << dendl;
  dout(10) << sizeof(SimpleLock) << "\t SimpleLock *5=" << 5*sizeof(SimpleLock) << dendl;
  dout(10) << sizeof(ScatterLock) << "\t ScatterLock *3=" << 3*sizeof(ScatterLock) << dendl;
  dout(10) << sizeof(CDentry) << "\tCDentry" << dendl;
  dout(10) << sizeof(elist<void*>::item) << "\t elist<>::item" << dendl;
  dout(10) << sizeof(SimpleLock) << "\t SimpleLock" << dendl;
  dout(10) << sizeof(CDir) << "\tCDir " << dendl;
  dout(10) << sizeof(elist<void*>::item) << "\t elist<>::item *2=" << 2*sizeof(elist<void*>::item) << dendl;
  dout(10) << sizeof(fnode_t) << "\t fnode_t " << dendl;
  dout(10) << sizeof(nest_info_t) << "\t nest_info_t *2" << dendl;
  dout(10) << sizeof(frag_info_t) << "\t frag_info_t *2" << dendl;
  dout(10) << sizeof(Capability) << "\tCapability " << dendl;
  dout(10) << sizeof(xlist<void*>::item) << "\t xlist<>::item *2=" << 2*sizeof(xlist<void*>::item) << dendl;

  messenger->add_dispatcher_tail(&beacon);
  messenger->add_dispatcher_tail(this);

  // get monmap
  monc->set_messenger(messenger);

  monc->set_want_keys(CEPH_ENTITY_TYPE_MON | CEPH_ENTITY_TYPE_OSD |
                      CEPH_ENTITY_TYPE_MDS | CEPH_ENTITY_TYPE_MGR);
  int r = 0;
  r = monc->init();
  if (r < 0) {
    derr << "ERROR: failed to get monmap: " << cpp_strerror(-r) << dendl;
    mds_lock.Lock();
    suicide();
    mds_lock.Unlock();
    return r;
  }

  // tell monc about log_client so it will know about mon session resets
  monc->set_log_client(&log_client);

  r = monc->authenticate();
  if (r < 0) {
    derr << "ERROR: failed to authenticate: " << cpp_strerror(-r) << dendl;
    mds_lock.Lock();
    suicide();
    mds_lock.Unlock();
    return r;
  }

  int rotating_auth_attempts = 0;
  while (monc->wait_auth_rotating(30.0) < 0) {
    if (++rotating_auth_attempts <= g_conf->max_rotating_auth_attempts) {
      derr << "unable to obtain rotating service keys; retrying" << dendl;
      continue;
    }
    derr << "ERROR: failed to refresh rotating keys, "
         << "maximum retry time reached." << dendl;
    mds_lock.Lock();
    suicide();
    mds_lock.Unlock();
    return -ETIMEDOUT;
  }

  mgrc.init();
  messenger->add_dispatcher_head(&mgrc);

  mds_lock.Lock();
  if (beacon.get_want_state() == CEPH_MDS_STATE_DNE) {
    dout(4) << __func__ << ": terminated already, dropping out" << dendl;
    mds_lock.Unlock();
    return 0;
  }

  monc->sub_want("mdsmap", 0, 0);
  monc->sub_want("mgrmap", 0, 0);
  monc->renew_subs();

  mds_lock.Unlock();

  // Set up admin socket before taking mds_lock, so that ordering
  // is consistent (later we take mds_lock within asok callbacks)
  set_up_admin_socket();
  g_conf->add_observer(this);
  mds_lock.Lock();
  if (beacon.get_want_state() == MDSMap::STATE_DNE) {
    suicide();  // we could do something more graceful here
    dout(4) << __func__ << ": terminated already, dropping out" << dendl;
    mds_lock.Unlock();
    return 0;
  }

  timer.init();

  beacon.init(mdsmap);
  messenger->set_myname(entity_name_t::MDS(MDS_RANK_NONE));

  // schedule tick
  reset_tick();
  mds_lock.Unlock();

  return 0;
}

void MDSDaemon::reset_tick()
{
  // cancel old
  if (tick_event) timer.cancel_event(tick_event);

  // schedule
  tick_event = new C_MDS_Tick(this);
  timer.add_event_after(g_conf->mds_tick_interval, tick_event);
}

void MDSDaemon::tick()
{
  tick_event = 0;

  // reschedule
  reset_tick();

  // Call through to subsystems' tick functions
  if (mds_rank) {
    mds_rank->tick();
  }
}

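// Reply to an MCommand on its originating connection; if the connection does
// not belong to a live client session (e.g. the ceph CLI), mark it disposable
// so it is torn down once the reply has been sent.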
void MDSDaemon::send_command_reply(MCommand *m, MDSRank *mds_rank,
                                   int r, bufferlist outbl,
                                   const std::string& outs)
{
  Session *session = static_cast<Session *>(m->get_connection()->get_priv());
  assert(session != NULL);
  // If someone is using a closed session for sending commands (e.g.
  // the ceph CLI) then we should feel free to clean up this connection
  // as soon as we've sent them a response.
  const bool live_session = mds_rank &&
    mds_rank->sessionmap.get_session(session->info.inst.name) != nullptr
    && session->get_state_seq() > 0;

  if (!live_session) {
    // This session only existed to issue commands, so terminate it
    // as soon as we can.
    assert(session->is_closed());
    session->connection->mark_disposable();
    session->put();
  }

  MCommandReply *reply = new MCommandReply(r, outs);
  reply->set_tid(m->get_tid());
  reply->set_data(outbl);
  m->get_connection()->send_message(reply);
}

/* This function DOES put the passed message before returning */
void MDSDaemon::handle_command(MCommand *m)
{
  Session *session = static_cast<Session *>(m->get_connection()->get_priv());
  assert(session != NULL);

  int r = 0;
  cmdmap_t cmdmap;
  std::stringstream ss;
  std::string outs;
  bufferlist outbl;
  Context *run_after = NULL;
  bool need_reply = true;

  if (!session->auth_caps.allow_all()) {
    dout(1) << __func__
            << ": received command from client without `tell` capability: "
            << m->get_connection()->peer_addr << dendl;

    ss << "permission denied";
    r = -EPERM;
  } else if (m->cmd.empty()) {
    r = -EINVAL;
    ss << "no command given";
    outs = ss.str();
  } else if (!cmdmap_from_json(m->cmd, &cmdmap, ss)) {
    r = -EINVAL;
    outs = ss.str();
  } else {
    r = _handle_command(cmdmap, m, &outbl, &outs, &run_after, &need_reply);
  }

  if (need_reply) {
    send_command_reply(m, mds_rank, r, outbl, outs);
  }

  if (run_after) {
    run_after->complete(0);
  }

  m->put();
}


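// Table of commands accepted over the messenger (`ceph tell mds.<id> ...`);
// it is serialized by the get_command_descriptions handler below.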
struct MDSCommand {
  string cmdstring;
  string helpstring;
  string module;
  string perm;
  string availability;
} mds_commands[] = {

#define COMMAND(parsesig, helptext, module, perm, availability) \
  {parsesig, helptext, module, perm, availability},

COMMAND("injectargs " \
        "name=injected_args,type=CephString,n=N",
        "inject configuration arguments into running MDS",
        "mds", "*", "cli,rest")
COMMAND("exit",
        "Terminate this MDS",
        "mds", "*", "cli,rest")
COMMAND("respawn",
        "Restart this MDS",
        "mds", "*", "cli,rest")
COMMAND("session kill " \
        "name=session_id,type=CephInt",
        "End a client session",
        "mds", "*", "cli,rest")
COMMAND("cpu_profiler " \
        "name=arg,type=CephChoices,strings=status|flush",
        "run cpu profiling on daemon", "mds", "rw", "cli,rest")
COMMAND("session ls " \
        "name=filters,type=CephString,n=N,req=false",
        "List client sessions", "mds", "r", "cli,rest")
COMMAND("client ls " \
        "name=filters,type=CephString,n=N,req=false",
        "List client sessions", "mds", "r", "cli,rest")
COMMAND("session evict " \
        "name=filters,type=CephString,n=N,req=false",
        "Evict client session(s)", "mds", "rw", "cli,rest")
COMMAND("client evict " \
        "name=filters,type=CephString,n=N,req=false",
        "Evict client session(s)", "mds", "rw", "cli,rest")
COMMAND("damage ls",
        "List detected metadata damage", "mds", "r", "cli,rest")
COMMAND("damage rm name=damage_id,type=CephInt",
        "Remove a damage table entry", "mds", "rw", "cli,rest")
COMMAND("heap " \
        "name=heapcmd,type=CephChoices,strings=dump|start_profiler|stop_profiler|release|stats", \
        "show heap usage info (available only if compiled with tcmalloc)", \
        "mds", "*", "cli,rest")
};


int MDSDaemon::_handle_command(
    const cmdmap_t &cmdmap,
    MCommand *m,
    bufferlist *outbl,
    std::string *outs,
    Context **run_later,
    bool *need_reply)
{
  assert(outbl != NULL);
  assert(outs != NULL);

  class SuicideLater : public Context
  {
    MDSDaemon *mds;

  public:
    explicit SuicideLater(MDSDaemon *mds_) : mds(mds_) {}
    void finish(int r) override {
      // Wait a little to improve chances of caller getting
      // our response before seeing us disappear from mdsmap
      sleep(1);

      mds->suicide();
    }
  };


  class RespawnLater : public Context
  {
    MDSDaemon *mds;

  public:

    explicit RespawnLater(MDSDaemon *mds_) : mds(mds_) {}
    void finish(int r) override {
      // Wait a little to improve chances of caller getting
      // our response before seeing us disappear from mdsmap
      sleep(1);

      mds->respawn();
    }
  };

  std::stringstream ds;
  std::stringstream ss;
  std::string prefix;
  cmd_getval(cct, cmdmap, "prefix", prefix);

  int r = 0;

  if (prefix == "get_command_descriptions") {
    int cmdnum = 0;
    JSONFormatter *f = new JSONFormatter();
    f->open_object_section("command_descriptions");
    for (MDSCommand *cp = mds_commands;
         cp < &mds_commands[ARRAY_SIZE(mds_commands)]; cp++) {

      ostringstream secname;
      secname << "cmd" << setfill('0') << std::setw(3) << cmdnum;
      dump_cmddesc_to_json(f, secname.str(), cp->cmdstring, cp->helpstring,
                           cp->module, cp->perm, cp->availability, 0);
      cmdnum++;
    }
    f->close_section(); // command_descriptions

    f->flush(ds);
    delete f;
  } else if (prefix == "injectargs") {
    vector<string> argsvec;
    cmd_getval(cct, cmdmap, "injected_args", argsvec);

    if (argsvec.empty()) {
      r = -EINVAL;
      ss << "ignoring empty injectargs";
      goto out;
    }
    string args = argsvec.front();
    for (vector<string>::iterator a = ++argsvec.begin(); a != argsvec.end(); ++a)
      args += " " + *a;
    r = cct->_conf->injectargs(args, &ss);
  } else if (prefix == "exit") {
    // We will send response before executing
    ss << "Exiting...";
    *run_later = new SuicideLater(this);
  }
  else if (prefix == "respawn") {
    // We will send response before executing
    ss << "Respawning...";
    *run_later = new RespawnLater(this);
  } else if (prefix == "session kill") {
    if (mds_rank == NULL) {
      r = -EINVAL;
      ss << "MDS not active";
      goto out;
    }
    // FIXME harmonize `session kill` with admin socket session evict
    int64_t session_id = 0;
    bool got = cmd_getval(cct, cmdmap, "session_id", session_id);
    assert(got);
    bool killed = mds_rank->evict_client(session_id, false,
                                         g_conf->mds_session_blacklist_on_evict,
                                         ss);
    if (!killed)
      r = -ENOENT;
  } else if (prefix == "heap") {
    if (!ceph_using_tcmalloc()) {
      r = -EOPNOTSUPP;
      ss << "could not issue heap profiler command -- not using tcmalloc!";
    } else {
      string heapcmd;
      cmd_getval(cct, cmdmap, "heapcmd", heapcmd);
      vector<string> heapcmd_vec;
      get_str_vec(heapcmd, heapcmd_vec);
      ceph_heap_profiler_handle_command(heapcmd_vec, ds);
    }
  } else if (prefix == "cpu_profiler") {
    string arg;
    cmd_getval(cct, cmdmap, "arg", arg);
    vector<string> argvec;
    get_str_vec(arg, argvec);
    cpu_profiler_handle_command(argvec, ds);
  } else {
    // Give MDSRank a shot at the command
    if (mds_rank) {
      bool handled = mds_rank->handle_command(cmdmap, m, &r, &ds, &ss,
                                              need_reply);
      if (handled) {
        goto out;
      }
    }

    // Neither MDSDaemon nor MDSRank knows this command; write the error to
    // the outer ss (declaring a fresh local stream here would shadow it and
    // silently discard the message before it reaches outs).
    ss << "unrecognized command! " << prefix;
    r = -EINVAL;
  }

out:
  *outs = ss.str();
  outbl->append(ds);
  return r;
}

/* This function deletes the passed message before returning. */
void MDSDaemon::handle_mds_map(MMDSMap *m)
{
  version_t epoch = m->get_epoch();
  dout(5) << "handle_mds_map epoch " << epoch << " from " << m->get_source() << dendl;

  // is it new?
  if (epoch <= mdsmap->get_epoch()) {
    dout(5) << " old map epoch " << epoch << " <= " << mdsmap->get_epoch()
            << ", discarding" << dendl;
    m->put();
    return;
  }

  entity_addr_t addr;

  // keep old map, for a moment
  MDSMap *oldmap = mdsmap;

  // decode and process
  mdsmap = new MDSMap;
  mdsmap->decode(m->get_encoded());
  const MDSMap::DaemonState new_state = mdsmap->get_state_gid(mds_gid_t(monc->get_global_id()));
  const int incarnation = mdsmap->get_inc_gid(mds_gid_t(monc->get_global_id()));

  monc->sub_got("mdsmap", mdsmap->get_epoch());

  // Calculate my effective rank (either my owned rank or my
  // standby_for_rank if in standby replay)
  mds_rank_t whoami = mdsmap->get_rank_gid(mds_gid_t(monc->get_global_id()));

  // verify compatset
  CompatSet mdsmap_compat(get_mdsmap_compat_set_all());
  dout(10) << " my compat " << mdsmap_compat << dendl;
  dout(10) << " mdsmap compat " << mdsmap->compat << dendl;
  if (!mdsmap_compat.writeable(mdsmap->compat)) {
    dout(0) << "handle_mds_map mdsmap compatset " << mdsmap->compat
            << " not writeable with daemon features " << mdsmap_compat
            << ", killing myself" << dendl;
    suicide();
    goto out;
  }

  // mark down any failed peers
  for (map<mds_gid_t,MDSMap::mds_info_t>::const_iterator p = oldmap->get_mds_info().begin();
       p != oldmap->get_mds_info().end();
       ++p) {
    if (mdsmap->get_mds_info().count(p->first) == 0) {
      dout(10) << " peer mds gid " << p->first << " removed from map" << dendl;
      messenger->mark_down(p->second.addr);
    }
  }

  if (whoami == MDS_RANK_NONE &&
      new_state == MDSMap::STATE_STANDBY_REPLAY) {
    whoami = mdsmap->get_mds_info_gid(mds_gid_t(monc->get_global_id())).standby_for_rank;
  }

  // see who i am
  addr = messenger->get_myaddr();
  dout(10) << "map says i am " << addr << " mds." << whoami << "." << incarnation
           << " state " << ceph_mds_state_name(new_state) << dendl;

  if (whoami == MDS_RANK_NONE) {
    if (mds_rank != NULL) {
      // We have entered a rank-holding state, we shouldn't be back
      // here!
      if (g_conf->mds_enforce_unique_name) {
        if (mds_gid_t existing = mdsmap->find_mds_gid_by_name(name)) {
          const MDSMap::mds_info_t& i = mdsmap->get_info_gid(existing);
          if (i.global_id > monc->get_global_id()) {
            dout(1) << "handle_mds_map i (" << addr
                    << ") dne in the mdsmap, new instance has larger gid " << i.global_id
                    << ", suicide" << dendl;
            // Call suicide() rather than respawn() because if someone else
            // has taken our ID, we don't want to keep restarting and
            // fighting them for the ID.
            suicide();
            m->put();
            return;
          }
        }
      }

      dout(1) << "handle_mds_map i (" << addr
              << ") dne in the mdsmap, respawning myself" << dendl;
      respawn();
    }
    // MDSRank not active: process the map here to see if we have
    // been assigned a rank.
    dout(10) << __func__ << ": handling map in rankless mode" << dendl;
    _handle_mds_map(oldmap);
  } else {

    // Did we already hold a different rank? MDSMonitor shouldn't try
    // to change that out from under me!
    if (mds_rank && whoami != mds_rank->get_nodeid()) {
      derr << "Invalid rank transition " << mds_rank->get_nodeid() << "->"
           << whoami << dendl;
      respawn();
    }

    // Did I previously not hold a rank? Initialize!
    if (mds_rank == NULL) {
      mds_rank = new MDSRankDispatcher(whoami, mds_lock, clog,
                                       timer, beacon, mdsmap, messenger, monc,
                                       new FunctionContext([this](int r){respawn();}),
                                       new FunctionContext([this](int r){suicide();}));
      dout(10) << __func__ << ": initializing MDS rank "
               << mds_rank->get_nodeid() << dendl;
      mds_rank->init();
    }

    // MDSRank is active: let him process the map, we have no say.
    dout(10) << __func__ << ": handling map as rank "
             << mds_rank->get_nodeid() << dendl;
    mds_rank->handle_mds_map(m, oldmap);
  }

out:
  beacon.notify_mdsmap(mdsmap);
  m->put();
  delete oldmap;
}

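// Handle an MDSMap update while we do not hold a rank: stay in standby,
// or re-send a boot beacon if the map says we dropped out entirely.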
void MDSDaemon::_handle_mds_map(MDSMap *oldmap)
{
  MDSMap::DaemonState new_state = mdsmap->get_state_gid(mds_gid_t(monc->get_global_id()));

  // Normal rankless case, we're marked as standby
  if (new_state == MDSMap::STATE_STANDBY) {
    beacon.set_want_state(mdsmap, new_state);
    dout(1) << "handle_mds_map standby" << dendl;

    return;
  }

  // Case where we thought we were standby, but MDSMap disagrees
  if (beacon.get_want_state() == MDSMap::STATE_STANDBY) {
    dout(10) << "dropped out of mdsmap, try to re-add myself" << dendl;
    new_state = MDSMap::STATE_BOOT;
    beacon.set_want_state(mdsmap, new_state);
    return;
  }

  // Case where we have sent a boot beacon that isn't reflected yet
  if (beacon.get_want_state() == MDSMap::STATE_BOOT) {
    dout(10) << "not in map yet" << dendl;
  }
}

void MDSDaemon::handle_signal(int signum)
{
  assert(signum == SIGINT || signum == SIGTERM);
  derr << "*** got signal " << sig_str(signum) << " ***" << dendl;
  {
    Mutex::Locker l(mds_lock);
    if (stopping) {
      return;
    }
    suicide();
  }
}

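// Orderly shutdown: cancel the tick, drop the config observer and admin
// socket, tell the monitors we are going away via the beacon, then shut down
// the rank (or, if we never held one, the timer, mon client and messenger).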
void MDSDaemon::suicide()
{
  assert(mds_lock.is_locked());

  // make sure we don't suicide twice
  assert(stopping == false);
  stopping = true;

  dout(1) << "suicide. wanted state "
          << ceph_mds_state_name(beacon.get_want_state()) << dendl;

  if (tick_event) {
    timer.cancel_event(tick_event);
    tick_event = 0;
  }

  // add_observer() is called right after set_up_admin_socket() in init(), so
  // a non-null asok_hook tells us the observer was registered and
  // remove_observer() will not assert.
  if (asok_hook != NULL)
    g_conf->remove_observer(this);

  clean_up_admin_socket();

  // Inform the MDSMonitor we are going away, then shut down the beacon
  beacon.set_want_state(mdsmap, MDSMap::STATE_DNE);
  if (!mdsmap->is_dne_gid(mds_gid_t(monc->get_global_id()))) {
    // Notify the MDSMonitor that we're dying, so that it doesn't have to
    // wait for us to go laggy. Only do this if we're actually in the
    // MDSMap, because otherwise the MDSMonitor will drop our message.
    beacon.send_and_wait(1);
  }
  beacon.shutdown();

  mgrc.shutdown();

  if (mds_rank) {
    mds_rank->shutdown();
  } else {
    timer.shutdown();

    monc->shutdown();
    messenger->shutdown();
  }
}

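// Re-exec the original executable with the original arguments (preferring
// /proc/self/exe so a deleted binary can still be restarted); never returns.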
void MDSDaemon::respawn()
{
  dout(1) << "respawn" << dendl;

  char *new_argv[orig_argc+1];
  dout(1) << " e: '" << orig_argv[0] << "'" << dendl;
  for (int i=0; i<orig_argc; i++) {
    new_argv[i] = (char *)orig_argv[i];
    dout(1) << " " << i << ": '" << orig_argv[i] << "'" << dendl;
  }
  new_argv[orig_argc] = NULL;

  /* Determine the path to our executable, test if Linux /proc/self/exe exists.
   * This allows us to exec the same executable even if it has since been
   * unlinked.
   */
  char exe_path[PATH_MAX] = "";
  if (readlink(PROCPREFIX "/proc/self/exe", exe_path, PATH_MAX-1) == -1) {
    /* Print CWD for the user's interest */
    char buf[PATH_MAX];
    char *cwd = getcwd(buf, sizeof(buf));
    assert(cwd);
    dout(1) << " cwd " << cwd << dendl;

    /* Fall back to a best-effort: just running in our CWD */
    strncpy(exe_path, orig_argv[0], PATH_MAX-1);
  } else {
    dout(1) << "respawning with exe " << exe_path << dendl;
    strcpy(exe_path, PROCPREFIX "/proc/self/exe");
  }

  dout(1) << " exe_path " << exe_path << dendl;

  unblock_all_signals(NULL);
  execv(exe_path, new_argv);

  dout(0) << "respawn execv " << orig_argv[0]
          << " failed with " << cpp_strerror(errno) << dendl;

  // We have to assert out here, because suicide() returns, and callers
  // to respawn expect it never to return.
  ceph_abort();
}


bool MDSDaemon::ms_dispatch(Message *m)
{
  Mutex::Locker l(mds_lock);
  if (stopping) {
    return false;
  }

  // Drop out early if shutting down
  if (beacon.get_want_state() == CEPH_MDS_STATE_DNE) {
    dout(10) << " stopping, discarding " << *m << dendl;
    m->put();
    return true;
  }

  // First see if it's a daemon message
  const bool handled_core = handle_core_message(m);
  if (handled_core) {
    return true;
  }

  // Not core, try it as a rank message
  if (mds_rank) {
    return mds_rank->ms_dispatch(m);
  } else {
    return false;
  }
}

bool MDSDaemon::ms_get_authorizer(int dest_type, AuthAuthorizer **authorizer, bool force_new)
{
  dout(10) << "MDSDaemon::ms_get_authorizer type="
           << ceph_entity_type_name(dest_type) << dendl;

  /* monitor authorization is handled on a different layer */
  if (dest_type == CEPH_ENTITY_TYPE_MON)
    return true;

  if (force_new) {
    if (monc->wait_auth_rotating(10) < 0)
      return false;
  }

  *authorizer = monc->build_authorizer(dest_type);
  return *authorizer != NULL;
}


/*
 * high priority messages we always process
 */
bool MDSDaemon::handle_core_message(Message *m)
{
  switch (m->get_type()) {
  case CEPH_MSG_MON_MAP:
    ALLOW_MESSAGES_FROM(CEPH_ENTITY_TYPE_MON);
    m->put();
    break;

    // MDS
  case CEPH_MSG_MDS_MAP:
    ALLOW_MESSAGES_FROM(CEPH_ENTITY_TYPE_MON | CEPH_ENTITY_TYPE_MDS);
    handle_mds_map(static_cast<MMDSMap*>(m));
    break;

    // OSD
  case MSG_COMMAND:
    handle_command(static_cast<MCommand*>(m));
    break;
  case CEPH_MSG_OSD_MAP:
    ALLOW_MESSAGES_FROM(CEPH_ENTITY_TYPE_MON | CEPH_ENTITY_TYPE_OSD);

    if (mds_rank) {
      mds_rank->handle_osd_map();
    }
    m->put();
    break;

  case MSG_MON_COMMAND:
    ALLOW_MESSAGES_FROM(CEPH_ENTITY_TYPE_MON);
    clog->warn() << "dropping `mds tell` command from legacy monitor";
    m->put();
    break;

  default:
    return false;
  }
  return true;
}

void MDSDaemon::ms_handle_connect(Connection *con)
{
}

bool MDSDaemon::ms_handle_reset(Connection *con)
{
  if (con->get_peer_type() != CEPH_ENTITY_TYPE_CLIENT)
    return false;

  Mutex::Locker l(mds_lock);
  if (stopping) {
    return false;
  }
  dout(5) << "ms_handle_reset on " << con->get_peer_addr() << dendl;
  if (beacon.get_want_state() == CEPH_MDS_STATE_DNE)
    return false;

  Session *session = static_cast<Session *>(con->get_priv());
  if (session) {
    if (session->is_closed()) {
      dout(3) << "ms_handle_reset closing connection for session " << session->info.inst << dendl;
      con->mark_down();
      con->set_priv(NULL);
    }
    session->put();
  } else {
    con->mark_down();
  }
  return false;
}


void MDSDaemon::ms_handle_remote_reset(Connection *con)
{
  if (con->get_peer_type() != CEPH_ENTITY_TYPE_CLIENT)
    return;

  Mutex::Locker l(mds_lock);
  if (stopping) {
    return;
  }

  dout(5) << "ms_handle_remote_reset on " << con->get_peer_addr() << dendl;
  if (beacon.get_want_state() == CEPH_MDS_STATE_DNE)
    return;

  Session *session = static_cast<Session *>(con->get_priv());
  if (session) {
    if (session->is_closed()) {
      dout(3) << "ms_handle_remote_reset closing connection for session " << session->info.inst << dendl;
      con->mark_down();
      con->set_priv(NULL);
    }
    session->put();
  }
}

bool MDSDaemon::ms_handle_refused(Connection *con)
{
  // do nothing for now
  return false;
}

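// Verify an incoming authorizer, parse the peer's MDS caps, and attach a
// Session to the connection. Connections are accepted even before we hold a
// rank so that "allow *" clients can issue `tell` commands.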
bool MDSDaemon::ms_verify_authorizer(Connection *con, int peer_type,
                                     int protocol, bufferlist& authorizer_data, bufferlist& authorizer_reply,
                                     bool& is_valid, CryptoKey& session_key)
{
  Mutex::Locker l(mds_lock);
  if (stopping) {
    return false;
  }
  if (beacon.get_want_state() == CEPH_MDS_STATE_DNE)
    return false;

  AuthAuthorizeHandler *authorize_handler = 0;
  switch (peer_type) {
  case CEPH_ENTITY_TYPE_MDS:
    authorize_handler = authorize_handler_cluster_registry->get_handler(protocol);
    break;
  default:
    authorize_handler = authorize_handler_service_registry->get_handler(protocol);
  }
  if (!authorize_handler) {
    dout(0) << "No AuthAuthorizeHandler found for protocol " << protocol << dendl;
    is_valid = false;
    return true;
  }

  AuthCapsInfo caps_info;
  EntityName name;
  uint64_t global_id;

  is_valid = authorize_handler->verify_authorizer(
      cct, monc->rotating_secrets.get(),
      authorizer_data, authorizer_reply, name, global_id, caps_info, session_key);

  if (is_valid) {
    entity_name_t n(con->get_peer_type(), global_id);

    // We allow connections and assign Session instances to connections
    // even if we have not been assigned a rank, because clients with
    // "allow *" are allowed to connect and do 'tell' operations before
    // we have a rank.
    Session *s = NULL;
    if (mds_rank) {
      // If we do hold a rank, see if this is an existing client establishing
      // a new connection, rather than a new client
      s = mds_rank->sessionmap.get_session(n);
    }

    // Wire up a Session* to this connection
    // It doesn't go into a SessionMap instance until it sends an explicit
    // request to open a session (initial state of Session is `closed`)
    if (!s) {
      s = new Session;
      s->info.auth_name = name;
      s->info.inst.addr = con->get_peer_addr();
      s->info.inst.name = n;
      dout(10) << " new session " << s << " for " << s->info.inst << " con " << con << dendl;
      con->set_priv(s);
      s->connection = con;
    } else {
      dout(10) << " existing session " << s << " for " << s->info.inst << " existing con " << s->connection
               << ", new/authorizing con " << con << dendl;
      con->set_priv(s->get());

      // Wait until we fully accept the connection before setting
      // s->connection. In particular, if there are multiple incoming
      // connection attempts, they will all get their authorizer
      // validated, but some of them may "lose the race" and get
      // dropped. We only want to consider the winner(s). See
      // ms_handle_accept(). This is important for Sessions we replay
      // from the journal on recovery that don't have established
      // messenger state; we want the con from only the winning
      // connect attempt(s). (Normal reconnects that don't follow MDS
      // recovery are reconnected to the existing con by the
      // messenger.)
    }

    if (caps_info.allow_all) {
      // Flag for auth providers that don't provide cap strings
      s->auth_caps.set_allow_all();
    }

    bufferlist::iterator p = caps_info.caps.begin();
    string auth_cap_str;
    try {
      ::decode(auth_cap_str, p);

      dout(10) << __func__ << ": parsing auth_cap_str='" << auth_cap_str << "'" << dendl;
      std::ostringstream errstr;
      if (!s->auth_caps.parse(g_ceph_context, auth_cap_str, &errstr)) {
        dout(1) << __func__ << ": auth cap parse error: " << errstr.str()
                << " parsing '" << auth_cap_str << "'" << dendl;
        clog->warn() << name << " mds cap '" << auth_cap_str
                     << "' does not parse: " << errstr.str();
      }
    } catch (buffer::error& e) {
      // Assume legacy auth, defaults to:
      //  * permit all filesystem ops
      //  * permit no `tell` ops
      dout(1) << __func__ << ": cannot decode auth caps bl of length " << caps_info.caps.length() << dendl;
    }
  }

  return true;  // we made a decision (see is_valid)
}


void MDSDaemon::ms_handle_accept(Connection *con)
{
  Mutex::Locker l(mds_lock);
  if (stopping) {
    return;
  }

  Session *s = static_cast<Session *>(con->get_priv());
  dout(10) << "ms_handle_accept " << con->get_peer_addr() << " con " << con << " session " << s << dendl;
  if (s) {
    if (s->connection != con) {
      dout(10) << " session connection " << s->connection << " -> " << con << dendl;
      s->connection = con;

      // send out any queued messages
      while (!s->preopen_out_queue.empty()) {
        con->send_message(s->preopen_out_queue.front());
        s->preopen_out_queue.pop_front();
      }
    }
    s->put();
  }
}

bool MDSDaemon::is_clean_shutdown()
{
  if (mds_rank) {
    return mds_rank->is_stopped();
  } else {
    return true;
  }
}