// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
 * Ceph - scalable distributed file system
 *
 * Copyright (C) 2014 John Spray <john.spray@inktank.com>
 *
 * This is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License version 2.1, as published by the Free Software
 * Foundation.  See file COPYING.
 */
17 #include "ActivePyModules.h"
19 #include <rocksdb/version.h>
21 #include "common/errno.h"
22 #include "include/stringify.h"
24 #include "mon/MonMap.h"
25 #include "osd/OSDMap.h"
26 #include "osd/osd_types.h"
27 #include "mgr/MgrContext.h"
28 #include "mgr/TTLCache.h"
29 #include "mgr/mgr_perf_counters.h"
31 #include "DaemonKey.h"
32 #include "DaemonServer.h"
33 #include "mgr/MgrContext.h"
34 #include "PyFormatter.h"
35 // For ::mgr_store_prefix
37 #include "PyModuleRegistry.h"
40 #define dout_context g_ceph_context
41 #define dout_subsys ceph_subsys_mgr
43 #define dout_prefix *_dout << "mgr " << __func__ << " "
47 using namespace std::literals
;
// Construct the active-modules runtime: bind references to the mgr's core
// services (daemon/cluster state, mon client, log channels, objecter,
// client, finishers, server, module registry), seed the config-key store
// cache from the snapshot handed in, and build the initial ConfigMap.
// NOTE(review): this extraction is missing interior source lines (the
// embedded numbering has gaps), so some braces are absent below.
49 ActivePyModules::ActivePyModules(
50 PyModuleConfig
&module_config_
,
51 std::map
<std::string
, std::string
> store_data
,
52 bool mon_provides_kv_sub
,
53 DaemonStateIndex
&ds
, ClusterState
&cs
,
54 MonClient
&mc
, LogChannelRef clog_
,
55 LogChannelRef audit_clog_
, Objecter
&objecter_
,
56 Client
&client_
, Finisher
&f
, DaemonServer
&server
,
57 PyModuleRegistry
&pmr
)
58 : module_config(module_config_
), daemon_state(ds
), cluster_state(cs
),
59 monc(mc
), clog(clog_
), audit_clog(audit_clog_
), objecter(objecter_
),
60 client(client_
), finisher(f
),
61 cmd_finisher(g_ceph_context
, "cmd_finisher", "cmdfin"),
62 server(server
), py_module_registry(pmr
)
// Seed the local config-key cache with the snapshot taken at activation.
64 store_cache
= std::move(store_data
);
65 // we can only trust our ConfigMap if the mon cluster has provided
66 // kv sub since our startup.
67 have_local_config_map
= mon_provides_kv_sub
;
68 _refresh_config_map();
// Defaulted out of line; members release their resources via RAII.
72 ActivePyModules::~ActivePyModules() = default;
// Serialize one host's daemons into the supplied formatter: hostname plus
// a "services" array of {type, id, ceph_version} records, and a "name"
// where the daemon metadata carries an "id". The per-daemon metadata is
// read without the GIL, under each daemon's own lock.
74 void ActivePyModules::dump_server(const std::string
&hostname
,
75 const DaemonStateCollection
&dmc
,
78 f
->dump_string("hostname", hostname
);
79 f
->open_array_section("services");
80 std::string ceph_version
;
82 for (const auto &[key
, state
] : dmc
) {
84 without_gil([&ceph_version
, &id
, state
=state
] {
85 std::lock_guard
l(state
->lock
);
86 // TODO: pick the highest version, and make sure that
87 // somewhere else (during health reporting?) we are
88 // indicating to the user if we see mixed versions
89 auto ver_iter
= state
->metadata
.find("ceph_version");
90 if (ver_iter
!= state
->metadata
.end()) {
91 ceph_version
= state
->metadata
.at("ceph_version");
93 if (state
->metadata
.find("id") != state
->metadata
.end()) {
94 id
= state
->metadata
.at("id");
97 f
->open_object_section("service");
98 f
->dump_string("type", key
.type
);
99 f
->dump_string("id", key
.name
);
100 f
->dump_string("ceph_version", ceph_version
);
102 f
->dump_string("name", id
);
// Host-level ceph_version: left as the last daemon's version seen above.
108 f
->dump_string("ceph_version", ceph_version
);
// Python binding: dump_server() output for a single hostname. Daemon
// state is fetched without the GIL; formatting follows.
111 PyObject
*ActivePyModules::get_server_python(const std::string
&hostname
)
113 const auto dmc
= without_gil([&]{
114 std::lock_guard
l(lock
);
115 dout(10) << " (" << hostname
<< ")" << dendl
;
116 return daemon_state
.get_by_server(hostname
);
119 dump_server(hostname
, dmc
, &f
);
// Python binding: one "server" object per known host, each filled in via
// dump_server(). The GIL is re-acquired only inside the daemon-state
// callback, once the host map is available.
124 PyObject
*ActivePyModules::list_servers_python()
126 dout(10) << " >" << dendl
;
128 without_gil_t no_gil
;
129 return daemon_state
.with_daemons_by_server([this, &no_gil
]
130 (const std::map
<std::string
, DaemonStateCollection
> &all
) {
131 no_gil
.acquire_gil();
132 PyFormatter
f(false, true);
133 for (const auto &[hostname
, daemon_state
] : all
) {
134 f
.open_object_section("server");
135 dump_server(hostname
, daemon_state
, &f
);
// Python binding: {hostname, <metadata key/values>} for the daemon
// identified by (svc_type, svc_id); logs an error if the daemon is
// unknown (early-exit path lies on lines missing from this extraction).
142 PyObject
*ActivePyModules::get_metadata_python(
143 const std::string
&svc_type
,
144 const std::string
&svc_id
)
146 auto metadata
= daemon_state
.get(DaemonKey
{svc_type
, svc_id
});
147 if (metadata
== nullptr) {
148 derr
<< "Requested missing service " << svc_type
<< "." << svc_id
<< dendl
;
151 auto l
= without_gil([&] {
152 return std::lock_guard(lock
);
155 f
.dump_string("hostname", metadata
->hostname
);
156 for (const auto &[key
, val
] : metadata
->metadata
) {
157 f
.dump_string(key
, val
);
// Python binding: the daemon's self-reported service_status map for
// (svc_type, svc_id); logs an error if the daemon is unknown.
163 PyObject
*ActivePyModules::get_daemon_status_python(
164 const std::string
&svc_type
,
165 const std::string
&svc_id
)
167 auto metadata
= daemon_state
.get(DaemonKey
{svc_type
, svc_id
});
168 if (metadata
== nullptr) {
169 derr
<< "Requested missing service " << svc_type
<< "." << svc_id
<< dendl
;
172 auto l
= without_gil([&] {
173 return std::lock_guard(lock
);
176 for (const auto &[daemon
, status
] : metadata
->service_status
) {
177 f
.dump_string(daemon
, status
);
182 void ActivePyModules::update_cache_metrics() {
183 auto hit_miss_ratio
= ttl_cache
.get_hit_miss_ratio();
184 perfcounter
->set(l_mgr_cache_hit
, hit_miss_ratio
.first
);
185 perfcounter
->set(l_mgr_cache_miss
, hit_miss_ratio
.second
);
// Same contract as get_python(), but consults the TTL cache first when
// mgr_ttl_cache_expire_seconds > 0. Hits/misses feed the perf counters;
// cacheable results are inserted after computation.
188 PyObject
*ActivePyModules::cacheable_get_python(const std::string
&what
)
190 uint64_t ttl_seconds
= g_conf().get_val
<uint64_t>("mgr_ttl_cache_expire_seconds");
191 if(ttl_seconds
> 0) {
192 ttl_cache
.set_ttl(ttl_seconds
);
194 PyObject
* cached
= ttl_cache
.get(what
);
195 update_cache_metrics();
// A cache miss throws std::out_of_range — deliberately swallowed here so
// we fall through and compute the value fresh.
197 } catch (std::out_of_range
& e
) {}
200 PyObject
*obj
= get_python(what
);
201 if(ttl_seconds
&& ttl_cache
.is_cacheable(what
)) {
202 ttl_cache
.insert(what
, obj
);
205 update_cache_metrics();
// Core data API exported to Python modules: translate a named dataset
// ("fs_map", "osd_map", "pg_summary", ...) into a Python object by
// dumping the corresponding cluster/daemon state into a formatter.
// Pattern throughout: read cluster state without the GIL, then
// re-acquire the GIL only for the formatting/Python-object phase.
// NOTE(review): interior source lines are missing from this extraction
// (numbering gaps), so some braces/returns are absent below.
209 PyObject
*ActivePyModules::get_python(const std::string
&what
)
211 uint64_t ttl_seconds
= g_conf().get_val
<uint64_t>("mgr_ttl_cache_expire_seconds");
215 // Use PyJSONFormatter if TTL cache is enabled.
216 Formatter
&f
= ttl_seconds
? (Formatter
&)jf
: (Formatter
&)pf
;
218 if (what
== "fs_map") {
219 without_gil_t no_gil
;
220 cluster_state
.with_fsmap([&](const FSMap
&fsmap
) {
221 no_gil
.acquire_gil();
224 } else if (what
== "osdmap_crush_map_text") {
225 without_gil_t no_gil
;
227 cluster_state
.with_osdmap([&](const OSDMap
&osd_map
){
228 osd_map
.crush
->encode(rdata
, CEPH_FEATURES_SUPPORTED_DEFAULT
);
230 std::string crush_text
= rdata
.to_str();
231 no_gil
.acquire_gil();
232 return PyUnicode_FromString(crush_text
.c_str());
233 } else if (what
.substr(0, 7) == "osd_map") {
234 without_gil_t no_gil
;
235 cluster_state
.with_osdmap([&](const OSDMap
&osd_map
){
236 no_gil
.acquire_gil();
237 if (what
== "osd_map") {
239 } else if (what
== "osd_map_tree") {
240 osd_map
.print_tree(&f
, nullptr);
241 } else if (what
== "osd_map_crush") {
242 osd_map
.crush
->dump(&f
);
245 } else if (what
== "modified_config_options") {
246 without_gil_t no_gil
;
247 auto all_daemons
= daemon_state
.get_all();
249 for (auto& [key
, daemon
] : all_daemons
) {
250 std::lock_guard
l(daemon
->lock
);
251 for (auto& [name
, valmap
] : daemon
->config
) {
255 no_gil
.acquire_gil();
256 f
.open_array_section("options");
257 for (auto& name
: names
) {
258 f
.dump_string("name", name
);
261 } else if (what
.substr(0, 6) == "config") {
262 if (what
== "config_options") {
263 g_conf().config_options(&f
);
264 } else if (what
== "config") {
265 g_conf().show_config(&f
);
267 } else if (what
== "mon_map") {
268 without_gil_t no_gil
;
269 cluster_state
.with_monmap([&](const MonMap
&monmap
) {
270 no_gil
.acquire_gil();
273 } else if (what
== "service_map") {
274 without_gil_t no_gil
;
275 cluster_state
.with_servicemap([&](const ServiceMap
&service_map
) {
276 no_gil
.acquire_gil();
277 service_map
.dump(&f
);
279 } else if (what
== "osd_metadata") {
280 without_gil_t no_gil
;
281 auto dmc
= daemon_state
.get_by_service("osd");
282 for (const auto &[key
, state
] : dmc
) {
283 std::lock_guard
l(state
->lock
);
284 with_gil(no_gil
, [&f
, &name
=key
.name
, state
=state
] {
285 f
.open_object_section(name
.c_str());
286 f
.dump_string("hostname", state
->hostname
);
287 for (const auto &[name
, val
] : state
->metadata
) {
288 f
.dump_string(name
.c_str(), val
);
293 } else if (what
== "mds_metadata") {
294 without_gil_t no_gil
;
295 auto dmc
= daemon_state
.get_by_service("mds");
296 for (const auto &[key
, state
] : dmc
) {
297 std::lock_guard
l(state
->lock
);
298 with_gil(no_gil
, [&f
, &name
=key
.name
, state
=state
] {
299 f
.open_object_section(name
.c_str());
300 f
.dump_string("hostname", state
->hostname
);
301 for (const auto &[name
, val
] : state
->metadata
) {
302 f
.dump_string(name
.c_str(), val
);
// pg_summary: PG state histograms keyed by OSD, by pool, and overall,
// computed from pg_map.pg_stat before taking the GIL.
307 } else if (what
== "pg_summary") {
308 without_gil_t no_gil
;
309 cluster_state
.with_pgmap(
310 [&f
, &no_gil
](const PGMap
&pg_map
) {
311 std::map
<std::string
, std::map
<std::string
, uint32_t> > osds
;
312 std::map
<std::string
, std::map
<std::string
, uint32_t> > pools
;
313 std::map
<std::string
, uint32_t> all
;
314 for (const auto &i
: pg_map
.pg_stat
) {
315 const auto pool
= i
.first
.m_pool
;
316 const std::string state
= pg_state_string(i
.second
.state
);
317 // Insert to per-pool map
318 pools
[stringify(pool
)][state
]++;
319 for (const auto &osd_id
: i
.second
.acting
) {
320 osds
[stringify(osd_id
)][state
]++;
324 no_gil
.acquire_gil();
325 f
.open_object_section("by_osd");
326 for (const auto &i
: osds
) {
327 f
.open_object_section(i
.first
.c_str());
328 for (const auto &j
: i
.second
) {
329 f
.dump_int(j
.first
.c_str(), j
.second
);
334 f
.open_object_section("by_pool");
335 for (const auto &i
: pools
) {
336 f
.open_object_section(i
.first
.c_str());
337 for (const auto &j
: i
.second
) {
338 f
.dump_int(j
.first
.c_str(), j
.second
);
343 f
.open_object_section("all");
344 for (const auto &i
: all
) {
345 f
.dump_int(i
.first
.c_str(), i
.second
);
348 f
.open_object_section("pg_stats_sum");
349 pg_map
.pg_sum
.dump(&f
);
353 } else if (what
== "pg_status") {
354 without_gil_t no_gil
;
355 cluster_state
.with_pgmap(
356 [&](const PGMap
&pg_map
) {
357 no_gil
.acquire_gil();
358 pg_map
.print_summary(&f
, nullptr);
361 } else if (what
== "pg_dump") {
362 without_gil_t no_gil
;
363 cluster_state
.with_pgmap(
364 [&](const PGMap
&pg_map
) {
365 no_gil
.acquire_gil();
366 pg_map
.dump(&f
, false);
369 } else if (what
== "devices") {
370 without_gil_t no_gil
;
371 daemon_state
.with_devices2(
373 with_gil(no_gil
, [&] { f
.open_array_section("devices"); });
375 [&](const DeviceState
&dev
) {
376 with_gil(no_gil
, [&] { f
.dump_object("device", dev
); });
378 with_gil(no_gil
, [&] {
// "device <devid>": dump a single device's state by id.
381 } else if (what
.size() > 7 &&
382 what
.substr(0, 7) == "device ") {
383 without_gil_t no_gil
;
384 string devid
= what
.substr(7);
385 if (!daemon_state
.with_device(devid
,
386 [&] (const DeviceState
& dev
) {
387 with_gil_t with_gil
{no_gil
};
388 f
.dump_object("device", dev
);
392 } else if (what
== "io_rate") {
393 without_gil_t no_gil
;
394 cluster_state
.with_pgmap(
395 [&](const PGMap
&pg_map
) {
396 no_gil
.acquire_gil();
397 pg_map
.dump_delta(&f
);
400 } else if (what
== "df") {
401 without_gil_t no_gil
;
402 cluster_state
.with_osdmap_and_pgmap(
404 const OSDMap
& osd_map
,
405 const PGMap
&pg_map
) {
406 no_gil
.acquire_gil();
407 pg_map
.dump_cluster_stats(nullptr, &f
, true);
408 pg_map
.dump_pool_stats_full(osd_map
, nullptr, &f
, true);
410 } else if (what
== "pg_stats") {
411 without_gil_t no_gil
;
412 cluster_state
.with_pgmap([&](const PGMap
&pg_map
) {
413 no_gil
.acquire_gil();
414 pg_map
.dump_pg_stats(&f
, false);
416 } else if (what
== "pool_stats") {
417 without_gil_t no_gil
;
418 cluster_state
.with_pgmap([&](const PGMap
&pg_map
) {
419 no_gil
.acquire_gil();
420 pg_map
.dump_pool_stats(&f
);
422 } else if (what
== "pg_ready") {
423 server
.dump_pg_ready(&f
);
424 } else if (what
== "pg_progress") {
425 without_gil_t no_gil
;
426 cluster_state
.with_pgmap([&](const PGMap
&pg_map
) {
427 no_gil
.acquire_gil();
428 pg_map
.dump_pg_progress(&f
);
429 server
.dump_pg_ready(&f
);
431 } else if (what
== "osd_stats") {
432 without_gil_t no_gil
;
433 cluster_state
.with_pgmap([&](const PGMap
&pg_map
) {
434 no_gil
.acquire_gil();
435 pg_map
.dump_osd_stats(&f
, false);
437 } else if (what
== "osd_ping_times") {
438 without_gil_t no_gil
;
439 cluster_state
.with_pgmap([&](const PGMap
&pg_map
) {
440 no_gil
.acquire_gil();
441 pg_map
.dump_osd_ping_times(&f
);
443 } else if (what
== "osd_pool_stats") {
444 without_gil_t no_gil
;
445 int64_t poolid
= -ENOENT
;
446 cluster_state
.with_osdmap_and_pgmap([&](const OSDMap
& osdmap
,
447 const PGMap
& pg_map
) {
448 no_gil
.acquire_gil();
449 f
.open_array_section("pool_stats");
450 for (auto &p
: osdmap
.get_pools()) {
452 pg_map
.dump_pool_stats_and_io_rate(poolid
, osdmap
, &f
, nullptr);
456 } else if (what
== "health") {
457 without_gil_t no_gil
;
458 cluster_state
.with_health([&](const ceph::bufferlist
&health_json
) {
459 no_gil
.acquire_gil();
460 f
.dump_string("json", health_json
.to_str());
462 } else if (what
== "mon_status") {
463 without_gil_t no_gil
;
464 cluster_state
.with_mon_status(
465 [&](const ceph::bufferlist
&mon_status_json
) {
466 no_gil
.acquire_gil();
467 f
.dump_string("json", mon_status_json
.to_str());
469 } else if (what
== "mgr_map") {
470 without_gil_t no_gil
;
471 cluster_state
.with_mgrmap([&](const MgrMap
&mgr_map
) {
472 no_gil
.acquire_gil();
// mgr_ips: the server's own addresses, deduplicated by IP string.
475 } else if (what
== "mgr_ips") {
476 entity_addrvec_t myaddrs
= server
.get_myaddrs();
477 f
.open_array_section("ips");
478 std::set
<std::string
> did
;
479 for (auto& i
: myaddrs
.v
) {
480 std::string ip
= i
.ip_only_to_str();
481 if (auto [where
, inserted
] = did
.insert(ip
); inserted
) {
482 f
.dump_string("ip", ip
);
486 } else if (what
== "have_local_config_map") {
487 f
.dump_bool("have_local_config_map", have_local_config_map
);
488 } else if (what
== "active_clean_pgs"){
489 without_gil_t no_gil
;
490 cluster_state
.with_pgmap(
491 [&](const PGMap
&pg_map
) {
492 no_gil
.acquire_gil();
493 f
.open_array_section("pg_stats");
494 for (auto &i
: pg_map
.pg_stat
) {
495 const auto state
= i
.second
.state
;
496 const auto pgid_raw
= i
.first
;
497 const auto pgid
= stringify(pgid_raw
.m_pool
) + "." + stringify(pgid_raw
.m_seed
);
498 const auto reported_epoch
= i
.second
.reported_epoch
;
499 if (state
& PG_STATE_ACTIVE
&& state
& PG_STATE_CLEAN
) {
500 f
.open_object_section("pg_stat");
501 f
.dump_string("pgid", pgid
);
502 f
.dump_string("state", pg_state_string(state
));
503 f
.dump_unsigned("reported_epoch", reported_epoch
);
508 const auto num_pg
= pg_map
.num_pg
;
509 f
.dump_unsigned("total_num_pgs", num_pg
);
// Unknown dataset name: log it and fall through to the error path.
512 derr
<< "Python module requested unknown data '" << what
<< "'" << dendl
;
515 without_gil_t no_gil
;
516 no_gil
.acquire_gil();
// Begin running one python module in active mode: register it as pending,
// then load it and start its serve() thread from the finisher — never
// inline — so python code cannot block C++ callers or create lock cycles.
524 void ActivePyModules::start_one(PyModuleRef py_module
)
526 std::lock_guard
l(lock
);
528 const auto name
= py_module
->get_name();
529 auto active_module
= std::make_shared
<ActivePyModule
>(py_module
, clog
);
531 pending_modules
.insert(name
);
532 // Send all python calls down a Finisher to avoid blocking
533 // C++ code, and avoid any potential lock cycles.
534 finisher
.queue(new LambdaContext([this, active_module
, name
](int) {
535 int r
= active_module
->load(this);
536 std::lock_guard
l(lock
);
537 pending_modules
.erase(name
);
539 derr
<< "Failed to run module in active mode ('" << name
<< "')"
542 auto em
= modules
.emplace(name
, active_module
);
543 ceph_assert(em
.second
); // actually inserted
545 dout(4) << "Starting thread for " << name
<< dendl
;
546 active_module
->thread
.create(active_module
->get_thread_name());
// Stop all active modules: first ask each module to shut down, then join
// every serve() thread, and finally drain the command finisher.
551 void ActivePyModules::shutdown()
553 std::lock_guard
locker(lock
);
555 // Signal modules to drop out of serve() and/or tear down resources
556 for (auto& [name
, module
] : modules
) {
558 dout(10) << "calling module " << name
<< " shutdown()" << dendl
;
560 dout(10) << "module " << name
<< " shutdown() returned" << dendl
;
564 // For modules implementing serve(), finish the threads where we
565 // were running that.
566 for (auto& [name
, module
] : modules
) {
568 dout(10) << "joining module " << name
<< dendl
;
569 module
->thread
.join();
570 dout(10) << "joined module " << name
<< dendl
;
574 cmd_finisher
.wait_for_empty();
// Fan a notification (notify_type, notify_id) out to every module whose
// registry entry subscribes to notify_type; delivery is queued on the
// finisher so python code never runs on the caller's thread.
// NOTE(review): "¬ify_type"/"¬ify_id" below are HTML-entity manglings
// of "&notify_type"/"&notify_id" — repair in source control.
580 void ActivePyModules::notify_all(const std::string
¬ify_type
,
581 const std::string
¬ify_id
)
583 std::lock_guard
l(lock
);
585 dout(10) << __func__
<< ": notify_all " << notify_type
<< dendl
;
586 for (auto& [name
, module
] : modules
) {
587 if (!py_module_registry
.should_notify(name
, notify_type
)) {
590 // Send all python calls down a Finisher to avoid blocking
591 // C++ code, and avoid any potential lock cycles.
592 dout(15) << "queuing notify (" << notify_type
<< ") to " << name
<< dendl
;
593 // workaround for https://bugs.llvm.org/show_bug.cgi?id=35984
594 finisher
.queue(new LambdaContext([module
=module
, notify_type
, notify_id
]
596 module
->notify(notify_type
, notify_id
);
// Fan a cluster-log entry out to modules subscribed to "clog". The
// LogEntry is captured by value (see inline note) because the caller's
// instance is probably ephemeral.
601 void ActivePyModules::notify_all(const LogEntry
&log_entry
)
603 std::lock_guard
l(lock
);
605 dout(10) << __func__
<< ": notify_all (clog)" << dendl
;
606 for (auto& [name
, module
] : modules
) {
607 if (!py_module_registry
.should_notify(name
, "clog")) {
610 // Send all python calls down a Finisher to avoid blocking
611 // C++ code, and avoid any potential lock cycles.
613 // Note intentional use of non-reference lambda binding on
614 // log_entry: we take a copy because caller's instance is
615 // probably ephemeral.
616 dout(15) << "queuing notify (clog) to " << name
<< dendl
;
617 // workaround for https://bugs.llvm.org/show_bug.cgi?id=35984
618 finisher
.queue(new LambdaContext([module
=module
, log_entry
](int r
){
619 module
->notify_clog(log_entry
);
// Look up a module's config-key store value in the local store_cache
// under the key mgr_store_prefix + "<module>/<key>". Returns whether the
// key was found (found/not-found branches continue on missing lines).
624 bool ActivePyModules::get_store(const std::string
&module_name
,
625 const std::string
&key
, std::string
*val
) const
627 without_gil_t no_gil
;
628 std::lock_guard
l(lock
);
630 const std::string global_key
= PyModule::mgr_store_prefix
631 + module_name
+ "/" + key
;
633 dout(4) << __func__
<< " key: " << global_key
<< dendl
;
635 auto i
= store_cache
.find(global_key
);
636 if (i
!= store_cache
.end()) {
// Route a module-to-module "remote" call: the target module must already
// be active (asserted), then method/args/kwargs are forwarded and errors
// are reported through *err.
644 PyObject
*ActivePyModules::dispatch_remote(
645 const std::string
&other_module
,
646 const std::string
&method
,
651 auto mod_iter
= modules
.find(other_module
);
652 ceph_assert(mod_iter
!= modules
.end());
654 return mod_iter
->second
->dispatch_remote(method
, args
, kwargs
, err
);
// Read a module config option from the in-memory module_config under its
// "mgr/<module>/<key>" global name; returns whether the key was present.
657 bool ActivePyModules::get_config(const std::string
&module_name
,
658 const std::string
&key
, std::string
*val
) const
660 const std::string global_key
= "mgr/" + module_name
+ "/" + key
;
662 dout(20) << " key: " << global_key
<< dendl
;
664 std::lock_guard
lock(module_config
.lock
);
666 auto i
= module_config
.config
.find(global_key
);
667 if (i
!= module_config
.config
.end()) {
// Fetch a module option and convert it to a typed Python value via the
// owning module's option schema. Tries "<prefix>/<key>" first, then the
// bare key; on success the value itself is kept out of the logs.
675 PyObject
*ActivePyModules::get_typed_config(
676 const std::string
&module_name
,
677 const std::string
&key
,
678 const std::string
&prefix
) const
680 without_gil_t no_gil
;
682 std::string final_key
;
685 final_key
= prefix
+ "/" + key
;
686 found
= get_config(module_name
, final_key
, &value
);
// Fallback lookup under the unprefixed key (assignment of final_key to
// the bare key happens on a line missing from this extraction).
690 found
= get_config(module_name
, final_key
, &value
);
693 PyModuleRef module
= py_module_registry
.get_module(module_name
);
694 no_gil
.acquire_gil();
696 derr
<< "Module '" << module_name
<< "' is not available" << dendl
;
699 // removing value to hide sensitive data going into mgr logs
700 // leaving this for debugging purposes
701 // dout(10) << __func__ << " " << final_key << " found: " << value << dendl;
702 dout(10) << __func__
<< " " << final_key
<< " found" << dendl
;
703 return module
->get_typed_option_value(key
, value
);
706 dout(10) << " [" << prefix
<< "/]" << key
<< " not found "
709 dout(10) << " " << key
<< " not found " << dendl
;
// Return a dict of every store_cache entry whose key starts with
// mgr_store_prefix + "<module>/<prefix>", with the store base prefix
// stripped from each emitted key. Holds both the modules lock and the
// module_config lock while iterating.
714 PyObject
*ActivePyModules::get_store_prefix(const std::string
&module_name
,
715 const std::string
&prefix
) const
717 without_gil_t no_gil
;
718 std::lock_guard
l(lock
);
719 std::lock_guard
lock(module_config
.lock
);
720 no_gil
.acquire_gil();
722 const std::string base_prefix
= PyModule::mgr_store_prefix
724 const std::string global_prefix
= base_prefix
+ prefix
;
725 dout(4) << __func__
<< " prefix: " << global_prefix
<< dendl
;
728 for (auto p
= store_cache
.lower_bound(global_prefix
);
729 p
!= store_cache
.end() && p
->first
.find(global_prefix
) == 0; ++p
) {
730 f
.dump_string(p
->first
.c_str() + base_prefix
.size(), p
->second
);
// Set or erase (val == std::nullopt) a module config-key entry. The
// local store_cache is updated eagerly, then the change is persisted via
// a mon "config-key set"/"config-key del" command; command failures are
// logged but not raised back into python (see FIXME below).
735 void ActivePyModules::set_store(const std::string
&module_name
,
736 const std::string
&key
, const std::optional
<std::string
>& val
)
738 const std::string global_key
= PyModule::mgr_store_prefix
739 + module_name
+ "/" + key
;
743 std::lock_guard
l(lock
);
745 // NOTE: this isn't strictly necessary since we'll also get an MKVData
746 // update from the mon due to our subscription *before* our command is acked.
748 store_cache
[global_key
] = *val
;
750 store_cache
.erase(global_key
);
// Build the mon command JSON: "set" with a value, or "del" without.
753 std::ostringstream cmd_json
;
755 jf
.open_object_section("cmd");
757 jf
.dump_string("prefix", "config-key set");
758 jf
.dump_string("key", global_key
);
759 jf
.dump_string("val", *val
);
761 jf
.dump_string("prefix", "config-key del");
762 jf
.dump_string("key", global_key
);
766 set_cmd
.run(&monc
, cmd_json
.str());
770 if (set_cmd
.r
!= 0) {
771 // config-key set will fail if mgr's auth key has insufficient
772 // permission to set config keys
773 // FIXME: should this somehow raise an exception back into Python land?
774 dout(0) << "`config-key set " << global_key
<< " " << val
<< "` failed: "
775 << cpp_strerror(set_cmd
.r
) << dendl
;
776 dout(0) << "mon returned " << set_cmd
.r
<< ": " << set_cmd
.outs
<< dendl
;
780 std::pair
<int, std::string
> ActivePyModules::set_config(
781 const std::string
&module_name
,
782 const std::string
&key
,
783 const std::optional
<std::string
>& val
)
785 return module_config
.set_config(&monc
, module_name
, key
, val
);
// Collect the URI advertised by each active module (via get_uri()),
// keyed by module name; modules publishing no URI are skipped.
// (The trailing `return result;` lies on lines missing from this view.)
788 std::map
<std::string
, std::string
> ActivePyModules::get_services() const
790 std::map
<std::string
, std::string
> result
;
791 std::lock_guard
l(lock
);
792 for (const auto& [name
, module
] : modules
) {
793 std::string svc_str
= module
->get_uri();
794 if (!svc_str
.empty()) {
795 result
[name
] = svc_str
;
// Apply a config-key subscription update from the mon: either a full
// refresh (drop every cached key under `prefix` first) or an incremental
// one, then rebuild the ConfigMap if any "config/" key changed.
802 void ActivePyModules::update_kv_data(
803 const std::string prefix
,
805 const map
<std::string
, std::optional
<bufferlist
>, std::less
<>>& data
)
807 std::lock_guard
l(lock
);
808 bool do_config
= false;
810 dout(10) << "full update on " << prefix
<< dendl
;
811 auto p
= store_cache
.lower_bound(prefix
);
812 while (p
!= store_cache
.end() && p
->first
.find(prefix
) == 0) {
813 dout(20) << " rm prior " << p
->first
<< dendl
;
814 p
= store_cache
.erase(p
);
817 dout(10) << "incremental update on " << prefix
<< dendl
;
// Each entry carries an optional bufferlist: present = set, absent = rm.
819 for (auto& i
: data
) {
821 dout(20) << " set " << i
.first
<< " = " << i
.second
->to_str() << dendl
;
822 store_cache
[i
.first
] = i
.second
->to_str();
824 dout(20) << " rm " << i
.first
<< dendl
;
825 store_cache
.erase(i
.first
);
827 if (i
.first
.find("config/") == 0) {
832 _refresh_config_map();
// Rebuild config_map from the "config/..." entries in store_cache.
// Module options ("mgr/...") are skipped; unknown option names become
// stray TYPE_STR placeholder Options; each key's mask is parsed into the
// right section (global / by_type / by_id). NO_MON_UPDATE options and
// invalid masks are rejected with a log message.
// NOTE(review): "§ion_name" below is an HTML-entity mangling of
// "&section_name" — repair in source control.
836 void ActivePyModules::_refresh_config_map()
840 for (auto p
= store_cache
.lower_bound("config/");
841 p
!= store_cache
.end() && p
->first
.find("config/") == 0;
843 string key
= p
->first
.substr(7);
844 if (key
.find("mgr/") == 0) {
845 // NOTE: for now, we ignore module options. see also ceph_foreign_option_get().
848 string value
= p
->second
;
851 config_map
.parse_key(key
, &name
, &who
);
853 const Option
*opt
= g_conf().find_option(name
);
855 config_map
.stray_options
.push_back(
856 std::unique_ptr
<Option
>(
857 new Option(name
, Option::TYPE_STR
, Option::LEVEL_UNKNOWN
)));
858 opt
= config_map
.stray_options
.back().get();
862 int r
= opt
->pre_validate(&value
, &err
);
864 dout(10) << __func__
<< " pre-validate failed on '" << name
<< "' = '"
865 << value
<< "' for " << name
<< dendl
;
868 MaskedOption
mopt(opt
);
869 mopt
.raw_value
= value
;
872 !ConfigMap::parse_mask(who
, §ion_name
, &mopt
.mask
)) {
873 derr
<< __func__
<< " invalid mask for key " << key
<< dendl
;
874 } else if (opt
->has_flag(Option::FLAG_NO_MON_UPDATE
)) {
875 dout(10) << __func__
<< " NO_MON_UPDATE option '"
876 << name
<< "' = '" << value
<< "' for " << name
879 Section
*section
= &config_map
.global
;;
880 if (section_name
.size() && section_name
!= "global") {
// A '.' in the section name means a specific daemon id ("osd.0");
// otherwise it is a daemon-type section ("osd").
881 if (section_name
.find('.') != std::string::npos
) {
882 section
= &config_map
.by_id
[section_name
];
884 section
= &config_map
.by_type
[section_name
];
887 section
->options
.insert(make_pair(name
, std::move(mopt
)));
// Common scaffolding for the perf-counter accessors: resolve the daemon
// (svc_name, svc_id) and, if counter `path` exists, invoke `fct` with its
// instance and type under the GIL inside an array section named after the
// path. Missing counters/daemons are only logged.
892 PyObject
* ActivePyModules::with_perf_counters(
893 std::function
<void(PerfCounterInstance
& counter_instance
, PerfCounterType
& counter_type
, PyFormatter
& f
)> fct
,
894 const std::string
&svc_name
,
895 const std::string
&svc_id
,
896 const std::string
&path
) const
899 f
.open_array_section(path
);
901 without_gil_t no_gil
;
902 std::lock_guard
l(lock
);
903 auto metadata
= daemon_state
.get(DaemonKey
{svc_name
, svc_id
});
905 std::lock_guard
l2(metadata
->lock
);
906 if (metadata
->perf_counters
.instances
.count(path
)) {
907 auto counter_instance
= metadata
->perf_counters
.instances
.at(path
);
908 auto counter_type
= metadata
->perf_counters
.types
.at(path
);
909 with_gil(no_gil
, [&] {
910 fct(counter_instance
, counter_type
, f
);
913 dout(4) << "Missing counter: '" << path
<< "' ("
914 << svc_name
<< "." << svc_id
<< ")" << dendl
;
915 dout(20) << "Paths are:" << dendl
;
916 for (const auto &i
: metadata
->perf_counters
.instances
) {
917 dout(20) << i
.first
<< dendl
;
921 dout(4) << "No daemon state for " << svc_name
<< "." << svc_id
<< ")"
* ActivePyModules::get_counter_python(
930 const std::string
&svc_name
,
931 const std::string
&svc_id
,
932 const std::string
&path
)
934 auto extract_counters
= [](
935 PerfCounterInstance
& counter_instance
,
936 PerfCounterType
& counter_type
,
939 if (counter_type
.type
& PERFCOUNTER_LONGRUNAVG
) {
940 const auto &avg_data
= counter_instance
.get_data_avg();
941 for (const auto &datapoint
: avg_data
) {
942 f
.open_array_section("datapoint");
943 f
.dump_float("t", datapoint
.t
);
944 f
.dump_unsigned("s", datapoint
.s
);
945 f
.dump_unsigned("c", datapoint
.c
);
949 const auto &data
= counter_instance
.get_data();
950 for (const auto &datapoint
: data
) {
951 f
.open_array_section("datapoint");
952 f
.dump_float("t", datapoint
.t
);
953 f
.dump_unsigned("v", datapoint
.v
);
958 return with_perf_counters(extract_counters
, svc_name
, svc_id
, path
);
// Python binding: only the most recent datapoint for one counter, with
// the same LONGRUNAVG (t, s, c) vs plain (t, v) shape as
// get_counter_python().
961 PyObject
* ActivePyModules::get_latest_counter_python(
962 const std::string
&svc_name
,
963 const std::string
&svc_id
,
964 const std::string
&path
)
966 auto extract_latest_counters
= [](
967 PerfCounterInstance
& counter_instance
,
968 PerfCounterType
& counter_type
,
971 if (counter_type
.type
& PERFCOUNTER_LONGRUNAVG
) {
972 const auto &datapoint
= counter_instance
.get_latest_data_avg();
973 f
.dump_float("t", datapoint
.t
);
974 f
.dump_unsigned("s", datapoint
.s
);
975 f
.dump_unsigned("c", datapoint
.c
);
977 const auto &datapoint
= counter_instance
.get_latest_data();
978 f
.dump_float("t", datapoint
.t
);
979 f
.dump_unsigned("v", datapoint
.v
);
982 return with_perf_counters(extract_latest_counters
, svc_name
, svc_id
, path
);
// Python binding: describe the perf-counter schema (description, nick,
// type bits, priority, units) per daemon. Scope is all daemons, one
// service type, or a single daemon depending on svc_type/svc_id.
985 PyObject
* ActivePyModules::get_perf_schema_python(
986 const std::string
&svc_type
,
987 const std::string
&svc_id
)
989 without_gil_t no_gil
;
990 std::lock_guard
l(lock
);
992 DaemonStateCollection daemons
;
994 if (svc_type
== "") {
995 daemons
= daemon_state
.get_all();
996 } else if (svc_id
.empty()) {
997 daemons
= daemon_state
.get_by_service(svc_type
);
999 auto key
= DaemonKey
{svc_type
, svc_id
};
1000 // so that the below can be a loop in all cases
1001 auto got
= daemon_state
.get(key
);
1002 if (got
!= nullptr) {
1007 auto f
= with_gil(no_gil
, [&] {
1008 return PyFormatter();
1010 if (!daemons
.empty()) {
1011 for (auto& [key
, state
] : daemons
) {
1012 std::lock_guard
l(state
->lock
);
1013 with_gil(no_gil
, [&, key
=ceph::to_string(key
), state
=state
] {
1014 f
.open_object_section(key
.c_str());
1015 for (auto ctr_inst_iter
: state
->perf_counters
.instances
) {
1016 const auto &counter_name
= ctr_inst_iter
.first
;
1017 f
.open_object_section(counter_name
.c_str());
1018 auto type
= state
->perf_counters
.types
[counter_name
];
1019 f
.dump_string("description", type
.description
);
1020 if (!type
.nick
.empty()) {
1021 f
.dump_string("nick", type
.nick
);
1023 f
.dump_unsigned("type", type
.type
);
1024 f
.dump_unsigned("priority", type
.priority
);
1025 f
.dump_unsigned("units", type
.unit
);
1032 dout(4) << __func__
<< ": No daemon state found for "
1033 << svc_type
<< "." << svc_id
<< ")" << dendl
;
1038 PyObject
* ActivePyModules::get_rocksdb_version()
1040 std::string version
= std::to_string(ROCKSDB_MAJOR
) + "." +
1041 std::to_string(ROCKSDB_MINOR
) + "." +
1042 std::to_string(ROCKSDB_PATCH
);
1044 return PyUnicode_FromString(version
.c_str());
// Hand the global CephContext to python as an opaque PyCapsule. No
// refcount manipulation: the context is global with process lifetime.
1047 PyObject
*ActivePyModules::get_context()
1049 auto l
= without_gil([&] {
1050 return std::lock_guard(lock
);
1052 // Construct a capsule containing ceph context.
1053 // Not incrementing/decrementing ref count on the context because
1054 // it's the global one and it has process lifetime.
1055 auto capsule
= PyCapsule_New(g_ceph_context
, nullptr, nullptr);
// Instantiate a python wrapper type around a raw pointer: import
// `module_name`, look up class `clsname`, wrap `wrapped` in a PyCapsule,
// and call the class with that capsule as its single constructor
// argument. Import/type/construction failures are logged via
// handle_pyerror() and asserted fatal.
// (Original doc-comment delimiters are missing from this extraction.)
1060 * Helper for our wrapped types that take a capsule in their constructor.
1062 PyObject
*construct_with_capsule(
1063 const std::string
&module_name
,
1064 const std::string
&clsname
,
1067 // Look up the OSDMap type which we will construct
1068 PyObject
*module
= PyImport_ImportModule(module_name
.c_str());
1070 derr
<< "Failed to import python module:" << dendl
;
1071 derr
<< handle_pyerror(true, module_name
,
1072 "construct_with_capsule "s
+ module_name
+ " " + clsname
) << dendl
;
1074 ceph_assert(module
);
1076 PyObject
*wrapper_type
= PyObject_GetAttrString(
1077 module
, (const char*)clsname
.c_str());
1078 if (!wrapper_type
) {
1079 derr
<< "Failed to get python type:" << dendl
;
1080 derr
<< handle_pyerror(true, module_name
,
1081 "construct_with_capsule "s
+ module_name
+ " " + clsname
) << dendl
;
1083 ceph_assert(wrapper_type
);
1085 // Construct a capsule containing an OSDMap.
1086 auto wrapped_capsule
= PyCapsule_New(wrapped
, nullptr, nullptr);
1087 ceph_assert(wrapped_capsule
);
1089 // Construct the python OSDMap
1090 auto pArgs
= PyTuple_Pack(1, wrapped_capsule
);
1091 auto wrapper_instance
= PyObject_CallObject(wrapper_type
, pArgs
);
1092 if (wrapper_instance
== nullptr) {
1093 derr
<< "Failed to construct python OSDMap:" << dendl
;
1094 derr
<< handle_pyerror(true, module_name
,
1095 "construct_with_capsule "s
+ module_name
+ " " + clsname
) << dendl
;
1097 ceph_assert(wrapper_instance
!= nullptr);
// Drop the temporaries: the instance holds whatever references it needs.
1099 Py_DECREF(wrapped_capsule
);
1101 Py_DECREF(wrapper_type
);
1104 return wrapper_instance
;
// Python binding: deep-copy the current OSDMap (without the GIL) and
// hand the copy to the python-side "OSDMap" wrapper type via
// construct_with_capsule().
1107 PyObject
*ActivePyModules::get_osdmap()
1109 auto newmap
= without_gil([&] {
1110 OSDMap
*newmap
= new OSDMap
;
1111 cluster_state
.with_osdmap([&](const OSDMap
& o
) {
1112 newmap
->deepish_copy_from(o
);
1116 return construct_with_capsule("mgr_module", "OSDMap", (void*)newmap
);
// Read another daemon's effective config option. If the mons predate our
// local ConfigMap (no kv sub since startup), fall back to a synchronous
// mon "config get"; otherwise evaluate locally by generating the entity's
// config from config_map (including crush location / device class for
// OSDs) and applying daemon/default option values when unset.
1119 PyObject
*ActivePyModules::get_foreign_config(
1120 const std::string
& who
,
1121 const std::string
& name
)
1123 dout(10) << "ceph_foreign_option_get " << who
<< " " << name
<< dendl
;
1125 // NOTE: for now this will only work with build-in options, not module options.
1126 const Option
*opt
= g_conf().find_option(name
);
1128 dout(4) << "ceph_foreign_option_get " << name
<< " not found " << dendl
;
1129 PyErr_Format(PyExc_KeyError
, "option not found: %s", name
.c_str());
1133 // If the monitors are not yet running pacific, we cannot rely on our local
1135 if (!have_local_config_map
) {
1136 dout(20) << "mon cluster wasn't pacific when we started: falling back to 'config get'"
1138 without_gil_t no_gil
;
1141 std::lock_guard
l(lock
);
1144 "{\"prefix\": \"config get\","s
+
1145 "\"who\": \""s
+ who
+ "\","s
+
1146 "\"key\": \""s
+ name
+ "\"}");
1149 dout(10) << "ceph_foreign_option_get (mon command) " << who
<< " " << name
<< " = "
1150 << cmd
.outbl
.to_str() << dendl
;
1151 no_gil
.acquire_gil();
1152 return get_python_typed_option_value(opt
->type
, cmd
.outbl
.to_str());
1155 // mimic the behavor of mon/ConfigMonitor's 'config get' command
1157 if (!entity
.from_str(who
) &&
1158 !entity
.from_str(who
+ ".")) {
1159 dout(5) << "unrecognized entity '" << who
<< "'" << dendl
;
1160 PyErr_Format(PyExc_KeyError
, "invalid entity: %s", who
.c_str());
1164 without_gil_t no_gil
;
1167 // FIXME: this is super inefficient, since we generate the entire daemon
1168 // config just to extract one value from it!
1170 std::map
<std::string
,std::string
,std::less
<>> config
;
1171 cluster_state
.with_osdmap([&](const OSDMap
&osdmap
) {
1172 map
<string
,string
> crush_location
;
1173 string device_class
;
1174 if (entity
.is_osd()) {
1175 osdmap
.crush
->get_full_location(who
, &crush_location
);
1176 int id
= atoi(entity
.get_id().c_str());
1177 const char *c
= osdmap
.crush
->get_item_class(id
);
1181 dout(10) << __func__
<< " crush_location " << crush_location
1182 << " class " << device_class
<< dendl
;
1185 std::map
<std::string
,pair
<std::string
,const MaskedOption
*>> src
;
1186 config
= config_map
.generate_entity_map(
1194 // get a single value
1196 auto p
= config
.find(name
);
1197 if (p
!= config
.end()) {
// Not explicitly configured: non-clients prefer the option's
// daemon_value when one is defined, otherwise the generic default.
1200 if (!entity
.is_client() &&
1201 opt
->daemon_value
!= Option::value_t
{}) {
1202 value
= Option::to_str(opt
->daemon_value
);
1204 value
= Option::to_str(opt
->value
);
1208 dout(10) << "ceph_foreign_option_get (configmap) " << who
<< " " << name
<< " = "
1211 no_gil
.acquire_gil();
1212 return get_python_typed_option_value(opt
->type
, value
);
1215 void ActivePyModules::set_health_checks(const std::string
& module_name
,
1216 health_check_map_t
&& checks
)
1218 bool changed
= false;
1221 auto p
= modules
.find(module_name
);
1222 if (p
!= modules
.end()) {
1223 changed
= p
->second
->set_health_checks(std::move(checks
));
1227 // immediately schedule a report to be sent to the monitors with the new
1228 // health checks that have changed. This is done asynchronusly to avoid
1229 // blocking python land. ActivePyModules::lock needs to be dropped to make
1232 // send_report callers: DaemonServer::lock -> PyModuleRegistery::lock
1233 // active_start: PyModuleRegistry::lock -> ActivePyModules::lock
1235 // if we don't release this->lock before calling schedule_tick a cycle is
1236 // formed with the addition of ActivePyModules::lock -> DaemonServer::lock.
1237 // This is still correct as send_report is run asynchronously under
1238 // DaemonServer::lock.
1240 server
.schedule_tick(0);
1243 int ActivePyModules::handle_command(
1244 const ModuleCommand
& module_command
,
1245 const MgrSession
& session
,
1246 const cmdmap_t
&cmdmap
,
1247 const bufferlist
&inbuf
,
1248 std::stringstream
*ds
,
1249 std::stringstream
*ss
)
1252 auto mod_iter
= modules
.find(module_command
.module_name
);
1253 if (mod_iter
== modules
.end()) {
1254 *ss
<< "Module '" << module_command
.module_name
<< "' is not available";
1260 return mod_iter
->second
->handle_command(module_command
, session
, cmdmap
,
1264 void ActivePyModules::get_health_checks(health_check_map_t
*checks
)
1266 std::lock_guard
l(lock
);
1267 for (auto& [name
, module
] : modules
) {
1268 dout(15) << "getting health checks for " << name
<< dendl
;
1269 module
->get_health_checks(checks
);
1273 void ActivePyModules::update_progress_event(
1274 const std::string
& evid
,
1275 const std::string
& desc
,
1279 std::lock_guard
l(lock
);
1280 auto& pe
= progress_events
[evid
];
1282 pe
.progress
= progress
;
1283 pe
.add_to_ceph_s
= add_to_ceph_s
;
1286 void ActivePyModules::complete_progress_event(const std::string
& evid
)
1288 std::lock_guard
l(lock
);
1289 progress_events
.erase(evid
);
1292 void ActivePyModules::clear_all_progress_events()
1294 std::lock_guard
l(lock
);
1295 progress_events
.clear();
1298 void ActivePyModules::get_progress_events(std::map
<std::string
,ProgressEvent
> *events
)
1300 std::lock_guard
l(lock
);
1301 *events
= progress_events
;
1304 void ActivePyModules::config_notify()
1306 std::lock_guard
l(lock
);
1307 for (auto& [name
, module
] : modules
) {
1308 // Send all python calls down a Finisher to avoid blocking
1309 // C++ code, and avoid any potential lock cycles.
1310 dout(15) << "notify (config) " << name
<< dendl
;
1311 // workaround for https://bugs.llvm.org/show_bug.cgi?id=35984
1312 finisher
.queue(new LambdaContext([module
=module
](int r
){
1313 module
->config_notify();
1318 void ActivePyModules::set_uri(const std::string
& module_name
,
1319 const std::string
&uri
)
1321 std::lock_guard
l(lock
);
1323 dout(4) << " module " << module_name
<< " set URI '" << uri
<< "'" << dendl
;
1325 modules
.at(module_name
)->set_uri(uri
);
1328 void ActivePyModules::set_device_wear_level(const std::string
& devid
,
1332 map
<string
,string
> meta
;
1333 daemon_state
.with_device(
1335 [wear_level
, &meta
] (DeviceState
& dev
) {
1336 dev
.set_wear_level(wear_level
);
1337 meta
= dev
.metadata
;
1341 json_spirit::Object json_object
;
1342 for (auto& i
: meta
) {
1343 json_spirit::Config::add(json_object
, i
.first
, i
.second
);
1346 json
.append(json_spirit::write(json_object
));
1349 "\"prefix\": \"config-key set\", "
1350 "\"key\": \"device/" + devid
+ "\""
1354 set_cmd
.run(&monc
, cmd
, json
);
1358 MetricQueryID
ActivePyModules::add_osd_perf_query(
1359 const OSDPerfMetricQuery
&query
,
1360 const std::optional
<OSDPerfMetricLimit
> &limit
)
1362 return server
.add_osd_perf_query(query
, limit
);
1365 void ActivePyModules::remove_osd_perf_query(MetricQueryID query_id
)
1367 int r
= server
.remove_osd_perf_query(query_id
);
1369 dout(0) << "remove_osd_perf_query for query_id=" << query_id
<< " failed: "
1370 << cpp_strerror(r
) << dendl
;
1374 PyObject
*ActivePyModules::get_osd_perf_counters(MetricQueryID query_id
)
1376 OSDPerfCollector
collector(query_id
);
1377 int r
= server
.get_osd_perf_counters(&collector
);
1379 dout(0) << "get_osd_perf_counters for query_id=" << query_id
<< " failed: "
1380 << cpp_strerror(r
) << dendl
;
1385 const std::map
<OSDPerfMetricKey
, PerformanceCounters
> &counters
= collector
.counters
;
1387 f
.open_array_section("counters");
1388 for (auto &[key
, instance_counters
] : counters
) {
1389 f
.open_object_section("i");
1390 f
.open_array_section("k");
1391 for (auto &sub_key
: key
) {
1392 f
.open_array_section("s");
1393 for (size_t i
= 0; i
< sub_key
.size(); i
++) {
1394 f
.dump_string(stringify(i
).c_str(), sub_key
[i
]);
1396 f
.close_section(); // s
1398 f
.close_section(); // k
1399 f
.open_array_section("c");
1400 for (auto &c
: instance_counters
) {
1401 f
.open_array_section("p");
1402 f
.dump_unsigned("0", c
.first
);
1403 f
.dump_unsigned("1", c
.second
);
1404 f
.close_section(); // p
1406 f
.close_section(); // c
1407 f
.close_section(); // i
1409 f
.close_section(); // counters
1414 MetricQueryID
ActivePyModules::add_mds_perf_query(
1415 const MDSPerfMetricQuery
&query
,
1416 const std::optional
<MDSPerfMetricLimit
> &limit
)
1418 return server
.add_mds_perf_query(query
, limit
);
1421 void ActivePyModules::remove_mds_perf_query(MetricQueryID query_id
)
1423 int r
= server
.remove_mds_perf_query(query_id
);
1425 dout(0) << "remove_mds_perf_query for query_id=" << query_id
<< " failed: "
1426 << cpp_strerror(r
) << dendl
;
1430 void ActivePyModules::reregister_mds_perf_queries()
1432 server
.reregister_mds_perf_queries();
1435 PyObject
*ActivePyModules::get_mds_perf_counters(MetricQueryID query_id
)
1437 MDSPerfCollector
collector(query_id
);
1438 int r
= server
.get_mds_perf_counters(&collector
);
1440 dout(0) << "get_mds_perf_counters for query_id=" << query_id
<< " failed: "
1441 << cpp_strerror(r
) << dendl
;
1446 const std::map
<MDSPerfMetricKey
, PerformanceCounters
> &counters
= collector
.counters
;
1448 f
.open_array_section("metrics");
1450 f
.open_array_section("delayed_ranks");
1451 f
.dump_string("ranks", stringify(collector
.delayed_ranks
).c_str());
1452 f
.close_section(); // delayed_ranks
1454 f
.open_array_section("counters");
1455 for (auto &[key
, instance_counters
] : counters
) {
1456 f
.open_object_section("i");
1457 f
.open_array_section("k");
1458 for (auto &sub_key
: key
) {
1459 f
.open_array_section("s");
1460 for (size_t i
= 0; i
< sub_key
.size(); i
++) {
1461 f
.dump_string(stringify(i
).c_str(), sub_key
[i
]);
1463 f
.close_section(); // s
1465 f
.close_section(); // k
1466 f
.open_array_section("c");
1467 for (auto &c
: instance_counters
) {
1468 f
.open_array_section("p");
1469 f
.dump_unsigned("0", c
.first
);
1470 f
.dump_unsigned("1", c
.second
);
1471 f
.close_section(); // p
1473 f
.close_section(); // c
1474 f
.close_section(); // i
1476 f
.close_section(); // counters
1478 f
.open_array_section("last_updated");
1479 f
.dump_float("last_updated_mono", collector
.last_updated_mono
);
1480 f
.close_section(); // last_updated
1482 f
.close_section(); // metrics
1487 void ActivePyModules::cluster_log(const std::string
&channel
, clog_type prio
,
1488 const std::string
&message
)
1490 std::lock_guard
l(lock
);
1492 auto cl
= monc
.get_log_client()->create_channel(channel
);
1493 cl
->parse_client_options(g_ceph_context
);
1494 cl
->do_log(prio
, message
);
1497 void ActivePyModules::register_client(std::string_view name
, std::string addrs
)
1499 std::lock_guard
l(lock
);
1501 entity_addrvec_t addrv
;
1502 addrv
.parse(addrs
.data());
1504 dout(7) << "registering msgr client handle " << addrv
<< dendl
;
1505 py_module_registry
.register_client(name
, std::move(addrv
));
1508 void ActivePyModules::unregister_client(std::string_view name
, std::string addrs
)
1510 std::lock_guard
l(lock
);
1512 entity_addrvec_t addrv
;
1513 addrv
.parse(addrs
.data());
1515 dout(7) << "unregistering msgr client handle " << addrv
<< dendl
;
1516 py_module_registry
.unregister_client(name
, addrv
);