]>
git.proxmox.com Git - ceph.git/blob - ceph/src/pybind/mgr/dashboard/module.py
074103a5a4b059b3fdea104469d73130b517147e
# Demonstrate writing a Ceph web interface inside a mgr module.

# We must share a global reference to this instance, because it is the
# gatekeeper to all accesses to data from the C++ side (e.g. the REST API
# request handlers need to see it).
9 from collections
import defaultdict
12 _global_instance
= {'plugin': None}
13 def global_instance():
14 assert _global_instance
['plugin'] is not None
15 return _global_instance
['plugin']
30 from mgr_module
import MgrModule
, MgrStandbyModule
, CommandResult
32 from types
import OsdMap
, NotFound
, Config
, FsMap
, MonMap
, \
33 PgSummary
, Health
, MonStatus
38 from rbd_ls
import RbdLs
, RbdPoolLs
39 from cephfs_clients
import CephFSClients
# Module-level logger shared by the free functions and CherryPy handlers
# in this file.
log = logging.getLogger("dashboard")
44 # How many cluster log lines shall we hold onto in our
45 # python module for the convenience of the GUI?
48 # cherrypy likes to sys.exit on error. don't let it take us down too!
# cherrypy likes to sys.exit on error.  Don't let it take us down too!
def os_exit_noop(*args, **kwargs):
    """Do nothing; installed in place of os._exit so CherryPy's error
    handling cannot terminate the whole ceph-mgr process."""
    pass


# Monkey-patch: any call to os._exit from here on is a no-op.
os._exit = os_exit_noop
def recurse_refs(root, path):
    """Debugging aid: log the refcount of *root* and of every object
    nested inside it.

    Walks dict values and list elements recursively, building a
    human-readable access path (e.g. "top->key[0]") and logging each
    object's sys.getrefcount() together with its class.
    """
    if isinstance(root, dict):
        for k, v in root.items():
            recurse_refs(v, path + "->%s" % k)
    elif isinstance(root, list):
        for n, i in enumerate(root):
            recurse_refs(i, path + "[%d]" % n)

    # Lazy %-style logging args: formatting is deferred until the record
    # is actually emitted (message text is unchanged).
    log.info("%s %d (%s)", path, sys.getrefcount(root), root.__class__)
def get_prefixed_url(url):
    """Return *url* with the module's configured reverse-proxy URL
    prefix prepended (the prefix is read from the active Module
    instance's ``url_prefix`` attribute)."""
    return global_instance().url_prefix + url
70 class StandbyModule(MgrStandbyModule
):
72 server_addr
= self
.get_localized_config('server_addr', '::')
73 server_port
= self
.get_localized_config('server_port', '7000')
74 if server_addr
is None:
75 raise RuntimeError('no server_addr configured; try "ceph config-key set mgr/dashboard/server_addr <ip>"')
76 log
.info("server_addr: %s server_port: %s" % (server_addr
, server_port
))
77 cherrypy
.config
.update({
78 'server.socket_host': server_addr
,
79 'server.socket_port': int(server_port
),
80 'engine.autoreload.on': False
83 current_dir
= os
.path
.dirname(os
.path
.abspath(__file__
))
84 jinja_loader
= jinja2
.FileSystemLoader(current_dir
)
85 env
= jinja2
.Environment(loader
=jinja_loader
)
92 active_uri
= module
.get_active_uri()
94 log
.info("Redirecting to active '{0}'".format(active_uri
))
95 raise cherrypy
.HTTPRedirect(active_uri
)
97 template
= env
.get_template("standby.html")
98 return template
.render(delay
=5)
100 cherrypy
.tree
.mount(Root(), "/", {})
101 log
.info("Starting engine...")
102 cherrypy
.engine
.start()
103 log
.info("Waiting for engine...")
104 cherrypy
.engine
.wait(state
=cherrypy
.engine
.states
.STOPPED
)
105 log
.info("Engine done.")
108 log
.info("Stopping server...")
109 cherrypy
.engine
.wait(state
=cherrypy
.engine
.states
.STARTED
)
110 cherrypy
.engine
.stop()
111 log
.info("Stopped server")
114 class Module(MgrModule
):
115 def __init__(self
, *args
, **kwargs
):
116 super(Module
, self
).__init
__(*args
, **kwargs
)
117 _global_instance
['plugin'] = self
118 self
.log
.info("Constructing module {0}: instance {1}".format(
119 __name__
, _global_instance
))
121 self
.log_primed
= False
122 self
.log_buffer
= collections
.deque(maxlen
=LOG_BUFFER_SIZE
)
123 self
.audit_buffer
= collections
.deque(maxlen
=LOG_BUFFER_SIZE
)
125 # Keep a librados instance for those that need it.
128 # Stateful instances of RbdLs, hold cached results. Key to dict
132 # Stateful instance of RbdPoolLs, hold cached list of RBD
134 self
.rbd_pool_ls
= RbdPoolLs(self
)
136 # Stateful instance of RbdISCSI
137 self
.rbd_iscsi
= rbd_iscsi
.Controller(self
)
139 # Stateful instance of RbdMirroring, hold cached results.
140 self
.rbd_mirroring
= rbd_mirroring
.Controller(self
)
142 # Stateful instances of CephFSClients, hold cached results. Key to
144 self
.cephfs_clients
= {}
146 # A short history of pool df stats
147 self
.pool_stats
= defaultdict(lambda: defaultdict(
148 lambda: collections
.deque(maxlen
=10)))
150 # A prefix for all URLs to use the dashboard with a reverse http proxy
156 A librados instance to be shared by any classes within
157 this mgr module that want one.
162 ctx_capsule
= self
.get_context()
163 self
._rados
= rados
.Rados(context
=ctx_capsule
)
164 self
._rados
.connect()
168 def update_pool_stats(self
):
169 df
= global_instance().get("df")
170 pool_stats
= dict([(p
['id'], p
['stats']) for p
in df
['pools']])
172 for pool_id
, stats
in pool_stats
.items():
173 for stat_name
, stat_val
in stats
.items():
174 self
.pool_stats
[pool_id
][stat_name
].appendleft((now
, stat_val
))
176 def notify(self
, notify_type
, notify_val
):
177 if notify_type
== "clog":
178 # Only store log messages once we've done our initial load,
179 # so that we don't end up duplicating.
181 if notify_val
['channel'] == "audit":
182 self
.audit_buffer
.appendleft(notify_val
)
184 self
.log_buffer
.appendleft(notify_val
)
185 elif notify_type
== "pg_summary":
186 self
.update_pool_stats()
190 def get_sync_object(self
, object_type
, path
=None):
191 if object_type
== OsdMap
:
192 data
= self
.get("osd_map")
194 assert data
is not None
196 data
['tree'] = self
.get("osd_map_tree")
197 data
['crush'] = self
.get("osd_map_crush")
198 data
['crush_map_text'] = self
.get("osd_map_crush_map_text")
199 data
['osd_metadata'] = self
.get("osd_metadata")
201 elif object_type
== Config
:
202 data
= self
.get("config")
204 elif object_type
== MonMap
:
205 data
= self
.get("mon_map")
207 elif object_type
== FsMap
:
208 data
= self
.get("fs_map")
210 elif object_type
== PgSummary
:
211 data
= self
.get("pg_summary")
212 self
.log
.debug("JSON: {0}".format(data
))
213 obj
= PgSummary(data
)
214 elif object_type
== Health
:
215 data
= self
.get("health")
216 obj
= Health(json
.loads(data
['json']))
217 elif object_type
== MonStatus
:
218 data
= self
.get("mon_status")
219 obj
= MonStatus(json
.loads(data
['json']))
221 raise NotImplementedError(object_type
)
223 # TODO: move 'path' handling up into C++ land so that we only
224 # Pythonize the part we're interested in
228 if isinstance(obj
, dict):
231 obj
= getattr(obj
, part
)
232 except (AttributeError, KeyError):
233 raise NotFound(object_type
, path
)
238 log
.info("Stopping server...")
239 cherrypy
.engine
.exit()
240 log
.info("Stopped server")
242 log
.info("Stopping librados...")
244 self
._rados
.shutdown()
245 log
.info("Stopped librados.")
247 def get_latest(self
, daemon_type
, daemon_name
, stat
):
248 data
= self
.get_counter(daemon_type
, daemon_name
, stat
)[stat
]
254 def get_rate(self
, daemon_type
, daemon_name
, stat
):
255 data
= self
.get_counter(daemon_type
, daemon_name
, stat
)[stat
]
257 if data
and len(data
) > 1:
258 return (data
[-1][1] - data
[-2][1]) / float(data
[-1][0] - data
[-2][0])
262 def format_dimless(self
, n
, width
, colored
=True):
264 Format a number without units, so as to fit into `width` characters, substituting
265 an appropriate unit suffix.
267 units
= [' ', 'k', 'M', 'G', 'T', 'P']
269 while len("%s" % (int(n
) // (1000**unit
))) > width
- 1:
273 truncated_float
= ("%f" % (n
/ (1000.0 ** unit
)))[0:width
- 1]
274 if truncated_float
[-1] == '.':
275 truncated_float
= " " + truncated_float
[0:-1]
277 truncated_float
= "%{wid}d".format(wid
=width
-1) % n
278 formatted
= "%s%s" % (truncated_float
, units
[unit
])
281 # TODO: html equivalent
283 # color = self.BLACK, False
285 # color = self.YELLOW, False
286 # return self.bold(self.colorize(formatted[0:-1], color[0], color[1])) \
287 # + self.bold(self.colorize(formatted[-1], self.BLACK, False))
292 def fs_status(self
, fs_id
):
293 mds_versions
= defaultdict(list)
295 fsmap
= self
.get("fs_map")
297 for fs
in fsmap
['filesystems']:
298 if fs
['id'] == fs_id
:
304 mdsmap
= filesystem
['mdsmap']
308 for rank
in mdsmap
["in"]:
309 up
= "mds_{0}".format(rank
) in mdsmap
["up"]
311 gid
= mdsmap
['up']["mds_{0}".format(rank
)]
312 info
= mdsmap
['info']['gid_{0}'.format(gid
)]
313 dns
= self
.get_latest("mds", info
['name'], "mds.inodes")
314 inos
= self
.get_latest("mds", info
['name'], "mds_mem.ino")
317 client_count
= self
.get_latest("mds", info
['name'],
318 "mds_sessions.session_count")
319 elif client_count
== 0:
320 # In case rank 0 was down, look at another rank's
321 # sessionmap to get an indication of clients.
322 client_count
= self
.get_latest("mds", info
['name'],
323 "mds_sessions.session_count")
325 laggy
= "laggy_since" in info
327 state
= info
['state'].split(":")[1]
331 # if state == "active" and not laggy:
332 # c_state = self.colorize(state, self.GREEN)
334 # c_state = self.colorize(state, self.YELLOW)
336 # Populate based on context of state, e.g. client
337 # ops for an active daemon, replay progress, reconnect
341 if state
== "active":
342 activity
= "Reqs: " + self
.format_dimless(
343 self
.get_rate("mds", info
['name'], "mds_server.handle_client_request"),
347 metadata
= self
.get_metadata('mds', info
['name'])
348 mds_versions
[metadata
.get('ceph_version', 'unknown')].append(info
['name'])
354 "activity": activity
,
372 # Find the standby replays
373 for gid_str
, daemon_info
in mdsmap
['info'].iteritems():
374 if daemon_info
['state'] != "up:standby-replay":
377 inos
= self
.get_latest("mds", daemon_info
['name'], "mds_mem.ino")
378 dns
= self
.get_latest("mds", daemon_info
['name'], "mds.inodes")
380 activity
= "Evts: " + self
.format_dimless(
381 self
.get_rate("mds", daemon_info
['name'], "mds_log.replay"),
387 "rank": "{0}-s".format(daemon_info
['rank']),
388 "state": "standby-replay",
389 "mds": daemon_info
['name'],
390 "activity": activity
,
397 pool_stats
= dict([(p
['id'], p
['stats']) for p
in df
['pools']])
398 osdmap
= self
.get("osd_map")
399 pools
= dict([(p
['pool'], p
) for p
in osdmap
['pools']])
400 metadata_pool_id
= mdsmap
['metadata_pool']
401 data_pool_ids
= mdsmap
['data_pools']
404 for pool_id
in [metadata_pool_id
] + data_pool_ids
:
405 pool_type
= "metadata" if pool_id
== metadata_pool_id
else "data"
406 stats
= pool_stats
[pool_id
]
408 "pool": pools
[pool_id
]['pool_name'],
410 "used": stats
['bytes_used'],
411 "avail": stats
['max_avail']
415 for standby
in fsmap
['standbys']:
416 metadata
= self
.get_metadata('mds', standby
['name'])
417 mds_versions
[metadata
.get('ceph_version', 'unknown')].append(standby
['name'])
419 standby_table
.append({
420 'name': standby
['name']
426 "name": mdsmap
['fs_name'],
427 "client_count": client_count
,
428 "clients_url": get_prefixed_url("/clients/{0}/".format(fs_id
)),
432 "standbys": standby_table
,
433 "versions": mds_versions
437 current_dir
= os
.path
.dirname(os
.path
.abspath(__file__
))
439 jinja_loader
= jinja2
.FileSystemLoader(current_dir
)
440 env
= jinja2
.Environment(loader
=jinja_loader
)
442 result
= CommandResult("")
443 self
.send_command(result
, "mon", "", json
.dumps({
447 r
, outb
, outs
= result
.wait()
449 # Oh well. We won't let this stop us though.
450 self
.log
.error("Error fetching log history (r={0}, \"{1}\")".format(
454 lines
= json
.loads(outb
)
456 self
.log
.error("Error decoding log history")
459 if l
['channel'] == 'audit':
460 self
.audit_buffer
.appendleft(l
)
462 self
.log_buffer
.appendleft(l
)
464 self
.log_primed
= True
466 class EndPoint(object):
467 def _health_data(self
):
468 health
= global_instance().get_sync_object(Health
).data
469 # Transform the `checks` dict into a list for the convenience
470 # of rendering from javascript.
472 for k
, v
in health
['checks'].iteritems():
476 checks
= sorted(checks
, cmp=lambda a
, b
: a
['severity'] > b
['severity'])
478 health
['checks'] = checks
482 def _toplevel_data(self
):
484 Data consumed by the base.html template
486 status
, data
= global_instance().rbd_pool_ls
.get()
488 log
.warning("Failed to get RBD pool list")
494 "url": get_prefixed_url("/rbd_pool/{0}/".format(name
))
497 ], key
=lambda k
: k
['name'])
499 status
, rbd_mirroring
= global_instance().rbd_mirroring
.toplevel
.get()
500 if rbd_mirroring
is None:
501 log
.warning("Failed to get RBD mirroring summary")
504 fsmap
= global_instance().get_sync_object(FsMap
)
508 "name": f
['mdsmap']['fs_name'],
509 "url": get_prefixed_url("/filesystem/{0}/".format(f
['id']))
511 for f
in fsmap
.data
['filesystems']
515 'rbd_pools': rbd_pools
,
516 'rbd_mirroring': rbd_mirroring
,
517 'health_status': self
._health
_data
()['status'],
518 'filesystems': filesystems
521 class Root(EndPoint
):
523 def filesystem(self
, fs_id
):
524 template
= env
.get_template("filesystem.html")
526 toplevel_data
= self
._toplevel
_data
()
529 "fs_status": global_instance().fs_status(int(fs_id
))
532 return template
.render(
533 url_prefix
= global_instance().url_prefix
,
534 ceph_version
=global_instance().version
,
535 path_info
=cherrypy
.request
.path_info
,
536 toplevel_data
=json
.dumps(toplevel_data
, indent
=2),
537 content_data
=json
.dumps(content_data
, indent
=2)
541 @cherrypy.tools
.json_out()
542 def filesystem_data(self
, fs_id
):
543 return global_instance().fs_status(int(fs_id
))
545 def _clients(self
, fs_id
):
546 cephfs_clients
= global_instance().cephfs_clients
.get(fs_id
, None)
547 if cephfs_clients
is None:
548 cephfs_clients
= CephFSClients(global_instance(), fs_id
)
549 global_instance().cephfs_clients
[fs_id
] = cephfs_clients
551 status
, clients
= cephfs_clients
.get()
552 #TODO do something sensible with status
554 # Decorate the metadata with some fields that will be
555 # indepdendent of whether it's a kernel or userspace
556 # client, so that the javascript doesn't have to grok that.
557 for client
in clients
:
558 if "ceph_version" in client
['client_metadata']:
559 client
['type'] = "userspace"
560 client
['version'] = client
['client_metadata']['ceph_version']
561 client
['hostname'] = client
['client_metadata']['hostname']
562 elif "kernel_version" in client
['client_metadata']:
563 client
['type'] = "kernel"
564 client
['version'] = client
['client_metadata']['kernel_version']
565 client
['hostname'] = client
['client_metadata']['hostname']
567 client
['type'] = "unknown"
568 client
['version'] = ""
569 client
['hostname'] = ""
574 def clients(self
, fscid_str
):
576 fscid
= int(fscid_str
)
578 raise cherrypy
.HTTPError(400,
579 "Invalid filesystem id {0}".format(fscid_str
))
582 fs_name
= FsMap(global_instance().get(
583 "fs_map")).get_filesystem(fscid
)['mdsmap']['fs_name']
585 log
.warning("Missing FSCID, dumping fsmap:\n{0}".format(
586 json
.dumps(global_instance().get("fs_map"), indent
=2)
588 raise cherrypy
.HTTPError(404,
589 "No filesystem with id {0}".format(fscid
))
591 clients
= self
._clients
(fscid
)
592 global_instance().log
.debug(json
.dumps(clients
, indent
=2))
597 "fs_url": get_prefixed_url("/filesystem/" + fscid_str
+ "/")
600 template
= env
.get_template("clients.html")
601 return template
.render(
602 url_prefix
= global_instance().url_prefix
,
603 ceph_version
=global_instance().version
,
604 path_info
=cherrypy
.request
.path_info
,
605 toplevel_data
=json
.dumps(self
._toplevel
_data
(), indent
=2),
606 content_data
=json
.dumps(content_data
, indent
=2)
610 @cherrypy.tools
.json_out()
611 def clients_data(self
, fs_id
):
612 return self
._clients
(int(fs_id
))
614 def _rbd_pool(self
, pool_name
):
615 rbd_ls
= global_instance().rbd_ls
.get(pool_name
, None)
617 rbd_ls
= RbdLs(global_instance(), pool_name
)
618 global_instance().rbd_ls
[pool_name
] = rbd_ls
620 status
, value
= rbd_ls
.get()
624 wait
= interval
- rbd_ls
.latency
629 threading
.Thread(target
=wait_and_load
).start()
631 assert status
!= RbdLs
.VALUE_NONE
# FIXME bubble status up to UI
635 def rbd_pool(self
, pool_name
):
636 template
= env
.get_template("rbd_pool.html")
638 toplevel_data
= self
._toplevel
_data
()
640 images
= self
._rbd
_pool
(pool_name
)
643 "pool_name": pool_name
646 return template
.render(
647 url_prefix
= global_instance().url_prefix
,
648 ceph_version
=global_instance().version
,
649 path_info
=cherrypy
.request
.path_info
,
650 toplevel_data
=json
.dumps(toplevel_data
, indent
=2),
651 content_data
=json
.dumps(content_data
, indent
=2)
655 @cherrypy.tools
.json_out()
656 def rbd_pool_data(self
, pool_name
):
657 return self
._rbd
_pool
(pool_name
)
659 def _rbd_mirroring(self
):
660 status
, data
= global_instance().rbd_mirroring
.content_data
.get()
662 log
.warning("Failed to get RBD mirroring status")
667 def rbd_mirroring(self
):
668 template
= env
.get_template("rbd_mirroring.html")
670 toplevel_data
= self
._toplevel
_data
()
671 content_data
= self
._rbd
_mirroring
()
673 return template
.render(
674 url_prefix
= global_instance().url_prefix
,
675 ceph_version
=global_instance().version
,
676 path_info
=cherrypy
.request
.path_info
,
677 toplevel_data
=json
.dumps(toplevel_data
, indent
=2),
678 content_data
=json
.dumps(content_data
, indent
=2)
682 @cherrypy.tools
.json_out()
683 def rbd_mirroring_data(self
):
684 return self
._rbd
_mirroring
()
686 def _rbd_iscsi(self
):
687 status
, data
= global_instance().rbd_iscsi
.content_data
.get()
689 log
.warning("Failed to get RBD iSCSI status")
695 template
= env
.get_template("rbd_iscsi.html")
697 toplevel_data
= self
._toplevel
_data
()
698 content_data
= self
._rbd
_iscsi
()
700 return template
.render(
701 url_prefix
= global_instance().url_prefix
,
702 ceph_version
=global_instance().version
,
703 path_info
=cherrypy
.request
.path_info
,
704 toplevel_data
=json
.dumps(toplevel_data
, indent
=2),
705 content_data
=json
.dumps(content_data
, indent
=2)
709 @cherrypy.tools
.json_out()
710 def rbd_iscsi_data(self
):
711 return self
._rbd
_iscsi
()
715 template
= env
.get_template("health.html")
716 return template
.render(
717 url_prefix
= global_instance().url_prefix
,
718 ceph_version
=global_instance().version
,
719 path_info
=cherrypy
.request
.path_info
,
720 toplevel_data
=json
.dumps(self
._toplevel
_data
(), indent
=2),
721 content_data
=json
.dumps(self
._health
(), indent
=2)
726 template
= env
.get_template("servers.html")
727 return template
.render(
728 url_prefix
= global_instance().url_prefix
,
729 ceph_version
=global_instance().version
,
730 path_info
=cherrypy
.request
.path_info
,
731 toplevel_data
=json
.dumps(self
._toplevel
_data
(), indent
=2),
732 content_data
=json
.dumps(self
._servers
(), indent
=2)
737 'servers': global_instance().list_servers()
741 @cherrypy.tools
.json_out()
742 def servers_data(self
):
743 return self
._servers
()
746 # Fuse osdmap with pg_summary to get description of pools
747 # including their PG states
748 osd_map
= global_instance().get_sync_object(OsdMap
).data
749 pg_summary
= global_instance().get_sync_object(PgSummary
).data
752 if len(global_instance().pool_stats
) == 0:
753 global_instance().update_pool_stats()
755 for pool
in osd_map
['pools']:
756 pool
['pg_status'] = pg_summary
['by_pool'][pool
['pool'].__str
__()]
757 stats
= global_instance().pool_stats
[pool
['pool']]
760 def get_rate(series
):
762 return (float(series
[0][1]) - float(series
[1][1])) / (float(series
[0][0]) - float(series
[1][0]))
766 for stat_name
, stat_series
in stats
.items():
768 'latest': stat_series
[0][1],
769 'rate': get_rate(stat_series
),
770 'series': [i
for i
in stat_series
]
775 # Not needed, skip the effort of transmitting this
777 del osd_map
['pg_temp']
779 df
= global_instance().get("df")
780 df
['stats']['total_objects'] = sum(
781 [p
['stats']['objects'] for p
in df
['pools']])
784 "health": self
._health
_data
(),
785 "mon_status": global_instance().get_sync_object(
787 "fs_map": global_instance().get_sync_object(FsMap
).data
,
789 "clog": list(global_instance().log_buffer
),
790 "audit_log": list(global_instance().audit_buffer
),
792 "mgr_map": global_instance().get("mgr_map"),
797 @cherrypy.tools
.json_out()
798 def health_data(self
):
799 return self
._health
()
806 @cherrypy.tools
.json_out()
807 def toplevel_data(self
):
808 return self
._toplevel
_data
()
810 def _get_mds_names(self
, filesystem_id
=None):
813 fsmap
= global_instance().get("fs_map")
814 for fs
in fsmap
['filesystems']:
815 if filesystem_id
is not None and fs
['id'] != filesystem_id
:
817 names
.extend([info
['name'] for _
, info
in fs
['mdsmap']['info'].items()])
819 if filesystem_id
is None:
820 names
.extend(info
['name'] for info
in fsmap
['standbys'])
825 @cherrypy.tools
.json_out()
826 def mds_counters(self
, fs_id
):
828 Result format: map of daemon name to map of counter to list of datapoints
831 # Opinionated list of interesting performance counters for the GUI --
832 # if you need something else just add it. See how simple life is
833 # when you don't have to write general purpose APIs?
835 "mds_server.handle_client_request",
837 "mds_cache.num_strays",
839 "mds.exported_inodes",
841 "mds.imported_inodes",
848 mds_names
= self
._get
_mds
_names
(int(fs_id
))
850 for mds_name
in mds_names
:
851 result
[mds_name
] = {}
852 for counter
in counters
:
853 data
= global_instance().get_counter("mds", mds_name
, counter
)
855 result
[mds_name
][counter
] = data
[counter
]
857 result
[mds_name
][counter
] = []
862 @cherrypy.tools
.json_out()
863 def get_counter(self
, type, id, path
):
864 return global_instance().get_counter(type, id, path
)
867 @cherrypy.tools
.json_out()
868 def get_perf_schema(self
, **args
):
869 type = args
.get('type', '')
870 id = args
.get('id', '')
871 schema
= global_instance().get_perf_schema(type, id)
873 for k1
in schema
.keys(): # 'perf_schema'
874 ret
[k1
] = collections
.OrderedDict()
875 for k2
in sorted(schema
[k1
].keys()):
876 sorted_dict
= collections
.OrderedDict(
877 sorted(schema
[k1
][k2
].items(), key
=lambda i
: i
[0])
879 ret
[k1
][k2
] = sorted_dict
882 url_prefix
= self
.get_config('url_prefix')
883 if url_prefix
== None:
886 if len(url_prefix
) != 0:
887 if url_prefix
[0] != '/':
888 url_prefix
= '/'+url_prefix
889 if url_prefix
[-1] == '/':
890 url_prefix
= url_prefix
[:-1]
891 self
.url_prefix
= url_prefix
893 server_addr
= self
.get_localized_config('server_addr', '::')
894 server_port
= self
.get_localized_config('server_port', '7000')
895 if server_addr
is None:
896 raise RuntimeError('no server_addr configured; try "ceph config-key set mgr/dashboard/server_addr <ip>"')
897 log
.info("server_addr: %s server_port: %s" % (server_addr
, server_port
))
898 cherrypy
.config
.update({
899 'server.socket_host': server_addr
,
900 'server.socket_port': int(server_port
),
901 'engine.autoreload.on': False
904 osdmap
= self
.get_osdmap()
905 log
.info("latest osdmap is %d" % osdmap
.get_epoch())
907 # Publish the URI that others may use to access the service we're
908 # about to start serving
909 self
.set_uri("http://{0}:{1}/".format(
910 socket
.getfqdn() if server_addr
== "::" else server_addr
,
914 static_dir
= os
.path
.join(current_dir
, 'static')
917 "tools.staticdir.on": True,
918 'tools.staticdir.dir': static_dir
921 log
.info("Serving static from {0}".format(static_dir
))
923 class OSDEndpoint(EndPoint
):
924 def _osd(self
, osd_id
):
927 osd_map
= global_instance().get("osd_map")
930 for o
in osd_map
['osds']:
931 if o
['osd'] == osd_id
:
935 assert osd
is not None # TODO 400
937 osd_spec
= "{0}".format(osd_id
)
939 osd_metadata
= global_instance().get_metadata(
942 result
= CommandResult("")
943 global_instance().send_command(result
, "osd", osd_spec
,
945 "prefix": "perf histogram dump",
948 r
, outb
, outs
= result
.wait()
950 histogram
= json
.loads(outb
)
954 "osd_metadata": osd_metadata
,
955 "osd_histogram": histogram
959 def perf(self
, osd_id
):
960 template
= env
.get_template("osd_perf.html")
961 toplevel_data
= self
._toplevel
_data
()
963 return template
.render(
964 url_prefix
= global_instance().url_prefix
,
965 ceph_version
=global_instance().version
,
966 path_info
='/osd' + cherrypy
.request
.path_info
,
967 toplevel_data
=json
.dumps(toplevel_data
, indent
=2),
968 content_data
=json
.dumps(self
._osd
(osd_id
), indent
=2)
972 @cherrypy.tools
.json_out()
973 def perf_data(self
, osd_id
):
974 return self
._osd
(osd_id
)
977 @cherrypy.tools
.json_out()
979 return self
._osds
_by
_server
()
981 def _osd_summary(self
, osd_id
, osd_info
):
983 The info used for displaying an OSD in a table
986 osd_spec
= "{0}".format(osd_id
)
989 result
['id'] = osd_id
991 result
['stats_history'] = {}
994 for s
in ['osd.op_w', 'osd.op_in_bytes', 'osd.op_r', 'osd.op_out_bytes']:
995 result
['stats'][s
.split(".")[1]] = global_instance().get_rate('osd', osd_spec
, s
)
996 result
['stats_history'][s
.split(".")[1]] = \
997 global_instance().get_counter('osd', osd_spec
, s
)[s
]
1000 for s
in ["osd.numpg", "osd.stat_bytes", "osd.stat_bytes_used"]:
1001 result
['stats'][s
.split(".")[1]] = global_instance().get_latest('osd', osd_spec
, s
)
1003 result
['up'] = osd_info
['up']
1004 result
['in'] = osd_info
['in']
1006 result
['url'] = get_prefixed_url("/osd/perf/{0}".format(osd_id
))
1010 def _osds_by_server(self
):
1011 result
= defaultdict(list)
1012 servers
= global_instance().list_servers()
1014 osd_map
= global_instance().get_sync_object(OsdMap
)
1016 for server
in servers
:
1017 hostname
= server
['hostname']
1018 services
= server
['services']
1020 if s
["type"] == "osd":
1021 osd_id
= int(s
["id"])
1022 # If metadata doesn't tally with osdmap, drop it.
1023 if osd_id
not in osd_map
.osds_by_id
:
1024 global_instance().log
.warn(
1025 "OSD service {0} missing in OSDMap, stale metadata?".format(osd_id
))
1027 summary
= self
._osd
_summary
(osd_id
,
1028 osd_map
.osds_by_id
[osd_id
])
1030 result
[hostname
].append(summary
)
1032 result
[hostname
].sort(key
=lambda a
: a
['id'])
1033 if len(result
[hostname
]):
1034 result
[hostname
][0]['first'] = True
1036 global_instance().log
.warn("result.size {0} servers.size {1}".format(
1037 len(result
), len(servers
)
1040 # Return list form for convenience of rendering
1041 return sorted(result
.items(), key
=lambda a
: a
[0])
1046 List of all OSDS grouped by host
1050 template
= env
.get_template("osds.html")
1051 toplevel_data
= self
._toplevel
_data
()
1054 "osds_by_server": self
._osds
_by
_server
()
1057 return template
.render(
1058 url_prefix
= global_instance().url_prefix
,
1059 ceph_version
=global_instance().version
,
1060 path_info
='/osd' + cherrypy
.request
.path_info
,
1061 toplevel_data
=json
.dumps(toplevel_data
, indent
=2),
1062 content_data
=json
.dumps(content_data
, indent
=2)
1065 cherrypy
.tree
.mount(Root(), get_prefixed_url("/"), conf
)
1066 cherrypy
.tree
.mount(OSDEndpoint(), get_prefixed_url("/osd"), conf
)
1068 log
.info("Starting engine on {0}:{1}...".format(
1069 server_addr
, server_port
))
1070 cherrypy
.engine
.start()
1071 log
.info("Waiting for engine...")
1072 cherrypy
.engine
.block()
1073 log
.info("Engine done.")