]>
git.proxmox.com Git - ceph.git/blob - ceph/src/pybind/mgr/dashboard/module.py
3 Demonstrate writing a Ceph web interface inside a mgr module.
6 # We must share a global reference to this instance, because it is the
7 # gatekeeper to all accesses to data from the C++ side (e.g. the REST API
8 # request handlers need to see it)
9 from collections
import defaultdict
12 _global_instance
= {'plugin': None}
13 def global_instance():
14 assert _global_instance
['plugin'] is not None
15 return _global_instance
['plugin']
31 from mgr_module
import MgrModule
, MgrStandbyModule
, CommandResult
33 from types
import OsdMap
, NotFound
, Config
, FsMap
, MonMap
, \
34 PgSummary
, Health
, MonStatus
39 from rbd_ls
import RbdLs
, RbdPoolLs
40 from cephfs_clients
import CephFSClients
42 log
= logging
.getLogger("dashboard")
45 # How many cluster log lines shall we hold onto in our
46 # python module for the convenience of the GUI?
49 # cherrypy likes to sys.exit on error. don't let it take us down too!
def os_exit_noop(*args, **kwargs):
    """Do nothing; accepts and ignores any arguments.

    Stand-in for ``os._exit`` so that cherrypy's habit of exiting hard on
    error cannot take the whole mgr process down with it.
    """


# Defang os._exit for this process (see comment above).
os._exit = os_exit_noop
def recurse_refs(root, path):
    """Log reference counts for *root* and, recursively, its members.

    Debug helper: walks dicts (by key) and lists (by index), building a
    human-readable access path, and logs each object's refcount and class.
    """
    if isinstance(root, dict):
        for key, value in root.items():
            recurse_refs(value, path + "->%s" % key)
    elif isinstance(root, list):
        for idx, item in enumerate(root):
            recurse_refs(item, path + "[%d]" % idx)

    log.info("%s %d (%s)" % (path, sys.getrefcount(root), root.__class__))
def get_prefixed_url(url):
    """Prepend the dashboard's configured URL prefix to *url*.

    The prefix is taken from the active plugin instance; any trailing slash
    on the prefix is dropped so the caller's *url* controls the separator.
    """
    prefix = global_instance().url_prefix
    return prefix.rstrip('/') + url
def prepare_url_prefix(url_prefix):
    """Normalize a configured URL prefix.

    return '' if no prefix, or '/prefix' without slash in the end.
    """
    # urljoin against '/' guarantees exactly one leading slash, then the
    # trailing slash (if any) is stripped.
    joined = urlparse.urljoin('/', url_prefix)
    return joined.rstrip('/')
78 class StandbyModule(MgrStandbyModule
):
80 server_addr
= self
.get_localized_config('server_addr', '::')
81 server_port
= self
.get_localized_config('server_port', '7000')
82 url_prefix
= prepare_url_prefix(self
.get_config('url_prefix', default
=''))
84 if server_addr
is None:
85 raise RuntimeError('no server_addr configured; try "ceph config-key set mgr/dashboard/server_addr <ip>"')
86 log
.info("server_addr: %s server_port: %s" % (server_addr
, server_port
))
87 cherrypy
.config
.update({
88 'server.socket_host': server_addr
,
89 'server.socket_port': int(server_port
),
90 'engine.autoreload.on': False
93 current_dir
= os
.path
.dirname(os
.path
.abspath(__file__
))
94 jinja_loader
= jinja2
.FileSystemLoader(current_dir
)
95 env
= jinja2
.Environment(loader
=jinja_loader
)
101 def default(self
, *args
, **kwargs
):
102 active_uri
= module
.get_active_uri()
104 log
.info("Redirecting to active '{0}'".format(active_uri
+ "/".join(args
)))
105 raise cherrypy
.HTTPRedirect(active_uri
+ "/".join(args
))
107 template
= env
.get_template("standby.html")
108 return template
.render(delay
=5)
110 cherrypy
.tree
.mount(Root(), url_prefix
, {})
111 log
.info("Starting engine...")
112 cherrypy
.engine
.start()
113 log
.info("Waiting for engine...")
114 cherrypy
.engine
.wait(state
=cherrypy
.engine
.states
.STOPPED
)
115 log
.info("Engine done.")
118 log
.info("Stopping server...")
119 cherrypy
.engine
.wait(state
=cherrypy
.engine
.states
.STARTED
)
120 cherrypy
.engine
.stop()
121 log
.info("Stopped server")
124 class Module(MgrModule
):
125 def __init__(self
, *args
, **kwargs
):
126 super(Module
, self
).__init
__(*args
, **kwargs
)
127 _global_instance
['plugin'] = self
128 self
.log
.info("Constructing module {0}: instance {1}".format(
129 __name__
, _global_instance
))
131 self
.log_primed
= False
132 self
.log_buffer
= collections
.deque(maxlen
=LOG_BUFFER_SIZE
)
133 self
.audit_buffer
= collections
.deque(maxlen
=LOG_BUFFER_SIZE
)
135 # Keep a librados instance for those that need it.
138 # Stateful instances of RbdLs, hold cached results. Key to dict
142 # Stateful instance of RbdPoolLs, hold cached list of RBD
144 self
.rbd_pool_ls
= RbdPoolLs(self
)
146 # Stateful instance of RbdISCSI
147 self
.rbd_iscsi
= rbd_iscsi
.Controller(self
)
149 # Stateful instance of RbdMirroring, hold cached results.
150 self
.rbd_mirroring
= rbd_mirroring
.Controller(self
)
152 # Stateful instances of CephFSClients, hold cached results. Key to
154 self
.cephfs_clients
= {}
156 # A short history of pool df stats
157 self
.pool_stats
= defaultdict(lambda: defaultdict(
158 lambda: collections
.deque(maxlen
=10)))
160 # A prefix for all URLs to use the dashboard with a reverse http proxy
166 A librados instance to be shared by any classes within
167 this mgr module that want one.
172 ctx_capsule
= self
.get_context()
173 self
._rados
= rados
.Rados(context
=ctx_capsule
)
174 self
._rados
.connect()
178 def update_pool_stats(self
):
179 df
= global_instance().get("df")
180 pool_stats
= dict([(p
['id'], p
['stats']) for p
in df
['pools']])
182 for pool_id
, stats
in pool_stats
.items():
183 for stat_name
, stat_val
in stats
.items():
184 self
.pool_stats
[pool_id
][stat_name
].appendleft((now
, stat_val
))
186 def notify(self
, notify_type
, notify_val
):
187 if notify_type
== "clog":
188 # Only store log messages once we've done our initial load,
189 # so that we don't end up duplicating.
191 if notify_val
['channel'] == "audit":
192 self
.audit_buffer
.appendleft(notify_val
)
194 self
.log_buffer
.appendleft(notify_val
)
195 elif notify_type
== "pg_summary":
196 self
.update_pool_stats()
200 def get_sync_object(self
, object_type
, path
=None):
201 if object_type
== OsdMap
:
202 data
= self
.get("osd_map")
204 assert data
is not None
206 data
['tree'] = self
.get("osd_map_tree")
207 data
['crush'] = self
.get("osd_map_crush")
208 data
['crush_map_text'] = self
.get("osd_map_crush_map_text")
209 data
['osd_metadata'] = self
.get("osd_metadata")
211 elif object_type
== Config
:
212 data
= self
.get("config")
214 elif object_type
== MonMap
:
215 data
= self
.get("mon_map")
217 elif object_type
== FsMap
:
218 data
= self
.get("fs_map")
220 elif object_type
== PgSummary
:
221 data
= self
.get("pg_summary")
222 self
.log
.debug("JSON: {0}".format(data
))
223 obj
= PgSummary(data
)
224 elif object_type
== Health
:
225 data
= self
.get("health")
226 obj
= Health(json
.loads(data
['json']))
227 elif object_type
== MonStatus
:
228 data
= self
.get("mon_status")
229 obj
= MonStatus(json
.loads(data
['json']))
231 raise NotImplementedError(object_type
)
233 # TODO: move 'path' handling up into C++ land so that we only
234 # Pythonize the part we're interested in
238 if isinstance(obj
, dict):
241 obj
= getattr(obj
, part
)
242 except (AttributeError, KeyError):
243 raise NotFound(object_type
, path
)
248 log
.info("Stopping server...")
249 cherrypy
.engine
.exit()
250 log
.info("Stopped server")
252 log
.info("Stopping librados...")
254 self
._rados
.shutdown()
255 log
.info("Stopped librados.")
257 def get_latest(self
, daemon_type
, daemon_name
, stat
):
258 data
= self
.get_counter(daemon_type
, daemon_name
, stat
)[stat
]
264 def get_rate(self
, daemon_type
, daemon_name
, stat
):
265 data
= self
.get_counter(daemon_type
, daemon_name
, stat
)[stat
]
267 if data
and len(data
) > 1:
268 return (data
[-1][1] - data
[-2][1]) / float(data
[-1][0] - data
[-2][0])
272 def format_dimless(self
, n
, width
, colored
=True):
274 Format a number without units, so as to fit into `width` characters, substituting
275 an appropriate unit suffix.
277 units
= [' ', 'k', 'M', 'G', 'T', 'P']
279 while len("%s" % (int(n
) // (1000**unit
))) > width
- 1:
283 truncated_float
= ("%f" % (n
/ (1000.0 ** unit
)))[0:width
- 1]
284 if truncated_float
[-1] == '.':
285 truncated_float
= " " + truncated_float
[0:-1]
287 truncated_float
= "%{wid}d".format(wid
=width
-1) % n
288 formatted
= "%s%s" % (truncated_float
, units
[unit
])
291 # TODO: html equivalent
293 # color = self.BLACK, False
295 # color = self.YELLOW, False
296 # return self.bold(self.colorize(formatted[0:-1], color[0], color[1])) \
297 # + self.bold(self.colorize(formatted[-1], self.BLACK, False))
302 def fs_status(self
, fs_id
):
303 mds_versions
= defaultdict(list)
305 fsmap
= self
.get("fs_map")
307 for fs
in fsmap
['filesystems']:
308 if fs
['id'] == fs_id
:
312 if filesystem
is None:
313 raise cherrypy
.HTTPError(404,
314 "Filesystem id {0} not found".format(fs_id
))
318 mdsmap
= filesystem
['mdsmap']
322 for rank
in mdsmap
["in"]:
323 up
= "mds_{0}".format(rank
) in mdsmap
["up"]
325 gid
= mdsmap
['up']["mds_{0}".format(rank
)]
326 info
= mdsmap
['info']['gid_{0}'.format(gid
)]
327 dns
= self
.get_latest("mds", info
['name'], "mds.inodes")
328 inos
= self
.get_latest("mds", info
['name'], "mds_mem.ino")
331 client_count
= self
.get_latest("mds", info
['name'],
332 "mds_sessions.session_count")
333 elif client_count
== 0:
334 # In case rank 0 was down, look at another rank's
335 # sessionmap to get an indication of clients.
336 client_count
= self
.get_latest("mds", info
['name'],
337 "mds_sessions.session_count")
339 laggy
= "laggy_since" in info
341 state
= info
['state'].split(":")[1]
345 # if state == "active" and not laggy:
346 # c_state = self.colorize(state, self.GREEN)
348 # c_state = self.colorize(state, self.YELLOW)
350 # Populate based on context of state, e.g. client
351 # ops for an active daemon, replay progress, reconnect
355 if state
== "active":
356 activity
= "Reqs: " + self
.format_dimless(
357 self
.get_rate("mds", info
['name'], "mds_server.handle_client_request"),
361 metadata
= self
.get_metadata('mds', info
['name'])
362 mds_versions
[metadata
.get('ceph_version', 'unknown')].append(info
['name'])
368 "activity": activity
,
386 # Find the standby replays
387 for gid_str
, daemon_info
in mdsmap
['info'].iteritems():
388 if daemon_info
['state'] != "up:standby-replay":
391 inos
= self
.get_latest("mds", daemon_info
['name'], "mds_mem.ino")
392 dns
= self
.get_latest("mds", daemon_info
['name'], "mds.inodes")
394 activity
= "Evts: " + self
.format_dimless(
395 self
.get_rate("mds", daemon_info
['name'], "mds_log.replay"),
401 "rank": "{0}-s".format(daemon_info
['rank']),
402 "state": "standby-replay",
403 "mds": daemon_info
['name'],
404 "activity": activity
,
411 pool_stats
= dict([(p
['id'], p
['stats']) for p
in df
['pools']])
412 osdmap
= self
.get("osd_map")
413 pools
= dict([(p
['pool'], p
) for p
in osdmap
['pools']])
414 metadata_pool_id
= mdsmap
['metadata_pool']
415 data_pool_ids
= mdsmap
['data_pools']
418 for pool_id
in [metadata_pool_id
] + data_pool_ids
:
419 pool_type
= "metadata" if pool_id
== metadata_pool_id
else "data"
420 stats
= pool_stats
[pool_id
]
422 "pool": pools
[pool_id
]['pool_name'],
424 "used": stats
['bytes_used'],
425 "avail": stats
['max_avail']
429 for standby
in fsmap
['standbys']:
430 metadata
= self
.get_metadata('mds', standby
['name'])
431 mds_versions
[metadata
.get('ceph_version', 'unknown')].append(standby
['name'])
433 standby_table
.append({
434 'name': standby
['name']
440 "name": mdsmap
['fs_name'],
441 "client_count": client_count
,
442 "clients_url": get_prefixed_url("/clients/{0}/".format(fs_id
)),
446 "standbys": standby_table
,
447 "versions": mds_versions
450 def _prime_log(self
):
451 def load_buffer(buf
, channel_name
):
452 result
= CommandResult("")
453 self
.send_command(result
, "mon", "", json
.dumps({
454 "prefix": "log last",
456 "channel": channel_name
,
457 "num": LOG_BUFFER_SIZE
459 r
, outb
, outs
= result
.wait()
461 # Oh well. We won't let this stop us though.
462 self
.log
.error("Error fetching log history (r={0}, \"{1}\")".format(
466 lines
= json
.loads(outb
)
468 self
.log
.error("Error decoding log history")
473 load_buffer(self
.log_buffer
, "cluster")
474 load_buffer(self
.audit_buffer
, "audit")
475 self
.log_primed
= True
478 current_dir
= os
.path
.dirname(os
.path
.abspath(__file__
))
480 jinja_loader
= jinja2
.FileSystemLoader(current_dir
)
481 env
= jinja2
.Environment(loader
=jinja_loader
)
485 class EndPoint(object):
486 def _health_data(self
):
487 health
= global_instance().get_sync_object(Health
).data
488 # Transform the `checks` dict into a list for the convenience
489 # of rendering from javascript.
491 for k
, v
in health
['checks'].iteritems():
495 checks
= sorted(checks
, cmp=lambda a
, b
: a
['severity'] > b
['severity'])
497 health
['checks'] = checks
501 def _toplevel_data(self
):
503 Data consumed by the base.html template
505 status
, data
= global_instance().rbd_pool_ls
.get()
507 log
.warning("Failed to get RBD pool list")
513 "url": get_prefixed_url("/rbd_pool/{0}/".format(name
))
516 ], key
=lambda k
: k
['name'])
518 status
, rbd_mirroring
= global_instance().rbd_mirroring
.toplevel
.get()
519 if rbd_mirroring
is None:
520 log
.warning("Failed to get RBD mirroring summary")
523 fsmap
= global_instance().get_sync_object(FsMap
)
527 "name": f
['mdsmap']['fs_name'],
528 "url": get_prefixed_url("/filesystem/{0}/".format(f
['id']))
530 for f
in fsmap
.data
['filesystems']
534 'rbd_pools': rbd_pools
,
535 'rbd_mirroring': rbd_mirroring
,
536 'health_status': self
._health
_data
()['status'],
537 'filesystems': filesystems
540 class Root(EndPoint
):
542 def filesystem(self
, fs_id
):
543 template
= env
.get_template("filesystem.html")
545 toplevel_data
= self
._toplevel
_data
()
548 "fs_status": global_instance().fs_status(int(fs_id
))
551 return template
.render(
552 url_prefix
= global_instance().url_prefix
,
553 ceph_version
=global_instance().version
,
554 path_info
=cherrypy
.request
.path_info
,
555 toplevel_data
=json
.dumps(toplevel_data
, indent
=2),
556 content_data
=json
.dumps(content_data
, indent
=2)
560 @cherrypy.tools
.json_out()
561 def filesystem_data(self
, fs_id
):
562 return global_instance().fs_status(int(fs_id
))
564 def _clients(self
, fs_id
):
565 cephfs_clients
= global_instance().cephfs_clients
.get(fs_id
, None)
566 if cephfs_clients
is None:
567 cephfs_clients
= CephFSClients(global_instance(), fs_id
)
568 global_instance().cephfs_clients
[fs_id
] = cephfs_clients
570 status
, clients
= cephfs_clients
.get()
571 #TODO do something sensible with status
573 # Decorate the metadata with some fields that will be
574 # indepdendent of whether it's a kernel or userspace
575 # client, so that the javascript doesn't have to grok that.
576 for client
in clients
:
577 if "ceph_version" in client
['client_metadata']:
578 client
['type'] = "userspace"
579 client
['version'] = client
['client_metadata']['ceph_version']
580 client
['hostname'] = client
['client_metadata']['hostname']
581 elif "kernel_version" in client
['client_metadata']:
582 client
['type'] = "kernel"
583 client
['version'] = client
['client_metadata']['kernel_version']
584 client
['hostname'] = client
['client_metadata']['hostname']
586 client
['type'] = "unknown"
587 client
['version'] = ""
588 client
['hostname'] = ""
593 def clients(self
, fscid_str
):
595 fscid
= int(fscid_str
)
597 raise cherrypy
.HTTPError(400,
598 "Invalid filesystem id {0}".format(fscid_str
))
601 fs_name
= FsMap(global_instance().get(
602 "fs_map")).get_filesystem(fscid
)['mdsmap']['fs_name']
604 log
.warning("Missing FSCID, dumping fsmap:\n{0}".format(
605 json
.dumps(global_instance().get("fs_map"), indent
=2)
607 raise cherrypy
.HTTPError(404,
608 "No filesystem with id {0}".format(fscid
))
610 clients
= self
._clients
(fscid
)
611 global_instance().log
.debug(json
.dumps(clients
, indent
=2))
616 "fs_url": get_prefixed_url("/filesystem/" + fscid_str
+ "/")
619 template
= env
.get_template("clients.html")
620 return template
.render(
621 url_prefix
= global_instance().url_prefix
,
622 ceph_version
=global_instance().version
,
623 path_info
=cherrypy
.request
.path_info
,
624 toplevel_data
=json
.dumps(self
._toplevel
_data
(), indent
=2),
625 content_data
=json
.dumps(content_data
, indent
=2)
629 @cherrypy.tools
.json_out()
630 def clients_data(self
, fs_id
):
631 return self
._clients
(int(fs_id
))
633 def _rbd_pool(self
, pool_name
):
634 rbd_ls
= global_instance().rbd_ls
.get(pool_name
, None)
636 rbd_ls
= RbdLs(global_instance(), pool_name
)
637 global_instance().rbd_ls
[pool_name
] = rbd_ls
639 status
, value
= rbd_ls
.get()
643 wait
= interval
- rbd_ls
.latency
648 threading
.Thread(target
=wait_and_load
).start()
650 assert status
!= RbdLs
.VALUE_NONE
# FIXME bubble status up to UI
654 def rbd_pool(self
, pool_name
):
655 template
= env
.get_template("rbd_pool.html")
657 toplevel_data
= self
._toplevel
_data
()
659 images
= self
._rbd
_pool
(pool_name
)
662 "pool_name": pool_name
665 return template
.render(
666 url_prefix
= global_instance().url_prefix
,
667 ceph_version
=global_instance().version
,
668 path_info
=cherrypy
.request
.path_info
,
669 toplevel_data
=json
.dumps(toplevel_data
, indent
=2),
670 content_data
=json
.dumps(content_data
, indent
=2)
674 @cherrypy.tools
.json_out()
675 def rbd_pool_data(self
, pool_name
):
676 return self
._rbd
_pool
(pool_name
)
678 def _rbd_mirroring(self
):
679 status
, data
= global_instance().rbd_mirroring
.content_data
.get()
681 log
.warning("Failed to get RBD mirroring status")
686 def rbd_mirroring(self
):
687 template
= env
.get_template("rbd_mirroring.html")
689 toplevel_data
= self
._toplevel
_data
()
690 content_data
= self
._rbd
_mirroring
()
692 return template
.render(
693 url_prefix
= global_instance().url_prefix
,
694 ceph_version
=global_instance().version
,
695 path_info
=cherrypy
.request
.path_info
,
696 toplevel_data
=json
.dumps(toplevel_data
, indent
=2),
697 content_data
=json
.dumps(content_data
, indent
=2)
701 @cherrypy.tools
.json_out()
702 def rbd_mirroring_data(self
):
703 return self
._rbd
_mirroring
()
705 def _rbd_iscsi(self
):
706 status
, data
= global_instance().rbd_iscsi
.content_data
.get()
708 log
.warning("Failed to get RBD iSCSI status")
714 template
= env
.get_template("rbd_iscsi.html")
716 toplevel_data
= self
._toplevel
_data
()
717 content_data
= self
._rbd
_iscsi
()
719 return template
.render(
720 url_prefix
= global_instance().url_prefix
,
721 ceph_version
=global_instance().version
,
722 path_info
=cherrypy
.request
.path_info
,
723 toplevel_data
=json
.dumps(toplevel_data
, indent
=2),
724 content_data
=json
.dumps(content_data
, indent
=2)
728 @cherrypy.tools
.json_out()
729 def rbd_iscsi_data(self
):
730 return self
._rbd
_iscsi
()
734 template
= env
.get_template("health.html")
735 return template
.render(
736 url_prefix
= global_instance().url_prefix
,
737 ceph_version
=global_instance().version
,
738 path_info
=cherrypy
.request
.path_info
,
739 toplevel_data
=json
.dumps(self
._toplevel
_data
(), indent
=2),
740 content_data
=json
.dumps(self
._health
(), indent
=2)
745 template
= env
.get_template("servers.html")
746 return template
.render(
747 url_prefix
= global_instance().url_prefix
,
748 ceph_version
=global_instance().version
,
749 path_info
=cherrypy
.request
.path_info
,
750 toplevel_data
=json
.dumps(self
._toplevel
_data
(), indent
=2),
751 content_data
=json
.dumps(self
._servers
(), indent
=2)
756 'servers': global_instance().list_servers()
760 @cherrypy.tools
.json_out()
761 def servers_data(self
):
762 return self
._servers
()
765 # Fuse osdmap with pg_summary to get description of pools
766 # including their PG states
767 osd_map
= global_instance().get_sync_object(OsdMap
).data
768 pg_summary
= global_instance().get_sync_object(PgSummary
).data
771 if len(global_instance().pool_stats
) == 0:
772 global_instance().update_pool_stats()
774 for pool
in osd_map
['pools']:
775 pool
['pg_status'] = pg_summary
['by_pool'][pool
['pool'].__str
__()]
776 stats
= global_instance().pool_stats
[pool
['pool']]
779 def get_rate(series
):
781 return (float(series
[0][1]) - float(series
[1][1])) / (float(series
[0][0]) - float(series
[1][0]))
785 for stat_name
, stat_series
in stats
.items():
787 'latest': stat_series
[0][1],
788 'rate': get_rate(stat_series
),
789 'series': [i
for i
in stat_series
]
794 # Not needed, skip the effort of transmitting this
796 del osd_map
['pg_temp']
798 df
= global_instance().get("df")
799 df
['stats']['total_objects'] = sum(
800 [p
['stats']['objects'] for p
in df
['pools']])
803 "health": self
._health
_data
(),
804 "mon_status": global_instance().get_sync_object(
806 "fs_map": global_instance().get_sync_object(FsMap
).data
,
808 "clog": list(global_instance().log_buffer
),
809 "audit_log": list(global_instance().audit_buffer
),
811 "mgr_map": global_instance().get("mgr_map"),
816 @cherrypy.tools
.json_out()
817 def health_data(self
):
818 return self
._health
()
825 @cherrypy.tools
.json_out()
826 def toplevel_data(self
):
827 return self
._toplevel
_data
()
829 def _get_mds_names(self
, filesystem_id
=None):
832 fsmap
= global_instance().get("fs_map")
833 for fs
in fsmap
['filesystems']:
834 if filesystem_id
is not None and fs
['id'] != filesystem_id
:
836 names
.extend([info
['name'] for _
, info
in fs
['mdsmap']['info'].items()])
838 if filesystem_id
is None:
839 names
.extend(info
['name'] for info
in fsmap
['standbys'])
844 @cherrypy.tools
.json_out()
845 def mds_counters(self
, fs_id
):
847 Result format: map of daemon name to map of counter to list of datapoints
850 # Opinionated list of interesting performance counters for the GUI --
851 # if you need something else just add it. See how simple life is
852 # when you don't have to write general purpose APIs?
854 "mds_server.handle_client_request",
856 "mds_cache.num_strays",
858 "mds.exported_inodes",
860 "mds.imported_inodes",
867 mds_names
= self
._get
_mds
_names
(int(fs_id
))
869 for mds_name
in mds_names
:
870 result
[mds_name
] = {}
871 for counter
in counters
:
872 data
= global_instance().get_counter("mds", mds_name
, counter
)
874 result
[mds_name
][counter
] = data
[counter
]
876 result
[mds_name
][counter
] = []
881 @cherrypy.tools
.json_out()
882 def get_counter(self
, type, id, path
):
883 return global_instance().get_counter(type, id, path
)
886 @cherrypy.tools
.json_out()
887 def get_perf_schema(self
, **args
):
888 type = args
.get('type', '')
889 id = args
.get('id', '')
890 schema
= global_instance().get_perf_schema(type, id)
892 for k1
in schema
.keys(): # 'perf_schema'
893 ret
[k1
] = collections
.OrderedDict()
894 for k2
in sorted(schema
[k1
].keys()):
895 sorted_dict
= collections
.OrderedDict(
896 sorted(schema
[k1
][k2
].items(), key
=lambda i
: i
[0])
898 ret
[k1
][k2
] = sorted_dict
901 url_prefix
= prepare_url_prefix(self
.get_config('url_prefix', default
=''))
902 self
.url_prefix
= url_prefix
904 server_addr
= self
.get_localized_config('server_addr', '::')
905 server_port
= self
.get_localized_config('server_port', '7000')
906 if server_addr
is None:
907 raise RuntimeError('no server_addr configured; try "ceph config-key set mgr/dashboard/server_addr <ip>"')
908 log
.info("server_addr: %s server_port: %s" % (server_addr
, server_port
))
909 cherrypy
.config
.update({
910 'server.socket_host': server_addr
,
911 'server.socket_port': int(server_port
),
912 'engine.autoreload.on': False
915 osdmap
= self
.get_osdmap()
916 log
.info("latest osdmap is %d" % osdmap
.get_epoch())
918 # Publish the URI that others may use to access the service we're
919 # about to start serving
920 self
.set_uri("http://{0}:{1}{2}/".format(
921 socket
.getfqdn() if server_addr
== "::" else server_addr
,
926 static_dir
= os
.path
.join(current_dir
, 'static')
929 "tools.staticdir.on": True,
930 'tools.staticdir.dir': static_dir
933 log
.info("Serving static from {0}".format(static_dir
))
935 class OSDEndpoint(EndPoint
):
936 def _osd(self
, osd_id
):
939 osd_map
= global_instance().get("osd_map")
942 for o
in osd_map
['osds']:
943 if o
['osd'] == osd_id
:
947 assert osd
is not None # TODO 400
949 osd_spec
= "{0}".format(osd_id
)
951 osd_metadata
= global_instance().get_metadata(
954 result
= CommandResult("")
955 global_instance().send_command(result
, "osd", osd_spec
,
957 "prefix": "perf histogram dump",
960 r
, outb
, outs
= result
.wait()
962 histogram
= json
.loads(outb
)
966 "osd_metadata": osd_metadata
,
967 "osd_histogram": histogram
971 def perf(self
, osd_id
):
972 template
= env
.get_template("osd_perf.html")
973 toplevel_data
= self
._toplevel
_data
()
975 return template
.render(
976 url_prefix
= global_instance().url_prefix
,
977 ceph_version
=global_instance().version
,
978 path_info
='/osd' + cherrypy
.request
.path_info
,
979 toplevel_data
=json
.dumps(toplevel_data
, indent
=2),
980 content_data
=json
.dumps(self
._osd
(osd_id
), indent
=2)
984 @cherrypy.tools
.json_out()
985 def perf_data(self
, osd_id
):
986 return self
._osd
(osd_id
)
989 @cherrypy.tools
.json_out()
991 return self
._osds
_by
_server
()
993 def _osd_summary(self
, osd_id
, osd_info
):
995 The info used for displaying an OSD in a table
998 osd_spec
= "{0}".format(osd_id
)
1001 result
['id'] = osd_id
1002 result
['stats'] = {}
1003 result
['stats_history'] = {}
1006 for s
in ['osd.op_w', 'osd.op_in_bytes', 'osd.op_r', 'osd.op_out_bytes']:
1007 result
['stats'][s
.split(".")[1]] = global_instance().get_rate('osd', osd_spec
, s
)
1008 result
['stats_history'][s
.split(".")[1]] = \
1009 global_instance().get_counter('osd', osd_spec
, s
)[s
]
1012 for s
in ["osd.numpg", "osd.stat_bytes", "osd.stat_bytes_used"]:
1013 result
['stats'][s
.split(".")[1]] = global_instance().get_latest('osd', osd_spec
, s
)
1015 result
['up'] = osd_info
['up']
1016 result
['in'] = osd_info
['in']
1018 result
['url'] = get_prefixed_url("/osd/perf/{0}".format(osd_id
))
1022 def _osds_by_server(self
):
1023 result
= defaultdict(list)
1024 servers
= global_instance().list_servers()
1026 osd_map
= global_instance().get_sync_object(OsdMap
)
1028 for server
in servers
:
1029 hostname
= server
['hostname']
1030 services
= server
['services']
1032 if s
["type"] == "osd":
1033 osd_id
= int(s
["id"])
1034 # If metadata doesn't tally with osdmap, drop it.
1035 if osd_id
not in osd_map
.osds_by_id
:
1036 global_instance().log
.warn(
1037 "OSD service {0} missing in OSDMap, stale metadata?".format(osd_id
))
1039 summary
= self
._osd
_summary
(osd_id
,
1040 osd_map
.osds_by_id
[osd_id
])
1042 result
[hostname
].append(summary
)
1044 result
[hostname
].sort(key
=lambda a
: a
['id'])
1045 if len(result
[hostname
]):
1046 result
[hostname
][0]['first'] = True
1048 global_instance().log
.warn("result.size {0} servers.size {1}".format(
1049 len(result
), len(servers
)
1052 # Return list form for convenience of rendering
1053 return sorted(result
.items(), key
=lambda a
: a
[0])
1058 List of all OSDS grouped by host
1062 template
= env
.get_template("osds.html")
1063 toplevel_data
= self
._toplevel
_data
()
1066 "osds_by_server": self
._osds
_by
_server
()
1069 return template
.render(
1070 url_prefix
= global_instance().url_prefix
,
1071 ceph_version
=global_instance().version
,
1072 path_info
='/osd' + cherrypy
.request
.path_info
,
1073 toplevel_data
=json
.dumps(toplevel_data
, indent
=2),
1074 content_data
=json
.dumps(content_data
, indent
=2)
1077 cherrypy
.tree
.mount(Root(), get_prefixed_url("/"), conf
)
1078 cherrypy
.tree
.mount(OSDEndpoint(), get_prefixed_url("/osd"), conf
)
1080 log
.info("Starting engine on {0}:{1}...".format(
1081 server_addr
, server_port
))
1082 cherrypy
.engine
.start()
1083 log
.info("Waiting for engine...")
1084 cherrypy
.engine
.block()
1085 log
.info("Engine done.")