# ceph/src/pybind/mgr/dashboard/module.py
#
# Demonstrate writing a Ceph web interface inside a mgr module.
6 # We must share a global reference to this instance, because it is the
7 # gatekeeper to all accesses to data from the C++ side (e.g. the REST API
8 # request handlers need to see it)
9 from collections
import defaultdict
12 _global_instance
= {'plugin': None}
13 def global_instance():
14 assert _global_instance
['plugin'] is not None
15 return _global_instance
['plugin']
29 from mgr_module
import MgrModule
, CommandResult
31 from types
import OsdMap
, NotFound
, Config
, FsMap
, MonMap
, \
32 PgSummary
, Health
, MonStatus
37 from rbd_ls
import RbdLs
, RbdPoolLs
38 from cephfs_clients
import CephFSClients
40 log
= logging
.getLogger("dashboard")
43 # How many cluster log lines shall we hold onto in our
44 # python module for the convenience of the GUI?
47 # cherrypy likes to sys.exit on error. don't let it take us down too!
51 os
._exit
= os_exit_noop
def recurse_refs(root, path):
    """Recursively log the refcount of ``root`` and of every object
    reachable from it through dicts and lists.

    :param root: object to inspect; dict values and list elements are
                 descended into recursively
    :param path: human-readable path prefix used in the log output
    """
    if isinstance(root, dict):
        for k, v in root.items():
            recurse_refs(v, path + "->%s" % k)
    elif isinstance(root, list):
        for n, i in enumerate(root):
            recurse_refs(i, path + "[%d]" % n)

    # Lazy %-style logging args: formatting is skipped entirely when the
    # INFO level is disabled, instead of eagerly building the string.
    log.info("%s %d (%s)", path, sys.getrefcount(root), root.__class__)
65 class Module(MgrModule
):
66 def __init__(self
, *args
, **kwargs
):
67 super(Module
, self
).__init
__(*args
, **kwargs
)
68 _global_instance
['plugin'] = self
69 self
.log
.info("Constructing module {0}: instance {1}".format(
70 __name__
, _global_instance
))
72 self
.log_primed
= False
73 self
.log_buffer
= collections
.deque(maxlen
=LOG_BUFFER_SIZE
)
74 self
.audit_buffer
= collections
.deque(maxlen
=LOG_BUFFER_SIZE
)
76 # Keep a librados instance for those that need it.
79 # Stateful instances of RbdLs, hold cached results. Key to dict
83 # Stateful instance of RbdPoolLs, hold cached list of RBD
85 self
.rbd_pool_ls
= RbdPoolLs(self
)
87 # Stateful instance of RbdISCSI
88 self
.rbd_iscsi
= rbd_iscsi
.Controller(self
)
90 # Stateful instance of RbdMirroring, hold cached results.
91 self
.rbd_mirroring
= rbd_mirroring
.Controller(self
)
93 # Stateful instances of CephFSClients, hold cached results. Key to
95 self
.cephfs_clients
= {}
97 # A short history of pool df stats
98 self
.pool_stats
= defaultdict(lambda: defaultdict(
99 lambda: collections
.deque(maxlen
=10)))
104 A librados instance to be shared by any classes within
105 this mgr module that want one.
110 from mgr_module
import ceph_state
111 ctx_capsule
= ceph_state
.get_context()
112 self
._rados
= rados
.Rados(context
=ctx_capsule
)
113 self
._rados
.connect()
117 def update_pool_stats(self
):
118 df
= global_instance().get("df")
119 pool_stats
= dict([(p
['id'], p
['stats']) for p
in df
['pools']])
121 for pool_id
, stats
in pool_stats
.items():
122 for stat_name
, stat_val
in stats
.items():
123 self
.pool_stats
[pool_id
][stat_name
].appendleft((now
, stat_val
))
125 def notify(self
, notify_type
, notify_val
):
126 if notify_type
== "clog":
127 # Only store log messages once we've done our initial load,
128 # so that we don't end up duplicating.
130 if notify_val
['channel'] == "audit":
131 self
.audit_buffer
.appendleft(notify_val
)
133 self
.log_buffer
.appendleft(notify_val
)
134 elif notify_type
== "pg_summary":
135 self
.update_pool_stats()
139 def get_sync_object(self
, object_type
, path
=None):
140 if object_type
== OsdMap
:
141 data
= self
.get("osd_map")
143 assert data
is not None
145 data
['tree'] = self
.get("osd_map_tree")
146 data
['crush'] = self
.get("osd_map_crush")
147 data
['crush_map_text'] = self
.get("osd_map_crush_map_text")
148 data
['osd_metadata'] = self
.get("osd_metadata")
150 elif object_type
== Config
:
151 data
= self
.get("config")
153 elif object_type
== MonMap
:
154 data
= self
.get("mon_map")
156 elif object_type
== FsMap
:
157 data
= self
.get("fs_map")
159 elif object_type
== PgSummary
:
160 data
= self
.get("pg_summary")
161 self
.log
.debug("JSON: {0}".format(data
))
162 obj
= PgSummary(data
)
163 elif object_type
== Health
:
164 data
= self
.get("health")
165 obj
= Health(json
.loads(data
['json']))
166 elif object_type
== MonStatus
:
167 data
= self
.get("mon_status")
168 obj
= MonStatus(json
.loads(data
['json']))
170 raise NotImplementedError(object_type
)
172 # TODO: move 'path' handling up into C++ land so that we only
173 # Pythonize the part we're interested in
177 if isinstance(obj
, dict):
180 obj
= getattr(obj
, part
)
181 except (AttributeError, KeyError):
182 raise NotFound(object_type
, path
)
187 log
.info("Stopping server...")
188 cherrypy
.engine
.exit()
189 log
.info("Stopped server")
191 log
.info("Stopping librados...")
193 self
._rados
.shutdown()
194 log
.info("Stopped librados.")
196 def get_latest(self
, daemon_type
, daemon_name
, stat
):
197 data
= self
.get_counter(daemon_type
, daemon_name
, stat
)[stat
]
203 def get_rate(self
, daemon_type
, daemon_name
, stat
):
204 data
= self
.get_counter(daemon_type
, daemon_name
, stat
)[stat
]
206 if data
and len(data
) > 1:
207 return (data
[-1][1] - data
[-2][1]) / float(data
[-1][0] - data
[-2][0])
211 def format_dimless(self
, n
, width
, colored
=True):
213 Format a number without units, so as to fit into `width` characters, substituting
214 an appropriate unit suffix.
216 units
= [' ', 'k', 'M', 'G', 'T', 'P']
218 while len("%s" % (int(n
) // (1000**unit
))) > width
- 1:
222 truncated_float
= ("%f" % (n
/ (1000.0 ** unit
)))[0:width
- 1]
223 if truncated_float
[-1] == '.':
224 truncated_float
= " " + truncated_float
[0:-1]
226 truncated_float
= "%{wid}d".format(wid
=width
-1) % n
227 formatted
= "%s%s" % (truncated_float
, units
[unit
])
230 # TODO: html equivalent
232 # color = self.BLACK, False
234 # color = self.YELLOW, False
235 # return self.bold(self.colorize(formatted[0:-1], color[0], color[1])) \
236 # + self.bold(self.colorize(formatted[-1], self.BLACK, False))
241 def fs_status(self
, fs_id
):
242 mds_versions
= defaultdict(list)
244 fsmap
= self
.get("fs_map")
246 for fs
in fsmap
['filesystems']:
247 if fs
['id'] == fs_id
:
253 mdsmap
= filesystem
['mdsmap']
257 for rank
in mdsmap
["in"]:
258 up
= "mds_{0}".format(rank
) in mdsmap
["up"]
260 gid
= mdsmap
['up']["mds_{0}".format(rank
)]
261 info
= mdsmap
['info']['gid_{0}'.format(gid
)]
262 dns
= self
.get_latest("mds", info
['name'], "mds.inodes")
263 inos
= self
.get_latest("mds", info
['name'], "mds_mem.ino")
266 client_count
= self
.get_latest("mds", info
['name'],
267 "mds_sessions.session_count")
268 elif client_count
== 0:
269 # In case rank 0 was down, look at another rank's
270 # sessionmap to get an indication of clients.
271 client_count
= self
.get_latest("mds", info
['name'],
272 "mds_sessions.session_count")
274 laggy
= "laggy_since" in info
276 state
= info
['state'].split(":")[1]
280 # if state == "active" and not laggy:
281 # c_state = self.colorize(state, self.GREEN)
283 # c_state = self.colorize(state, self.YELLOW)
285 # Populate based on context of state, e.g. client
286 # ops for an active daemon, replay progress, reconnect
290 if state
== "active":
291 activity
= "Reqs: " + self
.format_dimless(
292 self
.get_rate("mds", info
['name'], "mds_server.handle_client_request"),
296 metadata
= self
.get_metadata('mds', info
['name'])
297 mds_versions
[metadata
['ceph_version']].append(info
['name'])
303 "activity": activity
,
321 # Find the standby replays
322 for gid_str
, daemon_info
in mdsmap
['info'].iteritems():
323 if daemon_info
['state'] != "up:standby-replay":
326 inos
= self
.get_latest("mds", daemon_info
['name'], "mds_mem.ino")
327 dns
= self
.get_latest("mds", daemon_info
['name'], "mds.inodes")
329 activity
= "Evts: " + self
.format_dimless(
330 self
.get_rate("mds", daemon_info
['name'], "mds_log.replay"),
336 "rank": "{0}-s".format(daemon_info
['rank']),
337 "state": "standby-replay",
338 "mds": daemon_info
['name'],
339 "activity": activity
,
346 pool_stats
= dict([(p
['id'], p
['stats']) for p
in df
['pools']])
347 osdmap
= self
.get("osd_map")
348 pools
= dict([(p
['pool'], p
) for p
in osdmap
['pools']])
349 metadata_pool_id
= mdsmap
['metadata_pool']
350 data_pool_ids
= mdsmap
['data_pools']
353 for pool_id
in [metadata_pool_id
] + data_pool_ids
:
354 pool_type
= "metadata" if pool_id
== metadata_pool_id
else "data"
355 stats
= pool_stats
[pool_id
]
357 "pool": pools
[pool_id
]['pool_name'],
359 "used": stats
['bytes_used'],
360 "avail": stats
['max_avail']
364 for standby
in fsmap
['standbys']:
365 metadata
= self
.get_metadata('mds', standby
['name'])
366 mds_versions
[metadata
['ceph_version']].append(standby
['name'])
368 standby_table
.append({
369 'name': standby
['name']
375 "name": mdsmap
['fs_name'],
376 "client_count": client_count
,
377 "clients_url": "/clients/{0}/".format(fs_id
),
381 "standbys": standby_table
,
382 "versions": mds_versions
386 current_dir
= os
.path
.dirname(os
.path
.abspath(__file__
))
388 jinja_loader
= jinja2
.FileSystemLoader(current_dir
)
389 env
= jinja2
.Environment(loader
=jinja_loader
)
391 result
= CommandResult("")
392 self
.send_command(result
, "mon", "", json
.dumps({
396 r
, outb
, outs
= result
.wait()
398 # Oh well. We won't let this stop us though.
399 self
.log
.error("Error fetching log history (r={0}, \"{1}\")".format(
403 lines
= json
.loads(outb
)
405 self
.log
.error("Error decoding log history")
408 if l
['channel'] == 'audit':
409 self
.audit_buffer
.appendleft(l
)
411 self
.log_buffer
.appendleft(l
)
413 self
.log_primed
= True
415 class EndPoint(object):
416 def _health_data(self
):
417 health
= global_instance().get_sync_object(Health
).data
418 # Transform the `checks` dict into a list for the convenience
419 # of rendering from javascript.
421 for k
, v
in health
['checks'].iteritems():
425 checks
= sorted(checks
, cmp=lambda a
, b
: a
['severity'] > b
['severity'])
427 health
['checks'] = checks
431 def _toplevel_data(self
):
433 Data consumed by the base.html template
435 status
, data
= global_instance().rbd_pool_ls
.get()
437 log
.warning("Failed to get RBD pool list")
443 "url": "/rbd_pool/{0}/".format(name
)
446 ], key
=lambda k
: k
['name'])
448 status
, rbd_mirroring
= global_instance().rbd_mirroring
.toplevel
.get()
449 if rbd_mirroring
is None:
450 log
.warning("Failed to get RBD mirroring summary")
453 fsmap
= global_instance().get_sync_object(FsMap
)
457 "name": f
['mdsmap']['fs_name'],
458 "url": "/filesystem/{0}/".format(f
['id'])
460 for f
in fsmap
.data
['filesystems']
464 'rbd_pools': rbd_pools
,
465 'rbd_mirroring': rbd_mirroring
,
466 'health_status': self
._health
_data
()['status'],
467 'filesystems': filesystems
470 class Root(EndPoint
):
472 def filesystem(self
, fs_id
):
473 template
= env
.get_template("filesystem.html")
475 toplevel_data
= self
._toplevel
_data
()
478 "fs_status": global_instance().fs_status(int(fs_id
))
481 return template
.render(
482 ceph_version
=global_instance().version
,
483 path_info
=cherrypy
.request
.path_info
,
484 toplevel_data
=json
.dumps(toplevel_data
, indent
=2),
485 content_data
=json
.dumps(content_data
, indent
=2)
489 @cherrypy.tools
.json_out()
490 def filesystem_data(self
, fs_id
):
491 return global_instance().fs_status(int(fs_id
))
493 def _clients(self
, fs_id
):
494 cephfs_clients
= global_instance().cephfs_clients
.get(fs_id
, None)
495 if cephfs_clients
is None:
496 cephfs_clients
= CephFSClients(global_instance(), fs_id
)
497 global_instance().cephfs_clients
[fs_id
] = cephfs_clients
499 status
, clients
= cephfs_clients
.get()
500 #TODO do something sensible with status
502 # Decorate the metadata with some fields that will be
503 # independent of whether it's a kernel or userspace
504 # client, so that the javascript doesn't have to grok that.
505 for client
in clients
:
506 if "ceph_version" in client
['client_metadata']:
507 client
['type'] = "userspace"
508 client
['version'] = client
['client_metadata']['ceph_version']
509 client
['hostname'] = client
['client_metadata']['hostname']
510 elif "kernel_version" in client
['client_metadata']:
511 client
['type'] = "kernel"
512 client
['version'] = client
['client_metadata']['kernel_version']
513 client
['hostname'] = client
['client_metadata']['hostname']
515 client
['type'] = "unknown"
516 client
['version'] = ""
517 client
['hostname'] = ""
522 def clients(self
, fscid_str
):
524 fscid
= int(fscid_str
)
526 raise cherrypy
.HTTPError(400,
527 "Invalid filesystem id {0}".format(fscid_str
))
530 fs_name
= FsMap(global_instance().get(
531 "fs_map")).get_filesystem(fscid
)['mdsmap']['fs_name']
533 log
.warning("Missing FSCID, dumping fsmap:\n{0}".format(
534 json
.dumps(global_instance().get("fs_map"), indent
=2)
536 raise cherrypy
.HTTPError(404,
537 "No filesystem with id {0}".format(fscid
))
539 clients
= self
._clients
(fscid
)
540 global_instance().log
.debug(json
.dumps(clients
, indent
=2))
545 "fs_url": "/filesystem/" + fscid_str
+ "/"
548 template
= env
.get_template("clients.html")
549 return template
.render(
550 ceph_version
=global_instance().version
,
551 path_info
=cherrypy
.request
.path_info
,
552 toplevel_data
=json
.dumps(self
._toplevel
_data
(), indent
=2),
553 content_data
=json
.dumps(content_data
, indent
=2)
557 @cherrypy.tools
.json_out()
558 def clients_data(self
, fs_id
):
559 return self
._clients
(int(fs_id
))
561 def _rbd_pool(self
, pool_name
):
562 rbd_ls
= global_instance().rbd_ls
.get(pool_name
, None)
564 rbd_ls
= RbdLs(global_instance(), pool_name
)
565 global_instance().rbd_ls
[pool_name
] = rbd_ls
567 status
, value
= rbd_ls
.get()
571 wait
= interval
- rbd_ls
.latency
576 threading
.Thread(target
=wait_and_load
).start()
578 assert status
!= RbdLs
.VALUE_NONE
# FIXME bubble status up to UI
582 def rbd_pool(self
, pool_name
):
583 template
= env
.get_template("rbd_pool.html")
585 toplevel_data
= self
._toplevel
_data
()
587 images
= self
._rbd
_pool
(pool_name
)
590 "pool_name": pool_name
593 return template
.render(
594 ceph_version
=global_instance().version
,
595 path_info
=cherrypy
.request
.path_info
,
596 toplevel_data
=json
.dumps(toplevel_data
, indent
=2),
597 content_data
=json
.dumps(content_data
, indent
=2)
    @cherrypy.tools.json_out()
    def rbd_pool_data(self, pool_name):
        # JSON endpoint: RBD image list for `pool_name` (see _rbd_pool).
        return self._rbd_pool(pool_name)
605 def _rbd_mirroring(self
):
606 status
, data
= global_instance().rbd_mirroring
.content_data
.get()
608 log
.warning("Failed to get RBD mirroring status")
613 def rbd_mirroring(self
):
614 template
= env
.get_template("rbd_mirroring.html")
616 toplevel_data
= self
._toplevel
_data
()
617 content_data
= self
._rbd
_mirroring
()
619 return template
.render(
620 ceph_version
=global_instance().version
,
621 path_info
=cherrypy
.request
.path_info
,
622 toplevel_data
=json
.dumps(toplevel_data
, indent
=2),
623 content_data
=json
.dumps(content_data
, indent
=2)
    @cherrypy.tools.json_out()
    def rbd_mirroring_data(self):
        # JSON endpoint: cached RBD mirroring status (see _rbd_mirroring).
        return self._rbd_mirroring()
631 def _rbd_iscsi(self
):
632 status
, data
= global_instance().rbd_iscsi
.content_data
.get()
634 log
.warning("Failed to get RBD iSCSI status")
640 template
= env
.get_template("rbd_iscsi.html")
642 toplevel_data
= self
._toplevel
_data
()
643 content_data
= self
._rbd
_iscsi
()
645 return template
.render(
646 ceph_version
=global_instance().version
,
647 path_info
=cherrypy
.request
.path_info
,
648 toplevel_data
=json
.dumps(toplevel_data
, indent
=2),
649 content_data
=json
.dumps(content_data
, indent
=2)
    @cherrypy.tools.json_out()
    def rbd_iscsi_data(self):
        # JSON endpoint: cached RBD iSCSI status (see _rbd_iscsi).
        return self._rbd_iscsi()
659 template
= env
.get_template("health.html")
660 return template
.render(
661 ceph_version
=global_instance().version
,
662 path_info
=cherrypy
.request
.path_info
,
663 toplevel_data
=json
.dumps(self
._toplevel
_data
(), indent
=2),
664 content_data
=json
.dumps(self
._health
(), indent
=2)
669 template
= env
.get_template("servers.html")
670 return template
.render(
671 ceph_version
=global_instance().version
,
672 path_info
=cherrypy
.request
.path_info
,
673 toplevel_data
=json
.dumps(self
._toplevel
_data
(), indent
=2),
674 content_data
=json
.dumps(self
._servers
(), indent
=2)
679 'servers': global_instance().list_servers()
    @cherrypy.tools.json_out()
    def servers_data(self):
        # JSON endpoint: server list as assembled by _servers().
        return self._servers()
688 # Fuse osdmap with pg_summary to get description of pools
689 # including their PG states
690 osd_map
= global_instance().get_sync_object(OsdMap
).data
691 pg_summary
= global_instance().get_sync_object(PgSummary
).data
694 if len(global_instance().pool_stats
) == 0:
695 global_instance().update_pool_stats()
697 for pool
in osd_map
['pools']:
698 pool
['pg_status'] = pg_summary
['by_pool'][pool
['pool'].__str
__()]
699 stats
= global_instance().pool_stats
[pool
['pool']]
702 def get_rate(series
):
704 return (float(series
[0][1]) - float(series
[1][1])) / (float(series
[0][0]) - float(series
[1][0]))
708 for stat_name
, stat_series
in stats
.items():
710 'latest': stat_series
[0][1],
711 'rate': get_rate(stat_series
),
712 'series': [i
for i
in stat_series
]
717 # Not needed, skip the effort of transmitting this
719 del osd_map
['pg_temp']
721 df
= global_instance().get("df")
722 df
['stats']['total_objects'] = sum(
723 [p
['stats']['objects'] for p
in df
['pools']])
726 "health": self
._health
_data
(),
727 "mon_status": global_instance().get_sync_object(
729 "fs_map": global_instance().get_sync_object(FsMap
).data
,
731 "clog": list(global_instance().log_buffer
),
732 "audit_log": list(global_instance().audit_buffer
),
734 "mgr_map": global_instance().get("mgr_map"),
    @cherrypy.tools.json_out()
    def health_data(self):
        # JSON endpoint: full health payload assembled by _health().
        return self._health()
    @cherrypy.tools.json_out()
    def toplevel_data(self):
        # JSON endpoint: data consumed by the base.html template
        # (see _toplevel_data).
        return self._toplevel_data()
752 def _get_mds_names(self
, filesystem_id
=None):
755 fsmap
= global_instance().get("fs_map")
756 for fs
in fsmap
['filesystems']:
757 if filesystem_id
is not None and fs
['id'] != filesystem_id
:
759 names
.extend([info
['name'] for _
, info
in fs
['mdsmap']['info'].items()])
761 if filesystem_id
is None:
762 names
.extend(info
['name'] for info
in fsmap
['standbys'])
767 @cherrypy.tools
.json_out()
768 def mds_counters(self
, fs_id
):
770 Result format: map of daemon name to map of counter to list of datapoints
773 # Opinionated list of interesting performance counters for the GUI --
774 # if you need something else just add it. See how simple life is
775 # when you don't have to write general purpose APIs?
777 "mds_server.handle_client_request",
779 "mds_cache.num_strays",
781 "mds.exported_inodes",
783 "mds.imported_inodes",
790 mds_names
= self
._get
_mds
_names
(int(fs_id
))
792 for mds_name
in mds_names
:
793 result
[mds_name
] = {}
794 for counter
in counters
:
795 data
= global_instance().get_counter("mds", mds_name
, counter
)
797 result
[mds_name
][counter
] = data
[counter
]
799 result
[mds_name
][counter
] = []
    @cherrypy.tools.json_out()
    def get_counter(self, type, id, path):
        # JSON endpoint: raw perf-counter series for one daemon.
        # NOTE(review): `type` and `id` shadow builtins; presumably kept
        # for URL parameter compatibility — renaming would change the API.
        return global_instance().get_counter(type, id, path)
809 @cherrypy.tools
.json_out()
810 def get_perf_schema(self
, **args
):
811 type = args
.get('type', '')
812 id = args
.get('id', '')
813 schema
= global_instance().get_perf_schema(type, id)
815 for k1
in schema
.keys(): # 'perf_schema'
816 ret
[k1
] = collections
.OrderedDict()
817 for k2
in sorted(schema
[k1
].keys()):
818 sorted_dict
= collections
.OrderedDict(
819 sorted(schema
[k1
][k2
].items(), key
=lambda i
: i
[0])
821 ret
[k1
][k2
] = sorted_dict
824 server_addr
= self
.get_localized_config('server_addr', '::')
825 server_port
= self
.get_localized_config('server_port', '7000')
826 if server_addr
is None:
827 raise RuntimeError('no server_addr configured; try "ceph config-key set mgr/dashboard/server_addr <ip>"')
828 log
.info("server_addr: %s server_port: %s" % (server_addr
, server_port
))
829 cherrypy
.config
.update({
830 'server.socket_host': server_addr
,
831 'server.socket_port': int(server_port
),
832 'engine.autoreload.on': False
835 static_dir
= os
.path
.join(current_dir
, 'static')
838 "tools.staticdir.on": True,
839 'tools.staticdir.dir': static_dir
842 log
.info("Serving static from {0}".format(static_dir
))
844 class OSDEndpoint(EndPoint
):
845 def _osd(self
, osd_id
):
848 osd_map
= global_instance().get("osd_map")
851 for o
in osd_map
['osds']:
852 if o
['osd'] == osd_id
:
856 assert osd
is not None # TODO 400
858 osd_spec
= "{0}".format(osd_id
)
860 osd_metadata
= global_instance().get_metadata(
863 result
= CommandResult("")
864 global_instance().send_command(result
, "osd", osd_spec
,
866 "prefix": "perf histogram dump",
869 r
, outb
, outs
= result
.wait()
871 histogram
= json
.loads(outb
)
875 "osd_metadata": osd_metadata
,
876 "osd_histogram": histogram
880 def perf(self
, osd_id
):
881 template
= env
.get_template("osd_perf.html")
882 toplevel_data
= self
._toplevel
_data
()
884 return template
.render(
885 ceph_version
=global_instance().version
,
886 path_info
='/osd' + cherrypy
.request
.path_info
,
887 toplevel_data
=json
.dumps(toplevel_data
, indent
=2),
888 content_data
=json
.dumps(self
._osd
(osd_id
), indent
=2)
    @cherrypy.tools.json_out()
    def perf_data(self, osd_id):
        # JSON endpoint: per-OSD metadata plus perf histogram (see _osd).
        return self._osd(osd_id)
897 @cherrypy.tools
.json_out()
899 return self
._osds
_by
_server
()
901 def _osd_summary(self
, osd_id
, osd_info
):
903 The info used for displaying an OSD in a table
906 osd_spec
= "{0}".format(osd_id
)
909 result
['id'] = osd_id
911 result
['stats_history'] = {}
914 for s
in ['osd.op_w', 'osd.op_in_bytes', 'osd.op_r', 'osd.op_out_bytes']:
915 result
['stats'][s
.split(".")[1]] = global_instance().get_rate('osd', osd_spec
, s
)
916 result
['stats_history'][s
.split(".")[1]] = \
917 global_instance().get_counter('osd', osd_spec
, s
)[s
]
920 for s
in ["osd.numpg", "osd.stat_bytes", "osd.stat_bytes_used"]:
921 result
['stats'][s
.split(".")[1]] = global_instance().get_latest('osd', osd_spec
, s
)
923 result
['up'] = osd_info
['up']
924 result
['in'] = osd_info
['in']
926 result
['url'] = "/osd/perf/{0}".format(osd_id
)
930 def _osds_by_server(self
):
931 result
= defaultdict(list)
932 servers
= global_instance().list_servers()
934 osd_map
= global_instance().get_sync_object(OsdMap
)
936 for server
in servers
:
937 hostname
= server
['hostname']
938 services
= server
['services']
941 if s
["type"] == "osd":
942 osd_id
= int(s
["id"])
943 # If metadata doesn't tally with osdmap, drop it.
944 if osd_id
not in osd_map
.osds_by_id
:
945 global_instance().log
.warn(
946 "OSD service {0} missing in OSDMap, stale metadata?".format(osd_id
))
948 summary
= self
._osd
_summary
(osd_id
,
949 osd_map
.osds_by_id
[osd_id
])
952 # A little helper for rendering
953 summary
['first'] = True
955 result
[hostname
].append(summary
)
957 global_instance().log
.warn("result.size {0} servers.size {1}".format(
958 len(result
), len(servers
)
961 # Return list form for convenience of rendering
962 return result
.items()
967 List of all OSDS grouped by host
971 template
= env
.get_template("osds.html")
972 toplevel_data
= self
._toplevel
_data
()
975 "osds_by_server": self
._osds
_by
_server
()
978 return template
.render(
979 ceph_version
=global_instance().version
,
980 path_info
='/osd' + cherrypy
.request
.path_info
,
981 toplevel_data
=json
.dumps(toplevel_data
, indent
=2),
982 content_data
=json
.dumps(content_data
, indent
=2)
985 cherrypy
.tree
.mount(Root(), "/", conf
)
986 cherrypy
.tree
.mount(OSDEndpoint(), "/osd", conf
)
988 log
.info("Starting engine...")
989 cherrypy
.engine
.start()
990 log
.info("Waiting for engine...")
991 cherrypy
.engine
.block()
992 log
.info("Engine done.")