]>
Commit | Line | Data |
---|---|---|
31f18b77 FG |
1 | |
2 | """ | |
3 | Demonstrate writing a Ceph web interface inside a mgr module. | |
4 | """ | |
5 | ||
6 | # We must share a global reference to this instance, because it is the | |
7 | # gatekeeper to all accesses to data from the C++ side (e.g. the REST API | |
8 | # request handlers need to see it) | |
9 | from collections import defaultdict | |
10 | import collections | |
11 | ||
# Shared registry for the singleton Module instance; it is the gatekeeper
# for all access to data from the C++ side, so request handlers fetch it
# through global_instance().
_global_instance = {'plugin': None}


def global_instance():
    """Return the registered Module singleton (asserts one is registered)."""
    instance = _global_instance['plugin']
    assert instance is not None
    return instance
16 | ||
17 | ||
18 | import os | |
19 | import logging | |
20 | import logging.config | |
21 | import json | |
22 | import sys | |
23 | import time | |
24 | import threading | |
3efd9988 | 25 | import socket |
31f18b77 FG |
26 | |
27 | import cherrypy | |
28 | import jinja2 | |
b32b8144 | 29 | import urlparse |
a8e16298 | 30 | from distutils.version import StrictVersion |
31f18b77 | 31 | |
3efd9988 | 32 | from mgr_module import MgrModule, MgrStandbyModule, CommandResult |
31f18b77 FG |
33 | |
34 | from types import OsdMap, NotFound, Config, FsMap, MonMap, \ | |
35 | PgSummary, Health, MonStatus | |
36 | ||
37 | import rados | |
c07f9fc5 FG |
38 | import rbd_iscsi |
39 | import rbd_mirroring | |
224ce89b | 40 | from rbd_ls import RbdLs, RbdPoolLs |
31f18b77 FG |
41 | from cephfs_clients import CephFSClients |
42 | ||
31f18b77 FG |
43 | log = logging.getLogger("dashboard") |
44 | ||
45 | ||
46 | # How many cluster log lines shall we hold onto in our | |
47 | # python module for the convenience of the GUI? | |
48 | LOG_BUFFER_SIZE = 30 | |
49 | ||
a8e16298 TL |
# CherryPy 3.2.2 and later verify at startup that the ports being listened
# on are actually bound.  With the wildcard address "::" it probes both ipv4
# and ipv6, and in environments where ipv6 is not configured / supported
# (e.g. kubernetes) that probe raises an uncaught exception.
if cherrypy is not None:
    installed_version = StrictVersion(cherrypy.__version__)
    # Present from at least 3.0.0 through 3.2.2 (stock on centos:7);
    # fixed upstream in 3.2.3.
    if StrictVersion("3.1.2") <= installed_version < StrictVersion("3.2.3"):
        # https://github.com/cherrypy/cherrypy/issues/1100
        from cherrypy.process import servers
        servers.wait_for_occupied_port = lambda host, port: None
63 | ||
# cherrypy likes to sys.exit on error. don't let it take us down too!
def os_exit_noop(*args, **kwargs):
    """No-op stand-in for os._exit so CherryPy's error paths cannot
    terminate the whole ceph-mgr process."""
    return None


os._exit = os_exit_noop
69 | ||
70 | ||
def recurse_refs(root, path):
    """Debug helper: log the refcount of *root* and of every value
    reachable from it, labelling each entry with its access path."""
    if isinstance(root, dict):
        for key, value in root.items():
            recurse_refs(value, path + "->%s" % key)
    elif isinstance(root, list):
        for index, item in enumerate(root):
            recurse_refs(item, path + "[%d]" % index)

    log.info("%s %d (%s)" % (path, sys.getrefcount(root), root.__class__))
80 | ||
def get_prefixed_url(url):
    """Prepend the configured reverse-proxy URL prefix to *url*."""
    prefix = global_instance().url_prefix.rstrip('/')
    return prefix + url
83 | |
84 | ||
85 | ||
b32b8144 FG |
def prepare_url_prefix(url_prefix):
    """
    return '' if no prefix, or '/prefix' without slash in the end.
    """
    # urljoin against '/' normalises relative prefixes into absolute ones.
    normalized = urlparse.urljoin('/', url_prefix)
    return normalized.rstrip('/')
92 | ||
3efd9988 FG |
class StandbyModule(MgrStandbyModule):
    """Dashboard behaviour while this mgr is a standby: serve a page that
    redirects to the active mgr's dashboard (or a holding page if no
    active URI is known yet)."""

    def serve(self):
        """Run a minimal CherryPy app until the engine is stopped."""
        server_addr = self.get_localized_config('server_addr', '::')
        server_port = self.get_localized_config('server_port', '7000')
        url_prefix = prepare_url_prefix(self.get_config('url_prefix', default=''))

        if server_addr is None:
            raise RuntimeError('no server_addr configured; try "ceph config-key set mgr/dashboard/server_addr <ip>"')
        log.info("server_addr: %s server_port: %s" % (server_addr, server_port))
        cherrypy.config.update({
            'server.socket_host': server_addr,
            'server.socket_port': int(server_port),
            # autoreload would fork/restart inside the mgr process
            'engine.autoreload.on': False
        })

        # Templates live next to this module file.
        current_dir = os.path.dirname(os.path.abspath(__file__))
        jinja_loader = jinja2.FileSystemLoader(current_dir)
        env = jinja2.Environment(loader=jinja_loader)

        # Captured by the nested handler class below ('self' there is the
        # CherryPy handler, not this module).
        module = self

        class Root(object):
            @cherrypy.expose
            def default(self, *args, **kwargs):
                active_uri = module.get_active_uri()
                if active_uri:
                    log.info("Redirecting to active '{0}'".format(active_uri + "/".join(args)))
                    raise cherrypy.HTTPRedirect(active_uri + "/".join(args))
                else:
                    # No active mgr known yet: show a self-refreshing page.
                    template = env.get_template("standby.html")
                    return template.render(delay=5)

        cherrypy.tree.mount(Root(), url_prefix, {})
        log.info("Starting engine...")
        cherrypy.engine.start()
        # Block until shutdown() stops the engine.
        log.info("Waiting for engine...")
        cherrypy.engine.wait(state=cherrypy.engine.states.STOPPED)
        log.info("Engine done.")

    def shutdown(self):
        """Stop the CherryPy engine, unblocking serve()."""
        log.info("Stopping server...")
        # Wait for startup to finish before stopping, to avoid racing serve().
        cherrypy.engine.wait(state=cherrypy.engine.states.STARTED)
        cherrypy.engine.stop()
        log.info("Stopped server")
137 | ||
31f18b77 FG |
138 | |
139 | class Module(MgrModule): | |
    def __init__(self, *args, **kwargs):
        """Construct the dashboard module and register it as the global
        singleton used by the CherryPy request handlers."""
        super(Module, self).__init__(*args, **kwargs)
        _global_instance['plugin'] = self
        self.log.info("Constructing module {0}: instance {1}".format(
            __name__, _global_instance))

        # Set by _prime_log(); until then, incoming clog notifications are
        # dropped to avoid duplicating the initial history load.
        self.log_primed = False
        self.log_buffer = collections.deque(maxlen=LOG_BUFFER_SIZE)
        self.audit_buffer = collections.deque(maxlen=LOG_BUFFER_SIZE)

        # Keep a librados instance for those that need it.
        # Created lazily by the `rados` property.
        self._rados = None

        # Stateful instances of RbdLs, hold cached results. Key to dict
        # is pool name.
        self.rbd_ls = {}

        # Stateful instance of RbdPoolLs, hold cached list of RBD
        # pools
        self.rbd_pool_ls = RbdPoolLs(self)

        # Stateful instance of RbdISCSI
        self.rbd_iscsi = rbd_iscsi.Controller(self)

        # Stateful instance of RbdMirroring, hold cached results.
        self.rbd_mirroring = rbd_mirroring.Controller(self)

        # Stateful instances of CephFSClients, hold cached results. Key to
        # dict is FSCID
        self.cephfs_clients = {}

        # A short history of pool df stats
        # (pool_id -> stat_name -> deque of (timestamp, value), newest first)
        self.pool_stats = defaultdict(lambda: defaultdict(
            lambda: collections.deque(maxlen=10)))

        # A prefix for all URLs to use the dashboard with a reverse http proxy
        self.url_prefix = ''
177 | ||
31f18b77 FG |
178 | @property |
179 | def rados(self): | |
180 | """ | |
181 | A librados instance to be shared by any classes within | |
182 | this mgr module that want one. | |
183 | """ | |
184 | if self._rados: | |
185 | return self._rados | |
186 | ||
3efd9988 | 187 | ctx_capsule = self.get_context() |
31f18b77 FG |
188 | self._rados = rados.Rados(context=ctx_capsule) |
189 | self._rados.connect() | |
190 | ||
191 | return self._rados | |
192 | ||
31f18b77 FG |
193 | def update_pool_stats(self): |
194 | df = global_instance().get("df") | |
195 | pool_stats = dict([(p['id'], p['stats']) for p in df['pools']]) | |
196 | now = time.time() | |
197 | for pool_id, stats in pool_stats.items(): | |
198 | for stat_name, stat_val in stats.items(): | |
199 | self.pool_stats[pool_id][stat_name].appendleft((now, stat_val)) | |
200 | ||
201 | def notify(self, notify_type, notify_val): | |
202 | if notify_type == "clog": | |
203 | # Only store log messages once we've done our initial load, | |
204 | # so that we don't end up duplicating. | |
205 | if self.log_primed: | |
206 | if notify_val['channel'] == "audit": | |
207 | self.audit_buffer.appendleft(notify_val) | |
208 | else: | |
209 | self.log_buffer.appendleft(notify_val) | |
210 | elif notify_type == "pg_summary": | |
211 | self.update_pool_stats() | |
212 | else: | |
213 | pass | |
214 | ||
215 | def get_sync_object(self, object_type, path=None): | |
216 | if object_type == OsdMap: | |
217 | data = self.get("osd_map") | |
218 | ||
219 | assert data is not None | |
220 | ||
221 | data['tree'] = self.get("osd_map_tree") | |
222 | data['crush'] = self.get("osd_map_crush") | |
223 | data['crush_map_text'] = self.get("osd_map_crush_map_text") | |
224 | data['osd_metadata'] = self.get("osd_metadata") | |
225 | obj = OsdMap(data) | |
226 | elif object_type == Config: | |
227 | data = self.get("config") | |
228 | obj = Config( data) | |
229 | elif object_type == MonMap: | |
230 | data = self.get("mon_map") | |
231 | obj = MonMap(data) | |
232 | elif object_type == FsMap: | |
233 | data = self.get("fs_map") | |
234 | obj = FsMap(data) | |
235 | elif object_type == PgSummary: | |
236 | data = self.get("pg_summary") | |
237 | self.log.debug("JSON: {0}".format(data)) | |
238 | obj = PgSummary(data) | |
239 | elif object_type == Health: | |
240 | data = self.get("health") | |
241 | obj = Health(json.loads(data['json'])) | |
242 | elif object_type == MonStatus: | |
243 | data = self.get("mon_status") | |
244 | obj = MonStatus(json.loads(data['json'])) | |
245 | else: | |
246 | raise NotImplementedError(object_type) | |
247 | ||
248 | # TODO: move 'path' handling up into C++ land so that we only | |
249 | # Pythonize the part we're interested in | |
250 | if path: | |
251 | try: | |
252 | for part in path: | |
253 | if isinstance(obj, dict): | |
254 | obj = obj[part] | |
255 | else: | |
256 | obj = getattr(obj, part) | |
257 | except (AttributeError, KeyError): | |
258 | raise NotFound(object_type, path) | |
259 | ||
260 | return obj | |
261 | ||
    def shutdown(self):
        """Stop the CherryPy server, then tear down the librados handle."""
        log.info("Stopping server...")
        cherrypy.engine.exit()
        log.info("Stopped server")

        log.info("Stopping librados...")
        # Only created lazily by the `rados` property, so may still be None.
        if self._rados:
            self._rados.shutdown()
        log.info("Stopped librados.")
271 | ||
272 | def get_latest(self, daemon_type, daemon_name, stat): | |
273 | data = self.get_counter(daemon_type, daemon_name, stat)[stat] | |
274 | if data: | |
275 | return data[-1][1] | |
276 | else: | |
277 | return 0 | |
278 | ||
279 | def get_rate(self, daemon_type, daemon_name, stat): | |
280 | data = self.get_counter(daemon_type, daemon_name, stat)[stat] | |
281 | ||
282 | if data and len(data) > 1: | |
283 | return (data[-1][1] - data[-2][1]) / float(data[-1][0] - data[-2][0]) | |
284 | else: | |
285 | return 0 | |
286 | ||
287 | def format_dimless(self, n, width, colored=True): | |
288 | """ | |
289 | Format a number without units, so as to fit into `width` characters, substituting | |
290 | an appropriate unit suffix. | |
291 | """ | |
292 | units = [' ', 'k', 'M', 'G', 'T', 'P'] | |
293 | unit = 0 | |
294 | while len("%s" % (int(n) // (1000**unit))) > width - 1: | |
295 | unit += 1 | |
296 | ||
297 | if unit > 0: | |
298 | truncated_float = ("%f" % (n / (1000.0 ** unit)))[0:width - 1] | |
299 | if truncated_float[-1] == '.': | |
300 | truncated_float = " " + truncated_float[0:-1] | |
301 | else: | |
302 | truncated_float = "%{wid}d".format(wid=width-1) % n | |
303 | formatted = "%s%s" % (truncated_float, units[unit]) | |
304 | ||
305 | if colored: | |
306 | # TODO: html equivalent | |
307 | # if n == 0: | |
308 | # color = self.BLACK, False | |
309 | # else: | |
310 | # color = self.YELLOW, False | |
311 | # return self.bold(self.colorize(formatted[0:-1], color[0], color[1])) \ | |
312 | # + self.bold(self.colorize(formatted[-1], self.BLACK, False)) | |
313 | return formatted | |
314 | else: | |
315 | return formatted | |
316 | ||
    def fs_status(self, fs_id):
        """Build the status structure for one CephFS filesystem: per-rank
        MDS table (including standby-replay daemons), pool usage, standby
        daemons and MDS version breakdown.

        Raises cherrypy.HTTPError(404) if fs_id is not in the fsmap.
        """
        # ceph_version -> [daemon names], filled in as daemons are visited
        mds_versions = defaultdict(list)

        fsmap = self.get("fs_map")
        filesystem = None
        for fs in fsmap['filesystems']:
            if fs['id'] == fs_id:
                filesystem = fs
                break

        if filesystem is None:
            raise cherrypy.HTTPError(404,
                "Filesystem id {0} not found".format(fs_id))

        rank_table = []

        mdsmap = filesystem['mdsmap']

        client_count = 0

        for rank in mdsmap["in"]:
            up = "mds_{0}".format(rank) in mdsmap["up"]
            if up:
                gid = mdsmap['up']["mds_{0}".format(rank)]
                info = mdsmap['info']['gid_{0}'.format(gid)]
                dns = self.get_latest("mds", info['name'], "mds.inodes")
                inos = self.get_latest("mds", info['name'], "mds_mem.ino")

                if rank == 0:
                    client_count = self.get_latest("mds", info['name'],
                                                   "mds_sessions.session_count")
                elif client_count == 0:
                    # In case rank 0 was down, look at another rank's
                    # sessionmap to get an indication of clients.
                    client_count = self.get_latest("mds", info['name'],
                                                   "mds_sessions.session_count")

                laggy = "laggy_since" in info

                # info['state'] looks like "up:active"; keep the part after ':'
                state = info['state'].split(":")[1]
                if laggy:
                    state += "(laggy)"

                # if state == "active" and not laggy:
                #     c_state = self.colorize(state, self.GREEN)
                # else:
                #     c_state = self.colorize(state, self.YELLOW)

                # Populate based on context of state, e.g. client
                # ops for an active daemon, replay progress, reconnect
                # progress
                activity = ""

                if state == "active":
                    activity = "Reqs: " + self.format_dimless(
                        self.get_rate("mds", info['name'], "mds_server.handle_client_request"),
                        5
                    ) + "/s"

                metadata = self.get_metadata('mds', info['name'])
                mds_versions[metadata.get('ceph_version', 'unknown')].append(info['name'])
                rank_table.append(
                    {
                        "rank": rank,
                        "state": state,
                        "mds": info['name'],
                        "activity": activity,
                        "dns": dns,
                        "inos": inos
                    }
                )

            else:
                # Rank is in the map but no daemon holds it.
                rank_table.append(
                    {
                        "rank": rank,
                        "state": "failed",
                        "mds": "",
                        "activity": "",
                        "dns": 0,
                        "inos": 0
                    }
                )

        # Find the standby replays
        # NOTE(review): .iteritems() is Python 2 only; under Python 3 this
        # would need .items() — confirm the target interpreter.
        for gid_str, daemon_info in mdsmap['info'].iteritems():
            if daemon_info['state'] != "up:standby-replay":
                continue

            inos = self.get_latest("mds", daemon_info['name'], "mds_mem.ino")
            dns = self.get_latest("mds", daemon_info['name'], "mds.inodes")

            activity = "Evts: " + self.format_dimless(
                self.get_rate("mds", daemon_info['name'], "mds_log.replay"),
                5
            ) + "/s"

            rank_table.append(
                {
                    "rank": "{0}-s".format(daemon_info['rank']),
                    "state": "standby-replay",
                    "mds": daemon_info['name'],
                    "activity": activity,
                    "dns": dns,
                    "inos": inos
                }
            )

        # Usage for the filesystem's metadata pool and data pools.
        df = self.get("df")
        pool_stats = dict([(p['id'], p['stats']) for p in df['pools']])
        osdmap = self.get("osd_map")
        pools = dict([(p['pool'], p) for p in osdmap['pools']])
        metadata_pool_id = mdsmap['metadata_pool']
        data_pool_ids = mdsmap['data_pools']

        pools_table = []
        for pool_id in [metadata_pool_id] + data_pool_ids:
            pool_type = "metadata" if pool_id == metadata_pool_id else "data"
            stats = pool_stats[pool_id]
            pools_table.append({
                "pool": pools[pool_id]['pool_name'],
                "type": pool_type,
                "used": stats['bytes_used'],
                "avail": stats['max_avail']
            })

        standby_table = []
        for standby in fsmap['standbys']:
            metadata = self.get_metadata('mds', standby['name'])
            mds_versions[metadata.get('ceph_version', 'unknown')].append(standby['name'])

            standby_table.append({
                'name': standby['name']
            })

        return {
            "filesystem": {
                "id": fs_id,
                "name": mdsmap['fs_name'],
                "client_count": client_count,
                "clients_url": get_prefixed_url("/clients/{0}/".format(fs_id)),
                "ranks": rank_table,
                "pools": pools_table
            },
            "standbys": standby_table,
            "versions": mds_versions
        }
464 | ||
b32b8144 FG |
465 | def _prime_log(self): |
466 | def load_buffer(buf, channel_name): | |
467 | result = CommandResult("") | |
468 | self.send_command(result, "mon", "", json.dumps({ | |
469 | "prefix": "log last", | |
470 | "format": "json", | |
471 | "channel": channel_name, | |
472 | "num": LOG_BUFFER_SIZE | |
473 | }), "") | |
474 | r, outb, outs = result.wait() | |
475 | if r != 0: | |
476 | # Oh well. We won't let this stop us though. | |
477 | self.log.error("Error fetching log history (r={0}, \"{1}\")".format( | |
478 | r, outs)) | |
479 | else: | |
480 | try: | |
481 | lines = json.loads(outb) | |
482 | except ValueError: | |
483 | self.log.error("Error decoding log history") | |
484 | else: | |
485 | for l in lines: | |
486 | buf.appendleft(l) | |
487 | ||
488 | load_buffer(self.log_buffer, "cluster") | |
489 | load_buffer(self.audit_buffer, "audit") | |
490 | self.log_primed = True | |
491 | ||
31f18b77 FG |
492 | def serve(self): |
493 | current_dir = os.path.dirname(os.path.abspath(__file__)) | |
494 | ||
495 | jinja_loader = jinja2.FileSystemLoader(current_dir) | |
496 | env = jinja2.Environment(loader=jinja_loader) | |
497 | ||
b32b8144 | 498 | self._prime_log() |
31f18b77 | 499 | |
c07f9fc5 FG |
500 | class EndPoint(object): |
501 | def _health_data(self): | |
502 | health = global_instance().get_sync_object(Health).data | |
503 | # Transform the `checks` dict into a list for the convenience | |
504 | # of rendering from javascript. | |
505 | checks = [] | |
506 | for k, v in health['checks'].iteritems(): | |
507 | v['type'] = k | |
508 | checks.append(v) | |
509 | ||
510 | checks = sorted(checks, cmp=lambda a, b: a['severity'] > b['severity']) | |
511 | ||
512 | health['checks'] = checks | |
513 | ||
514 | return health | |
515 | ||
31f18b77 FG |
516 | def _toplevel_data(self): |
517 | """ | |
518 | Data consumed by the base.html template | |
519 | """ | |
224ce89b WB |
520 | status, data = global_instance().rbd_pool_ls.get() |
521 | if data is None: | |
522 | log.warning("Failed to get RBD pool list") | |
523 | data = [] | |
524 | ||
525 | rbd_pools = sorted([ | |
526 | { | |
527 | "name": name, | |
3efd9988 | 528 | "url": get_prefixed_url("/rbd_pool/{0}/".format(name)) |
224ce89b WB |
529 | } |
530 | for name in data | |
531 | ], key=lambda k: k['name']) | |
532 | ||
c07f9fc5 FG |
533 | status, rbd_mirroring = global_instance().rbd_mirroring.toplevel.get() |
534 | if rbd_mirroring is None: | |
535 | log.warning("Failed to get RBD mirroring summary") | |
536 | rbd_mirroring = {} | |
537 | ||
31f18b77 FG |
538 | fsmap = global_instance().get_sync_object(FsMap) |
539 | filesystems = [ | |
540 | { | |
541 | "id": f['id'], | |
542 | "name": f['mdsmap']['fs_name'], | |
3efd9988 | 543 | "url": get_prefixed_url("/filesystem/{0}/".format(f['id'])) |
31f18b77 FG |
544 | } |
545 | for f in fsmap.data['filesystems'] | |
546 | ] | |
547 | ||
548 | return { | |
224ce89b | 549 | 'rbd_pools': rbd_pools, |
c07f9fc5 | 550 | 'rbd_mirroring': rbd_mirroring, |
224ce89b | 551 | 'health_status': self._health_data()['status'], |
31f18b77 FG |
552 | 'filesystems': filesystems |
553 | } | |
554 | ||
c07f9fc5 | 555 | class Root(EndPoint): |
31f18b77 FG |
556 | @cherrypy.expose |
557 | def filesystem(self, fs_id): | |
558 | template = env.get_template("filesystem.html") | |
559 | ||
560 | toplevel_data = self._toplevel_data() | |
561 | ||
562 | content_data = { | |
563 | "fs_status": global_instance().fs_status(int(fs_id)) | |
564 | } | |
565 | ||
566 | return template.render( | |
3efd9988 | 567 | url_prefix = global_instance().url_prefix, |
31f18b77 | 568 | ceph_version=global_instance().version, |
c07f9fc5 | 569 | path_info=cherrypy.request.path_info, |
31f18b77 FG |
570 | toplevel_data=json.dumps(toplevel_data, indent=2), |
571 | content_data=json.dumps(content_data, indent=2) | |
572 | ) | |
573 | ||
574 | @cherrypy.expose | |
575 | @cherrypy.tools.json_out() | |
576 | def filesystem_data(self, fs_id): | |
577 | return global_instance().fs_status(int(fs_id)) | |
578 | ||
31f18b77 FG |
579 | def _clients(self, fs_id): |
580 | cephfs_clients = global_instance().cephfs_clients.get(fs_id, None) | |
581 | if cephfs_clients is None: | |
582 | cephfs_clients = CephFSClients(global_instance(), fs_id) | |
583 | global_instance().cephfs_clients[fs_id] = cephfs_clients | |
584 | ||
585 | status, clients = cephfs_clients.get() | |
586 | #TODO do something sensible with status | |
587 | ||
588 | # Decorate the metadata with some fields that will be | |
589 | # indepdendent of whether it's a kernel or userspace | |
590 | # client, so that the javascript doesn't have to grok that. | |
591 | for client in clients: | |
592 | if "ceph_version" in client['client_metadata']: | |
593 | client['type'] = "userspace" | |
594 | client['version'] = client['client_metadata']['ceph_version'] | |
595 | client['hostname'] = client['client_metadata']['hostname'] | |
596 | elif "kernel_version" in client['client_metadata']: | |
597 | client['type'] = "kernel" | |
224ce89b | 598 | client['version'] = client['client_metadata']['kernel_version'] |
31f18b77 FG |
599 | client['hostname'] = client['client_metadata']['hostname'] |
600 | else: | |
601 | client['type'] = "unknown" | |
602 | client['version'] = "" | |
603 | client['hostname'] = "" | |
604 | ||
605 | return clients | |
606 | ||
607 | @cherrypy.expose | |
224ce89b WB |
608 | def clients(self, fscid_str): |
609 | try: | |
610 | fscid = int(fscid_str) | |
611 | except ValueError: | |
612 | raise cherrypy.HTTPError(400, | |
613 | "Invalid filesystem id {0}".format(fscid_str)) | |
614 | ||
615 | try: | |
616 | fs_name = FsMap(global_instance().get( | |
617 | "fs_map")).get_filesystem(fscid)['mdsmap']['fs_name'] | |
618 | except NotFound: | |
619 | log.warning("Missing FSCID, dumping fsmap:\n{0}".format( | |
620 | json.dumps(global_instance().get("fs_map"), indent=2) | |
621 | )) | |
622 | raise cherrypy.HTTPError(404, | |
623 | "No filesystem with id {0}".format(fscid)) | |
624 | ||
625 | clients = self._clients(fscid) | |
31f18b77 FG |
626 | global_instance().log.debug(json.dumps(clients, indent=2)) |
627 | content_data = { | |
628 | "clients": clients, | |
224ce89b WB |
629 | "fs_name": fs_name, |
630 | "fscid": fscid, | |
3efd9988 | 631 | "fs_url": get_prefixed_url("/filesystem/" + fscid_str + "/") |
31f18b77 FG |
632 | } |
633 | ||
224ce89b | 634 | template = env.get_template("clients.html") |
31f18b77 | 635 | return template.render( |
3efd9988 | 636 | url_prefix = global_instance().url_prefix, |
31f18b77 | 637 | ceph_version=global_instance().version, |
c07f9fc5 | 638 | path_info=cherrypy.request.path_info, |
224ce89b | 639 | toplevel_data=json.dumps(self._toplevel_data(), indent=2), |
31f18b77 FG |
640 | content_data=json.dumps(content_data, indent=2) |
641 | ) | |
642 | ||
643 | @cherrypy.expose | |
644 | @cherrypy.tools.json_out() | |
645 | def clients_data(self, fs_id): | |
646 | return self._clients(int(fs_id)) | |
647 | ||
c07f9fc5 | 648 | def _rbd_pool(self, pool_name): |
31f18b77 FG |
649 | rbd_ls = global_instance().rbd_ls.get(pool_name, None) |
650 | if rbd_ls is None: | |
651 | rbd_ls = RbdLs(global_instance(), pool_name) | |
652 | global_instance().rbd_ls[pool_name] = rbd_ls | |
653 | ||
654 | status, value = rbd_ls.get() | |
655 | ||
656 | interval = 5 | |
657 | ||
658 | wait = interval - rbd_ls.latency | |
659 | def wait_and_load(): | |
660 | time.sleep(wait) | |
661 | rbd_ls.get() | |
662 | ||
663 | threading.Thread(target=wait_and_load).start() | |
664 | ||
665 | assert status != RbdLs.VALUE_NONE # FIXME bubble status up to UI | |
666 | return value | |
667 | ||
668 | @cherrypy.expose | |
c07f9fc5 FG |
669 | def rbd_pool(self, pool_name): |
670 | template = env.get_template("rbd_pool.html") | |
31f18b77 FG |
671 | |
672 | toplevel_data = self._toplevel_data() | |
673 | ||
c07f9fc5 | 674 | images = self._rbd_pool(pool_name) |
31f18b77 FG |
675 | content_data = { |
676 | "images": images, | |
677 | "pool_name": pool_name | |
678 | } | |
679 | ||
680 | return template.render( | |
3efd9988 | 681 | url_prefix = global_instance().url_prefix, |
31f18b77 | 682 | ceph_version=global_instance().version, |
c07f9fc5 FG |
683 | path_info=cherrypy.request.path_info, |
684 | toplevel_data=json.dumps(toplevel_data, indent=2), | |
685 | content_data=json.dumps(content_data, indent=2) | |
686 | ) | |
687 | ||
688 | @cherrypy.expose | |
689 | @cherrypy.tools.json_out() | |
690 | def rbd_pool_data(self, pool_name): | |
691 | return self._rbd_pool(pool_name) | |
692 | ||
693 | def _rbd_mirroring(self): | |
694 | status, data = global_instance().rbd_mirroring.content_data.get() | |
695 | if data is None: | |
696 | log.warning("Failed to get RBD mirroring status") | |
697 | return {} | |
698 | return data | |
699 | ||
700 | @cherrypy.expose | |
701 | def rbd_mirroring(self): | |
702 | template = env.get_template("rbd_mirroring.html") | |
703 | ||
704 | toplevel_data = self._toplevel_data() | |
705 | content_data = self._rbd_mirroring() | |
706 | ||
707 | return template.render( | |
3efd9988 | 708 | url_prefix = global_instance().url_prefix, |
c07f9fc5 FG |
709 | ceph_version=global_instance().version, |
710 | path_info=cherrypy.request.path_info, | |
711 | toplevel_data=json.dumps(toplevel_data, indent=2), | |
712 | content_data=json.dumps(content_data, indent=2) | |
713 | ) | |
714 | ||
715 | @cherrypy.expose | |
716 | @cherrypy.tools.json_out() | |
717 | def rbd_mirroring_data(self): | |
718 | return self._rbd_mirroring() | |
719 | ||
720 | def _rbd_iscsi(self): | |
721 | status, data = global_instance().rbd_iscsi.content_data.get() | |
722 | if data is None: | |
723 | log.warning("Failed to get RBD iSCSI status") | |
724 | return {} | |
725 | return data | |
726 | ||
727 | @cherrypy.expose | |
728 | def rbd_iscsi(self): | |
729 | template = env.get_template("rbd_iscsi.html") | |
730 | ||
731 | toplevel_data = self._toplevel_data() | |
732 | content_data = self._rbd_iscsi() | |
733 | ||
734 | return template.render( | |
3efd9988 | 735 | url_prefix = global_instance().url_prefix, |
c07f9fc5 FG |
736 | ceph_version=global_instance().version, |
737 | path_info=cherrypy.request.path_info, | |
31f18b77 FG |
738 | toplevel_data=json.dumps(toplevel_data, indent=2), |
739 | content_data=json.dumps(content_data, indent=2) | |
740 | ) | |
741 | ||
742 | @cherrypy.expose | |
743 | @cherrypy.tools.json_out() | |
c07f9fc5 FG |
744 | def rbd_iscsi_data(self): |
745 | return self._rbd_iscsi() | |
31f18b77 FG |
746 | |
747 | @cherrypy.expose | |
748 | def health(self): | |
749 | template = env.get_template("health.html") | |
750 | return template.render( | |
3efd9988 | 751 | url_prefix = global_instance().url_prefix, |
31f18b77 | 752 | ceph_version=global_instance().version, |
c07f9fc5 | 753 | path_info=cherrypy.request.path_info, |
31f18b77 FG |
754 | toplevel_data=json.dumps(self._toplevel_data(), indent=2), |
755 | content_data=json.dumps(self._health(), indent=2) | |
756 | ) | |
757 | ||
758 | @cherrypy.expose | |
759 | def servers(self): | |
760 | template = env.get_template("servers.html") | |
761 | return template.render( | |
3efd9988 | 762 | url_prefix = global_instance().url_prefix, |
31f18b77 | 763 | ceph_version=global_instance().version, |
c07f9fc5 | 764 | path_info=cherrypy.request.path_info, |
31f18b77 FG |
765 | toplevel_data=json.dumps(self._toplevel_data(), indent=2), |
766 | content_data=json.dumps(self._servers(), indent=2) | |
767 | ) | |
1adf2230 AA |
768 | |
769 | @cherrypy.expose | |
770 | def config_options(self, service="any"): | |
771 | template = env.get_template("config_options.html") | |
772 | return template.render( | |
773 | url_prefix = global_instance().url_prefix, | |
774 | ceph_version=global_instance().version, | |
775 | path_info=cherrypy.request.path_info, | |
776 | toplevel_data=json.dumps(self._toplevel_data(), indent=2), | |
777 | content_data=json.dumps(self.config_options_data(service), indent=2) | |
778 | ) | |
779 | ||
780 | @cherrypy.expose | |
781 | @cherrypy.tools.json_out() | |
782 | def config_options_data(self, service): | |
783 | options = {} | |
784 | options = global_instance().get("config_options") | |
785 | ||
786 | return { | |
787 | 'options': options, | |
788 | 'service': service, | |
789 | } | |
31f18b77 FG |
790 | |
791 | def _servers(self): | |
31f18b77 FG |
792 | return { |
793 | 'servers': global_instance().list_servers() | |
794 | } | |
795 | ||
796 | @cherrypy.expose | |
797 | @cherrypy.tools.json_out() | |
798 | def servers_data(self): | |
799 | return self._servers() | |
800 | ||
1adf2230 AA |
801 | @cherrypy.expose |
802 | def perf_counters(self, service_type, service_id): | |
803 | template = env.get_template("perf_counters.html") | |
804 | toplevel_data = self._toplevel_data() | |
805 | ||
806 | return template.render( | |
807 | url_prefix = global_instance().url_prefix, | |
808 | ceph_version=global_instance().version, | |
809 | path_info=cherrypy.request.path_info, | |
810 | toplevel_data=json.dumps(toplevel_data, indent=2), | |
811 | content_data=json.dumps(self.perf_counters_data(service_type, service_id), indent=2) | |
812 | ) | |
813 | ||
814 | @cherrypy.expose | |
815 | @cherrypy.tools.json_out() | |
816 | def perf_counters_data(self, service_type, service_id): | |
817 | schema = global_instance().get_perf_schema(service_type, str(service_id)).values()[0] | |
818 | counters = [] | |
819 | ||
820 | for key, value in sorted(schema.items()): | |
821 | counter = dict() | |
822 | counter["name"] = str(key) | |
823 | counter["description"] = value["description"] | |
824 | if global_instance()._stattype_to_str(value["type"]) == 'counter': | |
825 | counter["value"] = global_instance().get_rate(service_type, service_id, key) | |
826 | counter["unit"] = global_instance()._unit_to_str(value["units"]) | |
827 | else: | |
828 | counter["value"] = global_instance().get_latest(service_type, service_id, key) | |
829 | counter["unit"] = "" | |
830 | counters.append(counter) | |
831 | ||
832 | return { | |
833 | 'service_type': service_type, | |
834 | 'service_id': service_id, | |
835 | 'counters': counters, | |
836 | } | |
837 | ||
31f18b77 FG |
838 | def _health(self): |
839 | # Fuse osdmap with pg_summary to get description of pools | |
840 | # including their PG states | |
841 | osd_map = global_instance().get_sync_object(OsdMap).data | |
842 | pg_summary = global_instance().get_sync_object(PgSummary).data | |
843 | pools = [] | |
844 | ||
845 | if len(global_instance().pool_stats) == 0: | |
846 | global_instance().update_pool_stats() | |
847 | ||
848 | for pool in osd_map['pools']: | |
849 | pool['pg_status'] = pg_summary['by_pool'][pool['pool'].__str__()] | |
850 | stats = global_instance().pool_stats[pool['pool']] | |
851 | s = {} | |
852 | ||
853 | def get_rate(series): | |
854 | if len(series) >= 2: | |
855 | return (float(series[0][1]) - float(series[1][1])) / (float(series[0][0]) - float(series[1][0])) | |
856 | else: | |
857 | return 0 | |
858 | ||
859 | for stat_name, stat_series in stats.items(): | |
860 | s[stat_name] = { | |
861 | 'latest': stat_series[0][1], | |
862 | 'rate': get_rate(stat_series), | |
863 | 'series': [i for i in stat_series] | |
864 | } | |
865 | pool['stats'] = s | |
866 | pools.append(pool) | |
867 | ||
868 | # Not needed, skip the effort of transmitting this | |
869 | # to UI | |
870 | del osd_map['pg_temp'] | |
871 | ||
224ce89b WB |
872 | df = global_instance().get("df") |
873 | df['stats']['total_objects'] = sum( | |
874 | [p['stats']['objects'] for p in df['pools']]) | |
875 | ||
31f18b77 | 876 | return { |
224ce89b | 877 | "health": self._health_data(), |
31f18b77 FG |
878 | "mon_status": global_instance().get_sync_object( |
879 | MonStatus).data, | |
224ce89b | 880 | "fs_map": global_instance().get_sync_object(FsMap).data, |
31f18b77 FG |
881 | "osd_map": osd_map, |
882 | "clog": list(global_instance().log_buffer), | |
883 | "audit_log": list(global_instance().audit_buffer), | |
224ce89b WB |
884 | "pools": pools, |
885 | "mgr_map": global_instance().get("mgr_map"), | |
886 | "df": df | |
31f18b77 FG |
887 | } |
888 | ||
889 | @cherrypy.expose | |
890 | @cherrypy.tools.json_out() | |
891 | def health_data(self): | |
892 | return self._health() | |
893 | ||
894 | @cherrypy.expose | |
895 | def index(self): | |
896 | return self.health() | |
897 | ||
898 | @cherrypy.expose | |
899 | @cherrypy.tools.json_out() | |
900 | def toplevel_data(self): | |
901 | return self._toplevel_data() | |
902 | ||
903 | def _get_mds_names(self, filesystem_id=None): | |
904 | names = [] | |
905 | ||
906 | fsmap = global_instance().get("fs_map") | |
907 | for fs in fsmap['filesystems']: | |
908 | if filesystem_id is not None and fs['id'] != filesystem_id: | |
909 | continue | |
910 | names.extend([info['name'] for _, info in fs['mdsmap']['info'].items()]) | |
911 | ||
912 | if filesystem_id is None: | |
913 | names.extend(info['name'] for info in fsmap['standbys']) | |
914 | ||
915 | return names | |
916 | ||
917 | @cherrypy.expose | |
918 | @cherrypy.tools.json_out() | |
919 | def mds_counters(self, fs_id): | |
920 | """ | |
921 | Result format: map of daemon name to map of counter to list of datapoints | |
922 | """ | |
923 | ||
924 | # Opinionated list of interesting performance counters for the GUI -- | |
925 | # if you need something else just add it. See how simple life is | |
926 | # when you don't have to write general purpose APIs? | |
927 | counters = [ | |
928 | "mds_server.handle_client_request", | |
929 | "mds_log.ev", | |
930 | "mds_cache.num_strays", | |
931 | "mds.exported", | |
932 | "mds.exported_inodes", | |
933 | "mds.imported", | |
934 | "mds.imported_inodes", | |
935 | "mds.inodes", | |
936 | "mds.caps", | |
937 | "mds.subtrees" | |
938 | ] | |
939 | ||
940 | result = {} | |
941 | mds_names = self._get_mds_names(int(fs_id)) | |
942 | ||
943 | for mds_name in mds_names: | |
944 | result[mds_name] = {} | |
945 | for counter in counters: | |
946 | data = global_instance().get_counter("mds", mds_name, counter) | |
947 | if data is not None: | |
948 | result[mds_name][counter] = data[counter] | |
949 | else: | |
950 | result[mds_name][counter] = [] | |
951 | ||
952 | return dict(result) | |
953 | ||
c07f9fc5 FG |
954 | @cherrypy.expose |
955 | @cherrypy.tools.json_out() | |
956 | def get_counter(self, type, id, path): | |
957 | return global_instance().get_counter(type, id, path) | |
958 | ||
959 | @cherrypy.expose | |
960 | @cherrypy.tools.json_out() | |
961 | def get_perf_schema(self, **args): | |
962 | type = args.get('type', '') | |
963 | id = args.get('id', '') | |
964 | schema = global_instance().get_perf_schema(type, id) | |
965 | ret = dict() | |
966 | for k1 in schema.keys(): # 'perf_schema' | |
967 | ret[k1] = collections.OrderedDict() | |
968 | for k2 in sorted(schema[k1].keys()): | |
969 | sorted_dict = collections.OrderedDict( | |
970 | sorted(schema[k1][k2].items(), key=lambda i: i[0]) | |
971 | ) | |
972 | ret[k1][k2] = sorted_dict | |
973 | return ret | |
974 | ||
        # --- continuation of serve(): HTTP server configuration ---
        # NOTE(review): this fragment belongs to a method whose `def` is
        # above the visible chunk (presumably Module.serve) — confirm.

        # Honour a configured URL prefix, e.g. when served behind a
        # reverse proxy under a sub-path.
        url_prefix = prepare_url_prefix(self.get_config('url_prefix', default=''))
        self.url_prefix = url_prefix

        # Bind address/port come from per-daemon (localized) config;
        # default is the IPv6 any-address on port 7000.
        server_addr = self.get_localized_config('server_addr', '::')
        server_port = self.get_localized_config('server_port', '7000')
        if server_addr is None:
            raise RuntimeError('no server_addr configured; try "ceph config-key set mgr/dashboard/server_addr <ip>"')
        log.info("server_addr: %s server_port: %s" % (server_addr, server_port))
        cherrypy.config.update({
            'server.socket_host': server_addr,
            'server.socket_port': int(server_port),
            # autoreload would watch/restart on source changes; undesirable
            # inside a mgr daemon.
            'engine.autoreload.on': False
        })

        osdmap = self.get_osdmap()
        log.info("latest osdmap is %d" % osdmap.get_epoch())

        # Publish the URI that others may use to access the service we're
        # about to start serving
        self.set_uri("http://{0}:{1}{2}/".format(
            # "::" is not a dialable address; advertise the FQDN instead.
            socket.getfqdn() if server_addr == "::" else server_addr,
            server_port,
            url_prefix
        ))

        # Static assets (css/js) are served straight off disk by CherryPy.
        static_dir = os.path.join(current_dir, 'static')
        conf = {
            "/static": {
                "tools.staticdir.on": True,
                'tools.staticdir.dir': static_dir
            }
        }
        log.info("Serving static from {0}".format(static_dir))
c07f9fc5 FG |
1008 | |
class OSDEndpoint(EndPoint):
    """
    CherryPy endpoint serving the OSD list page and per-OSD perf pages,
    plus their JSON data backends.
    """

    def _osd(self, osd_id):
        """
        Build the detail blob for one OSD: its OSDMap entry, its metadata,
        and a live perf histogram dump fetched from the daemon itself.

        :raises cherrypy.HTTPError: 404 for an unknown OSD id, 500 when the
            daemon fails the histogram command.
        """
        osd_id = int(osd_id)

        osd_map = global_instance().get("osd_map")

        osd = None
        for o in osd_map['osds']:
            if o['osd'] == osd_id:
                osd = o
                break

        # Was `assert osd is not None  # TODO 400`: asserts are stripped
        # under -O and surface as HTTP 500 — return a proper client error.
        if osd is None:
            raise cherrypy.HTTPError(404, "OSD {0} not found".format(osd_id))

        osd_spec = "{0}".format(osd_id)

        osd_metadata = global_instance().get_metadata(
            "osd", osd_spec)

        # Ask the daemon for its perf histograms via a mgr command.
        result = CommandResult("")
        global_instance().send_command(result, "osd", osd_spec,
                                       json.dumps({
                                           "prefix": "perf histogram dump",
                                       }),
                                       "")
        r, outb, outs = result.wait()
        # Was `assert r == 0`: report the failure instead of asserting.
        if r != 0:
            raise cherrypy.HTTPError(
                500, "perf histogram dump failed on osd.{0}: {1}".format(
                    osd_id, outs))
        histogram = json.loads(outb)

        return {
            "osd": osd,
            "osd_metadata": osd_metadata,
            "osd_histogram": histogram
        }

    @cherrypy.expose
    def perf(self, osd_id):
        """Render the per-OSD performance page."""
        template = env.get_template("osd_perf.html")
        toplevel_data = self._toplevel_data()

        return template.render(
            url_prefix=global_instance().url_prefix,
            ceph_version=global_instance().version,
            path_info='/osd' + cherrypy.request.path_info,
            toplevel_data=json.dumps(toplevel_data, indent=2),
            content_data=json.dumps(self._osd(osd_id), indent=2)
        )

    @cherrypy.expose
    @cherrypy.tools.json_out()
    def perf_data(self, osd_id):
        """JSON endpoint backing the per-OSD performance page."""
        return self._osd(osd_id)

    @cherrypy.expose
    @cherrypy.tools.json_out()
    def list_data(self):
        """JSON endpoint: all OSDs grouped by host."""
        return self._osds_by_server()

    def _osd_summary(self, osd_id, osd_info):
        """
        The info used for displaying an OSD in a table
        """

        osd_spec = "{0}".format(osd_id)

        result = {}
        result['id'] = osd_id
        result['stats'] = {}
        result['stats_history'] = {}

        # Counter stats: per-second rate plus the raw time series
        for s in ['osd.op_w', 'osd.op_in_bytes', 'osd.op_r', 'osd.op_out_bytes']:
            result['stats'][s.split(".")[1]] = global_instance().get_rate('osd', osd_spec, s)
            result['stats_history'][s.split(".")[1]] = \
                global_instance().get_counter('osd', osd_spec, s)[s]

        # Gauge stats: latest value only
        for s in ["osd.numpg", "osd.stat_bytes", "osd.stat_bytes_used"]:
            result['stats'][s.split(".")[1]] = global_instance().get_latest('osd', osd_spec, s)

        result['up'] = osd_info['up']
        result['in'] = osd_info['in']

        result['url'] = get_prefixed_url("/osd/perf/{0}".format(osd_id))

        return result

    def _osds_by_server(self):
        """
        Group OSD summaries by hostname, dropping OSD services whose
        metadata has no matching OSDMap entry (stale metadata).

        :return: list of (hostname, [summary, ...]) pairs sorted by host,
            each host's list sorted by OSD id with its first entry flagged
            so templates can render one header cell per group.
        """
        result = defaultdict(list)
        servers = global_instance().list_servers()

        osd_map = global_instance().get_sync_object(OsdMap)

        for server in servers:
            hostname = server['hostname']
            services = server['services']

            for s in services:
                if s["type"] == "osd":
                    osd_id = int(s["id"])
                    # If metadata doesn't tally with osdmap, drop it.
                    if osd_id not in osd_map.osds_by_id:
                        global_instance().log.warn(
                            "OSD service {0} missing in OSDMap, stale metadata?".format(osd_id))
                        continue
                    summary = self._osd_summary(osd_id,
                                                osd_map.osds_by_id[osd_id])

                    result[hostname].append(summary)

            result[hostname].sort(key=lambda a: a['id'])
            if result[hostname]:
                result[hostname][0]['first'] = True

        global_instance().log.warn("result.size {0} servers.size {1}".format(
            len(result), len(servers)
        ))

        # Return list form for convenience of rendering
        return sorted(result.items(), key=lambda a: a[0])

    @cherrypy.expose
    def index(self):
        """
        List of all OSDS grouped by host
        :return:
        """

        template = env.get_template("osds.html")
        toplevel_data = self._toplevel_data()

        content_data = {
            "osds_by_server": self._osds_by_server()
        }

        return template.render(
            url_prefix=global_instance().url_prefix,
            ceph_version=global_instance().version,
            path_info='/osd' + cherrypy.request.path_info,
            toplevel_data=json.dumps(toplevel_data, indent=2),
            content_data=json.dumps(content_data, indent=2)
        )
1150 | ||
3efd9988 FG |
        # --- tail of serve(): mount handlers and run the server ---
        # NOTE(review): this fragment belongs to a method whose `def` is
        # above the visible chunk (presumably Module.serve) — confirm.

        # Mount both handler trees under the configured URL prefix.
        cherrypy.tree.mount(Root(), get_prefixed_url("/"), conf)
        cherrypy.tree.mount(OSDEndpoint(), get_prefixed_url("/osd"), conf)

        log.info("Starting engine on {0}:{1}...".format(
            server_addr, server_port))
        cherrypy.engine.start()
        log.info("Waiting for engine...")
        # block() parks this thread until CherryPy is told to shut down,
        # keeping serve() alive for the daemon's lifetime.
        cherrypy.engine.block()
        log.info("Engine done.")