# Extracted from the Ceph source tree: ceph/src/pybind/mgr/dashboard/module.py
# (dashboard mgr module, "update sources to v12.1.1").
2 """
3 Demonstrate writing a Ceph web interface inside a mgr module.
4 """
5
6 # We must share a global reference to this instance, because it is the
7 # gatekeeper to all accesses to data from the C++ side (e.g. the REST API
8 # request handlers need to see it)
9 from collections import defaultdict
10 import collections
11
12 _global_instance = {'plugin': None}
13 def global_instance():
14 assert _global_instance['plugin'] is not None
15 return _global_instance['plugin']
16
17
18 import os
19 import logging
20 import logging.config
21 import json
22 import sys
23 import time
24 import threading
25
26 import cherrypy
27 import jinja2
28
29 from mgr_module import MgrModule, CommandResult
30
31 from types import OsdMap, NotFound, Config, FsMap, MonMap, \
32 PgSummary, Health, MonStatus
33
34 import rados
35 from rbd_ls import RbdLs, RbdPoolLs
36 from cephfs_clients import CephFSClients
37
38
# Module-level logger for the dashboard; handler/level configuration is
# provided by the surrounding ceph-mgr logging environment.
log = logging.getLogger("dashboard")


# How many cluster log lines shall we hold onto in our
# python module for the convenience of the GUI?
LOG_BUFFER_SIZE = 30
45
# cherrypy likes to sys.exit/os._exit on error -- don't let it take the whole
# ceph-mgr daemon down too!
def os_exit_noop(*args, **kwargs):
    """No-op replacement for os._exit.

    os._exit is always invoked with a status argument (e.g. os._exit(1)),
    so the stub must accept (and ignore) arbitrary arguments; a zero-arg
    stub would itself raise TypeError when cherrypy calls it.
    """
    pass


# Monkey-patch: any call to os._exit from here on does nothing.
os._exit = os_exit_noop
51
52
def recurse_refs(root, path):
    """Debug helper: log the refcount of *root* and of everything it contains.

    Walks dict values (path annotated "->key") and list elements (path
    annotated "[index]") recursively, logging each node's path, its
    sys.getrefcount() and its class.
    """
    if isinstance(root, dict):
        for key, value in root.items():
            recurse_refs(value, path + "->%s" % key)
    elif isinstance(root, list):
        for idx, element in enumerate(root):
            recurse_refs(element, path + "[%d]" % idx)

    log.info("%s %d (%s)" % (path, sys.getrefcount(root), root.__class__))
62
63
class Module(MgrModule):
    """ceph-mgr dashboard module: serves a cherrypy web UI for cluster state."""

    def __init__(self, *args, **kwargs):
        super(Module, self).__init__(*args, **kwargs)
        # Publish ourselves so cherrypy handlers can find the plugin via
        # global_instance().
        _global_instance['plugin'] = self
        self.log.info("Constructing module {0}: instance {1}".format(
            __name__, _global_instance))

        # Cluster log state: notify() only appends to the buffers once the
        # initial "log last" load in serve() has completed (log_primed).
        self.log_primed = False
        self.log_buffer = collections.deque(maxlen=LOG_BUFFER_SIZE)
        self.audit_buffer = collections.deque(maxlen=LOG_BUFFER_SIZE)

        # Keep a librados instance for those that need it; created lazily
        # by the `rados` property.
        self._rados = None

        # Stateful instances of RbdLs, hold cached results. Key to dict
        # is pool name.
        self.rbd_ls = {}

        # Stateful instance of RbdPoolLs, hold cached list of RBD
        # pools
        self.rbd_pool_ls = RbdPoolLs(self)

        # Stateful instances of CephFSClients, hold cached results. Key to
        # dict is FSCID
        self.cephfs_clients = {}

        # A short history of pool df stats:
        # pool_stats[pool_id][stat_name] -> deque of (timestamp, value)
        self.pool_stats = defaultdict(lambda: defaultdict(
            lambda: collections.deque(maxlen=10)))
93
94 @property
95 def rados(self):
96 """
97 A librados instance to be shared by any classes within
98 this mgr module that want one.
99 """
100 if self._rados:
101 return self._rados
102
103 from mgr_module import ceph_state
104 ctx_capsule = ceph_state.get_context()
105 self._rados = rados.Rados(context=ctx_capsule)
106 self._rados.connect()
107
108 return self._rados
109
110 def update_pool_stats(self):
111 df = global_instance().get("df")
112 pool_stats = dict([(p['id'], p['stats']) for p in df['pools']])
113 now = time.time()
114 for pool_id, stats in pool_stats.items():
115 for stat_name, stat_val in stats.items():
116 self.pool_stats[pool_id][stat_name].appendleft((now, stat_val))
117
118 def notify(self, notify_type, notify_val):
119 if notify_type == "clog":
120 # Only store log messages once we've done our initial load,
121 # so that we don't end up duplicating.
122 if self.log_primed:
123 if notify_val['channel'] == "audit":
124 self.audit_buffer.appendleft(notify_val)
125 else:
126 self.log_buffer.appendleft(notify_val)
127 elif notify_type == "pg_summary":
128 self.update_pool_stats()
129 else:
130 pass
131
132 def get_sync_object(self, object_type, path=None):
133 if object_type == OsdMap:
134 data = self.get("osd_map")
135
136 assert data is not None
137
138 data['tree'] = self.get("osd_map_tree")
139 data['crush'] = self.get("osd_map_crush")
140 data['crush_map_text'] = self.get("osd_map_crush_map_text")
141 data['osd_metadata'] = self.get("osd_metadata")
142 obj = OsdMap(data)
143 elif object_type == Config:
144 data = self.get("config")
145 obj = Config( data)
146 elif object_type == MonMap:
147 data = self.get("mon_map")
148 obj = MonMap(data)
149 elif object_type == FsMap:
150 data = self.get("fs_map")
151 obj = FsMap(data)
152 elif object_type == PgSummary:
153 data = self.get("pg_summary")
154 self.log.debug("JSON: {0}".format(data))
155 obj = PgSummary(data)
156 elif object_type == Health:
157 data = self.get("health")
158 obj = Health(json.loads(data['json']))
159 elif object_type == MonStatus:
160 data = self.get("mon_status")
161 obj = MonStatus(json.loads(data['json']))
162 else:
163 raise NotImplementedError(object_type)
164
165 # TODO: move 'path' handling up into C++ land so that we only
166 # Pythonize the part we're interested in
167 if path:
168 try:
169 for part in path:
170 if isinstance(obj, dict):
171 obj = obj[part]
172 else:
173 obj = getattr(obj, part)
174 except (AttributeError, KeyError):
175 raise NotFound(object_type, path)
176
177 return obj
178
    def shutdown(self):
        """Stop the embedded cherrypy server (unblocking serve()) and
        release the shared librados handle if one was created."""
        log.info("Stopping server...")
        cherrypy.engine.exit()
        log.info("Stopped server")

        log.info("Stopping librados...")
        # _rados is only set if the `rados` property was ever used.
        if self._rados:
            self._rados.shutdown()
        log.info("Stopped librados.")
188
189 def get_latest(self, daemon_type, daemon_name, stat):
190 data = self.get_counter(daemon_type, daemon_name, stat)[stat]
191 if data:
192 return data[-1][1]
193 else:
194 return 0
195
196 def get_rate(self, daemon_type, daemon_name, stat):
197 data = self.get_counter(daemon_type, daemon_name, stat)[stat]
198
199 if data and len(data) > 1:
200 return (data[-1][1] - data[-2][1]) / float(data[-1][0] - data[-2][0])
201 else:
202 return 0
203
204 def format_dimless(self, n, width, colored=True):
205 """
206 Format a number without units, so as to fit into `width` characters, substituting
207 an appropriate unit suffix.
208 """
209 units = [' ', 'k', 'M', 'G', 'T', 'P']
210 unit = 0
211 while len("%s" % (int(n) // (1000**unit))) > width - 1:
212 unit += 1
213
214 if unit > 0:
215 truncated_float = ("%f" % (n / (1000.0 ** unit)))[0:width - 1]
216 if truncated_float[-1] == '.':
217 truncated_float = " " + truncated_float[0:-1]
218 else:
219 truncated_float = "%{wid}d".format(wid=width-1) % n
220 formatted = "%s%s" % (truncated_float, units[unit])
221
222 if colored:
223 # TODO: html equivalent
224 # if n == 0:
225 # color = self.BLACK, False
226 # else:
227 # color = self.YELLOW, False
228 # return self.bold(self.colorize(formatted[0:-1], color[0], color[1])) \
229 # + self.bold(self.colorize(formatted[-1], self.BLACK, False))
230 return formatted
231 else:
232 return formatted
233
    def fs_status(self, fs_id):
        """Build a status summary dict for one filesystem.

        Returns {"filesystem": {id, name, client_count, clients_url, ranks,
        pools}, "standbys": [...], "versions": {ceph_version: [mds names]}}.

        NOTE(review): if `fs_id` matches no filesystem, `filesystem` stays
        None and the mdsmap access below raises -- confirm callers always
        pass a valid FSCID.
        """
        # ceph_version -> list of MDS daemon names running that version.
        mds_versions = defaultdict(list)

        fsmap = self.get("fs_map")
        filesystem = None
        for fs in fsmap['filesystems']:
            if fs['id'] == fs_id:
                filesystem = fs
                break

        rank_table = []

        mdsmap = filesystem['mdsmap']

        client_count = 0

        # One rank_table row per in-map rank, whether up or failed.
        for rank in mdsmap["in"]:
            up = "mds_{0}".format(rank) in mdsmap["up"]
            if up:
                gid = mdsmap['up']["mds_{0}".format(rank)]
                info = mdsmap['info']['gid_{0}'.format(gid)]
                dns = self.get_latest("mds", info['name'], "mds.inodes")
                inos = self.get_latest("mds", info['name'], "mds_mem.ino")

                if rank == 0:
                    client_count = self.get_latest("mds", info['name'],
                                                   "mds_sessions.session_count")
                elif client_count == 0:
                    # In case rank 0 was down, look at another rank's
                    # sessionmap to get an indication of clients.
                    client_count = self.get_latest("mds", info['name'],
                                                   "mds_sessions.session_count")

                laggy = "laggy_since" in info

                # info['state'] looks like "up:active"; keep the part after ':'.
                state = info['state'].split(":")[1]
                if laggy:
                    state += "(laggy)"

                # if state == "active" and not laggy:
                #     c_state = self.colorize(state, self.GREEN)
                # else:
                #     c_state = self.colorize(state, self.YELLOW)

                # Populate based on context of state, e.g. client
                # ops for an active daemon, replay progress, reconnect
                # progress
                activity = ""

                if state == "active":
                    activity = "Reqs: " + self.format_dimless(
                        self.get_rate("mds", info['name'], "mds_server.handle_client_request"),
                        5
                    ) + "/s"

                metadata = self.get_metadata('mds', info['name'])
                mds_versions[metadata['ceph_version']].append(info['name'])
                rank_table.append(
                    {
                        "rank": rank,
                        "state": state,
                        "mds": info['name'],
                        "activity": activity,
                        "dns": dns,
                        "inos": inos
                    }
                )

            else:
                # Rank is in the map but no daemon holds it.
                rank_table.append(
                    {
                        "rank": rank,
                        "state": "failed",
                        "mds": "",
                        "activity": "",
                        "dns": 0,
                        "inos": 0
                    }
                )

        # Find the standby replays
        # NOTE(review): iteritems() is Python 2 only.
        for gid_str, daemon_info in mdsmap['info'].iteritems():
            if daemon_info['state'] != "up:standby-replay":
                continue

            inos = self.get_latest("mds", daemon_info['name'], "mds_mem.ino")
            dns = self.get_latest("mds", daemon_info['name'], "mds.inodes")

            activity = "Evts: " + self.format_dimless(
                self.get_rate("mds", daemon_info['name'], "mds_log.replay"),
                5
            ) + "/s"

            rank_table.append(
                {
                    "rank": "{0}-s".format(daemon_info['rank']),
                    "state": "standby-replay",
                    "mds": daemon_info['name'],
                    "activity": activity,
                    "dns": dns,
                    "inos": inos
                }
            )

        # Usage table for the metadata pool plus each data pool.
        df = self.get("df")
        pool_stats = dict([(p['id'], p['stats']) for p in df['pools']])
        osdmap = self.get("osd_map")
        pools = dict([(p['pool'], p) for p in osdmap['pools']])
        metadata_pool_id = mdsmap['metadata_pool']
        data_pool_ids = mdsmap['data_pools']

        pools_table = []
        for pool_id in [metadata_pool_id] + data_pool_ids:
            pool_type = "metadata" if pool_id == metadata_pool_id else "data"
            stats = pool_stats[pool_id]
            pools_table.append({
                "pool": pools[pool_id]['pool_name'],
                "type": pool_type,
                "used": stats['bytes_used'],
                "avail": stats['max_avail']
            })

        # Cluster-wide standby daemons (not bound to this filesystem).
        standby_table = []
        for standby in fsmap['standbys']:
            metadata = self.get_metadata('mds', standby['name'])
            mds_versions[metadata['ceph_version']].append(standby['name'])

            standby_table.append({
                'name': standby['name']
            })

        return {
            "filesystem": {
                "id": fs_id,
                "name": mdsmap['fs_name'],
                "client_count": client_count,
                "clients_url": "/clients/{0}/".format(fs_id),
                "ranks": rank_table,
                "pools": pools_table
            },
            "standbys": standby_table,
            "versions": mds_versions
        }
377
    def serve(self):
        """Main loop of the module: prime the cluster log buffers, set up the
        cherrypy application and block until the engine exits (shutdown()
        calls cherrypy.engine.exit() to unblock us)."""
        current_dir = os.path.dirname(os.path.abspath(__file__))

        # Jinja templates (*.html) live alongside this file.
        jinja_loader = jinja2.FileSystemLoader(current_dir)
        env = jinja2.Environment(loader=jinja_loader)

        # Prime the in-memory log buffers with recent history from the mons
        # so the GUI is not empty on startup.
        result = CommandResult("")
        self.send_command(result, "mon", "", json.dumps({
            "prefix":"log last",
            "format": "json"
        }), "")
        r, outb, outs = result.wait()
        if r != 0:
            # Oh well. We won't let this stop us though.
            self.log.error("Error fetching log history (r={0}, \"{1}\")".format(
                r, outs))
        else:
            try:
                lines = json.loads(outb)
            except ValueError:
                self.log.error("Error decoding log history")
            else:
                for l in lines:
                    if l['channel'] == 'audit':
                        self.audit_buffer.appendleft(l)
                    else:
                        self.log_buffer.appendleft(l)

        # From here on, notify() appends live clog lines to the buffers.
        self.log_primed = True

        class Root(object):
            """cherrypy application: one exposed method per HTML page, plus
            *_data variants returning the same content as JSON."""

            def _toplevel_data(self):
                """
                Data consumed by the base.html template
                """
                status, data = global_instance().rbd_pool_ls.get()
                if data is None:
                    log.warning("Failed to get RBD pool list")
                    data = []

                rbd_pools = sorted([
                    {
                        "name": name,
                        "url": "/rbd/{0}/".format(name)
                    }
                    for name in data
                ], key=lambda k: k['name'])

                fsmap = global_instance().get_sync_object(FsMap)
                filesystems = [
                    {
                        "id": f['id'],
                        "name": f['mdsmap']['fs_name'],
                        "url": "/filesystem/{0}/".format(f['id'])
                    }
                    for f in fsmap.data['filesystems']
                ]

                return {
                    'rbd_pools': rbd_pools,
                    'health_status': self._health_data()['status'],
                    'filesystems': filesystems
                }

            @cherrypy.expose
            def filesystem(self, fs_id):
                # Render the status page for one filesystem.
                template = env.get_template("filesystem.html")

                toplevel_data = self._toplevel_data()

                content_data = {
                    "fs_status": global_instance().fs_status(int(fs_id))
                }

                return template.render(
                    ceph_version=global_instance().version,
                    toplevel_data=json.dumps(toplevel_data, indent=2),
                    content_data=json.dumps(content_data, indent=2)
                )

            @cherrypy.expose
            @cherrypy.tools.json_out()
            def filesystem_data(self, fs_id):
                # JSON endpoint backing the filesystem page.
                return global_instance().fs_status(int(fs_id))

            def _osd(self, osd_id):
                # Collect map entry, metadata and perf histogram for one OSD.
                #global_instance().fs_status(int(fs_id))
                osd_id = int(osd_id)

                osd_map = global_instance().get("osd_map")

                osd = None
                for o in osd_map['osds']:
                    if o['osd'] == osd_id:
                        osd = o
                        break

                assert osd is not None  # TODO 400

                osd_spec = "{0}".format(osd_id)

                osd_metadata = global_instance().get_metadata(
                    "osd", osd_spec)

                # Ask the OSD daemon directly for its perf histograms.
                result = CommandResult("")
                global_instance().send_command(result, "osd", osd_spec,
                                               json.dumps({
                                                   "prefix": "perf histogram dump",
                                               }),
                                               "")
                r, outb, outs = result.wait()
                assert r == 0
                histogram = json.loads(outb)

                return {
                    "osd": osd,
                    "osd_metadata": osd_metadata,
                    "osd_histogram": histogram
                }

            @cherrypy.expose
            def osd_perf(self, osd_id):
                # Render the per-OSD performance page.
                template = env.get_template("osd_perf.html")
                toplevel_data = self._toplevel_data()

                return template.render(
                    ceph_version=global_instance().version,
                    toplevel_data=json.dumps(toplevel_data, indent=2),
                    content_data=json.dumps(self._osd(osd_id), indent=2)
                )

            @cherrypy.expose
            @cherrypy.tools.json_out()
            def osd_perf_data(self, osd_id):
                # JSON endpoint backing the osd_perf page.
                return self._osd(osd_id)

            def _clients(self, fs_id):
                # Cache one CephFSClients helper per FSCID on the module.
                cephfs_clients = global_instance().cephfs_clients.get(fs_id, None)
                if cephfs_clients is None:
                    cephfs_clients = CephFSClients(global_instance(), fs_id)
                    global_instance().cephfs_clients[fs_id] = cephfs_clients

                status, clients = cephfs_clients.get()
                #TODO do something sensible with status

                # Decorate the metadata with some fields that will be
                # independent of whether it's a kernel or userspace
                # client, so that the javascript doesn't have to grok that.
                for client in clients:
                    if "ceph_version" in client['client_metadata']:
                        client['type'] = "userspace"
                        client['version'] = client['client_metadata']['ceph_version']
                        client['hostname'] = client['client_metadata']['hostname']
                    elif "kernel_version" in client['client_metadata']:
                        client['type'] = "kernel"
                        client['version'] = client['client_metadata']['kernel_version']
                        client['hostname'] = client['client_metadata']['hostname']
                    else:
                        client['type'] = "unknown"
                        client['version'] = ""
                        client['hostname'] = ""

                return clients

            @cherrypy.expose
            def clients(self, fscid_str):
                # Render the client list page for one filesystem; 400 on a
                # non-integer id, 404 on an unknown FSCID.
                try:
                    fscid = int(fscid_str)
                except ValueError:
                    raise cherrypy.HTTPError(400,
                        "Invalid filesystem id {0}".format(fscid_str))

                try:
                    fs_name = FsMap(global_instance().get(
                        "fs_map")).get_filesystem(fscid)['mdsmap']['fs_name']
                except NotFound:
                    log.warning("Missing FSCID, dumping fsmap:\n{0}".format(
                        json.dumps(global_instance().get("fs_map"), indent=2)
                    ))
                    raise cherrypy.HTTPError(404,
                        "No filesystem with id {0}".format(fscid))

                clients = self._clients(fscid)
                global_instance().log.debug(json.dumps(clients, indent=2))
                content_data = {
                    "clients": clients,
                    "fs_name": fs_name,
                    "fscid": fscid,
                    "fs_url": "/filesystem/" + fscid_str + "/"
                }

                template = env.get_template("clients.html")
                return template.render(
                    ceph_version=global_instance().version,
                    toplevel_data=json.dumps(self._toplevel_data(), indent=2),
                    content_data=json.dumps(content_data, indent=2)
                )

            @cherrypy.expose
            @cherrypy.tools.json_out()
            def clients_data(self, fs_id):
                # JSON endpoint backing the clients page.
                return self._clients(int(fs_id))

            def _rbd(self, pool_name):
                # Cache one RbdLs helper per pool on the module.
                rbd_ls = global_instance().rbd_ls.get(pool_name, None)
                if rbd_ls is None:
                    rbd_ls = RbdLs(global_instance(), pool_name)
                    global_instance().rbd_ls[pool_name] = rbd_ls

                status, value = rbd_ls.get()

                interval = 5

                # Kick off a background refresh so the cached listing stays
                # roughly `interval` seconds fresh.
                wait = interval - rbd_ls.latency
                def wait_and_load():
                    time.sleep(wait)
                    rbd_ls.get()

                threading.Thread(target=wait_and_load).start()

                assert status != RbdLs.VALUE_NONE  # FIXME bubble status up to UI
                return value

            @cherrypy.expose
            def rbd(self, pool_name):
                # Render the RBD image list page for one pool.
                template = env.get_template("rbd.html")

                toplevel_data = self._toplevel_data()

                images = self._rbd(pool_name)
                content_data = {
                    "images": images,
                    "pool_name": pool_name
                }

                return template.render(
                    ceph_version=global_instance().version,
                    toplevel_data=json.dumps(toplevel_data, indent=2),
                    content_data=json.dumps(content_data, indent=2)
                )

            @cherrypy.expose
            @cherrypy.tools.json_out()
            def rbd_data(self, pool_name):
                # JSON endpoint backing the rbd page.
                return self._rbd(pool_name)

            @cherrypy.expose
            def health(self):
                # Render the cluster health page.
                template = env.get_template("health.html")
                return template.render(
                    ceph_version=global_instance().version,
                    toplevel_data=json.dumps(self._toplevel_data(), indent=2),
                    content_data=json.dumps(self._health(), indent=2)
                )

            @cherrypy.expose
            def servers(self):
                # Render the server (host) inventory page.
                template = env.get_template("servers.html")
                return template.render(
                    ceph_version=global_instance().version,
                    toplevel_data=json.dumps(self._toplevel_data(), indent=2),
                    content_data=json.dumps(self._servers(), indent=2)
                )

            def _servers(self):
                # Host inventory as reported by the mgr.
                return {
                    'servers': global_instance().list_servers()
                }

            @cherrypy.expose
            @cherrypy.tools.json_out()
            def servers_data(self):
                # JSON endpoint backing the servers page.
                return self._servers()

            def _health_data(self):
                health = global_instance().get_sync_object(Health).data
                # Transform the `checks` dict into a list for the convenience
                # of rendering from javascript.
                # NOTE(review): iteritems() and sorted(cmp=...) are Python 2
                # only; the cmp lambda also returns a bool rather than
                # -1/0/1, so the severity ordering is only approximate.
                checks = []
                for k, v in health['checks'].iteritems():
                    v['type'] = k
                    checks.append(v)

                checks = sorted(checks, cmp=lambda a, b: a['severity'] > b['severity'])

                health['checks'] = checks

                return health

            def _health(self):
                # Fuse osdmap with pg_summary to get description of pools
                # including their PG states
                osd_map = global_instance().get_sync_object(OsdMap).data
                pg_summary = global_instance().get_sync_object(PgSummary).data
                pools = []

                # Ensure there is at least one df sample to report on.
                if len(global_instance().pool_stats) == 0:
                    global_instance().update_pool_stats()

                for pool in osd_map['pools']:
                    pool['pg_status'] = pg_summary['by_pool'][pool['pool'].__str__()]
                    stats = global_instance().pool_stats[pool['pool']]
                    s = {}

                    def get_rate(series):
                        # Rate between the two newest (timestamp, value)
                        # samples; series is newest-first.
                        if len(series) >= 2:
                            return (float(series[0][1]) - float(series[1][1])) / (float(series[0][0]) - float(series[1][0]))
                        else:
                            return 0

                    for stat_name, stat_series in stats.items():
                        s[stat_name] = {
                            'latest': stat_series[0][1],
                            'rate': get_rate(stat_series),
                            'series': [i for i in stat_series]
                        }
                    pool['stats'] = s
                    pools.append(pool)

                # Not needed, skip the effort of transmitting this
                # to UI
                del osd_map['pg_temp']

                df = global_instance().get("df")
                df['stats']['total_objects'] = sum(
                    [p['stats']['objects'] for p in df['pools']])

                return {
                    "health": self._health_data(),
                    "mon_status": global_instance().get_sync_object(
                        MonStatus).data,
                    "fs_map": global_instance().get_sync_object(FsMap).data,
                    "osd_map": osd_map,
                    "clog": list(global_instance().log_buffer),
                    "audit_log": list(global_instance().audit_buffer),
                    "pools": pools,
                    "mgr_map": global_instance().get("mgr_map"),
                    "df": df
                }

            @cherrypy.expose
            @cherrypy.tools.json_out()
            def health_data(self):
                # JSON endpoint backing the health page.
                return self._health()

            @cherrypy.expose
            def index(self):
                # "/" is an alias for the health page.
                return self.health()

            @cherrypy.expose
            @cherrypy.tools.json_out()
            def toplevel_data(self):
                # JSON endpoint for the base template's navigation data.
                return self._toplevel_data()

            def _get_mds_names(self, filesystem_id=None):
                # Names of all MDS daemons, optionally restricted to one
                # filesystem; standbys are included only in the global case.
                names = []

                fsmap = global_instance().get("fs_map")
                for fs in fsmap['filesystems']:
                    if filesystem_id is not None and fs['id'] != filesystem_id:
                        continue
                    names.extend([info['name'] for _, info in fs['mdsmap']['info'].items()])

                if filesystem_id is None:
                    names.extend(info['name'] for info in fsmap['standbys'])

                return names

            @cherrypy.expose
            @cherrypy.tools.json_out()
            def mds_counters(self, fs_id):
                """
                Result format: map of daemon name to map of counter to list of datapoints
                """

                # Opinionated list of interesting performance counters for the GUI --
                # if you need something else just add it. See how simple life is
                # when you don't have to write general purpose APIs?
                counters = [
                    "mds_server.handle_client_request",
                    "mds_log.ev",
                    "mds_cache.num_strays",
                    "mds.exported",
                    "mds.exported_inodes",
                    "mds.imported",
                    "mds.imported_inodes",
                    "mds.inodes",
                    "mds.caps",
                    "mds.subtrees"
                ]

                result = {}
                mds_names = self._get_mds_names(int(fs_id))

                for mds_name in mds_names:
                    result[mds_name] = {}
                    for counter in counters:
                        data = global_instance().get_counter("mds", mds_name, counter)
                        if data is not None:
                            result[mds_name][counter] = data[counter]
                        else:
                            result[mds_name][counter] = []

                return dict(result)

        # Bind address/port come from mgr config-keys; get_localized_config
        # allows per-daemon overrides.
        server_addr = self.get_localized_config('server_addr', '::')
        server_port = self.get_localized_config('server_port', '7000')
        if server_addr is None:
            raise RuntimeError('no server_addr configured; try "ceph config-key put mgr/dashboard/server_addr <ip>"')
        log.info("server_addr: %s server_port: %s" % (server_addr, server_port))
        cherrypy.config.update({
            'server.socket_host': server_addr,
            'server.socket_port': int(server_port),
            'engine.autoreload.on': False
        })

        # Serve the bundled static assets (js/css) at /static.
        static_dir = os.path.join(current_dir, 'static')
        conf = {
            "/static": {
                "tools.staticdir.on": True,
                'tools.staticdir.dir': static_dir
            }
        }
        log.info("Serving static from {0}".format(static_dir))
        cherrypy.tree.mount(Root(), "/", conf)

        log.info("Starting engine...")
        cherrypy.engine.start()
        log.info("Waiting for engine...")
        # Blocks until shutdown() calls cherrypy.engine.exit().
        cherrypy.engine.block()
        log.info("Engine done.")