# -*- coding: utf-8 -*-
from __future__ import absolute_import

from collections import defaultdict

import cherrypy

from . import ApiController, RESTController
from .. import mgr
from ..exceptions import DashboardException
from ..security import Scope
from ..services.ceph_service import CephService
from ..tools import ViewCache


@ApiController('/cephfs', Scope.CEPHFS)
class CephFS(RESTController):
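    """
    REST controller behind the dashboard's /cephfs endpoints: filesystem
    listing and status, client sessions and MDS performance counters.
    """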
    def __init__(self):
        super(CephFS, self).__init__()

        # Stateful instances of CephFSClients, hold cached results. Key to
        # dict is FSCID
        self.cephfs_clients = {}

    def list(self):
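        """
        Return the list of filesystems from the cluster FSMap.
        """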
        fsmap = mgr.get("fs_map")
        return fsmap['filesystems']

    def get(self, fs_id):
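        """
        Return the detailed status of a single filesystem (see fs_status()).
        """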
        fs_id = self.fs_id_to_int(fs_id)

        return self.fs_status(fs_id)

    @RESTController.Resource('GET')
    def clients(self, fs_id):
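        """
        Return the client sessions of the given filesystem; results are
        cached per FSCID by CephFSClients.
        """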
        fs_id = self.fs_id_to_int(fs_id)

        return self._clients(fs_id)

    @RESTController.Resource('GET')
    def mds_counters(self, fs_id):
        """
        Result format: map of daemon name to map of counter to list of datapoints
        rtype: dict[str, dict[str, list]]
        """

        # Opinionated list of interesting performance counters for the GUI --
        # if you need something else just add it. See how simple life is
        # when you don't have to write general purpose APIs?
        counters = [
            "mds_server.handle_client_request",
            "mds_log.ev",
            "mds_cache.num_strays",
            "mds.exported",
            "mds.exported_inodes",
            "mds.imported",
            "mds.imported_inodes",
            "mds.inodes",
            "mds.caps",
            "mds.subtrees"
        ]

        fs_id = self.fs_id_to_int(fs_id)

        result = {}
        mds_names = self._get_mds_names(fs_id)

        for mds_name in mds_names:
            result[mds_name] = {}
            for counter in counters:
                data = mgr.get_counter("mds", mds_name, counter)
                if data is not None:
                    result[mds_name][counter] = data[counter]
                else:
                    result[mds_name][counter] = []

        return dict(result)

    @staticmethod
    def fs_id_to_int(fs_id):
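        """
        Convert the textual fs_id from the URL into an integer FSCID,
        raising a DashboardException for non-numeric input.
        """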
        try:
            return int(fs_id)
        except ValueError:
            raise DashboardException(code='invalid_cephfs_id',
                                     msg="Invalid cephfs ID {}".format(fs_id),
                                     component='cephfs')

    def _get_mds_names(self, filesystem_id=None):
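        """
        Collect the names of MDS daemons, optionally restricted to a single
        filesystem; standby daemons are included only when no filesystem_id
        is given.
        """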
        names = []

        fsmap = mgr.get("fs_map")
        for fs in fsmap['filesystems']:
            if filesystem_id is not None and fs['id'] != filesystem_id:
                continue
            names.extend([info['name']
                          for _, info in fs['mdsmap']['info'].items()])

        if filesystem_id is None:
            names.extend(info['name'] for info in fsmap['standbys'])

        return names

    # pylint: disable=too-many-statements,too-many-branches
    def fs_status(self, fs_id):
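        """
        Build the status summary for one filesystem: per-rank MDS state and
        activity, standby-replay daemons, pool usage, standby daemons and the
        MDS version breakdown. Raises a 404 if the FSCID is unknown.
        """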
        mds_versions = defaultdict(list)

        fsmap = mgr.get("fs_map")
        filesystem = None
        for fs in fsmap['filesystems']:
            if fs['id'] == fs_id:
                filesystem = fs
                break

        if filesystem is None:
            raise cherrypy.HTTPError(404,
                                     "CephFS id {0} not found".format(fs_id))

        rank_table = []

        mdsmap = filesystem['mdsmap']

        client_count = 0

        for rank in mdsmap["in"]:
            up = "mds_{0}".format(rank) in mdsmap["up"]
            if up:
                gid = mdsmap['up']["mds_{0}".format(rank)]
                info = mdsmap['info']['gid_{0}'.format(gid)]
                dns = mgr.get_latest("mds", info['name'], "mds.inodes")
                inos = mgr.get_latest("mds", info['name'], "mds_mem.ino")

                if rank == 0:
                    client_count = mgr.get_latest("mds", info['name'],
                                                  "mds_sessions.session_count")
                elif client_count == 0:
                    # In case rank 0 was down, look at another rank's
                    # sessionmap to get an indication of clients.
                    client_count = mgr.get_latest("mds", info['name'],
                                                  "mds_sessions.session_count")

                laggy = "laggy_since" in info

                state = info['state'].split(":")[1]
                if laggy:
                    state += "(laggy)"

                # Populate based on context of state, e.g. client
                # ops for an active daemon, replay progress, reconnect
                # progress
                if state == "active":
                    activity = CephService.get_rate("mds",
                                                    info['name'],
                                                    "mds_server.handle_client_request")
                else:
                    activity = 0.0

                metadata = mgr.get_metadata('mds', info['name'])
                mds_versions[metadata.get('ceph_version', 'unknown')].append(
                    info['name'])
                rank_table.append(
                    {
                        "rank": rank,
                        "state": state,
                        "mds": info['name'],
                        "activity": activity,
                        "dns": dns,
                        "inos": inos
                    }
                )

            else:
                rank_table.append(
                    {
                        "rank": rank,
                        "state": "failed",
                        "mds": "",
                        "activity": 0.0,
                        "dns": 0,
                        "inos": 0
                    }
                )

        # Find the standby replays
        # pylint: disable=unused-variable
        for gid_str, daemon_info in mdsmap['info'].items():
            if daemon_info['state'] != "up:standby-replay":
                continue

            inos = mgr.get_latest("mds", daemon_info['name'], "mds_mem.ino")
            dns = mgr.get_latest("mds", daemon_info['name'], "mds.inodes")

            activity = CephService.get_rate(
                "mds", daemon_info['name'], "mds_log.replay")

            rank_table.append(
                {
                    "rank": "{0}-s".format(daemon_info['rank']),
                    "state": "standby-replay",
                    "mds": daemon_info['name'],
                    "activity": activity,
                    "dns": dns,
                    "inos": inos
                }
            )

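        # Pool usage comes from the 'df' report; pool names from the OSDMap.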
        df = mgr.get("df")
        pool_stats = {p['id']: p['stats'] for p in df['pools']}
        osdmap = mgr.get("osd_map")
        pools = {p['pool']: p for p in osdmap['pools']}
        metadata_pool_id = mdsmap['metadata_pool']
        data_pool_ids = mdsmap['data_pools']

        pools_table = []
        for pool_id in [metadata_pool_id] + data_pool_ids:
            pool_type = "metadata" if pool_id == metadata_pool_id else "data"
            stats = pool_stats[pool_id]
            pools_table.append({
                "pool": pools[pool_id]['pool_name'],
                "type": pool_type,
                "used": stats['bytes_used'],
                "avail": stats['max_avail']
            })

        standby_table = []
        for standby in fsmap['standbys']:
            metadata = mgr.get_metadata('mds', standby['name'])
            mds_versions[metadata.get('ceph_version', 'unknown')].append(
                standby['name'])

            standby_table.append({
                'name': standby['name']
            })

        return {
            "cephfs": {
                "id": fs_id,
                "name": mdsmap['fs_name'],
                "client_count": client_count,
                "ranks": rank_table,
                "pools": pools_table
            },
            "standbys": standby_table,
            "versions": mds_versions
        }

    def _clients(self, fs_id):
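        """
        Return the (possibly cached) client list for the FSCID, raising a 404
        when the filesystem does not exist, and tag each entry as a kernel or
        userspace client.
        """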
        cephfs_clients = self.cephfs_clients.get(fs_id, None)
        if cephfs_clients is None:
            cephfs_clients = CephFSClients(mgr, fs_id)
            self.cephfs_clients[fs_id] = cephfs_clients

        try:
            status, clients = cephfs_clients.get()
        except AttributeError:
            raise cherrypy.HTTPError(404,
                                     "No cephfs with id {0}".format(fs_id))

        if clients is None:
            raise cherrypy.HTTPError(404,
                                     "No cephfs with id {0}".format(fs_id))

        # Decorate the metadata with some fields that will be
        # independent of whether it's a kernel or userspace
        # client, so that the javascript doesn't have to grok that.
        for client in clients:
            if "ceph_version" in client['client_metadata']:
                client['type'] = "userspace"
                client['version'] = client['client_metadata']['ceph_version']
                client['hostname'] = client['client_metadata']['hostname']
            elif "kernel_version" in client['client_metadata']:
                client['type'] = "kernel"
                client['version'] = client['client_metadata']['kernel_version']
                client['hostname'] = client['client_metadata']['hostname']
            else:
                client['type'] = "unknown"
                client['version'] = ""
                client['hostname'] = ""

        return {
            'status': status,
            'data': clients
        }


class CephFSClients(object):
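    """
    Per-filesystem helper that fetches and caches the MDS client session
    list ('session ls') through the ViewCache decorator.
    """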
    def __init__(self, module_inst, fscid):
        self._module = module_inst
        self.fscid = fscid

    @ViewCache()
    def get(self):
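        """
        Run 'session ls' against this filesystem (srv_spec '<fscid>:0');
        ViewCache caches the output and makes get() return a (status, value)
        tuple, as unpacked by CephFS._clients().
        """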
        return CephService.send_command('mds', 'session ls', srv_spec='{0}:0'.format(self.fscid))