# -*- coding: utf-8 -*-
from __future__ import absolute_import

from collections import defaultdict

import cherrypy

from . import ApiController, RESTController
from .. import mgr
from ..exceptions import DashboardException
from ..security import Scope
from ..services.ceph_service import CephService
from ..tools import ViewCache


@ApiController('/cephfs', Scope.CEPHFS)
class CephFS(RESTController):
    def __init__(self):
        super(CephFS, self).__init__()

        # Stateful instances of CephFSClients, hold cached results. Key to
        # dict is FSCID
        self.cephfs_clients = {}

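    # Lists the raw filesystem entries from the cluster FSMap; each entry
    # carries its FSCID under 'id' plus the full 'mdsmap' for that filesystem.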
    def list(self):
        fsmap = mgr.get("fs_map")
        return fsmap['filesystems']

    def get(self, fs_id):
        fs_id = self.fs_id_to_int(fs_id)

        return self.fs_status(fs_id)

    @RESTController.Resource('GET')
    def clients(self, fs_id):
        fs_id = self.fs_id_to_int(fs_id)

        return self._clients(fs_id)

    @RESTController.Resource('GET')
    def mds_counters(self, fs_id):
        """
        Result format: map of daemon name to map of counter to list of datapoints
        rtype: dict[str, dict[str, list]]
        """

        # Opinionated list of interesting performance counters for the GUI --
        # if you need something else just add it. See how simple life is
        # when you don't have to write general purpose APIs?
        counters = [
            "mds_server.handle_client_request",
            "mds_log.ev",
            "mds_cache.num_strays",
            "mds.exported",
            "mds.exported_inodes",
            "mds.imported",
            "mds.imported_inodes",
            "mds.inodes",
            "mds.caps",
            "mds.subtrees",
            "mds_mem.ino"
        ]

        fs_id = self.fs_id_to_int(fs_id)

        result = {}
        mds_names = self._get_mds_names(fs_id)

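        # Note: mgr.get_counter() yields a dict keyed by the counter path, each
        # value being a list of (timestamp, value) datapoints, which is why
        # data[counter] can be forwarded to the frontend as-is.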
        for mds_name in mds_names:
            result[mds_name] = {}
            for counter in counters:
                data = mgr.get_counter("mds", mds_name, counter)
                if data is not None:
                    result[mds_name][counter] = data[counter]
                else:
                    result[mds_name][counter] = []

        return dict(result)

    @staticmethod
    def fs_id_to_int(fs_id):
        try:
            return int(fs_id)
        except ValueError:
            raise DashboardException(code='invalid_cephfs_id',
                                     msg="Invalid cephfs ID {}".format(fs_id),
                                     component='cephfs')

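    # Collects the names of the MDS daemons belonging to one filesystem, or of
    # every filesystem plus the standbys when no filesystem_id is given.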
    def _get_mds_names(self, filesystem_id=None):
        names = []

        fsmap = mgr.get("fs_map")
        for fs in fsmap['filesystems']:
            if filesystem_id is not None and fs['id'] != filesystem_id:
                continue
            names.extend([info['name']
                          for _, info in fs['mdsmap']['info'].items()])

        if filesystem_id is None:
            names.extend(info['name'] for info in fsmap['standbys'])

        return names

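    # Builds a status summary roughly equivalent to the `ceph fs status` CLI
    # output: per-rank MDS state and activity, pool usage, standbys and the
    # running MDS versions.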
    # pylint: disable=too-many-statements,too-many-branches
    def fs_status(self, fs_id):
        mds_versions = defaultdict(list)

        fsmap = mgr.get("fs_map")
        filesystem = None
        for fs in fsmap['filesystems']:
            if fs['id'] == fs_id:
                filesystem = fs
                break

        if filesystem is None:
            raise cherrypy.HTTPError(404,
                                     "CephFS id {0} not found".format(fs_id))

        rank_table = []

        mdsmap = filesystem['mdsmap']

        client_count = 0

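        # mdsmap['in'] lists the allocated ranks; mdsmap['up'] maps
        # 'mds_<rank>' to the GID of the daemon holding that rank, which in
        # turn keys into mdsmap['info'] as 'gid_<gid>'.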
        for rank in mdsmap["in"]:
            up = "mds_{0}".format(rank) in mdsmap["up"]
            if up:
                gid = mdsmap['up']["mds_{0}".format(rank)]
                info = mdsmap['info']['gid_{0}'.format(gid)]
                dns = mgr.get_latest("mds", info['name'], "mds_mem.dn")
                inos = mgr.get_latest("mds", info['name'], "mds_mem.ino")

                if rank == 0:
                    client_count = mgr.get_latest("mds", info['name'],
                                                  "mds_sessions.session_count")
                elif client_count == 0:
                    # In case rank 0 was down, look at another rank's
                    # sessionmap to get an indication of clients.
                    client_count = mgr.get_latest("mds", info['name'],
                                                  "mds_sessions.session_count")

                laggy = "laggy_since" in info

                state = info['state'].split(":")[1]
                if laggy:
                    state += "(laggy)"

                # Populate based on context of state, e.g. client
                # ops for an active daemon, replay progress, reconnect
                # progress
                if state == "active":
                    activity = CephService.get_rate("mds",
                                                    info['name'],
                                                    "mds_server.handle_client_request")
                else:
                    activity = 0.0

                metadata = mgr.get_metadata('mds', info['name'])
                mds_versions[metadata.get('ceph_version', 'unknown')].append(
                    info['name'])
                rank_table.append(
                    {
                        "rank": rank,
                        "state": state,
                        "mds": info['name'],
                        "activity": activity,
                        "dns": dns,
                        "inos": inos
                    }
                )

            else:
                rank_table.append(
                    {
                        "rank": rank,
                        "state": "failed",
                        "mds": "",
                        "activity": 0.0,
                        "dns": 0,
                        "inos": 0
                    }
                )

        # Find the standby replays
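        # (they are not covered by the rank loop above, so they are picked out
        # of mdsmap['info'] by state; their activity column is the journal
        # replay rate rather than client request throughput)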
        # pylint: disable=unused-variable
        for gid_str, daemon_info in mdsmap['info'].items():
            if daemon_info['state'] != "up:standby-replay":
                continue

            inos = mgr.get_latest("mds", daemon_info['name'], "mds_mem.ino")
            dns = mgr.get_latest("mds", daemon_info['name'], "mds_mem.dn")

            activity = CephService.get_rate(
                "mds", daemon_info['name'], "mds_log.replay")

            rank_table.append(
                {
                    "rank": "{0}-s".format(daemon_info['rank']),
                    "state": "standby-replay",
                    "mds": daemon_info['name'],
                    "activity": activity,
                    "dns": dns,
                    "inos": inos
                }
            )

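        # Pool usage is stitched together from two sources: 'df' provides the
        # per-pool statistics and 'osd_map' provides the pool names.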
        df = mgr.get("df")
        pool_stats = {p['id']: p['stats'] for p in df['pools']}
        osdmap = mgr.get("osd_map")
        pools = {p['pool']: p for p in osdmap['pools']}
        metadata_pool_id = mdsmap['metadata_pool']
        data_pool_ids = mdsmap['data_pools']

        pools_table = []
        for pool_id in [metadata_pool_id] + data_pool_ids:
            pool_type = "metadata" if pool_id == metadata_pool_id else "data"
            stats = pool_stats[pool_id]
            pools_table.append({
                "pool": pools[pool_id]['pool_name'],
                "type": pool_type,
                "used": stats['bytes_used'],
                "avail": stats['max_avail']
            })

        standby_table = []
        for standby in fsmap['standbys']:
            metadata = mgr.get_metadata('mds', standby['name'])
            mds_versions[metadata.get('ceph_version', 'unknown')].append(
                standby['name'])

            standby_table.append({
                'name': standby['name']
            })

        return {
            "cephfs": {
                "id": fs_id,
                "name": mdsmap['fs_name'],
                "client_count": client_count,
                "ranks": rank_table,
                "pools": pools_table
            },
            "standbys": standby_table,
            "versions": mds_versions
        }

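    # Serves the client list from a per-FSCID CephFSClients instance so that
    # repeated requests hit the ViewCache instead of re-issuing `session ls`
    # against the MDS every time.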
    def _clients(self, fs_id):
        cephfs_clients = self.cephfs_clients.get(fs_id, None)
        if cephfs_clients is None:
            cephfs_clients = CephFSClients(mgr, fs_id)
            self.cephfs_clients[fs_id] = cephfs_clients

        try:
            status, clients = cephfs_clients.get()
        except AttributeError:
            raise cherrypy.HTTPError(404,
                                     "No cephfs with id {0}".format(fs_id))

        if clients is None:
            raise cherrypy.HTTPError(404,
                                     "No cephfs with id {0}".format(fs_id))

        # Decorate the metadata with some fields that will be
        # independent of whether it's a kernel or userspace
        # client, so that the javascript doesn't have to grok that.
        for client in clients:
            if "ceph_version" in client['client_metadata']:
                client['type'] = "userspace"
                client['version'] = client['client_metadata']['ceph_version']
                client['hostname'] = client['client_metadata']['hostname']
            elif "kernel_version" in client['client_metadata']:
                client['type'] = "kernel"
                client['version'] = client['client_metadata']['kernel_version']
                client['hostname'] = client['client_metadata']['hostname']
            else:
                client['type'] = "unknown"
                client['version'] = ""
                client['hostname'] = ""

        return {
            'status': status,
            'data': clients
        }


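# Thin wrapper around the `session ls` MDS command for a single filesystem.
# The ViewCache decorator turns get() into a (status, value) pair and serves
# cached data while a refresh is in flight; the srv_spec '<fscid>:0' appears
# to address the rank-0 MDS of that filesystem.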
class CephFSClients(object):
    def __init__(self, module_inst, fscid):
        self._module = module_inst
        self.fscid = fscid

    @ViewCache()
    def get(self):
        return CephService.send_command('mds', 'session ls', srv_spec='{0}:0'.format(self.fscid))