]> git.proxmox.com Git - ceph.git/blob - ceph/src/pybind/mgr/dashboard/controllers/cephfs.py
import new upstream nautilus stable release 14.2.8
[ceph.git] / ceph / src / pybind / mgr / dashboard / controllers / cephfs.py
1 # -*- coding: utf-8 -*-
2 from __future__ import absolute_import
3
4 from collections import defaultdict
5
6 import cherrypy
7
8 from . import ApiController, RESTController
9 from .. import mgr
10 from ..exceptions import DashboardException
11 from ..security import Scope
12 from ..services.ceph_service import CephService
13 from ..tools import ViewCache
14
15
@ApiController('/cephfs', Scope.CEPHFS)
class CephFS(RESTController):
    """REST controller for CephFS filesystems (/api/cephfs).

    Exposes the filesystem list, a per-filesystem status summary,
    per-MDS performance counters and the list of connected clients.
    """

    def __init__(self):
        super(CephFS, self).__init__()

        # Stateful instances of CephFSClients, hold cached results. Key to
        # dict is FSCID
        self.cephfs_clients = {}

    def list(self):
        """Return the raw ``filesystems`` list from the cluster's FSMap."""
        fsmap = mgr.get("fs_map")
        return fsmap['filesystems']

    def get(self, fs_id):
        """Return a detailed status summary for one filesystem.

        :param fs_id: filesystem ID (FSCID), as string or int.
        :raises DashboardException: if ``fs_id`` is not an integer.
        :raises cherrypy.HTTPError: 404 if no filesystem has this ID.
        """
        fs_id = self.fs_id_to_int(fs_id)

        return self.fs_status(fs_id)

    @RESTController.Resource('GET')
    def clients(self, fs_id):
        """Return the (view-cached) client session list of a filesystem.

        :raises cherrypy.HTTPError: 404 if the filesystem does not exist.
        """
        fs_id = self.fs_id_to_int(fs_id)

        return self._clients(fs_id)

    @RESTController.Resource('GET')
    def mds_counters(self, fs_id):
        """
        Result format: map of daemon name to map of counter to list of datapoints
        rtype: dict[str, dict[str, list]]
        """

        # Opinionated list of interesting performance counters for the GUI --
        # if you need something else just add it. See how simple life is
        # when you don't have to write general purpose APIs?
        counters = [
            "mds_server.handle_client_request",
            "mds_log.ev",
            "mds_cache.num_strays",
            "mds.exported",
            "mds.exported_inodes",
            "mds.imported",
            "mds.imported_inodes",
            "mds.inodes",
            "mds.caps",
            "mds.subtrees",
            "mds_mem.ino"
        ]

        fs_id = self.fs_id_to_int(fs_id)

        result = {}
        mds_names = self._get_mds_names(fs_id)

        for mds_name in mds_names:
            result[mds_name] = {}
            for counter in counters:
                # get_counter may return None for a counter the daemon does
                # not expose; otherwise it maps the counter name to its
                # datapoint list. Missing counters become empty lists so the
                # GUI always sees every key.
                data = mgr.get_counter("mds", mds_name, counter)
                if data is not None:
                    result[mds_name][counter] = data[counter]
                else:
                    result[mds_name][counter] = []

        return dict(result)

    @staticmethod
    def fs_id_to_int(fs_id):
        """Convert a filesystem ID to int, raising a DashboardException
        (component 'cephfs') on non-numeric input."""
        try:
            return int(fs_id)
        except ValueError:
            raise DashboardException(code='invalid_cephfs_id',
                                     msg="Invalid cephfs ID {}".format(fs_id),
                                     component='cephfs')

    def _get_mds_names(self, filesystem_id=None):
        """Return the names of all MDS daemons in the FSMap.

        :param filesystem_id: if given, restrict to that filesystem's
            daemons; otherwise include every filesystem plus standbys.
        """
        names = []

        fsmap = mgr.get("fs_map")
        for fs in fsmap['filesystems']:
            if filesystem_id is not None and fs['id'] != filesystem_id:
                continue
            names.extend([info['name']
                          for _, info in fs['mdsmap']['info'].items()])

        # Standby daemons belong to no single filesystem, so only include
        # them when no filter was requested.
        if filesystem_id is None:
            names.extend(info['name'] for info in fsmap['standbys'])

        return names

    def _append_mds_metadata(self, mds_versions, metadata_key):
        """Record the ceph_version of one MDS into the mds_versions map
        (version string -> list of daemon names). Silently skips daemons
        whose metadata is unavailable."""
        metadata = mgr.get_metadata('mds', metadata_key)
        if metadata is None:
            return
        mds_versions[metadata.get('ceph_version', 'unknown')].append(metadata_key)

    # pylint: disable=too-many-statements,too-many-branches
    def fs_status(self, fs_id):
        """Build the status summary for one filesystem.

        Returns a dict with the filesystem name, client count, a per-rank
        table (including standby-replay daemons), the pool usage table,
        the standby daemon list and a map of MDS versions.

        :raises cherrypy.HTTPError: 404 if ``fs_id`` is not in the FSMap.
        """
        mds_versions = defaultdict(list)

        fsmap = mgr.get("fs_map")
        filesystem = None
        for fs in fsmap['filesystems']:
            if fs['id'] == fs_id:
                filesystem = fs
                break

        if filesystem is None:
            raise cherrypy.HTTPError(404,
                                     "CephFS id {0} not found".format(fs_id))

        rank_table = []

        mdsmap = filesystem['mdsmap']

        client_count = 0

        for rank in mdsmap["in"]:
            # The mdsmap keys ranks as "mds_<rank>" and daemons as
            # "gid_<gid>".
            up = "mds_{0}".format(rank) in mdsmap["up"]
            if up:
                gid = mdsmap['up']["mds_{0}".format(rank)]
                info = mdsmap['info']['gid_{0}'.format(gid)]
                dns = mgr.get_latest("mds", info['name'], "mds_mem.dn")
                inos = mgr.get_latest("mds", info['name'], "mds_mem.ino")

                if rank == 0:
                    client_count = mgr.get_latest("mds", info['name'],
                                                  "mds_sessions.session_count")
                elif client_count == 0:
                    # In case rank 0 was down, look at another rank's
                    # sessionmap to get an indication of clients.
                    client_count = mgr.get_latest("mds", info['name'],
                                                  "mds_sessions.session_count")

                laggy = "laggy_since" in info

                # info['state'] looks like "up:active"; keep only the part
                # after the colon.
                state = info['state'].split(":")[1]
                if laggy:
                    state += "(laggy)"

                # Populate based on context of state, e.g. client
                # ops for an active daemon, replay progress, reconnect
                # progress
                if state == "active":
                    activity = CephService.get_rate("mds",
                                                    info['name'],
                                                    "mds_server.handle_client_request")
                else:
                    activity = 0.0

                self._append_mds_metadata(mds_versions, info['name'])
                rank_table.append(
                    {
                        "rank": rank,
                        "state": state,
                        "mds": info['name'],
                        "activity": activity,
                        "dns": dns,
                        "inos": inos
                    }
                )

            else:
                # Rank is in the map but no daemon holds it: report it as
                # failed with zeroed statistics.
                rank_table.append(
                    {
                        "rank": rank,
                        "state": "failed",
                        "mds": "",
                        "activity": 0.0,
                        "dns": 0,
                        "inos": 0
                    }
                )

        # Find the standby replays
        # pylint: disable=unused-variable
        for gid_str, daemon_info in mdsmap['info'].items():
            if daemon_info['state'] != "up:standby-replay":
                continue

            inos = mgr.get_latest("mds", daemon_info['name'], "mds_mem.ino")
            dns = mgr.get_latest("mds", daemon_info['name'], "mds_mem.dn")

            # For standby-replay, "activity" is journal replay rate rather
            # than client request rate.
            activity = CephService.get_rate(
                "mds", daemon_info['name'], "mds_log.replay")

            rank_table.append(
                {
                    "rank": "{0}-s".format(daemon_info['rank']),
                    "state": "standby-replay",
                    "mds": daemon_info['name'],
                    "activity": activity,
                    "dns": dns,
                    "inos": inos
                }
            )

        df = mgr.get("df")
        pool_stats = {p['id']: p['stats'] for p in df['pools']}
        osdmap = mgr.get("osd_map")
        pools = {p['pool']: p for p in osdmap['pools']}
        metadata_pool_id = mdsmap['metadata_pool']
        data_pool_ids = mdsmap['data_pools']

        # One row per pool used by this filesystem, metadata pool first.
        pools_table = []
        for pool_id in [metadata_pool_id] + data_pool_ids:
            pool_type = "metadata" if pool_id == metadata_pool_id else "data"
            stats = pool_stats[pool_id]
            pools_table.append({
                "pool": pools[pool_id]['pool_name'],
                "type": pool_type,
                "used": stats['bytes_used'],
                "avail": stats['max_avail']
            })

        standby_table = []
        for standby in fsmap['standbys']:
            self._append_mds_metadata(mds_versions, standby['name'])
            standby_table.append({
                'name': standby['name']
            })

        return {
            "cephfs": {
                "id": fs_id,
                "name": mdsmap['fs_name'],
                "client_count": client_count,
                "ranks": rank_table,
                "pools": pools_table
            },
            "standbys": standby_table,
            "versions": mds_versions
        }

    def _clients(self, fs_id):
        """Return ``{'status': ..., 'data': [...]}`` for the clients of one
        filesystem, creating and caching a CephFSClients fetcher per FSCID.

        :raises cherrypy.HTTPError: 404 if the filesystem does not exist.
        """
        cephfs_clients = self.cephfs_clients.get(fs_id, None)
        if cephfs_clients is None:
            cephfs_clients = CephFSClients(mgr, fs_id)
            self.cephfs_clients[fs_id] = cephfs_clients

        try:
            status, clients = cephfs_clients.get()
        except AttributeError:
            raise cherrypy.HTTPError(404,
                                     "No cephfs with id {0}".format(fs_id))

        if clients is None:
            raise cherrypy.HTTPError(404,
                                     "No cephfs with id {0}".format(fs_id))

        # Decorate the metadata with some fields that will be
        # independent of whether it's a kernel or userspace
        # client, so that the javascript doesn't have to grok that.
        for client in clients:
            if "ceph_version" in client['client_metadata']:
                client['type'] = "userspace"
                client['version'] = client['client_metadata']['ceph_version']
                client['hostname'] = client['client_metadata']['hostname']
            elif "kernel_version" in client['client_metadata']:
                client['type'] = "kernel"
                client['version'] = client['client_metadata']['kernel_version']
                client['hostname'] = client['client_metadata']['hostname']
            else:
                client['type'] = "unknown"
                client['version'] = ""
                client['hostname'] = ""

        return {
            'status': status,
            'data': clients
        }
285
286
class CephFSClients(object):
    """Cached accessor for the client sessions of one CephFS filesystem.

    Wraps the MDS ``session ls`` command behind a ViewCache so repeated
    dashboard requests reuse the last fetched result.
    """

    def __init__(self, module_inst, fscid):
        self._module = module_inst
        self.fscid = fscid

    @ViewCache()
    def get(self):
        """Fetch the session list from rank 0 of this filesystem."""
        rank_zero_spec = '{0}:0'.format(self.fscid)
        return CephService.send_command('mds', 'session ls',
                                        srv_spec=rank_zero_spec)