# ceph/src/pybind/mgr/nfs/export.py (ceph pacific 16.2.5)
import errno
import json
import logging
from typing import List
from os.path import isabs, normpath

from rados import TimedOut, ObjectNotFound

from .export_utils import GaneshaConfParser, Export
from .exception import NFSException, NFSInvalidOperation, NFSObjectNotFound, FSNotFound, \
        ClusterNotFound
from .utils import POOL_NAME, available_clusters, restart_nfs_service, check_fs

log = logging.getLogger(__name__)


def export_cluster_checker(func):
    def cluster_check(fs_export, *args, **kwargs):
        """
        This method checks if the cluster exists and sets the rados namespace.
        """
        if kwargs['cluster_id'] not in available_clusters(fs_export.mgr):
            return -errno.ENOENT, "", "Cluster does not exist"
        fs_export.rados_namespace = kwargs['cluster_id']
        return func(fs_export, *args, **kwargs)
    return cluster_check
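
# Illustrative note (not part of the original file): export_cluster_checker reads
# the cluster id from kwargs, so the ExportMgr methods it decorates must be called
# with cluster_id as a keyword argument, for example:
#
#     @export_cluster_checker
#     def delete_export(self, cluster_id, pseudo_path):
#         return self._delete_export(cluster_id, pseudo_path)
#
#     export_mgr.delete_export(cluster_id='mynfs', pseudo_path='/cephfs')  # hypothetical values
#
# Passing cluster_id positionally would raise KeyError inside cluster_check.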


def exception_handler(exception_obj, log_msg=""):
    if log_msg:
        log.exception(log_msg)
    return getattr(exception_obj, 'errno', -1), "", str(exception_obj)


class NFSRados:
    def __init__(self, mgr, namespace):
        self.mgr = mgr
        self.pool = POOL_NAME
        self.namespace = namespace

    def _make_rados_url(self, obj):
        return "rados://{}/{}/{}".format(self.pool, self.namespace, obj)

    def _create_url_block(self, obj_name):
        return {'block_name': '%url', 'value': self._make_rados_url(obj_name)}
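
    # Illustrative sketch (assumption, not in the original file; the POOL_NAME
    # value is assumed to be "nfs-ganesha"): for namespace "mynfs" and object
    # "export-1", _make_rados_url() yields "rados://nfs-ganesha/mynfs/export-1",
    # and _create_url_block() wraps it in the dict that
    # GaneshaConfParser.write_block() renders as
    #     %url "rados://nfs-ganesha/mynfs/export-1"
    # which write_obj() appends to the cluster's common config object
    # (conf-nfs.<cluster_id>).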

    def write_obj(self, conf_block, obj, config_obj=''):
        if 'export-' in obj:
            conf_block = GaneshaConfParser.write_block(conf_block)

        with self.mgr.rados.open_ioctx(self.pool) as ioctx:
            ioctx.set_namespace(self.namespace)
            ioctx.write_full(obj, conf_block.encode('utf-8'))
            if not config_obj:
                # Return after creating empty common config object
                return
            log.debug("write configuration into rados object "
                      f"{self.pool}/{self.namespace}/{obj}:\n{conf_block}")

            # Add created obj url to common config obj
            ioctx.append(config_obj, GaneshaConfParser.write_block(
                         self._create_url_block(obj)).encode('utf-8'))
            ExportMgr._check_rados_notify(ioctx, config_obj)
            log.debug(f"Added {obj} url to {config_obj}")

    def update_obj(self, conf_block, obj, config_obj):
        with self.mgr.rados.open_ioctx(self.pool) as ioctx:
            ioctx.set_namespace(self.namespace)
            ioctx.write_full(obj, conf_block.encode('utf-8'))
            log.debug("write configuration into rados object "
                      f"{self.pool}/{self.namespace}/{obj}:\n{conf_block}")
            ExportMgr._check_rados_notify(ioctx, config_obj)
            log.debug(f"Update export {obj} in {config_obj}")

    def remove_obj(self, obj, config_obj):
        with self.mgr.rados.open_ioctx(self.pool) as ioctx:
            ioctx.set_namespace(self.namespace)
            export_urls = ioctx.read(config_obj)
            url = '%url "{}"\n\n'.format(self._make_rados_url(obj))
            export_urls = export_urls.replace(url.encode('utf-8'), b'')
            ioctx.remove_object(obj)
            ioctx.write_full(config_obj, export_urls)
            ExportMgr._check_rados_notify(ioctx, config_obj)
            log.debug("Object deleted: {}".format(url))

    def remove_all_obj(self):
        with self.mgr.rados.open_ioctx(self.pool) as ioctx:
            ioctx.set_namespace(self.namespace)
            for obj in ioctx.list_objects():
                obj.remove()

    def check_user_config(self):
        with self.mgr.rados.open_ioctx(self.pool) as ioctx:
            ioctx.set_namespace(self.namespace)
            for obj in ioctx.list_objects():
                if obj.key.startswith("userconf-nfs"):
                    return True
        return False
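
# Object layout sketch (derived from the methods above, not upstream documentation):
# each NFS cluster uses its own RADOS namespace (named after the cluster id) inside
# the POOL_NAME pool, holding:
#   conf-nfs.<cluster_id>  - common config object, a list of %url lines
#   export-<export_id>     - one Ganesha EXPORT block per export
#   userconf-nfs*          - user-supplied config objects found by check_user_config()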


class ValidateExport:
    @staticmethod
    def pseudo_path(path):
        if not isabs(path) or path == "/":
            raise NFSInvalidOperation(f"pseudo path {path} is invalid. It should be an absolute "
                                      "path and it cannot be just '/'.")

    @staticmethod
    def squash(squash):
        valid_squash_ls = ["root", "root_squash", "rootsquash", "rootid", "root_id_squash",
                           "rootidsquash", "all", "all_squash", "allsquash", "all_anonymous",
                           "allanonymous", "no_root_squash", "none", "noidsquash"]
        if squash not in valid_squash_ls:
            raise NFSInvalidOperation(f"squash {squash} not in valid list {valid_squash_ls}")

    @staticmethod
    def security_label(label):
        if not isinstance(label, bool):
            raise NFSInvalidOperation('Only boolean values allowed')

    @staticmethod
    def protocols(proto):
        for p in proto:
            if p not in [3, 4]:
                raise NFSInvalidOperation(f"Invalid protocol {p}")
        if 3 in proto:
            log.warning("NFS v3 is an older version; it might not work as expected")

    @staticmethod
    def transport(transport):
        valid_transport = ["UDP", "TCP"]
        for trans in transport:
            if trans.upper() not in valid_transport:
                raise NFSInvalidOperation(f'{trans} is not a valid transport protocol')

    @staticmethod
    def access_type(access_type):
        valid_ones = ['RW', 'RO']
        if access_type not in valid_ones:
            raise NFSInvalidOperation(f'{access_type} is invalid; valid access types are '
                                      f'{valid_ones}')

    @staticmethod
    def fsal(mgr, old, new):
        if old.name != new['name']:
            raise NFSInvalidOperation('FSAL name change is not allowed')
        if old.user_id != new['user_id']:
            raise NFSInvalidOperation('User ID modification is not allowed')
        if new['sec_label_xattr']:
            raise NFSInvalidOperation('Security label xattr cannot be changed')
        if old.fs_name != new['fs_name']:
            if not check_fs(mgr, new['fs_name']):
                raise FSNotFound(new['fs_name'])
            return 1

    @staticmethod
    def _client(client):
        ValidateExport.access_type(client['access_type'])
        ValidateExport.squash(client['squash'])

    @staticmethod
    def clients(clients_ls):
        for client in clients_ls:
            ValidateExport._client(client)
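
# Illustrative example (assumption, not in the original file): a client entry passes
# ValidateExport.clients() if the two keys checked above are valid, e.g.
#
#     ValidateExport.clients([{'access_type': 'RO', 'squash': 'root_squash'}])
#
# The remaining per-client fields (such as the client addresses) are handled by the
# Export machinery in export_utils.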


class ExportMgr:
    def __init__(self, mgr, namespace=None, export_ls=None):
        self.mgr = mgr
        self.rados_pool = POOL_NAME
        self.rados_namespace = namespace
        self._exports = export_ls

    @staticmethod
    def _check_rados_notify(ioctx, obj):
        try:
            ioctx.notify(obj)
        except TimedOut:
            log.exception("Ganesha timed out")

    @property
    def exports(self):
        if self._exports is None:
            self._exports = {}
            log.info("Begin export parsing")
            for cluster_id in available_clusters(self.mgr):
                self.export_conf_objs = []  # type: List[Export]
                self._read_raw_config(cluster_id)
                self.exports[cluster_id] = self.export_conf_objs
                log.info(f"Exports parsed successfully {self.exports.items()}")
        return self._exports

    def _fetch_export(self, pseudo_path):
        try:
            for ex in self.exports[self.rados_namespace]:
                if ex.pseudo == pseudo_path:
                    return ex
        except KeyError:
            pass

    def _delete_user(self, entity):
        self.mgr.check_mon_command({
            'prefix': 'auth rm',
            'entity': 'client.{}'.format(entity),
            })
        log.info(f"Export user deleted is {entity}")

    def _gen_export_id(self):
        exports = sorted([ex.export_id for ex in self.exports[self.rados_namespace]])
        nid = 1
        for e_id in exports:
            if e_id == nid:
                nid += 1
            else:
                break
        return nid
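
    # Worked example (illustrative, not in the original file): export ids are
    # assigned lowest-free-first, so with existing ids [1, 2, 4] the loop stops
    # at the gap and _gen_export_id() returns 3; with no exports it returns 1.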

    def _read_raw_config(self, rados_namespace):
        with self.mgr.rados.open_ioctx(self.rados_pool) as ioctx:
            ioctx.set_namespace(rados_namespace)
            for obj in ioctx.list_objects():
                if obj.key.startswith("export-"):
                    size, _ = obj.stat()
                    raw_config = obj.read(size)
                    raw_config = raw_config.decode("utf-8")
                    log.debug("read export configuration from rados "
                              "object %s/%s/%s:\n%s", self.rados_pool,
                              rados_namespace, obj.key, raw_config)
                    self.export_conf_objs.append(Export.from_export_block(
                        GaneshaConfParser(raw_config).parse()[0], rados_namespace))

    def _save_export(self, export):
        self.exports[self.rados_namespace].append(export)
        NFSRados(self.mgr, self.rados_namespace).write_obj(
                export.to_export_block(),
                f'export-{export.export_id}', f'conf-nfs.{export.cluster_id}')

    def _delete_export(self, cluster_id, pseudo_path, export_obj=None):
        try:
            if export_obj:
                export = export_obj
            else:
                export = self._fetch_export(pseudo_path)

            if export:
                if pseudo_path:
                    NFSRados(self.mgr, self.rados_namespace).remove_obj(
                            f'export-{export.export_id}', f'conf-nfs.{cluster_id}')
                self.exports[cluster_id].remove(export)
                self._delete_user(export.fsal.user_id)
                if not self.exports[cluster_id]:
                    del self.exports[cluster_id]
                return 0, "Successfully deleted export", ""
            return 0, "", "Export does not exist"
        except Exception as e:
            return exception_handler(e, f"Failed to delete {pseudo_path} export for {cluster_id}")

    def _fetch_export_obj(self, ex_id):
        try:
            with self.mgr.rados.open_ioctx(self.rados_pool) as ioctx:
                ioctx.set_namespace(self.rados_namespace)
                export = Export.from_export_block(
                        GaneshaConfParser(ioctx.read(f"export-{ex_id}").decode("utf-8")).parse()[0],
                        self.rados_namespace)
                return export
        except ObjectNotFound:
            log.exception(f"Export ID: {ex_id} not found")

    def _update_export(self, export):
        self.exports[self.rados_namespace].append(export)
        NFSRados(self.mgr, self.rados_namespace).update_obj(
                GaneshaConfParser.write_block(export.to_export_block()),
                f'export-{export.export_id}', f'conf-nfs.{export.cluster_id}')

    def format_path(self, path):
        if path:
            path = normpath(path.strip())
            if path[:2] == "//":
                path = path[1:]
        return path
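
    # Worked examples (illustrative, not in the original file):
    #   format_path(' /a//b/ ')  -> '/a/b'     (strip + normpath)
    #   format_path('//cephfs/') -> '/cephfs'  (normpath keeps a leading '//',
    #                                           so the extra slash is dropped here)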

    @export_cluster_checker
    def create_export(self, **kwargs):
        try:
            fsal_type = kwargs.pop('fsal_type')
            if fsal_type == 'cephfs':
                return FSExport(self).create_export(**kwargs)
            raise NotImplementedError()
        except Exception as e:
            return exception_handler(e, f"Failed to create {kwargs['pseudo_path']} export for {kwargs['cluster_id']}")

    @export_cluster_checker
    def delete_export(self, cluster_id, pseudo_path):
        return self._delete_export(cluster_id, pseudo_path)

    def delete_all_exports(self, cluster_id):
        try:
            export_list = list(self.exports[cluster_id])
        except KeyError:
            log.info("No exports to delete")
            return
        self.rados_namespace = cluster_id
        for export in export_list:
            ret, out, err = self._delete_export(cluster_id=cluster_id, pseudo_path=None,
                                                export_obj=export)
            if ret != 0:
                raise NFSException(-1, f"Failed to delete exports: {err} and {ret}")
        log.info(f"All exports successfully deleted for cluster id: {cluster_id}")

    @export_cluster_checker
    def list_exports(self, cluster_id, detailed):
        try:
            if detailed:
                result = [export.to_dict() for export in self.exports[cluster_id]]
            else:
                result = [export.pseudo for export in self.exports[cluster_id]]
            return 0, json.dumps(result, indent=2), ''
        except KeyError:
            log.warning(f"No exports to list for {cluster_id}")
            return 0, '', ''
        except Exception as e:
            return exception_handler(e, f"Failed to list exports for {cluster_id}")

    @export_cluster_checker
    def get_export(self, cluster_id, pseudo_path):
        try:
            export = self._fetch_export(pseudo_path)
            if export:
                return 0, json.dumps(export.to_dict(), indent=2), ''
            log.warning(f"No {pseudo_path} export to show for {cluster_id}")
            return 0, '', ''
        except Exception as e:
            return exception_handler(e, f"Failed to get {pseudo_path} export for {cluster_id}")

    def update_export(self, export_config):
        try:
            if not export_config:
                raise NFSInvalidOperation("Empty Config!!")
            new_export = json.loads(export_config)
            # check export type
            return FSExport(self).update_export(new_export)
        except NotImplementedError:
            return 0, " Manual Restart of NFS PODS required for successful update of exports", ""
        except Exception as e:
            return exception_handler(e, f'Failed to update export: {e}')


class FSExport(ExportMgr):
    def __init__(self, export_mgr_obj):
        super().__init__(export_mgr_obj.mgr, export_mgr_obj.rados_namespace,
                         export_mgr_obj._exports)

    def _validate_export(self, new_export_dict):
        if new_export_dict['cluster_id'] not in available_clusters(self.mgr):
            raise ClusterNotFound()

        export = self._fetch_export(new_export_dict['pseudo'])
        out_msg = ''
        if export:
            # Check if export id matches
            if export.export_id != new_export_dict['export_id']:
                raise NFSInvalidOperation('Export ID changed, Cannot update export')
        else:
            # Fetch export based on export id object
            export = self._fetch_export_obj(new_export_dict['export_id'])
            if not export:
                raise NFSObjectNotFound('Export does not exist')
            else:
                new_export_dict['pseudo'] = self.format_path(new_export_dict['pseudo'])
                ValidateExport.pseudo_path(new_export_dict['pseudo'])
                log.debug(f"Pseudo path has changed from {export.pseudo} to "
                          f"{new_export_dict['pseudo']}")
        # Check if squash changed
        if export.squash != new_export_dict['squash']:
            if new_export_dict['squash']:
                new_export_dict['squash'] = new_export_dict['squash'].lower()
            ValidateExport.squash(new_export_dict['squash'])
            log.debug(f"squash has changed from {export.squash} to {new_export_dict['squash']}")
        # Security label check
        if export.security_label != new_export_dict['security_label']:
            ValidateExport.security_label(new_export_dict['security_label'])
        # Protocol Checking
        if export.protocols != new_export_dict['protocols']:
            ValidateExport.protocols(new_export_dict['protocols'])
        # Transport checking
        if export.transports != new_export_dict['transports']:
            ValidateExport.transport(new_export_dict['transports'])
        # Path check
        if export.path != new_export_dict['path']:
            new_export_dict['path'] = self.format_path(new_export_dict['path'])
            out_msg = 'update caps'
        # Check Access Type
        if export.access_type != new_export_dict['access_type']:
            ValidateExport.access_type(new_export_dict['access_type'])
        # Fsal block check
        if export.fsal != new_export_dict['fsal']:
            ret = ValidateExport.fsal(self.mgr, export.fsal, new_export_dict['fsal'])
            if ret == 1 and not out_msg:
                out_msg = 'update caps'
        # Check client block
        if export.clients != new_export_dict['clients']:
            ValidateExport.clients(new_export_dict['clients'])
        log.debug(f'Validation succeeded for Export {export.pseudo}')
        return export, out_msg

    def _update_user_id(self, path, access_type, fs_name, user_id):
        osd_cap = 'allow rw pool={} namespace={}, allow rw tag cephfs data={}'.format(
                self.rados_pool, self.rados_namespace, fs_name)
        access_type = 'r' if access_type == 'RO' else 'rw'

        self.mgr.check_mon_command({
            'prefix': 'auth caps',
            'entity': f'client.{user_id}',
            'caps': ['mon', 'allow r', 'osd', osd_cap, 'mds', 'allow {} path={}'.format(
                access_type, path)],
            })

        log.info(f"Export user updated {user_id}")

    def _create_user_key(self, entity, path, fs_name, fs_ro):
        osd_cap = 'allow rw pool={} namespace={}, allow rw tag cephfs data={}'.format(
                self.rados_pool, self.rados_namespace, fs_name)
        access_type = 'r' if fs_ro else 'rw'

        ret, out, err = self.mgr.check_mon_command({
            'prefix': 'auth get-or-create',
            'entity': 'client.{}'.format(entity),
            'caps': ['mon', 'allow r', 'osd', osd_cap, 'mds', 'allow {} path={}'.format(
                access_type, path)],
            'format': 'json',
            })

        json_res = json.loads(out)
        log.info("Export user created is {}".format(json_res[0]['entity']))
        return json_res[0]['entity'], json_res[0]['key']
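
    # Illustrative caps sketch (assumption, not in the original file): for the
    # POOL_NAME pool, namespace 'mynfs', fs_name 'myfs', path '/' and a read-only
    # export, _create_user_key() asks the mons for roughly:
    #   mon 'allow r'
    #   osd 'allow rw pool=<POOL_NAME> namespace=mynfs, allow rw tag cephfs data=myfs'
    #   mds 'allow r path=/'
    # _update_user_id() re-issues the same caps via 'auth caps' when the export
    # path or backing filesystem changes.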

    def create_export(self, fs_name, cluster_id, pseudo_path, read_only, path):
        if not check_fs(self.mgr, fs_name):
            raise FSNotFound(fs_name)

        pseudo_path = self.format_path(pseudo_path)
        ValidateExport.pseudo_path(pseudo_path)

        if cluster_id not in self.exports:
            self.exports[cluster_id] = []

        if not self._fetch_export(pseudo_path):
            ex_id = self._gen_export_id()
            user_id = f"{cluster_id}{ex_id}"
            user_out, key = self._create_user_key(user_id, path, fs_name, read_only)
            access_type = "RW"
            if read_only:
                access_type = "RO"
            ex_dict = {
                    'path': self.format_path(path),
                    'pseudo': pseudo_path,
                    'cluster_id': cluster_id,
                    'access_type': access_type,
                    'fsal': {"name": "CEPH", "user_id": user_id,
                             "fs_name": fs_name, "sec_label_xattr": ""},
                    'clients': []
                    }
            export = Export.from_dict(ex_id, ex_dict)
            export.fsal.cephx_key = key
            self._save_export(export)
            result = {
                    "bind": pseudo_path,
                    "fs": fs_name,
                    "path": path,
                    "cluster": cluster_id,
                    "mode": access_type,
                    }
            return (0, json.dumps(result, indent=4), '')
        return 0, "", "Export already exists"
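
    # Illustrative result (derived from the code above; values are made up):
    # create_export('myfs', 'mynfs', '/cephfs', False, '/') returns status 0 and
    # a JSON body such as:
    #   {
    #     "bind": "/cephfs",
    #     "fs": "myfs",
    #     "path": "/",
    #     "cluster": "mynfs",
    #     "mode": "RW"
    #   }
    # while a second call with the same pseudo path returns
    # (0, "", "Export already exists").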

    def update_export(self, new_export):
        old_export, update_user_caps = self._validate_export(new_export)
        if update_user_caps:
            self._update_user_id(new_export['path'], new_export['access_type'],
                                 new_export['fsal']['fs_name'], new_export['fsal']['user_id'])
        new_export = Export.from_dict(new_export['export_id'], new_export)
        new_export.fsal.cephx_key = old_export.fsal.cephx_key
        self._update_export(new_export)
        export_ls = self.exports[self.rados_namespace]
        if old_export not in export_ls:
            # This happens when export is fetched by ID
            old_export = self._fetch_export(old_export.pseudo)
        export_ls.remove(old_export)
        restart_nfs_service(self.mgr, new_export.cluster_id)
        return 0, "Successfully updated export", ""
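

# Illustrative input sketch (assumption, not in the original file): the JSON accepted
# by ExportMgr.update_export() carries at least the fields that _validate_export()
# compares, for example:
#
#   {
#     "export_id": 1,
#     "path": "/",
#     "pseudo": "/cephfs",
#     "cluster_id": "mynfs",
#     "access_type": "RW",
#     "squash": "no_root_squash",
#     "security_label": true,
#     "protocols": [4],
#     "transports": ["TCP"],
#     "fsal": {"name": "CEPH", "user_id": "mynfs1",
#              "fs_name": "myfs", "sec_label_xattr": ""},
#     "clients": []
#   }
#
# A change to the path or the backing fs_name triggers a cephx caps update before
# the export object is rewritten and the NFS service is restarted.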