import errno
import json
import logging
from typing import List
import socket
from os.path import isabs, normpath

from ceph.deployment.service_spec import NFSServiceSpec, PlacementSpec
from rados import TimedOut

import orchestrator

from .fs_util import create_pool

log = logging.getLogger(__name__)
POOL_NAME = 'nfs-ganesha'


def available_clusters(mgr):
    '''
    This method returns a list of available cluster ids.
    It removes the 'ganesha-' prefix from the cluster service ids returned by cephadm.
    Example:
        completion.result value:
        <ServiceDescription of <NFSServiceSpec for service_name=nfs.ganesha-vstart>>
        return value: ['ganesha-vstart'] -> ['vstart']
    '''
    # TODO check cephadm cluster list with rados pool conf objects
    completion = mgr.describe_service(service_type='nfs')
    mgr._orchestrator_wait([completion])
    orchestrator.raise_if_exception(completion)
    return [cluster.spec.service_id.replace('ganesha-', '', 1) for cluster in completion.result
            if cluster.spec.service_id]


def export_cluster_checker(func):
    def cluster_check(fs_export, *args, **kwargs):
        """
        This method checks if the cluster exists and sets the rados namespace.
        """
        if kwargs['cluster_id'] not in available_clusters(fs_export.mgr):
            return -errno.ENOENT, "", "Cluster does not exist"
        fs_export.rados_namespace = kwargs['cluster_id']
        return func(fs_export, *args, **kwargs)
    return cluster_check


def cluster_setter(func):
    def set_pool_ns_clusterid(nfs, *args, **kwargs):
        nfs._set_pool_namespace(kwargs['cluster_id'])
        nfs._set_cluster_id(kwargs['cluster_id'])
        return func(nfs, *args, **kwargs)
    return set_pool_ns_clusterid


class GaneshaConfParser(object):
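    """
    Parse raw NFS-Ganesha configuration text into a list of nested block
    dictionaries (see parse()), and serialize such dictionaries back into
    Ganesha configuration syntax (see write_block()).
    """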
    def __init__(self, raw_config):
        self.pos = 0
        self.text = ""
        self.clean_config(raw_config)

    def clean_config(self, raw_config):
        for line in raw_config.split("\n"):
            self.text += line
            if line.startswith("%"):
                self.text += "\n"

    def remove_whitespaces_quotes(self):
        if self.text.startswith("%url"):
            self.text = self.text.replace('"', "")
        else:
            self.text = "".join(self.text.split())

    def stream(self):
        return self.text[self.pos:]

    def parse_block_name(self):
        idx = self.stream().find('{')
        if idx == -1:
            raise Exception("Cannot find block name")
        block_name = self.stream()[:idx]
        self.pos += idx+1
        return block_name

    def parse_block_or_section(self):
        if self.stream().startswith("%url "):
            # section line
            self.pos += 5
            idx = self.stream().find('\n')
            if idx == -1:
                value = self.stream()
                self.pos += len(value)
            else:
                value = self.stream()[:idx]
                self.pos += idx+1
            block_dict = {'block_name': '%url', 'value': value}
            return block_dict

        block_dict = {'block_name': self.parse_block_name().upper()}
        self.parse_block_body(block_dict)
        if self.stream()[0] != '}':
            raise Exception("No closing bracket '}' found at the end of block")
        self.pos += 1
        return block_dict

    def parse_parameter_value(self, raw_value):
        if raw_value.find(',') != -1:
            return [self.parse_parameter_value(v.strip())
                    for v in raw_value.split(',')]
        try:
            return int(raw_value)
        except ValueError:
            if raw_value == "true":
                return True
            if raw_value == "false":
                return False
            if raw_value.find('"') == 0:
                return raw_value[1:-1]
            return raw_value

    def parse_stanza(self, block_dict):
        equal_idx = self.stream().find('=')
        if equal_idx == -1:
            raise Exception("Malformed stanza: no equal symbol found.")
        semicolon_idx = self.stream().find(';')
        parameter_name = self.stream()[:equal_idx].lower()
        parameter_value = self.stream()[equal_idx+1:semicolon_idx]
        block_dict[parameter_name] = self.parse_parameter_value(parameter_value)
        self.pos += semicolon_idx+1

    def parse_block_body(self, block_dict):
        while True:
            if self.stream().find('}') == 0:
                # block end
                return

            last_pos = self.pos
            semicolon_idx = self.stream().find(';')
            lbracket_idx = self.stream().find('{')
            is_semicolon = (semicolon_idx != -1)
            is_lbracket = (lbracket_idx != -1)
            is_semicolon_lt_lbracket = (semicolon_idx < lbracket_idx)

            if is_semicolon and ((is_lbracket and is_semicolon_lt_lbracket) or not is_lbracket):
                self.parse_stanza(block_dict)
            elif is_lbracket and ((is_semicolon and not is_semicolon_lt_lbracket) or
                                  (not is_semicolon)):
                if '_blocks_' not in block_dict:
                    block_dict['_blocks_'] = []
                block_dict['_blocks_'].append(self.parse_block_or_section())
            else:
                raise Exception("Malformed stanza: no semicolon found.")

            if last_pos == self.pos:
                raise Exception("Infinite loop while parsing block content")

    def parse(self):
        self.remove_whitespaces_quotes()
        blocks = []
        while self.stream():
            blocks.append(self.parse_block_or_section())
        return blocks

    @staticmethod
    def _indentation(depth, size=4):
        conf_str = ""
        for _ in range(0, depth*size):
            conf_str += " "
        return conf_str

    @staticmethod
    def write_block_body(block, depth=0):
        def format_val(key, val):
            if isinstance(val, list):
                return ', '.join([format_val(key, v) for v in val])
            if isinstance(val, bool):
                return str(val).lower()
            if isinstance(val, int) or (block['block_name'] == 'CLIENT'
                                        and key == 'clients'):
                return '{}'.format(val)
            return '"{}"'.format(val)

        conf_str = ""
        for key, val in block.items():
            if key == 'block_name':
                continue
            elif key == '_blocks_':
                for blo in val:
                    conf_str += GaneshaConfParser.write_block(blo, depth)
            elif val:
                conf_str += GaneshaConfParser._indentation(depth)
                conf_str += '{} = {};\n'.format(key, format_val(key, val))
        return conf_str

    @staticmethod
    def write_block(block, depth=0):
        if block['block_name'] == "%url":
            return '%url "{}"\n\n'.format(block['value'])

        conf_str = ""
        conf_str += GaneshaConfParser._indentation(depth)
        conf_str += format(block['block_name'])
        conf_str += " {\n"
        conf_str += GaneshaConfParser.write_block_body(block, depth+1)
        conf_str += GaneshaConfParser._indentation(depth)
        conf_str += "}\n"
        return conf_str


class CephFSFSal():
    def __init__(self, name, user_id=None, fs_name=None, sec_label_xattr=None,
                 cephx_key=None):
        self.name = name
        self.fs_name = fs_name
        self.user_id = user_id
        self.sec_label_xattr = sec_label_xattr
        self.cephx_key = cephx_key

    @classmethod
    def from_fsal_block(cls, fsal_block):
        return cls(fsal_block['name'],
                   fsal_block.get('user_id', None),
                   fsal_block.get('filesystem', None),
                   fsal_block.get('sec_label_xattr', None),
                   fsal_block.get('secret_access_key', None))

    def to_fsal_block(self):
        result = {
            'block_name': 'FSAL',
            'name': self.name,
        }
        if self.user_id:
            result['user_id'] = self.user_id
        if self.fs_name:
            result['filesystem'] = self.fs_name
        if self.sec_label_xattr:
            result['sec_label_xattr'] = self.sec_label_xattr
        if self.cephx_key:
            result['secret_access_key'] = self.cephx_key
        return result

    @classmethod
    def from_dict(cls, fsal_dict):
        return cls(fsal_dict['name'], fsal_dict['user_id'],
                   fsal_dict['fs_name'], fsal_dict['sec_label_xattr'], None)

    def to_dict(self):
        return {
            'name': self.name,
            'user_id': self.user_id,
            'fs_name': self.fs_name,
            'sec_label_xattr': self.sec_label_xattr
        }


class Client(object):
    def __init__(self, addresses, access_type=None, squash=None):
        self.addresses = addresses
        self.access_type = access_type
        self.squash = squash

    @classmethod
    def from_client_block(cls, client_block):
        addresses = client_block['clients']
        if not isinstance(addresses, list):
            addresses = [addresses]
        return cls(addresses,
                   client_block.get('access_type', None),
                   client_block.get('squash', None))

    def to_client_block(self):
        result = {
            'block_name': 'CLIENT',
            'clients': self.addresses,
        }
        if self.access_type:
            result['access_type'] = self.access_type
        if self.squash:
            result['squash'] = self.squash
        return result

    @classmethod
    def from_dict(cls, client_dict):
        return cls(client_dict['addresses'], client_dict['access_type'],
                   client_dict['squash'])

    def to_dict(self):
        return {
            'addresses': self.addresses,
            'access_type': self.access_type,
            'squash': self.squash
        }


class NFSRados:
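    """
    Helper around the 'nfs-ganesha' rados pool: writes, appends to and removes
    the Ganesha config objects stored in a cluster's rados namespace.
    """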
    def __init__(self, mgr, namespace):
        self.mgr = mgr
        self.pool = POOL_NAME
        self.namespace = namespace

    def _make_rados_url(self, obj):
        return "rados://{}/{}/{}".format(self.pool, self.namespace, obj)

    def _create_url_block(self, obj_name):
        return {'block_name': '%url', 'value': self._make_rados_url(obj_name)}

    def write_obj(self, conf_block, obj, config_obj=''):
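        """
        Write a config block to the rados object 'obj'. If 'config_obj' is
        given, a %url reference to 'obj' is also appended to that common
        config object and Ganesha is notified to reload it.
        """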
        if 'export-' in obj:
            conf_block = GaneshaConfParser.write_block(conf_block)

        with self.mgr.rados.open_ioctx(self.pool) as ioctx:
            ioctx.set_namespace(self.namespace)
            ioctx.write_full(obj, conf_block.encode('utf-8'))
            if not config_obj:
                # Return after creating empty common config object
                return
            log.debug("write configuration into rados object "
                      f"{self.pool}/{self.namespace}/{obj}:\n{conf_block}")

            # Add created obj url to common config obj
            ioctx.append(config_obj, GaneshaConfParser.write_block(
                         self._create_url_block(obj)).encode('utf-8'))
            FSExport._check_rados_notify(ioctx, config_obj)
            log.debug(f"Added {obj} url to {config_obj}")

    def remove_obj(self, obj, config_obj):
        with self.mgr.rados.open_ioctx(self.pool) as ioctx:
            ioctx.set_namespace(self.namespace)
            export_urls = ioctx.read(config_obj)
            url = '%url "{}"\n\n'.format(self._make_rados_url(obj))
            export_urls = export_urls.replace(url.encode('utf-8'), b'')
            ioctx.remove_object(obj)
            ioctx.write_full(config_obj, export_urls)
            FSExport._check_rados_notify(ioctx, config_obj)
            log.debug("Object deleted: {}".format(url))

    def remove_all_obj(self):
        with self.mgr.rados.open_ioctx(self.pool) as ioctx:
            ioctx.set_namespace(self.namespace)
            for obj in ioctx.list_objects():
                obj.remove()

    def check_user_config(self):
        with self.mgr.rados.open_ioctx(self.pool) as ioctx:
            ioctx.set_namespace(self.namespace)
            for obj in ioctx.list_objects():
                if obj.key.startswith("userconf-nfs"):
                    return True
        return False


class Export(object):
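    """
    In-memory representation of a single NFS-Ganesha EXPORT block, with
    conversions to and from parsed config blocks and plain dictionaries.
    """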
    # pylint: disable=R0902
    def __init__(self, export_id, path, fsal, cluster_id, pseudo,
                 access_type='R', clients=None):
        self.export_id = export_id
        self.path = path
        self.fsal = fsal
        self.cluster_id = cluster_id
        self.pseudo = pseudo
        self.access_type = access_type
        self.squash = 'no_root_squash'
        self.attr_expiration_time = 0
        self.security_label = True
        self.protocols = [4]
        self.transports = ["TCP"]
        self.clients = clients

    @classmethod
    def from_export_block(cls, export_block, cluster_id):
        log.debug("parsing export block: %s", export_block)

        fsal_block = [b for b in export_block['_blocks_']
                      if b['block_name'] == "FSAL"]

        client_blocks = [b for b in export_block['_blocks_']
                         if b['block_name'] == "CLIENT"]

        return cls(export_block['export_id'],
                   export_block['path'],
                   CephFSFSal.from_fsal_block(fsal_block[0]),
                   cluster_id,
                   export_block['pseudo'],
                   export_block['access_type'],
                   [Client.from_client_block(client)
                    for client in client_blocks])

    def to_export_block(self):
        # pylint: disable=too-many-branches
        result = {
            'block_name': 'EXPORT',
            'export_id': self.export_id,
            'path': self.path,
            'pseudo': self.pseudo,
            'access_type': self.access_type,
            'squash': self.squash,
            'attr_expiration_time': self.attr_expiration_time,
            'security_label': self.security_label,
            'protocols': self.protocols,
            'transports': self.transports,
        }
        result['_blocks_'] = [self.fsal.to_fsal_block()]
        result['_blocks_'].extend([client.to_client_block()
                                   for client in self.clients])
        return result

    @classmethod
    def from_dict(cls, export_id, ex_dict):
        return cls(export_id,
                   ex_dict['path'],
                   CephFSFSal.from_dict(ex_dict['fsal']),
                   ex_dict['cluster_id'],
                   ex_dict['pseudo'],
                   ex_dict['access_type'],
                   [Client.from_dict(client) for client in ex_dict['clients']])

    def to_dict(self):
        return {
            'export_id': self.export_id,
            'path': self.path,
            'cluster_id': self.cluster_id,
            'pseudo': self.pseudo,
            'access_type': self.access_type,
            'squash': self.squash,
            'security_label': self.security_label,
            'protocols': sorted([p for p in self.protocols]),
            'transports': sorted([t for t in self.transports]),
            'fsal': self.fsal.to_dict(),
            'clients': [client.to_dict() for client in self.clients]
        }


class FSExport(object):
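    """
    Manages CephFS exports for NFS clusters: creates and deletes export
    objects (and their cephx users) in each cluster's rados namespace and
    keeps an in-memory cache of parsed exports.
    """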
    def __init__(self, mgr, namespace=None):
        self.mgr = mgr
        self.rados_pool = POOL_NAME
        self.rados_namespace = namespace
        self._exports = None

    @staticmethod
    def _check_rados_notify(ioctx, obj):
        try:
            ioctx.notify(obj)
        except TimedOut:
            log.exception("Ganesha timed out")

    @property
    def exports(self):
        if self._exports is None:
            self._exports = {}
            log.info("Begin export parsing")
            for cluster_id in available_clusters(self.mgr):
                self.export_conf_objs = []  # type: List[Export]
                self._read_raw_config(cluster_id)
                self.exports[cluster_id] = self.export_conf_objs
                log.info(f"Exports parsed successfully {self.exports.items()}")
        return self._exports

    def _fetch_export(self, pseudo_path):
        try:
            for ex in self.exports[self.rados_namespace]:
                if ex.pseudo == pseudo_path:
                    return ex
        except KeyError:
            pass

    def _create_user_key(self, entity, path, fs_name):
        osd_cap = 'allow rw pool={} namespace={}, allow rw tag cephfs data={}'.format(
            self.rados_pool, self.rados_namespace, fs_name)

        ret, out, err = self.mgr.check_mon_command({
            'prefix': 'auth get-or-create',
            'entity': 'client.{}'.format(entity),
            'caps': ['mon', 'allow r', 'osd', osd_cap, 'mds', 'allow rw path={}'.format(path)],
            'format': 'json',
            })

        json_res = json.loads(out)
        log.info("Export user created is {}".format(json_res[0]['entity']))
        return json_res[0]['entity'], json_res[0]['key']

    def _delete_user(self, entity):
        self.mgr.check_mon_command({
            'prefix': 'auth rm',
            'entity': 'client.{}'.format(entity),
            })
        log.info(f"Export user deleted is {entity}")

    def _gen_export_id(self):
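        """
        Return the lowest export id that is not already in use for the
        current cluster.
        """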
        exports = sorted([ex.export_id for ex in self.exports[self.rados_namespace]])
        nid = 1
        for e_id in exports:
            if e_id == nid:
                nid += 1
            else:
                break
        return nid

    def _read_raw_config(self, rados_namespace):
        with self.mgr.rados.open_ioctx(self.rados_pool) as ioctx:
            ioctx.set_namespace(rados_namespace)
            for obj in ioctx.list_objects():
                if obj.key.startswith("export-"):
                    size, _ = obj.stat()
                    raw_config = obj.read(size)
                    raw_config = raw_config.decode("utf-8")
                    log.debug("read export configuration from rados "
                              "object %s/%s/%s:\n%s", self.rados_pool,
                              rados_namespace, obj.key, raw_config)
                    self.export_conf_objs.append(Export.from_export_block(
                        GaneshaConfParser(raw_config).parse()[0], rados_namespace))

    def _save_export(self, export):
        self.exports[self.rados_namespace].append(export)
        NFSRados(self.mgr, self.rados_namespace).write_obj(export.to_export_block(),
                 f'export-{export.export_id}', f'conf-nfs.ganesha-{export.cluster_id}')

    def _delete_export(self, cluster_id, pseudo_path, export_obj=None):
        try:
            if export_obj:
                export = export_obj
            else:
                export = self._fetch_export(pseudo_path)

            if export:
                if pseudo_path:
                    NFSRados(self.mgr, self.rados_namespace).remove_obj(
                        f'export-{export.export_id}', f'conf-nfs.ganesha-{cluster_id}')
                self.exports[cluster_id].remove(export)
                self._delete_user(export.fsal.user_id)
                if not self.exports[cluster_id]:
                    del self.exports[cluster_id]
                return 0, "Successfully deleted export", ""
            return 0, "", "Export does not exist"
        except Exception as e:
            log.exception(f"Failed to delete {pseudo_path} export for {cluster_id}")
            return getattr(e, 'errno', -1), "", str(e)

    def format_path(self, path):
        if path:
            path = normpath(path.strip())
            if path[:2] == "//":
                path = path[1:]
        return path

    def check_fs(self, fs_name):
        fs_map = self.mgr.get('fs_map')
        return fs_name in [fs['mdsmap']['fs_name'] for fs in fs_map['filesystems']]

    @export_cluster_checker
    def create_export(self, fs_name, cluster_id, pseudo_path, read_only, path):
        try:
            if not self.check_fs(fs_name):
                return -errno.ENOENT, "", f"filesystem {fs_name} not found"

            pseudo_path = self.format_path(pseudo_path)
            if not isabs(pseudo_path) or pseudo_path == "/":
                return -errno.EINVAL, "", f"pseudo path {pseudo_path} is invalid. "\
                    "It should be an absolute path and it cannot be just '/'."

            if cluster_id not in self.exports:
                self.exports[cluster_id] = []

            if not self._fetch_export(pseudo_path):
                ex_id = self._gen_export_id()
                user_id = f"{cluster_id}{ex_id}"
                user_out, key = self._create_user_key(user_id, path, fs_name)
                access_type = "RW"
                if read_only:
                    access_type = "RO"
                ex_dict = {
                    'path': self.format_path(path),
                    'pseudo': pseudo_path,
                    'cluster_id': cluster_id,
                    'access_type': access_type,
                    'fsal': {"name": "CEPH", "user_id": user_id,
                             "fs_name": fs_name, "sec_label_xattr": ""},
                    'clients': []
                }
                export = Export.from_dict(ex_id, ex_dict)
                export.fsal.cephx_key = key
                self._save_export(export)
                result = {
                    "bind": pseudo_path,
                    "fs": fs_name,
                    "path": path,
                    "cluster": cluster_id,
                    "mode": access_type,
                }
                return (0, json.dumps(result, indent=4), '')
            return 0, "", "Export already exists"
        except Exception as e:
            log.exception(f"Failed to create {pseudo_path} export for {cluster_id}")
            return -errno.EINVAL, "", str(e)

    @export_cluster_checker
    def delete_export(self, cluster_id, pseudo_path):
        return self._delete_export(cluster_id, pseudo_path)

    def delete_all_exports(self, cluster_id):
        try:
            export_list = list(self.exports[cluster_id])
        except KeyError:
            log.info("No exports to delete")
            return
        self.rados_namespace = cluster_id
        for export in export_list:
            ret, out, err = self._delete_export(cluster_id=cluster_id, pseudo_path=None,
                                                export_obj=export)
            if ret != 0:
                raise Exception(f"Failed to delete exports: {err} and {ret}")
        log.info(f"All exports successfully deleted for cluster id: {cluster_id}")

    @export_cluster_checker
    def list_exports(self, cluster_id, detailed):
        try:
            if detailed:
                result = [export.to_dict() for export in self.exports[cluster_id]]
            else:
                result = [export.pseudo for export in self.exports[cluster_id]]
            return 0, json.dumps(result, indent=2), ''
        except KeyError:
            log.warning(f"No exports to list for {cluster_id}")
            return 0, '', ''
        except Exception as e:
            log.exception(f"Failed to list exports for {cluster_id}")
            return getattr(e, 'errno', -1), "", str(e)

    @export_cluster_checker
    def get_export(self, cluster_id, pseudo_path):
        try:
            export = self._fetch_export(pseudo_path)
            if export:
                return 0, json.dumps(export.to_dict(), indent=2), ''
            log.warning(f"No {pseudo_path} export to show for {cluster_id}")
            return 0, '', ''
        except Exception as e:
            log.exception(f"Failed to get {pseudo_path} export for {cluster_id}")
            return getattr(e, 'errno', -1), "", str(e)


class NFSCluster:
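    """
    Manages NFS-Ganesha clusters deployed through the orchestrator:
    create/update/delete clusters, list them, show cluster info, and set or
    reset the user-supplied Ganesha configuration.
    """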
    def __init__(self, mgr):
        self.pool_name = POOL_NAME
        self.pool_ns = ''
        self.mgr = mgr

    def _set_cluster_id(self, cluster_id):
        self.cluster_id = f"ganesha-{cluster_id}"

    def _set_pool_namespace(self, cluster_id):
        self.pool_ns = cluster_id

    def _get_common_conf_obj_name(self):
        return f'conf-nfs.{self.cluster_id}'

    def _get_user_conf_obj_name(self):
        return f'userconf-nfs.{self.cluster_id}'

    def _call_orch_apply_nfs(self, placement):
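        """
        Build an NFSServiceSpec for this cluster and ask the orchestrator to
        apply it with the requested placement.
        """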
        spec = NFSServiceSpec(service_type='nfs', service_id=self.cluster_id,
                              pool=self.pool_name, namespace=self.pool_ns,
                              placement=PlacementSpec.from_string(placement))
        completion = self.mgr.apply_nfs(spec)
        self.mgr._orchestrator_wait([completion])
        orchestrator.raise_if_exception(completion)

    def create_empty_rados_obj(self):
        common_conf = self._get_common_conf_obj_name()
        NFSRados(self.mgr, self.pool_ns).write_obj('', self._get_common_conf_obj_name())
        log.info(f"Created empty object: {common_conf}")

    def delete_config_obj(self):
        NFSRados(self.mgr, self.pool_ns).remove_all_obj()
        log.info(f"Deleted {self._get_common_conf_obj_name()} object and all objects in "
                 f"{self.pool_ns}")

    def _restart_nfs_service(self):
        completion = self.mgr.service_action(action='restart',
                                             service_name='nfs.'+self.cluster_id)
        self.mgr._orchestrator_wait([completion])
        orchestrator.raise_if_exception(completion)

    @cluster_setter
    def create_nfs_cluster(self, export_type, cluster_id, placement):
        if export_type != 'cephfs':
            return -errno.EINVAL, "", f"Invalid export type: {export_type}"
        try:
            pool_list = [p['pool_name'] for p in self.mgr.get_osdmap().dump().get('pools', [])]

            if self.pool_name not in pool_list:
                r, out, err = create_pool(self.mgr, self.pool_name)
                if r != 0:
                    return r, out, err
                log.info(f"Pool Status: {out}")

                self.mgr.check_mon_command({'prefix': 'osd pool application enable',
                                            'pool': self.pool_name, 'app': 'nfs'})

            self.create_empty_rados_obj()

            if cluster_id not in available_clusters(self.mgr):
                self._call_orch_apply_nfs(placement)
                return 0, "NFS Cluster Created Successfully", ""
            return 0, "", f"{cluster_id} cluster already exists"
        except Exception as e:
            log.exception(f"NFS Cluster {cluster_id} could not be created")
            return getattr(e, 'errno', -1), "", str(e)

    @cluster_setter
    def update_nfs_cluster(self, cluster_id, placement):
        try:
            if cluster_id in available_clusters(self.mgr):
                self._call_orch_apply_nfs(placement)
                return 0, "NFS Cluster Updated Successfully", ""
            return -errno.ENOENT, "", "Cluster does not exist"
        except Exception as e:
            log.exception(f"NFS Cluster {cluster_id} could not be updated")
            return getattr(e, 'errno', -1), "", str(e)

    @cluster_setter
    def delete_nfs_cluster(self, cluster_id):
        try:
            cluster_list = available_clusters(self.mgr)
            if cluster_id in cluster_list:
                self.mgr.fs_export.delete_all_exports(cluster_id)
                completion = self.mgr.remove_service('nfs.' + self.cluster_id)
                self.mgr._orchestrator_wait([completion])
                orchestrator.raise_if_exception(completion)
                self.delete_config_obj()
                return 0, "NFS Cluster Deleted Successfully", ""
            return 0, "", "Cluster does not exist"
        except Exception as e:
            log.exception(f"Failed to delete NFS Cluster {cluster_id}")
            return getattr(e, 'errno', -1), "", str(e)

    def list_nfs_cluster(self):
        try:
            return 0, '\n'.join(available_clusters(self.mgr)), ""
        except Exception as e:
            log.exception("Failed to list NFS Cluster")
            return getattr(e, 'errno', -1), "", str(e)

    def _show_nfs_cluster_info(self, cluster_id):
        self._set_cluster_id(cluster_id)
        completion = self.mgr.list_daemons(daemon_type='nfs')
        self.mgr._orchestrator_wait([completion])
        orchestrator.raise_if_exception(completion)
        host_ip = []
        # Here completion.result is a list of DaemonDescription objects
        for cluster in completion.result:
            if self.cluster_id == cluster.service_id():
                """
                getaddrinfo sample output: [(<AddressFamily.AF_INET: 2>,
                <SocketKind.SOCK_STREAM: 1>, 6, 'xyz', ('172.217.166.98',2049)),
                (<AddressFamily.AF_INET6: 10>, <SocketKind.SOCK_STREAM: 1>, 6, '',
                ('2404:6800:4009:80d::200e', 2049, 0, 0))]
                """
                try:
                    host_ip.append({
                        "hostname": cluster.hostname,
                        "ip": list(set([ip[4][0] for ip in socket.getaddrinfo(
                            cluster.hostname, 2049, flags=socket.AI_CANONNAME,
                            type=socket.SOCK_STREAM)])),
                        "port": 2049  # Default ganesha port
                        })
                except socket.gaierror:
                    continue
        return host_ip

    def show_nfs_cluster_info(self, cluster_id=None):
        try:
            cluster_ls = []
            info_res = {}
            if cluster_id:
                cluster_ls = [cluster_id]
            else:
                cluster_ls = available_clusters(self.mgr)

            for cluster_id in cluster_ls:
                res = self._show_nfs_cluster_info(cluster_id)
                if res:
                    info_res[cluster_id] = res
            return (0, json.dumps(info_res, indent=4), '')
        except Exception as e:
            log.exception("Failed to show info for cluster")
            return getattr(e, 'errno', -1), "", str(e)

    @cluster_setter
    def set_nfs_cluster_config(self, cluster_id, nfs_config):
        try:
            if not nfs_config:
                return -errno.EINVAL, "", "Empty Config!!"
            if cluster_id in available_clusters(self.mgr):
                rados_obj = NFSRados(self.mgr, self.pool_ns)
                if rados_obj.check_user_config():
                    return 0, "", "NFS-Ganesha User Config already exists"
                rados_obj.write_obj(nfs_config, self._get_user_conf_obj_name(),
                                    self._get_common_conf_obj_name())
                self._restart_nfs_service()
                return 0, "NFS-Ganesha Config Set Successfully", ""
            return -errno.ENOENT, "", "Cluster does not exist"
        except Exception as e:
            log.exception(f"Setting NFS-Ganesha Config failed for {cluster_id}")
            return getattr(e, 'errno', -1), "", str(e)

    @cluster_setter
    def reset_nfs_cluster_config(self, cluster_id):
        try:
            if cluster_id in available_clusters(self.mgr):
                rados_obj = NFSRados(self.mgr, self.pool_ns)
                if not rados_obj.check_user_config():
                    return 0, "", "NFS-Ganesha User Config does not exist"
                rados_obj.remove_obj(self._get_user_conf_obj_name(),
                                     self._get_common_conf_obj_name())
                self._restart_nfs_service()
                return 0, "NFS-Ganesha Config Reset Successfully", ""
            return -errno.ENOENT, "", "Cluster does not exist"
        except Exception as e:
            log.exception(f"Resetting NFS-Ganesha Config failed for {cluster_id}")
            return getattr(e, 'errno', -1), "", str(e)