import errno
import json
import logging
from typing import List
import socket
from os.path import isabs, normpath

from ceph.deployment.service_spec import NFSServiceSpec, PlacementSpec
from rados import TimedOut, ObjectNotFound

import orchestrator

from .fs_util import create_pool

log = logging.getLogger(__name__)
POOL_NAME = 'nfs-ganesha'


def available_clusters(mgr):
    '''
    This method returns a list of available cluster ids.
    Service name is of the form service_type.service_id
    Example:
    completion.result value:
    <ServiceDescription of <NFSServiceSpec for service_name=nfs.vstart>>
    return value: ['vstart']
    '''
    # TODO check cephadm cluster list with rados pool conf objects
    completion = mgr.describe_service(service_type='nfs')
    orchestrator.raise_if_exception(completion)
    return [cluster.spec.service_id for cluster in completion.result
            if cluster.spec.service_id]


def restart_nfs_service(mgr, cluster_id):
    '''
    This method restarts the nfs daemons.
    '''
    completion = mgr.service_action(action='restart',
                                    service_name='nfs.' + cluster_id)
    orchestrator.raise_if_exception(completion)


def export_cluster_checker(func):
    def cluster_check(fs_export, *args, **kwargs):
        """
        This method checks if the cluster exists and sets the rados namespace.
        """
        if kwargs['cluster_id'] not in available_clusters(fs_export.mgr):
            return -errno.ENOENT, "", "Cluster does not exist"
        fs_export.rados_namespace = kwargs['cluster_id']
        return func(fs_export, *args, **kwargs)
    return cluster_check


def cluster_setter(func):
    def set_pool_ns_clusterid(nfs, *args, **kwargs):
        nfs._set_pool_namespace(kwargs['cluster_id'])
        nfs._set_cluster_id(kwargs['cluster_id'])
        return func(nfs, *args, **kwargs)
    return set_pool_ns_clusterid


class FSExportError(Exception):
    def __init__(self, err_msg, errno=-errno.EINVAL):
        self.errno = errno
        self.err_msg = err_msg

    def __str__(self):
        return self.err_msg

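
# GaneshaConfParser round-trips NFS-Ganesha configuration text. An illustrative
# (hypothetical) sample of the kind of input parse() accepts:
#
#   %url "rados://nfs-ganesha/vstart/export-1"
#   EXPORT {
#       Export_Id = 1;
#       Path = "/";
#       Pseudo = "/cephfs";
#       FSAL {
#           Name = "CEPH";
#       }
#   }
#
# Block names are stored upper-cased under 'block_name', nested blocks under
# '_blocks_', parameter names lower-cased; write_block() emits the text back.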

class GaneshaConfParser(object):
    def __init__(self, raw_config):
        self.pos = 0
        self.text = ""
        self.clean_config(raw_config)

    def clean_config(self, raw_config):
        for line in raw_config.split("\n"):
            self.text += line
            if line.startswith("%"):
                self.text += "\n"

    def remove_whitespaces_quotes(self):
        if self.text.startswith("%url"):
            self.text = self.text.replace('"', "")
        else:
            self.text = "".join(self.text.split())

    def stream(self):
        return self.text[self.pos:]

    def parse_block_name(self):
        idx = self.stream().find('{')
        if idx == -1:
            raise Exception("Cannot find block name")
        block_name = self.stream()[:idx]
        self.pos += idx + 1
        return block_name

    def parse_block_or_section(self):
        if self.stream().startswith("%url "):
            # section line
            self.pos += 5
            idx = self.stream().find('\n')
            if idx == -1:
                value = self.stream()
                self.pos += len(value)
            else:
                value = self.stream()[:idx]
                self.pos += idx + 1
            block_dict = {'block_name': '%url', 'value': value}
            return block_dict

        block_dict = {'block_name': self.parse_block_name().upper()}
        self.parse_block_body(block_dict)
        if self.stream()[0] != '}':
            raise Exception("No closing bracket '}' found at the end of block")
        self.pos += 1
        return block_dict

    def parse_parameter_value(self, raw_value):
        if raw_value.find(',') != -1:
            return [self.parse_parameter_value(v.strip())
                    for v in raw_value.split(',')]
        try:
            return int(raw_value)
        except ValueError:
            if raw_value == "true":
                return True
            if raw_value == "false":
                return False
            if raw_value.find('"') == 0:
                return raw_value[1:-1]
            return raw_value

    def parse_stanza(self, block_dict):
        equal_idx = self.stream().find('=')
        if equal_idx == -1:
            raise Exception("Malformed stanza: no equal symbol found.")
        semicolon_idx = self.stream().find(';')
        parameter_name = self.stream()[:equal_idx].lower()
        parameter_value = self.stream()[equal_idx + 1:semicolon_idx]
        block_dict[parameter_name] = self.parse_parameter_value(parameter_value)
        self.pos += semicolon_idx + 1

    def parse_block_body(self, block_dict):
        while True:
            if self.stream().find('}') == 0:
                # block end
                return

            last_pos = self.pos
            semicolon_idx = self.stream().find(';')
            lbracket_idx = self.stream().find('{')
            is_semicolon = (semicolon_idx != -1)
            is_lbracket = (lbracket_idx != -1)
            is_semicolon_lt_lbracket = (semicolon_idx < lbracket_idx)

            if is_semicolon and ((is_lbracket and is_semicolon_lt_lbracket) or not is_lbracket):
                self.parse_stanza(block_dict)
            elif is_lbracket and ((is_semicolon and not is_semicolon_lt_lbracket) or
                                  (not is_semicolon)):
                if '_blocks_' not in block_dict:
                    block_dict['_blocks_'] = []
                block_dict['_blocks_'].append(self.parse_block_or_section())
            else:
                raise Exception("Malformed stanza: no semicolon found.")

            if last_pos == self.pos:
                raise Exception("Infinite loop while parsing block content")

    def parse(self):
        self.remove_whitespaces_quotes()
        blocks = []
        while self.stream():
            blocks.append(self.parse_block_or_section())
        return blocks

    @staticmethod
    def _indentation(depth, size=4):
        conf_str = ""
        for _ in range(0, depth * size):
            conf_str += " "
        return conf_str

    @staticmethod
    def write_block_body(block, depth=0):
        def format_val(key, val):
            if isinstance(val, list):
                return ', '.join([format_val(key, v) for v in val])
            if isinstance(val, bool):
                return str(val).lower()
            if isinstance(val, int) or (block['block_name'] == 'CLIENT'
                                        and key == 'clients'):
                return '{}'.format(val)
            return '"{}"'.format(val)

        conf_str = ""
        for key, val in block.items():
            if key == 'block_name':
                continue
            elif key == '_blocks_':
                for blo in val:
                    conf_str += GaneshaConfParser.write_block(blo, depth)
            elif val:
                conf_str += GaneshaConfParser._indentation(depth)
                conf_str += '{} = {};\n'.format(key, format_val(key, val))
        return conf_str

    @staticmethod
    def write_block(block, depth=0):
        if block['block_name'] == "%url":
            return '%url "{}"\n\n'.format(block['value'])

        conf_str = ""
        conf_str += GaneshaConfParser._indentation(depth)
        conf_str += format(block['block_name'])
        conf_str += " {\n"
        conf_str += GaneshaConfParser.write_block_body(block, depth + 1)
        conf_str += GaneshaConfParser._indentation(depth)
        conf_str += "}\n"
        return conf_str

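
# Illustrative round trip (assumed usage, not part of this module's public API):
#   blocks = GaneshaConfParser(raw_text).parse()
#   raw_text_again = ''.join(GaneshaConfParser.write_block(b) for b in blocks)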

class CephFSFSal():
    def __init__(self, name, user_id=None, fs_name=None, sec_label_xattr=None,
                 cephx_key=None):
        self.name = name
        self.fs_name = fs_name
        self.user_id = user_id
        self.sec_label_xattr = sec_label_xattr
        self.cephx_key = cephx_key

    @classmethod
    def from_fsal_block(cls, fsal_block):
        return cls(fsal_block['name'],
                   fsal_block.get('user_id', None),
                   fsal_block.get('filesystem', None),
                   fsal_block.get('sec_label_xattr', None),
                   fsal_block.get('secret_access_key', None))

    def to_fsal_block(self):
        result = {
            'block_name': 'FSAL',
            'name': self.name,
        }
        if self.user_id:
            result['user_id'] = self.user_id
        if self.fs_name:
            result['filesystem'] = self.fs_name
        if self.sec_label_xattr:
            result['sec_label_xattr'] = self.sec_label_xattr
        if self.cephx_key:
            result['secret_access_key'] = self.cephx_key
        return result

    @classmethod
    def from_dict(cls, fsal_dict):
        return cls(fsal_dict['name'], fsal_dict['user_id'],
                   fsal_dict['fs_name'], fsal_dict['sec_label_xattr'], None)

    def to_dict(self):
        return {
            'name': self.name,
            'user_id': self.user_id,
            'fs_name': self.fs_name,
            'sec_label_xattr': self.sec_label_xattr
        }


class Client(object):
    def __init__(self, addresses, access_type=None, squash=None):
        self.addresses = addresses
        self.access_type = access_type
        self.squash = squash

    @classmethod
    def from_client_block(cls, client_block):
        addresses = client_block['clients']
        if not isinstance(addresses, list):
            addresses = [addresses]
        return cls(addresses,
                   client_block.get('access_type', None),
                   client_block.get('squash', None))

    def to_client_block(self):
        result = {
            'block_name': 'CLIENT',
            'clients': self.addresses,
        }
        if self.access_type:
            result['access_type'] = self.access_type
        if self.squash:
            result['squash'] = self.squash
        return result

    @classmethod
    def from_dict(cls, client_dict):
        return cls(client_dict['addresses'], client_dict['access_type'],
                   client_dict['squash'])

    def to_dict(self):
        return {
            'addresses': self.addresses,
            'access_type': self.access_type,
            'squash': self.squash
        }

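
# Per-cluster Ganesha configuration lives in the 'nfs-ganesha' pool, in a RADOS
# namespace equal to the cluster id: 'conf-nfs.<cluster_id>' holds the common
# config (a list of %url lines), 'export-<export_id>' holds one export block
# each, and 'userconf-nfs.<cluster_id>' holds any user-supplied config.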

class NFSRados:
    def __init__(self, mgr, namespace):
        self.mgr = mgr
        self.pool = POOL_NAME
        self.namespace = namespace

    def _make_rados_url(self, obj):
        return "rados://{}/{}/{}".format(self.pool, self.namespace, obj)

    def _create_url_block(self, obj_name):
        return {'block_name': '%url', 'value': self._make_rados_url(obj_name)}

    def write_obj(self, conf_block, obj, config_obj=''):
        if 'export-' in obj:
            conf_block = GaneshaConfParser.write_block(conf_block)

        with self.mgr.rados.open_ioctx(self.pool) as ioctx:
            ioctx.set_namespace(self.namespace)
            ioctx.write_full(obj, conf_block.encode('utf-8'))
            if not config_obj:
                # Return after creating empty common config object
                return
            log.debug("write configuration into rados object "
                      f"{self.pool}/{self.namespace}/{obj}:\n{conf_block}")

            # Add created obj url to common config obj
            ioctx.append(config_obj, GaneshaConfParser.write_block(
                self._create_url_block(obj)).encode('utf-8'))
            FSExport._check_rados_notify(ioctx, config_obj)
            log.debug(f"Added {obj} url to {config_obj}")

    def update_obj(self, conf_block, obj, config_obj):
        with self.mgr.rados.open_ioctx(self.pool) as ioctx:
            ioctx.set_namespace(self.namespace)
            ioctx.write_full(obj, conf_block.encode('utf-8'))
            log.debug("write configuration into rados object "
                      f"{self.pool}/{self.namespace}/{obj}:\n{conf_block}")
            FSExport._check_rados_notify(ioctx, config_obj)
            log.debug(f"Update export {obj} in {config_obj}")

    def remove_obj(self, obj, config_obj):
        with self.mgr.rados.open_ioctx(self.pool) as ioctx:
            ioctx.set_namespace(self.namespace)
            export_urls = ioctx.read(config_obj)
            url = '%url "{}"\n\n'.format(self._make_rados_url(obj))
            export_urls = export_urls.replace(url.encode('utf-8'), b'')
            ioctx.remove_object(obj)
            ioctx.write_full(config_obj, export_urls)
            FSExport._check_rados_notify(ioctx, config_obj)
            log.debug("Object deleted: {}".format(url))

    def remove_all_obj(self):
        with self.mgr.rados.open_ioctx(self.pool) as ioctx:
            ioctx.set_namespace(self.namespace)
            for obj in ioctx.list_objects():
                obj.remove()

    def check_user_config(self):
        with self.mgr.rados.open_ioctx(self.pool) as ioctx:
            ioctx.set_namespace(self.namespace)
            for obj in ioctx.list_objects():
                if obj.key.startswith("userconf-nfs"):
                    return True
        return False


class Export(object):
    # pylint: disable=R0902
    def __init__(self, export_id, path, cluster_id, pseudo, access_type, squash, security_label,
                 protocols, transports, fsal, clients=None):
        self.export_id = export_id
        self.path = path
        self.fsal = fsal
        self.cluster_id = cluster_id
        self.pseudo = pseudo
        self.access_type = access_type
        self.squash = squash
        self.attr_expiration_time = 0
        self.security_label = security_label
        self.protocols = protocols
        self.transports = transports
        self.clients = clients

    @classmethod
    def from_export_block(cls, export_block, cluster_id):
        log.debug("parsing export block: %s", export_block)

        fsal_block = [b for b in export_block['_blocks_']
                      if b['block_name'] == "FSAL"]

        client_blocks = [b for b in export_block['_blocks_']
                         if b['block_name'] == "CLIENT"]

        return cls(export_block['export_id'],
                   export_block['path'],
                   cluster_id,
                   export_block['pseudo'],
                   export_block['access_type'],
                   export_block['squash'],
                   export_block['security_label'],
                   export_block['protocols'],
                   export_block['transports'],
                   CephFSFSal.from_fsal_block(fsal_block[0]),
                   [Client.from_client_block(client)
                    for client in client_blocks])

    def to_export_block(self):
        # pylint: disable=too-many-branches
        result = {
            'block_name': 'EXPORT',
            'export_id': self.export_id,
            'path': self.path,
            'pseudo': self.pseudo,
            'access_type': self.access_type,
            'squash': self.squash,
            'attr_expiration_time': self.attr_expiration_time,
            'security_label': self.security_label,
            'protocols': self.protocols,
            'transports': self.transports,
        }
        result['_blocks_'] = [self.fsal.to_fsal_block()]
        result['_blocks_'].extend([client.to_client_block()
                                   for client in self.clients])
        return result

    @classmethod
    def from_dict(cls, export_id, ex_dict):
        return cls(export_id,
                   ex_dict['path'],
                   ex_dict['cluster_id'],
                   ex_dict['pseudo'],
                   ex_dict.get('access_type', 'R'),
                   ex_dict.get('squash', 'no_root_squash'),
                   ex_dict.get('security_label', True),
                   ex_dict.get('protocols', [4]),
                   ex_dict.get('transports', ['TCP']),
                   CephFSFSal.from_dict(ex_dict['fsal']),
                   [Client.from_dict(client) for client in ex_dict['clients']])

    def to_dict(self):
        return {
            'export_id': self.export_id,
            'path': self.path,
            'cluster_id': self.cluster_id,
            'pseudo': self.pseudo,
            'access_type': self.access_type,
            'squash': self.squash,
            'security_label': self.security_label,
            'protocols': sorted([p for p in self.protocols]),
            'transports': sorted([t for t in self.transports]),
            'fsal': self.fsal.to_dict(),
            'clients': [client.to_dict() for client in self.clients]
        }

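
# FSExport backs the 'nfs export ...' mgr commands; like the other handlers in
# this module, its public methods return the (return code, stdout, stderr)
# tuple that the ceph-mgr command framework expects.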
class FSExport(object):
    def __init__(self, mgr, namespace=None):
        self.mgr = mgr
        self.rados_pool = POOL_NAME
        self.rados_namespace = namespace
        self._exports = None

    @staticmethod
    def _check_rados_notify(ioctx, obj):
        try:
            ioctx.notify(obj)
        except TimedOut:
            log.exception("Ganesha timed out")

    @property
    def exports(self):
        if self._exports is None:
            self._exports = {}
            log.info("Begin export parsing")
            for cluster_id in available_clusters(self.mgr):
                self.export_conf_objs = []  # type: List[Export]
                self._read_raw_config(cluster_id)
                self.exports[cluster_id] = self.export_conf_objs
                log.info(f"Exports parsed successfully {self.exports.items()}")
        return self._exports

    def _fetch_export(self, pseudo_path):
        try:
            for ex in self.exports[self.rados_namespace]:
                if ex.pseudo == pseudo_path:
                    return ex
        except KeyError:
            pass

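    # Each export gets its own cephx user; the caps below restrict it to the
    # nfs-ganesha pool/namespace and to the exported CephFS path (read-only or
    # read-write depending on the export mode).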
    def _create_user_key(self, entity, path, fs_name, fs_ro):
        osd_cap = 'allow rw pool={} namespace={}, allow rw tag cephfs data={}'.format(
            self.rados_pool, self.rados_namespace, fs_name)
        access_type = 'r' if fs_ro else 'rw'

        ret, out, err = self.mgr.check_mon_command({
            'prefix': 'auth get-or-create',
            'entity': 'client.{}'.format(entity),
            'caps': ['mon', 'allow r', 'osd', osd_cap, 'mds', 'allow {} path={}'.format(
                access_type, path)],
            'format': 'json',
        })

        json_res = json.loads(out)
        log.info("Export user created is {}".format(json_res[0]['entity']))
        return json_res[0]['entity'], json_res[0]['key']

    def _delete_user(self, entity):
        self.mgr.check_mon_command({
            'prefix': 'auth rm',
            'entity': 'client.{}'.format(entity),
        })
        log.info(f"Export user deleted is {entity}")

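    # Export ids are reused: the smallest positive integer not already taken by
    # an export in this cluster is handed out next.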
    def _gen_export_id(self):
        exports = sorted([ex.export_id for ex in self.exports[self.rados_namespace]])
        nid = 1
        for e_id in exports:
            if e_id == nid:
                nid += 1
            else:
                break
        return nid

    def _read_raw_config(self, rados_namespace):
        with self.mgr.rados.open_ioctx(self.rados_pool) as ioctx:
            ioctx.set_namespace(rados_namespace)
            for obj in ioctx.list_objects():
                if obj.key.startswith("export-"):
                    size, _ = obj.stat()
                    raw_config = obj.read(size)
                    raw_config = raw_config.decode("utf-8")
                    log.debug("read export configuration from rados "
                              "object %s/%s/%s:\n%s", self.rados_pool,
                              rados_namespace, obj.key, raw_config)
                    self.export_conf_objs.append(Export.from_export_block(
                        GaneshaConfParser(raw_config).parse()[0], rados_namespace))

    def _save_export(self, export):
        self.exports[self.rados_namespace].append(export)
        NFSRados(self.mgr, self.rados_namespace).write_obj(
            export.to_export_block(),
            f'export-{export.export_id}', f'conf-nfs.{export.cluster_id}')

    def _delete_export(self, cluster_id, pseudo_path, export_obj=None):
        try:
            if export_obj:
                export = export_obj
            else:
                export = self._fetch_export(pseudo_path)

            if export:
                if pseudo_path:
                    NFSRados(self.mgr, self.rados_namespace).remove_obj(
                        f'export-{export.export_id}', f'conf-nfs.{cluster_id}')
                self.exports[cluster_id].remove(export)
                self._delete_user(export.fsal.user_id)
                if not self.exports[cluster_id]:
                    del self.exports[cluster_id]
                return 0, "Successfully deleted export", ""
            return 0, "", "Export does not exist"
        except Exception as e:
            log.exception(f"Failed to delete {pseudo_path} export for {cluster_id}")
            return getattr(e, 'errno', -1), "", str(e)

    def format_path(self, path):
        if path:
            path = normpath(path.strip())
            if path[:2] == "//":
                path = path[1:]
        return path

    def check_fs(self, fs_name):
        fs_map = self.mgr.get('fs_map')
        return fs_name in [fs['mdsmap']['fs_name'] for fs in fs_map['filesystems']]

    @export_cluster_checker
    def create_export(self, fs_name, cluster_id, pseudo_path, read_only, path):
        try:
            if not self.check_fs(fs_name):
                return -errno.ENOENT, "", f"filesystem {fs_name} not found"

            pseudo_path = self.format_path(pseudo_path)
            self._validate_pseudo_path(pseudo_path)

            if cluster_id not in self.exports:
                self.exports[cluster_id] = []

            if not self._fetch_export(pseudo_path):
                ex_id = self._gen_export_id()
                user_id = f"{cluster_id}{ex_id}"
                user_out, key = self._create_user_key(user_id, path, fs_name, read_only)
                access_type = "RW"
                if read_only:
                    access_type = "RO"
                ex_dict = {
                    'path': self.format_path(path),
                    'pseudo': pseudo_path,
                    'cluster_id': cluster_id,
                    'access_type': access_type,
                    'fsal': {"name": "CEPH", "user_id": user_id,
                             "fs_name": fs_name, "sec_label_xattr": ""},
                    'clients': []
                }
                export = Export.from_dict(ex_id, ex_dict)
                export.fsal.cephx_key = key
                self._save_export(export)
                result = {
                    "bind": pseudo_path,
                    "fs": fs_name,
                    "path": path,
                    "cluster": cluster_id,
                    "mode": access_type,
                }
                return (0, json.dumps(result, indent=4), '')
            return 0, "", "Export already exists"
        except Exception as e:
            log.exception(f"Failed to create {pseudo_path} export for {cluster_id}")
            return getattr(e, 'errno', -1), "", str(e)

    @export_cluster_checker
    def delete_export(self, cluster_id, pseudo_path):
        return self._delete_export(cluster_id, pseudo_path)

    def delete_all_exports(self, cluster_id):
        try:
            export_list = list(self.exports[cluster_id])
        except KeyError:
            log.info("No exports to delete")
            return
        self.rados_namespace = cluster_id
        for export in export_list:
            ret, out, err = self._delete_export(cluster_id=cluster_id, pseudo_path=None,
                                                export_obj=export)
            if ret != 0:
                raise Exception(f"Failed to delete exports: {err} and {ret}")
        log.info(f"All exports successfully deleted for cluster id: {cluster_id}")

    @export_cluster_checker
    def list_exports(self, cluster_id, detailed):
        try:
            if detailed:
                result = [export.to_dict() for export in self.exports[cluster_id]]
            else:
                result = [export.pseudo for export in self.exports[cluster_id]]
            return 0, json.dumps(result, indent=2), ''
        except KeyError:
            log.warning(f"No exports to list for {cluster_id}")
            return 0, '', ''
        except Exception as e:
            log.exception(f"Failed to list exports for {cluster_id}")
            return getattr(e, 'errno', -1), "", str(e)

    @export_cluster_checker
    def get_export(self, cluster_id, pseudo_path):
        try:
            export = self._fetch_export(pseudo_path)
            if export:
                return 0, json.dumps(export.to_dict(), indent=2), ''
            log.warning(f"No {pseudo_path} export to show for {cluster_id}")
            return 0, '', ''
        except Exception as e:
            log.exception(f"Failed to get {pseudo_path} export for {cluster_id}")
            return getattr(e, 'errno', -1), "", str(e)

    def _validate_pseudo_path(self, path):
        if not isabs(path) or path == "/":
            raise FSExportError(f"pseudo path {path} is invalid. "
                                "It should be an absolute path and it cannot be just '/'.")

    def _validate_squash(self, squash):
        valid_squash_ls = ["root", "root_squash", "rootsquash", "rootid", "root_id_squash",
                           "rootidsquash", "all", "all_squash", "allsquash", "all_anonymous",
                           "allanonymous", "no_root_squash", "none", "noidsquash"]
        if squash not in valid_squash_ls:
            raise FSExportError(f"squash {squash} not in valid list {valid_squash_ls}")

    def _validate_security_label(self, label):
        if not isinstance(label, bool):
            raise FSExportError('Only boolean values allowed')

    def _validate_protocols(self, proto):
        for p in proto:
            if p not in [3, 4]:
                raise FSExportError(f"Invalid protocol {p}")
        if 3 in proto:
            log.warning("NFS V3 is an old version, it might not work")

    def _validate_transport(self, transport):
        valid_transport = ["UDP", "TCP"]
        for trans in transport:
            if trans.upper() not in valid_transport:
                raise FSExportError(f'{trans} is not a valid transport protocol')

    def _validate_access_type(self, access_type):
        valid_ones = ['RW', 'RO']
        if access_type not in valid_ones:
            raise FSExportError(f'{access_type} is invalid, valid access types are {valid_ones}')

    def _validate_fsal(self, old, new):
        if old.name != new['name']:
            raise FSExportError('FSAL name change not allowed')
        if old.user_id != new['user_id']:
            raise FSExportError('User ID modification is not allowed')
        if new['sec_label_xattr']:
            raise FSExportError('Security label xattr cannot be changed')
        if old.fs_name != new['fs_name']:
            if not self.check_fs(new['fs_name']):
                raise FSExportError(f"filesystem {new['fs_name']} not found")
            return 1

    def _validate_client(self, client):
        self._validate_access_type(client['access_type'])
        self._validate_squash(client['squash'])

    def _validate_clients(self, clients_ls):
        for client in clients_ls:
            self._validate_client(client)

    def _fetch_export_obj(self, ex_id):
        try:
            with self.mgr.rados.open_ioctx(self.rados_pool) as ioctx:
                ioctx.set_namespace(self.rados_namespace)
                export = Export.from_export_block(
                    GaneshaConfParser(ioctx.read(f"export-{ex_id}").decode("utf-8")).parse()[0],
                    self.rados_namespace)
                return export
        except ObjectNotFound:
            log.exception(f"Export ID: {ex_id} not found")

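    # Validation for 'export update': the new export dict is matched against the
    # existing export (looked up by pseudo path, or by export id if the pseudo
    # path itself changed); the returned 'update caps' hint tells update_export()
    # that the cephx caps must be refreshed because the path or filesystem changed.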
    def _validate_export(self, new_export_dict):
        if new_export_dict['cluster_id'] not in available_clusters(self.mgr):
            raise FSExportError(f"Cluster {new_export_dict['cluster_id']} does not exist",
                                -errno.ENOENT)
        export = self._fetch_export(new_export_dict['pseudo'])
        out_msg = ''
        if export:
            # Check if export id matches
            if export.export_id != new_export_dict['export_id']:
                raise FSExportError('Export ID changed, cannot update export')
        else:
            # Fetch export based on export id object
            export = self._fetch_export_obj(new_export_dict['export_id'])
            if not export:
                raise FSExportError('Export does not exist')
            else:
                new_export_dict['pseudo'] = self.format_path(new_export_dict['pseudo'])
                self._validate_pseudo_path(new_export_dict['pseudo'])
                log.debug(f"Pseudo path has changed from {export.pseudo} to "
                          f"{new_export_dict['pseudo']}")
        # Check if squash changed
        if export.squash != new_export_dict['squash']:
            if new_export_dict['squash']:
                new_export_dict['squash'] = new_export_dict['squash'].lower()
                self._validate_squash(new_export_dict['squash'])
            log.debug(f"squash has changed from {export.squash} to {new_export_dict['squash']}")
        # Security label check
        if export.security_label != new_export_dict['security_label']:
            self._validate_security_label(new_export_dict['security_label'])
        # Protocol checking
        if export.protocols != new_export_dict['protocols']:
            self._validate_protocols(new_export_dict['protocols'])
        # Transport checking
        if export.transports != new_export_dict['transports']:
            self._validate_transport(new_export_dict['transports'])
        # Path check
        if export.path != new_export_dict['path']:
            new_export_dict['path'] = self.format_path(new_export_dict['path'])
            out_msg = 'update caps'
        # Check access type
        if export.access_type != new_export_dict['access_type']:
            self._validate_access_type(new_export_dict['access_type'])
        # FSAL block check
        if export.fsal != new_export_dict['fsal']:
            ret = self._validate_fsal(export.fsal, new_export_dict['fsal'])
            if ret == 1 and not out_msg:
                out_msg = 'update caps'
        # Check client block
        if export.clients != new_export_dict['clients']:
            self._validate_clients(new_export_dict['clients'])
        log.debug(f'Validation succeeded for Export {export.pseudo}')
        return export, out_msg

    def _update_user_id(self, path, access_type, fs_name, user_id):
        osd_cap = 'allow rw pool={} namespace={}, allow rw tag cephfs data={}'.format(
            self.rados_pool, self.rados_namespace, fs_name)
        access_type = 'r' if access_type == 'RO' else 'rw'

        self.mgr.check_mon_command({
            'prefix': 'auth caps',
            'entity': f'client.{user_id}',
            'caps': ['mon', 'allow r', 'osd', osd_cap, 'mds', 'allow {} path={}'.format(
                access_type, path)],
        })

        log.info(f"Export user updated {user_id}")

    def _update_export(self, export):
        self.exports[self.rados_namespace].append(export)
        NFSRados(self.mgr, self.rados_namespace).update_obj(
            GaneshaConfParser.write_block(export.to_export_block()),
            f'export-{export.export_id}', f'conf-nfs.{export.cluster_id}')

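    # update_export takes a JSON document shaped like Export.to_dict(); an
    # illustrative (hypothetical) payload:
    #   {"export_id": 1, "path": "/", "cluster_id": "vstart", "pseudo": "/cephfs",
    #    "access_type": "RW", "squash": "no_root_squash", "security_label": true,
    #    "protocols": [4], "transports": ["TCP"],
    #    "fsal": {"name": "CEPH", "user_id": "vstart1", "fs_name": "a",
    #             "sec_label_xattr": ""},
    #    "clients": []}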
    def update_export(self, export_config):
        try:
            if not export_config:
                return -errno.EINVAL, "", "Empty Config!!"
            update_export = json.loads(export_config)
            old_export, update_user_caps = self._validate_export(update_export)
            if update_user_caps:
                self._update_user_id(update_export['path'], update_export['access_type'],
                                     update_export['fsal']['fs_name'],
                                     update_export['fsal']['user_id'])
            update_export = Export.from_dict(update_export['export_id'], update_export)
            update_export.fsal.cephx_key = old_export.fsal.cephx_key
            self._update_export(update_export)
            export_ls = self.exports[self.rados_namespace]
            if old_export not in export_ls:
                # This happens when export is fetched by ID
                old_export = self._fetch_export(old_export.pseudo)
            export_ls.remove(old_export)
            restart_nfs_service(self.mgr, update_export.cluster_id)
            return 0, "Successfully updated export", ""
        except NotImplementedError:
            return 0, "Manual Restart of NFS PODS required for successful update of exports", ""
        except Exception as e:
            return getattr(e, 'errno', -1), '', f'Failed to update export: {e}'

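
# NFSCluster backs the 'nfs cluster ...' commands. Creating a cluster ensures
# the 'nfs-ganesha' pool exists with the 'nfs' application enabled, writes an
# empty common config object, and asks the orchestrator to deploy the ganesha
# daemons via an NFSServiceSpec with the requested placement.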
class NFSCluster:
    def __init__(self, mgr):
        self.pool_name = POOL_NAME
        self.pool_ns = ''
        self.mgr = mgr

    def _set_cluster_id(self, cluster_id):
        self.cluster_id = cluster_id

    def _set_pool_namespace(self, cluster_id):
        self.pool_ns = cluster_id

    def _get_common_conf_obj_name(self):
        return f'conf-nfs.{self.cluster_id}'

    def _get_user_conf_obj_name(self):
        return f'userconf-nfs.{self.cluster_id}'

    def _call_orch_apply_nfs(self, placement):
        spec = NFSServiceSpec(service_type='nfs', service_id=self.cluster_id,
                              pool=self.pool_name, namespace=self.pool_ns,
                              placement=PlacementSpec.from_string(placement))
        completion = self.mgr.apply_nfs(spec)
        orchestrator.raise_if_exception(completion)

    def create_empty_rados_obj(self):
        common_conf = self._get_common_conf_obj_name()
        NFSRados(self.mgr, self.pool_ns).write_obj('', self._get_common_conf_obj_name())
        log.info(f"Created empty object: {common_conf}")

    def delete_config_obj(self):
        NFSRados(self.mgr, self.pool_ns).remove_all_obj()
        log.info(f"Deleted {self._get_common_conf_obj_name()} object and all objects in "
                 f"{self.pool_ns}")

    @cluster_setter
    def create_nfs_cluster(self, export_type, cluster_id, placement):
        if export_type != 'cephfs':
            return -errno.EINVAL, "", f"Invalid export type: {export_type}"
        try:
            pool_list = [p['pool_name'] for p in self.mgr.get_osdmap().dump().get('pools', [])]

            if self.pool_name not in pool_list:
                r, out, err = create_pool(self.mgr, self.pool_name)
                if r != 0:
                    return r, out, err
                log.info(f"Pool Status: {out}")

                self.mgr.check_mon_command({'prefix': 'osd pool application enable',
                                            'pool': self.pool_name, 'app': 'nfs'})

            self.create_empty_rados_obj()

            if cluster_id not in available_clusters(self.mgr):
                self._call_orch_apply_nfs(placement)
                return 0, "NFS Cluster Created Successfully", ""
            return 0, "", f"{cluster_id} cluster already exists"
        except Exception as e:
            log.exception(f"NFS Cluster {cluster_id} could not be created")
            return getattr(e, 'errno', -1), "", str(e)

    @cluster_setter
    def update_nfs_cluster(self, cluster_id, placement):
        try:
            if cluster_id in available_clusters(self.mgr):
                self._call_orch_apply_nfs(placement)
                return 0, "NFS Cluster Updated Successfully", ""
            return -errno.ENOENT, "", "Cluster does not exist"
        except Exception as e:
            log.exception(f"NFS Cluster {cluster_id} could not be updated")
            return getattr(e, 'errno', -1), "", str(e)

    @cluster_setter
    def delete_nfs_cluster(self, cluster_id):
        try:
            cluster_list = available_clusters(self.mgr)
            if cluster_id in cluster_list:
                self.mgr.fs_export.delete_all_exports(cluster_id)
                completion = self.mgr.remove_service('nfs.' + self.cluster_id)
                orchestrator.raise_if_exception(completion)
                self.delete_config_obj()
                return 0, "NFS Cluster Deleted Successfully", ""
            return 0, "", "Cluster does not exist"
        except Exception as e:
            log.exception(f"Failed to delete NFS Cluster {cluster_id}")
            return getattr(e, 'errno', -1), "", str(e)

    def list_nfs_cluster(self):
        try:
            return 0, '\n'.join(available_clusters(self.mgr)), ""
        except Exception as e:
            log.exception("Failed to list NFS Cluster")
            return getattr(e, 'errno', -1), "", str(e)

    def _show_nfs_cluster_info(self, cluster_id):
        self._set_cluster_id(cluster_id)
        completion = self.mgr.list_daemons(daemon_type='nfs')
        orchestrator.raise_if_exception(completion)
        host_ip = []
        # Here completion.result is a list of DaemonDescription objects
        for cluster in completion.result:
            if self.cluster_id == cluster.service_id():
                """
                getaddrinfo sample output: [(<AddressFamily.AF_INET: 2>,
                <SocketKind.SOCK_STREAM: 1>, 6, 'xyz', ('172.217.166.98', 2049)),
                (<AddressFamily.AF_INET6: 10>, <SocketKind.SOCK_STREAM: 1>, 6, '',
                ('2404:6800:4009:80d::200e', 2049, 0, 0))]
                """
                try:
                    host_ip.append({
                        "hostname": cluster.hostname,
                        "ip": list(set([ip[4][0] for ip in socket.getaddrinfo(
                            cluster.hostname, 2049, flags=socket.AI_CANONNAME,
                            type=socket.SOCK_STREAM)])),
                        "port": 2049  # Default ganesha port
                    })
                except socket.gaierror:
                    continue
        return host_ip

    def show_nfs_cluster_info(self, cluster_id=None):
        try:
            cluster_ls = []
            info_res = {}
            if cluster_id:
                cluster_ls = [cluster_id]
            else:
                cluster_ls = available_clusters(self.mgr)

            for cluster_id in cluster_ls:
                res = self._show_nfs_cluster_info(cluster_id)
                if res:
                    info_res[cluster_id] = res
            return (0, json.dumps(info_res, indent=4), '')
        except Exception as e:
            log.exception("Failed to show info for cluster")
            return getattr(e, 'errno', -1), "", str(e)

    @cluster_setter
    def set_nfs_cluster_config(self, cluster_id, nfs_config):
        try:
            if not nfs_config:
                return -errno.EINVAL, "", "Empty Config!!"
            if cluster_id in available_clusters(self.mgr):
                rados_obj = NFSRados(self.mgr, self.pool_ns)
                if rados_obj.check_user_config():
                    return 0, "", "NFS-Ganesha User Config already exists"
                rados_obj.write_obj(nfs_config, self._get_user_conf_obj_name(),
                                    self._get_common_conf_obj_name())
                restart_nfs_service(self.mgr, cluster_id)
                return 0, "NFS-Ganesha Config Set Successfully", ""
            return -errno.ENOENT, "", "Cluster does not exist"
        except NotImplementedError:
            return 0, "NFS-Ganesha Config Added Successfully (Manual Restart of NFS PODS required)", ""
        except Exception as e:
            log.exception(f"Setting NFS-Ganesha Config failed for {cluster_id}")
            return getattr(e, 'errno', -1), "", str(e)

    @cluster_setter
    def reset_nfs_cluster_config(self, cluster_id):
        try:
            if cluster_id in available_clusters(self.mgr):
                rados_obj = NFSRados(self.mgr, self.pool_ns)
                if not rados_obj.check_user_config():
                    return 0, "", "NFS-Ganesha User Config does not exist"
                rados_obj.remove_obj(self._get_user_conf_obj_name(),
                                     self._get_common_conf_obj_name())
                restart_nfs_service(self.mgr, cluster_id)
                return 0, "NFS-Ganesha Config Reset Successfully", ""
            return -errno.ENOENT, "", "Cluster does not exist"
        except NotImplementedError:
            return 0, "NFS-Ganesha Config Removed Successfully (Manual Restart of NFS PODS required)", ""
        except Exception as e:
            log.exception(f"Resetting NFS-Ganesha Config failed for {cluster_id}")
            return getattr(e, 'errno', -1), "", str(e)