# ceph/src/pybind/mgr/dashboard/services/ganesha.py (Ceph 15.2.0 Octopus)
1 # -*- coding: utf-8 -*-
2 # pylint: disable=too-many-lines
3 from __future__ import absolute_import
4
5 import logging
6 import re
7
8 from orchestrator import OrchestratorError
9 from .cephfs import CephFS
10 from .cephx import CephX
11 from .orchestrator import OrchClient
12 from .rgw_client import RgwClient, RequestException, NoCredentialsException
13 from .. import mgr
14 from ..settings import Settings
15 from ..exceptions import DashboardException
16
17
18 logger = logging.getLogger('ganesha')
19
20
21 class NFSException(DashboardException):
22 def __init__(self, msg):
23 super(NFSException, self).__init__(component="nfs", msg=msg)
24
25
26 class Ganesha(object):
27 @classmethod
28 def _get_clusters_locations(cls):
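        """Parse the GANESHA_CLUSTERS_RADOS_POOL_NAMESPACE setting.

        The setting is a comma-separated list of '[cluster_id:]pool[/namespace]'
        entries; entries without a cluster_id are stored under '_default_'.
        Returns a dict mapping cluster_id to a (pool, namespace) tuple.
        Illustrative value: 'ganesha_pool/ns1,clusterA:ganesha_pool/ns2'.
        """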
29 result = {} # type: ignore
30 location_list_str = Settings.GANESHA_CLUSTERS_RADOS_POOL_NAMESPACE
31 if not location_list_str:
            raise NFSException("Ganesha config location is not configured. "
                               "Please set the GANESHA_CLUSTERS_RADOS_POOL_NAMESPACE "
                               "setting.")
35 location_list = [l.strip() for l in location_list_str.split(",")]
36 for location in location_list:
37 cluster = None
38 pool = None
39 namespace = None
40 if not location:
41 raise NFSException("Invalid Ganesha cluster RADOS "
42 "[cluster_id:]pool/namespace setting: {}"
43 .format(location))
44 if location.count(':') < 1:
45 # default cluster_id
46 if location.count('/') > 1:
47 raise NFSException("Invalid Ganesha RADOS pool/namespace "
48 "setting: {}".format(location))
49 # in this case accept pool/namespace only
50 cluster = "_default_"
51 if location.count('/') == 0:
52 pool, namespace = location, None
53 else:
54 pool, namespace = location.split('/', 1)
55 else:
56 cluster = location[:location.find(':')]
57 pool_nm = location[location.find(':')+1:]
58 if pool_nm.count('/') == 0:
59 pool, namespace = pool_nm, None
60 else:
61 pool, namespace = pool_nm.split('/', 1)
62
63 if cluster in result:
64 raise NFSException("Duplicate Ganesha cluster definition in "
65 "the setting: {}".format(location_list_str))
66 result[cluster] = (pool, namespace)
67
68 return result
69
70 @classmethod
71 def get_ganesha_clusters(cls):
72 return [cluster_id for cluster_id in cls._get_clusters_locations()]
73
74 @staticmethod
75 def _get_orch_nfs_instances():
76 try:
77 return OrchClient.instance().services.list("nfs")
78 except (RuntimeError, OrchestratorError, ImportError):
79 return []
80
81 @classmethod
82 def get_daemons_status(cls):
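        """Return the status of NFS-Ganesha daemons known to the orchestrator.

        The result maps service id to hostname to {'status': ..., 'desc': ...},
        or is None when the orchestrator reports no NFS instances.
        """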
83 instances = cls._get_orch_nfs_instances()
84 if not instances:
85 return None
86
87 result = {} # type: ignore
88 for instance in instances:
89 if instance.service is None:
90 instance.service = "_default_"
91 if instance.service not in result:
92 result[instance.service] = {}
93 result[instance.service][instance.hostname] = {
94 'status': instance.status,
95 'desc': instance.status_desc,
96 }
97 return result
98
99 @classmethod
100 def parse_rados_url(cls, rados_url):
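        """Split a 'rados://<pool>[/<namespace>]/<object>' URL into its parts.

        Illustrative examples (object names follow the dashboard's conventions):
            'rados://nfs_pool/ns1/conf-node1' -> pool 'nfs_pool', namespace 'ns1',
                                                 object 'conf-node1'
            'rados://nfs_pool/conf-node1'     -> pool 'nfs_pool', namespace None,
                                                 object 'conf-node1'
        """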
101 if not rados_url.startswith("rados://"):
102 raise NFSException("Invalid NFS Ganesha RADOS configuration URL: {}"
103 .format(rados_url))
104 rados_url = rados_url[8:]
105 url_comps = rados_url.split("/")
106 if len(url_comps) < 2 or len(url_comps) > 3:
107 raise NFSException("Invalid NFS Ganesha RADOS configuration URL: "
108 "rados://{}".format(rados_url))
109 if len(url_comps) == 2:
110 return url_comps[0], None, url_comps[1]
111 return url_comps
112
113 @classmethod
114 def make_rados_url(cls, pool, namespace, obj):
115 if namespace:
116 return "rados://{}/{}/{}".format(pool, namespace, obj)
117 return "rados://{}/{}".format(pool, obj)
118
119 @classmethod
120 def get_pool_and_namespace(cls, cluster_id):
121 instances = cls._get_orch_nfs_instances()
        # we assume that every instance stores its configuration in the
        # same RADOS pool/namespace
124 if instances:
125 location = instances[0].rados_config_location
126 pool, ns, _ = cls.parse_rados_url(location)
127 return pool, ns
128 locations = cls._get_clusters_locations()
129 if cluster_id not in locations:
130 raise NFSException("Cluster not found: cluster_id={}"
131 .format(cluster_id))
132 return locations[cluster_id]
133
134 @classmethod
135 def reload_daemons(cls, cluster_id, daemons_id):
136 logger.debug("issued reload of daemons: %s", daemons_id)
137 if not OrchClient.instance().available():
138 logger.debug("orchestrator not available")
139 return
140 reload_list = []
141 daemons = cls.get_daemons_status()
        if not daemons or cluster_id not in daemons:
143 raise NFSException("Cluster not found: cluster_id={}"
144 .format(cluster_id))
145 for daemon_id in daemons_id:
146 if daemon_id not in daemons[cluster_id]:
147 continue
            if daemons[cluster_id][daemon_id]['status'] == 1:
149 reload_list.append((cluster_id, daemon_id))
150 OrchClient.instance().reload_service("nfs", reload_list)
151
152 @classmethod
153 def fsals_available(cls):
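        """Return the FSALs that can currently be used for exports.

        'CEPH' is available when at least one CephFS filesystem exists; 'RGW' when
        the RGW service is online and the credentials configured for the dashboard
        belong to a system user.
        """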
154 result = []
155 if CephFS.list_filesystems():
156 result.append("CEPH")
157 try:
158 if RgwClient.admin_instance().is_service_online() and \
159 RgwClient.admin_instance().is_system_user():
160 result.append("RGW")
161 except (NoCredentialsException, RequestException, LookupError):
162 pass
163 return result
164
165
166 class GaneshaConfParser(object):
167 def __init__(self, raw_config):
168 self.pos = 0
169 self.text = ""
170 self.clean_config(raw_config)
171
172 def clean_config(self, raw_config):
173 for line in raw_config.split("\n"):
            hash_idx = line.find('#')
            if hash_idx == -1:
                self.text += line
            else:
                # remove comments
                self.text += line[:hash_idx]
180 if line.startswith("%"):
181 self.text += "\n"
182
183 def remove_all_whitespaces(self):
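        """Strip whitespace from the configuration text before parsing.

        Whitespace inside double-quoted strings is preserved, and '%' directive
        lines (e.g. %url) keep their terminating newline so they can still be
        parsed line-wise.
        """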
184 new_text = ""
185 in_string = False
186 in_section = False
187 for i, cha in enumerate(self.text):
188 if in_section:
189 if cha != '"' and self.text[i-1] != '\\':
190 new_text += cha
191 elif cha == '\n':
192 new_text += cha
193 in_section = False
194 elif i == (len(self.text)-1):
195 if cha != '"' and self.text[i-1] != '\\':
196 new_text += cha
197 in_section = False
198 elif not in_section and (i == 0 or self.text[i-1] == '\n') and cha == '%':
199 in_section = True
200 new_text += cha
201 elif in_string or cha not in [' ', '\n', '\t']:
202 new_text += cha
203 elif cha == '"' and self.text[i-1] != '\\':
204 in_string = not in_string
205 self.text = new_text
206
207 def stream(self):
208 return self.text[self.pos:]
209
210 def parse_block_name(self):
211 idx = self.stream().find('{')
212 if idx == -1:
213 raise Exception("Cannot find block name")
214 block_name = self.stream()[:idx]
215 self.pos += idx+1
216 return block_name
217
218 def parse_block_or_section(self):
219 if self.stream().startswith("%url "):
220 # section line
221 self.pos += 5
222 idx = self.stream().find('\n')
223 if idx == -1:
224 value = self.stream()
225 self.pos += len(self.stream())
226 else:
227 value = self.stream()[:idx]
228 self.pos += idx+1
229 block_dict = {'block_name': '%url', 'value': value}
230 return block_dict
231
232 block_name = self.parse_block_name().upper()
233 block_dict = {'block_name': block_name}
234 self.parse_block_body(block_dict)
235 if self.stream()[0] != '}':
236 raise Exception("No closing bracket '}' found at the end of block")
237 self.pos += 1
238 return block_dict
239
240 def parse_parameter_value(self, raw_value):
        comma_idx = raw_value.find(',')

        if comma_idx == -1:
244 try:
245 return int(raw_value)
246 except ValueError:
247 if raw_value == "true":
248 return True
249 if raw_value == "false":
250 return False
251 if raw_value.find('"') == 0:
252 return raw_value[1:-1]
253 return raw_value
254 else:
255 return [self.parse_parameter_value(v.strip())
256 for v in raw_value.split(',')]
257
258 def parse_stanza(self, block_dict):
259 equal_idx = self.stream().find('=')
260 semicolon_idx = self.stream().find(';')
261 if equal_idx == -1:
262 raise Exception("Malformed stanza: no equal symbol found.")
263 parameter_name = self.stream()[:equal_idx].lower()
264 parameter_value = self.stream()[equal_idx+1:semicolon_idx]
265 block_dict[parameter_name] = self.parse_parameter_value(
266 parameter_value)
267 self.pos += semicolon_idx+1
268
269 def parse_block_body(self, block_dict):
270 last_pos = self.pos
271 while True:
272 semicolon_idx = self.stream().find(';')
273 lbracket_idx = self.stream().find('{')
274 rbracket_idx = self.stream().find('}')
275
276 if rbracket_idx == 0:
277 # block end
278 return
279
280 if (semicolon_idx != -1 and lbracket_idx != -1
281 and semicolon_idx < lbracket_idx) \
282 or (semicolon_idx != -1 and lbracket_idx == -1):
283 self.parse_stanza(block_dict)
284 elif (semicolon_idx != -1 and lbracket_idx != -1
285 and semicolon_idx > lbracket_idx) or (
286 semicolon_idx == -1 and lbracket_idx != -1):
287 if '_blocks_' not in block_dict:
288 block_dict['_blocks_'] = []
289 block_dict['_blocks_'].append(self.parse_block_or_section())
290 else:
291 raise Exception("Malformed stanza: no semicolon found.")
292
293 if last_pos == self.pos:
294 raise Exception("Infinite loop while parsing block content")
295 last_pos = self.pos
296
297 def parse(self):
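        """Parse the cleaned configuration text into a list of block dicts.

        Illustrative example: the text
            EXPORT { Export_ID = 1; Path = "/"; FSAL { Name = CEPH; } }
        is parsed into (roughly)
            [{'block_name': 'EXPORT', 'export_id': 1, 'path': '/',
              '_blocks_': [{'block_name': 'FSAL', 'name': 'CEPH'}]}]
        """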
298 self.remove_all_whitespaces()
299 blocks = []
300 while self.stream():
301 block_dict = self.parse_block_or_section()
302 blocks.append(block_dict)
303 return blocks
304
305 @staticmethod
306 def _indentation(depth, size=4):
307 conf_str = ""
308 for _ in range(0, depth*size):
309 conf_str += " "
310 return conf_str
311
312 @staticmethod
313 def write_block_body(block, depth=0):
314 def format_val(key, val):
315 if isinstance(val, list):
316 return ', '.join([format_val(key, v) for v in val])
317 if isinstance(val, bool):
318 return str(val).lower()
319 if isinstance(val, int) or (block['block_name'] == 'CLIENT'
320 and key == 'clients'):
321 return '{}'.format(val)
322 return '"{}"'.format(val)
323
324 conf_str = ""
325 for key, val in block.items():
326 if key == 'block_name':
327 continue
328 elif key == '_blocks_':
329 for blo in val:
330 conf_str += GaneshaConfParser.write_block(blo, depth)
331 elif val:
332 conf_str += GaneshaConfParser._indentation(depth)
333 conf_str += '{} = {};\n'.format(key, format_val(key, val))
334 return conf_str
335
336 @staticmethod
337 def write_block(block, depth):
338 if block['block_name'] == "%url":
339 return '%url "{}"\n\n'.format(block['value'])
340
341 conf_str = ""
342 conf_str += GaneshaConfParser._indentation(depth)
343 conf_str += format(block['block_name'])
344 conf_str += " {\n"
345 conf_str += GaneshaConfParser.write_block_body(block, depth+1)
346 conf_str += GaneshaConfParser._indentation(depth)
347 conf_str += "}\n\n"
348 return conf_str
349
350 @staticmethod
351 def write_conf(blocks):
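        """Serialize one block dict, or a list of block dicts, back to Ganesha text.

        Illustrative example:
            GaneshaConfParser.write_conf({'block_name': 'EXPORT', 'export_id': 1})
        produces an 'EXPORT { ... }' block containing the line 'export_id = 1;'.
        """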
352 if not isinstance(blocks, list):
353 blocks = [blocks]
354 conf_str = ""
355 for block in blocks:
356 conf_str += GaneshaConfParser.write_block(block, 0)
357 return conf_str
358
359
360 class FSal(object):
361 def __init__(self, name):
362 self.name = name
363
364 @classmethod
365 def validate_path(cls, _):
366 raise NotImplementedError()
367
368 def validate(self):
369 raise NotImplementedError()
370
371 def fill_keys(self):
372 raise NotImplementedError()
373
374 def create_path(self, path):
375 raise NotImplementedError()
376
377 @staticmethod
378 def from_fsal_block(fsal_block):
379 if fsal_block['name'] == "CEPH":
380 return CephFSFSal.from_fsal_block(fsal_block)
381 if fsal_block['name'] == 'RGW':
382 return RGWFSal.from_fsal_block(fsal_block)
383 return None
384
385 def to_fsal_block(self):
386 raise NotImplementedError()
387
388 @staticmethod
389 def from_dict(fsal_dict):
390 if fsal_dict['name'] == "CEPH":
391 return CephFSFSal.from_dict(fsal_dict)
392 if fsal_dict['name'] == 'RGW':
393 return RGWFSal.from_dict(fsal_dict)
394 return None
395
396 def to_dict(self):
397 raise NotImplementedError()
398
399
400 class RGWFSal(FSal):
401 def __init__(self, name, rgw_user_id, access_key, secret_key):
402 super(RGWFSal, self).__init__(name)
403 self.rgw_user_id = rgw_user_id
404 self.access_key = access_key
405 self.secret_key = secret_key
406
407 @classmethod
408 def validate_path(cls, path):
409 return path == "/" or re.match(r'^[^/><|&()#?]+$', path)
410
411 def validate(self):
412 if not self.rgw_user_id:
413 raise NFSException('RGW user must be specified')
414
415 if not RgwClient.admin_instance().user_exists(self.rgw_user_id):
416 raise NFSException("RGW user '{}' does not exist"
417 .format(self.rgw_user_id))
418
419 def fill_keys(self):
420 keys = RgwClient.admin_instance().get_user_keys(self.rgw_user_id)
421 self.access_key = keys['access_key']
422 self.secret_key = keys['secret_key']
423
424 def create_path(self, path):
425 if path == '/': # nothing to do
426 return
427 rgw = RgwClient.instance(self.rgw_user_id)
428 try:
429 exists = rgw.bucket_exists(path, self.rgw_user_id)
430 logger.debug('Checking existence of RGW bucket "%s" for user "%s": %s',
431 path, self.rgw_user_id, exists)
432 except RequestException as exp:
433 if exp.status_code == 403:
                raise NFSException('Cannot create bucket "{}" as it already '
                                   'exists and belongs to another user.'
                                   .format(path))
437 raise exp
438 if not exists:
439 logger.info('Creating new RGW bucket "%s" for user "%s"', path,
440 self.rgw_user_id)
441 rgw.create_bucket(path)
442
443 @classmethod
444 def from_fsal_block(cls, fsal_block):
445 return cls(fsal_block['name'],
446 fsal_block['user_id'],
447 fsal_block['access_key_id'],
448 fsal_block['secret_access_key'])
449
450 def to_fsal_block(self):
451 return {
452 'block_name': 'FSAL',
453 'name': self.name,
454 'user_id': self.rgw_user_id,
455 'access_key_id': self.access_key,
456 'secret_access_key': self.secret_key
457 }
458
459 @classmethod
460 def from_dict(cls, fsal_dict):
461 return cls(fsal_dict['name'], fsal_dict['rgw_user_id'], None, None)
462
463 def to_dict(self):
464 return {
465 'name': self.name,
466 'rgw_user_id': self.rgw_user_id
467 }
468
469
470 class CephFSFSal(FSal):
471 def __init__(self, name, user_id=None, fs_name=None, sec_label_xattr=None,
472 cephx_key=None):
473 super(CephFSFSal, self).__init__(name)
474 self.fs_name = fs_name
475 self.user_id = user_id
476 self.sec_label_xattr = sec_label_xattr
477 self.cephx_key = cephx_key
478
479 @classmethod
480 def validate_path(cls, path):
481 return re.match(r'^/[^><|&()?]*$', path)
482
483 def validate(self):
484 if self.user_id and self.user_id not in CephX.list_clients():
485 raise NFSException("cephx user '{}' does not exist"
486 .format(self.user_id))
487
488 def fill_keys(self):
489 if self.user_id:
490 self.cephx_key = CephX.get_client_key(self.user_id)
491
492 def create_path(self, path):
493 cfs = CephFS(self.fs_name)
494 cfs.mk_dirs(path)
495
496 @classmethod
497 def from_fsal_block(cls, fsal_block):
498 return cls(fsal_block['name'],
499 fsal_block.get('user_id', None),
500 fsal_block.get('filesystem', None),
501 fsal_block.get('sec_label_xattr', None),
502 fsal_block.get('secret_access_key', None))
503
504 def to_fsal_block(self):
505 result = {
506 'block_name': 'FSAL',
507 'name': self.name,
508 }
509 if self.user_id:
510 result['user_id'] = self.user_id
511 if self.fs_name:
512 result['filesystem'] = self.fs_name
513 if self.sec_label_xattr:
514 result['sec_label_xattr'] = self.sec_label_xattr
515 if self.cephx_key:
516 result['secret_access_key'] = self.cephx_key
517 return result
518
519 @classmethod
520 def from_dict(cls, fsal_dict):
521 return cls(fsal_dict['name'], fsal_dict['user_id'],
522 fsal_dict['fs_name'], fsal_dict['sec_label_xattr'], None)
523
524 def to_dict(self):
525 return {
526 'name': self.name,
527 'user_id': self.user_id,
528 'fs_name': self.fs_name,
529 'sec_label_xattr': self.sec_label_xattr
530 }
531
532
533 class Client(object):
534 def __init__(self, addresses, access_type=None, squash=None):
535 self.addresses = addresses
536 self.access_type = access_type
537 self.squash = GaneshaConf.format_squash(squash)
538
539 @classmethod
540 def from_client_block(cls, client_block):
541 addresses = client_block['clients']
542 if not isinstance(addresses, list):
543 addresses = [addresses]
544 return cls(addresses,
545 client_block.get('access_type', None),
546 client_block.get('squash', None))
547
548 def to_client_block(self):
549 result = {
550 'block_name': 'CLIENT',
551 'clients': self.addresses,
552 }
553 if self.access_type:
554 result['access_type'] = self.access_type
555 if self.squash:
556 result['squash'] = self.squash
557 return result
558
559 @classmethod
560 def from_dict(cls, client_dict):
561 return cls(client_dict['addresses'], client_dict['access_type'],
562 client_dict['squash'])
563
564 def to_dict(self):
565 return {
566 'addresses': self.addresses,
567 'access_type': self.access_type,
568 'squash': self.squash
569 }
570
571
572 class Export(object):
573 # pylint: disable=R0902
574 def __init__(self, export_id, path, fsal, cluster_id, daemons, pseudo=None,
575 tag=None, access_type=None, squash=None,
576 attr_expiration_time=None, security_label=False,
577 protocols=None, transports=None, clients=None):
578 self.export_id = export_id
579 self.path = GaneshaConf.format_path(path)
580 self.fsal = fsal
581 self.cluster_id = cluster_id
582 self.daemons = set(daemons)
583 self.pseudo = GaneshaConf.format_path(pseudo)
584 self.tag = tag
585 self.access_type = access_type
586 self.squash = GaneshaConf.format_squash(squash)
587 if attr_expiration_time is None:
588 self.attr_expiration_time = 0
589 else:
590 self.attr_expiration_time = attr_expiration_time
591 self.security_label = security_label
592 self.protocols = {GaneshaConf.format_protocol(p) for p in protocols}
593 self.transports = set(transports)
594 self.clients = clients
595
596 def validate(self, daemons_list):
597 # pylint: disable=R0912
598 for daemon_id in self.daemons:
599 if daemon_id not in daemons_list:
600 raise NFSException("Daemon '{}' does not exist"
601 .format(daemon_id))
602
603 if not self.fsal.validate_path(self.path):
604 raise NFSException("Export path ({}) is invalid.".format(self.path))
605
606 if not self.protocols:
607 raise NFSException(
608 "No NFS protocol version specified for the export.")
609
610 if not self.transports:
611 raise NFSException(
612 "No network transport type specified for the export.")
613
614 for t in self.transports:
615 match = re.match(r'^TCP$|^UDP$', t)
616 if not match:
617 raise NFSException(
618 "'{}' is an invalid network transport type identifier"
619 .format(t))
620
621 self.fsal.validate()
622
623 if 4 in self.protocols:
624 if not self.pseudo:
625 raise NFSException(
626 "Pseudo path is required when NFSv4 protocol is used")
627 match = re.match(r'^/[^><|&()]*$', self.pseudo)
628 if not match:
629 raise NFSException(
630 "Export pseudo path ({}) is invalid".format(self.pseudo))
631
632 if self.tag:
633 match = re.match(r'^[^/><|:&()]+$', self.tag)
634 if not match:
635 raise NFSException(
636 "Export tag ({}) is invalid".format(self.tag))
637
638 if self.fsal.name == 'RGW' and 4 not in self.protocols and not self.tag:
639 raise NFSException(
640 "Tag is mandatory for RGW export when using only NFSv3")
641
642 @classmethod
643 def from_export_block(cls, export_block, cluster_id, defaults):
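        """Build an Export from a parsed EXPORT block.

        Values missing from the block fall back to the cluster-wide defaults; the
        daemons list starts out empty and is filled in later from the per-daemon
        %url blocks (see GaneshaConf.__init__).
        """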
644 logger.debug("parsing export block: %s", export_block)
645
646 fsal_block = [b for b in export_block['_blocks_']
647 if b['block_name'] == "FSAL"]
648
649 protocols = export_block.get('protocols', defaults['protocols'])
650 if not isinstance(protocols, list):
651 protocols = [protocols]
652
653 transports = export_block.get('transports', defaults['transports'])
654 if not isinstance(transports, list):
655 transports = [transports]
656
657 client_blocks = [b for b in export_block['_blocks_']
658 if b['block_name'] == "CLIENT"]
659
660 return cls(export_block['export_id'],
661 export_block['path'],
662 FSal.from_fsal_block(fsal_block[0]),
663 cluster_id,
664 [],
665 export_block.get('pseudo', None),
666 export_block.get('tag', None),
667 export_block.get('access_type', defaults['access_type']),
668 export_block.get('squash', defaults['squash']),
669 export_block.get('attr_expiration_time', None),
670 export_block.get('security_label', False),
671 protocols,
672 transports,
673 [Client.from_client_block(client)
674 for client in client_blocks])
675
676 def to_export_block(self, defaults):
677 # pylint: disable=too-many-branches
678 result = {
679 'block_name': 'EXPORT',
680 'export_id': self.export_id,
681 'path': self.path
682 }
683 if self.pseudo:
684 result['pseudo'] = self.pseudo
685 if self.tag:
686 result['tag'] = self.tag
687 if 'access_type' not in defaults \
688 or self.access_type != defaults['access_type']:
689 result['access_type'] = self.access_type
690 if 'squash' not in defaults or self.squash != defaults['squash']:
691 result['squash'] = self.squash
692 if self.fsal.name == 'CEPH':
693 result['attr_expiration_time'] = self.attr_expiration_time
694 result['security_label'] = self.security_label
695 if 'protocols' not in defaults:
696 result['protocols'] = [p for p in self.protocols]
697 else:
698 def_proto = defaults['protocols']
699 if not isinstance(def_proto, list):
700 def_proto = set([def_proto])
701 if self.protocols != def_proto:
702 result['protocols'] = [p for p in self.protocols]
703 if 'transports' not in defaults:
704 result['transports'] = [t for t in self.transports]
705 else:
706 def_transp = defaults['transports']
707 if not isinstance(def_transp, list):
708 def_transp = set([def_transp])
709 if self.transports != def_transp:
710 result['transports'] = [t for t in self.transports]
711
712 result['_blocks_'] = [self.fsal.to_fsal_block()]
713 result['_blocks_'].extend([client.to_client_block()
714 for client in self.clients])
715 return result
716
717 @classmethod
718 def from_dict(cls, export_id, ex_dict, old_export=None):
719 return cls(export_id,
720 ex_dict['path'],
721 FSal.from_dict(ex_dict['fsal']),
722 ex_dict['cluster_id'],
723 ex_dict['daemons'],
724 ex_dict['pseudo'],
725 ex_dict['tag'],
726 ex_dict['access_type'],
727 ex_dict['squash'],
728 old_export.attr_expiration_time if old_export else None,
729 ex_dict['security_label'],
730 ex_dict['protocols'],
731 ex_dict['transports'],
732 [Client.from_dict(client) for client in ex_dict['clients']])
733
734 def to_dict(self):
735 return {
736 'export_id': self.export_id,
737 'path': self.path,
738 'fsal': self.fsal.to_dict(),
739 'cluster_id': self.cluster_id,
740 'daemons': sorted([d for d in self.daemons]),
741 'pseudo': self.pseudo,
742 'tag': self.tag,
743 'access_type': self.access_type,
744 'squash': self.squash,
745 'security_label': self.security_label,
746 'protocols': sorted([p for p in self.protocols]),
747 'transports': sorted([t for t in self.transports]),
748 'clients': [client.to_dict() for client in self.clients]
749 }
750
751
752 class GaneshaConf(object):
753 # pylint: disable=R0902
754
755 def __init__(self, cluster_id, rados_pool, rados_namespace):
756 self.cluster_id = cluster_id
757 self.rados_pool = rados_pool
758 self.rados_namespace = rados_namespace
759 self.export_conf_blocks = [] # type: ignore
760 self.daemons_conf_blocks = {} # type: ignore
761 self._defaults = {}
762 self.exports = {}
763
764 self._read_raw_config()
765
766 # load defaults
767 def_block = [b for b in self.export_conf_blocks
768 if b['block_name'] == "EXPORT_DEFAULTS"]
769 self.export_defaults = def_block[0] if def_block else {}
770 self._defaults = self.ganesha_defaults(self.export_defaults)
771
772 for export_block in [block for block in self.export_conf_blocks
773 if block['block_name'] == "EXPORT"]:
774 export = Export.from_export_block(export_block, cluster_id,
775 self._defaults)
776 self.exports[export.export_id] = export
777
778 # link daemons to exports
779 for daemon_id, daemon_blocks in self.daemons_conf_blocks.items():
780 for block in daemon_blocks:
781 if block['block_name'] == "%url":
782 rados_url = block['value']
783 _, _, obj = Ganesha.parse_rados_url(rados_url)
784 if obj.startswith("export-"):
785 export_id = int(obj[obj.find('-')+1:])
786 self.exports[export_id].daemons.add(daemon_id)
787
788 @classmethod
789 def instance(cls, cluster_id):
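        """Load the Ganesha configuration of a cluster from its RADOS location.

        Illustrative usage (cluster id and export dict are hypothetical):
            conf = GaneshaConf.instance('_default_')
            export_id = conf.create_export(export_dict)
            conf.reload_daemons(conf.list_daemons())
        """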
790 pool, ns = Ganesha.get_pool_and_namespace(cluster_id)
791 return cls(cluster_id, pool, ns)
792
793 def _read_raw_config(self):
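        """Read and parse all Ganesha objects from the RADOS pool/namespace.

        Objects named 'export-<id>' hold individual export blocks; objects named
        'conf-<daemon_id>' hold the per-daemon configuration (%url lines).
        """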
794 with mgr.rados.open_ioctx(self.rados_pool) as ioctx:
795 if self.rados_namespace:
796 ioctx.set_namespace(self.rados_namespace)
797 objs = ioctx.list_objects()
798 for obj in objs:
799 if obj.key.startswith("export-"):
800 size, _ = obj.stat()
801 raw_config = obj.read(size)
802 raw_config = raw_config.decode("utf-8")
803 logger.debug("read export configuration from rados "
804 "object %s/%s/%s:\n%s", self.rados_pool,
805 self.rados_namespace, obj.key, raw_config)
806 self.export_conf_blocks.extend(
807 GaneshaConfParser(raw_config).parse())
808 elif obj.key.startswith("conf-"):
809 size, _ = obj.stat()
810 raw_config = obj.read(size)
811 raw_config = raw_config.decode("utf-8")
812 logger.debug("read daemon configuration from rados "
813 "object %s/%s/%s:\n%s", self.rados_pool,
814 self.rados_namespace, obj.key, raw_config)
815 idx = obj.key.find('-')
816 self.daemons_conf_blocks[obj.key[idx+1:]] = \
817 GaneshaConfParser(raw_config).parse()
818
819 def _write_raw_config(self, conf_block, obj):
820 raw_config = GaneshaConfParser.write_conf(conf_block)
821 with mgr.rados.open_ioctx(self.rados_pool) as ioctx:
822 if self.rados_namespace:
823 ioctx.set_namespace(self.rados_namespace)
824 ioctx.write_full(obj, raw_config.encode('utf-8'))
825 logger.debug(
826 "write configuration into rados object %s/%s/%s:\n%s",
827 self.rados_pool, self.rados_namespace, obj, raw_config)
828
829 @classmethod
830 def ganesha_defaults(cls, export_defaults):
        """
        Default export block values, according to
        https://github.com/nfs-ganesha/nfs-ganesha/blob/next/src/config_samples/export.txt
        """
835 return {
836 'access_type': export_defaults.get('access_type', 'NONE'),
837 'protocols': export_defaults.get('protocols', [3, 4]),
838 'transports': export_defaults.get('transports', ['TCP', 'UDP']),
839 'squash': export_defaults.get('squash', 'root_squash')
840 }
841
842 @classmethod
843 def format_squash(cls, squash):
844 if squash is None:
845 return None
846 if squash.lower() in ["no_root_squash", "noidsquash", "none"]:
847 return "no_root_squash"
848 if squash.lower() in ["rootid", "root_id_squash", "rootidsquash"]:
849 return "root_id_squash"
850 if squash.lower() in ["root", "root_squash", "rootsquash"]:
851 return "root_squash"
852 if squash.lower() in ["all", "all_squash", "allsquash",
853 "all_anonymous", "allanonymous"]:
854 return "all_squash"
855 logger.error("could not parse squash value: %s", squash)
856 raise NFSException("'{}' is an invalid squash option".format(squash))
857
858 @classmethod
859 def format_protocol(cls, protocol):
860 if str(protocol) in ["NFSV3", "3", "V3", "NFS3"]:
861 return 3
862 if str(protocol) in ["NFSV4", "4", "V4", "NFS4"]:
863 return 4
864 logger.error("could not parse protocol value: %s", protocol)
865 raise NFSException("'{}' is an invalid NFS protocol version identifier"
866 .format(protocol))
867
868 @classmethod
869 def format_path(cls, path):
870 if path is not None:
871 path = path.strip()
872 if len(path) > 1 and path[-1] == '/':
873 path = path[:-1]
874 return path
875
876 def validate(self, export):
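        """Validate an export against the rest of the configuration.

        Besides the export's own checks (daemons, path, protocols, transports,
        FSAL), this enforces for NFSv4 that the tag and pseudo path are unique and
        that a pseudo path nested under another export's pseudo path corresponds to
        an existing CephFS directory.
        """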
877 export.validate(self.list_daemons())
878
879 if 4 in export.protocols: # NFSv4 protocol
880 len_prefix = 1
881 parent_export = None
882 for ex in self.list_exports():
883 if export.tag and ex.tag == export.tag:
884 raise NFSException(
885 "Another export exists with the same tag: {}"
886 .format(export.tag))
887
888 if export.pseudo and ex.pseudo == export.pseudo:
889 raise NFSException(
890 "Another export exists with the same pseudo path: {}"
891 .format(export.pseudo))
892
893 if not ex.pseudo:
894 continue
895
896 if export.pseudo[:export.pseudo.rfind('/')+1].startswith(ex.pseudo):
897 if export.pseudo[len(ex.pseudo)] == '/':
898 if len(ex.pseudo) > len_prefix:
899 len_prefix = len(ex.pseudo)
900 parent_export = ex
901
902 if len_prefix > 1:
903 # validate pseudo path
904 idx = len(parent_export.pseudo) # type: ignore
905 idx = idx + 1 if idx > 1 else idx
906 real_path = "{}/{}".format(
907 parent_export.path # type: ignore
908 if len(parent_export.path) > 1 else "", # type: ignore
909 export.pseudo[idx:])
910 if export.fsal.name == 'CEPH':
911 cfs = CephFS()
912 if export.path != real_path and not cfs.dir_exists(real_path):
913 raise NFSException(
914 "Pseudo path ({}) invalid, path {} does not exist."
915 .format(export.pseudo, real_path))
916
917 def _gen_export_id(self):
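        """Return the lowest positive export ID that is not yet in use."""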
918 exports = sorted(self.exports)
919 nid = 1
920 for e_id in exports:
921 if e_id == nid:
922 nid += 1
923 else:
924 break
925 return nid
926
927 def _persist_daemon_configuration(self):
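        """Rewrite every daemon's 'conf-<daemon_id>' RADOS object.

        Each daemon gets one %url line per export assigned to it, pointing at the
        corresponding 'export-<id>' object.
        """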
928 daemon_map = {} # type: ignore
929 for daemon_id in self.list_daemons():
930 daemon_map[daemon_id] = []
931
932 for _, ex in self.exports.items():
933 for daemon in ex.daemons:
934 daemon_map[daemon].append({
935 'block_name': "%url",
936 'value': Ganesha.make_rados_url(
937 self.rados_pool, self.rados_namespace,
938 "export-{}".format(ex.export_id))
939 })
940 for daemon_id, conf_blocks in daemon_map.items():
941 self._write_raw_config(conf_blocks, "conf-{}".format(daemon_id))
942
943 def _save_export(self, export):
944 self.validate(export)
945 export.fsal.create_path(export.path)
946 export.fsal.fill_keys()
947 self.exports[export.export_id] = export
948 conf_block = export.to_export_block(self.export_defaults)
949 self._write_raw_config(conf_block, "export-{}".format(export.export_id))
950 self._persist_daemon_configuration()
951
952 def _delete_export(self, export_id):
953 self._persist_daemon_configuration()
954 with mgr.rados.open_ioctx(self.rados_pool) as ioctx:
955 if self.rados_namespace:
956 ioctx.set_namespace(self.rados_namespace)
957 ioctx.remove_object("export-{}".format(export_id))
958
959 def list_exports(self):
960 return [ex for _, ex in self.exports.items()]
961
962 def create_export(self, ex_dict):
963 ex_id = self._gen_export_id()
964 export = Export.from_dict(ex_id, ex_dict)
965 self._save_export(export)
966 return ex_id
967
968 def has_export(self, export_id):
969 return export_id in self.exports
970
971 def update_export(self, ex_dict):
972 if ex_dict['export_id'] not in self.exports:
973 return None
974 old_export = self.exports[ex_dict['export_id']]
975 del self.exports[ex_dict['export_id']]
976 export = Export.from_dict(ex_dict['export_id'], ex_dict, old_export)
977 self._save_export(export)
978 self.exports[export.export_id] = export
979 return old_export
980
981 def remove_export(self, export_id):
982 if export_id not in self.exports:
983 return None
984 export = self.exports[export_id]
985 del self.exports[export_id]
986 self._delete_export(export_id)
987 return export
988
989 def get_export(self, export_id):
990 if export_id in self.exports:
991 return self.exports[export_id]
992 return None
993
994 def list_daemons(self):
995 return [daemon_id for daemon_id in self.daemons_conf_blocks]
996
997 def reload_daemons(self, daemons):
998 with mgr.rados.open_ioctx(self.rados_pool) as ioctx:
999 if self.rados_namespace:
1000 ioctx.set_namespace(self.rados_namespace)
1001 for daemon_id in daemons:
1002 ioctx.notify("conf-{}".format(daemon_id))