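"""Management of NFS Ganesha exports for the mgr nfs module: creating,
updating, and deleting exports, and persisting them as rados objects
that the ganesha daemons consume.
"""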
import errno
import json
import logging
from typing import (
    List,
    Any,
    Dict,
    Tuple,
    Optional,
    TYPE_CHECKING,
    TypeVar,
    Callable,
    Set,
    cast)
from os.path import normpath

from rados import TimedOut, ObjectNotFound, Rados, LIBRADOS_ALL_NSPACES

from orchestrator import NoOrchestrator
from mgr_module import NFS_POOL_NAME as POOL_NAME, NFS_GANESHA_SUPPORTED_FSALS

from .export_utils import GaneshaConfParser, Export, RawBlock, CephFSFSAL, RGWFSAL
from .exception import NFSException, NFSInvalidOperation, FSNotFound
from .utils import (
    CONF_PREFIX,
    EXPORT_PREFIX,
    USER_CONF_PREFIX,
    export_obj_name,
    conf_obj_name,
    available_clusters,
    check_fs,
    restart_nfs_service)

if TYPE_CHECKING:
    from nfs.module import Module

FuncT = TypeVar('FuncT', bound=Callable)

log = logging.getLogger(__name__)


def known_cluster_ids(mgr: 'Module') -> Set[str]:
    """Return the set of known cluster IDs."""
    try:
        clusters = set(available_clusters(mgr))
    except NoOrchestrator:
        clusters = nfs_rados_configs(mgr.rados)
    return clusters


def export_cluster_checker(func: FuncT) -> FuncT:
    def cluster_check(
            export: 'ExportMgr',
            *args: Any,
            **kwargs: Any
    ) -> Tuple[int, str, str]:
        """
        Check that the target cluster exists before calling the
        decorated export method.
        """
        clusters = known_cluster_ids(export.mgr)
        cluster_id: str = kwargs['cluster_id']
        log.debug("checking for %r in known nfs clusters: %r",
                  cluster_id, clusters)
        if cluster_id not in clusters:
            return -errno.ENOENT, "", "Cluster does not exist"
        return func(export, *args, **kwargs)
    return cast(FuncT, cluster_check)


def exception_handler(
        exception_obj: Exception,
        log_msg: str = ""
) -> Tuple[int, str, str]:
    """Log the exception, if a message is given, and convert it into the
    (retval, stdout, stderr) triple returned by mgr commands.
    """
    if log_msg:
        log.exception(log_msg)
    return getattr(exception_obj, 'errno', -1), "", str(exception_obj)


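# A rados notify on the common config object wakes any ganesha daemons
# watching it so they reload their configuration; a timeout is logged
# but deliberately not treated as fatal, since the write itself has
# already succeeded.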
def _check_rados_notify(ioctx: Any, obj: str) -> None:
    try:
        ioctx.notify(obj)
    except TimedOut:
        log.exception("Ganesha timed out")


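# A sketch of normalize_path's behavior (illustrative examples): leading and
# trailing whitespace is stripped, normpath collapses redundant separators,
# and the double leading slash that POSIX normpath preserves is reduced:
#     normalize_path(" /foo//bar/ ")  # -> "/foo/bar"
#     normalize_path("//foo")         # -> "/foo"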
def normalize_path(path: str) -> str:
    if path:
        path = normpath(path.strip())
        if path[:2] == "//":
            path = path[1:]
    return path


class NFSRados:
    """Helper for reading and writing ganesha configuration objects in
    the NFS pool, within a single cluster's rados namespace.
    """

    def __init__(self, rados: 'Rados', namespace: str) -> None:
        self.rados = rados
        self.pool = POOL_NAME
        self.namespace = namespace

    def _make_rados_url(self, obj: str) -> str:
        return "rados://{}/{}/{}".format(self.pool, self.namespace, obj)

    def _create_url_block(self, obj_name: str) -> RawBlock:
        return RawBlock('%url', values={'value': self._make_rados_url(obj_name)})

    def write_obj(self, conf_block: str, obj: str, config_obj: str = '') -> None:
        with self.rados.open_ioctx(self.pool) as ioctx:
            ioctx.set_namespace(self.namespace)
            ioctx.write_full(obj, conf_block.encode('utf-8'))
            if not config_obj:
                # Return after creating empty common config object
                return
            log.debug("write configuration into rados object %s/%s/%s",
                      self.pool, self.namespace, obj)

            # Add created obj url to common config obj
            ioctx.append(config_obj, GaneshaConfParser.write_block(
                self._create_url_block(obj)).encode('utf-8'))
            _check_rados_notify(ioctx, config_obj)
            log.debug("Added %s url to %s", obj, config_obj)

    def read_obj(self, obj: str) -> Optional[str]:
        with self.rados.open_ioctx(self.pool) as ioctx:
            ioctx.set_namespace(self.namespace)
            try:
                return ioctx.read(obj, 1048576).decode()
            except ObjectNotFound:
                return None

    def update_obj(self, conf_block: str, obj: str, config_obj: str) -> None:
        with self.rados.open_ioctx(self.pool) as ioctx:
            ioctx.set_namespace(self.namespace)
            ioctx.write_full(obj, conf_block.encode('utf-8'))
            log.debug("write configuration into rados object %s/%s/%s",
                      self.pool, self.namespace, obj)
            _check_rados_notify(ioctx, config_obj)
            log.debug("Update export %s in %s", obj, config_obj)

    def remove_obj(self, obj: str, config_obj: str) -> None:
        with self.rados.open_ioctx(self.pool) as ioctx:
            ioctx.set_namespace(self.namespace)
            export_urls = ioctx.read(config_obj)
            url = '%url "{}"\n\n'.format(self._make_rados_url(obj))
            export_urls = export_urls.replace(url.encode('utf-8'), b'')
            ioctx.remove_object(obj)
            ioctx.write_full(config_obj, export_urls)
            _check_rados_notify(ioctx, config_obj)
            log.debug("Object deleted: %s", url)

    def remove_all_obj(self) -> None:
        with self.rados.open_ioctx(self.pool) as ioctx:
            ioctx.set_namespace(self.namespace)
            for obj in ioctx.list_objects():
                obj.remove()

    def check_user_config(self) -> bool:
        with self.rados.open_ioctx(self.pool) as ioctx:
            ioctx.set_namespace(self.namespace)
            for obj in ioctx.list_objects():
                if obj.key.startswith(USER_CONF_PREFIX):
                    return True
        return False


def nfs_rados_configs(rados: 'Rados', nfs_pool: str = POOL_NAME) -> Set[str]:
    """Return a set of all the namespaces in the nfs_pool where nfs
    configuration objects are found. The namespaces also correspond
    to the cluster ids.
    """
    ns: Set[str] = set()
    prefixes = (EXPORT_PREFIX, CONF_PREFIX, USER_CONF_PREFIX)
    with rados.open_ioctx(nfs_pool) as ioctx:
        ioctx.set_namespace(LIBRADOS_ALL_NSPACES)
        for obj in ioctx.list_objects():
            if obj.key.startswith(prefixes):
                ns.add(obj.nspace)
    return ns


class ExportMgr:
    def __init__(
            self,
            mgr: 'Module',
            export_ls: Optional[Dict[str, List[Export]]] = None
    ) -> None:
        self.mgr = mgr
        self.rados_pool = POOL_NAME
        self._exports: Optional[Dict[str, List[Export]]] = export_ls

    @property
    def exports(self) -> Dict[str, List[Export]]:
        """Lazily parse the exports for all known clusters and cache them."""
        if self._exports is None:
            self._exports = {}
            log.info("Begin export parsing")
            for cluster_id in known_cluster_ids(self.mgr):
                self.export_conf_objs: List[Export] = []
                self._read_raw_config(cluster_id)
                self._exports[cluster_id] = self.export_conf_objs
            log.info("Exports parsed successfully %s", self._exports.items())
        return self._exports

    def _fetch_export(
            self,
            cluster_id: str,
            pseudo_path: str
    ) -> Optional[Export]:
        try:
            for ex in self.exports[cluster_id]:
                if ex.pseudo == pseudo_path:
                    return ex
            return None
        except KeyError:
            log.info('no exports for cluster %s', cluster_id)
            return None

    def _fetch_export_id(
            self,
            cluster_id: str,
            export_id: int
    ) -> Optional[Export]:
        try:
            for ex in self.exports[cluster_id]:
                if ex.export_id == export_id:
                    return ex
            return None
        except KeyError:
            log.info('no exports for cluster %s', cluster_id)
            return None

    def _delete_export_user(self, export: Export) -> None:
        if isinstance(export.fsal, CephFSFSAL):
            assert export.fsal.user_id
            self.mgr.check_mon_command({
                'prefix': 'auth rm',
                'entity': 'client.{}'.format(export.fsal.user_id),
            })
            log.info("Deleted export user %s", export.fsal.user_id)
        elif isinstance(export.fsal, RGWFSAL):
            # do nothing; we're using the bucket owner creds.
            pass

    def _create_export_user(self, export: Export) -> None:
        if isinstance(export.fsal, CephFSFSAL):
            fsal = cast(CephFSFSAL, export.fsal)
            assert fsal.fs_name

            # is top-level or any client rw?
            rw = export.access_type.lower() == 'rw'
            for c in export.clients:
                if c.access_type.lower() == 'rw':
                    rw = True
                    break

            fsal.user_id = f"nfs.{export.cluster_id}.{export.export_id}"
            fsal.cephx_key = self._create_user_key(
                export.cluster_id, fsal.user_id, export.path, fsal.fs_name, not rw
            )
            log.debug("Successfully created user %s for cephfs path %s", fsal.user_id, export.path)

        elif isinstance(export.fsal, RGWFSAL):
            rgwfsal = cast(RGWFSAL, export.fsal)
            if not rgwfsal.user_id:
                # no user_id given; fall back to the bucket owner's creds
                assert export.path
                ret, out, err = self.mgr.tool_exec(
                    ['radosgw-admin', 'bucket', 'stats', '--bucket', export.path]
                )
                if ret:
                    raise NFSException(f'Failed to fetch owner for bucket {export.path}')
                j = json.loads(out)
                owner = j.get('owner', '')
                rgwfsal.user_id = owner
            assert rgwfsal.user_id
            ret, out, err = self.mgr.tool_exec([
                'radosgw-admin', 'user', 'info', '--uid', rgwfsal.user_id
            ])
            if ret:
                raise NFSException(
                    f'Failed to fetch key for bucket {export.path} owner {rgwfsal.user_id}'
                )
            j = json.loads(out)

            # FIXME: make this more tolerant of unexpected output?
            rgwfsal.access_key_id = j['keys'][0]['access_key']
            rgwfsal.secret_access_key = j['keys'][0]['secret_key']
            log.debug("Successfully fetched user %s for RGW path %s", rgwfsal.user_id, export.path)

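    # _gen_export_id scans the sorted existing IDs and returns the smallest
    # positive integer not yet in use, e.g. (illustrative): IDs [1, 2, 4]
    # yield 3, and [2, 3] yield 1.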
    def _gen_export_id(self, cluster_id: str) -> int:
        exports = sorted([ex.export_id for ex in self.exports[cluster_id]])
        nid = 1
        for e_id in exports:
            if e_id == nid:
                nid += 1
            else:
                break
        return nid

    def _read_raw_config(self, rados_namespace: str) -> None:
        with self.mgr.rados.open_ioctx(self.rados_pool) as ioctx:
            ioctx.set_namespace(rados_namespace)
            for obj in ioctx.list_objects():
                if obj.key.startswith(EXPORT_PREFIX):
                    size, _ = obj.stat()
                    raw_config = obj.read(size)
                    raw_config = raw_config.decode("utf-8")
                    log.debug("read export configuration from rados "
                              "object %s/%s/%s", self.rados_pool,
                              rados_namespace, obj.key)
                    self.export_conf_objs.append(Export.from_export_block(
                        GaneshaConfParser(raw_config).parse()[0], rados_namespace))

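    # Export objects live in the cluster's rados namespace and are linked
    # from the cluster's common config object so ganesha picks them up;
    # export_obj_name and conf_obj_name (from .utils) derive the object
    # names from the export id and cluster id respectively.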
    def _save_export(self, cluster_id: str, export: Export) -> None:
        self.exports[cluster_id].append(export)
        self._rados(cluster_id).write_obj(
            GaneshaConfParser.write_block(export.to_export_block()),
            export_obj_name(export.export_id),
            conf_obj_name(export.cluster_id)
        )

    def _delete_export(
            self,
            cluster_id: str,
            pseudo_path: Optional[str],
            export_obj: Optional[Export] = None
    ) -> Tuple[int, str, str]:
        try:
            if export_obj:
                export: Optional[Export] = export_obj
            else:
                assert pseudo_path
                export = self._fetch_export(cluster_id, pseudo_path)

            if export:
                if pseudo_path:
                    self._rados(cluster_id).remove_obj(
                        export_obj_name(export.export_id), conf_obj_name(cluster_id))
                self.exports[cluster_id].remove(export)
                self._delete_export_user(export)
                if not self.exports[cluster_id]:
                    del self.exports[cluster_id]
                    log.debug("Deleted all exports for cluster %s", cluster_id)
                return 0, "Successfully deleted export", ""
            return 0, "", "Export does not exist"
        except Exception as e:
            return exception_handler(e, f"Failed to delete {pseudo_path} export for {cluster_id}")

    def _fetch_export_obj(self, cluster_id: str, ex_id: int) -> Optional[Export]:
        try:
            with self.mgr.rados.open_ioctx(self.rados_pool) as ioctx:
                ioctx.set_namespace(cluster_id)
                export = Export.from_export_block(
                    GaneshaConfParser(
                        ioctx.read(export_obj_name(ex_id)).decode("utf-8")
                    ).parse()[0],
                    cluster_id
                )
                return export
        except ObjectNotFound:
            log.exception("Export ID: %s not found", ex_id)
            return None

    def _update_export(self, cluster_id: str, export: Export) -> None:
        self.exports[cluster_id].append(export)
        self._rados(cluster_id).update_obj(
            GaneshaConfParser.write_block(export.to_export_block()),
            export_obj_name(export.export_id), conf_obj_name(export.cluster_id))

    @export_cluster_checker
    def create_export(self, addr: Optional[List[str]] = None, **kwargs: Any) -> Tuple[int, str, str]:
        # if addr(s) are provided, construct client list and adjust outer block
        clients = []
        if addr:
            clients = [{
                'addresses': addr,
                'access_type': 'ro' if kwargs['read_only'] else 'rw',
                'squash': kwargs['squash'],
            }]
            kwargs['squash'] = 'none'
        kwargs['clients'] = clients

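        # With addresses given, each per-client block carries the real
        # access type and squash, e.g. (illustrative):
        #     [{'addresses': ['10.0.0.0/8'], 'access_type': 'rw', 'squash': 'none'}]
        # and the outer block is then neutralized below with access_type
        # "none" so only the listed clients get access.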
        if clients:
            kwargs['access_type'] = "none"
        elif kwargs['read_only']:
            kwargs['access_type'] = "RO"
        else:
            kwargs['access_type'] = "RW"

        if kwargs['cluster_id'] not in self.exports:
            self.exports[kwargs['cluster_id']] = []

        try:
            fsal_type = kwargs.pop('fsal_type')
            if fsal_type == 'cephfs':
                return self.create_cephfs_export(**kwargs)
            if fsal_type == 'rgw':
                return self.create_rgw_export(**kwargs)
            raise NotImplementedError()
        except Exception as e:
            return exception_handler(e, f"Failed to create {kwargs['pseudo_path']} export for {kwargs['cluster_id']}")

    @export_cluster_checker
    def delete_export(self,
                      cluster_id: str,
                      pseudo_path: str) -> Tuple[int, str, str]:
        return self._delete_export(cluster_id, pseudo_path)

    def delete_all_exports(self, cluster_id: str) -> None:
        try:
            export_list = list(self.exports[cluster_id])
        except KeyError:
            log.info("No exports to delete")
            return
        for export in export_list:
            ret, out, err = self._delete_export(cluster_id=cluster_id, pseudo_path=None,
                                                export_obj=export)
            if ret != 0:
                raise NFSException(f"Failed to delete exports: {err} (return code {ret})")
        log.info("All exports successfully deleted for cluster id: %s", cluster_id)

    def list_all_exports(self) -> List[Dict[str, Any]]:
        r = []
        for cluster_id, ls in self.exports.items():
            r.extend([e.to_dict() for e in ls])
        return r

a4b75251 TL |
426 | def list_exports(self, |
427 | cluster_id: str, | |
428 | detailed: bool = False) -> Tuple[int, str, str]: | |
b3b6e05e TL |
429 | try: |
430 | if detailed: | |
a4b75251 TL |
431 | result_d = [export.to_dict() for export in self.exports[cluster_id]] |
432 | return 0, json.dumps(result_d, indent=2), '' | |
b3b6e05e | 433 | else: |
a4b75251 TL |
434 | result_ps = [export.pseudo for export in self.exports[cluster_id]] |
435 | return 0, json.dumps(result_ps, indent=2), '' | |
436 | ||
b3b6e05e | 437 | except KeyError: |
a4b75251 | 438 | log.warning("No exports to list for %s", cluster_id) |
b3b6e05e TL |
439 | return 0, '', '' |
440 | except Exception as e: | |
441 | return exception_handler(e, f"Failed to list exports for {cluster_id}") | |
442 | ||
    def _get_export_dict(self, cluster_id: str, pseudo_path: str) -> Optional[Dict[str, Any]]:
        export = self._fetch_export(cluster_id, pseudo_path)
        if export:
            return export.to_dict()
        log.warning("No %s export to show for %s", pseudo_path, cluster_id)
        return None

    @export_cluster_checker
    def get_export(
            self,
            cluster_id: str,
            pseudo_path: str,
    ) -> Tuple[int, str, str]:
        try:
            export_dict = self._get_export_dict(cluster_id, pseudo_path)
            if export_dict:
                return 0, json.dumps(export_dict, indent=2), ''
            log.warning("No %s export to show for %s", pseudo_path, cluster_id)
            return 0, '', ''
        except Exception as e:
            return exception_handler(e, f"Failed to get {pseudo_path} export for {cluster_id}")

    def get_export_by_id(
            self,
            cluster_id: str,
            export_id: int
    ) -> Optional[Dict[str, Any]]:
        export = self._fetch_export_id(cluster_id, export_id)
        return export.to_dict() if export else None

    def get_export_by_pseudo(
            self,
            cluster_id: str,
            pseudo_path: str
    ) -> Optional[Dict[str, Any]]:
        export = self._fetch_export(cluster_id, pseudo_path)
        return export.to_dict() if export else None

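    # apply_export accepts either JSON (one export or a list) or a raw
    # ganesha EXPORT block. A minimal JSON sketch with illustrative values:
    #     {
    #       "export_id": 1,
    #       "path": "/",
    #       "pseudo": "/cephfs",
    #       "access_type": "RW",
    #       "squash": "none",
    #       "fsal": {"name": "CEPH", "fs_name": "a"},
    #       "clients": []
    #     }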
    def apply_export(self, cluster_id: str, export_config: str) -> Tuple[int, str, str]:
        try:
            if not export_config:
                raise NFSInvalidOperation("Empty config")
            try:
                j = json.loads(export_config)
            except ValueError:
                # okay, not JSON. is it an EXPORT block?
                try:
                    blocks = GaneshaConfParser(export_config).parse()
                    exports = [
                        Export.from_export_block(block, cluster_id)
                        for block in blocks
                    ]
                    j = [export.to_dict() for export in exports]
                except Exception as ex:
                    raise NFSInvalidOperation(f"Input must be JSON or a ganesha EXPORT block: {ex}")

            # check export type
            if isinstance(j, list):
                ret, out, err = (0, '', '')
                for export in j:
                    try:
                        r, o, e = self._apply_export(cluster_id, export)
                    except Exception as ex:
                        r, o, e = exception_handler(ex, f'Failed to apply export: {ex}')
                    if r:
                        ret = r
                    if o:
                        out += o + '\n'
                    if e:
                        err += e + '\n'
                return ret, out, err
            else:
                r, o, e = self._apply_export(cluster_id, j)
                return r, o, e
        except NotImplementedError:
            return 0, "Manual restart of NFS pods required for successful update of exports", ""
        except Exception as e:
            return exception_handler(e, f'Failed to update export: {e}')

    def _update_user_id(
            self,
            cluster_id: str,
            path: str,
            access_type: str,
            fs_name: str,
            user_id: str
    ) -> None:
        osd_cap = 'allow rw pool={} namespace={}, allow rw tag cephfs data={}'.format(
            self.rados_pool, cluster_id, fs_name)
        access_type = 'r' if access_type == 'RO' else 'rw'

        self.mgr.check_mon_command({
            'prefix': 'auth caps',
            'entity': f'client.{user_id}',
            'caps': ['mon', 'allow r', 'osd', osd_cap, 'mds', 'allow {} path={}'.format(
                access_type, path)],
        })

        log.info("Export user updated %s", user_id)

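    # The caps granted to an export user are scoped to the cluster's rados
    # namespace and to the backing cephfs. Assuming NFS_POOL_NAME resolves
    # to '.nfs', a cluster 'mynfs' exporting fs 'a' at path '/' would get
    # (illustrative):
    #     mon 'allow r'
    #     osd 'allow rw pool=.nfs namespace=mynfs, allow rw tag cephfs data=a'
    #     mds 'allow rw path=/'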
    def _create_user_key(
            self,
            cluster_id: str,
            entity: str,
            path: str,
            fs_name: str,
            fs_ro: bool
    ) -> str:
        osd_cap = 'allow rw pool={} namespace={}, allow rw tag cephfs data={}'.format(
            self.rados_pool, cluster_id, fs_name)
        access_type = 'r' if fs_ro else 'rw'
        nfs_caps = [
            'mon', 'allow r',
            'osd', osd_cap,
            'mds', 'allow {} path={}'.format(access_type, path)
        ]

        ret, out, err = self.mgr.mon_command({
            'prefix': 'auth get-or-create',
            'entity': 'client.{}'.format(entity),
            'caps': nfs_caps,
            'format': 'json',
        })
        if ret == -errno.EINVAL and 'does not match' in err:
            # the entity already exists with different caps; update them
            ret, out, err = self.mgr.mon_command({
                'prefix': 'auth caps',
                'entity': 'client.{}'.format(entity),
                'caps': nfs_caps,
                'format': 'json',
            })
            if err:
                raise NFSException(f'Failed to update caps for {entity}: {err}')
            ret, out, err = self.mgr.mon_command({
                'prefix': 'auth get',
                'entity': 'client.{}'.format(entity),
                'format': 'json',
            })
            if err:
                raise NFSException(f'Failed to fetch caps for {entity}: {err}')

        json_res = json.loads(out)
        log.info("Export user created: %s", json_res[0]['entity'])
        return json_res[0]['key']

    def create_export_from_dict(self,
                                cluster_id: str,
                                ex_id: int,
                                ex_dict: Dict[str, Any]) -> Export:
        pseudo_path = ex_dict.get("pseudo")
        if not pseudo_path:
            raise NFSInvalidOperation("export must specify pseudo path")

        path = ex_dict.get("path")
        if path is None:
            raise NFSInvalidOperation("export must specify path")
        path = normalize_path(path)

        fsal = ex_dict.get("fsal", {})
        fsal_type = fsal.get("name")
        if fsal_type == NFS_GANESHA_SUPPORTED_FSALS[1]:
            if '/' in path and path != '/':
                raise NFSInvalidOperation('"/" is not allowed in path with bucket name')
        elif fsal_type == NFS_GANESHA_SUPPORTED_FSALS[0]:
            fs_name = fsal.get("fs_name")
            if not fs_name:
                raise NFSInvalidOperation("export FSAL must specify fs_name")
            if not check_fs(self.mgr, fs_name):
                raise FSNotFound(fs_name)

            user_id = f"nfs.{cluster_id}.{ex_id}"
            if "user_id" in fsal and fsal["user_id"] != user_id:
                raise NFSInvalidOperation(f"export FSAL user_id must be '{user_id}'")
        else:
            raise NFSInvalidOperation(f"NFS Ganesha supported FSALs are {NFS_GANESHA_SUPPORTED_FSALS}. "
                                      "Export must specify one of them.")

        ex_dict["fsal"] = fsal
        ex_dict["cluster_id"] = cluster_id
        export = Export.from_dict(ex_id, ex_dict)
        export.validate(self.mgr)
        log.debug("Successfully created %s export-%s from dict for cluster %s",
                  fsal_type, ex_id, cluster_id)
        return export

    def create_cephfs_export(self,
                             fs_name: str,
                             cluster_id: str,
                             pseudo_path: str,
                             read_only: bool,
                             path: str,
                             squash: str,
                             access_type: str,
                             clients: list = []) -> Tuple[int, str, str]:
        pseudo_path = normalize_path(pseudo_path)

        if not self._fetch_export(cluster_id, pseudo_path):
            export = self.create_export_from_dict(
                cluster_id,
                self._gen_export_id(cluster_id),
                {
                    "pseudo": pseudo_path,
                    "path": path,
                    "access_type": access_type,
                    "squash": squash,
                    "fsal": {
                        "name": NFS_GANESHA_SUPPORTED_FSALS[0],
                        "fs_name": fs_name,
                    },
                    "clients": clients,
                }
            )
            log.debug("creating cephfs export %s", export)
            self._create_export_user(export)
            self._save_export(cluster_id, export)
            result = {
                "bind": export.pseudo,
                "fs": fs_name,
                "path": export.path,
                "cluster": cluster_id,
                "mode": export.access_type,
            }
            return (0, json.dumps(result, indent=4), '')
        return 0, "", "Export already exists"

    def create_rgw_export(self,
                          cluster_id: str,
                          pseudo_path: str,
                          access_type: str,
                          read_only: bool,
                          squash: str,
                          bucket: Optional[str] = None,
                          user_id: Optional[str] = None,
                          clients: list = []) -> Tuple[int, str, str]:
        pseudo_path = normalize_path(pseudo_path)

        if not bucket and not user_id:
            return -errno.EINVAL, "", "Must specify either bucket or user_id"

        if not self._fetch_export(cluster_id, pseudo_path):
            export = self.create_export_from_dict(
                cluster_id,
                self._gen_export_id(cluster_id),
                {
                    "pseudo": pseudo_path,
                    "path": bucket or '/',
                    "access_type": access_type,
                    "squash": squash,
                    "fsal": {
                        "name": NFS_GANESHA_SUPPORTED_FSALS[1],
                        "user_id": user_id,
                    },
                    "clients": clients,
                }
            )
            log.debug("creating rgw export %s", export)
            self._create_export_user(export)
            self._save_export(cluster_id, export)
            result = {
                "bind": export.pseudo,
                "path": export.path,
                "cluster": cluster_id,
                "mode": export.access_type,
                "squash": export.squash,
            }
            return (0, json.dumps(result, indent=4), '')
        return 0, "", "Export already exists"

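    # _apply_export reconciles a requested export with any existing one:
    # it matches by pseudo path first, then by export_id; creates the
    # export and its user if nothing matches, and otherwise carries over
    # or refreshes credentials before rewriting the rados object in place.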
    def _apply_export(
            self,
            cluster_id: str,
            new_export_dict: Dict,
    ) -> Tuple[int, str, str]:
        for k in ['path', 'pseudo']:
            if k not in new_export_dict:
                raise NFSInvalidOperation(f'Export missing required field {k}')
        if cluster_id not in self.exports:
            self.exports[cluster_id] = []

        new_export_dict['path'] = normalize_path(new_export_dict['path'])
        new_export_dict['pseudo'] = normalize_path(new_export_dict['pseudo'])

        old_export = self._fetch_export(cluster_id, new_export_dict['pseudo'])
        if old_export:
            # Check if export id matches
            if new_export_dict.get('export_id'):
                if old_export.export_id != new_export_dict.get('export_id'):
                    raise NFSInvalidOperation('Export ID changed, cannot update export')
            else:
                new_export_dict['export_id'] = old_export.export_id
        elif new_export_dict.get('export_id'):
            old_export = self._fetch_export_obj(cluster_id, new_export_dict['export_id'])
            if old_export:
                # re-fetch via old pseudo
                old_export = self._fetch_export(cluster_id, old_export.pseudo)
                assert old_export
                log.debug("export %s pseudo %s -> %s",
                          old_export.export_id, old_export.pseudo, new_export_dict['pseudo'])

        new_export = self.create_export_from_dict(
            cluster_id,
            new_export_dict.get('export_id', self._gen_export_id(cluster_id)),
            new_export_dict
        )

        if not old_export:
            self._create_export_user(new_export)
            self._save_export(cluster_id, new_export)
            return 0, f'Added export {new_export.pseudo}', ''

        if old_export.fsal.name != new_export.fsal.name:
            raise NFSInvalidOperation('FSAL change not allowed')
        if old_export.pseudo != new_export.pseudo:
            log.debug('export %s pseudo %s -> %s',
                      new_export.export_id, old_export.pseudo, new_export.pseudo)

        if old_export.fsal.name == NFS_GANESHA_SUPPORTED_FSALS[0]:
            old_fsal = cast(CephFSFSAL, old_export.fsal)
            new_fsal = cast(CephFSFSAL, new_export.fsal)
            if old_fsal.user_id != new_fsal.user_id:
                self._delete_export_user(old_export)
                self._create_export_user(new_export)
            elif (
                    old_export.path != new_export.path
                    or old_fsal.fs_name != new_fsal.fs_name
            ):
                self._update_user_id(
                    cluster_id,
                    new_export.path,
                    new_export.access_type,
                    cast(str, new_fsal.fs_name),
                    cast(str, new_fsal.user_id)
                )
                new_fsal.cephx_key = old_fsal.cephx_key
            else:
                new_fsal.cephx_key = old_fsal.cephx_key
        if old_export.fsal.name == NFS_GANESHA_SUPPORTED_FSALS[1]:
            old_rgw_fsal = cast(RGWFSAL, old_export.fsal)
            new_rgw_fsal = cast(RGWFSAL, new_export.fsal)
            if old_rgw_fsal.user_id != new_rgw_fsal.user_id:
                self._delete_export_user(old_export)
                self._create_export_user(new_export)
            elif old_rgw_fsal.access_key_id != new_rgw_fsal.access_key_id:
                raise NFSInvalidOperation('access_key_id change is not allowed')
            elif old_rgw_fsal.secret_access_key != new_rgw_fsal.secret_access_key:
                raise NFSInvalidOperation('secret_access_key change is not allowed')

        self.exports[cluster_id].remove(old_export)
        self._update_export(cluster_id, new_export)

        # TODO: detect whether the update is such that a reload is sufficient
        restart_nfs_service(self.mgr, new_export.cluster_id)

        return 0, f"Updated export {new_export.pseudo}", ""

    def _rados(self, cluster_id: str) -> NFSRados:
        """Return a new NFSRados object for the given cluster id."""
        return NFSRados(self.mgr.rados, cluster_id)