]> git.proxmox.com Git - ceph.git/blob - ceph/src/pybind/mgr/volumes/module.py
792121563a46b2ebd0f5cb19530adc6f7af5cf2b
[ceph.git] / ceph / src / pybind / mgr / volumes / module.py
import errno
import json
import logging
import threading
import traceback
from functools import wraps

from mgr_module import MgrModule
import orchestrator

from .fs.volume import VolumeClient
from .fs.nfs import NFSCluster, FSExport
12
13 log = logging.getLogger(__name__)
14
class VolumesInfoWrapper():
    """Context manager that logs the start and outcome of a wrapped operation.

    On entry it logs the operation context; on exit it logs success, or —
    when the body raised — the full formatted traceback at error level.
    """

    def __init__(self, f, context):
        # f is kept for reference only; context is the human-readable
        # description used in every log line.
        self.f = f
        self.context = context

    def __enter__(self):
        log.info("Starting {}".format(self.context))

    def __exit__(self, exc_type, exc_value, tb):
        if exc_type is None:
            log.info("Finishing {}".format(self.context))
        else:
            formatted = "".join(
                traceback.format_exception(exc_type, exc_value, tb))
            log.error("Failed {}:\n{}".format(self.context, formatted))
26
def mgr_cmd_wrap(f):
    """Decorate a command handler so its entry, exit and any failure are logged.

    The log context includes the handler name, the parsed command arguments
    and the raw input buffer.  ``functools.wraps`` preserves the handler's
    ``__name__``/``__doc__`` on the returned wrapper (the original version
    lost them).
    """
    @wraps(f)
    def wrap(self, inbuf, cmd):
        astr = ["{}:{}".format(k, v) for k, v in cmd.items()]
        context = "{}({}) < \"{}\"".format(f.__name__, ", ".join(astr), inbuf)
        with VolumesInfoWrapper(f, context):
            return f(self, inbuf, cmd)
    return wrap
36
class Module(orchestrator.OrchestratorClientMixin, MgrModule):
    """ceph-mgr module implementing the ``fs volume``/``fs subvolume`` and
    ``nfs`` command families.

    Commands are declared in COMMANDS; handle_command() dispatches each one
    to the matching ``_cmd_<prefix with spaces as underscores>`` method,
    which delegates to VolumeClient (CephFS volumes/subvolumes/clones),
    FSExport (NFS exports) or NFSCluster (NFS cluster lifecycle).
    """

    COMMANDS = [
        {
            'cmd': 'fs volume ls',
            'desc': "List volumes",
            'perm': 'r'
        },
        {
            'cmd': 'fs volume create '
                   'name=name,type=CephString '
                   'name=placement,type=CephString,req=false ',
            'desc': "Create a CephFS volume",
            'perm': 'rw'
        },
        {
            'cmd': 'fs volume rm '
                   'name=vol_name,type=CephString '
                   'name=yes-i-really-mean-it,type=CephString,req=false ',
            'desc': "Delete a FS volume by passing --yes-i-really-mean-it flag",
            'perm': 'rw'
        },
        {
            'cmd': 'fs subvolumegroup ls '
                   'name=vol_name,type=CephString ',
            'desc': "List subvolumegroups",
            'perm': 'r'
        },
        {
            'cmd': 'fs subvolumegroup create '
                   'name=vol_name,type=CephString '
                   'name=group_name,type=CephString '
                   'name=pool_layout,type=CephString,req=false '
                   'name=uid,type=CephInt,req=false '
                   'name=gid,type=CephInt,req=false '
                   'name=mode,type=CephString,req=false ',
            'desc': "Create a CephFS subvolume group in a volume, and optionally, "
                    "with a specific data pool layout, and a specific numeric mode",
            'perm': 'rw'
        },
        {
            'cmd': 'fs subvolumegroup rm '
                   'name=vol_name,type=CephString '
                   'name=group_name,type=CephString '
                   'name=force,type=CephBool,req=false ',
            'desc': "Delete a CephFS subvolume group in a volume",
            'perm': 'rw'
        },
        {
            'cmd': 'fs subvolume ls '
                   'name=vol_name,type=CephString '
                   'name=group_name,type=CephString,req=false ',
            'desc': "List subvolumes",
            'perm': 'r'
        },
        {
            'cmd': 'fs subvolume create '
                   'name=vol_name,type=CephString '
                   'name=sub_name,type=CephString '
                   'name=size,type=CephInt,req=false '
                   'name=group_name,type=CephString,req=false '
                   'name=pool_layout,type=CephString,req=false '
                   'name=uid,type=CephInt,req=false '
                   'name=gid,type=CephInt,req=false '
                   'name=mode,type=CephString,req=false '
                   'name=namespace_isolated,type=CephBool,req=false ',
            'desc': "Create a CephFS subvolume in a volume, and optionally, "
                    "with a specific size (in bytes), a specific data pool layout, "
                    "a specific mode, in a specific subvolume group and in separate "
                    "RADOS namespace",
            'perm': 'rw'
        },
        {
            'cmd': 'fs subvolume rm '
                   'name=vol_name,type=CephString '
                   'name=sub_name,type=CephString '
                   'name=group_name,type=CephString,req=false '
                   'name=force,type=CephBool,req=false '
                   'name=retain_snapshots,type=CephBool,req=false ',
            'desc': "Delete a CephFS subvolume in a volume, and optionally, "
                    "in a specific subvolume group, force deleting a cancelled or failed "
                    "clone, and retaining existing subvolume snapshots",
            'perm': 'rw'
        },
        {
            'cmd': 'fs subvolumegroup getpath '
                   'name=vol_name,type=CephString '
                   'name=group_name,type=CephString ',
            'desc': "Get the mountpath of a CephFS subvolume group in a volume",
            'perm': 'r'
        },
        {
            'cmd': 'fs subvolume getpath '
                   'name=vol_name,type=CephString '
                   'name=sub_name,type=CephString '
                   'name=group_name,type=CephString,req=false ',
            'desc': "Get the mountpath of a CephFS subvolume in a volume, "
                    "and optionally, in a specific subvolume group",
            'perm': 'rw'
        },
        {
            'cmd': 'fs subvolume info '
                   'name=vol_name,type=CephString '
                   'name=sub_name,type=CephString '
                   'name=group_name,type=CephString,req=false ',
            'desc': "Get the metadata of a CephFS subvolume in a volume, "
                    "and optionally, in a specific subvolume group",
            'perm': 'r'
        },
        {
            'cmd': 'fs subvolumegroup pin'
                   ' name=vol_name,type=CephString'
                   ' name=group_name,type=CephString,req=true'
                   ' name=pin_type,type=CephChoices,strings=export|distributed|random'
                   ' name=pin_setting,type=CephString,req=true',
            'desc': "Set MDS pinning policy for subvolumegroup",
            'perm': 'rw'
        },
        {
            'cmd': 'fs subvolumegroup snapshot ls '
                   'name=vol_name,type=CephString '
                   'name=group_name,type=CephString ',
            'desc': "List subvolumegroup snapshots",
            'perm': 'r'
        },
        {
            'cmd': 'fs subvolumegroup snapshot create '
                   'name=vol_name,type=CephString '
                   'name=group_name,type=CephString '
                   'name=snap_name,type=CephString ',
            'desc': "Create a snapshot of a CephFS subvolume group in a volume",
            'perm': 'rw'
        },
        {
            'cmd': 'fs subvolumegroup snapshot rm '
                   'name=vol_name,type=CephString '
                   'name=group_name,type=CephString '
                   'name=snap_name,type=CephString '
                   'name=force,type=CephBool,req=false ',
            'desc': "Delete a snapshot of a CephFS subvolume group in a volume",
            'perm': 'rw'
        },
        {
            'cmd': 'fs subvolume snapshot ls '
                   'name=vol_name,type=CephString '
                   'name=sub_name,type=CephString '
                   'name=group_name,type=CephString,req=false ',
            'desc': "List subvolume snapshots",
            'perm': 'r'
        },
        {
            'cmd': 'fs subvolume snapshot create '
                   'name=vol_name,type=CephString '
                   'name=sub_name,type=CephString '
                   'name=snap_name,type=CephString '
                   'name=group_name,type=CephString,req=false ',
            'desc': "Create a snapshot of a CephFS subvolume in a volume, "
                    "and optionally, in a specific subvolume group",
            'perm': 'rw'
        },
        {
            'cmd': 'fs subvolume snapshot info '
                   'name=vol_name,type=CephString '
                   'name=sub_name,type=CephString '
                   'name=snap_name,type=CephString '
                   'name=group_name,type=CephString,req=false ',
            'desc': "Get the metadata of a CephFS subvolume snapshot "
                    "and optionally, in a specific subvolume group",
            'perm': 'r'
        },
        {
            'cmd': 'fs subvolume snapshot rm '
                   'name=vol_name,type=CephString '
                   'name=sub_name,type=CephString '
                   'name=snap_name,type=CephString '
                   'name=group_name,type=CephString,req=false '
                   'name=force,type=CephBool,req=false ',
            'desc': "Delete a snapshot of a CephFS subvolume in a volume, "
                    "and optionally, in a specific subvolume group",
            'perm': 'rw'
        },
        {
            'cmd': 'fs subvolume resize '
                   'name=vol_name,type=CephString '
                   'name=sub_name,type=CephString '
                   'name=new_size,type=CephString,req=true '
                   'name=group_name,type=CephString,req=false '
                   'name=no_shrink,type=CephBool,req=false ',
            'desc': "Resize a CephFS subvolume",
            'perm': 'rw'
        },
        {
            'cmd': 'fs subvolume pin'
                   ' name=vol_name,type=CephString'
                   ' name=sub_name,type=CephString'
                   ' name=pin_type,type=CephChoices,strings=export|distributed|random'
                   ' name=pin_setting,type=CephString,req=true'
                   ' name=group_name,type=CephString,req=false',
            'desc': "Set MDS pinning policy for subvolume",
            'perm': 'rw'
        },
        {
            'cmd': 'fs subvolume snapshot protect '
                   'name=vol_name,type=CephString '
                   'name=sub_name,type=CephString '
                   'name=snap_name,type=CephString '
                   'name=group_name,type=CephString,req=false ',
            'desc': "(deprecated) Protect snapshot of a CephFS subvolume in a volume, "
                    "and optionally, in a specific subvolume group",
            'perm': 'rw'
        },
        {
            'cmd': 'fs subvolume snapshot unprotect '
                   'name=vol_name,type=CephString '
                   'name=sub_name,type=CephString '
                   'name=snap_name,type=CephString '
                   'name=group_name,type=CephString,req=false ',
            'desc': "(deprecated) Unprotect a snapshot of a CephFS subvolume in a volume, "
                    "and optionally, in a specific subvolume group",
            'perm': 'rw'
        },
        {
            'cmd': 'fs subvolume snapshot clone '
                   'name=vol_name,type=CephString '
                   'name=sub_name,type=CephString '
                   'name=snap_name,type=CephString '
                   'name=target_sub_name,type=CephString '
                   'name=pool_layout,type=CephString,req=false '
                   'name=group_name,type=CephString,req=false '
                   'name=target_group_name,type=CephString,req=false ',
            'desc': "Clone a snapshot to target subvolume",
            'perm': 'rw'
        },
        {
            'cmd': 'fs clone status '
                   'name=vol_name,type=CephString '
                   'name=clone_name,type=CephString '
                   'name=group_name,type=CephString,req=false ',
            'desc': "Get status on a cloned subvolume.",
            'perm': 'r'
        },
        {
            'cmd': 'fs clone cancel '
                   'name=vol_name,type=CephString '
                   'name=clone_name,type=CephString '
                   'name=group_name,type=CephString,req=false ',
            # typo fix: was "Cancel an pending ..."
            'desc': "Cancel a pending or ongoing clone operation.",
            'perm': 'r'
        },
        {
            'cmd': 'nfs export create cephfs '
                   'name=fsname,type=CephString '
                   'name=clusterid,type=CephString '
                   'name=binding,type=CephString '
                   'name=readonly,type=CephBool,req=false '
                   'name=path,type=CephString,req=false ',
            'desc': "Create a cephfs export",
            'perm': 'rw'
        },
        {
            'cmd': 'nfs export delete '
                   'name=clusterid,type=CephString '
                   'name=binding,type=CephString ',
            'desc': "Delete a cephfs export",
            'perm': 'rw'
        },
        {
            'cmd': 'nfs export ls '
                   'name=clusterid,type=CephString '
                   'name=detailed,type=CephBool,req=false ',
            'desc': "List exports of a NFS cluster",
            'perm': 'r'
        },
        {
            'cmd': 'nfs export get '
                   'name=clusterid,type=CephString '
                   'name=binding,type=CephString ',
            # typo fix: was "Fetch a export ..."
            'desc': "Fetch an export of a NFS cluster given the pseudo path/binding",
            'perm': 'r'
        },
        {
            'cmd': 'nfs cluster create '
                   'name=type,type=CephString '
                   'name=clusterid,type=CephString,goodchars=[A-Za-z0-9-_.] '
                   'name=placement,type=CephString,req=false ',
            'desc': "Create an NFS Cluster",
            'perm': 'rw'
        },
        {
            'cmd': 'nfs cluster update '
                   'name=clusterid,type=CephString '
                   'name=placement,type=CephString ',
            'desc': "Updates an NFS Cluster",
            'perm': 'rw'
        },
        {
            'cmd': 'nfs cluster delete '
                   'name=clusterid,type=CephString ',
            'desc': "Deletes an NFS Cluster",
            'perm': 'rw'
        },
        {
            'cmd': 'nfs cluster ls ',
            'desc': "List NFS Clusters",
            'perm': 'r'
        },
        {
            'cmd': 'nfs cluster info '
                   'name=clusterid,type=CephString,req=false ',
            'desc': "Displays NFS Cluster info",
            'perm': 'r'
        },
        {
            'cmd': 'nfs cluster config set '
                   'name=clusterid,type=CephString ',
            'desc': "Set NFS-Ganesha config by `-i <config_file>`",
            'perm': 'rw'
        },
        {
            'cmd': 'nfs cluster config reset '
                   'name=clusterid,type=CephString ',
            'desc': "Reset NFS-Ganesha Config to default",
            'perm': 'rw'
        },
        # volume ls [recursive]
        # subvolume ls <volume>
        # volume authorize/deauthorize
        # subvolume authorize/deauthorize

        # volume describe (free space, etc)
        # volume auth list (vc.get_authorized_ids)

        # snapshots?

        # FIXME: we're doing CephFSVolumeClient.recover on every
        # path where we instantiate and connect a client. Perhaps
        # keep clients alive longer, or just pass a "don't recover"
        # flag in if it's the >1st time we connected a particular
        # volume in the lifetime of this module instance.
    ]

    MODULE_OPTIONS = [
        {
            'name': 'max_concurrent_clones',
            'type': 'int',
            'default': 4,
            'desc': 'Number of asynchronous cloner threads',
        }
    ]

    def __init__(self, *args, **kwargs):
        # inited guards config_notify(): option changes arriving before the
        # sub-clients exist must not be forwarded to them.
        self.inited = False
        # for mypy
        self.max_concurrent_clones = None
        self.lock = threading.Lock()
        super(Module, self).__init__(*args, **kwargs)
        # Initialize config option members
        self.config_notify()
        with self.lock:
            self.vc = VolumeClient(self)
            self.fs_export = FSExport(self)
            self.nfs = NFSCluster(self)
            self.inited = True

    def __del__(self):
        self.vc.shutdown()

    def shutdown(self):
        self.vc.shutdown()

    def config_notify(self):
        """
        This method is called whenever one of our config options is changed.
        """
        with self.lock:
            for opt in self.MODULE_OPTIONS:
                setattr(self,
                        opt['name'],  # type: ignore
                        self.get_module_option(opt['name']))  # type: ignore
                self.log.debug(' mgr option %s = %s',
                               opt['name'], getattr(self, opt['name']))  # type: ignore
                # Propagate to the cloner only once the sub-clients exist.
                if self.inited:
                    if opt['name'] == "max_concurrent_clones":
                        self.vc.cloner.reconfigure_max_concurrent_clones(self.max_concurrent_clones)

    def handle_command(self, inbuf, cmd):
        """Dispatch a mon command to the matching ``_cmd_*`` handler.

        :return: a 3-tuple of return code(int), output (str), error message (str)
        """
        handler_name = "_cmd_" + cmd['prefix'].replace(" ", "_")
        try:
            handler = getattr(self, handler_name)
        except AttributeError:
            return -errno.EINVAL, "", "Unknown command"

        return handler(inbuf, cmd)

    @mgr_cmd_wrap
    def _cmd_fs_volume_create(self, inbuf, cmd):
        vol_id = cmd['name']
        placement = cmd.get('placement', '')
        return self.vc.create_fs_volume(vol_id, placement)

    @mgr_cmd_wrap
    def _cmd_fs_volume_rm(self, inbuf, cmd):
        vol_name = cmd['vol_name']
        confirm = cmd.get('yes-i-really-mean-it', None)
        return self.vc.delete_fs_volume(vol_name, confirm)

    @mgr_cmd_wrap
    def _cmd_fs_volume_ls(self, inbuf, cmd):
        return self.vc.list_fs_volumes()

    @mgr_cmd_wrap
    def _cmd_fs_subvolumegroup_create(self, inbuf, cmd):
        """
        :return: a 3-tuple of return code(int), empty string(str), error message (str)
        """
        return self.vc.create_subvolume_group(
            vol_name=cmd['vol_name'], group_name=cmd['group_name'],
            pool_layout=cmd.get('pool_layout', None), mode=cmd.get('mode', '755'),
            uid=cmd.get('uid', None), gid=cmd.get('gid', None))

    @mgr_cmd_wrap
    def _cmd_fs_subvolumegroup_rm(self, inbuf, cmd):
        """
        :return: a 3-tuple of return code(int), empty string(str), error message (str)
        """
        return self.vc.remove_subvolume_group(vol_name=cmd['vol_name'],
                                              group_name=cmd['group_name'],
                                              force=cmd.get('force', False))

    @mgr_cmd_wrap
    def _cmd_fs_subvolumegroup_ls(self, inbuf, cmd):
        return self.vc.list_subvolume_groups(vol_name=cmd['vol_name'])

    @mgr_cmd_wrap
    def _cmd_fs_subvolume_create(self, inbuf, cmd):
        """
        :return: a 3-tuple of return code(int), empty string(str), error message (str)
        """
        return self.vc.create_subvolume(vol_name=cmd['vol_name'],
                                        sub_name=cmd['sub_name'],
                                        group_name=cmd.get('group_name', None),
                                        size=cmd.get('size', None),
                                        pool_layout=cmd.get('pool_layout', None),
                                        uid=cmd.get('uid', None),
                                        gid=cmd.get('gid', None),
                                        mode=cmd.get('mode', '755'),
                                        namespace_isolated=cmd.get('namespace_isolated', False))

    @mgr_cmd_wrap
    def _cmd_fs_subvolume_rm(self, inbuf, cmd):
        """
        :return: a 3-tuple of return code(int), empty string(str), error message (str)
        """
        return self.vc.remove_subvolume(vol_name=cmd['vol_name'],
                                        sub_name=cmd['sub_name'],
                                        group_name=cmd.get('group_name', None),
                                        force=cmd.get('force', False),
                                        retain_snapshots=cmd.get('retain_snapshots', False))

    @mgr_cmd_wrap
    def _cmd_fs_subvolume_ls(self, inbuf, cmd):
        return self.vc.list_subvolumes(vol_name=cmd['vol_name'],
                                       group_name=cmd.get('group_name', None))

    @mgr_cmd_wrap
    def _cmd_fs_subvolumegroup_getpath(self, inbuf, cmd):
        return self.vc.getpath_subvolume_group(
            vol_name=cmd['vol_name'], group_name=cmd['group_name'])

    @mgr_cmd_wrap
    def _cmd_fs_subvolume_getpath(self, inbuf, cmd):
        return self.vc.subvolume_getpath(vol_name=cmd['vol_name'],
                                         sub_name=cmd['sub_name'],
                                         group_name=cmd.get('group_name', None))

    @mgr_cmd_wrap
    def _cmd_fs_subvolume_info(self, inbuf, cmd):
        return self.vc.subvolume_info(vol_name=cmd['vol_name'],
                                      sub_name=cmd['sub_name'],
                                      group_name=cmd.get('group_name', None))

    @mgr_cmd_wrap
    def _cmd_fs_subvolumegroup_pin(self, inbuf, cmd):
        return self.vc.pin_subvolume_group(vol_name=cmd['vol_name'],
            group_name=cmd['group_name'], pin_type=cmd['pin_type'],
            pin_setting=cmd['pin_setting'])

    @mgr_cmd_wrap
    def _cmd_fs_subvolumegroup_snapshot_create(self, inbuf, cmd):
        return self.vc.create_subvolume_group_snapshot(vol_name=cmd['vol_name'],
                                                       group_name=cmd['group_name'],
                                                       snap_name=cmd['snap_name'])

    @mgr_cmd_wrap
    def _cmd_fs_subvolumegroup_snapshot_rm(self, inbuf, cmd):
        return self.vc.remove_subvolume_group_snapshot(vol_name=cmd['vol_name'],
                                                       group_name=cmd['group_name'],
                                                       snap_name=cmd['snap_name'],
                                                       force=cmd.get('force', False))

    @mgr_cmd_wrap
    def _cmd_fs_subvolumegroup_snapshot_ls(self, inbuf, cmd):
        return self.vc.list_subvolume_group_snapshots(vol_name=cmd['vol_name'],
                                                      group_name=cmd['group_name'])

    @mgr_cmd_wrap
    def _cmd_fs_subvolume_snapshot_create(self, inbuf, cmd):
        return self.vc.create_subvolume_snapshot(vol_name=cmd['vol_name'],
                                                 sub_name=cmd['sub_name'],
                                                 snap_name=cmd['snap_name'],
                                                 group_name=cmd.get('group_name', None))

    @mgr_cmd_wrap
    def _cmd_fs_subvolume_snapshot_rm(self, inbuf, cmd):
        return self.vc.remove_subvolume_snapshot(vol_name=cmd['vol_name'],
                                                 sub_name=cmd['sub_name'],
                                                 snap_name=cmd['snap_name'],
                                                 group_name=cmd.get('group_name', None),
                                                 force=cmd.get('force', False))

    @mgr_cmd_wrap
    def _cmd_fs_subvolume_snapshot_info(self, inbuf, cmd):
        return self.vc.subvolume_snapshot_info(vol_name=cmd['vol_name'],
                                               sub_name=cmd['sub_name'],
                                               snap_name=cmd['snap_name'],
                                               group_name=cmd.get('group_name', None))

    @mgr_cmd_wrap
    def _cmd_fs_subvolume_snapshot_ls(self, inbuf, cmd):
        return self.vc.list_subvolume_snapshots(vol_name=cmd['vol_name'],
                                                sub_name=cmd['sub_name'],
                                                group_name=cmd.get('group_name', None))

    @mgr_cmd_wrap
    def _cmd_fs_subvolume_resize(self, inbuf, cmd):
        return self.vc.resize_subvolume(vol_name=cmd['vol_name'], sub_name=cmd['sub_name'],
                                        new_size=cmd['new_size'], group_name=cmd.get('group_name', None),
                                        no_shrink=cmd.get('no_shrink', False))

    @mgr_cmd_wrap
    def _cmd_fs_subvolume_pin(self, inbuf, cmd):
        return self.vc.subvolume_pin(vol_name=cmd['vol_name'],
            sub_name=cmd['sub_name'], pin_type=cmd['pin_type'],
            pin_setting=cmd['pin_setting'],
            group_name=cmd.get('group_name', None))

    @mgr_cmd_wrap
    def _cmd_fs_subvolume_snapshot_protect(self, inbuf, cmd):
        # deprecated: retained for backward compatibility with older clients
        return self.vc.protect_subvolume_snapshot(vol_name=cmd['vol_name'], sub_name=cmd['sub_name'],
                                                  snap_name=cmd['snap_name'], group_name=cmd.get('group_name', None))

    @mgr_cmd_wrap
    def _cmd_fs_subvolume_snapshot_unprotect(self, inbuf, cmd):
        # deprecated: retained for backward compatibility with older clients
        return self.vc.unprotect_subvolume_snapshot(vol_name=cmd['vol_name'], sub_name=cmd['sub_name'],
                                                    snap_name=cmd['snap_name'], group_name=cmd.get('group_name', None))

    @mgr_cmd_wrap
    def _cmd_fs_subvolume_snapshot_clone(self, inbuf, cmd):
        return self.vc.clone_subvolume_snapshot(
            vol_name=cmd['vol_name'], sub_name=cmd['sub_name'], snap_name=cmd['snap_name'],
            group_name=cmd.get('group_name', None), pool_layout=cmd.get('pool_layout', None),
            target_sub_name=cmd['target_sub_name'], target_group_name=cmd.get('target_group_name', None))

    @mgr_cmd_wrap
    def _cmd_fs_clone_status(self, inbuf, cmd):
        return self.vc.clone_status(
            vol_name=cmd['vol_name'], clone_name=cmd['clone_name'], group_name=cmd.get('group_name', None))

    @mgr_cmd_wrap
    def _cmd_fs_clone_cancel(self, inbuf, cmd):
        return self.vc.clone_cancel(
            vol_name=cmd['vol_name'], clone_name=cmd['clone_name'], group_name=cmd.get('group_name', None))

    @mgr_cmd_wrap
    def _cmd_nfs_export_create_cephfs(self, inbuf, cmd):
        #TODO Extend export creation for rgw.
        return self.fs_export.create_export(fs_name=cmd['fsname'], cluster_id=cmd['clusterid'],
            pseudo_path=cmd['binding'], read_only=cmd.get('readonly', False), path=cmd.get('path', '/'))

    @mgr_cmd_wrap
    def _cmd_nfs_export_delete(self, inbuf, cmd):
        return self.fs_export.delete_export(cluster_id=cmd['clusterid'], pseudo_path=cmd['binding'])

    @mgr_cmd_wrap
    def _cmd_nfs_export_ls(self, inbuf, cmd):
        return self.fs_export.list_exports(cluster_id=cmd['clusterid'], detailed=cmd.get('detailed', False))

    @mgr_cmd_wrap
    def _cmd_nfs_export_get(self, inbuf, cmd):
        return self.fs_export.get_export(cluster_id=cmd['clusterid'], pseudo_path=cmd['binding'])

    @mgr_cmd_wrap
    def _cmd_nfs_cluster_create(self, inbuf, cmd):
        return self.nfs.create_nfs_cluster(cluster_id=cmd['clusterid'], export_type=cmd['type'],
                                           placement=cmd.get('placement', None))

    @mgr_cmd_wrap
    def _cmd_nfs_cluster_update(self, inbuf, cmd):
        return self.nfs.update_nfs_cluster(cluster_id=cmd['clusterid'], placement=cmd['placement'])

    @mgr_cmd_wrap
    def _cmd_nfs_cluster_delete(self, inbuf, cmd):
        return self.nfs.delete_nfs_cluster(cluster_id=cmd['clusterid'])

    @mgr_cmd_wrap
    def _cmd_nfs_cluster_ls(self, inbuf, cmd):
        return self.nfs.list_nfs_cluster()

    @mgr_cmd_wrap
    def _cmd_nfs_cluster_info(self, inbuf, cmd):
        return self.nfs.show_nfs_cluster_info(cluster_id=cmd.get('clusterid', None))

    # consistency fix: these two handlers were the only ones missing the
    # @mgr_cmd_wrap logging decorator
    @mgr_cmd_wrap
    def _cmd_nfs_cluster_config_set(self, inbuf, cmd):
        # the ganesha config itself is passed via `-i <config_file>` as inbuf
        return self.nfs.set_nfs_cluster_config(cluster_id=cmd['clusterid'], nfs_config=inbuf)

    @mgr_cmd_wrap
    def _cmd_nfs_cluster_config_reset(self, inbuf, cmd):
        return self.nfs.reset_nfs_cluster_config(cluster_id=cmd['clusterid'])