import errno
import json
import logging
import traceback
import threading

from mgr_module import MgrModule
import orchestrator

from .fs.volume import VolumeClient
from .fs.nfs import NFSCluster, FSExport

log = logging.getLogger(__name__)

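# Context manager used by mgr_cmd_wrap below: logs the start of a wrapped
# command and, on exit, either its successful completion or the failure
# traceback.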
class VolumesInfoWrapper():
    def __init__(self, f, context):
        self.f = f
        self.context = context
    def __enter__(self):
        log.info("Starting {}".format(self.context))
    def __exit__(self, exc_type, exc_value, tb):
        if exc_type is not None:
            log.error("Failed {}:\n{}".format(self.context, "".join(traceback.format_exception(exc_type, exc_value, tb))))
        else:
            log.info("Finishing {}".format(self.context))

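# Decorator for the _cmd_* handlers: builds a readable context string from the
# command arguments and input buffer, then runs the handler inside
# VolumesInfoWrapper so every invocation (and any exception) is logged.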
def mgr_cmd_wrap(f):
    def wrap(self, inbuf, cmd):
        astr = []
        for k in cmd:
            astr.append("{}:{}".format(k, cmd[k]))
        context = "{}({}) < \"{}\"".format(f.__name__, ", ".join(astr), inbuf)
        with VolumesInfoWrapper(f, context):
            return f(self, inbuf, cmd)
    return wrap

class Module(orchestrator.OrchestratorClientMixin, MgrModule):
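    # Each entry below describes one "ceph fs ..." or "ceph nfs ..." CLI
    # command handled by this module; the 'cmd' string encodes argument names
    # and types in the MON command description syntax, and the matching
    # _cmd_<prefix> method is looked up in handle_command(). For example,
    # "fs subvolume create" is dispatched to _cmd_fs_subvolume_create().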
    COMMANDS = [
        {
            'cmd': 'fs volume ls',
            'desc': "List volumes",
            'perm': 'r'
        },
        {
            'cmd': 'fs volume create '
                   'name=name,type=CephString '
                   'name=placement,type=CephString,req=false ',
            'desc': "Create a CephFS volume",
            'perm': 'rw'
        },
        {
            'cmd': 'fs volume rm '
                   'name=vol_name,type=CephString '
                   'name=yes-i-really-mean-it,type=CephString,req=false ',
            'desc': "Delete an FS volume by passing the --yes-i-really-mean-it flag",
            'perm': 'rw'
        },
        {
            'cmd': 'fs subvolumegroup ls '
                   'name=vol_name,type=CephString ',
            'desc': "List subvolumegroups",
            'perm': 'r'
        },
        {
            'cmd': 'fs subvolumegroup create '
                   'name=vol_name,type=CephString '
                   'name=group_name,type=CephString '
                   'name=pool_layout,type=CephString,req=false '
                   'name=uid,type=CephInt,req=false '
                   'name=gid,type=CephInt,req=false '
                   'name=mode,type=CephString,req=false ',
            'desc': "Create a CephFS subvolume group in a volume, and optionally, "
                    "with a specific data pool layout and a specific numeric mode",
            'perm': 'rw'
        },
        {
            'cmd': 'fs subvolumegroup rm '
                   'name=vol_name,type=CephString '
                   'name=group_name,type=CephString '
                   'name=force,type=CephBool,req=false ',
            'desc': "Delete a CephFS subvolume group in a volume",
            'perm': 'rw'
        },
        {
            'cmd': 'fs subvolume ls '
                   'name=vol_name,type=CephString '
                   'name=group_name,type=CephString,req=false ',
            'desc': "List subvolumes",
            'perm': 'r'
        },
        {
            'cmd': 'fs subvolume create '
                   'name=vol_name,type=CephString '
                   'name=sub_name,type=CephString '
                   'name=size,type=CephInt,req=false '
                   'name=group_name,type=CephString,req=false '
                   'name=pool_layout,type=CephString,req=false '
                   'name=uid,type=CephInt,req=false '
                   'name=gid,type=CephInt,req=false '
                   'name=mode,type=CephString,req=false '
                   'name=namespace_isolated,type=CephBool,req=false ',
            'desc': "Create a CephFS subvolume in a volume, and optionally, "
                    "with a specific size (in bytes), a specific data pool layout, "
                    "a specific mode, in a specific subvolume group and in a "
                    "separate RADOS namespace",
            'perm': 'rw'
        },
        {
            'cmd': 'fs subvolume rm '
                   'name=vol_name,type=CephString '
                   'name=sub_name,type=CephString '
                   'name=group_name,type=CephString,req=false '
                   'name=force,type=CephBool,req=false '
                   'name=retain_snapshots,type=CephBool,req=false ',
            'desc': "Delete a CephFS subvolume in a volume, and optionally, "
                    "in a specific subvolume group, force-deleting a cancelled or failed "
                    "clone, and retaining existing subvolume snapshots",
            'perm': 'rw'
        },
        {
            'cmd': 'fs subvolume authorize '
                   'name=vol_name,type=CephString '
                   'name=sub_name,type=CephString '
                   'name=auth_id,type=CephString '
                   'name=group_name,type=CephString,req=false '
                   'name=access_level,type=CephString,req=false '
                   'name=tenant_id,type=CephString,req=false '
                   'name=allow_existing_id,type=CephBool,req=false ',
            'desc': "Allow a cephx auth ID access to a subvolume",
            'perm': 'rw'
        },
        {
            'cmd': 'fs subvolume deauthorize '
                   'name=vol_name,type=CephString '
                   'name=sub_name,type=CephString '
                   'name=auth_id,type=CephString '
                   'name=group_name,type=CephString,req=false ',
            'desc': "Deny a cephx auth ID access to a subvolume",
            'perm': 'rw'
        },
        {
            'cmd': 'fs subvolume authorized_list '
                   'name=vol_name,type=CephString '
                   'name=sub_name,type=CephString '
                   'name=group_name,type=CephString,req=false ',
            'desc': "List auth IDs that have access to a subvolume",
            'perm': 'r'
        },
        {
            'cmd': 'fs subvolume evict '
                   'name=vol_name,type=CephString '
                   'name=sub_name,type=CephString '
                   'name=auth_id,type=CephString '
                   'name=group_name,type=CephString,req=false ',
            'desc': "Evict clients based on auth ID and the subvolume mounted",
            'perm': 'rw'
        },
        {
            'cmd': 'fs subvolumegroup getpath '
                   'name=vol_name,type=CephString '
                   'name=group_name,type=CephString ',
            'desc': "Get the mountpath of a CephFS subvolume group in a volume",
            'perm': 'r'
        },
        {
            'cmd': 'fs subvolume getpath '
                   'name=vol_name,type=CephString '
                   'name=sub_name,type=CephString '
                   'name=group_name,type=CephString,req=false ',
            'desc': "Get the mountpath of a CephFS subvolume in a volume, "
                    "and optionally, in a specific subvolume group",
            'perm': 'rw'
        },
        {
            'cmd': 'fs subvolume info '
                   'name=vol_name,type=CephString '
                   'name=sub_name,type=CephString '
                   'name=group_name,type=CephString,req=false ',
            'desc': "Get the metadata of a CephFS subvolume in a volume, "
                    "and optionally, in a specific subvolume group",
            'perm': 'r'
        },
        {
            'cmd': 'fs subvolumegroup pin'
                   ' name=vol_name,type=CephString'
                   ' name=group_name,type=CephString,req=true'
                   ' name=pin_type,type=CephChoices,strings=export|distributed|random'
                   ' name=pin_setting,type=CephString,req=true',
            'desc': "Set MDS pinning policy for subvolumegroup",
            'perm': 'rw'
        },
        {
            'cmd': 'fs subvolumegroup snapshot ls '
                   'name=vol_name,type=CephString '
                   'name=group_name,type=CephString ',
            'desc': "List subvolumegroup snapshots",
            'perm': 'r'
        },
        {
            'cmd': 'fs subvolumegroup snapshot create '
                   'name=vol_name,type=CephString '
                   'name=group_name,type=CephString '
                   'name=snap_name,type=CephString ',
            'desc': "Create a snapshot of a CephFS subvolume group in a volume",
            'perm': 'rw'
        },
        {
            'cmd': 'fs subvolumegroup snapshot rm '
                   'name=vol_name,type=CephString '
                   'name=group_name,type=CephString '
                   'name=snap_name,type=CephString '
                   'name=force,type=CephBool,req=false ',
            'desc': "Delete a snapshot of a CephFS subvolume group in a volume",
            'perm': 'rw'
        },
        {
            'cmd': 'fs subvolume snapshot ls '
                   'name=vol_name,type=CephString '
                   'name=sub_name,type=CephString '
                   'name=group_name,type=CephString,req=false ',
            'desc': "List subvolume snapshots",
            'perm': 'r'
        },
        {
            'cmd': 'fs subvolume snapshot create '
                   'name=vol_name,type=CephString '
                   'name=sub_name,type=CephString '
                   'name=snap_name,type=CephString '
                   'name=group_name,type=CephString,req=false ',
            'desc': "Create a snapshot of a CephFS subvolume in a volume, "
                    "and optionally, in a specific subvolume group",
            'perm': 'rw'
        },
        {
            'cmd': 'fs subvolume snapshot info '
                   'name=vol_name,type=CephString '
                   'name=sub_name,type=CephString '
                   'name=snap_name,type=CephString '
                   'name=group_name,type=CephString,req=false ',
            'desc': "Get the metadata of a CephFS subvolume snapshot, "
                    "and optionally, in a specific subvolume group",
            'perm': 'r'
        },
        {
            'cmd': 'fs subvolume snapshot rm '
                   'name=vol_name,type=CephString '
                   'name=sub_name,type=CephString '
                   'name=snap_name,type=CephString '
                   'name=group_name,type=CephString,req=false '
                   'name=force,type=CephBool,req=false ',
            'desc': "Delete a snapshot of a CephFS subvolume in a volume, "
                    "and optionally, in a specific subvolume group",
            'perm': 'rw'
        },
        {
            'cmd': 'fs subvolume resize '
                   'name=vol_name,type=CephString '
                   'name=sub_name,type=CephString '
                   'name=new_size,type=CephString,req=true '
                   'name=group_name,type=CephString,req=false '
                   'name=no_shrink,type=CephBool,req=false ',
            'desc': "Resize a CephFS subvolume",
            'perm': 'rw'
        },
        {
            'cmd': 'fs subvolume pin'
                   ' name=vol_name,type=CephString'
                   ' name=sub_name,type=CephString'
                   ' name=pin_type,type=CephChoices,strings=export|distributed|random'
                   ' name=pin_setting,type=CephString,req=true'
                   ' name=group_name,type=CephString,req=false',
            'desc': "Set MDS pinning policy for subvolume",
            'perm': 'rw'
        },
        {
            'cmd': 'fs subvolume snapshot protect '
                   'name=vol_name,type=CephString '
                   'name=sub_name,type=CephString '
                   'name=snap_name,type=CephString '
                   'name=group_name,type=CephString,req=false ',
            'desc': "(deprecated) Protect a snapshot of a CephFS subvolume in a volume, "
                    "and optionally, in a specific subvolume group",
            'perm': 'rw'
        },
        {
            'cmd': 'fs subvolume snapshot unprotect '
                   'name=vol_name,type=CephString '
                   'name=sub_name,type=CephString '
                   'name=snap_name,type=CephString '
                   'name=group_name,type=CephString,req=false ',
            'desc': "(deprecated) Unprotect a snapshot of a CephFS subvolume in a volume, "
                    "and optionally, in a specific subvolume group",
            'perm': 'rw'
        },
        {
            'cmd': 'fs subvolume snapshot clone '
                   'name=vol_name,type=CephString '
                   'name=sub_name,type=CephString '
                   'name=snap_name,type=CephString '
                   'name=target_sub_name,type=CephString '
                   'name=pool_layout,type=CephString,req=false '
                   'name=group_name,type=CephString,req=false '
                   'name=target_group_name,type=CephString,req=false ',
            'desc': "Clone a snapshot to a target subvolume",
            'perm': 'rw'
        },
        {
            'cmd': 'fs clone status '
                   'name=vol_name,type=CephString '
                   'name=clone_name,type=CephString '
                   'name=group_name,type=CephString,req=false ',
            'desc': "Get the status of a cloned subvolume",
            'perm': 'r'
        },
        {
            'cmd': 'fs clone cancel '
                   'name=vol_name,type=CephString '
                   'name=clone_name,type=CephString '
                   'name=group_name,type=CephString,req=false ',
            'desc': "Cancel a pending or ongoing clone operation",
            'perm': 'r'
        },
        {
            'cmd': 'nfs export create cephfs '
                   'name=fsname,type=CephString '
                   'name=clusterid,type=CephString '
                   'name=binding,type=CephString '
                   'name=readonly,type=CephBool,req=false '
                   'name=path,type=CephString,req=false ',
            'desc': "Create a CephFS export",
            'perm': 'rw'
        },
        {
            'cmd': 'nfs export delete '
                   'name=clusterid,type=CephString '
                   'name=binding,type=CephString ',
            'desc': "Delete a CephFS export",
            'perm': 'rw'
        },
        {
            'cmd': 'nfs export ls '
                   'name=clusterid,type=CephString '
                   'name=detailed,type=CephBool,req=false ',
            'desc': "List exports of an NFS cluster",
            'perm': 'r'
        },
        {
            'cmd': 'nfs export get '
                   'name=clusterid,type=CephString '
                   'name=binding,type=CephString ',
            'desc': "Fetch an export of an NFS cluster given the pseudo path/binding",
            'perm': 'r'
        },
        {
            'cmd': 'nfs cluster create '
                   'name=type,type=CephString '
                   'name=clusterid,type=CephString,goodchars=[A-Za-z0-9-_.] '
                   'name=placement,type=CephString,req=false ',
            'desc': "Create an NFS Cluster",
            'perm': 'rw'
        },
        {
            'cmd': 'nfs cluster update '
                   'name=clusterid,type=CephString '
                   'name=placement,type=CephString ',
            'desc': "Update an NFS Cluster",
            'perm': 'rw'
        },
        {
            'cmd': 'nfs cluster delete '
                   'name=clusterid,type=CephString ',
            'desc': "Delete an NFS Cluster",
            'perm': 'rw'
        },
        {
            'cmd': 'nfs cluster ls ',
            'desc': "List NFS Clusters",
            'perm': 'r'
        },
        {
            'cmd': 'nfs cluster info '
                   'name=clusterid,type=CephString,req=false ',
            'desc': "Display NFS Cluster info",
            'perm': 'r'
        },
        {
            'cmd': 'nfs cluster config set '
                   'name=clusterid,type=CephString ',
            'desc': "Set NFS-Ganesha config by `-i <config_file>`",
            'perm': 'rw'
        },
        {
            'cmd': 'nfs cluster config reset '
                   'name=clusterid,type=CephString ',
            'desc': "Reset NFS-Ganesha config to default",
            'perm': 'rw'
        },
        # volume ls [recursive]
        # subvolume ls <volume>
        # volume authorize/deauthorize
        # subvolume authorize/deauthorize

        # volume describe (free space, etc)
        # volume auth list (vc.get_authorized_ids)

        # snapshots?

        # FIXME: we're doing CephFSVolumeClient.recover on every
        # path where we instantiate and connect a client. Perhaps
        # keep clients alive longer, or just pass a "don't recover"
        # flag in if it's the >1st time we connected a particular
        # volume in the lifetime of this module instance.
    ]

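    # Module options can be changed at runtime (for mgr modules this is done
    # with "ceph config set mgr mgr/volumes/<name> <value>") and are re-read
    # in config_notify() whenever they change.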
    MODULE_OPTIONS = [
        {
            'name': 'max_concurrent_clones',
            'type': 'int',
            'default': 4,
            'desc': 'Number of asynchronous cloner threads',
        }
    ]

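    # The VolumeClient, FSExport and NFSCluster helpers are created under
    # self.lock, and self.inited keeps config_notify() from reconfiguring the
    # cloner before startup has completed.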
    def __init__(self, *args, **kwargs):
        self.inited = False
        # for mypy
        self.max_concurrent_clones = None
        self.lock = threading.Lock()
        super(Module, self).__init__(*args, **kwargs)
        # Initialize config option members
        self.config_notify()
        with self.lock:
            self.vc = VolumeClient(self)
            self.fs_export = FSExport(self)
            self.nfs = NFSCluster(self)
            self.inited = True

    def __del__(self):
        self.vc.shutdown()

    def shutdown(self):
        self.vc.shutdown()

    def config_notify(self):
        """
        This method is called whenever one of our config options is changed.
        """
        with self.lock:
            for opt in self.MODULE_OPTIONS:
                setattr(self,
                        opt['name'],  # type: ignore
                        self.get_module_option(opt['name']))  # type: ignore
                self.log.debug(' mgr option %s = %s',
                               opt['name'], getattr(self, opt['name']))  # type: ignore
                if self.inited:
                    if opt['name'] == "max_concurrent_clones":
                        self.vc.cloner.reconfigure_max_concurrent_clones(self.max_concurrent_clones)

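    # Commands declared in COMMANDS are dispatched by prefix: for example,
    # "fs subvolume create" maps to _cmd_fs_subvolume_create(). Each handler
    # returns the usual (retcode, stdout, stderr) tuple.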
    def handle_command(self, inbuf, cmd):
        handler_name = "_cmd_" + cmd['prefix'].replace(" ", "_")
        try:
            handler = getattr(self, handler_name)
        except AttributeError:
            return -errno.EINVAL, "", "Unknown command"

        return handler(inbuf, cmd)

    @mgr_cmd_wrap
    def _cmd_fs_volume_create(self, inbuf, cmd):
        vol_id = cmd['name']
        placement = cmd.get('placement', '')
        return self.vc.create_fs_volume(vol_id, placement)

    @mgr_cmd_wrap
    def _cmd_fs_volume_rm(self, inbuf, cmd):
        vol_name = cmd['vol_name']
        confirm = cmd.get('yes-i-really-mean-it', None)
        return self.vc.delete_fs_volume(vol_name, confirm)

    @mgr_cmd_wrap
    def _cmd_fs_volume_ls(self, inbuf, cmd):
        return self.vc.list_fs_volumes()

    @mgr_cmd_wrap
    def _cmd_fs_subvolumegroup_create(self, inbuf, cmd):
        """
        :return: a 3-tuple of return code (int), empty string (str), error message (str)
        """
        return self.vc.create_subvolume_group(
            vol_name=cmd['vol_name'], group_name=cmd['group_name'],
            pool_layout=cmd.get('pool_layout', None), mode=cmd.get('mode', '755'),
            uid=cmd.get('uid', None), gid=cmd.get('gid', None))

    @mgr_cmd_wrap
    def _cmd_fs_subvolumegroup_rm(self, inbuf, cmd):
        """
        :return: a 3-tuple of return code (int), empty string (str), error message (str)
        """
        return self.vc.remove_subvolume_group(vol_name=cmd['vol_name'],
                                              group_name=cmd['group_name'],
                                              force=cmd.get('force', False))

    @mgr_cmd_wrap
    def _cmd_fs_subvolumegroup_ls(self, inbuf, cmd):
        return self.vc.list_subvolume_groups(vol_name=cmd['vol_name'])

    @mgr_cmd_wrap
    def _cmd_fs_subvolume_create(self, inbuf, cmd):
        """
        :return: a 3-tuple of return code (int), empty string (str), error message (str)
        """
        return self.vc.create_subvolume(vol_name=cmd['vol_name'],
                                        sub_name=cmd['sub_name'],
                                        group_name=cmd.get('group_name', None),
                                        size=cmd.get('size', None),
                                        pool_layout=cmd.get('pool_layout', None),
                                        uid=cmd.get('uid', None),
                                        gid=cmd.get('gid', None),
                                        mode=cmd.get('mode', '755'),
                                        namespace_isolated=cmd.get('namespace_isolated', False))

    @mgr_cmd_wrap
    def _cmd_fs_subvolume_rm(self, inbuf, cmd):
        """
        :return: a 3-tuple of return code (int), empty string (str), error message (str)
        """
        return self.vc.remove_subvolume(vol_name=cmd['vol_name'],
                                        sub_name=cmd['sub_name'],
                                        group_name=cmd.get('group_name', None),
                                        force=cmd.get('force', False),
                                        retain_snapshots=cmd.get('retain_snapshots', False))

    @mgr_cmd_wrap
    def _cmd_fs_subvolume_authorize(self, inbuf, cmd):
        """
        :return: a 3-tuple of return code (int), secret key (str), error message (str)
        """
        return self.vc.authorize_subvolume(vol_name=cmd['vol_name'],
                                           sub_name=cmd['sub_name'],
                                           auth_id=cmd['auth_id'],
                                           group_name=cmd.get('group_name', None),
                                           access_level=cmd.get('access_level', 'rw'),
                                           tenant_id=cmd.get('tenant_id', None),
                                           allow_existing_id=cmd.get('allow_existing_id', False))

    @mgr_cmd_wrap
    def _cmd_fs_subvolume_deauthorize(self, inbuf, cmd):
        """
        :return: a 3-tuple of return code (int), empty string (str), error message (str)
        """
        return self.vc.deauthorize_subvolume(vol_name=cmd['vol_name'],
                                             sub_name=cmd['sub_name'],
                                             auth_id=cmd['auth_id'],
                                             group_name=cmd.get('group_name', None))

    @mgr_cmd_wrap
    def _cmd_fs_subvolume_authorized_list(self, inbuf, cmd):
        """
        :return: a 3-tuple of return code (int), list of auth IDs (json), error message (str)
        """
        return self.vc.authorized_list(vol_name=cmd['vol_name'],
                                       sub_name=cmd['sub_name'],
                                       group_name=cmd.get('group_name', None))

    @mgr_cmd_wrap
    def _cmd_fs_subvolume_evict(self, inbuf, cmd):
        """
        :return: a 3-tuple of return code (int), empty string (str), error message (str)
        """
        return self.vc.evict(vol_name=cmd['vol_name'],
                             sub_name=cmd['sub_name'],
                             auth_id=cmd['auth_id'],
                             group_name=cmd.get('group_name', None))

    @mgr_cmd_wrap
    def _cmd_fs_subvolume_ls(self, inbuf, cmd):
        return self.vc.list_subvolumes(vol_name=cmd['vol_name'],
                                       group_name=cmd.get('group_name', None))

    @mgr_cmd_wrap
    def _cmd_fs_subvolumegroup_getpath(self, inbuf, cmd):
        return self.vc.getpath_subvolume_group(
            vol_name=cmd['vol_name'], group_name=cmd['group_name'])

    @mgr_cmd_wrap
    def _cmd_fs_subvolume_getpath(self, inbuf, cmd):
        return self.vc.subvolume_getpath(vol_name=cmd['vol_name'],
                                         sub_name=cmd['sub_name'],
                                         group_name=cmd.get('group_name', None))

    @mgr_cmd_wrap
    def _cmd_fs_subvolume_info(self, inbuf, cmd):
        return self.vc.subvolume_info(vol_name=cmd['vol_name'],
                                      sub_name=cmd['sub_name'],
                                      group_name=cmd.get('group_name', None))

    @mgr_cmd_wrap
    def _cmd_fs_subvolumegroup_pin(self, inbuf, cmd):
        return self.vc.pin_subvolume_group(vol_name=cmd['vol_name'],
                                           group_name=cmd['group_name'], pin_type=cmd['pin_type'],
                                           pin_setting=cmd['pin_setting'])

    @mgr_cmd_wrap
    def _cmd_fs_subvolumegroup_snapshot_create(self, inbuf, cmd):
        return self.vc.create_subvolume_group_snapshot(vol_name=cmd['vol_name'],
                                                       group_name=cmd['group_name'],
                                                       snap_name=cmd['snap_name'])

    @mgr_cmd_wrap
    def _cmd_fs_subvolumegroup_snapshot_rm(self, inbuf, cmd):
        return self.vc.remove_subvolume_group_snapshot(vol_name=cmd['vol_name'],
                                                       group_name=cmd['group_name'],
                                                       snap_name=cmd['snap_name'],
                                                       force=cmd.get('force', False))

    @mgr_cmd_wrap
    def _cmd_fs_subvolumegroup_snapshot_ls(self, inbuf, cmd):
        return self.vc.list_subvolume_group_snapshots(vol_name=cmd['vol_name'],
                                                      group_name=cmd['group_name'])

    @mgr_cmd_wrap
    def _cmd_fs_subvolume_snapshot_create(self, inbuf, cmd):
        return self.vc.create_subvolume_snapshot(vol_name=cmd['vol_name'],
                                                 sub_name=cmd['sub_name'],
                                                 snap_name=cmd['snap_name'],
                                                 group_name=cmd.get('group_name', None))

    @mgr_cmd_wrap
    def _cmd_fs_subvolume_snapshot_rm(self, inbuf, cmd):
        return self.vc.remove_subvolume_snapshot(vol_name=cmd['vol_name'],
                                                 sub_name=cmd['sub_name'],
                                                 snap_name=cmd['snap_name'],
                                                 group_name=cmd.get('group_name', None),
                                                 force=cmd.get('force', False))

    @mgr_cmd_wrap
    def _cmd_fs_subvolume_snapshot_info(self, inbuf, cmd):
        return self.vc.subvolume_snapshot_info(vol_name=cmd['vol_name'],
                                               sub_name=cmd['sub_name'],
                                               snap_name=cmd['snap_name'],
                                               group_name=cmd.get('group_name', None))

    @mgr_cmd_wrap
    def _cmd_fs_subvolume_snapshot_ls(self, inbuf, cmd):
        return self.vc.list_subvolume_snapshots(vol_name=cmd['vol_name'],
                                                sub_name=cmd['sub_name'],
                                                group_name=cmd.get('group_name', None))

    @mgr_cmd_wrap
    def _cmd_fs_subvolume_resize(self, inbuf, cmd):
        return self.vc.resize_subvolume(vol_name=cmd['vol_name'], sub_name=cmd['sub_name'],
                                        new_size=cmd['new_size'], group_name=cmd.get('group_name', None),
                                        no_shrink=cmd.get('no_shrink', False))

    @mgr_cmd_wrap
    def _cmd_fs_subvolume_pin(self, inbuf, cmd):
        return self.vc.subvolume_pin(vol_name=cmd['vol_name'],
                                     sub_name=cmd['sub_name'], pin_type=cmd['pin_type'],
                                     pin_setting=cmd['pin_setting'],
                                     group_name=cmd.get('group_name', None))

    @mgr_cmd_wrap
    def _cmd_fs_subvolume_snapshot_protect(self, inbuf, cmd):
        return self.vc.protect_subvolume_snapshot(vol_name=cmd['vol_name'], sub_name=cmd['sub_name'],
                                                  snap_name=cmd['snap_name'], group_name=cmd.get('group_name', None))

    @mgr_cmd_wrap
    def _cmd_fs_subvolume_snapshot_unprotect(self, inbuf, cmd):
        return self.vc.unprotect_subvolume_snapshot(vol_name=cmd['vol_name'], sub_name=cmd['sub_name'],
                                                    snap_name=cmd['snap_name'], group_name=cmd.get('group_name', None))

    @mgr_cmd_wrap
    def _cmd_fs_subvolume_snapshot_clone(self, inbuf, cmd):
        return self.vc.clone_subvolume_snapshot(
            vol_name=cmd['vol_name'], sub_name=cmd['sub_name'], snap_name=cmd['snap_name'],
            group_name=cmd.get('group_name', None), pool_layout=cmd.get('pool_layout', None),
            target_sub_name=cmd['target_sub_name'], target_group_name=cmd.get('target_group_name', None))

    @mgr_cmd_wrap
    def _cmd_fs_clone_status(self, inbuf, cmd):
        return self.vc.clone_status(
            vol_name=cmd['vol_name'], clone_name=cmd['clone_name'], group_name=cmd.get('group_name', None))

    @mgr_cmd_wrap
    def _cmd_fs_clone_cancel(self, inbuf, cmd):
        return self.vc.clone_cancel(
            vol_name=cmd['vol_name'], clone_name=cmd['clone_name'], group_name=cmd.get('group_name', None))

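    # The NFS command handlers below delegate to the FSExport and NFSCluster
    # helpers imported from .fs.nfs.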
    @mgr_cmd_wrap
    def _cmd_nfs_export_create_cephfs(self, inbuf, cmd):
        # TODO: extend export creation for rgw.
        return self.fs_export.create_export(fs_name=cmd['fsname'], cluster_id=cmd['clusterid'],
                                            pseudo_path=cmd['binding'], read_only=cmd.get('readonly', False),
                                            path=cmd.get('path', '/'))

    @mgr_cmd_wrap
    def _cmd_nfs_export_delete(self, inbuf, cmd):
        return self.fs_export.delete_export(cluster_id=cmd['clusterid'], pseudo_path=cmd['binding'])

    @mgr_cmd_wrap
    def _cmd_nfs_export_ls(self, inbuf, cmd):
        return self.fs_export.list_exports(cluster_id=cmd['clusterid'], detailed=cmd.get('detailed', False))

    @mgr_cmd_wrap
    def _cmd_nfs_export_get(self, inbuf, cmd):
        return self.fs_export.get_export(cluster_id=cmd['clusterid'], pseudo_path=cmd['binding'])

    @mgr_cmd_wrap
    def _cmd_nfs_cluster_create(self, inbuf, cmd):
        return self.nfs.create_nfs_cluster(cluster_id=cmd['clusterid'], export_type=cmd['type'],
                                           placement=cmd.get('placement', None))

    @mgr_cmd_wrap
    def _cmd_nfs_cluster_update(self, inbuf, cmd):
        return self.nfs.update_nfs_cluster(cluster_id=cmd['clusterid'], placement=cmd['placement'])

    @mgr_cmd_wrap
    def _cmd_nfs_cluster_delete(self, inbuf, cmd):
        return self.nfs.delete_nfs_cluster(cluster_id=cmd['clusterid'])

    @mgr_cmd_wrap
    def _cmd_nfs_cluster_ls(self, inbuf, cmd):
        return self.nfs.list_nfs_cluster()

    @mgr_cmd_wrap
    def _cmd_nfs_cluster_info(self, inbuf, cmd):
        return self.nfs.show_nfs_cluster_info(cluster_id=cmd.get('clusterid', None))

    def _cmd_nfs_cluster_config_set(self, inbuf, cmd):
        return self.nfs.set_nfs_cluster_config(cluster_id=cmd['clusterid'], nfs_config=inbuf)

    def _cmd_nfs_cluster_config_reset(self, inbuf, cmd):
        return self.nfs.reset_nfs_cluster_config(cluster_id=cmd['clusterid'])