]> git.proxmox.com Git - ceph.git/blame - ceph/src/pybind/mgr/volumes/module.py
Import ceph 15.2.8
[ceph.git] / ceph / src / pybind / mgr / volumes / module.py
CommitLineData
11fdf7f2
TL
import errno
import functools
import json
import logging
import threading
import traceback

from mgr_module import MgrModule
import orchestrator

from .fs.volume import VolumeClient
from .fs.nfs import NFSCluster, FSExport
# Module-level logger; shared by VolumesInfoWrapper and mgr_cmd_wrap below.
log = logging.getLogger(__name__)

class VolumesInfoWrapper():
    """Context manager that logs the start and the success/failure of a
    single mgr command invocation.

    `f` is the wrapped handler function (kept for reference only — it is
    not called by this class); `context` is a preformatted description of
    the invocation.
    """
    def __init__(self, f, context):
        self.f = f
        self.context = context

    def __enter__(self):
        log.info("Starting {}".format(self.context))
        # Return self so `with ... as w:` receives the manager (conventional;
        # the original returned None implicitly).
        return self

    def __exit__(self, exc_type, exc_value, tb):
        # Implicitly returns None (falsy), so any exception propagates to
        # the caller after being logged.
        if exc_type is not None:
            log.error("Failed {}:\n{}".format(
                self.context,
                "".join(traceback.format_exception(exc_type, exc_value, tb))))
        else:
            log.info("Finishing {}".format(self.context))
def mgr_cmd_wrap(f):
    """Decorator for mgr command handlers: logs the handler name, its
    arguments and input buffer on entry, and success/failure on exit,
    via VolumesInfoWrapper.

    functools.wraps preserves the wrapped handler's __name__/__doc__ for
    introspection and debugging (the original wrapper hid them).
    """
    @functools.wraps(f)
    def wrap(self, inbuf, cmd):
        astr = ", ".join("{}:{}".format(k, cmd[k]) for k in cmd)
        context = "{}({}) < \"{}\"".format(f.__name__, astr, inbuf)
        with VolumesInfoWrapper(f, context):
            return f(self, inbuf, cmd)
    return wrap
11fdf7f2
TL
class Module(orchestrator.OrchestratorClientMixin, MgrModule):
    """ceph-mgr 'volumes' module: CLI entry points for CephFS volume,
    subvolume (and subvolume group), snapshot/clone, and NFS cluster/export
    management."""

    # Command table consumed by ceph-mgr. Each entry's 'cmd' prefix maps to
    # a handler method named "_cmd_" + prefix-with-underscores; see
    # handle_command() below.
    COMMANDS = [
        {
            'cmd': 'fs volume ls',
            'desc': "List volumes",
            'perm': 'r'
        },
        {
            'cmd': 'fs volume create '
                   'name=name,type=CephString '
                   'name=placement,type=CephString,req=false ',
            'desc': "Create a CephFS volume",
            'perm': 'rw'
        },
        {
            'cmd': 'fs volume rm '
                   'name=vol_name,type=CephString '
                   'name=yes-i-really-mean-it,type=CephString,req=false ',
            'desc': "Delete a FS volume by passing --yes-i-really-mean-it flag",
            'perm': 'rw'
        },
        {
            'cmd': 'fs subvolumegroup ls '
                   'name=vol_name,type=CephString ',
            'desc': "List subvolumegroups",
            'perm': 'r'
        },
        {
            'cmd': 'fs subvolumegroup create '
                   'name=vol_name,type=CephString '
                   'name=group_name,type=CephString '
                   'name=pool_layout,type=CephString,req=false '
                   'name=uid,type=CephInt,req=false '
                   'name=gid,type=CephInt,req=false '
                   'name=mode,type=CephString,req=false ',
            'desc': "Create a CephFS subvolume group in a volume, and optionally, "
                    "with a specific data pool layout, and a specific numeric mode",
            'perm': 'rw'
        },
        {
            'cmd': 'fs subvolumegroup rm '
                   'name=vol_name,type=CephString '
                   'name=group_name,type=CephString '
                   'name=force,type=CephBool,req=false ',
            'desc': "Delete a CephFS subvolume group in a volume",
            'perm': 'rw'
        },
        {
            'cmd': 'fs subvolume ls '
                   'name=vol_name,type=CephString '
                   'name=group_name,type=CephString,req=false ',
            'desc': "List subvolumes",
            'perm': 'r'
        },
        {
            'cmd': 'fs subvolume create '
                   'name=vol_name,type=CephString '
                   'name=sub_name,type=CephString '
                   'name=size,type=CephInt,req=false '
                   'name=group_name,type=CephString,req=false '
                   'name=pool_layout,type=CephString,req=false '
                   'name=uid,type=CephInt,req=false '
                   'name=gid,type=CephInt,req=false '
                   'name=mode,type=CephString,req=false '
                   'name=namespace_isolated,type=CephBool,req=false ',
            'desc': "Create a CephFS subvolume in a volume, and optionally, "
                    "with a specific size (in bytes), a specific data pool layout, "
                    "a specific mode, in a specific subvolume group and in separate "
                    "RADOS namespace",
            'perm': 'rw'
        },
        {
            'cmd': 'fs subvolume rm '
                   'name=vol_name,type=CephString '
                   'name=sub_name,type=CephString '
                   'name=group_name,type=CephString,req=false '
                   'name=force,type=CephBool,req=false ',
            'desc': "Delete a CephFS subvolume in a volume, and optionally, "
                    "in a specific subvolume group",
            'perm': 'rw'
        },
        {
            'cmd': 'fs subvolumegroup getpath '
                   'name=vol_name,type=CephString '
                   'name=group_name,type=CephString ',
            'desc': "Get the mountpath of a CephFS subvolume group in a volume",
            'perm': 'r'
        },
        {
            'cmd': 'fs subvolume getpath '
                   'name=vol_name,type=CephString '
                   'name=sub_name,type=CephString '
                   'name=group_name,type=CephString,req=false ',
            'desc': "Get the mountpath of a CephFS subvolume in a volume, "
                    "and optionally, in a specific subvolume group",
            'perm': 'rw'
        },
        {
            'cmd': 'fs subvolume info '
                   'name=vol_name,type=CephString '
                   'name=sub_name,type=CephString '
                   'name=group_name,type=CephString,req=false ',
            'desc': "Get the metadata of a CephFS subvolume in a volume, "
                    "and optionally, in a specific subvolume group",
            'perm': 'r'
        },
        {
            'cmd': 'fs subvolumegroup pin'
                   ' name=vol_name,type=CephString'
                   ' name=group_name,type=CephString,req=true'
                   ' name=pin_type,type=CephChoices,strings=export|distributed|random'
                   ' name=pin_setting,type=CephString,req=true',
            'desc': "Set MDS pinning policy for subvolumegroup",
            'perm': 'rw'
        },
        {
            'cmd': 'fs subvolumegroup snapshot ls '
                   'name=vol_name,type=CephString '
                   'name=group_name,type=CephString ',
            'desc': "List subvolumegroup snapshots",
            'perm': 'r'
        },
        {
            'cmd': 'fs subvolumegroup snapshot create '
                   'name=vol_name,type=CephString '
                   'name=group_name,type=CephString '
                   'name=snap_name,type=CephString ',
            'desc': "Create a snapshot of a CephFS subvolume group in a volume",
            'perm': 'rw'
        },
        {
            'cmd': 'fs subvolumegroup snapshot rm '
                   'name=vol_name,type=CephString '
                   'name=group_name,type=CephString '
                   'name=snap_name,type=CephString '
                   'name=force,type=CephBool,req=false ',
            'desc': "Delete a snapshot of a CephFS subvolume group in a volume",
            'perm': 'rw'
        },
        {
            'cmd': 'fs subvolume snapshot ls '
                   'name=vol_name,type=CephString '
                   'name=sub_name,type=CephString '
                   'name=group_name,type=CephString,req=false ',
            'desc': "List subvolume snapshots",
            'perm': 'r'
        },
        {
            'cmd': 'fs subvolume snapshot create '
                   'name=vol_name,type=CephString '
                   'name=sub_name,type=CephString '
                   'name=snap_name,type=CephString '
                   'name=group_name,type=CephString,req=false ',
            'desc': "Create a snapshot of a CephFS subvolume in a volume, "
                    "and optionally, in a specific subvolume group",
            'perm': 'rw'
        },
        {
            'cmd': 'fs subvolume snapshot info '
                   'name=vol_name,type=CephString '
                   'name=sub_name,type=CephString '
                   'name=snap_name,type=CephString '
                   'name=group_name,type=CephString,req=false ',
            'desc': "Get the metadata of a CephFS subvolume snapshot "
                    "and optionally, in a specific subvolume group",
            'perm': 'r'
        },
        {
            'cmd': 'fs subvolume snapshot rm '
                   'name=vol_name,type=CephString '
                   'name=sub_name,type=CephString '
                   'name=snap_name,type=CephString '
                   'name=group_name,type=CephString,req=false '
                   'name=force,type=CephBool,req=false ',
            'desc': "Delete a snapshot of a CephFS subvolume in a volume, "
                    "and optionally, in a specific subvolume group",
            'perm': 'rw'
        },
        {
            'cmd': 'fs subvolume resize '
                   'name=vol_name,type=CephString '
                   'name=sub_name,type=CephString '
                   'name=new_size,type=CephString,req=true '
                   'name=group_name,type=CephString,req=false '
                   'name=no_shrink,type=CephBool,req=false ',
            'desc': "Resize a CephFS subvolume",
            'perm': 'rw'
        },
        {
            'cmd': 'fs subvolume pin'
                   ' name=vol_name,type=CephString'
                   ' name=sub_name,type=CephString'
                   ' name=pin_type,type=CephChoices,strings=export|distributed|random'
                   ' name=pin_setting,type=CephString,req=true'
                   ' name=group_name,type=CephString,req=false',
            'desc': "Set MDS pinning policy for subvolume",
            'perm': 'rw'
        },
        {
            'cmd': 'fs subvolume snapshot protect '
                   'name=vol_name,type=CephString '
                   'name=sub_name,type=CephString '
                   'name=snap_name,type=CephString '
                   'name=group_name,type=CephString,req=false ',
            'desc': "(deprecated) Protect snapshot of a CephFS subvolume in a volume, "
                    "and optionally, in a specific subvolume group",
            'perm': 'rw'
        },
        {
            'cmd': 'fs subvolume snapshot unprotect '
                   'name=vol_name,type=CephString '
                   'name=sub_name,type=CephString '
                   'name=snap_name,type=CephString '
                   'name=group_name,type=CephString,req=false ',
            'desc': "(deprecated) Unprotect a snapshot of a CephFS subvolume in a volume, "
                    "and optionally, in a specific subvolume group",
            'perm': 'rw'
        },
        {
            'cmd': 'fs subvolume snapshot clone '
                   'name=vol_name,type=CephString '
                   'name=sub_name,type=CephString '
                   'name=snap_name,type=CephString '
                   'name=target_sub_name,type=CephString '
                   'name=pool_layout,type=CephString,req=false '
                   'name=group_name,type=CephString,req=false '
                   'name=target_group_name,type=CephString,req=false ',
            'desc': "Clone a snapshot to target subvolume",
            'perm': 'rw'
        },
        {
            'cmd': 'fs clone status '
                   'name=vol_name,type=CephString '
                   'name=clone_name,type=CephString '
                   'name=group_name,type=CephString,req=false ',
            'desc': "Get status on a cloned subvolume.",
            'perm': 'r'
        },
        {
            'cmd': 'fs clone cancel '
                   'name=vol_name,type=CephString '
                   'name=clone_name,type=CephString '
                   'name=group_name,type=CephString,req=false ',
            # NOTE(review): "an pending" typo left as-is — this is a
            # user-visible string matched by external tooling/tests.
            'desc': "Cancel an pending or ongoing clone operation.",
            'perm': 'r'
        },
        {
            'cmd': 'nfs export create cephfs '
                   'name=fsname,type=CephString '
                   'name=clusterid,type=CephString '
                   'name=binding,type=CephString '
                   'name=readonly,type=CephBool,req=false '
                   'name=path,type=CephString,req=false ',
            'desc': "Create a cephfs export",
            'perm': 'rw'
        },
        {
            'cmd': 'nfs export delete '
                   'name=clusterid,type=CephString '
                   'name=binding,type=CephString ',
            'desc': "Delete a cephfs export",
            'perm': 'rw'
        },
        {
            'cmd': 'nfs export ls '
                   'name=clusterid,type=CephString '
                   'name=detailed,type=CephBool,req=false ',
            'desc': "List exports of a NFS cluster",
            'perm': 'r'
        },
        {
            'cmd': 'nfs export get '
                   'name=clusterid,type=CephString '
                   'name=binding,type=CephString ',
            'desc': "Fetch a export of a NFS cluster given the pseudo path/binding",
            'perm': 'r'
        },
        {
            'cmd': 'nfs cluster create '
                   'name=type,type=CephString '
                   'name=clusterid,type=CephString,goodchars=[A-Za-z0-9-_.] '
                   'name=placement,type=CephString,req=false ',
            'desc': "Create an NFS Cluster",
            'perm': 'rw'
        },
        {
            'cmd': 'nfs cluster update '
                   'name=clusterid,type=CephString '
                   'name=placement,type=CephString ',
            'desc': "Updates an NFS Cluster",
            'perm': 'rw'
        },
        {
            'cmd': 'nfs cluster delete '
                   'name=clusterid,type=CephString ',
            'desc': "Deletes an NFS Cluster",
            'perm': 'rw'
        },
        {
            'cmd': 'nfs cluster ls ',
            'desc': "List NFS Clusters",
            'perm': 'r'
        },
        {
            'cmd': 'nfs cluster info '
                   'name=clusterid,type=CephString,req=false ',
            'desc': "Displays NFS Cluster info",
            'perm': 'r'
        },
        {
            'cmd': 'nfs cluster config set '
                   'name=clusterid,type=CephString ',
            'desc': "Set NFS-Ganesha config by `-i <config_file>`",
            'perm': 'rw'
        },
        {
            'cmd': 'nfs cluster config reset '
                   'name=clusterid,type=CephString ',
            'desc': "Reset NFS-Ganesha Config to default",
            'perm': 'rw'
        },
        # volume ls [recursive]
        # subvolume ls <volume>
        # volume authorize/deauthorize
        # subvolume authorize/deauthorize

        # volume describe (free space, etc)
        # volume auth list (vc.get_authorized_ids)

        # snapshots?

        # FIXME: we're doing CephFSVolumeClient.recover on every
        # path where we instantiate and connect a client. Perhaps
        # keep clients alive longer, or just pass a "don't recover"
        # flag in if it's the >1st time we connected a particular
        # volume in the lifetime of this module instance.
    ]

    # Module config options surfaced through `ceph config`; mirrored onto
    # instance attributes by config_notify().
    MODULE_OPTIONS = [
        {
            'name': 'max_concurrent_clones',
            'type': 'int',
            'default': 4,
            'desc': 'Number of asynchronous cloner threads',
        }
    ]

    def __init__(self, *args, **kwargs):
        # config_notify() can fire before the clients below are built;
        # self.inited gates the parts of it that need them.
        self.inited = False
        # for mypy
        self.max_concurrent_clones = None
        self.lock = threading.Lock()
        super(Module, self).__init__(*args, **kwargs)
        # Initialize config option members
        self.config_notify()
        # Build the worker clients under the same lock config_notify()
        # takes, so a concurrent option change sees a consistent state.
        with self.lock:
            self.vc = VolumeClient(self)
            self.fs_export = FSExport(self)
            self.nfs = NFSCluster(self)
            self.inited = True

92f5a8d4
TL
398 def __del__(self):
399 self.vc.shutdown()
11fdf7f2 400
92f5a8d4
TL
    def shutdown(self):
        # Called by ceph-mgr when the module is stopped; stop the volume
        # client's worker threads.
        self.vc.shutdown()

    def config_notify(self):
        """
        This method is called whenever one of our config options is changed.

        Copies every MODULE_OPTIONS value onto a same-named instance
        attribute, and (once initialization has completed) pushes a new
        max_concurrent_clones value through to the cloner.
        """
        with self.lock:
            for opt in self.MODULE_OPTIONS:
                setattr(self,
                        opt['name'],  # type: ignore
                        self.get_module_option(opt['name']))  # type: ignore
                self.log.debug(' mgr option %s = %s',
                               opt['name'], getattr(self, opt['name']))  # type: ignore
                # Skip until __init__ has built self.vc (see self.inited).
                if self.inited:
                    if opt['name'] == "max_concurrent_clones":
                        self.vc.cloner.reconfigure_max_concurrent_clones(self.max_concurrent_clones)

11fdf7f2 419 def handle_command(self, inbuf, cmd):
11fdf7f2
TL
420 handler_name = "_cmd_" + cmd['prefix'].replace(" ", "_")
421 try:
422 handler = getattr(self, handler_name)
423 except AttributeError:
424 return -errno.EINVAL, "", "Unknown command"
425
426 return handler(inbuf, cmd)
427
f91f0fd5 428 @mgr_cmd_wrap
11fdf7f2 429 def _cmd_fs_volume_create(self, inbuf, cmd):
81eedcae 430 vol_id = cmd['name']
9f95a23c
TL
431 placement = cmd.get('placement', '')
432 return self.vc.create_fs_volume(vol_id, placement)
11fdf7f2 433
f91f0fd5 434 @mgr_cmd_wrap
81eedcae 435 def _cmd_fs_volume_rm(self, inbuf, cmd):
11fdf7f2 436 vol_name = cmd['vol_name']
eafe8130 437 confirm = cmd.get('yes-i-really-mean-it', None)
92f5a8d4 438 return self.vc.delete_fs_volume(vol_name, confirm)
11fdf7f2 439
    @mgr_cmd_wrap
    def _cmd_fs_volume_ls(self, inbuf, cmd):
        # List all CephFS volumes; takes no arguments.
        return self.vc.list_fs_volumes()

11fdf7f2 443
f91f0fd5 444 @mgr_cmd_wrap
81eedcae
TL
445 def _cmd_fs_subvolumegroup_create(self, inbuf, cmd):
446 """
447 :return: a 3-tuple of return code(int), empty string(str), error message (str)
448 """
494da23a 449 return self.vc.create_subvolume_group(
92f5a8d4
TL
450 vol_name=cmd['vol_name'], group_name=cmd['group_name'],
451 pool_layout=cmd.get('pool_layout', None), mode=cmd.get('mode', '755'),
452 uid=cmd.get('uid', None), gid=cmd.get('gid', None))
11fdf7f2 453
f91f0fd5 454 @mgr_cmd_wrap
81eedcae
TL
455 def _cmd_fs_subvolumegroup_rm(self, inbuf, cmd):
456 """
457 :return: a 3-tuple of return code(int), empty string(str), error message (str)
458 """
92f5a8d4 459 return self.vc.remove_subvolume_group(vol_name=cmd['vol_name'],
494da23a
TL
460 group_name=cmd['group_name'],
461 force=cmd.get('force', False))
11fdf7f2 462
    @mgr_cmd_wrap
    def _cmd_fs_subvolumegroup_ls(self, inbuf, cmd):
        # List the subvolume groups of the given volume.
        return self.vc.list_subvolume_groups(vol_name=cmd['vol_name'])

eafe8130 466
f91f0fd5 467 @mgr_cmd_wrap
81eedcae
TL
468 def _cmd_fs_subvolume_create(self, inbuf, cmd):
469 """
470 :return: a 3-tuple of return code(int), empty string(str), error message (str)
471 """
92f5a8d4 472 return self.vc.create_subvolume(vol_name=cmd['vol_name'],
494da23a
TL
473 sub_name=cmd['sub_name'],
474 group_name=cmd.get('group_name', None),
475 size=cmd.get('size', None),
476 pool_layout=cmd.get('pool_layout', None),
92f5a8d4
TL
477 uid=cmd.get('uid', None),
478 gid=cmd.get('gid', None),
e306af50
TL
479 mode=cmd.get('mode', '755'),
480 namespace_isolated=cmd.get('namespace_isolated', False))
11fdf7f2 481
f91f0fd5 482 @mgr_cmd_wrap
11fdf7f2 483 def _cmd_fs_subvolume_rm(self, inbuf, cmd):
81eedcae
TL
484 """
485 :return: a 3-tuple of return code(int), empty string(str), error message (str)
486 """
92f5a8d4 487 return self.vc.remove_subvolume(vol_name=cmd['vol_name'],
494da23a
TL
488 sub_name=cmd['sub_name'],
489 group_name=cmd.get('group_name', None),
490 force=cmd.get('force', False))
11fdf7f2 491
f91f0fd5 492 @mgr_cmd_wrap
eafe8130 493 def _cmd_fs_subvolume_ls(self, inbuf, cmd):
92f5a8d4 494 return self.vc.list_subvolumes(vol_name=cmd['vol_name'],
eafe8130
TL
495 group_name=cmd.get('group_name', None))
496
f91f0fd5 497 @mgr_cmd_wrap
494da23a
TL
498 def _cmd_fs_subvolumegroup_getpath(self, inbuf, cmd):
499 return self.vc.getpath_subvolume_group(
92f5a8d4 500 vol_name=cmd['vol_name'], group_name=cmd['group_name'])
11fdf7f2 501
f91f0fd5 502 @mgr_cmd_wrap
81eedcae 503 def _cmd_fs_subvolume_getpath(self, inbuf, cmd):
92f5a8d4 504 return self.vc.subvolume_getpath(vol_name=cmd['vol_name'],
494da23a
TL
505 sub_name=cmd['sub_name'],
506 group_name=cmd.get('group_name', None))
11fdf7f2 507
f91f0fd5 508 @mgr_cmd_wrap
1911f103
TL
509 def _cmd_fs_subvolume_info(self, inbuf, cmd):
510 return self.vc.subvolume_info(vol_name=cmd['vol_name'],
511 sub_name=cmd['sub_name'],
512 group_name=cmd.get('group_name', None))
513
f91f0fd5 514 @mgr_cmd_wrap
f6b5b4d7
TL
515 def _cmd_fs_subvolumegroup_pin(self, inbuf, cmd):
516 return self.vc.pin_subvolume_group(vol_name=cmd['vol_name'],
517 group_name=cmd['group_name'], pin_type=cmd['pin_type'],
518 pin_setting=cmd['pin_setting'])
519
f91f0fd5 520 @mgr_cmd_wrap
81eedcae 521 def _cmd_fs_subvolumegroup_snapshot_create(self, inbuf, cmd):
92f5a8d4 522 return self.vc.create_subvolume_group_snapshot(vol_name=cmd['vol_name'],
494da23a
TL
523 group_name=cmd['group_name'],
524 snap_name=cmd['snap_name'])
11fdf7f2 525
f91f0fd5 526 @mgr_cmd_wrap
81eedcae 527 def _cmd_fs_subvolumegroup_snapshot_rm(self, inbuf, cmd):
92f5a8d4 528 return self.vc.remove_subvolume_group_snapshot(vol_name=cmd['vol_name'],
494da23a
TL
529 group_name=cmd['group_name'],
530 snap_name=cmd['snap_name'],
531 force=cmd.get('force', False))
11fdf7f2 532
f91f0fd5 533 @mgr_cmd_wrap
eafe8130 534 def _cmd_fs_subvolumegroup_snapshot_ls(self, inbuf, cmd):
92f5a8d4 535 return self.vc.list_subvolume_group_snapshots(vol_name=cmd['vol_name'],
eafe8130
TL
536 group_name=cmd['group_name'])
537
f91f0fd5 538 @mgr_cmd_wrap
81eedcae 539 def _cmd_fs_subvolume_snapshot_create(self, inbuf, cmd):
92f5a8d4 540 return self.vc.create_subvolume_snapshot(vol_name=cmd['vol_name'],
494da23a
TL
541 sub_name=cmd['sub_name'],
542 snap_name=cmd['snap_name'],
543 group_name=cmd.get('group_name', None))
11fdf7f2 544
f91f0fd5 545 @mgr_cmd_wrap
81eedcae 546 def _cmd_fs_subvolume_snapshot_rm(self, inbuf, cmd):
92f5a8d4 547 return self.vc.remove_subvolume_snapshot(vol_name=cmd['vol_name'],
494da23a
TL
548 sub_name=cmd['sub_name'],
549 snap_name=cmd['snap_name'],
550 group_name=cmd.get('group_name', None),
551 force=cmd.get('force', False))
eafe8130 552
f91f0fd5 553 @mgr_cmd_wrap
e306af50
TL
554 def _cmd_fs_subvolume_snapshot_info(self, inbuf, cmd):
555 return self.vc.subvolume_snapshot_info(vol_name=cmd['vol_name'],
556 sub_name=cmd['sub_name'],
557 snap_name=cmd['snap_name'],
558 group_name=cmd.get('group_name', None))
559
f91f0fd5 560 @mgr_cmd_wrap
eafe8130 561 def _cmd_fs_subvolume_snapshot_ls(self, inbuf, cmd):
92f5a8d4 562 return self.vc.list_subvolume_snapshots(vol_name=cmd['vol_name'],
eafe8130
TL
563 sub_name=cmd['sub_name'],
564 group_name=cmd.get('group_name', None))
92f5a8d4 565
f91f0fd5 566 @mgr_cmd_wrap
92f5a8d4
TL
567 def _cmd_fs_subvolume_resize(self, inbuf, cmd):
568 return self.vc.resize_subvolume(vol_name=cmd['vol_name'], sub_name=cmd['sub_name'],
569 new_size=cmd['new_size'], group_name=cmd.get('group_name', None),
570 no_shrink=cmd.get('no_shrink', False))
571
f91f0fd5 572 @mgr_cmd_wrap
f6b5b4d7
TL
573 def _cmd_fs_subvolume_pin(self, inbuf, cmd):
574 return self.vc.subvolume_pin(vol_name=cmd['vol_name'],
575 sub_name=cmd['sub_name'], pin_type=cmd['pin_type'],
576 pin_setting=cmd['pin_setting'],
577 group_name=cmd.get('group_name', None))
578
f91f0fd5 579 @mgr_cmd_wrap
92f5a8d4
TL
580 def _cmd_fs_subvolume_snapshot_protect(self, inbuf, cmd):
581 return self.vc.protect_subvolume_snapshot(vol_name=cmd['vol_name'], sub_name=cmd['sub_name'],
582 snap_name=cmd['snap_name'], group_name=cmd.get('group_name', None))
583
f91f0fd5 584 @mgr_cmd_wrap
92f5a8d4
TL
585 def _cmd_fs_subvolume_snapshot_unprotect(self, inbuf, cmd):
586 return self.vc.unprotect_subvolume_snapshot(vol_name=cmd['vol_name'], sub_name=cmd['sub_name'],
587 snap_name=cmd['snap_name'], group_name=cmd.get('group_name', None))
588
f91f0fd5 589 @mgr_cmd_wrap
92f5a8d4
TL
590 def _cmd_fs_subvolume_snapshot_clone(self, inbuf, cmd):
591 return self.vc.clone_subvolume_snapshot(
592 vol_name=cmd['vol_name'], sub_name=cmd['sub_name'], snap_name=cmd['snap_name'],
593 group_name=cmd.get('group_name', None), pool_layout=cmd.get('pool_layout', None),
594 target_sub_name=cmd['target_sub_name'], target_group_name=cmd.get('target_group_name', None))
595
f91f0fd5 596 @mgr_cmd_wrap
92f5a8d4
TL
597 def _cmd_fs_clone_status(self, inbuf, cmd):
598 return self.vc.clone_status(
599 vol_name=cmd['vol_name'], clone_name=cmd['clone_name'], group_name=cmd.get('group_name', None))
9f95a23c 600
f91f0fd5 601 @mgr_cmd_wrap
9f95a23c
TL
602 def _cmd_fs_clone_cancel(self, inbuf, cmd):
603 return self.vc.clone_cancel(
604 vol_name=cmd['vol_name'], clone_name=cmd['clone_name'], group_name=cmd.get('group_name', None))
f6b5b4d7 605
f91f0fd5 606 @mgr_cmd_wrap
f6b5b4d7
TL
607 def _cmd_nfs_export_create_cephfs(self, inbuf, cmd):
608 #TODO Extend export creation for rgw.
609 return self.fs_export.create_export(fs_name=cmd['fsname'], cluster_id=cmd['clusterid'],
610 pseudo_path=cmd['binding'], read_only=cmd.get('readonly', False), path=cmd.get('path', '/'))
611
f91f0fd5 612 @mgr_cmd_wrap
f6b5b4d7
TL
613 def _cmd_nfs_export_delete(self, inbuf, cmd):
614 return self.fs_export.delete_export(cluster_id=cmd['clusterid'], pseudo_path=cmd['binding'])
615
f91f0fd5 616 @mgr_cmd_wrap
f6b5b4d7
TL
617 def _cmd_nfs_export_ls(self, inbuf, cmd):
618 return self.fs_export.list_exports(cluster_id=cmd['clusterid'], detailed=cmd.get('detailed', False))
619
f91f0fd5 620 @mgr_cmd_wrap
f6b5b4d7
TL
621 def _cmd_nfs_export_get(self, inbuf, cmd):
622 return self.fs_export.get_export(cluster_id=cmd['clusterid'], pseudo_path=cmd['binding'])
623
f91f0fd5 624 @mgr_cmd_wrap
f6b5b4d7
TL
625 def _cmd_nfs_cluster_create(self, inbuf, cmd):
626 return self.nfs.create_nfs_cluster(cluster_id=cmd['clusterid'], export_type=cmd['type'],
627 placement=cmd.get('placement', None))
628
f91f0fd5 629 @mgr_cmd_wrap
f6b5b4d7
TL
630 def _cmd_nfs_cluster_update(self, inbuf, cmd):
631 return self.nfs.update_nfs_cluster(cluster_id=cmd['clusterid'], placement=cmd['placement'])
632
f91f0fd5 633 @mgr_cmd_wrap
f6b5b4d7
TL
634 def _cmd_nfs_cluster_delete(self, inbuf, cmd):
635 return self.nfs.delete_nfs_cluster(cluster_id=cmd['clusterid'])
636
    @mgr_cmd_wrap
    def _cmd_nfs_cluster_ls(self, inbuf, cmd):
        # List all NFS clusters; takes no arguments.
        return self.nfs.list_nfs_cluster()

640
f91f0fd5 641 @mgr_cmd_wrap
f6b5b4d7
TL
642 def _cmd_nfs_cluster_info(self, inbuf, cmd):
643 return self.nfs.show_nfs_cluster_info(cluster_id=cmd.get('clusterid', None))
644
645 def _cmd_nfs_cluster_config_set(self, inbuf, cmd):
646 return self.nfs.set_nfs_cluster_config(cluster_id=cmd['clusterid'], nfs_config=inbuf)
647
648 def _cmd_nfs_cluster_config_reset(self, inbuf, cmd):
649 return self.nfs.reset_nfs_cluster_config(cluster_id=cmd['clusterid'])