import json
import errno
import logging
from threading import Event

import cephfs

from .fs_util import listdir

from .operations.volume import ConnectionPool, open_volume, create_volume, \
    delete_volume, list_volumes, get_pool_names
from .operations.group import open_group, create_group, remove_group, open_group_unique
from .operations.subvolume import open_subvol, create_subvol, remove_subvol, \
    create_clone

from .vol_spec import VolSpec
from .exception import VolumeException
from .async_cloner import Cloner
from .purge_queue import ThreadPoolPurgeQueueMixin
from .operations.template import SubvolumeOpType

log = logging.getLogger(__name__)

def octal_str_to_decimal_int(mode):
    try:
        return int(mode, 8)
    except ValueError:
        raise VolumeException(-errno.EINVAL, "Invalid mode '{0}'".format(mode))

def name_to_json(names):
    """
    Convert a list of byte-string names into a JSON array of {'name': <name>} entries.
    """
    namelist = [{'name': name.decode('utf-8')} for name in names]
    return json.dumps(namelist, indent=4, sort_keys=True)
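# Example (illustrative): name_to_json([b'group_a', b'group_b']) returns a
# pretty-printed JSON array equivalent to [{"name": "group_a"}, {"name": "group_b"}].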

class VolumeClient(object):
    def __init__(self, mgr):
        self.mgr = mgr
        self.stopping = Event()
        # volume specification
        self.volspec = VolSpec(mgr.rados.conf_get('client_snapdir'))
        self.connection_pool = ConnectionPool(self.mgr)
        self.cloner = Cloner(self, self.mgr.max_concurrent_clones)
        self.purge_queue = ThreadPoolPurgeQueueMixin(self, 4)
        # on startup, queue a purge job for the available volumes to kickstart
        # purging of leftover subvolume entries in trash. note that if the
        # trash directory does not exist, or there are no purge entries
        # available for a volume, that volume is removed from the purge
        # job list.
        fs_map = self.mgr.get('fs_map')
        for fs in fs_map['filesystems']:
            self.cloner.queue_job(fs['mdsmap']['fs_name'])
            self.purge_queue.queue_job(fs['mdsmap']['fs_name'])

    def is_stopping(self):
        return self.stopping.is_set()

    def shutdown(self):
        log.info("shutting down")
        # first, note that we're shutting down
        self.stopping.set()
        # second, ask purge threads to quit
        self.purge_queue.cancel_all_jobs()
        # third, delete all libcephfs handles from the connection pool
        self.connection_pool.del_all_handles()

    def cluster_log(self, msg, lvl=None):
        """
        Log a message to the cluster log; the log level defaults to WARN.
        """
        if not lvl:
            lvl = self.mgr.CLUSTER_LOG_PRIO_WARN
        self.mgr.cluster_log("cluster", lvl, msg)

    def volume_exception_to_retval(self, ve):
        """
        Return the (return code, stdout, stderr) tuple representation of a volume exception.
        """
        return ve.to_tuple()
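    # For illustration: the command handlers below return mgr-style (retcode, stdout, stderr)
    # tuples; a lookup failure would typically surface as something like
    # (-errno.ENOENT, "", "subvolume 'sub0' does not exist").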

    ### volume operations -- create, rm, ls

    def create_fs_volume(self, volname, placement):
        if self.is_stopping():
            return -errno.ESHUTDOWN, "", "shutdown in progress"
        return create_volume(self.mgr, volname, placement)

    def delete_fs_volume(self, volname, confirm):
        if self.is_stopping():
            return -errno.ESHUTDOWN, "", "shutdown in progress"

        if confirm != "--yes-i-really-mean-it":
            return -errno.EPERM, "", "WARNING: this will *PERMANENTLY DESTROY* all data " \
                "stored in the filesystem '{0}'. If you are *ABSOLUTELY CERTAIN* " \
                "that is what you want, re-issue the command followed by " \
                "--yes-i-really-mean-it.".format(volname)

        ret, out, err = self.mgr.check_mon_command({
            'prefix': 'config get',
            'key': 'mon_allow_pool_delete',
            'who': 'mon',
            'format': 'json',
        })
        mon_allow_pool_delete = json.loads(out)
        if not mon_allow_pool_delete:
            return -errno.EPERM, "", "pool deletion is disabled; you must first " \
                "set the mon_allow_pool_delete config option to true before volumes " \
                "can be deleted"

        metadata_pool, data_pools = get_pool_names(self.mgr, volname)
        if not metadata_pool:
            return -errno.ENOENT, "", "volume {0} doesn't exist".format(volname)
        self.purge_queue.cancel_jobs(volname)
        self.connection_pool.del_fs_handle(volname, wait=True)
        return delete_volume(self.mgr, volname, metadata_pool, data_pools)

    def list_fs_volumes(self):
        if self.stopping.is_set():
            return -errno.ESHUTDOWN, "", "shutdown in progress"
        volumes = list_volumes(self.mgr)
        return 0, json.dumps(volumes, indent=4, sort_keys=True), ""

    ### subvolume operations

    def _create_subvolume(self, fs_handle, volname, group, subvolname, **kwargs):
        size = kwargs['size']
        pool = kwargs['pool_layout']
        uid = kwargs['uid']
        gid = kwargs['gid']
        mode = kwargs['mode']
        isolate_nspace = kwargs['namespace_isolated']

        oct_mode = octal_str_to_decimal_int(mode)
        try:
            create_subvol(
                fs_handle, self.volspec, group, subvolname, size, isolate_nspace, pool, oct_mode, uid, gid)
        except VolumeException as ve:
            # kick the purge threads for async removal -- note that this
            # assumes that the subvolume is moved to trashcan for cleanup on error.
            self.purge_queue.queue_job(volname)
            raise ve

    def create_subvolume(self, **kwargs):
        ret = 0, "", ""
        volname = kwargs['vol_name']
        subvolname = kwargs['sub_name']
        groupname = kwargs['group_name']
        size = kwargs['size']
        pool = kwargs['pool_layout']
        uid = kwargs['uid']
        gid = kwargs['gid']
        isolate_nspace = kwargs['namespace_isolated']

        try:
            with open_volume(self, volname) as fs_handle:
                with open_group(fs_handle, self.volspec, groupname) as group:
                    try:
                        with open_subvol(fs_handle, self.volspec, group, subvolname, SubvolumeOpType.CREATE) as subvolume:
                            # idempotent creation -- the subvolume already exists;
                            # updating its attributes is supported.
                            attrs = {
                                'uid': uid if uid else subvolume.uid,
                                'gid': gid if gid else subvolume.gid,
                                'data_pool': pool,
                                'pool_namespace': subvolume.namespace if isolate_nspace else None,
                                'quota': size
                            }
                            subvolume.set_attrs(subvolume.path, attrs)
                    except VolumeException as ve:
                        if ve.errno == -errno.ENOENT:
                            self._create_subvolume(fs_handle, volname, group, subvolname, **kwargs)
                        else:
                            raise
        except VolumeException as ve:
            # volume/group does not exist or subvolume creation failed
            ret = self.volume_exception_to_retval(ve)
        return ret

    def remove_subvolume(self, **kwargs):
        ret = 0, "", ""
        volname = kwargs['vol_name']
        subvolname = kwargs['sub_name']
        groupname = kwargs['group_name']
        force = kwargs['force']
        retainsnaps = kwargs['retain_snapshots']

        try:
            with open_volume(self, volname) as fs_handle:
                with open_group(fs_handle, self.volspec, groupname) as group:
                    remove_subvol(fs_handle, self.volspec, group, subvolname, force, retainsnaps)
                    # kick the purge threads for async removal -- note that this
                    # assumes that the subvolume is moved to the trash can.
                    # TODO: make the purge queue a singleton so that the trash can kicks
                    # the purge threads on dump.
                    self.purge_queue.queue_job(volname)
        except VolumeException as ve:
            if ve.errno == -errno.EAGAIN:
                ve = VolumeException(ve.errno, ve.error_str + " (use --force to override)")
                ret = self.volume_exception_to_retval(ve)
            elif not (ve.errno == -errno.ENOENT and force):
                ret = self.volume_exception_to_retval(ve)
        return ret

    def resize_subvolume(self, **kwargs):
        ret = 0, "", ""
        volname = kwargs['vol_name']
        subvolname = kwargs['sub_name']
        newsize = kwargs['new_size']
        noshrink = kwargs['no_shrink']
        groupname = kwargs['group_name']

        try:
            with open_volume(self, volname) as fs_handle:
                with open_group(fs_handle, self.volspec, groupname) as group:
                    with open_subvol(fs_handle, self.volspec, group, subvolname, SubvolumeOpType.RESIZE) as subvolume:
                        nsize, usedbytes = subvolume.resize(newsize, noshrink)
                        ret = 0, json.dumps(
                            [{'bytes_used': usedbytes}, {'bytes_quota': nsize},
                             {'bytes_pcent': "undefined" if nsize == 0 else '{0:.2f}'.format((float(usedbytes) / nsize) * 100.0)}],
                            indent=4, sort_keys=True), ""
        except VolumeException as ve:
            ret = self.volume_exception_to_retval(ve)
        return ret
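    # Illustrative output of resize_subvolume (values are examples only): a JSON array
    # like [{"bytes_used": 107374182}, {"bytes_quota": 1073741824}, {"bytes_pcent": "10.00"}];
    # bytes_pcent is "undefined" when no quota is set (nsize == 0).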

    def subvolume_pin(self, **kwargs):
        ret = 0, "", ""
        volname = kwargs['vol_name']
        subvolname = kwargs['sub_name']
        pin_type = kwargs['pin_type']
        pin_setting = kwargs['pin_setting']
        groupname = kwargs['group_name']

        try:
            with open_volume(self, volname) as fs_handle:
                with open_group(fs_handle, self.volspec, groupname) as group:
                    with open_subvol(fs_handle, self.volspec, group, subvolname, SubvolumeOpType.PIN) as subvolume:
                        subvolume.pin(pin_type, pin_setting)
                        ret = 0, json.dumps({}), ""
        except VolumeException as ve:
            ret = self.volume_exception_to_retval(ve)
        return ret

    def subvolume_getpath(self, **kwargs):
        ret = None
        volname = kwargs['vol_name']
        subvolname = kwargs['sub_name']
        groupname = kwargs['group_name']

        try:
            with open_volume(self, volname) as fs_handle:
                with open_group(fs_handle, self.volspec, groupname) as group:
                    with open_subvol(fs_handle, self.volspec, group, subvolname, SubvolumeOpType.GETPATH) as subvolume:
                        subvolpath = subvolume.path
                        ret = 0, subvolpath.decode("utf-8"), ""
        except VolumeException as ve:
            ret = self.volume_exception_to_retval(ve)
        return ret

    def subvolume_info(self, **kwargs):
        ret = None
        volname = kwargs['vol_name']
        subvolname = kwargs['sub_name']
        groupname = kwargs['group_name']

        try:
            with open_volume(self, volname) as fs_handle:
                with open_group(fs_handle, self.volspec, groupname) as group:
                    with open_subvol(fs_handle, self.volspec, group, subvolname, SubvolumeOpType.INFO) as subvolume:
                        mon_addr_lst = []
                        mon_map_mons = self.mgr.get('mon_map')['mons']
                        for mon in mon_map_mons:
                            # mon addresses may carry a trailing "/<nonce>"; keep only ip:port
                            ip_port = mon['addr'].split("/")[0]
                            mon_addr_lst.append(ip_port)

                        subvol_info_dict = subvolume.info()
                        subvol_info_dict["mon_addrs"] = mon_addr_lst
                        ret = 0, json.dumps(subvol_info_dict, indent=4, sort_keys=True), ""
        except VolumeException as ve:
            ret = self.volume_exception_to_retval(ve)
        return ret

    def list_subvolumes(self, **kwargs):
        ret = 0, "", ""
        volname = kwargs['vol_name']
        groupname = kwargs['group_name']

        try:
            with open_volume(self, volname) as fs_handle:
                with open_group(fs_handle, self.volspec, groupname) as group:
                    subvolumes = group.list_subvolumes()
                    ret = 0, name_to_json(subvolumes), ""
        except VolumeException as ve:
            ret = self.volume_exception_to_retval(ve)
        return ret

    ### subvolume snapshot

    def create_subvolume_snapshot(self, **kwargs):
        ret = 0, "", ""
        volname = kwargs['vol_name']
        subvolname = kwargs['sub_name']
        snapname = kwargs['snap_name']
        groupname = kwargs['group_name']

        try:
            with open_volume(self, volname) as fs_handle:
                with open_group(fs_handle, self.volspec, groupname) as group:
                    with open_subvol(fs_handle, self.volspec, group, subvolname, SubvolumeOpType.SNAP_CREATE) as subvolume:
                        subvolume.create_snapshot(snapname)
        except VolumeException as ve:
            ret = self.volume_exception_to_retval(ve)
        return ret

    def remove_subvolume_snapshot(self, **kwargs):
        ret = 0, "", ""
        volname = kwargs['vol_name']
        subvolname = kwargs['sub_name']
        snapname = kwargs['snap_name']
        groupname = kwargs['group_name']
        force = kwargs['force']

        try:
            with open_volume(self, volname) as fs_handle:
                with open_group(fs_handle, self.volspec, groupname) as group:
                    with open_subvol(fs_handle, self.volspec, group, subvolname, SubvolumeOpType.SNAP_REMOVE) as subvolume:
                        subvolume.remove_snapshot(snapname)
        except VolumeException as ve:
            # ESTALE indicates that the subvolume is stale due to an internal removal
            # in progress; kick the purge jobs so it gets cleaned up
            if ve.errno == -errno.ESTALE:
                self.purge_queue.queue_job(volname)
            elif not (ve.errno == -errno.ENOENT and force):
                ret = self.volume_exception_to_retval(ve)
        return ret

    def subvolume_snapshot_info(self, **kwargs):
        ret = 0, "", ""
        volname = kwargs['vol_name']
        subvolname = kwargs['sub_name']
        snapname = kwargs['snap_name']
        groupname = kwargs['group_name']

        try:
            with open_volume(self, volname) as fs_handle:
                with open_group(fs_handle, self.volspec, groupname) as group:
                    with open_subvol(fs_handle, self.volspec, group, subvolname, SubvolumeOpType.SNAP_INFO) as subvolume:
                        snap_info_dict = subvolume.snapshot_info(snapname)
                        ret = 0, json.dumps(snap_info_dict, indent=4, sort_keys=True), ""
        except VolumeException as ve:
            ret = self.volume_exception_to_retval(ve)
        return ret

    def list_subvolume_snapshots(self, **kwargs):
        ret = 0, "", ""
        volname = kwargs['vol_name']
        subvolname = kwargs['sub_name']
        groupname = kwargs['group_name']

        try:
            with open_volume(self, volname) as fs_handle:
                with open_group(fs_handle, self.volspec, groupname) as group:
                    with open_subvol(fs_handle, self.volspec, group, subvolname, SubvolumeOpType.SNAP_LIST) as subvolume:
                        snapshots = subvolume.list_snapshots()
                        ret = 0, name_to_json(snapshots), ""
        except VolumeException as ve:
            ret = self.volume_exception_to_retval(ve)
        return ret

    def protect_subvolume_snapshot(self, **kwargs):
        ret = 0, "", "Deprecation warning: 'snapshot protect' call is deprecated and will be removed in a future release"
        volname = kwargs['vol_name']
        subvolname = kwargs['sub_name']
        groupname = kwargs['group_name']

        try:
            with open_volume(self, volname) as fs_handle:
                with open_group(fs_handle, self.volspec, groupname) as group:
                    with open_subvol(fs_handle, self.volspec, group, subvolname, SubvolumeOpType.SNAP_PROTECT) as subvolume:
                        log.warning("snapshot protect call is deprecated and will be removed in a future release")
        except VolumeException as ve:
            ret = self.volume_exception_to_retval(ve)
        return ret

    def unprotect_subvolume_snapshot(self, **kwargs):
        ret = 0, "", "Deprecation warning: 'snapshot unprotect' call is deprecated and will be removed in a future release"
        volname = kwargs['vol_name']
        subvolname = kwargs['sub_name']
        groupname = kwargs['group_name']

        try:
            with open_volume(self, volname) as fs_handle:
                with open_group(fs_handle, self.volspec, groupname) as group:
                    with open_subvol(fs_handle, self.volspec, group, subvolname, SubvolumeOpType.SNAP_UNPROTECT) as subvolume:
                        log.warning("snapshot unprotect call is deprecated and will be removed in a future release")
        except VolumeException as ve:
            ret = self.volume_exception_to_retval(ve)
        return ret

    def _prepare_clone_subvolume(self, fs_handle, volname, s_subvolume, s_snapname, t_group, t_subvolname, **kwargs):
        t_pool = kwargs['pool_layout']
        s_subvolname = kwargs['sub_name']
        s_groupname = kwargs['group_name']
        t_groupname = kwargs['target_group_name']

        create_clone(fs_handle, self.volspec, t_group, t_subvolname, t_pool, volname, s_subvolume, s_snapname)
        with open_subvol(fs_handle, self.volspec, t_group, t_subvolname, SubvolumeOpType.CLONE_INTERNAL) as t_subvolume:
            try:
                if t_groupname == s_groupname and t_subvolname == s_subvolname:
                    t_subvolume.attach_snapshot(s_snapname, t_subvolume)
                else:
                    s_subvolume.attach_snapshot(s_snapname, t_subvolume)
                self.cloner.queue_job(volname)
            except VolumeException as ve:
                try:
                    t_subvolume.remove()
                    self.purge_queue.queue_job(volname)
                except Exception as e:
                    log.warning("failed to cleanup clone subvolume '{0}' ({1})".format(t_subvolname, e))
                raise ve

    def _clone_subvolume_snapshot(self, fs_handle, volname, s_group, s_subvolume, **kwargs):
        s_snapname = kwargs['snap_name']
        target_subvolname = kwargs['target_sub_name']
        target_groupname = kwargs['target_group_name']
        s_groupname = kwargs['group_name']

        if s_snapname.encode('utf-8') not in s_subvolume.list_snapshots():
            raise VolumeException(-errno.ENOENT, "snapshot '{0}' does not exist".format(s_snapname))

        with open_group_unique(fs_handle, self.volspec, target_groupname, s_group, s_groupname) as target_group:
            try:
                with open_subvol(fs_handle, self.volspec, target_group, target_subvolname, SubvolumeOpType.CLONE_CREATE):
                    raise VolumeException(-errno.EEXIST, "subvolume '{0}' exists".format(target_subvolname))
            except VolumeException as ve:
                if ve.errno == -errno.ENOENT:
                    self._prepare_clone_subvolume(fs_handle, volname, s_subvolume, s_snapname,
                                                  target_group, target_subvolname, **kwargs)
                else:
                    raise

    def clone_subvolume_snapshot(self, **kwargs):
        ret = 0, "", ""
        volname = kwargs['vol_name']
        s_subvolname = kwargs['sub_name']
        s_groupname = kwargs['group_name']

        try:
            with open_volume(self, volname) as fs_handle:
                with open_group(fs_handle, self.volspec, s_groupname) as s_group:
                    with open_subvol(fs_handle, self.volspec, s_group, s_subvolname, SubvolumeOpType.CLONE_SOURCE) as s_subvolume:
                        self._clone_subvolume_snapshot(fs_handle, volname, s_group, s_subvolume, **kwargs)
        except VolumeException as ve:
            ret = self.volume_exception_to_retval(ve)
        return ret

    def clone_status(self, **kwargs):
        ret = 0, "", ""
        volname = kwargs['vol_name']
        clonename = kwargs['clone_name']
        groupname = kwargs['group_name']

        try:
            with open_volume(self, volname) as fs_handle:
                with open_group(fs_handle, self.volspec, groupname) as group:
                    with open_subvol(fs_handle, self.volspec, group, clonename, SubvolumeOpType.CLONE_STATUS) as subvolume:
                        ret = 0, json.dumps({'status': subvolume.status}, indent=2), ""
        except VolumeException as ve:
            ret = self.volume_exception_to_retval(ve)
        return ret

    def clone_cancel(self, **kwargs):
        ret = 0, "", ""
        volname = kwargs['vol_name']
        clonename = kwargs['clone_name']
        groupname = kwargs['group_name']

        try:
            self.cloner.cancel_job(volname, (clonename, groupname))
        except VolumeException as ve:
            ret = self.volume_exception_to_retval(ve)
        return ret

    ### group operations

    def create_subvolume_group(self, **kwargs):
        ret = 0, "", ""
        volname = kwargs['vol_name']
        groupname = kwargs['group_name']
        pool = kwargs['pool_layout']
        uid = kwargs['uid']
        gid = kwargs['gid']
        mode = kwargs['mode']

        try:
            with open_volume(self, volname) as fs_handle:
                try:
                    with open_group(fs_handle, self.volspec, groupname):
                        # idempotent creation -- the group already exists, nothing to do
                        pass
                except VolumeException as ve:
                    if ve.errno == -errno.ENOENT:
                        oct_mode = octal_str_to_decimal_int(mode)
                        create_group(fs_handle, self.volspec, groupname, pool, oct_mode, uid, gid)
                    else:
                        raise
        except VolumeException as ve:
            # volume does not exist or subvolume group creation failed
            ret = self.volume_exception_to_retval(ve)
        return ret

    def remove_subvolume_group(self, **kwargs):
        ret = 0, "", ""
        volname = kwargs['vol_name']
        groupname = kwargs['group_name']
        force = kwargs['force']

        try:
            with open_volume(self, volname) as fs_handle:
                remove_group(fs_handle, self.volspec, groupname)
        except VolumeException as ve:
            if not (ve.errno == -errno.ENOENT and force):
                ret = self.volume_exception_to_retval(ve)
        return ret

    def getpath_subvolume_group(self, **kwargs):
        volname = kwargs['vol_name']
        groupname = kwargs['group_name']

        try:
            with open_volume(self, volname) as fs_handle:
                with open_group(fs_handle, self.volspec, groupname) as group:
                    return 0, group.path.decode('utf-8'), ""
        except VolumeException as ve:
            return self.volume_exception_to_retval(ve)

    def list_subvolume_groups(self, **kwargs):
        volname = kwargs['vol_name']
        ret = 0, '[]', ""
        try:
            with open_volume(self, volname) as fs_handle:
                groups = listdir(fs_handle, self.volspec.base_dir)
                ret = 0, name_to_json(groups), ""
        except VolumeException as ve:
            if ve.errno != -errno.ENOENT:
                ret = self.volume_exception_to_retval(ve)
        return ret

    def pin_subvolume_group(self, **kwargs):
        ret = 0, "", ""
        volname = kwargs['vol_name']
        groupname = kwargs['group_name']
        pin_type = kwargs['pin_type']
        pin_setting = kwargs['pin_setting']

        try:
            with open_volume(self, volname) as fs_handle:
                with open_group(fs_handle, self.volspec, groupname) as group:
                    group.pin(pin_type, pin_setting)
                    ret = 0, json.dumps({}), ""
        except VolumeException as ve:
            ret = self.volume_exception_to_retval(ve)
        return ret

    ### group snapshot

    def create_subvolume_group_snapshot(self, **kwargs):
        ret = -errno.ENOSYS, "", "subvolume group snapshots are not supported"
        volname = kwargs['vol_name']
        groupname = kwargs['group_name']
        # snapname = kwargs['snap_name']

        try:
            with open_volume(self, volname) as fs_handle:
                with open_group(fs_handle, self.volspec, groupname) as group:
                    # subvolumes are marked with the vxattr ceph.dir.subvolume, which denies
                    # snapshots at the subvolume group level (see: https://tracker.ceph.com/issues/46074)
                    # group.create_snapshot(snapname)
                    pass
        except VolumeException as ve:
            ret = self.volume_exception_to_retval(ve)
        return ret

    def remove_subvolume_group_snapshot(self, **kwargs):
        ret = 0, "", ""
        volname = kwargs['vol_name']
        groupname = kwargs['group_name']
        snapname = kwargs['snap_name']
        force = kwargs['force']

        try:
            with open_volume(self, volname) as fs_handle:
                with open_group(fs_handle, self.volspec, groupname) as group:
                    group.remove_snapshot(snapname)
        except VolumeException as ve:
            if not (ve.errno == -errno.ENOENT and force):
                ret = self.volume_exception_to_retval(ve)
        return ret

    def list_subvolume_group_snapshots(self, **kwargs):
        ret = 0, "", ""
        volname = kwargs['vol_name']
        groupname = kwargs['group_name']

        try:
            with open_volume(self, volname) as fs_handle:
                with open_group(fs_handle, self.volspec, groupname) as group:
                    snapshots = group.list_snapshots()
                    ret = 0, name_to_json(snapshots), ""
        except VolumeException as ve:
            ret = self.volume_exception_to_retval(ve)
        return ret
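
# A minimal usage sketch (illustrative only; argument values below are assumptions,
# not taken from this module): a ceph-mgr module constructs a VolumeClient with its
# module instance and dispatches command handlers, e.g.:
#
#   vc = VolumeClient(mgr)
#   rc, out, err = vc.create_subvolume(vol_name='a', sub_name='sv0', group_name=None,
#                                      size=None, pool_layout=None, uid=None, gid=None,
#                                      mode='755', namespace_isolated=False)
#   vc.shutdown()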