import os
import json
import time
import errno
import random
import logging
import collections
import uuid
import unittest
from hashlib import md5
from textwrap import dedent
from io import StringIO

from tasks.cephfs.cephfs_test_case import CephFSTestCase
from tasks.cephfs.fuse_mount import FuseMount
from teuthology.exceptions import CommandFailedError

log = logging.getLogger(__name__)


class TestVolumesHelper(CephFSTestCase):
    """Helper class for testing FS volume, subvolume group and subvolume operations."""
    TEST_VOLUME_PREFIX = "volume"
    TEST_SUBVOLUME_PREFIX = "subvolume"
    TEST_GROUP_PREFIX = "group"
    TEST_SNAPSHOT_PREFIX = "snapshot"
    TEST_CLONE_PREFIX = "clone"
    TEST_FILE_NAME_PREFIX = "subvolume_file"

    # for filling subvolume with data
    CLIENTS_REQUIRED = 2
    MDSS_REQUIRED = 2

    # io defaults
    DEFAULT_FILE_SIZE = 1  # MB
    DEFAULT_NUMBER_OF_FILES = 1024

    def _fs_cmd(self, *args):
        return self.mgr_cluster.mon_manager.raw_cluster_cmd("fs", *args)

    def _raw_cmd(self, *args):
        return self.mgr_cluster.mon_manager.raw_cluster_cmd(*args)

    def __check_clone_state(self, state, clone, clone_group=None, timo=120):
        check = 0
        args = ["clone", "status", self.volname, clone]
        if clone_group:
            args.append(clone_group)
        args = tuple(args)
        while check < timo:
            result = json.loads(self._fs_cmd(*args))
            if result["status"]["state"] == state:
                break
            check += 1
            time.sleep(1)
        self.assertTrue(check < timo)

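    # Note: "fs clone status" returns JSON shaped roughly like
    # {"status": {"state": "pending|in-progress|complete|failed|canceled"}};
    # the helpers below each poll for one of those states (the exact field
    # set may vary by release).
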
    def _get_clone_status(self, clone, clone_group=None):
        args = ["clone", "status", self.volname, clone]
        if clone_group:
            args.append(clone_group)
        args = tuple(args)
        result = json.loads(self._fs_cmd(*args))
        return result

    def _wait_for_clone_to_complete(self, clone, clone_group=None, timo=120):
        self.__check_clone_state("complete", clone, clone_group, timo)

    def _wait_for_clone_to_fail(self, clone, clone_group=None, timo=120):
        self.__check_clone_state("failed", clone, clone_group, timo)

    def _wait_for_clone_to_be_in_progress(self, clone, clone_group=None, timo=120):
        self.__check_clone_state("in-progress", clone, clone_group, timo)

    def _check_clone_canceled(self, clone, clone_group=None):
        self.__check_clone_state("canceled", clone, clone_group, timo=1)

    def _get_subvolume_snapshot_path(self, subvolume, snapshot, source_group, subvol_path, source_version):
        if source_version == 2:
            # v2
            if subvol_path is not None:
                (base_path, uuid_str) = os.path.split(subvol_path)
            else:
                (base_path, uuid_str) = os.path.split(self._get_subvolume_path(self.volname, subvolume, group_name=source_group))
            return os.path.join(base_path, ".snap", snapshot, uuid_str)

        # v1
        base_path = self._get_subvolume_path(self.volname, subvolume, group_name=source_group)
        return os.path.join(base_path, ".snap", snapshot)

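    # Layout note: a v2 subvolume keeps its data under an incarnation
    # directory, /volumes/<group>/<subvol>/<uuid>, so its snapshot contents
    # live at <subvol>/.snap/<snapshot>/<uuid>; a v1 subvolume is the
    # directory itself, with snapshots directly under its own .snap.
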
    def _verify_clone_attrs(self, source_path, clone_path):
        path1 = source_path
        path2 = clone_path

        p = self.mount_a.run_shell(["find", path1])
        paths = p.stdout.getvalue().strip().split()

        # for each entry in source and clone (sink) verify certain inode attributes:
        # inode type, mode, ownership, [am]time.
        for source_path in paths:
            sink_entry = source_path[len(path1)+1:]
            sink_path = os.path.join(path2, sink_entry)

            # mode+type ('stat -c %f' prints the raw mode word in hex)
            sval = int(self.mount_a.run_shell(['stat', '-c', '%f', source_path]).stdout.getvalue().strip(), 16)
            cval = int(self.mount_a.run_shell(['stat', '-c', '%f', sink_path]).stdout.getvalue().strip(), 16)
            self.assertEqual(sval, cval)

            # ownership
            sval = int(self.mount_a.run_shell(['stat', '-c', '%u', source_path]).stdout.getvalue().strip())
            cval = int(self.mount_a.run_shell(['stat', '-c', '%u', sink_path]).stdout.getvalue().strip())
            self.assertEqual(sval, cval)

            sval = int(self.mount_a.run_shell(['stat', '-c', '%g', source_path]).stdout.getvalue().strip())
            cval = int(self.mount_a.run_shell(['stat', '-c', '%g', sink_path]).stdout.getvalue().strip())
            self.assertEqual(sval, cval)

            # inode timestamps
            # do not check atime as the kclient generally does not update it the way ceph-fuse does
            sval = int(self.mount_a.run_shell(['stat', '-c', '%Y', source_path]).stdout.getvalue().strip())
            cval = int(self.mount_a.run_shell(['stat', '-c', '%Y', sink_path]).stdout.getvalue().strip())
            self.assertEqual(sval, cval)

    def _verify_clone_root(self, source_path, clone_path, clone, clone_group, clone_pool):
        # verifies the following clone root attrs: quota, data_pool and pool_namespace;
        # remaining attributes of the clone root are validated in _verify_clone_attrs
        clone_info = json.loads(self._get_subvolume_info(self.volname, clone, clone_group))

        # verify quota is inherited from source snapshot
        src_quota = self.mount_a.getfattr(source_path, "ceph.quota.max_bytes")
        # FIXME: kclient fails to get this quota value: https://tracker.ceph.com/issues/48075
        if isinstance(self.mount_a, FuseMount):
            self.assertEqual(clone_info["bytes_quota"], "infinite" if src_quota is None else int(src_quota))

        if clone_pool:
            # verify pool is set as per request
            self.assertEqual(clone_info["data_pool"], clone_pool)
        else:
            # verify pool and pool namespace are inherited from snapshot
            self.assertEqual(clone_info["data_pool"],
                             self.mount_a.getfattr(source_path, "ceph.dir.layout.pool"))
            self.assertEqual(clone_info["pool_namespace"],
                             self.mount_a.getfattr(source_path, "ceph.dir.layout.pool_namespace"))

    def _verify_clone(self, subvolume, snapshot, clone,
                      source_group=None, clone_group=None, clone_pool=None,
                      subvol_path=None, source_version=2, timo=120):
        # pass in subvol_path (the subvolume path when the snapshot was taken) when the
        # subvolume is removed but its snapshots are retained for clone verification
        path1 = self._get_subvolume_snapshot_path(subvolume, snapshot, source_group, subvol_path, source_version)
        path2 = self._get_subvolume_path(self.volname, clone, group_name=clone_group)

        check = 0
        # TODO: currently snapshot rentries are not stable if snapshot source entries
        # are removed, https://tracker.ceph.com/issues/46747
        while check < timo and subvol_path is None:
            val1 = int(self.mount_a.getfattr(path1, "ceph.dir.rentries"))
            val2 = int(self.mount_a.getfattr(path2, "ceph.dir.rentries"))
            if val1 == val2:
                break
            check += 1
            time.sleep(1)
        self.assertTrue(check < timo)

        self._verify_clone_root(path1, path2, clone, clone_group, clone_pool)
        self._verify_clone_attrs(path1, path2)

    def _generate_random_volume_name(self, count=1):
        n = self.volume_start
        volumes = [f"{TestVolumes.TEST_VOLUME_PREFIX}_{i:016}" for i in range(n, n+count)]
        self.volume_start += count
        return volumes[0] if count == 1 else volumes

    def _generate_random_subvolume_name(self, count=1):
        n = self.subvolume_start
        subvolumes = [f"{TestVolumes.TEST_SUBVOLUME_PREFIX}_{i:016}" for i in range(n, n+count)]
        self.subvolume_start += count
        return subvolumes[0] if count == 1 else subvolumes

    def _generate_random_group_name(self, count=1):
        n = self.group_start
        groups = [f"{TestVolumes.TEST_GROUP_PREFIX}_{i:016}" for i in range(n, n+count)]
        self.group_start += count
        return groups[0] if count == 1 else groups

    def _generate_random_snapshot_name(self, count=1):
        n = self.snapshot_start
        snaps = [f"{TestVolumes.TEST_SNAPSHOT_PREFIX}_{i:016}" for i in range(n, n+count)]
        self.snapshot_start += count
        return snaps[0] if count == 1 else snaps

    def _generate_random_clone_name(self, count=1):
        n = self.clone_start
        clones = [f"{TestVolumes.TEST_CLONE_PREFIX}_{i:016}" for i in range(n, n+count)]
        self.clone_start += count
        return clones[0] if count == 1 else clones

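    # Note: despite the "random" in their names, these helpers just append a
    # zero-padded counter (seeded randomly in setUp) to a fixed prefix, so
    # names within a run are unique and reproducible from the start offsets.
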
    def _enable_multi_fs(self):
        self._fs_cmd("flag", "set", "enable_multiple", "true", "--yes-i-really-mean-it")

    def _create_or_reuse_test_volume(self):
        result = json.loads(self._fs_cmd("volume", "ls"))
        if len(result) == 0:
            self.vol_created = True
            self.volname = self._generate_random_volume_name()
            self._fs_cmd("volume", "create", self.volname)
        else:
            self.volname = result[0]['name']

    def _get_subvolume_group_path(self, vol_name, group_name):
        args = ("subvolumegroup", "getpath", vol_name, group_name)
        path = self._fs_cmd(*args)
        # strip the leading '/' and trailing whitespace
        return path[1:].rstrip()

    def _get_subvolume_path(self, vol_name, subvol_name, group_name=None):
        args = ["subvolume", "getpath", vol_name, subvol_name]
        if group_name:
            args.append(group_name)
        args = tuple(args)
        path = self._fs_cmd(*args)
        # strip the leading '/' and trailing whitespace
        return path[1:].rstrip()

    def _get_subvolume_info(self, vol_name, subvol_name, group_name=None):
        args = ["subvolume", "info", vol_name, subvol_name]
        if group_name:
            args.append(group_name)
        args = tuple(args)
        subvol_md = self._fs_cmd(*args)
        return subvol_md

    def _get_subvolume_snapshot_info(self, vol_name, subvol_name, snapname, group_name=None):
        args = ["subvolume", "snapshot", "info", vol_name, subvol_name, snapname]
        if group_name:
            args.append(group_name)
        args = tuple(args)
        snap_md = self._fs_cmd(*args)
        return snap_md

    def _delete_test_volume(self):
        self._fs_cmd("volume", "rm", self.volname, "--yes-i-really-mean-it")

    def _do_subvolume_pool_and_namespace_update(self, subvolume, pool=None, pool_namespace=None, subvolume_group=None):
        subvolpath = self._get_subvolume_path(self.volname, subvolume, group_name=subvolume_group)

        if pool is not None:
            self.mount_a.setfattr(subvolpath, 'ceph.dir.layout.pool', pool, sudo=True)

        if pool_namespace is not None:
            self.mount_a.setfattr(subvolpath, 'ceph.dir.layout.pool_namespace', pool_namespace, sudo=True)

    def _do_subvolume_attr_update(self, subvolume, uid, gid, mode, subvolume_group=None):
        subvolpath = self._get_subvolume_path(self.volname, subvolume, group_name=subvolume_group)

        # mode
        self.mount_a.run_shell(['chmod', mode, subvolpath], sudo=True)

        # ownership
        self.mount_a.run_shell(['chown', uid, subvolpath], sudo=True)
        self.mount_a.run_shell(['chgrp', gid, subvolpath], sudo=True)

    def _do_subvolume_io(self, subvolume, subvolume_group=None, create_dir=None,
                         number_of_files=DEFAULT_NUMBER_OF_FILES, file_size=DEFAULT_FILE_SIZE):
        # get subvolume path for IO
        args = ["subvolume", "getpath", self.volname, subvolume]
        if subvolume_group:
            args.append(subvolume_group)
        args = tuple(args)
        subvolpath = self._fs_cmd(*args)
        self.assertNotEqual(subvolpath, None)
        subvolpath = subvolpath[1:].rstrip()  # remove "/" prefix and any trailing newline

        io_path = subvolpath
        if create_dir:
            io_path = os.path.join(subvolpath, create_dir)
            self.mount_a.run_shell_payload(f"mkdir -p {io_path}")

        log.debug("filling subvolume {0} with {1} files, each {2}MB in size, under directory {3}".format(subvolume, number_of_files, file_size, io_path))
        for i in range(number_of_files):
            filename = "{0}.{1}".format(TestVolumes.TEST_FILE_NAME_PREFIX, i)
            self.mount_a.write_n_mb(os.path.join(io_path, filename), file_size)

    def _do_subvolume_io_mixed(self, subvolume, subvolume_group=None):
        subvolpath = self._get_subvolume_path(self.volname, subvolume, group_name=subvolume_group)

        reg_file = "regfile.0"
        dir_path = os.path.join(subvolpath, "dir.0")
        sym_path1 = os.path.join(subvolpath, "sym.0")
        # this symlink's ownership would be changed
        sym_path2 = os.path.join(dir_path, "sym.0")

        self.mount_a.run_shell(["mkdir", dir_path])
        self.mount_a.run_shell(["ln", "-s", "./{}".format(reg_file), sym_path1])
        self.mount_a.run_shell(["ln", "-s", "./{}".format(reg_file), sym_path2])
        # flip ownership to nobody. assumption: nobody's id is 65534
        self.mount_a.run_shell(["chown", "-h", "65534:65534", sym_path2], sudo=True, omit_sudo=False)

    def _wait_for_trash_empty(self, timeout=30):
        # XXX: construct the trash dir path (note that there is no mgr
        # [sub]volume interface for this).
        trashdir = os.path.join("./", "volumes", "_deleting")
        self.mount_a.wait_for_dir_empty(trashdir, timeout=timeout)

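    # Background: removing a subvolume moves it into /volumes/_deleting, from
    # where it is purged asynchronously; tests therefore wait for that
    # directory to drain before asserting cleanup succeeded.
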
    def _assert_meta_location_and_version(self, vol_name, subvol_name, subvol_group=None, version=2, legacy=False):
        if legacy:
            subvol_path = self._get_subvolume_path(vol_name, subvol_name, group_name=subvol_group)
            m = md5()
            m.update(("/"+subvol_path).encode('utf-8'))
            meta_filename = "{0}.meta".format(m.digest().hex())
            metapath = os.path.join(".", "volumes", "_legacy", meta_filename)
        else:
            group = subvol_group if subvol_group is not None else '_nogroup'
            metapath = os.path.join(".", "volumes", group, subvol_name, ".meta")

        out = self.mount_a.run_shell(['cat', metapath], sudo=True)
        lines = out.stdout.getvalue().strip().split('\n')
        sv_version = -1
        for line in lines:
            if line == "version = " + str(version):
                sv_version = version
                break
        self.assertEqual(sv_version, version, "expected version '{0}' but got '{1}' from meta file at '{2}'".format(
            version, sv_version, metapath))

    def _create_v1_subvolume(self, subvol_name, subvol_group=None, has_snapshot=True, subvol_type='subvolume', state='complete'):
        group = subvol_group if subvol_group is not None else '_nogroup'
        basepath = os.path.join("volumes", group, subvol_name)
        uuid_str = str(uuid.uuid4())
        createpath = os.path.join(basepath, uuid_str)
        self.mount_a.run_shell(['mkdir', '-p', createpath], sudo=True)

        # create a v1 snapshot, to prevent auto upgrades
        if has_snapshot:
            snappath = os.path.join(createpath, ".snap", "fake")
            self.mount_a.run_shell(['mkdir', '-p', snappath], sudo=True)

        # add required xattrs to subvolume
        default_pool = self.mount_a.getfattr(".", "ceph.dir.layout.pool")
        self.mount_a.setfattr(createpath, 'ceph.dir.layout.pool', default_pool, sudo=True)

        # create a v1 .meta file
        meta_contents = "[GLOBAL]\nversion = 1\ntype = {0}\npath = {1}\nstate = {2}\n".format(subvol_type, "/" + createpath, state)
        if state == 'pending':
            # add a fake clone source
            meta_contents = meta_contents + '[source]\nvolume = fake\nsubvolume = fake\nsnapshot = fake\n'
        meta_filepath1 = os.path.join(self.mount_a.mountpoint, basepath, ".meta")
        self.mount_a.client_remote.write_file(meta_filepath1, meta_contents, sudo=True)
        return createpath

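    # For reference, the .meta file written above looks like:
    #   [GLOBAL]
    #   version = 1
    #   type = subvolume
    #   path = /volumes/_nogroup/<subvol_name>/<uuid>
    #   state = complete
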
    def _update_fake_trash(self, subvol_name, subvol_group=None, trash_name='fake', create=True):
        group = subvol_group if subvol_group is not None else '_nogroup'
        trashpath = os.path.join("volumes", group, subvol_name, '.trash', trash_name)
        if create:
            self.mount_a.run_shell(['mkdir', '-p', trashpath], sudo=True)
        else:
            self.mount_a.run_shell(['rmdir', trashpath], sudo=True)

    def _configure_guest_auth(self, guest_mount, authid, key):
        """
        Set up auth credentials for a guest client.
        """
        # Create keyring file for the guest client.
        keyring_txt = dedent("""
        [client.{authid}]
        key = {key}

        """.format(authid=authid, key=key))

        guest_mount.client_id = authid
        guest_mount.client_remote.write_file(guest_mount.get_keyring_path(),
                                             keyring_txt, sudo=True)
        # Add a guest client section to the ceph config file.
        self.config_set("client.{0}".format(authid), "debug client", 20)
        self.config_set("client.{0}".format(authid), "debug objecter", 20)
        self.set_conf("client.{0}".format(authid),
                      "keyring", guest_mount.get_keyring_path())

    def _auth_metadata_get(self, filedata):
        """
        Return a deserialized JSON object, or None on decode failure.
        """
        try:
            data = json.loads(filedata)
        except json.decoder.JSONDecodeError:
            data = None
        return data

    def setUp(self):
        super(TestVolumesHelper, self).setUp()
        self.volname = None
        self.vol_created = False
        self._enable_multi_fs()
        self._create_or_reuse_test_volume()
        self.config_set('mon', 'mon_allow_pool_delete', True)
        # starting offsets for the _generate_random_* name helpers
        self.volume_start = random.randint(1, (1<<20))
        self.subvolume_start = random.randint(1, (1<<20))
        self.group_start = random.randint(1, (1<<20))
        self.snapshot_start = random.randint(1, (1<<20))
        self.clone_start = random.randint(1, (1<<20))

    def tearDown(self):
        # remove the test volume only if this test run created it
        if self.vol_created:
            self._delete_test_volume()
        super(TestVolumesHelper, self).tearDown()


class TestVolumes(TestVolumesHelper):
    """Tests for FS volume operations."""
    def test_volume_create(self):
        """
        That a volume can be created and is cleaned up afterwards.
        """
        volname = self._generate_random_volume_name()
        self._fs_cmd("volume", "create", volname)
        volumels = json.loads(self._fs_cmd("volume", "ls"))

        if not (volname in ([volume['name'] for volume in volumels])):
            raise RuntimeError("Error creating volume '{0}'".format(volname))
        else:
            # clean up
            self._fs_cmd("volume", "rm", volname, "--yes-i-really-mean-it")

    def test_volume_ls(self):
        """
        That existing and newly created volumes can be listed, with cleanup
        at the end.
        """
        vls = json.loads(self._fs_cmd("volume", "ls"))
        volumes = [volume['name'] for volume in vls]

        # create new volumes and add them to the existing list of volumes
        volumenames = self._generate_random_volume_name(2)
        for volumename in volumenames:
            self._fs_cmd("volume", "create", volumename)
        volumes.extend(volumenames)

        # list volumes
        try:
            volumels = json.loads(self._fs_cmd('volume', 'ls'))
            if len(volumels) == 0:
                raise RuntimeError("Expected the 'fs volume ls' command to list the created volumes.")
            else:
                volnames = [volume['name'] for volume in volumels]
                if collections.Counter(volnames) != collections.Counter(volumes):
                    raise RuntimeError("Error creating or listing volumes")
        finally:
            # clean up
            for volume in volumenames:
                self._fs_cmd("volume", "rm", volume, "--yes-i-really-mean-it")

    def test_volume_rm(self):
        """
        That a volume can only be removed when --yes-i-really-mean-it is used,
        and that the deleted volume is no longer listed.
        """
        for m in self.mounts:
            m.umount_wait()
        try:
            self._fs_cmd("volume", "rm", self.volname)
        except CommandFailedError as ce:
            if ce.exitstatus != errno.EPERM:
                raise RuntimeError("expected the 'fs volume rm' command to fail with EPERM, "
                                   "but it failed with {0}".format(ce.exitstatus))
            else:
                self._fs_cmd("volume", "rm", self.volname, "--yes-i-really-mean-it")

                # check if it's gone
                volumes = json.loads(self._fs_cmd("volume", "ls", "--format=json-pretty"))
                if (self.volname in [volume['name'] for volume in volumes]):
                    raise RuntimeError("Expected the 'fs volume rm' command to succeed. "
                                       "The volume {0} was not removed.".format(self.volname))
        else:
            raise RuntimeError("expected the 'fs volume rm' command to fail.")

    def test_volume_rm_arbitrary_pool_removal(self):
        """
        That an arbitrary pool added to the volume out of band is removed
        successfully on volume removal.
        """
        for m in self.mounts:
            m.umount_wait()
        new_pool = "new_pool"
        # add arbitrary data pool
        self.fs.add_data_pool(new_pool)
        vol_status = json.loads(self._fs_cmd("status", self.volname, "--format=json-pretty"))
        self._fs_cmd("volume", "rm", self.volname, "--yes-i-really-mean-it")

        # check if fs is gone
        volumes = json.loads(self._fs_cmd("volume", "ls", "--format=json-pretty"))
        volnames = [volume['name'] for volume in volumes]
        self.assertNotIn(self.volname, volnames)

        # check if osd pools are gone
        pools = json.loads(self._raw_cmd("osd", "pool", "ls", "--format=json-pretty"))
        for pool in vol_status["pools"]:
            self.assertNotIn(pool["name"], pools)

    def test_volume_rm_when_mon_delete_pool_false(self):
        """
        That the volume can only be removed when mon_allow_pool_delete is set
        to true, and that its pools are removed after volume deletion.
        """
        for m in self.mounts:
            m.umount_wait()
        self.config_set('mon', 'mon_allow_pool_delete', False)
        try:
            self._fs_cmd("volume", "rm", self.volname, "--yes-i-really-mean-it")
        except CommandFailedError as ce:
            self.assertEqual(ce.exitstatus, errno.EPERM,
                             "expected the 'fs volume rm' command to fail with EPERM, "
                             "but it failed with {0}".format(ce.exitstatus))
        vol_status = json.loads(self._fs_cmd("status", self.volname, "--format=json-pretty"))
        self.config_set('mon', 'mon_allow_pool_delete', True)
        self._fs_cmd("volume", "rm", self.volname, "--yes-i-really-mean-it")

        # check if fs is gone
        volumes = json.loads(self._fs_cmd("volume", "ls", "--format=json-pretty"))
        volnames = [volume['name'] for volume in volumes]
        self.assertNotIn(self.volname, volnames,
                         "volume {0} exists after removal".format(self.volname))
        # check if pools are gone
        pools = json.loads(self._raw_cmd("osd", "pool", "ls", "--format=json-pretty"))
        for pool in vol_status["pools"]:
            self.assertNotIn(pool["name"], pools,
                             "pool {0} exists after volume removal".format(pool["name"]))

    def test_volume_rename(self):
        """
        That a volume, its file system, and its pools can be renamed.
        """
        for m in self.mounts:
            m.umount_wait()
        oldvolname = self.volname
        newvolname = self._generate_random_volume_name()
        new_data_pool, new_metadata_pool = f"cephfs.{newvolname}.data", f"cephfs.{newvolname}.meta"
        self._fs_cmd("volume", "rename", oldvolname, newvolname,
                     "--yes-i-really-mean-it")
        volumels = json.loads(self._fs_cmd('volume', 'ls'))
        volnames = [volume['name'] for volume in volumels]
        # volume name changed
        self.assertIn(newvolname, volnames)
        self.assertNotIn(oldvolname, volnames)
        # pool names changed
        self.fs.get_pool_names(refresh=True)
        self.assertEqual(new_metadata_pool, self.fs.get_metadata_pool_name())
        self.assertEqual(new_data_pool, self.fs.get_data_pool_name())

    def test_volume_rename_idempotency(self):
        """
        That volume rename is idempotent.
        """
        for m in self.mounts:
            m.umount_wait()
        oldvolname = self.volname
        newvolname = self._generate_random_volume_name()
        new_data_pool, new_metadata_pool = f"cephfs.{newvolname}.data", f"cephfs.{newvolname}.meta"
        self._fs_cmd("volume", "rename", oldvolname, newvolname,
                     "--yes-i-really-mean-it")
        self._fs_cmd("volume", "rename", oldvolname, newvolname,
                     "--yes-i-really-mean-it")
        volumels = json.loads(self._fs_cmd('volume', 'ls'))
        volnames = [volume['name'] for volume in volumels]
        self.assertIn(newvolname, volnames)
        self.assertNotIn(oldvolname, volnames)
        self.fs.get_pool_names(refresh=True)
        self.assertEqual(new_metadata_pool, self.fs.get_metadata_pool_name())
        self.assertEqual(new_data_pool, self.fs.get_data_pool_name())

    def test_volume_rename_fails_without_confirmation_flag(self):
        """
        That renaming a volume fails without the --yes-i-really-mean-it flag.
        """
        newvolname = self._generate_random_volume_name()
        try:
            self._fs_cmd("volume", "rename", self.volname, newvolname)
        except CommandFailedError as ce:
            self.assertEqual(ce.exitstatus, errno.EPERM,
                             "invalid error code on renaming a FS volume without the "
                             "'--yes-i-really-mean-it' flag")
        else:
            self.fail("expected renaming of FS volume to fail without the "
                      "'--yes-i-really-mean-it' flag")

    def test_volume_rename_for_more_than_one_data_pool(self):
        """
        That renaming a volume with more than one data pool does not change
        the names of the data pools.
        """
        for m in self.mounts:
            m.umount_wait()
        self.fs.add_data_pool('another-data-pool')
        oldvolname = self.volname
        newvolname = self._generate_random_volume_name()
        self.fs.get_pool_names(refresh=True)
        orig_data_pool_names = list(self.fs.data_pools.values())
        new_metadata_pool = f"cephfs.{newvolname}.meta"
        self._fs_cmd("volume", "rename", self.volname, newvolname,
                     "--yes-i-really-mean-it")
        volumels = json.loads(self._fs_cmd('volume', 'ls'))
        volnames = [volume['name'] for volume in volumels]
        # volume name changed
        self.assertIn(newvolname, volnames)
        self.assertNotIn(oldvolname, volnames)
        self.fs.get_pool_names(refresh=True)
        # metadata pool name changed
        self.assertEqual(new_metadata_pool, self.fs.get_metadata_pool_name())
        # data pool names unchanged
        self.assertCountEqual(orig_data_pool_names, list(self.fs.data_pools.values()))


class TestSubvolumeGroups(TestVolumesHelper):
    """Tests for FS subvolume group operations."""
    def test_default_uid_gid_subvolume_group(self):
        group = self._generate_random_group_name()
        expected_uid = 0
        expected_gid = 0

        # create group
        self._fs_cmd("subvolumegroup", "create", self.volname, group)
        group_path = self._get_subvolume_group_path(self.volname, group)

        # check group's uid and gid
        stat = self.mount_a.stat(group_path)
        self.assertEqual(stat['st_uid'], expected_uid)
        self.assertEqual(stat['st_gid'], expected_gid)

        # remove group
        self._fs_cmd("subvolumegroup", "rm", self.volname, group)

    def test_nonexistent_subvolume_group_create(self):
        subvolume = self._generate_random_subvolume_name()
        group = "non_existent_group"

        # try creating a subvolume in a nonexistent group
        try:
            self._fs_cmd("subvolume", "create", self.volname, subvolume, "--group_name", group)
        except CommandFailedError as ce:
            if ce.exitstatus != errno.ENOENT:
                raise
        else:
            raise RuntimeError("expected the 'fs subvolume create' command to fail")

    def test_nonexistent_subvolume_group_rm(self):
        group = "non_existent_group"

        # try removing the nonexistent subvolume group
        try:
            self._fs_cmd("subvolumegroup", "rm", self.volname, group)
        except CommandFailedError as ce:
            if ce.exitstatus != errno.ENOENT:
                raise
        else:
            raise RuntimeError("expected the 'fs subvolumegroup rm' command to fail")

    def test_subvolume_group_create_with_auto_cleanup_on_fail(self):
        group = self._generate_random_group_name()
        data_pool = "invalid_pool"
        # creating a group with an invalid data pool layout fails
        with self.assertRaises(CommandFailedError):
            self._fs_cmd("subvolumegroup", "create", self.volname, group, "--pool_layout", data_pool)

        # check whether the group path was cleaned up
        try:
            self._fs_cmd("subvolumegroup", "getpath", self.volname, group)
        except CommandFailedError as ce:
            if ce.exitstatus != errno.ENOENT:
                raise
        else:
            raise RuntimeError("expected the 'fs subvolumegroup getpath' command to fail")

    def test_subvolume_group_create_with_desired_data_pool_layout(self):
        group1, group2 = self._generate_random_group_name(2)

        # create group
        self._fs_cmd("subvolumegroup", "create", self.volname, group1)
        group1_path = self._get_subvolume_group_path(self.volname, group1)

        default_pool = self.mount_a.getfattr(group1_path, "ceph.dir.layout.pool")
        new_pool = "new_pool"
        self.assertNotEqual(default_pool, new_pool)

        # add data pool
        newid = self.fs.add_data_pool(new_pool)

        # create group specifying the new data pool as its pool layout
        self._fs_cmd("subvolumegroup", "create", self.volname, group2,
                     "--pool_layout", new_pool)
        group2_path = self._get_subvolume_group_path(self.volname, group2)

        desired_pool = self.mount_a.getfattr(group2_path, "ceph.dir.layout.pool")
        try:
            self.assertEqual(desired_pool, new_pool)
        except AssertionError:
            self.assertEqual(int(desired_pool), newid)  # old kernels return the pool id rather than its name

        self._fs_cmd("subvolumegroup", "rm", self.volname, group1)
        self._fs_cmd("subvolumegroup", "rm", self.volname, group2)

    def test_subvolume_group_create_with_desired_mode(self):
        group1, group2 = self._generate_random_group_name(2)
        # default mode
        expected_mode1 = "755"
        # desired mode
        expected_mode2 = "777"

        # create groups
        self._fs_cmd("subvolumegroup", "create", self.volname, group2, f"--mode={expected_mode2}")
        self._fs_cmd("subvolumegroup", "create", self.volname, group1)

        group1_path = self._get_subvolume_group_path(self.volname, group1)
        group2_path = self._get_subvolume_group_path(self.volname, group2)
        volumes_path = os.path.dirname(group1_path)

        # check group modes
        actual_mode1 = self.mount_a.run_shell(['stat', '-c', '%a', group1_path]).stdout.getvalue().strip()
        actual_mode2 = self.mount_a.run_shell(['stat', '-c', '%a', group2_path]).stdout.getvalue().strip()
        actual_mode3 = self.mount_a.run_shell(['stat', '-c', '%a', volumes_path]).stdout.getvalue().strip()
        self.assertEqual(actual_mode1, expected_mode1)
        self.assertEqual(actual_mode2, expected_mode2)
        self.assertEqual(actual_mode3, expected_mode1)

        self._fs_cmd("subvolumegroup", "rm", self.volname, group1)
        self._fs_cmd("subvolumegroup", "rm", self.volname, group2)

    def test_subvolume_group_create_with_desired_uid_gid(self):
        """
        That the subvolume group can be created with the desired uid and gid,
        and that its uid and gid match the expected values.
        """
        uid = 1000
        gid = 1000

        # create subvolume group
        subvolgroupname = self._generate_random_group_name()
        self._fs_cmd("subvolumegroup", "create", self.volname, subvolgroupname, "--uid", str(uid), "--gid", str(gid))

        # make sure it exists
        subvolgrouppath = self._get_subvolume_group_path(self.volname, subvolgroupname)
        self.assertNotEqual(subvolgrouppath, None)

        # verify the uid and gid
        suid = int(self.mount_a.run_shell(['stat', '-c', '%u', subvolgrouppath]).stdout.getvalue().strip())
        sgid = int(self.mount_a.run_shell(['stat', '-c', '%g', subvolgrouppath]).stdout.getvalue().strip())
        self.assertEqual(uid, suid)
        self.assertEqual(gid, sgid)

        # remove group
        self._fs_cmd("subvolumegroup", "rm", self.volname, subvolgroupname)

    def test_subvolume_group_create_with_invalid_data_pool_layout(self):
        group = self._generate_random_group_name()
        data_pool = "invalid_pool"
        # create group with invalid data pool layout
        try:
            self._fs_cmd("subvolumegroup", "create", self.volname, group, "--pool_layout", data_pool)
        except CommandFailedError as ce:
            if ce.exitstatus != errno.EINVAL:
                raise
        else:
            raise RuntimeError("expected the 'fs subvolumegroup create' command to fail")

    def test_subvolume_group_ls(self):
        # tests the 'fs subvolumegroup ls' command

        subvolumegroups = []

        # create subvolume groups
        subvolumegroups = self._generate_random_group_name(3)
        for groupname in subvolumegroups:
            self._fs_cmd("subvolumegroup", "create", self.volname, groupname)

        subvolumegroupls = json.loads(self._fs_cmd('subvolumegroup', 'ls', self.volname))
        if len(subvolumegroupls) == 0:
            raise RuntimeError("Expected the 'fs subvolumegroup ls' command to list the created subvolume groups")
        else:
            subvolgroupnames = [subvolumegroup['name'] for subvolumegroup in subvolumegroupls]
            if collections.Counter(subvolgroupnames) != collections.Counter(subvolumegroups):
                raise RuntimeError("Error creating or listing subvolume groups")

    def test_subvolume_group_ls_filter(self):
        # tests that the 'fs subvolumegroup ls' command filters out the '_deleting' directory

        subvolumegroups = []

        # create subvolume groups
        subvolumegroups = self._generate_random_group_name(3)
        for groupname in subvolumegroups:
            self._fs_cmd("subvolumegroup", "create", self.volname, groupname)

        # create a subvolume and remove it. This creates the '_deleting' directory.
        subvolume = self._generate_random_subvolume_name()
        self._fs_cmd("subvolume", "create", self.volname, subvolume)
        self._fs_cmd("subvolume", "rm", self.volname, subvolume)

        subvolumegroupls = json.loads(self._fs_cmd('subvolumegroup', 'ls', self.volname))
        subvolgroupnames = [subvolumegroup['name'] for subvolumegroup in subvolumegroupls]
        if "_deleting" in subvolgroupnames:
            self.fail("Listing subvolume groups listed the '_deleting' directory")

    def test_subvolume_group_ls_for_nonexistent_volume(self):
        # tests the 'fs subvolumegroup ls' command when no subvolume group exists
        # prerequisite: we expect the test volume to be created and a subvolume group to NOT be created

        # list subvolume groups
        subvolumegroupls = json.loads(self._fs_cmd('subvolumegroup', 'ls', self.volname))
        if len(subvolumegroupls) > 0:
            raise RuntimeError("Expected the 'fs subvolumegroup ls' command to output an empty list")

    def test_subvolumegroup_pin_distributed(self):
        self.fs.set_max_mds(2)
        status = self.fs.wait_for_daemons()
        self.config_set('mds', 'mds_export_ephemeral_distributed', True)

        group = "pinme"
        self._fs_cmd("subvolumegroup", "create", self.volname, group)
        self._fs_cmd("subvolumegroup", "pin", self.volname, group, "distributed", "True")
        subvolumes = self._generate_random_subvolume_name(50)
        for subvolume in subvolumes:
            self._fs_cmd("subvolume", "create", self.volname, subvolume, "--group_name", group)
        self._wait_distributed_subtrees(2 * 2, status=status, rank="all")

        # remove subvolumes
        for subvolume in subvolumes:
            self._fs_cmd("subvolume", "rm", self.volname, subvolume, group)

        # verify trash dir is clean
        self._wait_for_trash_empty()

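    # Note: with distributed ephemeral pinning enabled, the MDS spreads the
    # group's immediate children across the active ranks; the test waits for
    # 2 * 2 distributed subtrees across both ranks before cleaning up.
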
    def test_subvolume_group_rm_force(self):
        # test removing a non-existent subvolume group with --force
        group = self._generate_random_group_name()
        try:
            self._fs_cmd("subvolumegroup", "rm", self.volname, group, "--force")
        except CommandFailedError:
            raise RuntimeError("expected the 'fs subvolumegroup rm --force' command to succeed")


class TestSubvolumes(TestVolumesHelper):
    """Tests for FS subvolume operations, except snapshot and snapshot clone."""
    def test_async_subvolume_rm(self):
        subvolumes = self._generate_random_subvolume_name(100)

        # create subvolumes
        for subvolume in subvolumes:
            self._fs_cmd("subvolume", "create", self.volname, subvolume, "--mode=777")
            self._do_subvolume_io(subvolume, number_of_files=10)

        self.mount_a.umount_wait()

        # remove subvolumes
        for subvolume in subvolumes:
            self._fs_cmd("subvolume", "rm", self.volname, subvolume)

        self.mount_a.mount_wait()

        # verify trash dir is clean
        self._wait_for_trash_empty(timeout=300)

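    # Note: this exercises the asynchronous purge path at scale: the 100
    # "subvolume rm" calls return quickly while data deletion drains through
    # /volumes/_deleting, hence the longer 300s trash timeout.
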
    def test_default_uid_gid_subvolume(self):
        subvolume = self._generate_random_subvolume_name()
        expected_uid = 0
        expected_gid = 0

        # create subvolume
        self._fs_cmd("subvolume", "create", self.volname, subvolume)
        subvol_path = self._get_subvolume_path(self.volname, subvolume)

        # check subvolume's uid and gid
        stat = self.mount_a.stat(subvol_path)
        self.assertEqual(stat['st_uid'], expected_uid)
        self.assertEqual(stat['st_gid'], expected_gid)

        # remove subvolume
        self._fs_cmd("subvolume", "rm", self.volname, subvolume)

        # verify trash dir is clean
        self._wait_for_trash_empty()

    def test_nonexistent_subvolume_rm(self):
        # remove a non-existent subvolume
        subvolume = "non_existent_subvolume"

        # try removing the subvolume
        try:
            self._fs_cmd("subvolume", "rm", self.volname, subvolume)
        except CommandFailedError as ce:
            if ce.exitstatus != errno.ENOENT:
                raise
        else:
            raise RuntimeError("expected the 'fs subvolume rm' command to fail")

    def test_subvolume_create_and_rm(self):
        # create subvolume
        subvolume = self._generate_random_subvolume_name()
        self._fs_cmd("subvolume", "create", self.volname, subvolume)

        # make sure it exists
        subvolpath = self._fs_cmd("subvolume", "getpath", self.volname, subvolume)
        self.assertNotEqual(subvolpath, None)

        # remove subvolume
        self._fs_cmd("subvolume", "rm", self.volname, subvolume)
        # make sure it's gone
        try:
            self._fs_cmd("subvolume", "getpath", self.volname, subvolume)
        except CommandFailedError as ce:
            if ce.exitstatus != errno.ENOENT:
                raise
        else:
            raise RuntimeError("expected the 'fs subvolume getpath' command to fail. Subvolume not removed.")

        # verify trash dir is clean
        self._wait_for_trash_empty()

    def test_subvolume_create_and_rm_in_group(self):
        subvolume = self._generate_random_subvolume_name()
        group = self._generate_random_group_name()

        # create group
        self._fs_cmd("subvolumegroup", "create", self.volname, group)

        # create subvolume in group
        self._fs_cmd("subvolume", "create", self.volname, subvolume, "--group_name", group)

        # remove subvolume
        self._fs_cmd("subvolume", "rm", self.volname, subvolume, group)

        # verify trash dir is clean
        self._wait_for_trash_empty()

        # remove group
        self._fs_cmd("subvolumegroup", "rm", self.volname, group)

    def test_subvolume_create_idempotence(self):
        # create subvolume
        subvolume = self._generate_random_subvolume_name()
        self._fs_cmd("subvolume", "create", self.volname, subvolume)

        # try creating w/ same subvolume name -- should be idempotent
        self._fs_cmd("subvolume", "create", self.volname, subvolume)

        # remove subvolume
        self._fs_cmd("subvolume", "rm", self.volname, subvolume)

        # verify trash dir is clean
        self._wait_for_trash_empty()

    def test_subvolume_create_idempotence_resize(self):
        # create subvolume
        subvolume = self._generate_random_subvolume_name()
        self._fs_cmd("subvolume", "create", self.volname, subvolume)

        # try creating w/ same subvolume name with size -- should set quota
        self._fs_cmd("subvolume", "create", self.volname, subvolume, "1000000000")

        # get subvolume metadata
        subvol_info = json.loads(self._get_subvolume_info(self.volname, subvolume))
        self.assertEqual(subvol_info["bytes_quota"], 1000000000)

        # remove subvolume
        self._fs_cmd("subvolume", "rm", self.volname, subvolume)

        # verify trash dir is clean
        self._wait_for_trash_empty()

    def test_subvolume_create_idempotence_mode(self):
        # default mode
        default_mode = "755"

        # create subvolume
        subvolume = self._generate_random_subvolume_name()
        self._fs_cmd("subvolume", "create", self.volname, subvolume)

        subvol_path = self._get_subvolume_path(self.volname, subvolume)

        actual_mode_1 = self.mount_a.run_shell(['stat', '-c', '%a', subvol_path]).stdout.getvalue().strip()
        self.assertEqual(actual_mode_1, default_mode)

        # try creating w/ same subvolume name with --mode 777
        new_mode = "777"
        self._fs_cmd("subvolume", "create", self.volname, subvolume, "--mode", new_mode)

        actual_mode_2 = self.mount_a.run_shell(['stat', '-c', '%a', subvol_path]).stdout.getvalue().strip()
        self.assertEqual(actual_mode_2, new_mode)

        # remove subvolume
        self._fs_cmd("subvolume", "rm", self.volname, subvolume)

        # verify trash dir is clean
        self._wait_for_trash_empty()

    def test_subvolume_create_idempotence_without_passing_mode(self):
        # create subvolume
        desired_mode = "777"
        subvolume = self._generate_random_subvolume_name()
        self._fs_cmd("subvolume", "create", self.volname, subvolume, "--mode", desired_mode)

        subvol_path = self._get_subvolume_path(self.volname, subvolume)

        actual_mode_1 = self.mount_a.run_shell(['stat', '-c', '%a', subvol_path]).stdout.getvalue().strip()
        self.assertEqual(actual_mode_1, desired_mode)

        # default mode
        default_mode = "755"

        # try creating w/ same subvolume name without passing the --mode argument
        self._fs_cmd("subvolume", "create", self.volname, subvolume)

        actual_mode_2 = self.mount_a.run_shell(['stat', '-c', '%a', subvol_path]).stdout.getvalue().strip()
        self.assertEqual(actual_mode_2, default_mode)

        # remove subvolume
        self._fs_cmd("subvolume", "rm", self.volname, subvolume)

        # verify trash dir is clean
        self._wait_for_trash_empty()

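    # Note: per the assertions above, re-issuing "subvolume create" on an
    # existing subvolume acts as an update; omitting --mode reapplies the
    # default 0755 rather than preserving the earlier 0777.
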
    def test_subvolume_create_isolated_namespace(self):
        """
        Create a subvolume in a separate RADOS namespace.
        """

        # create subvolume
        subvolume = self._generate_random_subvolume_name()
        self._fs_cmd("subvolume", "create", self.volname, subvolume, "--namespace-isolated")

        # get subvolume metadata
        subvol_info = json.loads(self._get_subvolume_info(self.volname, subvolume))
        self.assertNotEqual(len(subvol_info), 0)
        self.assertEqual(subvol_info["pool_namespace"], "fsvolumens_" + subvolume)

        # remove subvolume
        self._fs_cmd("subvolume", "rm", self.volname, subvolume)

        # verify trash dir is clean
        self._wait_for_trash_empty()

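    # Note: --namespace-isolated places the subvolume's objects in a dedicated
    # RADOS namespace (fsvolumens_<subvolume>, per the assertion above), which
    # allows client authorization to be scoped to just that namespace.
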
    def test_subvolume_create_with_auto_cleanup_on_fail(self):
        subvolume = self._generate_random_subvolume_name()
        data_pool = "invalid_pool"
        # creating a subvolume with an invalid data pool layout fails
        with self.assertRaises(CommandFailedError):
            self._fs_cmd("subvolume", "create", self.volname, subvolume, "--pool_layout", data_pool)

        # check whether the subvol path was cleaned up
        try:
            self._fs_cmd("subvolume", "getpath", self.volname, subvolume)
        except CommandFailedError as ce:
            self.assertEqual(ce.exitstatus, errno.ENOENT, "invalid error code on getpath of non-existent subvolume")
        else:
            self.fail("expected the 'fs subvolume getpath' command to fail")

        # verify trash dir is clean
        self._wait_for_trash_empty()

    def test_subvolume_create_with_desired_data_pool_layout_in_group(self):
        subvol1, subvol2 = self._generate_random_subvolume_name(2)
        group = self._generate_random_group_name()

        # create group. this also helps set the default pool layout for
        # subvolumes created within the group.
        self._fs_cmd("subvolumegroup", "create", self.volname, group)

        # create subvolume in group.
        self._fs_cmd("subvolume", "create", self.volname, subvol1, "--group_name", group)
        subvol1_path = self._get_subvolume_path(self.volname, subvol1, group_name=group)

        default_pool = self.mount_a.getfattr(subvol1_path, "ceph.dir.layout.pool")
        new_pool = "new_pool"
        self.assertNotEqual(default_pool, new_pool)

        # add data pool
        newid = self.fs.add_data_pool(new_pool)

        # create subvolume specifying the new data pool as its pool layout
        self._fs_cmd("subvolume", "create", self.volname, subvol2, "--group_name", group,
                     "--pool_layout", new_pool)
        subvol2_path = self._get_subvolume_path(self.volname, subvol2, group_name=group)

        desired_pool = self.mount_a.getfattr(subvol2_path, "ceph.dir.layout.pool")
        try:
            self.assertEqual(desired_pool, new_pool)
        except AssertionError:
            self.assertEqual(int(desired_pool), newid)  # old kernels return the pool id rather than its name

        self._fs_cmd("subvolume", "rm", self.volname, subvol2, group)
        self._fs_cmd("subvolume", "rm", self.volname, subvol1, group)
        self._fs_cmd("subvolumegroup", "rm", self.volname, group)

        # verify trash dir is clean
        self._wait_for_trash_empty()

    def test_subvolume_create_with_desired_mode(self):
        subvol1 = self._generate_random_subvolume_name()

        # default mode
        default_mode = "755"
        # desired mode
        desired_mode = "777"

        self._fs_cmd("subvolume", "create", self.volname, subvol1, "--mode", "777")

        subvol1_path = self._get_subvolume_path(self.volname, subvol1)

        # check the subvolume group's mode
        subvol_par_path = os.path.dirname(subvol1_path)
        group_path = os.path.dirname(subvol_par_path)
        actual_mode1 = self.mount_a.run_shell(['stat', '-c', '%a', group_path]).stdout.getvalue().strip()
        self.assertEqual(actual_mode1, default_mode)
        # check /volumes mode
        volumes_path = os.path.dirname(group_path)
        actual_mode2 = self.mount_a.run_shell(['stat', '-c', '%a', volumes_path]).stdout.getvalue().strip()
        self.assertEqual(actual_mode2, default_mode)
        # check the subvolume's mode
        actual_mode3 = self.mount_a.run_shell(['stat', '-c', '%a', subvol1_path]).stdout.getvalue().strip()
        self.assertEqual(actual_mode3, desired_mode)

        self._fs_cmd("subvolume", "rm", self.volname, subvol1)

        # verify trash dir is clean
        self._wait_for_trash_empty()

    def test_subvolume_create_with_desired_mode_in_group(self):
        subvol1, subvol2, subvol3 = self._generate_random_subvolume_name(3)

        group = self._generate_random_group_name()
        # default mode
        expected_mode1 = "755"
        # desired mode
        expected_mode2 = "777"

        # create group
        self._fs_cmd("subvolumegroup", "create", self.volname, group)

        # create subvolumes in group
        self._fs_cmd("subvolume", "create", self.volname, subvol1, "--group_name", group)
        self._fs_cmd("subvolume", "create", self.volname, subvol2, "--group_name", group, "--mode", "777")
        # check whether mode 0777 also works
        self._fs_cmd("subvolume", "create", self.volname, subvol3, "--group_name", group, "--mode", "0777")

        subvol1_path = self._get_subvolume_path(self.volname, subvol1, group_name=group)
        subvol2_path = self._get_subvolume_path(self.volname, subvol2, group_name=group)
        subvol3_path = self._get_subvolume_path(self.volname, subvol3, group_name=group)

        # check subvolume modes
        actual_mode1 = self.mount_a.run_shell(['stat', '-c', '%a', subvol1_path]).stdout.getvalue().strip()
        actual_mode2 = self.mount_a.run_shell(['stat', '-c', '%a', subvol2_path]).stdout.getvalue().strip()
        actual_mode3 = self.mount_a.run_shell(['stat', '-c', '%a', subvol3_path]).stdout.getvalue().strip()
        self.assertEqual(actual_mode1, expected_mode1)
        self.assertEqual(actual_mode2, expected_mode2)
        self.assertEqual(actual_mode3, expected_mode2)

        self._fs_cmd("subvolume", "rm", self.volname, subvol1, group)
        self._fs_cmd("subvolume", "rm", self.volname, subvol2, group)
        self._fs_cmd("subvolume", "rm", self.volname, subvol3, group)
        self._fs_cmd("subvolumegroup", "rm", self.volname, group)

        # verify trash dir is clean
        self._wait_for_trash_empty()

    def test_subvolume_create_with_desired_uid_gid(self):
        """
        That the subvolume can be created with the desired uid and gid,
        and that its uid and gid match the expected values.
        """
        uid = 1000
        gid = 1000

        # create subvolume
        subvolname = self._generate_random_subvolume_name()
        self._fs_cmd("subvolume", "create", self.volname, subvolname, "--uid", str(uid), "--gid", str(gid))

        # make sure it exists
        subvolpath = self._get_subvolume_path(self.volname, subvolname)
        self.assertNotEqual(subvolpath, None)

        # verify the uid and gid
        suid = int(self.mount_a.run_shell(['stat', '-c', '%u', subvolpath]).stdout.getvalue().strip())
        sgid = int(self.mount_a.run_shell(['stat', '-c', '%g', subvolpath]).stdout.getvalue().strip())
        self.assertEqual(uid, suid)
        self.assertEqual(gid, sgid)

        # remove subvolume
        self._fs_cmd("subvolume", "rm", self.volname, subvolname)

        # verify trash dir is clean
        self._wait_for_trash_empty()

    def test_subvolume_create_with_invalid_data_pool_layout(self):
        subvolume = self._generate_random_subvolume_name()
        data_pool = "invalid_pool"
        # create subvolume with invalid data pool layout
        try:
            self._fs_cmd("subvolume", "create", self.volname, subvolume, "--pool_layout", data_pool)
        except CommandFailedError as ce:
            self.assertEqual(ce.exitstatus, errno.EINVAL, "invalid error code on create of subvolume with invalid pool layout")
        else:
            self.fail("expected the 'fs subvolume create' command to fail")

        # verify trash dir is clean
        self._wait_for_trash_empty()

    def test_subvolume_create_with_invalid_size(self):
        # create subvolume with an invalid size -1
        subvolume = self._generate_random_subvolume_name()
        try:
            self._fs_cmd("subvolume", "create", self.volname, subvolume, "--size", "-1")
        except CommandFailedError as ce:
            self.assertEqual(ce.exitstatus, errno.EINVAL, "invalid error code on create of subvolume with invalid size")
        else:
            self.fail("expected the 'fs subvolume create' command to fail")

        # verify trash dir is clean
        self._wait_for_trash_empty()

    def test_subvolume_expand(self):
        """
        That a subvolume can be expanded in size and its quota matches the expected size.
        """

        # create subvolume
        subvolname = self._generate_random_subvolume_name()
        osize = self.DEFAULT_FILE_SIZE*1024*1024
        self._fs_cmd("subvolume", "create", self.volname, subvolname, "--size", str(osize))

        # make sure it exists
        subvolpath = self._get_subvolume_path(self.volname, subvolname)
        self.assertNotEqual(subvolpath, None)

        # expand the subvolume
        nsize = osize*2
        self._fs_cmd("subvolume", "resize", self.volname, subvolname, str(nsize))

        # verify the quota
        size = int(self.mount_a.getfattr(subvolpath, "ceph.quota.max_bytes"))
        self.assertEqual(size, nsize)

        # remove subvolume
        self._fs_cmd("subvolume", "rm", self.volname, subvolname)

        # verify trash dir is clean
        self._wait_for_trash_empty()

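    # Note: a subvolume's size is implemented as the ceph.quota.max_bytes
    # xattr on the subvolume root, which is why the resize above is verified
    # by reading that xattr directly.
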
    def test_subvolume_info(self):
        # tests the 'fs subvolume info' command

        subvol_md = ["atime", "bytes_pcent", "bytes_quota", "bytes_used", "created_at", "ctime",
                     "data_pool", "gid", "mode", "mon_addrs", "mtime", "path", "pool_namespace",
                     "type", "uid", "features", "state"]

        # create subvolume
        subvolume = self._generate_random_subvolume_name()
        self._fs_cmd("subvolume", "create", self.volname, subvolume)

        # get subvolume metadata
        subvol_info = json.loads(self._get_subvolume_info(self.volname, subvolume))
        for md in subvol_md:
            self.assertIn(md, subvol_info, "'{0}' key not present in metadata of subvolume".format(md))

        self.assertEqual(subvol_info["bytes_pcent"], "undefined", "bytes_pcent should be set to undefined if quota is not set")
        self.assertEqual(subvol_info["bytes_quota"], "infinite", "bytes_quota should be set to infinite if quota is not set")
        self.assertEqual(subvol_info["pool_namespace"], "", "expected pool namespace to be empty")
        self.assertEqual(subvol_info["state"], "complete", "expected state to be complete")

        self.assertEqual(len(subvol_info["features"]), 3,
                         msg="expected 3 features, found '{0}' ({1})".format(len(subvol_info["features"]), subvol_info["features"]))
        for feature in ['snapshot-clone', 'snapshot-autoprotect', 'snapshot-retention']:
            self.assertIn(feature, subvol_info["features"], msg="expected feature '{0}' in subvolume".format(feature))

        nsize = self.DEFAULT_FILE_SIZE*1024*1024
        self._fs_cmd("subvolume", "resize", self.volname, subvolume, str(nsize))

        # get subvolume metadata after quota set
        subvol_info = json.loads(self._get_subvolume_info(self.volname, subvolume))
        for md in subvol_md:
            self.assertIn(md, subvol_info, "'{0}' key not present in metadata of subvolume".format(md))

        self.assertNotEqual(subvol_info["bytes_pcent"], "undefined", "bytes_pcent should not be undefined once quota is set")
        self.assertEqual(subvol_info["bytes_quota"], nsize, "bytes_quota should be set to '{0}'".format(nsize))
        self.assertEqual(subvol_info["type"], "subvolume", "type should be set to subvolume")
        self.assertEqual(subvol_info["state"], "complete", "expected state to be complete")

        self.assertEqual(len(subvol_info["features"]), 3,
                         msg="expected 3 features, found '{0}' ({1})".format(len(subvol_info["features"]), subvol_info["features"]))
        for feature in ['snapshot-clone', 'snapshot-autoprotect', 'snapshot-retention']:
            self.assertIn(feature, subvol_info["features"], msg="expected feature '{0}' in subvolume".format(feature))

        # remove subvolume
        self._fs_cmd("subvolume", "rm", self.volname, subvolume)

        # verify trash dir is clean
        self._wait_for_trash_empty()

    def test_subvolume_ls(self):
        # tests the 'fs subvolume ls' command

        subvolumes = []

        # create subvolumes
        subvolumes = self._generate_random_subvolume_name(3)
        for subvolume in subvolumes:
            self._fs_cmd("subvolume", "create", self.volname, subvolume)

        # list subvolumes
        subvolumels = json.loads(self._fs_cmd('subvolume', 'ls', self.volname))
        if len(subvolumels) == 0:
            self.fail("Expected the 'fs subvolume ls' command to list the created subvolumes.")
        else:
            subvolnames = [subvolume['name'] for subvolume in subvolumels]
            if collections.Counter(subvolnames) != collections.Counter(subvolumes):
                self.fail("Error creating or listing subvolumes")

        # remove subvolumes
        for subvolume in subvolumes:
            self._fs_cmd("subvolume", "rm", self.volname, subvolume)

        # verify trash dir is clean
        self._wait_for_trash_empty()

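    # Hedged CLI sketch (illustrative, not from the suite): 'fs subvolume ls'
    # returns a JSON array of name objects, which is exactly what the test parses:
    #
    #   $ ceph fs subvolume ls <vol_name>
    #   [{"name": "subvolume_0"}, {"name": "subvolume_1"}, {"name": "subvolume_2"}]
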
    def test_subvolume_ls_for_notexistent_default_group(self):
        # tests the 'fs subvolume ls' command when the default group '_nogroup' doesn't exist
        # prerequisite: we expect that the volume is created and the default group _nogroup is
        # NOT created (i.e. a subvolume without a group is not created)

        # list subvolumes
        subvolumels = json.loads(self._fs_cmd('subvolume', 'ls', self.volname))
        if len(subvolumels) > 0:
            raise RuntimeError("Expected the 'fs subvolume ls' command to output an empty list.")

    def test_subvolume_marked(self):
        """
        ensure a subvolume is marked with the ceph.dir.subvolume xattr
        """
        subvolume = self._generate_random_subvolume_name()

        # create subvolume
        self._fs_cmd("subvolume", "create", self.volname, subvolume)

        # getpath
        subvolpath = self._get_subvolume_path(self.volname, subvolume)

        # a subdirectory of a subvolume cannot be moved outside the subvolume once marked with
        # the xattr ceph.dir.subvolume, hence test by attempting to rename the subvol path
        # (incarnation) outside the subvolume
        dstpath = os.path.join(self.mount_a.mountpoint, 'volumes', '_nogroup', 'new_subvol_location')
        srcpath = os.path.join(self.mount_a.mountpoint, subvolpath)
        rename_script = dedent("""
            import os
            import errno
            try:
                os.rename("{src}", "{dst}")
            except OSError as e:
                if e.errno != errno.EXDEV:
                    raise RuntimeError("invalid error code on renaming subvolume incarnation out of subvolume directory")
            else:
                raise RuntimeError("expected renaming subvolume incarnation out of subvolume directory to fail")
            """)
        self.mount_a.run_python(rename_script.format(src=srcpath, dst=dstpath), sudo=True)

        # remove subvolume
        self._fs_cmd("subvolume", "rm", self.volname, subvolume)

        # verify trash dir is clean
        self._wait_for_trash_empty()

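    # Hedged illustration (assumption, not from the suite): the subvolume marker
    # is a vxattr, so it should be inspectable from a client mount, e.g.
    #
    #   $ getfattr -n ceph.dir.subvolume /mnt/cephfs/volumes/_nogroup/<subvol>/<uuid>
    #
    # The mount point and path layout above are placeholders.
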
    def test_subvolume_pin_export(self):
        self.fs.set_max_mds(2)
        status = self.fs.wait_for_daemons()

        subvolume = self._generate_random_subvolume_name()
        self._fs_cmd("subvolume", "create", self.volname, subvolume)
        self._fs_cmd("subvolume", "pin", self.volname, subvolume, "export", "1")
        path = self._fs_cmd("subvolume", "getpath", self.volname, subvolume)
        path = os.path.dirname(path) # get subvolume path

        self._get_subtrees(status=status, rank=1)
        self._wait_subtrees([(path, 1)], status=status)

        # remove subvolume
        self._fs_cmd("subvolume", "rm", self.volname, subvolume)

        # verify trash dir is clean
        self._wait_for_trash_empty()

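    # Hedged CLI sketch (illustrative): the export pin exercised above maps to
    #
    #   $ ceph fs subvolume pin <vol_name> <subvol_name> export 1
    #
    # which pins the subvolume's subtree to MDS rank 1.
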
    ### authorize operations

    def test_authorize_deauthorize_legacy_subvolume(self):
        subvolume = self._generate_random_subvolume_name()
        group = self._generate_random_group_name()
        authid = "alice"

        guest_mount = self.mount_b
        guest_mount.umount_wait()

        # emulate an old-fashioned subvolume in a custom group
        createpath = os.path.join(".", "volumes", group, subvolume)
        self.mount_a.run_shell(['mkdir', '-p', createpath], sudo=True)

        # add required xattrs to subvolume
        default_pool = self.mount_a.getfattr(".", "ceph.dir.layout.pool")
        self.mount_a.setfattr(createpath, 'ceph.dir.layout.pool', default_pool, sudo=True)

        mount_path = os.path.join("/", "volumes", group, subvolume)

        # authorize guest authID read-write access to subvolume
        key = self._fs_cmd("subvolume", "authorize", self.volname, subvolume, authid,
                           "--group_name", group, "--tenant_id", "tenant_id")

        # guest authID should exist
        existing_ids = [a['entity'] for a in self.auth_list()]
        self.assertIn("client.{0}".format(authid), existing_ids)

        # configure credentials for guest client
        self._configure_guest_auth(guest_mount, authid, key)

        # mount the subvolume, and write to it
        guest_mount.mount_wait(cephfs_mntpt=mount_path)
        guest_mount.write_n_mb("data.bin", 1)

        # authorize guest authID read access to subvolume
        key = self._fs_cmd("subvolume", "authorize", self.volname, subvolume, authid,
                           "--group_name", group, "--tenant_id", "tenant_id", "--access_level", "r")

        # guest client sees the change in access level to read only after a
        # remount of the subvolume.
        guest_mount.umount_wait()
        guest_mount.mount_wait(cephfs_mntpt=mount_path)

        # read existing content of the subvolume
        self.assertListEqual(guest_mount.ls(guest_mount.mountpoint), ["data.bin"])
        # cannot write into read-only subvolume
        with self.assertRaises(CommandFailedError):
            guest_mount.write_n_mb("rogue.bin", 1)

        # cleanup
        guest_mount.umount_wait()
        self._fs_cmd("subvolume", "deauthorize", self.volname, subvolume, authid,
                     "--group_name", group)
        # guest authID should no longer exist
        existing_ids = [a['entity'] for a in self.auth_list()]
        self.assertNotIn("client.{0}".format(authid), existing_ids)
        self._fs_cmd("subvolume", "rm", self.volname, subvolume, "--group_name", group)
        self._fs_cmd("subvolumegroup", "rm", self.volname, group)

    def test_authorize_deauthorize_subvolume(self):
        subvolume = self._generate_random_subvolume_name()
        group = self._generate_random_group_name()
        authid = "alice"

        guest_mount = self.mount_b
        guest_mount.umount_wait()

        # create group
        self._fs_cmd("subvolumegroup", "create", self.volname, group, "--mode=777")

        # create subvolume in group
        self._fs_cmd("subvolume", "create", self.volname, subvolume, "--group_name", group)
        mount_path = self._fs_cmd("subvolume", "getpath", self.volname, subvolume,
                                  "--group_name", group).rstrip()

        # authorize guest authID read-write access to subvolume
        key = self._fs_cmd("subvolume", "authorize", self.volname, subvolume, authid,
                           "--group_name", group, "--tenant_id", "tenant_id")

        # guest authID should exist
        existing_ids = [a['entity'] for a in self.auth_list()]
        self.assertIn("client.{0}".format(authid), existing_ids)

        # configure credentials for guest client
        self._configure_guest_auth(guest_mount, authid, key)

        # mount the subvolume, and write to it
        guest_mount.mount_wait(cephfs_mntpt=mount_path)
        guest_mount.write_n_mb("data.bin", 1)

        # authorize guest authID read access to subvolume
        key = self._fs_cmd("subvolume", "authorize", self.volname, subvolume, authid,
                           "--group_name", group, "--tenant_id", "tenant_id", "--access_level", "r")

        # guest client sees the change in access level to read only after a
        # remount of the subvolume.
        guest_mount.umount_wait()
        guest_mount.mount_wait(cephfs_mntpt=mount_path)

        # read existing content of the subvolume
        self.assertListEqual(guest_mount.ls(guest_mount.mountpoint), ["data.bin"])
        # cannot write into read-only subvolume
        with self.assertRaises(CommandFailedError):
            guest_mount.write_n_mb("rogue.bin", 1)

        # cleanup
        guest_mount.umount_wait()
        self._fs_cmd("subvolume", "deauthorize", self.volname, subvolume, authid,
                     "--group_name", group)
        # guest authID should no longer exist
        existing_ids = [a['entity'] for a in self.auth_list()]
        self.assertNotIn("client.{0}".format(authid), existing_ids)
        self._fs_cmd("subvolume", "rm", self.volname, subvolume, "--group_name", group)
        self._fs_cmd("subvolumegroup", "rm", self.volname, group)

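    # Hedged CLI sketch (illustrative): the authorize/deauthorize round trip in
    # the two tests above maps to
    #
    #   $ ceph fs subvolume authorize <vol> <subvol> alice --group_name <grp> --access_level r
    #   $ ceph fs subvolume deauthorize <vol> <subvol> alice --group_name <grp>
    #
    # 'authorize' prints the client key, which the guest then uses to mount.
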
    def test_multitenant_subvolumes(self):
        """
        That subvolume access can be restricted to a tenant.

        That metadata used to enforce tenant isolation of
        subvolumes is stored as a two-way mapping between auth
        IDs and subvolumes that they're authorized to access.
        """
        subvolume = self._generate_random_subvolume_name()
        group = self._generate_random_group_name()

        guest_mount = self.mount_b

        # Guest clients belonging to different tenants, but using the same
        # auth ID.
        auth_id = "alice"
        guestclient_1 = {
            "auth_id": auth_id,
            "tenant_id": "tenant1",
        }
        guestclient_2 = {
            "auth_id": auth_id,
            "tenant_id": "tenant2",
        }

        # create group
        self._fs_cmd("subvolumegroup", "create", self.volname, group)

        # create subvolume in group
        self._fs_cmd("subvolume", "create", self.volname, subvolume, "--group_name", group)

        # Check that the subvolume metadata file is created on subvolume creation.
        subvol_metadata_filename = "_{0}:{1}.meta".format(group, subvolume)
        self.assertIn(subvol_metadata_filename, guest_mount.ls("volumes"))

        # Authorize 'guestclient_1', using auth ID 'alice' and belonging to
        # 'tenant1', with 'rw' access to the volume.
        self._fs_cmd("subvolume", "authorize", self.volname, subvolume, guestclient_1["auth_id"],
                     "--group_name", group, "--tenant_id", guestclient_1["tenant_id"])

        # Check that the auth metadata file for auth ID 'alice' is
        # created on authorizing 'alice' access to the subvolume.
        auth_metadata_filename = "${0}.meta".format(guestclient_1["auth_id"])
        self.assertIn(auth_metadata_filename, guest_mount.ls("volumes"))

        # Verify that the auth metadata file stores the tenant ID that the
        # auth ID belongs to, the auth ID's authorized access levels
        # for different subvolumes, versioning details, etc.
        expected_auth_metadata = {
            "version": 5,
            "compat_version": 6,
            "dirty": False,
            "tenant_id": "tenant1",
            "subvolumes": {
                "{0}/{1}".format(group,subvolume): {
                    "dirty": False,
                    "access_level": "rw"
                }
            }
        }

        auth_metadata = self._auth_metadata_get(guest_mount.read_file("volumes/{0}".format(auth_metadata_filename)))
        self.assertGreaterEqual(auth_metadata["version"], expected_auth_metadata["version"])
        del expected_auth_metadata["version"]
        del auth_metadata["version"]
        self.assertEqual(expected_auth_metadata, auth_metadata)

        # Verify that the subvolume metadata file stores info about auth IDs
        # and their access levels to the subvolume, versioning details, etc.
        expected_subvol_metadata = {
            "version": 1,
            "compat_version": 1,
            "auths": {
                "alice": {
                    "dirty": False,
                    "access_level": "rw"
                }
            }
        }
        subvol_metadata = self._auth_metadata_get(guest_mount.read_file("volumes/{0}".format(subvol_metadata_filename)))

        self.assertGreaterEqual(subvol_metadata["version"], expected_subvol_metadata["version"])
        del expected_subvol_metadata["version"]
        del subvol_metadata["version"]
        self.assertEqual(expected_subvol_metadata, subvol_metadata)

        # Cannot authorize 'guestclient_2' to access the volume.
        # It uses auth ID 'alice', which has already been used by
        # 'guestclient_1', belonging to another tenant, for accessing
        # the volume.

        try:
            self._fs_cmd("subvolume", "authorize", self.volname, subvolume, guestclient_2["auth_id"],
                         "--group_name", group, "--tenant_id", guestclient_2["tenant_id"])
        except CommandFailedError as ce:
            self.assertEqual(ce.exitstatus, errno.EPERM,
                             "Invalid error code returned on authorize of subvolume with same auth_id but different tenant_id")
        else:
            self.fail("expected the 'fs subvolume authorize' command to fail")

        # Check that the auth metadata file is cleaned up on removing
        # the auth ID's only access to a volume.

        self._fs_cmd("subvolume", "deauthorize", self.volname, subvolume, auth_id,
                     "--group_name", group)
        self.assertNotIn(auth_metadata_filename, guest_mount.ls("volumes"))

        # Check that the subvolume metadata file is cleaned up on subvolume deletion.
        self._fs_cmd("subvolume", "rm", self.volname, subvolume, "--group_name", group)
        self.assertNotIn(subvol_metadata_filename, guest_mount.ls("volumes"))

        # clean up
        guest_mount.umount_wait()
        self._fs_cmd("subvolumegroup", "rm", self.volname, group)

    def test_subvolume_authorized_list(self):
        subvolume = self._generate_random_subvolume_name()
        group = self._generate_random_group_name()
        authid1 = "alice"
        authid2 = "guest1"
        authid3 = "guest2"

        # create group
        self._fs_cmd("subvolumegroup", "create", self.volname, group)

        # create subvolume in group
        self._fs_cmd("subvolume", "create", self.volname, subvolume, "--group_name", group)

        # authorize alice authID read-write access to subvolume
        self._fs_cmd("subvolume", "authorize", self.volname, subvolume, authid1,
                     "--group_name", group)
        # authorize guest1 authID read-write access to subvolume
        self._fs_cmd("subvolume", "authorize", self.volname, subvolume, authid2,
                     "--group_name", group)
        # authorize guest2 authID read access to subvolume
        self._fs_cmd("subvolume", "authorize", self.volname, subvolume, authid3,
                     "--group_name", group, "--access_level", "r")

        # list authorized-ids of the subvolume
        expected_auth_list = [{'alice': 'rw'}, {'guest1': 'rw'}, {'guest2': 'r'}]
        auth_list = json.loads(self._fs_cmd('subvolume', 'authorized_list', self.volname, subvolume, "--group_name", group))
        self.assertCountEqual(expected_auth_list, auth_list)

        # cleanup
        self._fs_cmd("subvolume", "deauthorize", self.volname, subvolume, authid1,
                     "--group_name", group)
        self._fs_cmd("subvolume", "deauthorize", self.volname, subvolume, authid2,
                     "--group_name", group)
        self._fs_cmd("subvolume", "deauthorize", self.volname, subvolume, authid3,
                     "--group_name", group)
        self._fs_cmd("subvolume", "rm", self.volname, subvolume, "--group_name", group)
        self._fs_cmd("subvolumegroup", "rm", self.volname, group)

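    # Hedged CLI sketch (illustrative): the listing asserted above is the JSON
    # emitted by
    #
    #   $ ceph fs subvolume authorized_list <vol> <subvol> --group_name <grp>
    #   [{"alice": "rw"}, {"guest1": "rw"}, {"guest2": "r"}]
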
    def test_authorize_auth_id_not_created_by_mgr_volumes(self):
        """
        If the auth_id already exists and was not created by the mgr plugin,
        authorizing that auth_id is not allowed by default.
        """

        subvolume = self._generate_random_subvolume_name()
        group = self._generate_random_group_name()

        # Create auth_id
        self.fs.mon_manager.raw_cluster_cmd(
            "auth", "get-or-create", "client.guest1",
            "mds", "allow *",
            "osd", "allow rw",
            "mon", "allow *"
        )

        auth_id = "guest1"
        guestclient_1 = {
            "auth_id": auth_id,
            "tenant_id": "tenant1",
        }

        # create group
        self._fs_cmd("subvolumegroup", "create", self.volname, group)

        # create subvolume in group
        self._fs_cmd("subvolume", "create", self.volname, subvolume, "--group_name", group)

        try:
            self._fs_cmd("subvolume", "authorize", self.volname, subvolume, guestclient_1["auth_id"],
                         "--group_name", group, "--tenant_id", guestclient_1["tenant_id"])
        except CommandFailedError as ce:
            self.assertEqual(ce.exitstatus, errno.EPERM,
                             "Invalid error code returned on authorize of subvolume for auth_id created out of band")
        else:
            self.fail("expected the 'fs subvolume authorize' command to fail")

        # clean up
        self.fs.mon_manager.raw_cluster_cmd("auth", "rm", "client.guest1")
        self._fs_cmd("subvolume", "rm", self.volname, subvolume, "--group_name", group)
        self._fs_cmd("subvolumegroup", "rm", self.volname, group)

    def test_authorize_allow_existing_id_option(self):
        """
        If the auth_id already exists and was not created by mgr volumes,
        authorizing that auth_id is not allowed by default, but is
        allowed with the option allow_existing_id.
        """

        subvolume = self._generate_random_subvolume_name()
        group = self._generate_random_group_name()

        # Create auth_id
        self.fs.mon_manager.raw_cluster_cmd(
            "auth", "get-or-create", "client.guest1",
            "mds", "allow *",
            "osd", "allow rw",
            "mon", "allow *"
        )

        auth_id = "guest1"
        guestclient_1 = {
            "auth_id": auth_id,
            "tenant_id": "tenant1",
        }

        # create group
        self._fs_cmd("subvolumegroup", "create", self.volname, group)

        # create subvolume in group
        self._fs_cmd("subvolume", "create", self.volname, subvolume, "--group_name", group)

        # By default, authorizing 'guestclient_1' (whose auth ID already exists
        # and was not created by mgr volumes) would fail, but it is allowed
        # with the option '--allow-existing-id'.
        self._fs_cmd("subvolume", "authorize", self.volname, subvolume, guestclient_1["auth_id"],
                     "--group_name", group, "--tenant_id", guestclient_1["tenant_id"], "--allow-existing-id")

        # clean up
        self._fs_cmd("subvolume", "deauthorize", self.volname, subvolume, auth_id,
                     "--group_name", group)
        self.fs.mon_manager.raw_cluster_cmd("auth", "rm", "client.guest1")
        self._fs_cmd("subvolume", "rm", self.volname, subvolume, "--group_name", group)
        self._fs_cmd("subvolumegroup", "rm", self.volname, group)

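    # Hedged CLI sketch (illustrative): opting in to reuse of a pre-existing
    # auth ID maps to
    #
    #   $ ceph fs subvolume authorize <vol> <subvol> guest1 --group_name <grp> --allow-existing-id
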
    def test_deauthorize_auth_id_after_out_of_band_update(self):
        """
        If the auth_id authorized by the mgr/volumes plugin is updated
        out of band, the auth_id should not be deleted after a
        deauthorize; only the caps associated with it should be removed.
        """

        subvolume = self._generate_random_subvolume_name()
        group = self._generate_random_group_name()

        auth_id = "guest1"
        guestclient_1 = {
            "auth_id": auth_id,
            "tenant_id": "tenant1",
        }

        # create group
        self._fs_cmd("subvolumegroup", "create", self.volname, group)

        # create subvolume in group
        self._fs_cmd("subvolume", "create", self.volname, subvolume, "--group_name", group)

        # Authorize 'guestclient_1' to access the subvolume.
        self._fs_cmd("subvolume", "authorize", self.volname, subvolume, guestclient_1["auth_id"],
                     "--group_name", group, "--tenant_id", guestclient_1["tenant_id"])

        subvol_path = self._fs_cmd("subvolume", "getpath", self.volname, subvolume,
                                   "--group_name", group).rstrip()

        # Update caps for guestclient_1 out of band
        out = self.fs.mon_manager.raw_cluster_cmd(
            "auth", "caps", "client.guest1",
            "mds", "allow rw path=/volumes/{0}, allow rw path={1}".format(group, subvol_path),
            "osd", "allow rw pool=cephfs_data",
            "mon", "allow r",
            "mgr", "allow *"
        )

        # Deauthorize guestclient_1
        self._fs_cmd("subvolume", "deauthorize", self.volname, subvolume, auth_id, "--group_name", group)

        # Validate the caps of guestclient_1 after deauthorize. Deauthorize should
        # not have deleted guestclient_1; the mgr and mds caps that were updated
        # out of band should still be present.
        out = json.loads(self.fs.mon_manager.raw_cluster_cmd("auth", "get", "client.guest1", "--format=json-pretty"))

        self.assertEqual("client.guest1", out[0]["entity"])
        self.assertEqual("allow rw path=/volumes/{0}".format(group), out[0]["caps"]["mds"])
        self.assertEqual("allow *", out[0]["caps"]["mgr"])
        self.assertNotIn("osd", out[0]["caps"])

        # clean up
        out = self.fs.mon_manager.raw_cluster_cmd("auth", "rm", "client.guest1")
        self._fs_cmd("subvolume", "rm", self.volname, subvolume, "--group_name", group)
        self._fs_cmd("subvolumegroup", "rm", self.volname, group)

    def test_recover_auth_metadata_during_authorize(self):
        """
        That the auth metadata manager can recover from partial auth updates using
        metadata files, which store auth info and its update status. This
        test validates the recovery during authorize.
        """

        guest_mount = self.mount_b

        subvolume = self._generate_random_subvolume_name()
        group = self._generate_random_group_name()

        auth_id = "guest1"
        guestclient_1 = {
            "auth_id": auth_id,
            "tenant_id": "tenant1",
        }

        # create group
        self._fs_cmd("subvolumegroup", "create", self.volname, group)

        # create subvolume in group
        self._fs_cmd("subvolume", "create", self.volname, subvolume, "--group_name", group)

        # Authorize 'guestclient_1' to access the subvolume.
        self._fs_cmd("subvolume", "authorize", self.volname, subvolume, guestclient_1["auth_id"],
                     "--group_name", group, "--tenant_id", guestclient_1["tenant_id"])

        # Check that the auth metadata file for auth ID 'guest1' is
        # created on authorizing 'guest1' access to the subvolume.
        auth_metadata_filename = "${0}.meta".format(guestclient_1["auth_id"])
        self.assertIn(auth_metadata_filename, guest_mount.ls("volumes"))
        expected_auth_metadata_content = self._auth_metadata_get(self.mount_a.read_file("volumes/{0}".format(auth_metadata_filename)))

        # Induce partial auth update state by modifying the auth metadata file,
        # and then run authorize again.
        guest_mount.run_shell(['sed', '-i', 's/false/true/g', 'volumes/{0}'.format(auth_metadata_filename)], sudo=True)

        # Authorize 'guestclient_1' to access the subvolume.
        self._fs_cmd("subvolume", "authorize", self.volname, subvolume, guestclient_1["auth_id"],
                     "--group_name", group, "--tenant_id", guestclient_1["tenant_id"])

        auth_metadata_content = self._auth_metadata_get(self.mount_a.read_file("volumes/{0}".format(auth_metadata_filename)))
        self.assertEqual(auth_metadata_content, expected_auth_metadata_content)

        # clean up
        self._fs_cmd("subvolume", "deauthorize", self.volname, subvolume, auth_id, "--group_name", group)
        guest_mount.umount_wait()
        self.fs.mon_manager.raw_cluster_cmd("auth", "rm", "client.guest1")
        self._fs_cmd("subvolume", "rm", self.volname, subvolume, "--group_name", group)
        self._fs_cmd("subvolumegroup", "rm", self.volname, group)

    def test_recover_auth_metadata_during_deauthorize(self):
        """
        That the auth metadata manager can recover from partial auth updates using
        metadata files, which store auth info and its update status. This
        test validates the recovery during deauthorize.
        """

        guest_mount = self.mount_b

        subvolume1, subvolume2 = self._generate_random_subvolume_name(2)
        group = self._generate_random_group_name()

        guestclient_1 = {
            "auth_id": "guest1",
            "tenant_id": "tenant1",
        }

        # create group
        self._fs_cmd("subvolumegroup", "create", self.volname, group)

        # create subvolumes in group
        self._fs_cmd("subvolume", "create", self.volname, subvolume1, "--group_name", group)
        self._fs_cmd("subvolume", "create", self.volname, subvolume2, "--group_name", group)

        # Authorize 'guestclient_1' to access subvolume1.
        self._fs_cmd("subvolume", "authorize", self.volname, subvolume1, guestclient_1["auth_id"],
                     "--group_name", group, "--tenant_id", guestclient_1["tenant_id"])

        # Check that the auth metadata file for auth ID 'guest1' is
        # created on authorizing 'guest1' access to subvolume1.
        auth_metadata_filename = "${0}.meta".format(guestclient_1["auth_id"])
        self.assertIn(auth_metadata_filename, guest_mount.ls("volumes"))
        expected_auth_metadata_content = self._auth_metadata_get(self.mount_a.read_file("volumes/{0}".format(auth_metadata_filename)))

        # Authorize 'guestclient_1' to access subvolume2.
        self._fs_cmd("subvolume", "authorize", self.volname, subvolume2, guestclient_1["auth_id"],
                     "--group_name", group, "--tenant_id", guestclient_1["tenant_id"])

        # Induce partial auth update state by modifying the auth metadata file,
        # and then run deauthorize.
        guest_mount.run_shell(['sed', '-i', 's/false/true/g', 'volumes/{0}'.format(auth_metadata_filename)], sudo=True)

        # Deauthorize 'guestclient_1' from accessing subvolume2.
        self._fs_cmd("subvolume", "deauthorize", self.volname, subvolume2, guestclient_1["auth_id"],
                     "--group_name", group)

        auth_metadata_content = self._auth_metadata_get(self.mount_a.read_file("volumes/{0}".format(auth_metadata_filename)))
        self.assertEqual(auth_metadata_content, expected_auth_metadata_content)

        # clean up
        self._fs_cmd("subvolume", "deauthorize", self.volname, subvolume1, "guest1", "--group_name", group)
        guest_mount.umount_wait()
        self.fs.mon_manager.raw_cluster_cmd("auth", "rm", "client.guest1")
        self._fs_cmd("subvolume", "rm", self.volname, subvolume1, "--group_name", group)
        self._fs_cmd("subvolume", "rm", self.volname, subvolume2, "--group_name", group)
        self._fs_cmd("subvolumegroup", "rm", self.volname, group)

    def test_update_old_style_auth_metadata_to_new_during_authorize(self):
        """
        CephVolumeClient stores the subvolume data in the auth metadata file with
        a 'volumes' key, as there was no subvolume namespace. That doesn't make
        sense with mgr/volumes. This test validates the transparent update of the
        'volumes' key to the 'subvolumes' key in the auth metadata file during
        authorize.
        """

        guest_mount = self.mount_b

        subvolume1, subvolume2 = self._generate_random_subvolume_name(2)
        group = self._generate_random_group_name()

        auth_id = "guest1"
        guestclient_1 = {
            "auth_id": auth_id,
            "tenant_id": "tenant1",
        }

        # create group
        self._fs_cmd("subvolumegroup", "create", self.volname, group)

        # create subvolumes in group
        self._fs_cmd("subvolume", "create", self.volname, subvolume1, "--group_name", group)
        self._fs_cmd("subvolume", "create", self.volname, subvolume2, "--group_name", group)

        # Authorize 'guestclient_1' to access subvolume1.
        self._fs_cmd("subvolume", "authorize", self.volname, subvolume1, guestclient_1["auth_id"],
                     "--group_name", group, "--tenant_id", guestclient_1["tenant_id"])

        # Check that the auth metadata file for auth ID 'guest1' is
        # created on authorizing 'guest1' access to subvolume1.
        auth_metadata_filename = "${0}.meta".format(guestclient_1["auth_id"])
        self.assertIn(auth_metadata_filename, guest_mount.ls("volumes"))

        # Replace 'subvolumes' with 'volumes' to emulate an old-style auth-metadata file
        guest_mount.run_shell(['sed', '-i', 's/subvolumes/volumes/g', 'volumes/{0}'.format(auth_metadata_filename)], sudo=True)

        # Authorize 'guestclient_1' to access subvolume2. This should transparently update 'volumes' to 'subvolumes'
        self._fs_cmd("subvolume", "authorize", self.volname, subvolume2, guestclient_1["auth_id"],
                     "--group_name", group, "--tenant_id", guestclient_1["tenant_id"])

        expected_auth_metadata = {
            "version": 5,
            "compat_version": 6,
            "dirty": False,
            "tenant_id": "tenant1",
            "subvolumes": {
                "{0}/{1}".format(group,subvolume1): {
                    "dirty": False,
                    "access_level": "rw"
                },
                "{0}/{1}".format(group,subvolume2): {
                    "dirty": False,
                    "access_level": "rw"
                }
            }
        }

        auth_metadata = self._auth_metadata_get(guest_mount.read_file("volumes/{0}".format(auth_metadata_filename)))

        self.assertGreaterEqual(auth_metadata["version"], expected_auth_metadata["version"])
        del expected_auth_metadata["version"]
        del auth_metadata["version"]
        self.assertEqual(expected_auth_metadata, auth_metadata)

        # clean up
        self._fs_cmd("subvolume", "deauthorize", self.volname, subvolume1, auth_id, "--group_name", group)
        self._fs_cmd("subvolume", "deauthorize", self.volname, subvolume2, auth_id, "--group_name", group)
        guest_mount.umount_wait()
        self.fs.mon_manager.raw_cluster_cmd("auth", "rm", "client.guest1")
        self._fs_cmd("subvolume", "rm", self.volname, subvolume1, "--group_name", group)
        self._fs_cmd("subvolume", "rm", self.volname, subvolume2, "--group_name", group)
        self._fs_cmd("subvolumegroup", "rm", self.volname, group)

    def test_update_old_style_auth_metadata_to_new_during_deauthorize(self):
        """
        CephVolumeClient stores the subvolume data in the auth metadata file with
        a 'volumes' key, as there was no subvolume namespace. That doesn't make
        sense with mgr/volumes. This test validates the transparent update of the
        'volumes' key to the 'subvolumes' key in the auth metadata file during
        deauthorize.
        """

        guest_mount = self.mount_b

        subvolume1, subvolume2 = self._generate_random_subvolume_name(2)
        group = self._generate_random_group_name()

        auth_id = "guest1"
        guestclient_1 = {
            "auth_id": auth_id,
            "tenant_id": "tenant1",
        }

        # create group
        self._fs_cmd("subvolumegroup", "create", self.volname, group)

        # create subvolumes in group
        self._fs_cmd("subvolume", "create", self.volname, subvolume1, "--group_name", group)
        self._fs_cmd("subvolume", "create", self.volname, subvolume2, "--group_name", group)

        # Authorize 'guestclient_1' to access subvolume1.
        self._fs_cmd("subvolume", "authorize", self.volname, subvolume1, guestclient_1["auth_id"],
                     "--group_name", group, "--tenant_id", guestclient_1["tenant_id"])

        # Authorize 'guestclient_1' to access subvolume2.
        self._fs_cmd("subvolume", "authorize", self.volname, subvolume2, guestclient_1["auth_id"],
                     "--group_name", group, "--tenant_id", guestclient_1["tenant_id"])

        # Check that the auth metadata file for auth ID 'guest1' is created.
        auth_metadata_filename = "${0}.meta".format(guestclient_1["auth_id"])
        self.assertIn(auth_metadata_filename, guest_mount.ls("volumes"))

        # Replace 'subvolumes' with 'volumes' to emulate an old-style auth-metadata file
        guest_mount.run_shell(['sed', '-i', 's/subvolumes/volumes/g', 'volumes/{0}'.format(auth_metadata_filename)], sudo=True)

        # Deauthorize 'guestclient_1' from accessing subvolume2. This should update 'volumes' to 'subvolumes'
        self._fs_cmd("subvolume", "deauthorize", self.volname, subvolume2, auth_id, "--group_name", group)

        expected_auth_metadata = {
            "version": 5,
            "compat_version": 6,
            "dirty": False,
            "tenant_id": "tenant1",
            "subvolumes": {
                "{0}/{1}".format(group,subvolume1): {
                    "dirty": False,
                    "access_level": "rw"
                }
            }
        }

        auth_metadata = self._auth_metadata_get(guest_mount.read_file("volumes/{0}".format(auth_metadata_filename)))

        self.assertGreaterEqual(auth_metadata["version"], expected_auth_metadata["version"])
        del expected_auth_metadata["version"]
        del auth_metadata["version"]
        self.assertEqual(expected_auth_metadata, auth_metadata)

        # clean up
        self._fs_cmd("subvolume", "deauthorize", self.volname, subvolume1, auth_id, "--group_name", group)
        guest_mount.umount_wait()
        self.fs.mon_manager.raw_cluster_cmd("auth", "rm", "client.guest1")
        self._fs_cmd("subvolume", "rm", self.volname, subvolume1, "--group_name", group)
        self._fs_cmd("subvolume", "rm", self.volname, subvolume2, "--group_name", group)
        self._fs_cmd("subvolumegroup", "rm", self.volname, group)

    def test_subvolume_evict_client(self):
        """
        That a subvolume client can be evicted based on the auth ID
        """

        subvolumes = self._generate_random_subvolume_name(2)
        group = self._generate_random_group_name()

        # create group
        self._fs_cmd("subvolumegroup", "create", self.volname, group)

        # mounts[0] and mounts[1] would be used as guests to mount the volumes/shares.
        for i in range(0, 2):
            self.mounts[i].umount_wait()
        guest_mounts = (self.mounts[0], self.mounts[1])
        auth_id = "guest"
        guestclient_1 = {
            "auth_id": auth_id,
            "tenant_id": "tenant1",
        }

        # Create two subvolumes. Authorize 'guest' auth ID to mount the two
        # subvolumes. Mount the two subvolumes. Write data to the subvolumes.
        for i in range(2):
            # Create subvolume.
            self._fs_cmd("subvolume", "create", self.volname, subvolumes[i], "--group_name", group, "--mode=777")

            # authorize guest authID read-write access to subvolume
            key = self._fs_cmd("subvolume", "authorize", self.volname, subvolumes[i], guestclient_1["auth_id"],
                               "--group_name", group, "--tenant_id", guestclient_1["tenant_id"])

            mount_path = self._fs_cmd("subvolume", "getpath", self.volname, subvolumes[i],
                                      "--group_name", group).rstrip()
            # configure credentials for guest client
            self._configure_guest_auth(guest_mounts[i], auth_id, key)

            # mount the subvolume, and write to it
            guest_mounts[i].mount_wait(cephfs_mntpt=mount_path)
            guest_mounts[i].write_n_mb("data.bin", 1)

        # Evict guest_mounts[0], which uses auth ID 'guest' and has mounted
        # one subvolume.
        self._fs_cmd("subvolume", "evict", self.volname, subvolumes[0], auth_id, "--group_name", group)

        # The evicted guest client, guest_mounts[0], should not be able to do
        # any more metadata ops. It should start failing all operations
        # when it sees that its own address is in the blocklist.
        try:
            guest_mounts[0].write_n_mb("rogue.bin", 1)
        except CommandFailedError:
            pass
        else:
            raise RuntimeError("post-eviction write should have failed!")

        # The blocklisted guest client should now be unmountable
        guest_mounts[0].umount_wait()

        # Guest client guest_mounts[1], which uses the same auth ID 'guest' but
        # has mounted the other subvolume, should be able to use its subvolume
        # unaffected.
        guest_mounts[1].write_n_mb("data.bin.1", 1)

        # Cleanup.
        guest_mounts[1].umount_wait()
        for i in range(2):
            self._fs_cmd("subvolume", "deauthorize", self.volname, subvolumes[i], auth_id, "--group_name", group)
            self._fs_cmd("subvolume", "rm", self.volname, subvolumes[i], "--group_name", group)
        self._fs_cmd("subvolumegroup", "rm", self.volname, group)

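    # Hedged CLI sketch (illustrative): the eviction above maps to
    #
    #   $ ceph fs subvolume evict <vol> <subvol> guest --group_name <grp>
    #
    # which blocklists the client session(s) authenticated as client.guest
    # against that subvolume.
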
    def test_subvolume_pin_random(self):
        self.fs.set_max_mds(2)
        self.fs.wait_for_daemons()
        self.config_set('mds', 'mds_export_ephemeral_random', True)

        subvolume = self._generate_random_subvolume_name()
        self._fs_cmd("subvolume", "create", self.volname, subvolume)
        self._fs_cmd("subvolume", "pin", self.volname, subvolume, "random", ".01")
        # no verification

        # remove subvolume
        self._fs_cmd("subvolume", "rm", self.volname, subvolume)

        # verify trash dir is clean
        self._wait_for_trash_empty()

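    # Hedged CLI sketch (illustrative): the random ephemeral pin above maps to
    #
    #   $ ceph fs subvolume pin <vol> <subvol> random .01
    #
    # with mds_export_ephemeral_random enabled, as done at the top of the test.
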
    def test_subvolume_resize_fail_invalid_size(self):
        """
        That a subvolume cannot be resized to an invalid size, and that the quota does not change
        """

        osize = self.DEFAULT_FILE_SIZE*1024*1024
        # create subvolume
        subvolname = self._generate_random_subvolume_name()
        self._fs_cmd("subvolume", "create", self.volname, subvolname, "--size", str(osize))

        # make sure it exists
        subvolpath = self._get_subvolume_path(self.volname, subvolname)
        self.assertNotEqual(subvolpath, None)

        # try to resize the subvolume with an invalid size -10
        nsize = -10
        try:
            self._fs_cmd("subvolume", "resize", self.volname, subvolname, str(nsize))
        except CommandFailedError as ce:
            self.assertEqual(ce.exitstatus, errno.EINVAL, "invalid error code on resize of subvolume with invalid size")
        else:
            self.fail("expected the 'fs subvolume resize' command to fail")

        # verify the quota did not change
        size = int(self.mount_a.getfattr(subvolpath, "ceph.quota.max_bytes"))
        self.assertEqual(size, osize)

        # remove subvolume
        self._fs_cmd("subvolume", "rm", self.volname, subvolname)

        # verify trash dir is clean
        self._wait_for_trash_empty()

    def test_subvolume_resize_fail_zero_size(self):
        """
        That a subvolume cannot be resized to a zero size, and that the quota does not change
        """

        osize = self.DEFAULT_FILE_SIZE*1024*1024
        # create subvolume
        subvolname = self._generate_random_subvolume_name()
        self._fs_cmd("subvolume", "create", self.volname, subvolname, "--size", str(osize))

        # make sure it exists
        subvolpath = self._get_subvolume_path(self.volname, subvolname)
        self.assertNotEqual(subvolpath, None)

        # try to resize the subvolume with size 0
        nsize = 0
        try:
            self._fs_cmd("subvolume", "resize", self.volname, subvolname, str(nsize))
        except CommandFailedError as ce:
            self.assertEqual(ce.exitstatus, errno.EINVAL, "invalid error code on resize of subvolume with invalid size")
        else:
            self.fail("expected the 'fs subvolume resize' command to fail")

        # verify the quota did not change
        size = int(self.mount_a.getfattr(subvolpath, "ceph.quota.max_bytes"))
        self.assertEqual(size, osize)

        # remove subvolume
        self._fs_cmd("subvolume", "rm", self.volname, subvolname)

        # verify trash dir is clean
        self._wait_for_trash_empty()

    def test_subvolume_resize_quota_lt_used_size(self):
        """
        That a subvolume can be resized to a size smaller than the current used size
        and the resulting quota matches the expected size.
        """

        osize = self.DEFAULT_FILE_SIZE*1024*1024*20
        # create subvolume
        subvolname = self._generate_random_subvolume_name()
        self._fs_cmd("subvolume", "create", self.volname, subvolname, "--size", str(osize), "--mode=777")

        # make sure it exists
        subvolpath = self._get_subvolume_path(self.volname, subvolname)
        self.assertNotEqual(subvolpath, None)

        # create one file of 10MB
        file_size=self.DEFAULT_FILE_SIZE*10
        number_of_files=1
        log.debug("filling subvolume {0} with {1} file of size {2}MB".format(subvolname,
                                                                             number_of_files,
                                                                             file_size))
        filename = "{0}.{1}".format(TestVolumes.TEST_FILE_NAME_PREFIX, self.DEFAULT_NUMBER_OF_FILES+1)
        self.mount_a.write_n_mb(os.path.join(subvolpath, filename), file_size)

        usedsize = int(self.mount_a.getfattr(subvolpath, "ceph.dir.rbytes"))
        susedsize = int(self.mount_a.run_shell(['stat', '-c' '%s', subvolpath]).stdout.getvalue().strip())
        if isinstance(self.mount_a, FuseMount):
            # kclient dir does not have size==rbytes
            self.assertEqual(usedsize, susedsize)

        # shrink the subvolume
        nsize = usedsize // 2
        try:
            self._fs_cmd("subvolume", "resize", self.volname, subvolname, str(nsize))
        except CommandFailedError:
            self.fail("expected the 'fs subvolume resize' command to succeed")

        # verify the quota
        size = int(self.mount_a.getfattr(subvolpath, "ceph.quota.max_bytes"))
        self.assertEqual(size, nsize)

        # remove subvolume
        self._fs_cmd("subvolume", "rm", self.volname, subvolname)

        # verify trash dir is clean
        self._wait_for_trash_empty()

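    # Hedged illustration (assumption, not from the suite): the used size
    # consulted above is the recursive byte-count vxattr, readable from any
    # client mount, e.g.
    #
    #   $ getfattr -n ceph.dir.rbytes <mountpoint>/<subvol_path>
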
    def test_subvolume_resize_fail_quota_lt_used_size_no_shrink(self):
        """
        That a subvolume cannot be resized to a size smaller than the current used size
        when --no_shrink is given, and that the quota does not change.
        """

        osize = self.DEFAULT_FILE_SIZE*1024*1024*20
        # create subvolume
        subvolname = self._generate_random_subvolume_name()
        self._fs_cmd("subvolume", "create", self.volname, subvolname, "--size", str(osize), "--mode=777")

        # make sure it exists
        subvolpath = self._get_subvolume_path(self.volname, subvolname)
        self.assertNotEqual(subvolpath, None)

        # create one file of 10MB
        file_size=self.DEFAULT_FILE_SIZE*10
        number_of_files=1
        log.debug("filling subvolume {0} with {1} file of size {2}MB".format(subvolname,
                                                                             number_of_files,
                                                                             file_size))
        filename = "{0}.{1}".format(TestVolumes.TEST_FILE_NAME_PREFIX, self.DEFAULT_NUMBER_OF_FILES+2)
        self.mount_a.write_n_mb(os.path.join(subvolpath, filename), file_size)

        usedsize = int(self.mount_a.getfattr(subvolpath, "ceph.dir.rbytes"))
        susedsize = int(self.mount_a.run_shell(['stat', '-c' '%s', subvolpath]).stdout.getvalue().strip())
        if isinstance(self.mount_a, FuseMount):
            # kclient dir does not have size==rbytes
            self.assertEqual(usedsize, susedsize)

        # attempt to shrink the subvolume
        nsize = usedsize // 2
        try:
            self._fs_cmd("subvolume", "resize", self.volname, subvolname, str(nsize), "--no_shrink")
        except CommandFailedError as ce:
            self.assertEqual(ce.exitstatus, errno.EINVAL, "invalid error code on resize of subvolume with invalid size")
        else:
            self.fail("expected the 'fs subvolume resize' command to fail")

        # verify the quota did not change
        size = int(self.mount_a.getfattr(subvolpath, "ceph.quota.max_bytes"))
        self.assertEqual(size, osize)

        # remove subvolume
        self._fs_cmd("subvolume", "rm", self.volname, subvolname)

        # verify trash dir is clean
        self._wait_for_trash_empty()

    def test_subvolume_resize_expand_on_full_subvolume(self):
        """
        That a subvolume can be expanded from a full subvolume and future writes succeed.
        """

        osize = self.DEFAULT_FILE_SIZE*1024*1024*10
        # create subvolume of quota 10MB and make sure it exists
        subvolname = self._generate_random_subvolume_name()
        self._fs_cmd("subvolume", "create", self.volname, subvolname, "--size", str(osize), "--mode=777")
        subvolpath = self._get_subvolume_path(self.volname, subvolname)
        self.assertNotEqual(subvolpath, None)

        # create one file of size 10MB and write
        file_size=self.DEFAULT_FILE_SIZE*10
        number_of_files=1
        log.debug("filling subvolume {0} with {1} file of size {2}MB".format(subvolname,
                                                                             number_of_files,
                                                                             file_size))
        filename = "{0}.{1}".format(TestVolumes.TEST_FILE_NAME_PREFIX, self.DEFAULT_NUMBER_OF_FILES+3)
        self.mount_a.write_n_mb(os.path.join(subvolpath, filename), file_size)

        # create a file of size 5MB and try to write more
        file_size=file_size // 2
        number_of_files=1
        log.debug("filling subvolume {0} with {1} file of size {2}MB".format(subvolname,
                                                                             number_of_files,
                                                                             file_size))
        filename = "{0}.{1}".format(TestVolumes.TEST_FILE_NAME_PREFIX, self.DEFAULT_NUMBER_OF_FILES+4)
        try:
            self.mount_a.write_n_mb(os.path.join(subvolpath, filename), file_size)
        except CommandFailedError:
            # Not able to write. So expand the subvolume and try writing the 5MB file again.
            nsize = osize*2
            self._fs_cmd("subvolume", "resize", self.volname, subvolname, str(nsize))
            try:
                self.mount_a.write_n_mb(os.path.join(subvolpath, filename), file_size)
            except CommandFailedError:
                self.fail("expected filling subvolume {0} with {1} file of size {2}MB "
                          "to succeed".format(subvolname, number_of_files, file_size))
        else:
            self.fail("expected filling subvolume {0} with {1} file of size {2}MB "
                      "to fail".format(subvolname, number_of_files, file_size))

        # remove subvolume
        self._fs_cmd("subvolume", "rm", self.volname, subvolname)

        # verify trash dir is clean
        self._wait_for_trash_empty()

    def test_subvolume_resize_infinite_size(self):
        """
        That a subvolume can be resized to an infinite size by unsetting its quota.
        """

        # create subvolume
        subvolname = self._generate_random_subvolume_name()
        self._fs_cmd("subvolume", "create", self.volname, subvolname, "--size",
                     str(self.DEFAULT_FILE_SIZE*1024*1024))

        # make sure it exists
        subvolpath = self._get_subvolume_path(self.volname, subvolname)
        self.assertNotEqual(subvolpath, None)

        # resize inf
        self._fs_cmd("subvolume", "resize", self.volname, subvolname, "inf")

        # verify that the quota is None
        size = self.mount_a.getfattr(subvolpath, "ceph.quota.max_bytes")
        self.assertEqual(size, None)

        # remove subvolume
        self._fs_cmd("subvolume", "rm", self.volname, subvolname)

        # verify trash dir is clean
        self._wait_for_trash_empty()

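    # Hedged CLI sketch (illustrative): removing the quota maps to
    #
    #   $ ceph fs subvolume resize <vol> <subvol> inf
    #
    # after which ceph.quota.max_bytes is no longer set on the subvolume path.
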
    def test_subvolume_resize_infinite_size_future_writes(self):
        """
        That a subvolume can be resized to an infinite size and future writes succeed.
        """

        # create subvolume
        subvolname = self._generate_random_subvolume_name()
        self._fs_cmd("subvolume", "create", self.volname, subvolname, "--size",
                     str(self.DEFAULT_FILE_SIZE*1024*1024*5), "--mode=777")

        # make sure it exists
        subvolpath = self._get_subvolume_path(self.volname, subvolname)
        self.assertNotEqual(subvolpath, None)

        # resize inf
        self._fs_cmd("subvolume", "resize", self.volname, subvolname, "inf")

        # verify that the quota is None
        size = self.mount_a.getfattr(subvolpath, "ceph.quota.max_bytes")
        self.assertEqual(size, None)

        # create one file of 10MB and try to write
        file_size=self.DEFAULT_FILE_SIZE*10
        number_of_files=1
        log.debug("filling subvolume {0} with {1} file of size {2}MB".format(subvolname,
                                                                             number_of_files,
                                                                             file_size))
        filename = "{0}.{1}".format(TestVolumes.TEST_FILE_NAME_PREFIX, self.DEFAULT_NUMBER_OF_FILES+5)

        try:
            self.mount_a.write_n_mb(os.path.join(subvolpath, filename), file_size)
        except CommandFailedError:
            self.fail("expected filling subvolume {0} with {1} file of size {2}MB "
                      "to succeed".format(subvolname, number_of_files, file_size))

        # remove subvolume
        self._fs_cmd("subvolume", "rm", self.volname, subvolname)

        # verify trash dir is clean
        self._wait_for_trash_empty()

    def test_subvolume_rm_force(self):
        # test removing a non-existing subvolume with --force
        subvolume = self._generate_random_subvolume_name()
        try:
            self._fs_cmd("subvolume", "rm", self.volname, subvolume, "--force")
        except CommandFailedError:
            self.fail("expected the 'fs subvolume rm --force' command to succeed")

    def test_subvolume_shrink(self):
        """
        That a subvolume can be shrunk in size and its quota matches the expected size.
        """

        # create subvolume
        subvolname = self._generate_random_subvolume_name()
        osize = self.DEFAULT_FILE_SIZE*1024*1024
        self._fs_cmd("subvolume", "create", self.volname, subvolname, "--size", str(osize))

        # make sure it exists
        subvolpath = self._get_subvolume_path(self.volname, subvolname)
        self.assertNotEqual(subvolpath, None)

        # shrink the subvolume
        nsize = osize // 2
        self._fs_cmd("subvolume", "resize", self.volname, subvolname, str(nsize))

        # verify the quota
        size = int(self.mount_a.getfattr(subvolpath, "ceph.quota.max_bytes"))
        self.assertEqual(size, nsize)

        # remove subvolume
        self._fs_cmd("subvolume", "rm", self.volname, subvolname)

        # verify trash dir is clean
        self._wait_for_trash_empty()

    def test_subvolume_retain_snapshot_rm_idempotency(self):
        """
        ensure that deleting a subvolume that was already deleted with the retain-snapshots
        option passes. After subvolume deletion with retained snapshots, the subvolume exists
        until the trash directory (residing inside the subvolume) is cleaned up. A subvolume
        deletion issued while the trash directory is not empty should pass and should not
        error out with EAGAIN.
        """
        subvolume = self._generate_random_subvolume_name()
        snapshot = self._generate_random_snapshot_name()

        # create subvolume
        self._fs_cmd("subvolume", "create", self.volname, subvolume, "--mode=777")

        # do some IO
        self._do_subvolume_io(subvolume, number_of_files=256)

        # snapshot subvolume
        self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolume, snapshot)

        # remove with snapshot retention
        self._fs_cmd("subvolume", "rm", self.volname, subvolume, "--retain-snapshots")

        # remove snapshot (removes the retained subvolume)
        self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume, snapshot)

        # remove subvolume (check idempotency)
        try:
            self._fs_cmd("subvolume", "rm", self.volname, subvolume)
        except CommandFailedError as ce:
            if ce.exitstatus != errno.ENOENT:
                self.fail(f"expected subvolume rm to pass with error: {os.strerror(ce.exitstatus)}")

        # verify trash dir is clean
        self._wait_for_trash_empty()

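    # Hedged CLI sketch (illustrative): removal with snapshot retention maps to
    #
    #   $ ceph fs subvolume rm <vol> <subvol> --retain-snapshots
    #
    # and the retained subvolume is finally removed when its last snapshot is deleted.
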
    def test_subvolume_user_metadata_set(self):
        subvolname = self._generate_random_subvolume_name()
        group = self._generate_random_group_name()

        # create group.
        self._fs_cmd("subvolumegroup", "create", self.volname, group)

        # create subvolume in group.
        self._fs_cmd("subvolume", "create", self.volname, subvolname, "--group_name", group)

        # set metadata for subvolume.
        key = "key"
        value = "value"
        try:
            self._fs_cmd("subvolume", "metadata", "set", self.volname, subvolname, key, value, "--group_name", group)
        except CommandFailedError:
            self.fail("expected the 'fs subvolume metadata set' command to succeed")

        self._fs_cmd("subvolume", "rm", self.volname, subvolname, group)
        self._fs_cmd("subvolumegroup", "rm", self.volname, group)

        # verify trash dir is clean.
        self._wait_for_trash_empty()

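    # Hedged CLI sketch (illustrative): the user-metadata operations exercised
    # in this group of tests map to
    #
    #   $ ceph fs subvolume metadata set <vol> <subvol> <key> <value> --group_name <grp>
    #   $ ceph fs subvolume metadata get <vol> <subvol> <key> --group_name <grp>
    #   $ ceph fs subvolume metadata ls <vol> <subvol> --group_name <grp>
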
    def test_subvolume_user_metadata_set_idempotence(self):
        subvolname = self._generate_random_subvolume_name()
        group = self._generate_random_group_name()

        # create group.
        self._fs_cmd("subvolumegroup", "create", self.volname, group)

        # create subvolume in group.
        self._fs_cmd("subvolume", "create", self.volname, subvolname, "--group_name", group)

        # set metadata for subvolume.
        key = "key"
        value = "value"
        try:
            self._fs_cmd("subvolume", "metadata", "set", self.volname, subvolname, key, value, "--group_name", group)
        except CommandFailedError:
            self.fail("expected the 'fs subvolume metadata set' command to succeed")

        # set the same metadata again for the subvolume.
        try:
            self._fs_cmd("subvolume", "metadata", "set", self.volname, subvolname, key, value, "--group_name", group)
        except CommandFailedError:
            self.fail("expected the 'fs subvolume metadata set' command to succeed because it is an idempotent operation")

        self._fs_cmd("subvolume", "rm", self.volname, subvolname, group)
        self._fs_cmd("subvolumegroup", "rm", self.volname, group)

        # verify trash dir is clean.
        self._wait_for_trash_empty()

    def test_subvolume_user_metadata_get(self):
        subvolname = self._generate_random_subvolume_name()
        group = self._generate_random_group_name()

        # create group.
        self._fs_cmd("subvolumegroup", "create", self.volname, group)

        # create subvolume in group.
        self._fs_cmd("subvolume", "create", self.volname, subvolname, "--group_name", group)

        # set metadata for subvolume.
        key = "key"
        value = "value"
        self._fs_cmd("subvolume", "metadata", "set", self.volname, subvolname, key, value, "--group_name", group)

        # get value for specified key.
        try:
            ret = self._fs_cmd("subvolume", "metadata", "get", self.volname, subvolname, key, "--group_name", group)
        except CommandFailedError:
            self.fail("expected the 'fs subvolume metadata get' command to succeed")

        # remove '\n' from returned value.
        ret = ret.strip('\n')

        # match received value with expected value.
        self.assertEqual(value, ret)

        self._fs_cmd("subvolume", "rm", self.volname, subvolname, group)
        self._fs_cmd("subvolumegroup", "rm", self.volname, group)

        # verify trash dir is clean.
        self._wait_for_trash_empty()

    def test_subvolume_user_metadata_get_for_nonexisting_key(self):
        subvolname = self._generate_random_subvolume_name()
        group = self._generate_random_group_name()

        # create group.
        self._fs_cmd("subvolumegroup", "create", self.volname, group)

        # create subvolume in group.
        self._fs_cmd("subvolume", "create", self.volname, subvolname, "--group_name", group)

        # set metadata for subvolume.
        key = "key"
        value = "value"
        self._fs_cmd("subvolume", "metadata", "set", self.volname, subvolname, key, value, "--group_name", group)

        # try to get value for nonexisting key
        # Expecting ENOENT exit status because key does not exist
        try:
            self._fs_cmd("subvolume", "metadata", "get", self.volname, subvolname, "key_nonexist", "--group_name", group)
        except CommandFailedError as e:
            self.assertEqual(e.exitstatus, errno.ENOENT)
        else:
            self.fail("Expected ENOENT because 'key_nonexist' does not exist")

        self._fs_cmd("subvolume", "rm", self.volname, subvolname, group)
        self._fs_cmd("subvolumegroup", "rm", self.volname, group)

        # verify trash dir is clean.
        self._wait_for_trash_empty()

33c7a0ef
TL
2605 def test_subvolume_user_metadata_get_for_nonexisting_section(self):
2606 subvolname = self._generate_random_subvolume_name()
2607 group = self._generate_random_group_name()
f67539c2 2608
33c7a0ef
TL
2609 # create group.
2610 self._fs_cmd("subvolumegroup", "create", self.volname, group)
f67539c2 2611
33c7a0ef
TL
2612 # create subvolume in group.
2613 self._fs_cmd("subvolume", "create", self.volname, subvolname, "--group_name", group)
cd265ab1 2614
33c7a0ef
TL
2615 # try to get value for nonexisting key (as section does not exist)
2616 # Expecting ENOENT exit status because key does not exist
2617 try:
2618 self._fs_cmd("subvolume", "metadata", "get", self.volname, subvolname, "key", "--group_name", group)
2619 except CommandFailedError as e:
2620 self.assertEqual(e.exitstatus, errno.ENOENT)
2621 else:
2622 self.fail("Expected ENOENT because section does not exist")
cd265ab1 2623
33c7a0ef
TL
2624 self._fs_cmd("subvolume", "rm", self.volname, subvolname, group)
2625 self._fs_cmd("subvolumegroup", "rm", self.volname, group)
cd265ab1 2626
33c7a0ef 2627 # verify trash dir is clean.
cd265ab1
TL
2628 self._wait_for_trash_empty()
2629
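    # The ENOENT expectations above all share one shape; a hedged sketch of a
    # reusable assertion helper (hypothetical, not part of the suite):
    def _assert_fs_cmd_fails_example(self, expected_errno, *args):
        try:
            self._fs_cmd(*args)
        except CommandFailedError as e:
            # the command failed as expected; verify the errno matches
            self.assertEqual(e.exitstatus, expected_errno)
        else:
            self.fail("expected command to fail with errno {0}".format(expected_errno))
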
    def test_subvolume_user_metadata_update(self):
        subvolname = self._generate_random_subvolume_name()
        group = self._generate_random_group_name()

        # create group.
        self._fs_cmd("subvolumegroup", "create", self.volname, group)

        # create subvolume in group.
        self._fs_cmd("subvolume", "create", self.volname, subvolname, "--group_name", group)

        # set metadata for subvolume.
        key = "key"
        value = "value"
        self._fs_cmd("subvolume", "metadata", "set", self.volname, subvolname, key, value, "--group_name", group)

        # update metadata against key.
        new_value = "new_value"
        self._fs_cmd("subvolume", "metadata", "set", self.volname, subvolname, key, new_value, "--group_name", group)

        # get metadata for specified key of subvolume.
        try:
            ret = self._fs_cmd("subvolume", "metadata", "get", self.volname, subvolname, key, "--group_name", group)
        except CommandFailedError:
            self.fail("expected the 'fs subvolume metadata get' command to succeed")

        # remove '\n' from returned value.
        ret = ret.strip('\n')

        # match received value with expected value.
        self.assertEqual(new_value, ret)

        self._fs_cmd("subvolume", "rm", self.volname, subvolname, group)
        self._fs_cmd("subvolumegroup", "rm", self.volname, group)

        # verify trash dir is clean.
        self._wait_for_trash_empty()

    def test_subvolume_user_metadata_list(self):
        subvolname = self._generate_random_subvolume_name()
        group = self._generate_random_group_name()

        # create group.
        self._fs_cmd("subvolumegroup", "create", self.volname, group)

        # create subvolume in group.
        self._fs_cmd("subvolume", "create", self.volname, subvolname, "--group_name", group)

        # set metadata for subvolume.
        input_metadata_dict = {f'key_{i}' : f'value_{i}' for i in range(3)}

        for k, v in input_metadata_dict.items():
            self._fs_cmd("subvolume", "metadata", "set", self.volname, subvolname, k, v, "--group_name", group)

        # list metadata
        try:
            ret = self._fs_cmd("subvolume", "metadata", "ls", self.volname, subvolname, "--group_name", group)
        except CommandFailedError:
            self.fail("expected the 'fs subvolume metadata ls' command to succeed")

        ret_dict = json.loads(ret)

        # compare output with expected output
        self.assertDictEqual(input_metadata_dict, ret_dict)

        self._fs_cmd("subvolume", "rm", self.volname, subvolname, group)
        self._fs_cmd("subvolumegroup", "rm", self.volname, group)

        # verify trash dir is clean.
        self._wait_for_trash_empty()

    def test_subvolume_user_metadata_list_if_no_metadata_set(self):
        subvolname = self._generate_random_subvolume_name()
        group = self._generate_random_group_name()

        # create group.
        self._fs_cmd("subvolumegroup", "create", self.volname, group)

        # create subvolume in group.
        self._fs_cmd("subvolume", "create", self.volname, subvolname, "--group_name", group)

        # list metadata
        try:
            ret = self._fs_cmd("subvolume", "metadata", "ls", self.volname, subvolname, "--group_name", group)
        except CommandFailedError:
            self.fail("expected the 'fs subvolume metadata ls' command to succeed")

        # remove '\n' from returned value.
        ret = ret.strip('\n')

        # compare output with expected output
        # expecting empty json/dictionary
        self.assertEqual(ret, "{}")

        self._fs_cmd("subvolume", "rm", self.volname, subvolname, group)
        self._fs_cmd("subvolumegroup", "rm", self.volname, group)

        # verify trash dir is clean.
        self._wait_for_trash_empty()

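    # Sketch of how a caller might consume 'metadata ls' output, mirroring the
    # json.loads() handling above (hypothetical helper): the command returns a
    # JSON object, and an empty section serializes as "{}".
    def _list_metadata_example(self, subvolname, group):
        ret = self._fs_cmd("subvolume", "metadata", "ls", self.volname, subvolname, "--group_name", group)
        return json.loads(ret)  # {} when no metadata has been set
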
    def test_subvolume_user_metadata_remove(self):
        subvolname = self._generate_random_subvolume_name()
        group = self._generate_random_group_name()

        # create group.
        self._fs_cmd("subvolumegroup", "create", self.volname, group)

        # create subvolume in group.
        self._fs_cmd("subvolume", "create", self.volname, subvolname, "--group_name", group)

        # set metadata for subvolume.
        key = "key"
        value = "value"
        self._fs_cmd("subvolume", "metadata", "set", self.volname, subvolname, key, value, "--group_name", group)

        # remove metadata against specified key.
        try:
            self._fs_cmd("subvolume", "metadata", "rm", self.volname, subvolname, key, "--group_name", group)
        except CommandFailedError:
            self.fail("expected the 'fs subvolume metadata rm' command to succeed")

        # confirm key is removed by again fetching metadata
        try:
            self._fs_cmd("subvolume", "metadata", "get", self.volname, subvolname, key, "--group_name", group)
        except CommandFailedError as e:
            self.assertEqual(e.exitstatus, errno.ENOENT)
        else:
            self.fail("Expected ENOENT because key does not exist")

        self._fs_cmd("subvolume", "rm", self.volname, subvolname, group)
        self._fs_cmd("subvolumegroup", "rm", self.volname, group)

        # verify trash dir is clean.
        self._wait_for_trash_empty()

    def test_subvolume_user_metadata_remove_for_nonexisting_key(self):
        subvolname = self._generate_random_subvolume_name()
        group = self._generate_random_group_name()

        # create group.
        self._fs_cmd("subvolumegroup", "create", self.volname, group)

        # create subvolume in group.
        self._fs_cmd("subvolume", "create", self.volname, subvolname, "--group_name", group)

        # set metadata for subvolume.
        key = "key"
        value = "value"
        self._fs_cmd("subvolume", "metadata", "set", self.volname, subvolname, key, value, "--group_name", group)

        # try to remove value for nonexisting key
        # Expecting ENOENT exit status because key does not exist
        try:
            self._fs_cmd("subvolume", "metadata", "rm", self.volname, subvolname, "key_nonexist", "--group_name", group)
        except CommandFailedError as e:
            self.assertEqual(e.exitstatus, errno.ENOENT)
        else:
            self.fail("Expected ENOENT because 'key_nonexist' does not exist")

        self._fs_cmd("subvolume", "rm", self.volname, subvolname, group)
        self._fs_cmd("subvolumegroup", "rm", self.volname, group)

        # verify trash dir is clean.
        self._wait_for_trash_empty()

    def test_subvolume_user_metadata_remove_for_nonexisting_section(self):
        subvolname = self._generate_random_subvolume_name()
        group = self._generate_random_group_name()

        # create group.
        self._fs_cmd("subvolumegroup", "create", self.volname, group)

        # create subvolume in group.
        self._fs_cmd("subvolume", "create", self.volname, subvolname, "--group_name", group)

        # try to remove value for nonexisting key (as section does not exist)
        # Expecting ENOENT exit status because key does not exist
        try:
            self._fs_cmd("subvolume", "metadata", "rm", self.volname, subvolname, "key", "--group_name", group)
        except CommandFailedError as e:
            self.assertEqual(e.exitstatus, errno.ENOENT)
        else:
            self.fail("Expected ENOENT because section does not exist")

        self._fs_cmd("subvolume", "rm", self.volname, subvolname, group)
        self._fs_cmd("subvolumegroup", "rm", self.volname, group)

        # verify trash dir is clean.
        self._wait_for_trash_empty()

    def test_subvolume_user_metadata_remove_force(self):
        subvolname = self._generate_random_subvolume_name()
        group = self._generate_random_group_name()

        # create group.
        self._fs_cmd("subvolumegroup", "create", self.volname, group)

        # create subvolume in group.
        self._fs_cmd("subvolume", "create", self.volname, subvolname, "--group_name", group)

        # set metadata for subvolume.
        key = "key"
        value = "value"
        self._fs_cmd("subvolume", "metadata", "set", self.volname, subvolname, key, value, "--group_name", group)

        # remove metadata against specified key with --force option.
        try:
            self._fs_cmd("subvolume", "metadata", "rm", self.volname, subvolname, key, "--group_name", group, "--force")
        except CommandFailedError:
            self.fail("expected the 'fs subvolume metadata rm' command to succeed")

        # confirm key is removed by again fetching metadata
        try:
            self._fs_cmd("subvolume", "metadata", "get", self.volname, subvolname, key, "--group_name", group)
        except CommandFailedError as e:
            self.assertEqual(e.exitstatus, errno.ENOENT)
        else:
            self.fail("Expected ENOENT because key does not exist")

        self._fs_cmd("subvolume", "rm", self.volname, subvolname, group)
        self._fs_cmd("subvolumegroup", "rm", self.volname, group)

        # verify trash dir is clean.
        self._wait_for_trash_empty()

    def test_subvolume_user_metadata_remove_force_for_nonexisting_key(self):
        subvolname = self._generate_random_subvolume_name()
        group = self._generate_random_group_name()

        # create group.
        self._fs_cmd("subvolumegroup", "create", self.volname, group)

        # create subvolume in group.
        self._fs_cmd("subvolume", "create", self.volname, subvolname, "--group_name", group)

        # set metadata for subvolume.
        key = "key"
        value = "value"
        self._fs_cmd("subvolume", "metadata", "set", self.volname, subvolname, key, value, "--group_name", group)

        # remove metadata against specified key.
        try:
            self._fs_cmd("subvolume", "metadata", "rm", self.volname, subvolname, key, "--group_name", group)
        except CommandFailedError:
            self.fail("expected the 'fs subvolume metadata rm' command to succeed")

        # confirm key is removed by again fetching metadata
        try:
            self._fs_cmd("subvolume", "metadata", "get", self.volname, subvolname, key, "--group_name", group)
        except CommandFailedError as e:
            self.assertEqual(e.exitstatus, errno.ENOENT)
        else:
            self.fail("Expected ENOENT because key does not exist")

        # again remove metadata against already removed key with --force option.
        try:
            self._fs_cmd("subvolume", "metadata", "rm", self.volname, subvolname, key, "--group_name", group, "--force")
        except CommandFailedError:
            self.fail("expected the 'fs subvolume metadata rm' (with --force) command to succeed")

        self._fs_cmd("subvolume", "rm", self.volname, subvolname, group)
        self._fs_cmd("subvolumegroup", "rm", self.volname, group)

        # verify trash dir is clean.
        self._wait_for_trash_empty()

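    # As the two tests above show, '--force' makes 'metadata rm' succeed even
    # when the key is already gone, so cleanup can be written unconditionally;
    # a sketch (hypothetical helper):
    def _remove_metadata_if_present_example(self, subvolname, group, key):
        self._fs_cmd("subvolume", "metadata", "rm", self.volname, subvolname, key, "--group_name", group, "--force")
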
    def test_subvolume_user_metadata_set_and_get_for_legacy_subvolume(self):
        subvolname = self._generate_random_subvolume_name()
        group = self._generate_random_group_name()

        # emulate an old-fashioned subvolume in a custom group
        createpath = os.path.join(".", "volumes", group, subvolname)
        self.mount_a.run_shell(['mkdir', '-p', createpath], sudo=True)

        # set metadata for subvolume.
        key = "key"
        value = "value"
        try:
            self._fs_cmd("subvolume", "metadata", "set", self.volname, subvolname, key, value, "--group_name", group)
        except CommandFailedError:
            self.fail("expected the 'fs subvolume metadata set' command to succeed")

        # get value for specified key.
        try:
            ret = self._fs_cmd("subvolume", "metadata", "get", self.volname, subvolname, key, "--group_name", group)
        except CommandFailedError:
            self.fail("expected the 'fs subvolume metadata get' command to succeed")

        # remove '\n' from returned value.
        ret = ret.strip('\n')

        # match received value with expected value.
        self.assertEqual(value, ret)

        self._fs_cmd("subvolume", "rm", self.volname, subvolname, group)
        self._fs_cmd("subvolumegroup", "rm", self.volname, group)

        # verify trash dir is clean.
        self._wait_for_trash_empty()

    def test_subvolume_user_metadata_list_and_remove_for_legacy_subvolume(self):
        subvolname = self._generate_random_subvolume_name()
        group = self._generate_random_group_name()

        # emulate an old-fashioned subvolume in a custom group
        createpath = os.path.join(".", "volumes", group, subvolname)
        self.mount_a.run_shell(['mkdir', '-p', createpath], sudo=True)

        # set metadata for subvolume.
        input_metadata_dict = {f'key_{i}' : f'value_{i}' for i in range(3)}

        for k, v in input_metadata_dict.items():
            self._fs_cmd("subvolume", "metadata", "set", self.volname, subvolname, k, v, "--group_name", group)

        # list metadata
        try:
            ret = self._fs_cmd("subvolume", "metadata", "ls", self.volname, subvolname, "--group_name", group)
        except CommandFailedError:
            self.fail("expected the 'fs subvolume metadata ls' command to succeed")

        ret_dict = json.loads(ret)

        # compare output with expected output
        self.assertDictEqual(input_metadata_dict, ret_dict)

        # remove metadata against specified key.
        try:
            self._fs_cmd("subvolume", "metadata", "rm", self.volname, subvolname, "key_1", "--group_name", group)
        except CommandFailedError:
            self.fail("expected the 'fs subvolume metadata rm' command to succeed")

        # confirm key is removed by again fetching metadata
        try:
            self._fs_cmd("subvolume", "metadata", "get", self.volname, subvolname, "key_1", "--group_name", group)
        except CommandFailedError as e:
            self.assertEqual(e.exitstatus, errno.ENOENT)
        else:
            self.fail("Expected ENOENT because key_1 does not exist")

        self._fs_cmd("subvolume", "rm", self.volname, subvolname, group)
        self._fs_cmd("subvolumegroup", "rm", self.volname, group)

        # verify trash dir is clean.
        self._wait_for_trash_empty()

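    # The "old-fashioned" layout emulated above is simply a bare directory at
    # volumes/<group>/<subvol>, with no subvolume metadata alongside it; a
    # sketch of the path construction both legacy tests rely on:
    def _legacy_subvolume_path_example(self, group, subvolname):
        # relative to the mount root, matching the createpath above
        return os.path.join(".", "volumes", group, subvolname)
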
class TestSubvolumeGroupSnapshots(TestVolumesHelper):
    """Tests for FS subvolume group snapshot operations."""
    @unittest.skip("skipping subvolumegroup snapshot tests")
    def test_nonexistent_subvolume_group_snapshot_rm(self):
        subvolume = self._generate_random_subvolume_name()
        group = self._generate_random_group_name()
        snapshot = self._generate_random_snapshot_name()

        # create group
        self._fs_cmd("subvolumegroup", "create", self.volname, group)

        # create subvolume in group
        self._fs_cmd("subvolume", "create", self.volname, subvolume, "--group_name", group)

        # snapshot group
        self._fs_cmd("subvolumegroup", "snapshot", "create", self.volname, group, snapshot)

        # remove snapshot
        self._fs_cmd("subvolumegroup", "snapshot", "rm", self.volname, group, snapshot)

        # remove snapshot again -- should fail with ENOENT
        try:
            self._fs_cmd("subvolumegroup", "snapshot", "rm", self.volname, group, snapshot)
        except CommandFailedError as ce:
            if ce.exitstatus != errno.ENOENT:
                raise
        else:
            raise RuntimeError("expected the 'fs subvolumegroup snapshot rm' command to fail")

        # remove subvolume
        self._fs_cmd("subvolume", "rm", self.volname, subvolume, group)

        # verify trash dir is clean
        self._wait_for_trash_empty()

        # remove group
        self._fs_cmd("subvolumegroup", "rm", self.volname, group)

    @unittest.skip("skipping subvolumegroup snapshot tests")
    def test_subvolume_group_snapshot_create_and_rm(self):
        subvolume = self._generate_random_subvolume_name()
        group = self._generate_random_group_name()
        snapshot = self._generate_random_snapshot_name()

        # create group
        self._fs_cmd("subvolumegroup", "create", self.volname, group)

        # create subvolume in group
        self._fs_cmd("subvolume", "create", self.volname, subvolume, "--group_name", group)

        # snapshot group
        self._fs_cmd("subvolumegroup", "snapshot", "create", self.volname, group, snapshot)

        # remove snapshot
        self._fs_cmd("subvolumegroup", "snapshot", "rm", self.volname, group, snapshot)

        # remove subvolume
        self._fs_cmd("subvolume", "rm", self.volname, subvolume, group)

        # verify trash dir is clean
        self._wait_for_trash_empty()

        # remove group
        self._fs_cmd("subvolumegroup", "rm", self.volname, group)

    @unittest.skip("skipping subvolumegroup snapshot tests")
    def test_subvolume_group_snapshot_idempotence(self):
        subvolume = self._generate_random_subvolume_name()
        group = self._generate_random_group_name()
        snapshot = self._generate_random_snapshot_name()

        # create group
        self._fs_cmd("subvolumegroup", "create", self.volname, group)

        # create subvolume in group
        self._fs_cmd("subvolume", "create", self.volname, subvolume, "--group_name", group)

        # snapshot group
        self._fs_cmd("subvolumegroup", "snapshot", "create", self.volname, group, snapshot)

        # try creating snapshot w/ same snapshot name -- should be idempotent
        self._fs_cmd("subvolumegroup", "snapshot", "create", self.volname, group, snapshot)

        # remove snapshot
        self._fs_cmd("subvolumegroup", "snapshot", "rm", self.volname, group, snapshot)

        # remove subvolume
        self._fs_cmd("subvolume", "rm", self.volname, subvolume, group)

        # verify trash dir is clean
        self._wait_for_trash_empty()

        # remove group
        self._fs_cmd("subvolumegroup", "rm", self.volname, group)

    @unittest.skip("skipping subvolumegroup snapshot tests")
    def test_subvolume_group_snapshot_ls(self):
        # tests the 'fs subvolumegroup snapshot ls' command

        snapshots = []

        # create group
        group = self._generate_random_group_name()
        self._fs_cmd("subvolumegroup", "create", self.volname, group)

        # create subvolumegroup snapshots
        snapshots = self._generate_random_snapshot_name(3)
        for snapshot in snapshots:
            self._fs_cmd("subvolumegroup", "snapshot", "create", self.volname, group, snapshot)

        subvolgrpsnapshotls = json.loads(self._fs_cmd('subvolumegroup', 'snapshot', 'ls', self.volname, group))
        if len(subvolgrpsnapshotls) == 0:
            raise RuntimeError("Expected the 'fs subvolumegroup snapshot ls' command to list the created subvolume group snapshots")
        else:
            snapshotnames = [snapshot['name'] for snapshot in subvolgrpsnapshotls]
            if collections.Counter(snapshotnames) != collections.Counter(snapshots):
                raise RuntimeError("Error creating or listing subvolume group snapshots")

    @unittest.skip("skipping subvolumegroup snapshot tests")
    def test_subvolume_group_snapshot_rm_force(self):
        # test removing non-existing subvolume group snapshot with --force
        group = self._generate_random_group_name()
        snapshot = self._generate_random_snapshot_name()
        # remove snapshot
        try:
            self._fs_cmd("subvolumegroup", "snapshot", "rm", self.volname, group, snapshot, "--force")
        except CommandFailedError:
            raise RuntimeError("expected the 'fs subvolumegroup snapshot rm --force' command to succeed")

    def test_subvolume_group_snapshot_unsupported_status(self):
        group = self._generate_random_group_name()
        snapshot = self._generate_random_snapshot_name()

        # create group
        self._fs_cmd("subvolumegroup", "create", self.volname, group)

        # snapshot group -- should fail with ENOSYS since group snapshots are unsupported
        try:
            self._fs_cmd("subvolumegroup", "snapshot", "create", self.volname, group, snapshot)
        except CommandFailedError as ce:
            self.assertEqual(ce.exitstatus, errno.ENOSYS, "invalid error code on subvolumegroup snapshot create")
        else:
            self.fail("expected subvolumegroup snapshot create command to fail")

        # remove group
        self._fs_cmd("subvolumegroup", "rm", self.volname, group)


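# Note: subvolumegroup snapshot creation is currently rejected with ENOSYS
# (exercised by test_subvolume_group_snapshot_unsupported_status above), which
# is why the other group-snapshot tests in that class carry @unittest.skip.
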
class TestSubvolumeSnapshots(TestVolumesHelper):
    """Tests for FS subvolume snapshot operations."""
    def test_nonexistent_subvolume_snapshot_rm(self):
        subvolume = self._generate_random_subvolume_name()
        snapshot = self._generate_random_snapshot_name()

        # create subvolume
        self._fs_cmd("subvolume", "create", self.volname, subvolume)

        # snapshot subvolume
        self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolume, snapshot)

        # remove snapshot
        self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume, snapshot)

        # remove snapshot again -- should fail with ENOENT
        try:
            self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume, snapshot)
        except CommandFailedError as ce:
            if ce.exitstatus != errno.ENOENT:
                raise
        else:
            raise RuntimeError("expected the 'fs subvolume snapshot rm' command to fail")

        # remove subvolume
        self._fs_cmd("subvolume", "rm", self.volname, subvolume)

        # verify trash dir is clean
        self._wait_for_trash_empty()

    def test_subvolume_snapshot_create_and_rm(self):
        subvolume = self._generate_random_subvolume_name()
        snapshot = self._generate_random_snapshot_name()

        # create subvolume
        self._fs_cmd("subvolume", "create", self.volname, subvolume)

        # snapshot subvolume
        self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolume, snapshot)

        # remove snapshot
        self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume, snapshot)

        # remove subvolume
        self._fs_cmd("subvolume", "rm", self.volname, subvolume)

        # verify trash dir is clean
        self._wait_for_trash_empty()

    def test_subvolume_snapshot_create_idempotence(self):
        subvolume = self._generate_random_subvolume_name()
        snapshot = self._generate_random_snapshot_name()

        # create subvolume
        self._fs_cmd("subvolume", "create", self.volname, subvolume)

        # snapshot subvolume
        self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolume, snapshot)

        # try creating w/ same subvolume snapshot name -- should be idempotent
        self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolume, snapshot)

        # remove snapshot
        self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume, snapshot)

        # remove subvolume
        self._fs_cmd("subvolume", "rm", self.volname, subvolume)

        # verify trash dir is clean
        self._wait_for_trash_empty()

    def test_subvolume_snapshot_info(self):
        """
        tests the 'fs subvolume snapshot info' command
        """

        snap_md = ["created_at", "data_pool", "has_pending_clones", "size"]

        subvolume = self._generate_random_subvolume_name()
        snapshot, snap_missing = self._generate_random_snapshot_name(2)

        # create subvolume
        self._fs_cmd("subvolume", "create", self.volname, subvolume, "--mode=777")

        # do some IO
        self._do_subvolume_io(subvolume, number_of_files=1)

        # snapshot subvolume
        self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolume, snapshot)

        snap_info = json.loads(self._get_subvolume_snapshot_info(self.volname, subvolume, snapshot))
        for md in snap_md:
            self.assertIn(md, snap_info, "'{0}' key not present in metadata of snapshot".format(md))
        self.assertEqual(snap_info["has_pending_clones"], "no")

        # snapshot info for non-existent snapshot
        try:
            self._get_subvolume_snapshot_info(self.volname, subvolume, snap_missing)
        except CommandFailedError as ce:
            self.assertEqual(ce.exitstatus, errno.ENOENT, "invalid error code on snapshot info of non-existent snapshot")
        else:
            self.fail("expected snapshot info of non-existent snapshot to fail")

        # remove snapshot
        self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume, snapshot)

        # remove subvolume
        self._fs_cmd("subvolume", "rm", self.volname, subvolume)

        # verify trash dir is clean
        self._wait_for_trash_empty()

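    # Sketch of consuming 'snapshot info' output (hypothetical helper); the
    # keys asserted above -- created_at, data_pool, has_pending_clones, size --
    # are the metadata fields this test relies on, and has_pending_clones is
    # reported as the string "yes"/"no" rather than a boolean.
    def _snapshot_has_pending_clones_example(self, subvolume, snapshot):
        snap_info = json.loads(self._get_subvolume_snapshot_info(self.volname, subvolume, snapshot))
        return snap_info["has_pending_clones"] == "yes"
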
    def test_subvolume_snapshot_in_group(self):
        subvolume = self._generate_random_subvolume_name()
        group = self._generate_random_group_name()
        snapshot = self._generate_random_snapshot_name()

        # create group
        self._fs_cmd("subvolumegroup", "create", self.volname, group)

        # create subvolume in group
        self._fs_cmd("subvolume", "create", self.volname, subvolume, "--group_name", group)

        # snapshot subvolume in group
        self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolume, snapshot, group)

        # remove snapshot
        self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume, snapshot, group)

        # remove subvolume
        self._fs_cmd("subvolume", "rm", self.volname, subvolume, group)

        # verify trash dir is clean
        self._wait_for_trash_empty()

        # remove group
        self._fs_cmd("subvolumegroup", "rm", self.volname, group)

    def test_subvolume_snapshot_ls(self):
        # tests the 'fs subvolume snapshot ls' command

        snapshots = []

        # create subvolume
        subvolume = self._generate_random_subvolume_name()
        self._fs_cmd("subvolume", "create", self.volname, subvolume)

        # create subvolume snapshots
        snapshots = self._generate_random_snapshot_name(3)
        for snapshot in snapshots:
            self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolume, snapshot)

        subvolsnapshotls = json.loads(self._fs_cmd('subvolume', 'snapshot', 'ls', self.volname, subvolume))
        if len(subvolsnapshotls) == 0:
            self.fail("Expected the 'fs subvolume snapshot ls' command to list the created subvolume snapshots")
        else:
            snapshotnames = [snapshot['name'] for snapshot in subvolsnapshotls]
            if collections.Counter(snapshotnames) != collections.Counter(snapshots):
                self.fail("Error creating or listing subvolume snapshots")

        # remove snapshots
        for snapshot in snapshots:
            self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume, snapshot)

        # remove subvolume
        self._fs_cmd("subvolume", "rm", self.volname, subvolume)

        # verify trash dir is clean
        self._wait_for_trash_empty()

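    # Sketch of extracting snapshot names from 'snapshot ls' (hypothetical
    # helper), mirroring the json.loads() + list-comprehension pattern above:
    def _snapshot_names_example(self, subvolume):
        ls = json.loads(self._fs_cmd('subvolume', 'snapshot', 'ls', self.volname, subvolume))
        return [s['name'] for s in ls]
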
    def test_subvolume_inherited_snapshot_ls(self):
        # tests the scenario where the 'fs subvolume snapshot ls' command
        # should not list inherited snapshots created as part of a snapshot
        # at the ancestral (group) level

        snapshots = []
        subvolume = self._generate_random_subvolume_name()
        group = self._generate_random_group_name()
        snap_count = 3

        # create group
        self._fs_cmd("subvolumegroup", "create", self.volname, group)

        # create subvolume in group
        self._fs_cmd("subvolume", "create", self.volname, subvolume, "--group_name", group)

        # create subvolume snapshots
        snapshots = self._generate_random_snapshot_name(snap_count)
        for snapshot in snapshots:
            self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolume, snapshot, group)

        # Create snapshots at ancestral level
        ancestral_snappath1 = os.path.join(".", "volumes", group, ".snap", "ancestral_snap_1")
        ancestral_snappath2 = os.path.join(".", "volumes", group, ".snap", "ancestral_snap_2")
        self.mount_a.run_shell(['mkdir', '-p', ancestral_snappath1, ancestral_snappath2], sudo=True)

        subvolsnapshotls = json.loads(self._fs_cmd('subvolume', 'snapshot', 'ls', self.volname, subvolume, group))
        self.assertEqual(len(subvolsnapshotls), snap_count)

        # remove ancestral snapshots
        self.mount_a.run_shell(['rmdir', ancestral_snappath1, ancestral_snappath2], sudo=True)

        # remove snapshots
        for snapshot in snapshots:
            self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume, snapshot, group)

        # remove subvolume
        self._fs_cmd("subvolume", "rm", self.volname, subvolume, group)

        # verify trash dir is clean
        self._wait_for_trash_empty()

        # remove group
        self._fs_cmd("subvolumegroup", "rm", self.volname, group)

    def test_subvolume_inherited_snapshot_info(self):
        """
        tests the scenario where the 'fs subvolume snapshot info' command
        should fail for inherited snapshots created as part of a snapshot
        at the ancestral (group) level
        """

        subvolume = self._generate_random_subvolume_name()
        group = self._generate_random_group_name()

        # create group
        self._fs_cmd("subvolumegroup", "create", self.volname, group)

        # create subvolume in group
        self._fs_cmd("subvolume", "create", self.volname, subvolume, "--group_name", group)

        # Create snapshot at ancestral level
        ancestral_snap_name = "ancestral_snap_1"
        ancestral_snappath1 = os.path.join(".", "volumes", group, ".snap", ancestral_snap_name)
        self.mount_a.run_shell(['mkdir', '-p', ancestral_snappath1], sudo=True)

        # Validate existence of inherited snapshot
        group_path = os.path.join(".", "volumes", group)
        inode_number_group_dir = int(self.mount_a.run_shell(['stat', '-c', '%i', group_path]).stdout.getvalue().strip())
        inherited_snap = "_{0}_{1}".format(ancestral_snap_name, inode_number_group_dir)
        inherited_snappath = os.path.join(".", "volumes", group, subvolume, ".snap", inherited_snap)
        self.mount_a.run_shell(['ls', inherited_snappath])

        # snapshot info on inherited snapshot -- should fail with EINVAL
        try:
            self._get_subvolume_snapshot_info(self.volname, subvolume, inherited_snap, group)
        except CommandFailedError as ce:
            self.assertEqual(ce.exitstatus, errno.EINVAL, "invalid error code on snapshot info of inherited snapshot")
        else:
            self.fail("expected snapshot info of inherited snapshot to fail")

        # remove ancestral snapshots
        self.mount_a.run_shell(['rmdir', ancestral_snappath1], sudo=True)

        # remove subvolume
        self._fs_cmd("subvolume", "rm", self.volname, subvolume, "--group_name", group)

        # verify trash dir is clean
        self._wait_for_trash_empty()

        # remove group
        self._fs_cmd("subvolumegroup", "rm", self.volname, group)

    def test_subvolume_inherited_snapshot_rm(self):
        """
        tests the scenario where the 'fs subvolume snapshot rm' command
        should fail for inherited snapshots created as part of a snapshot
        at the ancestral (group) level
        """

        subvolume = self._generate_random_subvolume_name()
        group = self._generate_random_group_name()

        # create group
        self._fs_cmd("subvolumegroup", "create", self.volname, group)

        # create subvolume in group
        self._fs_cmd("subvolume", "create", self.volname, subvolume, "--group_name", group)

        # Create snapshot at ancestral level
        ancestral_snap_name = "ancestral_snap_1"
        ancestral_snappath1 = os.path.join(".", "volumes", group, ".snap", ancestral_snap_name)
        self.mount_a.run_shell(['mkdir', '-p', ancestral_snappath1], sudo=True)

        # Validate existence of inherited snap
        group_path = os.path.join(".", "volumes", group)
        inode_number_group_dir = int(self.mount_a.run_shell(['stat', '-c', '%i', group_path]).stdout.getvalue().strip())
        inherited_snap = "_{0}_{1}".format(ancestral_snap_name, inode_number_group_dir)
        inherited_snappath = os.path.join(".", "volumes", group, subvolume, ".snap", inherited_snap)
        self.mount_a.run_shell(['ls', inherited_snappath])

        # inherited snapshot should not be deletable
        try:
            self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume, inherited_snap, "--group_name", group)
        except CommandFailedError as ce:
            self.assertEqual(ce.exitstatus, errno.EINVAL, msg="invalid error code when removing inherited snapshot")
        else:
            self.fail("expected removing inherited snapshot to fail")

        # remove ancestral snapshots
        self.mount_a.run_shell(['rmdir', ancestral_snappath1], sudo=True)

        # remove subvolume
        self._fs_cmd("subvolume", "rm", self.volname, subvolume, group)

        # verify trash dir is clean
        self._wait_for_trash_empty()

        # remove group
        self._fs_cmd("subvolumegroup", "rm", self.volname, group)

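    # Inherited snapshot names, as validated by the two tests above, are
    # derived from the ancestral snapshot name and the inode number of the
    # group directory; a sketch of that construction (hypothetical helper):
    def _inherited_snap_name_example(self, group, ancestral_snap_name):
        group_path = os.path.join(".", "volumes", group)
        inode = int(self.mount_a.run_shell(['stat', '-c', '%i', group_path]).stdout.getvalue().strip())
        return "_{0}_{1}".format(ancestral_snap_name, inode)
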
    def test_subvolume_subvolumegroup_snapshot_name_conflict(self):
        """
        tests that creating a subvolume snapshot with the same name as an
        existing snapshot of its subvolumegroup fails
        """

        subvolume = self._generate_random_subvolume_name()
        group = self._generate_random_group_name()
        group_snapshot = self._generate_random_snapshot_name()

        # create group
        self._fs_cmd("subvolumegroup", "create", self.volname, group)

        # create subvolume in group
        self._fs_cmd("subvolume", "create", self.volname, subvolume, "--group_name", group)

        # Create subvolumegroup snapshot
        group_snapshot_path = os.path.join(".", "volumes", group, ".snap", group_snapshot)
        self.mount_a.run_shell(['mkdir', '-p', group_snapshot_path], sudo=True)

        # Validate existence of subvolumegroup snapshot
        self.mount_a.run_shell(['ls', group_snapshot_path])

        # Creation of subvolume snapshot with the subvolumegroup snapshot name should fail
        try:
            self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolume, group_snapshot, "--group_name", group)
        except CommandFailedError as ce:
            self.assertEqual(ce.exitstatus, errno.EINVAL, msg="invalid error code when creating subvolume snapshot with same name as subvolume group snapshot")
        else:
            self.fail("expected subvolume snapshot creation with same name as subvolumegroup snapshot to fail")

        # remove subvolumegroup snapshot
        self.mount_a.run_shell(['rmdir', group_snapshot_path], sudo=True)

        # remove subvolume
        self._fs_cmd("subvolume", "rm", self.volname, subvolume, group)

        # verify trash dir is clean
        self._wait_for_trash_empty()

        # remove group
        self._fs_cmd("subvolumegroup", "rm", self.volname, group)

    def test_subvolume_retain_snapshot_invalid_recreate(self):
        """
        ensure retained subvolume recreate does not leave any incarnations in the subvolume and trash
        """
        subvolume = self._generate_random_subvolume_name()
        snapshot = self._generate_random_snapshot_name()

        # create subvolume
        self._fs_cmd("subvolume", "create", self.volname, subvolume)

        # snapshot subvolume
        self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolume, snapshot)

        # remove with snapshot retention
        self._fs_cmd("subvolume", "rm", self.volname, subvolume, "--retain-snapshots")

        # recreate subvolume with an invalid pool
        data_pool = "invalid_pool"
        try:
            self._fs_cmd("subvolume", "create", self.volname, subvolume, "--pool_layout", data_pool)
        except CommandFailedError as ce:
            self.assertEqual(ce.exitstatus, errno.EINVAL, "invalid error code on recreate of subvolume with invalid poolname")
        else:
            self.fail("expected recreate of subvolume with invalid poolname to fail")

        # fetch info
        subvol_info = json.loads(self._fs_cmd("subvolume", "info", self.volname, subvolume))
        self.assertEqual(subvol_info["state"], "snapshot-retained",
                         msg="expected state to be 'snapshot-retained', found '{0}'".format(subvol_info["state"]))

        # getpath
        try:
            self._fs_cmd("subvolume", "getpath", self.volname, subvolume)
        except CommandFailedError as ce:
            self.assertEqual(ce.exitstatus, errno.ENOENT, "invalid error code on getpath of subvolume with retained snapshots")
        else:
            self.fail("expected getpath of subvolume with retained snapshots to fail")

        # remove snapshot (should remove volume)
        self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume, snapshot)

        # verify trash dir is clean
        self._wait_for_trash_empty()

    def test_subvolume_retain_snapshot_recreate_subvolume(self):
        """
        ensure a retained subvolume can be recreated and further snapshotted
        """
        snap_md = ["created_at", "data_pool", "has_pending_clones", "size"]

        subvolume = self._generate_random_subvolume_name()
        snapshot1, snapshot2 = self._generate_random_snapshot_name(2)

        # create subvolume
        self._fs_cmd("subvolume", "create", self.volname, subvolume)

        # snapshot subvolume
        self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolume, snapshot1)

        # remove with snapshot retention
        self._fs_cmd("subvolume", "rm", self.volname, subvolume, "--retain-snapshots")

        # fetch info
        subvol_info = json.loads(self._fs_cmd("subvolume", "info", self.volname, subvolume))
        self.assertEqual(subvol_info["state"], "snapshot-retained",
                         msg="expected state to be 'snapshot-retained', found '{0}'".format(subvol_info["state"]))

        # recreate retained subvolume
        self._fs_cmd("subvolume", "create", self.volname, subvolume)

        # fetch info
        subvol_info = json.loads(self._fs_cmd("subvolume", "info", self.volname, subvolume))
        self.assertEqual(subvol_info["state"], "complete",
                         msg="expected state to be 'complete', found '{0}'".format(subvol_info["state"]))

        # snapshot info (older snapshot)
        snap_info = json.loads(self._get_subvolume_snapshot_info(self.volname, subvolume, snapshot1))
        for md in snap_md:
            self.assertIn(md, snap_info, "'{0}' key not present in metadata of snapshot".format(md))
        self.assertEqual(snap_info["has_pending_clones"], "no")

        # snap-create (new snapshot)
        self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolume, snapshot2)

        # remove with retain snapshots
        self._fs_cmd("subvolume", "rm", self.volname, subvolume, "--retain-snapshots")

        # list snapshots
        subvolsnapshotls = json.loads(self._fs_cmd('subvolume', 'snapshot', 'ls', self.volname, subvolume))
        self.assertEqual(len(subvolsnapshotls), 2, "Expected the 'fs subvolume snapshot ls' command to list the"
                         " created subvolume snapshots")
        snapshotnames = [snapshot['name'] for snapshot in subvolsnapshotls]
        for snap in [snapshot1, snapshot2]:
            self.assertIn(snap, snapshotnames, "Missing snapshot '{0}' in snapshot list".format(snap))

        # remove snapshots (should remove volume)
        self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume, snapshot1)
        self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume, snapshot2)

        # verify list subvolumes returns an empty list
        subvolumels = json.loads(self._fs_cmd('subvolume', 'ls', self.volname))
        self.assertEqual(len(subvolumels), 0)

        # verify trash dir is clean
        self._wait_for_trash_empty()

    def test_subvolume_retain_snapshot_with_snapshots(self):
        """
        ensure retain-snapshots based delete of a subvolume with snapshots retains the subvolume
        also test allowed and disallowed operations on a retained subvolume
        """
        snap_md = ["created_at", "data_pool", "has_pending_clones", "size"]

        subvolume = self._generate_random_subvolume_name()
        snapshot = self._generate_random_snapshot_name()

        # create subvolume
        self._fs_cmd("subvolume", "create", self.volname, subvolume)

        # snapshot subvolume
        self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolume, snapshot)

        # remove subvolume -- should fail with ENOTEMPTY since it has snapshots
        try:
            self._fs_cmd("subvolume", "rm", self.volname, subvolume)
        except CommandFailedError as ce:
            self.assertEqual(ce.exitstatus, errno.ENOTEMPTY, "invalid error code on rm of retained subvolume with snapshots")
        else:
            self.fail("expected rm of subvolume with retained snapshots to fail")

        # remove with snapshot retention
        self._fs_cmd("subvolume", "rm", self.volname, subvolume, "--retain-snapshots")

        # fetch info
        subvol_info = json.loads(self._fs_cmd("subvolume", "info", self.volname, subvolume))
        self.assertEqual(subvol_info["state"], "snapshot-retained",
                         msg="expected state to be 'snapshot-retained', found '{0}'".format(subvol_info["state"]))

        ## test allowed ops in retained state
        # ls
        subvolumes = json.loads(self._fs_cmd('subvolume', 'ls', self.volname))
        self.assertEqual(len(subvolumes), 1, "subvolume ls count mismatch, expected '1', found {0}".format(len(subvolumes)))
        self.assertEqual(subvolumes[0]['name'], subvolume,
                         "subvolume name mismatch in ls output, expected '{0}', found '{1}'".format(subvolume, subvolumes[0]['name']))

        # snapshot info
        snap_info = json.loads(self._get_subvolume_snapshot_info(self.volname, subvolume, snapshot))
        for md in snap_md:
            self.assertIn(md, snap_info, "'{0}' key not present in metadata of snapshot".format(md))
        self.assertEqual(snap_info["has_pending_clones"], "no")

        # rm --force (allowed but should fail)
        try:
            self._fs_cmd("subvolume", "rm", self.volname, subvolume, "--force")
        except CommandFailedError as ce:
            self.assertEqual(ce.exitstatus, errno.ENOTEMPTY, "invalid error code on rm of subvolume with retained snapshots")
        else:
            self.fail("expected rm of subvolume with retained snapshots to fail")

        # rm (allowed but should fail)
        try:
            self._fs_cmd("subvolume", "rm", self.volname, subvolume)
        except CommandFailedError as ce:
            self.assertEqual(ce.exitstatus, errno.ENOTEMPTY, "invalid error code on rm of subvolume with retained snapshots")
        else:
            self.fail("expected rm of subvolume with retained snapshots to fail")

        ## test disallowed ops
        # getpath
        try:
            self._fs_cmd("subvolume", "getpath", self.volname, subvolume)
        except CommandFailedError as ce:
            self.assertEqual(ce.exitstatus, errno.ENOENT, "invalid error code on getpath of subvolume with retained snapshots")
        else:
            self.fail("expected getpath of subvolume with retained snapshots to fail")

        # resize
        nsize = self.DEFAULT_FILE_SIZE*1024*1024
        try:
            self._fs_cmd("subvolume", "resize", self.volname, subvolume, str(nsize))
        except CommandFailedError as ce:
            self.assertEqual(ce.exitstatus, errno.ENOENT, "invalid error code on resize of subvolume with retained snapshots")
        else:
            self.fail("expected resize of subvolume with retained snapshots to fail")

        # snap-create
        try:
            self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolume, "fail")
        except CommandFailedError as ce:
            self.assertEqual(ce.exitstatus, errno.ENOENT, "invalid error code on snapshot create of subvolume with retained snapshots")
        else:
            self.fail("expected snapshot create of subvolume with retained snapshots to fail")

        # remove snapshot (should remove volume)
        self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume, snapshot)

        # verify list subvolumes returns an empty list
        subvolumels = json.loads(self._fs_cmd('subvolume', 'ls', self.volname))
        self.assertEqual(len(subvolumels), 0)

        # verify trash dir is clean
        self._wait_for_trash_empty()

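    # With '--retain-snapshots' the subvolume object survives in the
    # 'snapshot-retained' state until its last snapshot is removed; a sketch
    # of querying that state (hypothetical helper, mirrors the info checks
    # above):
    def _subvolume_state_example(self, subvolume):
        subvol_info = json.loads(self._fs_cmd("subvolume", "info", self.volname, subvolume))
        return subvol_info["state"]  # e.g. "complete" or "snapshot-retained"
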
    def test_subvolume_retain_snapshot_without_snapshots(self):
        """
        ensure retain-snapshots based delete of a subvolume with no snapshots deletes the subvolume
        """
        subvolume = self._generate_random_subvolume_name()

        # create subvolume
        self._fs_cmd("subvolume", "create", self.volname, subvolume)

        # remove with snapshot retention (should remove volume, no snapshots to retain)
        self._fs_cmd("subvolume", "rm", self.volname, subvolume, "--retain-snapshots")

        # verify list subvolumes returns an empty list
        subvolumels = json.loads(self._fs_cmd('subvolume', 'ls', self.volname))
        self.assertEqual(len(subvolumels), 0)

        # verify trash dir is clean
        self._wait_for_trash_empty()

    def test_subvolume_retain_snapshot_trash_busy_recreate(self):
        """
        ensure retained subvolume recreate fails if its trash is not yet purged
        """
        subvolume = self._generate_random_subvolume_name()
        snapshot = self._generate_random_snapshot_name()

        # create subvolume
        self._fs_cmd("subvolume", "create", self.volname, subvolume)

        # snapshot subvolume
        self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolume, snapshot)

        # remove with snapshot retention
        self._fs_cmd("subvolume", "rm", self.volname, subvolume, "--retain-snapshots")

        # fake a trash entry
        self._update_fake_trash(subvolume)

        # recreate subvolume -- should fail with EAGAIN while the purge is pending
        try:
            self._fs_cmd("subvolume", "create", self.volname, subvolume)
        except CommandFailedError as ce:
            self.assertEqual(ce.exitstatus, errno.EAGAIN, "invalid error code on recreate of subvolume with purge pending")
        else:
            self.fail("expected recreate of subvolume with purge pending to fail")

        # clear fake trash entry
        self._update_fake_trash(subvolume, create=False)

        # recreate subvolume
        self._fs_cmd("subvolume", "create", self.volname, subvolume)

        # remove snapshot
        self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume, snapshot)

        # remove subvolume
        self._fs_cmd("subvolume", "rm", self.volname, subvolume)

        # verify trash dir is clean
        self._wait_for_trash_empty()

    def test_subvolume_rm_with_snapshots(self):
        subvolume = self._generate_random_subvolume_name()
        snapshot = self._generate_random_snapshot_name()

        # create subvolume
        self._fs_cmd("subvolume", "create", self.volname, subvolume)

        # snapshot subvolume
        self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolume, snapshot)

        # remove subvolume -- should fail with ENOTEMPTY since it has snapshots
        try:
            self._fs_cmd("subvolume", "rm", self.volname, subvolume)
        except CommandFailedError as ce:
            if ce.exitstatus != errno.ENOTEMPTY:
                raise RuntimeError("invalid error code returned when deleting subvolume with snapshots")
        else:
            raise RuntimeError("expected subvolume deletion to fail")

        # remove snapshot
        self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume, snapshot)

        # remove subvolume
        self._fs_cmd("subvolume", "rm", self.volname, subvolume)

        # verify trash dir is clean
        self._wait_for_trash_empty()

    def test_subvolume_snapshot_protect_unprotect_sanity(self):
        """
        Snapshot protect/unprotect commands are deprecated. This test exists to ensure that
        invoking the commands does not cause errors, until they are removed in a subsequent release.
        """
        subvolume = self._generate_random_subvolume_name()
        snapshot = self._generate_random_snapshot_name()
        clone = self._generate_random_clone_name()

        # create subvolume
        self._fs_cmd("subvolume", "create", self.volname, subvolume, "--mode=777")

        # do some IO
        self._do_subvolume_io(subvolume, number_of_files=64)

        # snapshot subvolume
        self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolume, snapshot)

        # now, protect snapshot
        self._fs_cmd("subvolume", "snapshot", "protect", self.volname, subvolume, snapshot)

        # schedule a clone
        self._fs_cmd("subvolume", "snapshot", "clone", self.volname, subvolume, snapshot, clone)

        # check clone status
        self._wait_for_clone_to_complete(clone)

        # now, unprotect snapshot
        self._fs_cmd("subvolume", "snapshot", "unprotect", self.volname, subvolume, snapshot)

        # verify clone
        self._verify_clone(subvolume, snapshot, clone)

        # remove snapshot
        self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume, snapshot)

        # remove subvolumes
        self._fs_cmd("subvolume", "rm", self.volname, subvolume)
        self._fs_cmd("subvolume", "rm", self.volname, clone)

        # verify trash dir is clean
        self._wait_for_trash_empty()

    def test_subvolume_snapshot_rm_force(self):
        # test removing non-existing subvolume snapshot with --force
        subvolume = self._generate_random_subvolume_name()
        snapshot = self._generate_random_snapshot_name()

        # remove snapshot
        try:
            self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume, snapshot, "--force")
        except CommandFailedError:
            raise RuntimeError("expected the 'fs subvolume snapshot rm --force' command to succeed")

    def test_subvolume_snapshot_metadata_set(self):
        """
        Set custom metadata for subvolume snapshot.
        """
        subvolname = self._generate_random_subvolume_name()
        group = self._generate_random_group_name()
        snapshot = self._generate_random_snapshot_name()

        # create group.
        self._fs_cmd("subvolumegroup", "create", self.volname, group)

        # create subvolume in group.
        self._fs_cmd("subvolume", "create", self.volname, subvolname, group)

        # snapshot subvolume
        self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolname, snapshot, group)

        # set metadata for snapshot.
        key = "key"
        value = "value"
        try:
            self._fs_cmd("subvolume", "snapshot", "metadata", "set", self.volname, subvolname, snapshot, key, value, group)
        except CommandFailedError:
            self.fail("expected the 'fs subvolume snapshot metadata set' command to succeed")

        self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolname, snapshot, group)
        self._fs_cmd("subvolume", "rm", self.volname, subvolname, group)
        self._fs_cmd("subvolumegroup", "rm", self.volname, group)

        # verify trash dir is clean.
        self._wait_for_trash_empty()

    def test_subvolume_snapshot_metadata_set_idempotence(self):
        """
        Set custom metadata for subvolume snapshot (Idempotency).
        """
        subvolname = self._generate_random_subvolume_name()
        group = self._generate_random_group_name()
        snapshot = self._generate_random_snapshot_name()

        # create group.
        self._fs_cmd("subvolumegroup", "create", self.volname, group)

        # create subvolume in group.
        self._fs_cmd("subvolume", "create", self.volname, subvolname, group)

        # snapshot subvolume
        self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolname, snapshot, group)

        # set metadata for snapshot.
        key = "key"
        value = "value"
        try:
            self._fs_cmd("subvolume", "snapshot", "metadata", "set", self.volname, subvolname, snapshot, key, value, group)
        except CommandFailedError:
            self.fail("expected the 'fs subvolume snapshot metadata set' command to succeed")

        # set same metadata again for subvolume.
        try:
            self._fs_cmd("subvolume", "snapshot", "metadata", "set", self.volname, subvolname, snapshot, key, value, group)
        except CommandFailedError:
            self.fail("expected the 'fs subvolume snapshot metadata set' command to succeed because it is an idempotent operation")

        # get value for specified key.
        try:
            ret = self._fs_cmd("subvolume", "snapshot", "metadata", "get", self.volname, subvolname, snapshot, key, group)
        except CommandFailedError:
            self.fail("expected the 'fs subvolume snapshot metadata get' command to succeed")

        # remove '\n' from returned value.
        ret = ret.strip('\n')

        # match received value with expected value.
        self.assertEqual(value, ret)

        self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolname, snapshot, group)
        self._fs_cmd("subvolume", "rm", self.volname, subvolname, group)
        self._fs_cmd("subvolumegroup", "rm", self.volname, group)

        # verify trash dir is clean.
        self._wait_for_trash_empty()

    def test_subvolume_snapshot_metadata_get(self):
        """
        Get custom metadata for a specified key in subvolume snapshot metadata.
        """
        subvolname = self._generate_random_subvolume_name()
        group = self._generate_random_group_name()
        snapshot = self._generate_random_snapshot_name()

        # create group.
        self._fs_cmd("subvolumegroup", "create", self.volname, group)

        # create subvolume in group.
        self._fs_cmd("subvolume", "create", self.volname, subvolname, group)

        # snapshot subvolume
        self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolname, snapshot, group)

        # set metadata for snapshot.
        key = "key"
        value = "value"
        self._fs_cmd("subvolume", "snapshot", "metadata", "set", self.volname, subvolname, snapshot, key, value, group)

        # get value for specified key.
        try:
            ret = self._fs_cmd("subvolume", "snapshot", "metadata", "get", self.volname, subvolname, snapshot, key, group)
        except CommandFailedError:
            self.fail("expected the 'fs subvolume snapshot metadata get' command to succeed")

        # remove '\n' from returned value.
        ret = ret.strip('\n')

        # match received value with expected value.
        self.assertEqual(value, ret)

        self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolname, snapshot, group)
        self._fs_cmd("subvolume", "rm", self.volname, subvolname, group)
        self._fs_cmd("subvolumegroup", "rm", self.volname, group)

        # verify trash dir is clean.
        self._wait_for_trash_empty()

    def test_subvolume_snapshot_metadata_get_for_nonexisting_key(self):
        """
        Get custom metadata for a subvolume snapshot when the specified key does not exist in the metadata.
        """
        subvolname = self._generate_random_subvolume_name()
        group = self._generate_random_group_name()
        snapshot = self._generate_random_snapshot_name()

        # create group.
        self._fs_cmd("subvolumegroup", "create", self.volname, group)

        # create subvolume in group.
        self._fs_cmd("subvolume", "create", self.volname, subvolname, group)

        # snapshot subvolume
        self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolname, snapshot, group)

        # set metadata for snapshot.
        key = "key"
        value = "value"
        self._fs_cmd("subvolume", "snapshot", "metadata", "set", self.volname, subvolname, snapshot, key, value, group)

        # try to get value for nonexisting key
        # Expecting ENOENT exit status because key does not exist
        try:
            self._fs_cmd("subvolume", "snapshot", "metadata", "get", self.volname, subvolname, snapshot, "key_nonexist", group)
        except CommandFailedError as e:
            self.assertEqual(e.exitstatus, errno.ENOENT)
        else:
            self.fail("Expected ENOENT because 'key_nonexist' does not exist")

        self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolname, snapshot, group)
        self._fs_cmd("subvolume", "rm", self.volname, subvolname, group)
        self._fs_cmd("subvolumegroup", "rm", self.volname, group)

        # verify trash dir is clean.
        self._wait_for_trash_empty()

    def test_subvolume_snapshot_metadata_get_for_nonexisting_section(self):
        """
        Get custom metadata for a subvolume snapshot when no metadata has been added for it.
        """
        subvolname = self._generate_random_subvolume_name()
        group = self._generate_random_group_name()
        snapshot = self._generate_random_snapshot_name()

        # create group.
        self._fs_cmd("subvolumegroup", "create", self.volname, group)

        # create subvolume in group.
        self._fs_cmd("subvolume", "create", self.volname, subvolname, group)

        # snapshot subvolume
        self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolname, snapshot, group)

        # try to get value for nonexisting key (as section does not exist)
        # Expecting ENOENT exit status because key does not exist
        try:
            self._fs_cmd("subvolume", "snapshot", "metadata", "get", self.volname, subvolname, snapshot, "key", group)
        except CommandFailedError as e:
            self.assertEqual(e.exitstatus, errno.ENOENT)
        else:
            self.fail("Expected ENOENT because section does not exist")

        self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolname, snapshot, group)
        self._fs_cmd("subvolume", "rm", self.volname, subvolname, group)
        self._fs_cmd("subvolumegroup", "rm", self.volname, group)

        # verify trash dir is clean.
        self._wait_for_trash_empty()

    def test_subvolume_snapshot_metadata_update(self):
        """
        Update custom metadata for a specified key in subvolume snapshot metadata.
        """
        subvolname = self._generate_random_subvolume_name()
        group = self._generate_random_group_name()
        snapshot = self._generate_random_snapshot_name()

        # create group.
        self._fs_cmd("subvolumegroup", "create", self.volname, group)

        # create subvolume in group.
        self._fs_cmd("subvolume", "create", self.volname, subvolname, group)

        # snapshot subvolume
        self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolname, snapshot, group)

        # set metadata for snapshot.
        key = "key"
        value = "value"
        self._fs_cmd("subvolume", "snapshot", "metadata", "set", self.volname, subvolname, snapshot, key, value, group)

        # update metadata against key.
        new_value = "new_value"
        self._fs_cmd("subvolume", "snapshot", "metadata", "set", self.volname, subvolname, snapshot, key, new_value, group)

        # get metadata for specified key of snapshot.
        try:
            ret = self._fs_cmd("subvolume", "snapshot", "metadata", "get", self.volname, subvolname, snapshot, key, group)
        except CommandFailedError:
            self.fail("expected the 'fs subvolume snapshot metadata get' command to succeed")

        # remove '\n' from returned value.
        ret = ret.strip('\n')

        # match received value with expected value.
        self.assertEqual(new_value, ret)

        self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolname, snapshot, group)
        self._fs_cmd("subvolume", "rm", self.volname, subvolname, group)
        self._fs_cmd("subvolumegroup", "rm", self.volname, group)

        # verify trash dir is clean.
        self._wait_for_trash_empty()

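    # 'metadata ls' returns a flat JSON object mapping keys to values; the two
    # list tests below parse it with json.loads() and compare the whole dict,
    # expecting an empty '{}' when no metadata has ever been set.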
    def test_subvolume_snapshot_metadata_list(self):
        """
        List custom metadata for subvolume snapshot.
        """
        subvolname = self._generate_random_subvolume_name()
        group = self._generate_random_group_name()
        snapshot = self._generate_random_snapshot_name()

        # create group.
        self._fs_cmd("subvolumegroup", "create", self.volname, group)

        # create subvolume in group.
        self._fs_cmd("subvolume", "create", self.volname, subvolname, group)

        # snapshot subvolume
        self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolname, snapshot, group)

        # set metadata for snapshot.
        input_metadata_dict = {f'key_{i}' : f'value_{i}' for i in range(3)}

        for k, v in input_metadata_dict.items():
            self._fs_cmd("subvolume", "snapshot", "metadata", "set", self.volname, subvolname, snapshot, k, v, group)

        # list metadata
        try:
            ret_dict = json.loads(self._fs_cmd("subvolume", "snapshot", "metadata", "ls", self.volname, subvolname, snapshot, group))
        except CommandFailedError:
            self.fail("expected the 'fs subvolume snapshot metadata ls' command to succeed")

        # compare output with expected output
        self.assertDictEqual(input_metadata_dict, ret_dict)

        self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolname, snapshot, group)
        self._fs_cmd("subvolume", "rm", self.volname, subvolname, group)
        self._fs_cmd("subvolumegroup", "rm", self.volname, group)

        # verify trash dir is clean.
        self._wait_for_trash_empty()

    def test_subvolume_snapshot_metadata_list_if_no_metadata_set(self):
        """
        List custom metadata for a subvolume snapshot when no metadata has been added for it.
        """
        subvolname = self._generate_random_subvolume_name()
        group = self._generate_random_group_name()
        snapshot = self._generate_random_snapshot_name()

        # create group.
        self._fs_cmd("subvolumegroup", "create", self.volname, group)

        # create subvolume in group.
        self._fs_cmd("subvolume", "create", self.volname, subvolname, group)

        # snapshot subvolume
        self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolname, snapshot, group)

        # list metadata
        try:
            ret_dict = json.loads(self._fs_cmd("subvolume", "snapshot", "metadata", "ls", self.volname, subvolname, snapshot, group))
        except CommandFailedError:
            self.fail("expected the 'fs subvolume snapshot metadata ls' command to succeed")

        # compare output with expected output
        empty_dict = {}
        self.assertDictEqual(ret_dict, empty_dict)

        self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolname, snapshot, group)
        self._fs_cmd("subvolume", "rm", self.volname, subvolname, group)
        self._fs_cmd("subvolumegroup", "rm", self.volname, group)

        # verify trash dir is clean.
        self._wait_for_trash_empty()

    def test_subvolume_snapshot_metadata_remove(self):
        """
        Remove custom metadata for a specified key in subvolume snapshot metadata.
        """
        subvolname = self._generate_random_subvolume_name()
        group = self._generate_random_group_name()
        snapshot = self._generate_random_snapshot_name()

        # create group.
        self._fs_cmd("subvolumegroup", "create", self.volname, group)

        # create subvolume in group.
        self._fs_cmd("subvolume", "create", self.volname, subvolname, group)

        # snapshot subvolume
        self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolname, snapshot, group)

        # set metadata for snapshot.
        key = "key"
        value = "value"
        self._fs_cmd("subvolume", "snapshot", "metadata", "set", self.volname, subvolname, snapshot, key, value, group)

        # remove metadata against specified key.
        try:
            self._fs_cmd("subvolume", "snapshot", "metadata", "rm", self.volname, subvolname, snapshot, key, group)
        except CommandFailedError:
            self.fail("expected the 'fs subvolume snapshot metadata rm' command to succeed")

        # confirm key is removed by again fetching metadata
        try:
            self._fs_cmd("subvolume", "snapshot", "metadata", "get", self.volname, subvolname, snapshot, key, group)
        except CommandFailedError as e:
            self.assertEqual(e.exitstatus, errno.ENOENT)
        else:
            self.fail("Expected ENOENT because key does not exist")

        self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolname, snapshot, group)
        self._fs_cmd("subvolume", "rm", self.volname, subvolname, group)
        self._fs_cmd("subvolumegroup", "rm", self.volname, group)

        # verify trash dir is clean.
        self._wait_for_trash_empty()

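    # Lookups and removals of keys that were never set surface ENOENT, whether
    # only the key is missing or the snapshot's metadata section was never
    # created at all; the next two tests cover both cases for 'rm'.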
    def test_subvolume_snapshot_metadata_remove_for_nonexisting_key(self):
        """
        Remove custom metadata for a subvolume snapshot when the specified key does not exist in the metadata.
        """
        subvolname = self._generate_random_subvolume_name()
        group = self._generate_random_group_name()
        snapshot = self._generate_random_snapshot_name()

        # create group.
        self._fs_cmd("subvolumegroup", "create", self.volname, group)

        # create subvolume in group.
        self._fs_cmd("subvolume", "create", self.volname, subvolname, group)

        # snapshot subvolume
        self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolname, snapshot, group)

        # set metadata for snapshot.
        key = "key"
        value = "value"
        self._fs_cmd("subvolume", "snapshot", "metadata", "set", self.volname, subvolname, snapshot, key, value, group)

        # try to remove value for nonexisting key
        # Expecting ENOENT exit status because key does not exist
        try:
            self._fs_cmd("subvolume", "snapshot", "metadata", "rm", self.volname, subvolname, snapshot, "key_nonexist", group)
        except CommandFailedError as e:
            self.assertEqual(e.exitstatus, errno.ENOENT)
        else:
            self.fail("Expected ENOENT because 'key_nonexist' does not exist")

        self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolname, snapshot, group)
        self._fs_cmd("subvolume", "rm", self.volname, subvolname, group)
        self._fs_cmd("subvolumegroup", "rm", self.volname, group)

        # verify trash dir is clean.
        self._wait_for_trash_empty()

    def test_subvolume_snapshot_metadata_remove_for_nonexisting_section(self):
        """
        Remove custom metadata for a subvolume snapshot when no metadata has been added for it.
        """
        subvolname = self._generate_random_subvolume_name()
        group = self._generate_random_group_name()
        snapshot = self._generate_random_snapshot_name()

        # create group.
        self._fs_cmd("subvolumegroup", "create", self.volname, group)

        # create subvolume in group.
        self._fs_cmd("subvolume", "create", self.volname, subvolname, group)

        # snapshot subvolume
        self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolname, snapshot, group)

        # try to remove value for nonexisting key (as section does not exist)
        # Expecting ENOENT exit status because key does not exist
        try:
            self._fs_cmd("subvolume", "snapshot", "metadata", "rm", self.volname, subvolname, snapshot, "key", group)
        except CommandFailedError as e:
            self.assertEqual(e.exitstatus, errno.ENOENT)
        else:
            self.fail("Expected ENOENT because section does not exist")

        self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolname, snapshot, group)
        self._fs_cmd("subvolume", "rm", self.volname, subvolname, group)
        self._fs_cmd("subvolumegroup", "rm", self.volname, group)

        # verify trash dir is clean.
        self._wait_for_trash_empty()

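    # With '--force', 'metadata rm' becomes idempotent: removing a key that is
    # already absent succeeds instead of failing with ENOENT, as the two
    # force-remove tests below verify.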
    def test_subvolume_snapshot_metadata_remove_force(self):
        """
        Forcefully remove custom metadata for a specified key in subvolume snapshot metadata.
        """
        subvolname = self._generate_random_subvolume_name()
        group = self._generate_random_group_name()
        snapshot = self._generate_random_snapshot_name()

        # create group.
        self._fs_cmd("subvolumegroup", "create", self.volname, group)

        # create subvolume in group.
        self._fs_cmd("subvolume", "create", self.volname, subvolname, group)

        # snapshot subvolume
        self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolname, snapshot, group)

        # set metadata for snapshot.
        key = "key"
        value = "value"
        self._fs_cmd("subvolume", "snapshot", "metadata", "set", self.volname, subvolname, snapshot, key, value, group)

        # remove metadata against specified key with --force option.
        try:
            self._fs_cmd("subvolume", "snapshot", "metadata", "rm", self.volname, subvolname, snapshot, key, group, "--force")
        except CommandFailedError:
            self.fail("expected the 'fs subvolume snapshot metadata rm' command to succeed")

        # confirm key is removed by again fetching metadata
        try:
            self._fs_cmd("subvolume", "snapshot", "metadata", "get", self.volname, subvolname, snapshot, key, group)
        except CommandFailedError as e:
            self.assertEqual(e.exitstatus, errno.ENOENT)
        else:
            self.fail("Expected ENOENT because key does not exist")

        self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolname, snapshot, group)
        self._fs_cmd("subvolume", "rm", self.volname, subvolname, group)
        self._fs_cmd("subvolumegroup", "rm", self.volname, group)

        # verify trash dir is clean.
        self._wait_for_trash_empty()

    def test_subvolume_snapshot_metadata_remove_force_for_nonexisting_key(self):
        """
        Forcefully remove custom metadata for a subvolume snapshot when the specified key does not exist in the metadata.
        """
        subvolname = self._generate_random_subvolume_name()
        group = self._generate_random_group_name()
        snapshot = self._generate_random_snapshot_name()

        # create group.
        self._fs_cmd("subvolumegroup", "create", self.volname, group)

        # create subvolume in group.
        self._fs_cmd("subvolume", "create", self.volname, subvolname, group)

        # snapshot subvolume
        self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolname, snapshot, group)

        # set metadata for snapshot.
        key = "key"
        value = "value"
        self._fs_cmd("subvolume", "snapshot", "metadata", "set", self.volname, subvolname, snapshot, key, value, group)

        # remove metadata against specified key.
        try:
            self._fs_cmd("subvolume", "snapshot", "metadata", "rm", self.volname, subvolname, snapshot, key, group)
        except CommandFailedError:
            self.fail("expected the 'fs subvolume snapshot metadata rm' command to succeed")

        # confirm key is removed by again fetching metadata
        try:
            self._fs_cmd("subvolume", "snapshot", "metadata", "get", self.volname, subvolname, snapshot, key, group)
        except CommandFailedError as e:
            self.assertEqual(e.exitstatus, errno.ENOENT)
        else:
            self.fail("Expected ENOENT because key does not exist")

        # again remove metadata against already removed key with --force option.
        try:
            self._fs_cmd("subvolume", "snapshot", "metadata", "rm", self.volname, subvolname, snapshot, key, group, "--force")
        except CommandFailedError:
            self.fail("expected the 'fs subvolume snapshot metadata rm' (with --force) command to succeed")

        self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolname, snapshot, group)
        self._fs_cmd("subvolume", "rm", self.volname, subvolname, group)
        self._fs_cmd("subvolumegroup", "rm", self.volname, group)

        # verify trash dir is clean.
        self._wait_for_trash_empty()

    def test_subvolume_snapshot_metadata_after_snapshot_remove(self):
        """
        Verify metadata removal of subvolume snapshot after snapshot removal.
        """
        subvolname = self._generate_random_subvolume_name()
        group = self._generate_random_group_name()
        snapshot = self._generate_random_snapshot_name()

        # create group.
        self._fs_cmd("subvolumegroup", "create", self.volname, group)

        # create subvolume in group.
        self._fs_cmd("subvolume", "create", self.volname, subvolname, group)

        # snapshot subvolume
        self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolname, snapshot, group)

        # set metadata for snapshot.
        key = "key"
        value = "value"
        self._fs_cmd("subvolume", "snapshot", "metadata", "set", self.volname, subvolname, snapshot, key, value, group)

        # get value for specified key.
        ret = self._fs_cmd("subvolume", "snapshot", "metadata", "get", self.volname, subvolname, snapshot, key, group)

        # remove '\n' from returned value.
        ret = ret.strip('\n')

        # match received value with expected value.
        self.assertEqual(value, ret)

        # remove subvolume snapshot.
        self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolname, snapshot, group)

        # try to get metadata after removing snapshot.
        # Expecting ENOENT with an error message stating the snapshot does not exist
        cmd_ret = self.mgr_cluster.mon_manager.run_cluster_cmd(
                args=["fs", "subvolume", "snapshot", "metadata", "get", self.volname, subvolname, snapshot, key, group],
                check_status=False, stdout=StringIO(), stderr=StringIO())
        self.assertEqual(cmd_ret.returncode, errno.ENOENT, "Expecting ENOENT error")
        self.assertIn(f"snapshot '{snapshot}' does not exist", cmd_ret.stderr.getvalue(),
                f"Expecting message: snapshot '{snapshot}' does not exist ")

        # confirm metadata is removed by searching section name in .meta file
        meta_path = os.path.join(".", "volumes", group, subvolname, ".meta")
        section_name = "SNAP_METADATA_" + snapshot
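
        # For illustration: after the 'set' above, the subvolume's .meta file
        # is expected to contain a section roughly of the form
        #   [SNAP_METADATA_<snapshot>]
        #   key = value
        # so removing the snapshot should drop the whole section, which the
        # grep below asserts.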
        try:
            self.mount_a.run_shell(f"sudo grep {section_name} {meta_path}", omit_sudo=False)
        except CommandFailedError as e:
            self.assertNotEqual(e.exitstatus, 0)
        else:
            self.fail("Expected non-zero exit status because the section should not exist")

        self._fs_cmd("subvolume", "rm", self.volname, subvolname, group)
        self._fs_cmd("subvolumegroup", "rm", self.volname, group)

        # verify trash dir is clean.
        self._wait_for_trash_empty()

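# A subvolume snapshot clone progresses through the states polled by the
# helpers above: "pending" -> "in-progress" -> "complete", ending up in
# "failed" or "canceled" when the copy errors out or is interrupted. The
# tests in the class below exercise each of these transitions.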
class TestSubvolumeSnapshotClones(TestVolumesHelper):
    """ Tests for FS subvolume snapshot clone operations."""
    def test_clone_subvolume_info(self):
        # tests the 'fs subvolume info' command for a clone
        subvol_md = ["atime", "bytes_pcent", "bytes_quota", "bytes_used", "created_at", "ctime",
                     "data_pool", "gid", "mode", "mon_addrs", "mtime", "path", "pool_namespace",
                     "type", "uid"]

        subvolume = self._generate_random_subvolume_name()
        snapshot = self._generate_random_snapshot_name()
        clone = self._generate_random_clone_name()

        # create subvolume
        self._fs_cmd("subvolume", "create", self.volname, subvolume, "--mode=777")

        # do some IO
        self._do_subvolume_io(subvolume, number_of_files=1)

        # snapshot subvolume
        self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolume, snapshot)

        # schedule a clone
        self._fs_cmd("subvolume", "snapshot", "clone", self.volname, subvolume, snapshot, clone)

        # check clone status
        self._wait_for_clone_to_complete(clone)

        # remove snapshot
        self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume, snapshot)

        subvol_info = json.loads(self._get_subvolume_info(self.volname, clone))
        if len(subvol_info) == 0:
            raise RuntimeError("Expected the 'fs subvolume info' command to list metadata of subvolume")
        for md in subvol_md:
            if md not in subvol_info.keys():
                raise RuntimeError("%s not present in the metadata of subvolume" % md)
        if subvol_info["type"] != "clone":
            raise RuntimeError("type should be set to clone")

        # remove subvolumes
        self._fs_cmd("subvolume", "rm", self.volname, subvolume)
        self._fs_cmd("subvolume", "rm", self.volname, clone)

        # verify trash dir is clean
        self._wait_for_trash_empty()

    def test_non_clone_status(self):
        subvolume = self._generate_random_subvolume_name()

        # create subvolume
        self._fs_cmd("subvolume", "create", self.volname, subvolume)

        try:
            self._fs_cmd("clone", "status", self.volname, subvolume)
        except CommandFailedError as ce:
            if ce.exitstatus != errno.ENOTSUP:
                raise RuntimeError("invalid error code when fetching status of a non-cloned subvolume")
        else:
            raise RuntimeError("expected fetching of clone status of a subvolume to fail")

        # remove subvolume
        self._fs_cmd("subvolume", "rm", self.volname, subvolume)

        # verify trash dir is clean
        self._wait_for_trash_empty()

    def test_subvolume_clone_inherit_snapshot_namespace_and_size(self):
        subvolume = self._generate_random_subvolume_name()
        snapshot = self._generate_random_snapshot_name()
        clone = self._generate_random_clone_name()
        osize = self.DEFAULT_FILE_SIZE*1024*1024*12

        # create subvolume, in an isolated namespace with a specified size
        self._fs_cmd("subvolume", "create", self.volname, subvolume, "--namespace-isolated", "--size", str(osize), "--mode=777")

        # do some IO
        self._do_subvolume_io(subvolume, number_of_files=8)

        # snapshot subvolume
        self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolume, snapshot)

        # create a pool different from current subvolume pool
        subvol_path = self._get_subvolume_path(self.volname, subvolume)
        default_pool = self.mount_a.getfattr(subvol_path, "ceph.dir.layout.pool")
        new_pool = "new_pool"
        self.assertNotEqual(default_pool, new_pool)
        self.fs.add_data_pool(new_pool)

        # update source subvolume pool
        self._do_subvolume_pool_and_namespace_update(subvolume, pool=new_pool, pool_namespace="")

        # schedule a clone, with NO --pool specification
        self._fs_cmd("subvolume", "snapshot", "clone", self.volname, subvolume, snapshot, clone)

        # check clone status
        self._wait_for_clone_to_complete(clone)

        # verify clone
        self._verify_clone(subvolume, snapshot, clone)

        # remove snapshot
        self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume, snapshot)

        # remove subvolumes
        self._fs_cmd("subvolume", "rm", self.volname, subvolume)
        self._fs_cmd("subvolume", "rm", self.volname, clone)

        # verify trash dir is clean
        self._wait_for_trash_empty()

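    # Subvolume size/quota attributes surface as CephFS vxattrs on the
    # subvolume root (ceph.quota.max_files is set via setfattr below); a clone
    # is expected to inherit them from the snapshotted source.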
    def test_subvolume_clone_inherit_quota_attrs(self):
        subvolume = self._generate_random_subvolume_name()
        snapshot = self._generate_random_snapshot_name()
        clone = self._generate_random_clone_name()
        osize = self.DEFAULT_FILE_SIZE*1024*1024*12

        # create subvolume with a specified size
        self._fs_cmd("subvolume", "create", self.volname, subvolume, "--mode=777", "--size", str(osize))

        # do some IO
        self._do_subvolume_io(subvolume, number_of_files=8)

        # get subvolume path
        subvolpath = self._get_subvolume_path(self.volname, subvolume)

        # set quota on number of files
        self.mount_a.setfattr(subvolpath, 'ceph.quota.max_files', "20", sudo=True)

        # snapshot subvolume
        self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolume, snapshot)

        # schedule a clone
        self._fs_cmd("subvolume", "snapshot", "clone", self.volname, subvolume, snapshot, clone)

        # check clone status
        self._wait_for_clone_to_complete(clone)

        # verify clone
        self._verify_clone(subvolume, snapshot, clone)

        # get clone path
        clonepath = self._get_subvolume_path(self.volname, clone)

        # verify quota max_files is inherited from source snapshot
        subvol_quota = self.mount_a.getfattr(subvolpath, "ceph.quota.max_files")
        clone_quota = self.mount_a.getfattr(clonepath, "ceph.quota.max_files")
        self.assertEqual(subvol_quota, clone_quota)

        # remove snapshot
        self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume, snapshot)

        # remove subvolumes
        self._fs_cmd("subvolume", "rm", self.volname, subvolume)
        self._fs_cmd("subvolume", "rm", self.volname, clone)

        # verify trash dir is clean
        self._wait_for_trash_empty()

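    # The next few tests need the clone to still be in flight when they poke
    # at it; raising the mgr config option 'mgr/volumes/snapshot_clone_delay'
    # delays the start of the copy, giving the checks a window to run.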
    def test_subvolume_clone_in_progress_getpath(self):
        subvolume = self._generate_random_subvolume_name()
        snapshot = self._generate_random_snapshot_name()
        clone = self._generate_random_clone_name()

        # create subvolume
        self._fs_cmd("subvolume", "create", self.volname, subvolume, "--mode=777")

        # do some IO
        self._do_subvolume_io(subvolume, number_of_files=64)

        # snapshot subvolume
        self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolume, snapshot)

        # Insert delay at the beginning of snapshot clone
        self.config_set('mgr', 'mgr/volumes/snapshot_clone_delay', 2)

        # schedule a clone
        self._fs_cmd("subvolume", "snapshot", "clone", self.volname, subvolume, snapshot, clone)

        # clone should not be accessible right now
        try:
            self._get_subvolume_path(self.volname, clone)
        except CommandFailedError as ce:
            if ce.exitstatus != errno.EAGAIN:
                raise RuntimeError("invalid error code when fetching path of a pending clone")
        else:
            raise RuntimeError("expected fetching path of a pending clone to fail")

        # check clone status
        self._wait_for_clone_to_complete(clone)

        # clone should be accessible now
        subvolpath = self._get_subvolume_path(self.volname, clone)
        self.assertNotEqual(subvolpath, None)

        # verify clone
        self._verify_clone(subvolume, snapshot, clone)

        # remove snapshot
        self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume, snapshot)

        # remove subvolumes
        self._fs_cmd("subvolume", "rm", self.volname, subvolume)
        self._fs_cmd("subvolume", "rm", self.volname, clone)

        # verify trash dir is clean
        self._wait_for_trash_empty()

    def test_subvolume_clone_in_progress_snapshot_rm(self):
        subvolume = self._generate_random_subvolume_name()
        snapshot = self._generate_random_snapshot_name()
        clone = self._generate_random_clone_name()

        # create subvolume
        self._fs_cmd("subvolume", "create", self.volname, subvolume, "--mode=777")

        # do some IO
        self._do_subvolume_io(subvolume, number_of_files=64)

        # snapshot subvolume
        self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolume, snapshot)

        # Insert delay at the beginning of snapshot clone
        self.config_set('mgr', 'mgr/volumes/snapshot_clone_delay', 2)

        # schedule a clone
        self._fs_cmd("subvolume", "snapshot", "clone", self.volname, subvolume, snapshot, clone)

        # snapshot should not be deletable now
        try:
            self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume, snapshot)
        except CommandFailedError as ce:
            self.assertEqual(ce.exitstatus, errno.EAGAIN, msg="invalid error code when removing source snapshot of a clone")
        else:
            self.fail("expected removing source snapshot of a clone to fail")

        # check clone status
        self._wait_for_clone_to_complete(clone)

        # clone should be accessible now
        subvolpath = self._get_subvolume_path(self.volname, clone)
        self.assertNotEqual(subvolpath, None)

        # verify clone
        self._verify_clone(subvolume, snapshot, clone)

        # remove snapshot
        self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume, snapshot)

        # remove subvolumes
        self._fs_cmd("subvolume", "rm", self.volname, subvolume)
        self._fs_cmd("subvolume", "rm", self.volname, clone)

        # verify trash dir is clean
        self._wait_for_trash_empty()

    def test_subvolume_clone_in_progress_source(self):
        subvolume = self._generate_random_subvolume_name()
        snapshot = self._generate_random_snapshot_name()
        clone = self._generate_random_clone_name()

        # create subvolume
        self._fs_cmd("subvolume", "create", self.volname, subvolume, "--mode=777")

        # do some IO
        self._do_subvolume_io(subvolume, number_of_files=64)

        # snapshot subvolume
        self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolume, snapshot)

        # Insert delay at the beginning of snapshot clone
        self.config_set('mgr', 'mgr/volumes/snapshot_clone_delay', 2)

        # schedule a clone
        self._fs_cmd("subvolume", "snapshot", "clone", self.volname, subvolume, snapshot, clone)

        # verify clone source
        result = json.loads(self._fs_cmd("clone", "status", self.volname, clone))
        source = result['status']['source']
        self.assertEqual(source['volume'], self.volname)
        self.assertEqual(source['subvolume'], subvolume)
        self.assertEqual(source.get('group', None), None)
        self.assertEqual(source['snapshot'], snapshot)

        # check clone status
        self._wait_for_clone_to_complete(clone)

        # clone should be accessible now
        subvolpath = self._get_subvolume_path(self.volname, clone)
        self.assertNotEqual(subvolpath, None)

        # verify clone
        self._verify_clone(subvolume, snapshot, clone)

        # remove snapshot
        self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume, snapshot)

        # remove subvolumes
        self._fs_cmd("subvolume", "rm", self.volname, subvolume)
        self._fs_cmd("subvolume", "rm", self.volname, clone)

        # verify trash dir is clean
        self._wait_for_trash_empty()

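    # For reference, while a clone is in flight 'clone status' reports its
    # provenance; the JSON asserted above looks roughly like (values
    # illustrative):
    #   {"status": {"state": "in-progress",
    #               "source": {"volume": "<vol>", "subvolume": "<subvol>",
    #                          "snapshot": "<snap>"}}}
    # with a "group" key appearing only for clones sourced from a subvolume group.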
    def test_subvolume_clone_retain_snapshot_with_snapshots(self):
        """
        retain snapshots of a cloned subvolume and check disallowed operations
        """
        subvolume = self._generate_random_subvolume_name()
        snapshot1, snapshot2 = self._generate_random_snapshot_name(2)
        clone = self._generate_random_clone_name()

        # create subvolume
        self._fs_cmd("subvolume", "create", self.volname, subvolume, "--mode=777")

        # store path for clone verification
        subvol1_path = self._get_subvolume_path(self.volname, subvolume)

        # do some IO
        self._do_subvolume_io(subvolume, number_of_files=16)

        # snapshot subvolume
        self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolume, snapshot1)

        # remove with snapshot retention
        self._fs_cmd("subvolume", "rm", self.volname, subvolume, "--retain-snapshots")

        # clone retained subvolume snapshot
        self._fs_cmd("subvolume", "snapshot", "clone", self.volname, subvolume, snapshot1, clone)

        # check clone status
        self._wait_for_clone_to_complete(clone)

        # verify clone
        self._verify_clone(subvolume, snapshot1, clone, subvol_path=subvol1_path)

        # create a snapshot on the clone
        self._fs_cmd("subvolume", "snapshot", "create", self.volname, clone, snapshot2)

        # retain a clone
        self._fs_cmd("subvolume", "rm", self.volname, clone, "--retain-snapshots")

        # list snapshots
        clonesnapshotls = json.loads(self._fs_cmd('subvolume', 'snapshot', 'ls', self.volname, clone))
        self.assertEqual(len(clonesnapshotls), 1, "Expected the 'fs subvolume snapshot ls' command to list the"
                         " created subvolume snapshots")
        snapshotnames = [snapshot['name'] for snapshot in clonesnapshotls]
        for snap in [snapshot2]:
            self.assertIn(snap, snapshotnames, "Missing snapshot '{0}' in snapshot list".format(snap))

        ## check disallowed operations on retained clone
        # clone-status
        try:
            self._fs_cmd("clone", "status", self.volname, clone)
        except CommandFailedError as ce:
            self.assertEqual(ce.exitstatus, errno.ENOENT, "invalid error code on clone status of clone with retained snapshots")
        else:
            self.fail("expected clone status of clone with retained snapshots to fail")

        # clone-cancel
        try:
            self._fs_cmd("clone", "cancel", self.volname, clone)
        except CommandFailedError as ce:
            self.assertEqual(ce.exitstatus, errno.ENOENT, "invalid error code on clone cancel of clone with retained snapshots")
        else:
            self.fail("expected clone cancel of clone with retained snapshots to fail")

        # remove snapshots (removes subvolumes as all are in retained state)
        self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume, snapshot1)
        self._fs_cmd("subvolume", "snapshot", "rm", self.volname, clone, snapshot2)

        # verify list subvolumes returns an empty list
        subvolumels = json.loads(self._fs_cmd('subvolume', 'ls', self.volname))
        self.assertEqual(len(subvolumels), 0)

        # verify trash dir is clean
        self._wait_for_trash_empty()

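    # 'subvolume rm --retain-snapshots' removes the subvolume but keeps its
    # snapshots around as valid clone sources; the retained subvolume can even
    # be recreated by cloning one of them, as the tests below demonstrate.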
    def test_subvolume_retain_snapshot_clone(self):
        """
        clone a snapshot from a snapshot retained subvolume
        """
        subvolume = self._generate_random_subvolume_name()
        snapshot = self._generate_random_snapshot_name()
        clone = self._generate_random_clone_name()

        # create subvolume
        self._fs_cmd("subvolume", "create", self.volname, subvolume, "--mode=777")

        # store path for clone verification
        subvol_path = self._get_subvolume_path(self.volname, subvolume)

        # do some IO
        self._do_subvolume_io(subvolume, number_of_files=16)

        # snapshot subvolume
        self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolume, snapshot)

        # remove with snapshot retention
        self._fs_cmd("subvolume", "rm", self.volname, subvolume, "--retain-snapshots")

        # clone retained subvolume snapshot
        self._fs_cmd("subvolume", "snapshot", "clone", self.volname, subvolume, snapshot, clone)

        # check clone status
        self._wait_for_clone_to_complete(clone)

        # verify clone
        self._verify_clone(subvolume, snapshot, clone, subvol_path=subvol_path)

        # remove snapshots (removes retained volume)
        self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume, snapshot)

        # remove subvolume
        self._fs_cmd("subvolume", "rm", self.volname, clone)

        # verify list subvolumes returns an empty list
        subvolumels = json.loads(self._fs_cmd('subvolume', 'ls', self.volname))
        self.assertEqual(len(subvolumels), 0)

        # verify trash dir is clean
        self._wait_for_trash_empty()

    def test_subvolume_retain_snapshot_clone_from_newer_snapshot(self):
        """
        clone a subvolume from recreated subvolume's latest snapshot
        """
        subvolume = self._generate_random_subvolume_name()
        snapshot1, snapshot2 = self._generate_random_snapshot_name(2)
        clone = self._generate_random_clone_name(1)

        # create subvolume
        self._fs_cmd("subvolume", "create", self.volname, subvolume, "--mode=777")

        # do some IO
        self._do_subvolume_io(subvolume, number_of_files=16)

        # snapshot subvolume
        self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolume, snapshot1)

        # remove with snapshot retention
        self._fs_cmd("subvolume", "rm", self.volname, subvolume, "--retain-snapshots")

        # recreate subvolume
        self._fs_cmd("subvolume", "create", self.volname, subvolume, "--mode=777")

        # get and store path for clone verification
        subvol2_path = self._get_subvolume_path(self.volname, subvolume)

        # do some IO
        self._do_subvolume_io(subvolume, number_of_files=16)

        # snapshot newer subvolume
        self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolume, snapshot2)

        # remove with snapshot retention
        self._fs_cmd("subvolume", "rm", self.volname, subvolume, "--retain-snapshots")

        # clone retained subvolume's newer snapshot
        self._fs_cmd("subvolume", "snapshot", "clone", self.volname, subvolume, snapshot2, clone)

        # check clone status
        self._wait_for_clone_to_complete(clone)

        # verify clone
        self._verify_clone(subvolume, snapshot2, clone, subvol_path=subvol2_path)

        # remove snapshot
        self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume, snapshot1)
        self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume, snapshot2)

        # remove subvolume
        self._fs_cmd("subvolume", "rm", self.volname, clone)

        # verify list subvolumes returns an empty list
        subvolumels = json.loads(self._fs_cmd('subvolume', 'ls', self.volname))
        self.assertEqual(len(subvolumels), 0)

        # verify trash dir is clean
        self._wait_for_trash_empty()

    def test_subvolume_retain_snapshot_recreate(self):
        """
        recreate a subvolume from one of its retained snapshots
        """
        subvolume = self._generate_random_subvolume_name()
        snapshot = self._generate_random_snapshot_name()

        # create subvolume
        self._fs_cmd("subvolume", "create", self.volname, subvolume, "--mode=777")

        # store path for clone verification
        subvol_path = self._get_subvolume_path(self.volname, subvolume)

        # do some IO
        self._do_subvolume_io(subvolume, number_of_files=16)

        # snapshot subvolume
        self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolume, snapshot)

        # remove with snapshot retention
        self._fs_cmd("subvolume", "rm", self.volname, subvolume, "--retain-snapshots")

        # recreate retained subvolume using its own snapshot to clone
        self._fs_cmd("subvolume", "snapshot", "clone", self.volname, subvolume, snapshot, subvolume)

        # check clone status
        self._wait_for_clone_to_complete(subvolume)

        # verify clone
        self._verify_clone(subvolume, snapshot, subvolume, subvol_path=subvol_path)

        # remove snapshot
        self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume, snapshot)

        # remove subvolume
        self._fs_cmd("subvolume", "rm", self.volname, subvolume)

        # verify list subvolumes returns an empty list
        subvolumels = json.loads(self._fs_cmd('subvolume', 'ls', self.volname))
        self.assertEqual(len(subvolumels), 0)

        # verify trash dir is clean
        self._wait_for_trash_empty()

    def test_subvolume_retain_snapshot_trash_busy_recreate_clone(self):
        """
        ensure retained clone recreate fails if its trash is not yet purged
        """
        subvolume = self._generate_random_subvolume_name()
        snapshot = self._generate_random_snapshot_name()
        clone = self._generate_random_clone_name()

        # create subvolume
        self._fs_cmd("subvolume", "create", self.volname, subvolume)

        # snapshot subvolume
        self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolume, snapshot)

        # clone subvolume snapshot
        self._fs_cmd("subvolume", "snapshot", "clone", self.volname, subvolume, snapshot, clone)

        # check clone status
        self._wait_for_clone_to_complete(clone)

        # snapshot clone
        self._fs_cmd("subvolume", "snapshot", "create", self.volname, clone, snapshot)

        # remove clone with snapshot retention
        self._fs_cmd("subvolume", "rm", self.volname, clone, "--retain-snapshots")

        # fake a trash entry
        self._update_fake_trash(clone)

        # clone subvolume snapshot (recreate)
        try:
            self._fs_cmd("subvolume", "snapshot", "clone", self.volname, subvolume, snapshot, clone)
        except CommandFailedError as ce:
            self.assertEqual(ce.exitstatus, errno.EAGAIN, "invalid error code on recreate of clone with purge pending")
        else:
            self.fail("expected recreate of clone with purge pending to fail")

        # clear fake trash entry
        self._update_fake_trash(clone, create=False)

        # recreate subvolume
        self._fs_cmd("subvolume", "snapshot", "clone", self.volname, subvolume, snapshot, clone)

        # check clone status
        self._wait_for_clone_to_complete(clone)

        # remove snapshot
        self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume, snapshot)
        self._fs_cmd("subvolume", "snapshot", "rm", self.volname, clone, snapshot)

        # remove subvolume
        self._fs_cmd("subvolume", "rm", self.volname, subvolume)
        self._fs_cmd("subvolume", "rm", self.volname, clone)

        # verify trash dir is clean
        self._wait_for_trash_empty()

    def test_subvolume_snapshot_attr_clone(self):
        subvolume = self._generate_random_subvolume_name()
        snapshot = self._generate_random_snapshot_name()
        clone = self._generate_random_clone_name()

        # create subvolume
        self._fs_cmd("subvolume", "create", self.volname, subvolume, "--mode=777")

        # do some IO
        self._do_subvolume_io_mixed(subvolume)

        # snapshot subvolume
        self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolume, snapshot)

        # schedule a clone
        self._fs_cmd("subvolume", "snapshot", "clone", self.volname, subvolume, snapshot, clone)

        # check clone status
        self._wait_for_clone_to_complete(clone)

        # verify clone
        self._verify_clone(subvolume, snapshot, clone)

        # remove snapshot
        self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume, snapshot)

        # remove subvolumes
        self._fs_cmd("subvolume", "rm", self.volname, subvolume)
        self._fs_cmd("subvolume", "rm", self.volname, clone)

        # verify trash dir is clean
        self._wait_for_trash_empty()

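    # Once a clone fails or is cancelled, 'clone status' carries a failure
    # section; the JSON asserted in the tests below looks roughly like
    #   {"status": {"state": "failed",
    #               "failure": {"errno": "2",
    #                           "error_msg": "snapshot '<snap>' does not exist"}}}
    # (note that errno is serialized as a string); for states other than
    # failed/cancelled no "failure" key is present at all.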
    def test_clone_failure_status_pending_in_progress_complete(self):
        """
        ensure failure status is not shown when clone is not in failed/cancelled state
        """
        subvolume = self._generate_random_subvolume_name()
        snapshot = self._generate_random_snapshot_name()
        clone1 = self._generate_random_clone_name()

        # create subvolume
        self._fs_cmd("subvolume", "create", self.volname, subvolume, "--mode=777")

        # do some IO
        self._do_subvolume_io(subvolume, number_of_files=200)

        # snapshot subvolume
        self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolume, snapshot)

        # Insert delay at the beginning of snapshot clone
        self.config_set('mgr', 'mgr/volumes/snapshot_clone_delay', 5)

        # schedule a clone1
        self._fs_cmd("subvolume", "snapshot", "clone", self.volname, subvolume, snapshot, clone1)

        # pending clone shouldn't show failure status
        clone1_result = self._get_clone_status(clone1)
        try:
            clone1_result["status"]["failure"]["errno"]
        except KeyError as e:
            self.assertEqual(str(e), "'failure'")
        else:
            self.fail("clone status shouldn't show failure for pending clone")

        # check clone1 to be in-progress
        self._wait_for_clone_to_be_in_progress(clone1)

        # in-progress clone1 shouldn't show failure status
        clone1_result = self._get_clone_status(clone1)
        try:
            clone1_result["status"]["failure"]["errno"]
        except KeyError as e:
            self.assertEqual(str(e), "'failure'")
        else:
            self.fail("clone status shouldn't show failure for in-progress clone")

        # wait for clone1 to complete
        self._wait_for_clone_to_complete(clone1)

        # complete clone1 shouldn't show failure status
        clone1_result = self._get_clone_status(clone1)
        try:
            clone1_result["status"]["failure"]["errno"]
        except KeyError as e:
            self.assertEqual(str(e), "'failure'")
        else:
            self.fail("clone status shouldn't show failure for complete clone")

        # remove snapshot
        self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume, snapshot)

        # remove subvolumes
        self._fs_cmd("subvolume", "rm", self.volname, subvolume)
        self._fs_cmd("subvolume", "rm", self.volname, clone1)

        # verify trash dir is clean
        self._wait_for_trash_empty()

    def test_clone_failure_status_failed(self):
        """
        ensure failure status is shown when clone is in failed state and validate the reason
        """
        subvolume = self._generate_random_subvolume_name()
        snapshot = self._generate_random_snapshot_name()
        clone1 = self._generate_random_clone_name()

        # create subvolume
        self._fs_cmd("subvolume", "create", self.volname, subvolume, "--mode=777")

        # do some IO
        self._do_subvolume_io(subvolume, number_of_files=200)

        # snapshot subvolume
        self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolume, snapshot)

        # Insert delay at the beginning of snapshot clone
        self.config_set('mgr', 'mgr/volumes/snapshot_clone_delay', 5)

        # schedule a clone1
        self._fs_cmd("subvolume", "snapshot", "clone", self.volname, subvolume, snapshot, clone1)

        # remove snapshot from backend to force the clone failure.
        snappath = os.path.join(".", "volumes", "_nogroup", subvolume, ".snap", snapshot)
        self.mount_a.run_shell(['rmdir', snappath], sudo=True)

        # wait for clone1 to fail.
        self._wait_for_clone_to_fail(clone1)

        # check clone1 status
        clone1_result = self._get_clone_status(clone1)
        self.assertEqual(clone1_result["status"]["state"], "failed")
        self.assertEqual(clone1_result["status"]["failure"]["errno"], "2")
        self.assertEqual(clone1_result["status"]["failure"]["error_msg"], "snapshot '{0}' does not exist".format(snapshot))

        # clone removal should succeed after failure, remove clone1
        self._fs_cmd("subvolume", "rm", self.volname, clone1, "--force")

        # remove subvolumes
        self._fs_cmd("subvolume", "rm", self.volname, subvolume)

        # verify trash dir is clean
        self._wait_for_trash_empty()

    def test_clone_failure_status_pending_cancelled(self):
        """
        ensure failure status is shown when clone is cancelled during pending state and validate the reason
        """
        subvolume = self._generate_random_subvolume_name()
        snapshot = self._generate_random_snapshot_name()
        clone1 = self._generate_random_clone_name()

        # create subvolume
        self._fs_cmd("subvolume", "create", self.volname, subvolume, "--mode=777")

        # do some IO
        self._do_subvolume_io(subvolume, number_of_files=200)

        # snapshot subvolume
        self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolume, snapshot)

        # Insert delay at the beginning of snapshot clone
        self.config_set('mgr', 'mgr/volumes/snapshot_clone_delay', 5)

        # schedule a clone1
        self._fs_cmd("subvolume", "snapshot", "clone", self.volname, subvolume, snapshot, clone1)

        # cancel pending clone1
        self._fs_cmd("clone", "cancel", self.volname, clone1)

        # check clone1 status
        clone1_result = self._get_clone_status(clone1)
        self.assertEqual(clone1_result["status"]["state"], "canceled")
        self.assertEqual(clone1_result["status"]["failure"]["errno"], "4")
        self.assertEqual(clone1_result["status"]["failure"]["error_msg"], "user interrupted clone operation")

        # clone removal should succeed with force after cancelled, remove clone1
        self._fs_cmd("subvolume", "rm", self.volname, clone1, "--force")

        # remove snapshot
        self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume, snapshot)

        # remove subvolumes
        self._fs_cmd("subvolume", "rm", self.volname, subvolume)

        # verify trash dir is clean
        self._wait_for_trash_empty()

    def test_clone_failure_status_in_progress_cancelled(self):
        """
        ensure failure status is shown when clone is cancelled during in-progress state and validate the reason
        """
        subvolume = self._generate_random_subvolume_name()
        snapshot = self._generate_random_snapshot_name()
        clone1 = self._generate_random_clone_name()

        # create subvolume
        self._fs_cmd("subvolume", "create", self.volname, subvolume, "--mode=777")

        # do some IO
        self._do_subvolume_io(subvolume, number_of_files=200)

        # snapshot subvolume
        self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolume, snapshot)

        # Insert delay at the beginning of snapshot clone
        self.config_set('mgr', 'mgr/volumes/snapshot_clone_delay', 5)

        # schedule a clone1
        self._fs_cmd("subvolume", "snapshot", "clone", self.volname, subvolume, snapshot, clone1)

        # wait for clone1 to be in-progress
        self._wait_for_clone_to_be_in_progress(clone1)

        # cancel in-progress clone1
        self._fs_cmd("clone", "cancel", self.volname, clone1)

        # check clone1 status
        clone1_result = self._get_clone_status(clone1)
        self.assertEqual(clone1_result["status"]["state"], "canceled")
        self.assertEqual(clone1_result["status"]["failure"]["errno"], "4")
        self.assertEqual(clone1_result["status"]["failure"]["error_msg"], "user interrupted clone operation")

        # clone removal should succeed with force after cancelled, remove clone1
        self._fs_cmd("subvolume", "rm", self.volname, clone1, "--force")

        # remove snapshot
        self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume, snapshot)

        # remove subvolumes
        self._fs_cmd("subvolume", "rm", self.volname, subvolume)

        # verify trash dir is clean
        self._wait_for_trash_empty()

    def test_subvolume_snapshot_clone(self):
        subvolume = self._generate_random_subvolume_name()
        snapshot = self._generate_random_snapshot_name()
        clone = self._generate_random_clone_name()

        # create subvolume
        self._fs_cmd("subvolume", "create", self.volname, subvolume, "--mode=777")

        # do some IO
        self._do_subvolume_io(subvolume, number_of_files=64)

        # snapshot subvolume
        self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolume, snapshot)

        # schedule a clone
        self._fs_cmd("subvolume", "snapshot", "clone", self.volname, subvolume, snapshot, clone)

        # check clone status
        self._wait_for_clone_to_complete(clone)

        # verify clone
        self._verify_clone(subvolume, snapshot, clone)

        # remove snapshot
        self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume, snapshot)

        # remove subvolumes
        self._fs_cmd("subvolume", "rm", self.volname, subvolume)
        self._fs_cmd("subvolume", "rm", self.volname, clone)

        # verify trash dir is clean
        self._wait_for_trash_empty()

    def test_subvolume_snapshot_clone_quota_exceeded(self):
        subvolume = self._generate_random_subvolume_name()
        snapshot = self._generate_random_snapshot_name()
        clone = self._generate_random_clone_name()

        # create subvolume with 20MB quota
        osize = self.DEFAULT_FILE_SIZE*1024*1024*20
        self._fs_cmd("subvolume", "create", self.volname, subvolume, "--mode=777", "--size", str(osize))

        # do IO, write 50 files of 1MB each to exceed quota. This mostly succeeds as quota enforcement takes time.
        self._do_subvolume_io(subvolume, number_of_files=50)

        # snapshot subvolume
        self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolume, snapshot)

        # schedule a clone
        self._fs_cmd("subvolume", "snapshot", "clone", self.volname, subvolume, snapshot, clone)

        # check clone status
        self._wait_for_clone_to_complete(clone)

        # verify clone
        self._verify_clone(subvolume, snapshot, clone)

        # remove snapshot
        self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume, snapshot)

        # remove subvolumes
        self._fs_cmd("subvolume", "rm", self.volname, subvolume)
        self._fs_cmd("subvolume", "rm", self.volname, clone)

        # verify trash dir is clean
        self._wait_for_trash_empty()

    def test_subvolume_snapshot_in_complete_clone_rm(self):
        """
        Validates the removal of a clone when it is not in the 'complete|cancelled|failed' state.
        The forceful removal of a subvolume clone succeeds only if it's in any of the
        'complete|cancelled|failed' states. It fails with EAGAIN in any other state.
        """

        subvolume = self._generate_random_subvolume_name()
        snapshot = self._generate_random_snapshot_name()
        clone = self._generate_random_clone_name()

        # create subvolume
        self._fs_cmd("subvolume", "create", self.volname, subvolume, "--mode=777")

        # do some IO
        self._do_subvolume_io(subvolume, number_of_files=64)

        # snapshot subvolume
        self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolume, snapshot)

        # Insert delay at the beginning of snapshot clone
        self.config_set('mgr', 'mgr/volumes/snapshot_clone_delay', 2)

        # schedule a clone
        self._fs_cmd("subvolume", "snapshot", "clone", self.volname, subvolume, snapshot, clone)

5311 # Use --force since clone is not complete. Returns EAGAIN as clone is not either complete or cancelled.
5312 try:
5313 self._fs_cmd("subvolume", "rm", self.volname, clone, "--force")
5314 except CommandFailedError as ce:
5315 if ce.exitstatus != errno.EAGAIN:
5316 raise RuntimeError("invalid error code when trying to remove failed clone")
5317 else:
5318 raise RuntimeError("expected error when removing a failed clone")
5319
5320 # cancel on-going clone
5321 self._fs_cmd("clone", "cancel", self.volname, clone)
5322
5323 # verify canceled state
5324 self._check_clone_canceled(clone)
5325
5326 # clone removal should succeed after cancel
5327 self._fs_cmd("subvolume", "rm", self.volname, clone, "--force")
5328
5329 # remove snapshot
5330 self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume, snapshot)
5331
5332 # remove subvolumes
5333 self._fs_cmd("subvolume", "rm", self.volname, subvolume)
5334
5335 # verify trash dir is clean
5336 self._wait_for_trash_empty()
5337
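    # A compact, hypothetical variant of the errno checks used above, written
    # with unittest's assertRaises context manager; the tests keep the explicit
    # try/except/else style for uniformity with the rest of the file.
    def _example_expect_errno(self, expected_errno, *cmd_args):
        with self.assertRaises(CommandFailedError) as cm:
            self._fs_cmd(*cmd_args)
        self.assertEqual(cm.exception.exitstatus, expected_errno)
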
    def test_subvolume_snapshot_clone_retain_suid_guid(self):
        subvolume = self._generate_random_subvolume_name()
        snapshot = self._generate_random_snapshot_name()
        clone = self._generate_random_clone_name()

        # create subvolume
        self._fs_cmd("subvolume", "create", self.volname, subvolume, "--mode=777")

        # Create a file with the setuid and setgid bits set along with the executable bit.
        subvolpath = self._fs_cmd("subvolume", "getpath", self.volname, subvolume)
        self.assertNotEqual(subvolpath, None)
        subvolpath = subvolpath[1:].rstrip() # remove "/" prefix and any trailing newline

        file_path = os.path.join(subvolpath, "test_suid_file")
        self.mount_a.run_shell(["touch", file_path])
        self.mount_a.run_shell(["chmod", "u+sx,g+sx", file_path])

        # snapshot subvolume
        self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolume, snapshot)

        # schedule a clone
        self._fs_cmd("subvolume", "snapshot", "clone", self.volname, subvolume, snapshot, clone)

        # check clone status
        self._wait_for_clone_to_complete(clone)

        # verify clone
        self._verify_clone(subvolume, snapshot, clone)

        # remove snapshot
        self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume, snapshot)

        # remove subvolumes
        self._fs_cmd("subvolume", "rm", self.volname, subvolume)
        self._fs_cmd("subvolume", "rm", self.volname, clone)

        # verify trash dir is clean
        self._wait_for_trash_empty()

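    # A hypothetical spot-check for the setuid/setgid/execute bits preserved by
    # the clone above; _verify_clone already compares attributes, this just
    # illustrates the check with stat(1) octal output (e.g. "6755" when both
    # set-id bits are present).
    def _example_assert_setid_bits(self, path):
        mode = self.mount_a.run_shell(["stat", "-c", "%a", path]).stdout.getvalue().strip()
        self.assertTrue(mode.startswith("6"),
                        "expected setuid+setgid bits in mode {0}".format(mode))
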
    def test_subvolume_snapshot_clone_and_reclone(self):
        subvolume = self._generate_random_subvolume_name()
        snapshot = self._generate_random_snapshot_name()
        clone1, clone2 = self._generate_random_clone_name(2)

        # create subvolume
        self._fs_cmd("subvolume", "create", self.volname, subvolume, "--mode=777")

        # do some IO
        self._do_subvolume_io(subvolume, number_of_files=32)

        # snapshot subvolume
        self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolume, snapshot)

        # schedule a clone
        self._fs_cmd("subvolume", "snapshot", "clone", self.volname, subvolume, snapshot, clone1)

        # check clone status
        self._wait_for_clone_to_complete(clone1)

        # verify clone
        self._verify_clone(subvolume, snapshot, clone1)

        # remove snapshot
        self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume, snapshot)

        # now the clone is just like a normal subvolume -- snapshot the clone and fork
        # another clone. before that do some IO so it can be differentiated.
        self._do_subvolume_io(clone1, create_dir="data", number_of_files=32)

        # snapshot clone -- use same snap name
        self._fs_cmd("subvolume", "snapshot", "create", self.volname, clone1, snapshot)

        # schedule a clone
        self._fs_cmd("subvolume", "snapshot", "clone", self.volname, clone1, snapshot, clone2)

        # check clone status
        self._wait_for_clone_to_complete(clone2)

        # verify clone
        self._verify_clone(clone1, snapshot, clone2)

        # remove snapshot
        self._fs_cmd("subvolume", "snapshot", "rm", self.volname, clone1, snapshot)

        # remove subvolumes
        self._fs_cmd("subvolume", "rm", self.volname, subvolume)
        self._fs_cmd("subvolume", "rm", self.volname, clone1)
        self._fs_cmd("subvolume", "rm", self.volname, clone2)

        # verify trash dir is clean
        self._wait_for_trash_empty()

    def test_subvolume_snapshot_clone_cancel_in_progress(self):
        subvolume = self._generate_random_subvolume_name()
        snapshot = self._generate_random_snapshot_name()
        clone = self._generate_random_clone_name()

        # create subvolume
        self._fs_cmd("subvolume", "create", self.volname, subvolume, "--mode=777")

        # do some IO
        self._do_subvolume_io(subvolume, number_of_files=128)

        # snapshot subvolume
        self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolume, snapshot)

        # Insert delay at the beginning of snapshot clone
        self.config_set('mgr', 'mgr/volumes/snapshot_clone_delay', 2)

        # schedule a clone
        self._fs_cmd("subvolume", "snapshot", "clone", self.volname, subvolume, snapshot, clone)

        # cancel on-going clone
        self._fs_cmd("clone", "cancel", self.volname, clone)

        # verify canceled state
        self._check_clone_canceled(clone)

        # remove snapshot
        self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume, snapshot)

        # remove subvolumes
        self._fs_cmd("subvolume", "rm", self.volname, subvolume)
        self._fs_cmd("subvolume", "rm", self.volname, clone, "--force")

        # verify trash dir is clean
        self._wait_for_trash_empty()

    def test_subvolume_snapshot_clone_cancel_pending(self):
        """
        this test is a bit more involved compared to canceling an in-progress clone.
        we need to ensure that a to-be-canceled clone has not yet been picked up
        by the cloner threads. exploit the fact that clones are picked up in FCFS
        order and that there are four (4) cloner threads by default. if the number
        of cloner threads increases, this test _may_ start tripping -- so the number
        of clone operations would need to be jacked up.
        """
        # default number of cloner threads
        NR_THREADS = 4
        # good enough for 4 threads
        NR_CLONES = 5
        # yeh, 1 GiB -- we need the clones to run for some time
        FILE_SIZE_MB = 1024

        subvolume = self._generate_random_subvolume_name()
        snapshot = self._generate_random_snapshot_name()
        clones = self._generate_random_clone_name(NR_CLONES)

        # create subvolume
        self._fs_cmd("subvolume", "create", self.volname, subvolume, "--mode=777")

        # do some IO
        self._do_subvolume_io(subvolume, number_of_files=4, file_size=FILE_SIZE_MB)

        # snapshot subvolume
        self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolume, snapshot)

        # schedule clones
        for clone in clones:
            self._fs_cmd("subvolume", "snapshot", "clone", self.volname, subvolume, snapshot, clone)

        to_wait = clones[0:NR_THREADS]
        to_cancel = clones[NR_THREADS:]

        # cancel pending clones and verify
        for clone in to_cancel:
            status = json.loads(self._fs_cmd("clone", "status", self.volname, clone))
            self.assertEqual(status["status"]["state"], "pending")
            self._fs_cmd("clone", "cancel", self.volname, clone)
            self._check_clone_canceled(clone)

        # let's cancel on-going clones. handle the case where some of the clones
        # _just_ completed
        for clone in list(to_wait):
            try:
                self._fs_cmd("clone", "cancel", self.volname, clone)
                to_cancel.append(clone)
                to_wait.remove(clone)
            except CommandFailedError as ce:
                if ce.exitstatus != errno.EINVAL:
                    raise RuntimeError("invalid error code when cancelling on-going clone")

        # remove snapshot
        self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume, snapshot)

        # remove subvolumes
        self._fs_cmd("subvolume", "rm", self.volname, subvolume)
        for clone in to_wait:
            self._fs_cmd("subvolume", "rm", self.volname, clone)
        for clone in to_cancel:
            self._fs_cmd("subvolume", "rm", self.volname, clone, "--force")

        # verify trash dir is clean
        self._wait_for_trash_empty()

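    # A hypothetical helper capturing the cancellation pattern above: cancel a
    # clone only while it is still pending and report whether the cancel was
    # issued. It relies only on the "clone status" and "clone cancel" commands
    # exercised by this file.
    def _example_cancel_if_pending(self, clone):
        status = json.loads(self._fs_cmd("clone", "status", self.volname, clone))
        if status["status"]["state"] != "pending":
            return False
        self._fs_cmd("clone", "cancel", self.volname, clone)
        return True
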
    def test_subvolume_snapshot_clone_different_groups(self):
        subvolume = self._generate_random_subvolume_name()
        snapshot = self._generate_random_snapshot_name()
        clone = self._generate_random_clone_name()
        s_group, c_group = self._generate_random_group_name(2)

        # create groups
        self._fs_cmd("subvolumegroup", "create", self.volname, s_group)
        self._fs_cmd("subvolumegroup", "create", self.volname, c_group)

        # create subvolume
        self._fs_cmd("subvolume", "create", self.volname, subvolume, s_group, "--mode=777")

        # do some IO
        self._do_subvolume_io(subvolume, subvolume_group=s_group, number_of_files=32)

        # snapshot subvolume
        self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolume, snapshot, s_group)

        # schedule a clone
        self._fs_cmd("subvolume", "snapshot", "clone", self.volname, subvolume, snapshot, clone,
                     '--group_name', s_group, '--target_group_name', c_group)

        # check clone status
        self._wait_for_clone_to_complete(clone, clone_group=c_group)

        # verify clone
        self._verify_clone(subvolume, snapshot, clone, source_group=s_group, clone_group=c_group)

        # remove snapshot
        self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume, snapshot, s_group)

        # remove subvolumes
        self._fs_cmd("subvolume", "rm", self.volname, subvolume, s_group)
        self._fs_cmd("subvolume", "rm", self.volname, clone, c_group)

        # remove groups
        self._fs_cmd("subvolumegroup", "rm", self.volname, s_group)
        self._fs_cmd("subvolumegroup", "rm", self.volname, c_group)

        # verify trash dir is clean
        self._wait_for_trash_empty()

    def test_subvolume_snapshot_clone_fail_with_remove(self):
        subvolume = self._generate_random_subvolume_name()
        snapshot = self._generate_random_snapshot_name()
        clone1, clone2 = self._generate_random_clone_name(2)

        pool_capacity = 32 * 1024 * 1024
        # number of files required to fill up 99% of the pool
        nr_files = int((pool_capacity * 0.99) / (TestVolumes.DEFAULT_FILE_SIZE * 1024 * 1024))

        # create subvolume
        self._fs_cmd("subvolume", "create", self.volname, subvolume, "--mode=777")

        # do some IO
        self._do_subvolume_io(subvolume, number_of_files=nr_files)

        # snapshot subvolume
        self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolume, snapshot)

        # add data pool
        new_pool = "new_pool"
        self.fs.add_data_pool(new_pool)

        self.fs.mon_manager.raw_cluster_cmd("osd", "pool", "set-quota", new_pool,
                                            "max_bytes", "{0}".format(pool_capacity // 4))

        # schedule a clone
        self._fs_cmd("subvolume", "snapshot", "clone", self.volname, subvolume, snapshot, clone1, "--pool_layout", new_pool)

        # check clone status -- this should dramatically overshoot the pool quota
        self._wait_for_clone_to_complete(clone1)

        # verify clone
        self._verify_clone(subvolume, snapshot, clone1, clone_pool=new_pool)

        # wait a bit so that subsequent I/O will give pool full error
        time.sleep(120)

        # schedule a clone
        self._fs_cmd("subvolume", "snapshot", "clone", self.volname, subvolume, snapshot, clone2, "--pool_layout", new_pool)

        # check clone status
        self._wait_for_clone_to_fail(clone2)

        # remove snapshot
        self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume, snapshot)

        # remove subvolumes
        self._fs_cmd("subvolume", "rm", self.volname, subvolume)
        self._fs_cmd("subvolume", "rm", self.volname, clone1)
        try:
            self._fs_cmd("subvolume", "rm", self.volname, clone2)
        except CommandFailedError as ce:
            if ce.exitstatus != errno.EAGAIN:
                raise RuntimeError("invalid error code when trying to remove failed clone")
        else:
            raise RuntimeError("expected error when removing a failed clone")

        # ... and with force, failed clone can be removed
        self._fs_cmd("subvolume", "rm", self.volname, clone2, "--force")

        # verify trash dir is clean
        self._wait_for_trash_empty()

    def test_subvolume_snapshot_clone_on_existing_subvolumes(self):
        subvolume1, subvolume2 = self._generate_random_subvolume_name(2)
        snapshot = self._generate_random_snapshot_name()
        clone = self._generate_random_clone_name()

        # create subvolumes
        self._fs_cmd("subvolume", "create", self.volname, subvolume1, "--mode=777")
        self._fs_cmd("subvolume", "create", self.volname, subvolume2, "--mode=777")

        # do some IO
        self._do_subvolume_io(subvolume1, number_of_files=32)

        # snapshot subvolume
        self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolume1, snapshot)

        # schedule a clone with target as subvolume2
        try:
            self._fs_cmd("subvolume", "snapshot", "clone", self.volname, subvolume1, snapshot, subvolume2)
        except CommandFailedError as ce:
            if ce.exitstatus != errno.EEXIST:
                raise RuntimeError("invalid error code when cloning to existing subvolume")
        else:
            raise RuntimeError("expected cloning to fail if the target is an existing subvolume")

        self._fs_cmd("subvolume", "snapshot", "clone", self.volname, subvolume1, snapshot, clone)

        # schedule a clone with target as clone
        try:
            self._fs_cmd("subvolume", "snapshot", "clone", self.volname, subvolume1, snapshot, clone)
        except CommandFailedError as ce:
            if ce.exitstatus != errno.EEXIST:
                raise RuntimeError("invalid error code when cloning to existing clone")
        else:
            raise RuntimeError("expected cloning to fail if the target is an existing clone")

        # check clone status
        self._wait_for_clone_to_complete(clone)

        # verify clone
        self._verify_clone(subvolume1, snapshot, clone)

        # remove snapshot
        self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume1, snapshot)

        # remove subvolumes
        self._fs_cmd("subvolume", "rm", self.volname, subvolume1)
        self._fs_cmd("subvolume", "rm", self.volname, subvolume2)
        self._fs_cmd("subvolume", "rm", self.volname, clone)

        # verify trash dir is clean
        self._wait_for_trash_empty()

    def test_subvolume_snapshot_clone_pool_layout(self):
        subvolume = self._generate_random_subvolume_name()
        snapshot = self._generate_random_snapshot_name()
        clone = self._generate_random_clone_name()

        # add data pool
        new_pool = "new_pool"
        newid = self.fs.add_data_pool(new_pool)

        # create subvolume
        self._fs_cmd("subvolume", "create", self.volname, subvolume, "--mode=777")

        # do some IO
        self._do_subvolume_io(subvolume, number_of_files=32)

        # snapshot subvolume
        self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolume, snapshot)

        # schedule a clone
        self._fs_cmd("subvolume", "snapshot", "clone", self.volname, subvolume, snapshot, clone, "--pool_layout", new_pool)

        # check clone status
        self._wait_for_clone_to_complete(clone)

        # verify clone
        self._verify_clone(subvolume, snapshot, clone, clone_pool=new_pool)

        # remove snapshot
        self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume, snapshot)

        subvol_path = self._get_subvolume_path(self.volname, clone)
        desired_pool = self.mount_a.getfattr(subvol_path, "ceph.dir.layout.pool")
        try:
            self.assertEqual(desired_pool, new_pool)
        except AssertionError:
            self.assertEqual(int(desired_pool), newid) # old kernel returns id

        # remove subvolumes
        self._fs_cmd("subvolume", "rm", self.volname, subvolume)
        self._fs_cmd("subvolume", "rm", self.volname, clone)

        # verify trash dir is clean
        self._wait_for_trash_empty()

    def test_subvolume_snapshot_clone_under_group(self):
        subvolume = self._generate_random_subvolume_name()
        snapshot = self._generate_random_snapshot_name()
        clone = self._generate_random_clone_name()
        group = self._generate_random_group_name()

        # create subvolume
        self._fs_cmd("subvolume", "create", self.volname, subvolume, "--mode=777")

        # do some IO
        self._do_subvolume_io(subvolume, number_of_files=32)

        # snapshot subvolume
        self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolume, snapshot)

        # create group
        self._fs_cmd("subvolumegroup", "create", self.volname, group)

        # schedule a clone
        self._fs_cmd("subvolume", "snapshot", "clone", self.volname, subvolume, snapshot, clone, '--target_group_name', group)

        # check clone status
        self._wait_for_clone_to_complete(clone, clone_group=group)

        # verify clone
        self._verify_clone(subvolume, snapshot, clone, clone_group=group)

        # remove snapshot
        self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume, snapshot)

        # remove subvolumes
        self._fs_cmd("subvolume", "rm", self.volname, subvolume)
        self._fs_cmd("subvolume", "rm", self.volname, clone, group)

        # remove group
        self._fs_cmd("subvolumegroup", "rm", self.volname, group)

        # verify trash dir is clean
        self._wait_for_trash_empty()

    def test_subvolume_snapshot_clone_with_attrs(self):
        subvolume = self._generate_random_subvolume_name()
        snapshot = self._generate_random_snapshot_name()
        clone = self._generate_random_clone_name()

        mode = "777"
        uid = "1000"
        gid = "1000"
        new_uid = "1001"
        new_gid = "1001"
        new_mode = "700"

        # create subvolume
        self._fs_cmd("subvolume", "create", self.volname, subvolume, "--mode", mode, "--uid", uid, "--gid", gid)

        # do some IO
        self._do_subvolume_io(subvolume, number_of_files=32)

        # snapshot subvolume
        self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolume, snapshot)

        # change subvolume attrs (to ensure clone picks up snapshot attrs)
        self._do_subvolume_attr_update(subvolume, new_uid, new_gid, new_mode)

        # schedule a clone
        self._fs_cmd("subvolume", "snapshot", "clone", self.volname, subvolume, snapshot, clone)

        # check clone status
        self._wait_for_clone_to_complete(clone)

        # verify clone
        self._verify_clone(subvolume, snapshot, clone)

        # remove snapshot
        self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume, snapshot)

        # remove subvolumes
        self._fs_cmd("subvolume", "rm", self.volname, subvolume)
        self._fs_cmd("subvolume", "rm", self.volname, clone)

        # verify trash dir is clean
        self._wait_for_trash_empty()

    def test_subvolume_snapshot_clone_with_upgrade(self):
        """
        yet another poor man's upgrade test -- rather than going through a full
        upgrade cycle, emulate old-style subvolumes by going through the wormhole
        and verify the clone operation.
        further ensure that the legacy subvolume is not updated to v2, but the clone is.
        """
        subvolume = self._generate_random_subvolume_name()
        snapshot = self._generate_random_snapshot_name()
        clone = self._generate_random_clone_name()

        # emulate an old-fashioned subvolume
        createpath = os.path.join(".", "volumes", "_nogroup", subvolume)
        self.mount_a.run_shell_payload(f"mkdir -p -m 777 {createpath}", sudo=True)

        # add required xattrs to subvolume
        default_pool = self.mount_a.getfattr(".", "ceph.dir.layout.pool")
        self.mount_a.setfattr(createpath, 'ceph.dir.layout.pool', default_pool, sudo=True)

        # do some IO
        self._do_subvolume_io(subvolume, number_of_files=64)

        # snapshot subvolume
        self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolume, snapshot)

        # ensure metadata file is in legacy location, with required version v1
        self._assert_meta_location_and_version(self.volname, subvolume, version=1, legacy=True)

        # Insert delay at the beginning of snapshot clone
        self.config_set('mgr', 'mgr/volumes/snapshot_clone_delay', 2)

        # schedule a clone
        self._fs_cmd("subvolume", "snapshot", "clone", self.volname, subvolume, snapshot, clone)

        # snapshot should not be deletable now
        try:
            self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume, snapshot)
        except CommandFailedError as ce:
            self.assertEqual(ce.exitstatus, errno.EAGAIN, msg="invalid error code when removing source snapshot of a clone")
        else:
            self.fail("expected removing source snapshot of a clone to fail")

        # check clone status
        self._wait_for_clone_to_complete(clone)

        # verify clone
        self._verify_clone(subvolume, snapshot, clone, source_version=1)

        # remove snapshot
        self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume, snapshot)

        # ensure metadata file is in v2 location, with required version v2
        self._assert_meta_location_and_version(self.volname, clone)

        # remove subvolumes
        self._fs_cmd("subvolume", "rm", self.volname, subvolume)
        self._fs_cmd("subvolume", "rm", self.volname, clone)

        # verify trash dir is clean
        self._wait_for_trash_empty()

    def test_subvolume_snapshot_reconf_max_concurrent_clones(self):
        """
        Validate 'max_concurrent_clones' config option
        """

        # get the default number of cloner threads
        default_max_concurrent_clones = int(self.config_get('mgr', 'mgr/volumes/max_concurrent_clones'))
        self.assertEqual(default_max_concurrent_clones, 4)

        # Increase number of cloner threads
        self.config_set('mgr', 'mgr/volumes/max_concurrent_clones', 6)
        max_concurrent_clones = int(self.config_get('mgr', 'mgr/volumes/max_concurrent_clones'))
        self.assertEqual(max_concurrent_clones, 6)

        # Decrease number of cloner threads
        self.config_set('mgr', 'mgr/volumes/max_concurrent_clones', 2)
        max_concurrent_clones = int(self.config_get('mgr', 'mgr/volumes/max_concurrent_clones'))
        self.assertEqual(max_concurrent_clones, 2)

    def test_subvolume_snapshot_config_snapshot_clone_delay(self):
        """
        Validate 'snapshot_clone_delay' config option
        """

        # get the default delay before starting the clone
        default_timeout = int(self.config_get('mgr', 'mgr/volumes/snapshot_clone_delay'))
        self.assertEqual(default_timeout, 0)

        # Insert a delay of 2 seconds at the beginning of the snapshot clone
        self.config_set('mgr', 'mgr/volumes/snapshot_clone_delay', 2)
        default_timeout = int(self.config_get('mgr', 'mgr/volumes/snapshot_clone_delay'))
        self.assertEqual(default_timeout, 2)

        # Decrease number of cloner threads
        self.config_set('mgr', 'mgr/volumes/max_concurrent_clones', 2)
        max_concurrent_clones = int(self.config_get('mgr', 'mgr/volumes/max_concurrent_clones'))
        self.assertEqual(max_concurrent_clones, 2)

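    # A minimal sketch of the mgr config round-trip exercised by the two tests
    # above; the option names are the real ones used in this file, the helper
    # itself is hypothetical and unused.
    def _example_config_roundtrip(self, opt, value):
        self.config_set('mgr', opt, value)
        return int(self.config_get('mgr', opt))
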
    def test_subvolume_under_group_snapshot_clone(self):
        subvolume = self._generate_random_subvolume_name()
        group = self._generate_random_group_name()
        snapshot = self._generate_random_snapshot_name()
        clone = self._generate_random_clone_name()

        # create group
        self._fs_cmd("subvolumegroup", "create", self.volname, group)

        # create subvolume
        self._fs_cmd("subvolume", "create", self.volname, subvolume, group, "--mode=777")

        # do some IO
        self._do_subvolume_io(subvolume, subvolume_group=group, number_of_files=32)

        # snapshot subvolume
        self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolume, snapshot, group)

        # schedule a clone
        self._fs_cmd("subvolume", "snapshot", "clone", self.volname, subvolume, snapshot, clone, '--group_name', group)

        # check clone status
        self._wait_for_clone_to_complete(clone)

        # verify clone
        self._verify_clone(subvolume, snapshot, clone, source_group=group)

        # remove snapshot
        self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume, snapshot, group)

        # remove subvolumes
        self._fs_cmd("subvolume", "rm", self.volname, subvolume, group)
        self._fs_cmd("subvolume", "rm", self.volname, clone)

        # remove group
        self._fs_cmd("subvolumegroup", "rm", self.volname, group)

        # verify trash dir is clean
        self._wait_for_trash_empty()


class TestMisc(TestVolumesHelper):
    """Miscellaneous tests related to FS volume, subvolume group, and subvolume operations."""
    def test_connection_expiration(self):
        # unmount any cephfs mounts
        for i in range(0, self.CLIENTS_REQUIRED):
            self.mounts[i].umount_wait()
        sessions = self._session_list()
        self.assertLessEqual(len(sessions), 1) # maybe mgr is already mounted

        # Get the mgr to definitely mount cephfs
        subvolume = self._generate_random_subvolume_name()
        self._fs_cmd("subvolume", "create", self.volname, subvolume)
        sessions = self._session_list()
        self.assertEqual(len(sessions), 1)

        # Now wait for the mgr to expire the connection:
        self.wait_until_evicted(sessions[0]['id'], timeout=90)

    def test_mgr_eviction(self):
        # unmount any cephfs mounts
        for i in range(0, self.CLIENTS_REQUIRED):
            self.mounts[i].umount_wait()
        sessions = self._session_list()
        self.assertLessEqual(len(sessions), 1) # maybe mgr is already mounted

        # Get the mgr to definitely mount cephfs
        subvolume = self._generate_random_subvolume_name()
        self._fs_cmd("subvolume", "create", self.volname, subvolume)
        sessions = self._session_list()
        self.assertEqual(len(sessions), 1)

        # Now fail the mgr, check the session was evicted
        mgr = self.mgr_cluster.get_active_id()
        self.mgr_cluster.mgr_fail(mgr)
        self.wait_until_evicted(sessions[0]['id'])

    def test_names_can_only_be_goodchars(self):
        """
        Test that creating volumes, subvolumes and subvolume groups fails when
        their names use characters beyond [a-zA-Z0-9 -_.].
        """
        volname, badname = 'testvol', 'abcd@#'

        with self.assertRaises(CommandFailedError):
            self._fs_cmd('volume', 'create', badname)
        self._fs_cmd('volume', 'create', volname)

        with self.assertRaises(CommandFailedError):
            self._fs_cmd('subvolumegroup', 'create', volname, badname)

        with self.assertRaises(CommandFailedError):
            self._fs_cmd('subvolume', 'create', volname, badname)
        self._fs_cmd('volume', 'rm', volname, '--yes-i-really-mean-it')

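    # A hypothetical client-side restatement of the name rule tested above,
    # assuming the charset from the docstring ([a-zA-Z0-9 -_.]); the
    # authoritative validation happens server-side in the volumes plugin.
    @staticmethod
    def _example_is_goodname(name):
        import re  # local import: "re" isn't otherwise used in this module
        return re.fullmatch(r"[a-zA-Z0-9 \-_.]+", name) is not None
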
    def test_subvolume_ops_on_nonexistent_vol(self):
        # tests the fs subvolume operations on a nonexistent volume

        volname = "non_existent_subvolume"

        # try subvolume operations
        for op in ("create", "rm", "getpath", "info", "resize", "pin", "ls"):
            try:
                if op == "resize":
                    self._fs_cmd("subvolume", "resize", volname, "subvolname_1", "inf")
                elif op == "pin":
                    self._fs_cmd("subvolume", "pin", volname, "subvolname_1", "export", "1")
                elif op == "ls":
                    self._fs_cmd("subvolume", "ls", volname)
                else:
                    self._fs_cmd("subvolume", op, volname, "subvolume_1")
            except CommandFailedError as ce:
                self.assertEqual(ce.exitstatus, errno.ENOENT)
            else:
                self.fail("expected the 'fs subvolume {0}' command to fail".format(op))

        # try subvolume snapshot operations and clone create
        for op in ("create", "rm", "info", "protect", "unprotect", "ls", "clone"):
            try:
                if op == "ls":
                    self._fs_cmd("subvolume", "snapshot", op, volname, "subvolume_1")
                elif op == "clone":
                    self._fs_cmd("subvolume", "snapshot", op, volname, "subvolume_1", "snapshot_1", "clone_1")
                else:
                    self._fs_cmd("subvolume", "snapshot", op, volname, "subvolume_1", "snapshot_1")
            except CommandFailedError as ce:
                self.assertEqual(ce.exitstatus, errno.ENOENT)
            else:
                self.fail("expected the 'fs subvolume snapshot {0}' command to fail".format(op))

        # try clone status
        try:
            self._fs_cmd("clone", "status", volname, "clone_1")
        except CommandFailedError as ce:
            self.assertEqual(ce.exitstatus, errno.ENOENT)
        else:
            self.fail("expected the 'fs clone status' command to fail")

        # try subvolumegroup operations
        for op in ("create", "rm", "getpath", "pin", "ls"):
            try:
                if op == "pin":
                    self._fs_cmd("subvolumegroup", "pin", volname, "group_1", "export", "0")
                elif op == "ls":
                    self._fs_cmd("subvolumegroup", op, volname)
                else:
                    self._fs_cmd("subvolumegroup", op, volname, "group_1")
            except CommandFailedError as ce:
                self.assertEqual(ce.exitstatus, errno.ENOENT)
            else:
                self.fail("expected the 'fs subvolumegroup {0}' command to fail".format(op))

        # try subvolumegroup snapshot operations
        for op in ("create", "rm", "ls"):
            try:
                if op == "ls":
                    self._fs_cmd("subvolumegroup", "snapshot", op, volname, "group_1")
                else:
                    self._fs_cmd("subvolumegroup", "snapshot", op, volname, "group_1", "snapshot_1")
            except CommandFailedError as ce:
                self.assertEqual(ce.exitstatus, errno.ENOENT)
            else:
                self.fail("expected the 'fs subvolumegroup snapshot {0}' command to fail".format(op))

    def test_subvolume_upgrade_legacy_to_v1(self):
        """
        poor man's upgrade test -- rather than going through a full upgrade cycle,
        emulate subvolumes by going through the wormhole and verify if they are
        accessible.
        further ensure that a legacy subvolume is not updated to v2.
        """
        subvolume1, subvolume2 = self._generate_random_subvolume_name(2)
        group = self._generate_random_group_name()

        # emulate old-fashioned subvolumes -- one in the default group and
        # the other in a custom group
        createpath1 = os.path.join(".", "volumes", "_nogroup", subvolume1)
        self.mount_a.run_shell(['mkdir', '-p', createpath1], sudo=True)

        # create group
        createpath2 = os.path.join(".", "volumes", group, subvolume2)
        self.mount_a.run_shell(['mkdir', '-p', createpath2], sudo=True)

        # this would auto-upgrade on access without anyone noticing
        subvolpath1 = self._fs_cmd("subvolume", "getpath", self.volname, subvolume1)
        self.assertNotEqual(subvolpath1, None)
        subvolpath1 = subvolpath1.rstrip() # remove any trailing newline

        subvolpath2 = self._fs_cmd("subvolume", "getpath", self.volname, subvolume2, group)
        self.assertNotEqual(subvolpath2, None)
        subvolpath2 = subvolpath2.rstrip() # remove any trailing newline

        # and... the subvolume path returned should be what we created behind the scenes
        self.assertEqual(createpath1[1:], subvolpath1)
        self.assertEqual(createpath2[1:], subvolpath2)

        # ensure metadata file is in legacy location, with required version v1
        self._assert_meta_location_and_version(self.volname, subvolume1, version=1, legacy=True)
        self._assert_meta_location_and_version(self.volname, subvolume2, subvol_group=group, version=1, legacy=True)

        # remove subvolumes
        self._fs_cmd("subvolume", "rm", self.volname, subvolume1)
        self._fs_cmd("subvolume", "rm", self.volname, subvolume2, group)

        # verify trash dir is clean
        self._wait_for_trash_empty()

        # remove group
        self._fs_cmd("subvolumegroup", "rm", self.volname, group)

    def test_subvolume_no_upgrade_v1_sanity(self):
        """
        poor man's upgrade test -- theme continues...

        This test is to ensure v1 subvolumes are retained as is, due to a snapshot being present, and runs through
        a series of operations on the v1 subvolume to ensure they work as expected.
        """
        subvol_md = ["atime", "bytes_pcent", "bytes_quota", "bytes_used", "created_at", "ctime",
                     "data_pool", "gid", "mode", "mon_addrs", "mtime", "path", "pool_namespace",
                     "type", "uid", "features", "state"]
        snap_md = ["created_at", "data_pool", "has_pending_clones", "size"]

        subvolume = self._generate_random_subvolume_name()
        snapshot = self._generate_random_snapshot_name()
        clone1, clone2 = self._generate_random_clone_name(2)
        mode = "777"
        uid = "1000"
        gid = "1000"

        # emulate a v1 subvolume -- in the default group
        subvolume_path = self._create_v1_subvolume(subvolume)

        # getpath
        subvolpath = self._get_subvolume_path(self.volname, subvolume)
        self.assertEqual(subvolpath, subvolume_path)

        # ls
        subvolumes = json.loads(self._fs_cmd('subvolume', 'ls', self.volname))
        self.assertEqual(len(subvolumes), 1, "subvolume ls count mismatch, expected '1', found {0}".format(len(subvolumes)))
        self.assertEqual(subvolumes[0]['name'], subvolume,
                         "subvolume name mismatch in ls output, expected '{0}', found '{1}'".format(subvolume, subvolumes[0]['name']))

        # info
        subvol_info = json.loads(self._get_subvolume_info(self.volname, subvolume))
        for md in subvol_md:
            self.assertIn(md, subvol_info, "'{0}' key not present in metadata of subvolume".format(md))

        self.assertEqual(subvol_info["state"], "complete",
                         msg="expected state to be 'complete', found '{0}'".format(subvol_info["state"]))
        self.assertEqual(len(subvol_info["features"]), 2,
                         msg="expected 2 features, found '{0}' ({1})".format(len(subvol_info["features"]), subvol_info["features"]))
        for feature in ['snapshot-clone', 'snapshot-autoprotect']:
            self.assertIn(feature, subvol_info["features"], msg="expected feature '{0}' in subvolume".format(feature))

        # resize
        nsize = self.DEFAULT_FILE_SIZE*1024*1024*10
        self._fs_cmd("subvolume", "resize", self.volname, subvolume, str(nsize))
        subvol_info = json.loads(self._get_subvolume_info(self.volname, subvolume))
        for md in subvol_md:
            self.assertIn(md, subvol_info, "'{0}' key not present in metadata of subvolume".format(md))
        self.assertEqual(subvol_info["bytes_quota"], nsize, "bytes_quota should be set to '{0}'".format(nsize))

        # create (idempotent) (change some attrs, to ensure attrs are preserved from the snapshot on clone)
        self._fs_cmd("subvolume", "create", self.volname, subvolume, "--mode", mode, "--uid", uid, "--gid", gid)

        # do some IO
        self._do_subvolume_io(subvolume, number_of_files=8)

        # snap-create
        self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolume, snapshot)

        # clone
        self._fs_cmd("subvolume", "snapshot", "clone", self.volname, subvolume, snapshot, clone1)

        # check clone status
        self._wait_for_clone_to_complete(clone1)

        # ensure clone is v2
        self._assert_meta_location_and_version(self.volname, clone1, version=2)

        # verify clone
        self._verify_clone(subvolume, snapshot, clone1, source_version=1)

        # clone (older snapshot)
        self._fs_cmd("subvolume", "snapshot", "clone", self.volname, subvolume, 'fake', clone2)

        # check clone status
        self._wait_for_clone_to_complete(clone2)

        # ensure clone is v2
        self._assert_meta_location_and_version(self.volname, clone2, version=2)

        # verify clone
        # TODO: rentries will mismatch till this is fixed https://tracker.ceph.com/issues/46747
        #self._verify_clone(subvolume, 'fake', clone2, source_version=1)

        # snap-info
        snap_info = json.loads(self._get_subvolume_snapshot_info(self.volname, subvolume, snapshot))
        for md in snap_md:
            self.assertIn(md, snap_info, "'{0}' key not present in metadata of snapshot".format(md))
        self.assertEqual(snap_info["has_pending_clones"], "no")

        # snap-ls
        subvol_snapshots = json.loads(self._fs_cmd('subvolume', 'snapshot', 'ls', self.volname, subvolume))
        self.assertEqual(len(subvol_snapshots), 2, "subvolume snapshot ls count mismatch, expected '2', found {0}".format(len(subvol_snapshots)))
        snapshotnames = [snapshot['name'] for snapshot in subvol_snapshots]
        for name in [snapshot, 'fake']:
            self.assertIn(name, snapshotnames, msg="expected snapshot '{0}' in subvolume snapshot ls".format(name))

        # snap-rm
        self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume, snapshot)
        self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume, "fake")

        # ensure volume is still at version 1
        self._assert_meta_location_and_version(self.volname, subvolume, version=1)

        # rm
        self._fs_cmd("subvolume", "rm", self.volname, subvolume)
        self._fs_cmd("subvolume", "rm", self.volname, clone1)
        self._fs_cmd("subvolume", "rm", self.volname, clone2)

        # verify trash dir is clean
        self._wait_for_trash_empty()

    def test_subvolume_no_upgrade_v1_to_v2(self):
        """
        poor man's upgrade test -- theme continues...
        ensure v1 to v2 upgrades are not done automatically due to various states of v1
        """
        subvolume1, subvolume2, subvolume3 = self._generate_random_subvolume_name(3)
        group = self._generate_random_group_name()

        # emulate a v1 subvolume -- in the default group
        subvol1_path = self._create_v1_subvolume(subvolume1)

        # emulate a v1 subvolume -- in a custom group
        subvol2_path = self._create_v1_subvolume(subvolume2, subvol_group=group)

        # emulate a v1 subvolume -- in a clone pending state
        self._create_v1_subvolume(subvolume3, subvol_type='clone', has_snapshot=False, state='pending')

        # this would attempt auto-upgrade on access, but fail to do so as snapshots exist
        subvolpath1 = self._get_subvolume_path(self.volname, subvolume1)
        self.assertEqual(subvolpath1, subvol1_path)

        subvolpath2 = self._get_subvolume_path(self.volname, subvolume2, group_name=group)
        self.assertEqual(subvolpath2, subvol2_path)

        # this would attempt auto-upgrade on access, but fail to do so as the subvolume is not complete
        # use clone status, as only certain operations are allowed in pending state
        status = json.loads(self._fs_cmd("clone", "status", self.volname, subvolume3))
        self.assertEqual(status["status"]["state"], "pending")

        # remove snapshots
        self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume1, "fake")
        self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume2, "fake", group)

        # ensure metadata file is in v1 location, with version retained as v1
        self._assert_meta_location_and_version(self.volname, subvolume1, version=1)
        self._assert_meta_location_and_version(self.volname, subvolume2, subvol_group=group, version=1)

        # remove subvolumes
        self._fs_cmd("subvolume", "rm", self.volname, subvolume1)
        self._fs_cmd("subvolume", "rm", self.volname, subvolume2, group)
        try:
            self._fs_cmd("subvolume", "rm", self.volname, subvolume3)
        except CommandFailedError as ce:
            self.assertEqual(ce.exitstatus, errno.EAGAIN, "invalid error code on rm of subvolume undergoing clone")
        else:
            self.fail("expected rm of subvolume undergoing clone to fail")

        # ensure metadata file is in v1 location, with version retained as v1
        self._assert_meta_location_and_version(self.volname, subvolume3, version=1)
        self._fs_cmd("subvolume", "rm", self.volname, subvolume3, "--force")

        # verify list subvolumes returns an empty list
        subvolumels = json.loads(self._fs_cmd('subvolume', 'ls', self.volname))
        self.assertEqual(len(subvolumels), 0)

        # verify trash dir is clean
        self._wait_for_trash_empty()

    def test_subvolume_upgrade_v1_to_v2(self):
        """
        poor man's upgrade test -- theme continues...
        ensure v1 to v2 upgrades work
        """
        subvolume1, subvolume2 = self._generate_random_subvolume_name(2)
        group = self._generate_random_group_name()

        # emulate a v1 subvolume -- in the default group
        subvol1_path = self._create_v1_subvolume(subvolume1, has_snapshot=False)

        # emulate a v1 subvolume -- in a custom group
        subvol2_path = self._create_v1_subvolume(subvolume2, subvol_group=group, has_snapshot=False)

        # this would attempt auto-upgrade on access
        subvolpath1 = self._get_subvolume_path(self.volname, subvolume1)
        self.assertEqual(subvolpath1, subvol1_path)

        subvolpath2 = self._get_subvolume_path(self.volname, subvolume2, group_name=group)
        self.assertEqual(subvolpath2, subvol2_path)

        # ensure metadata file is in v2 location, with version upgraded to v2
        self._assert_meta_location_and_version(self.volname, subvolume1, version=2)
        self._assert_meta_location_and_version(self.volname, subvolume2, subvol_group=group, version=2)

        # remove subvolumes
        self._fs_cmd("subvolume", "rm", self.volname, subvolume1)
        self._fs_cmd("subvolume", "rm", self.volname, subvolume2, group)

        # verify trash dir is clean
        self._wait_for_trash_empty()

    def test_malicious_metafile_on_legacy_to_v1_upgrade(self):
        """
        Validate that a handcrafted .meta file on a legacy subvol root doesn't break
        the system on legacy subvol upgrade to v1
        poor man's upgrade test -- theme continues...
        """
        subvol1, subvol2 = self._generate_random_subvolume_name(2)
        group = self._generate_random_group_name()

        # emulate an old-fashioned subvolume in the default group
        createpath1 = os.path.join(".", "volumes", "_nogroup", subvol1)
        self.mount_a.run_shell(['mkdir', '-p', createpath1], sudo=True)

        # add required xattrs to subvolume
        default_pool = self.mount_a.getfattr(".", "ceph.dir.layout.pool")
        self.mount_a.setfattr(createpath1, 'ceph.dir.layout.pool', default_pool, sudo=True)

        # create v2 subvolume
        self._fs_cmd("subvolume", "create", self.volname, subvol2)

        # Create a malicious .meta file in the legacy subvolume root. Copy the v2
        # subvolume's .meta into legacy subvol1's root
        subvol2_metapath = os.path.join(".", "volumes", "_nogroup", subvol2, ".meta")
        self.mount_a.run_shell(["cp", subvol2_metapath, createpath1], sudo=True)

        # Upgrade legacy subvol1 to v1
        subvolpath1 = self._fs_cmd("subvolume", "getpath", self.volname, subvol1)
        self.assertNotEqual(subvolpath1, None)
        subvolpath1 = subvolpath1.rstrip()

        # the subvolume path returned should not be that of subvol2 from the
        # handcrafted .meta file
        self.assertEqual(createpath1[1:], subvolpath1)

        # ensure metadata file is in legacy location, with required version v1
        self._assert_meta_location_and_version(self.volname, subvol1, version=1, legacy=True)

        # Authorize the alice authID read-write access to subvol1. Verify it authorizes
        # the subvol1 path and not the subvol2 path whose '.meta' file was copied to
        # subvol1's root
        authid1 = "alice"
        self._fs_cmd("subvolume", "authorize", self.volname, subvol1, authid1)

        # Validate that the mds path added is that of subvol1 and not of subvol2
        out = json.loads(self.fs.mon_manager.raw_cluster_cmd("auth", "get", "client.alice", "--format=json-pretty"))
        self.assertEqual("client.alice", out[0]["entity"])
        self.assertEqual("allow rw path={0}".format(createpath1[1:]), out[0]["caps"]["mds"])

        # remove subvolumes
        self._fs_cmd("subvolume", "rm", self.volname, subvol1)
        self._fs_cmd("subvolume", "rm", self.volname, subvol2)

        # verify trash dir is clean
        self._wait_for_trash_empty()

    def test_binary_metafile_on_legacy_to_v1_upgrade(self):
        """
        Validate that a binary .meta file on a legacy subvol root doesn't break
        the system on legacy subvol upgrade to v1
        poor man's upgrade test -- theme continues...
        """
        subvol = self._generate_random_subvolume_name()
        group = self._generate_random_group_name()

        # emulate an old-fashioned subvolume -- in a custom group
        createpath = os.path.join(".", "volumes", group, subvol)
        self.mount_a.run_shell(['mkdir', '-p', createpath], sudo=True)

        # add required xattrs to subvolume
        default_pool = self.mount_a.getfattr(".", "ceph.dir.layout.pool")
        self.mount_a.setfattr(createpath, 'ceph.dir.layout.pool', default_pool, sudo=True)

        # Create an unparseable binary .meta file on the legacy subvol's root
        meta_contents = os.urandom(4096)
        meta_filepath = os.path.join(self.mount_a.mountpoint, createpath, ".meta")
        self.mount_a.client_remote.write_file(meta_filepath, meta_contents, sudo=True)

        # Upgrade legacy subvol to v1
        subvolpath = self._fs_cmd("subvolume", "getpath", self.volname, subvol, group)
        self.assertNotEqual(subvolpath, None)
        subvolpath = subvolpath.rstrip()

        # The legacy subvolume path should be returned for subvol.
        # Should ignore the unparseable binary .meta file in subvol's root
        self.assertEqual(createpath[1:], subvolpath)

        # ensure metadata file is in legacy location, with required version v1
        self._assert_meta_location_and_version(self.volname, subvol, subvol_group=group, version=1, legacy=True)

        # remove subvolume
        self._fs_cmd("subvolume", "rm", self.volname, subvol, group)

        # verify trash dir is clean
        self._wait_for_trash_empty()

        # remove group
        self._fs_cmd("subvolumegroup", "rm", self.volname, group)

    def test_unparseable_metafile_on_legacy_to_v1_upgrade(self):
        """
        Validate that an unparseable text .meta file on a legacy subvol root doesn't
        break the system on legacy subvol upgrade to v1
        poor man's upgrade test -- theme continues...
        """
        subvol = self._generate_random_subvolume_name()
        group = self._generate_random_group_name()

        # emulate an old-fashioned subvolume -- in a custom group
        createpath = os.path.join(".", "volumes", group, subvol)
        self.mount_a.run_shell(['mkdir', '-p', createpath], sudo=True)

        # add required xattrs to subvolume
        default_pool = self.mount_a.getfattr(".", "ceph.dir.layout.pool")
        self.mount_a.setfattr(createpath, 'ceph.dir.layout.pool', default_pool, sudo=True)

        # Create an unparseable text .meta file on the legacy subvol's root
        meta_contents = "unparseable config\nfile ...\nunparseable config\nfile ...\n"
        meta_filepath = os.path.join(self.mount_a.mountpoint, createpath, ".meta")
        self.mount_a.client_remote.write_file(meta_filepath, meta_contents, sudo=True)

        # Upgrade legacy subvol to v1
        subvolpath = self._fs_cmd("subvolume", "getpath", self.volname, subvol, group)
        self.assertNotEqual(subvolpath, None)
        subvolpath = subvolpath.rstrip()

        # The legacy subvolume path should be returned for subvol.
        # Should ignore the unparseable text .meta file in subvol's root
        self.assertEqual(createpath[1:], subvolpath)

        # ensure metadata file is in legacy location, with required version v1
        self._assert_meta_location_and_version(self.volname, subvol, subvol_group=group, version=1, legacy=True)

        # remove subvolume
        self._fs_cmd("subvolume", "rm", self.volname, subvol, group)

        # verify trash dir is clean
        self._wait_for_trash_empty()

        # remove group
        self._fs_cmd("subvolumegroup", "rm", self.volname, group)