import os
import json
import time
import errno
import random
import logging
import collections
import uuid
import unittest
from hashlib import md5
from textwrap import dedent

from tasks.cephfs.cephfs_test_case import CephFSTestCase
from tasks.cephfs.fuse_mount import FuseMount
from teuthology.exceptions import CommandFailedError

log = logging.getLogger(__name__)

class TestVolumesHelper(CephFSTestCase):
    """Helper class for testing FS volume, subvolume group and subvolume operations."""
    TEST_VOLUME_PREFIX = "volume"
    TEST_SUBVOLUME_PREFIX = "subvolume"
    TEST_GROUP_PREFIX = "group"
    TEST_SNAPSHOT_PREFIX = "snapshot"
    TEST_CLONE_PREFIX = "clone"
    TEST_FILE_NAME_PREFIX = "subvolume_file"

    # for filling subvolume with data
    CLIENTS_REQUIRED = 2
    MDSS_REQUIRED = 2

    # io defaults
    DEFAULT_FILE_SIZE = 1  # MB
    DEFAULT_NUMBER_OF_FILES = 1024

    def _fs_cmd(self, *args):
        return self.mgr_cluster.mon_manager.raw_cluster_cmd("fs", *args)

    def _raw_cmd(self, *args):
        return self.mgr_cluster.mon_manager.raw_cluster_cmd(*args)

    def __check_clone_state(self, state, clone, clone_group=None, timo=120):
        check = 0
        args = ["clone", "status", self.volname, clone]
        if clone_group:
            args.append(clone_group)
        args = tuple(args)
        while check < timo:
            result = json.loads(self._fs_cmd(*args))
            if result["status"]["state"] == state:
                break
            check += 1
            time.sleep(1)
        self.assertTrue(check < timo)

    def _wait_for_clone_to_complete(self, clone, clone_group=None, timo=120):
        self.__check_clone_state("complete", clone, clone_group, timo)

    def _wait_for_clone_to_fail(self, clone, clone_group=None, timo=120):
        self.__check_clone_state("failed", clone, clone_group, timo)

    def _check_clone_canceled(self, clone, clone_group=None):
        self.__check_clone_state("canceled", clone, clone_group, timo=1)

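    # A minimal usage sketch (not part of the original suite): the polling
    # helpers above wrap `ceph fs clone status <volume> <clone>`, which emits a
    # JSON document whose ["status"]["state"] field moves through states such
    # as "pending", "in-progress", and eventually "complete", "failed" or
    # "canceled". A test would typically do:
    #
    #   self._fs_cmd("subvolume", "snapshot", "clone", self.volname,
    #                subvolume, snapshot, clone)
    #   self._wait_for_clone_to_complete(clone)
    #
    # i.e. kick off the clone and then poll (once per second, up to `timo`
    # seconds) until the desired terminal state is observed.
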
    def _get_subvolume_snapshot_path(self, subvolume, snapshot, source_group, subvol_path, source_version):
        if source_version == 2:
            # v2
            if subvol_path is not None:
                (base_path, uuid_str) = os.path.split(subvol_path)
            else:
                (base_path, uuid_str) = os.path.split(self._get_subvolume_path(self.volname, subvolume, group_name=source_group))
            return os.path.join(base_path, ".snap", snapshot, uuid_str)

        # v1
        base_path = self._get_subvolume_path(self.volname, subvolume, group_name=source_group)
        return os.path.join(base_path, ".snap", snapshot)

    def _verify_clone_attrs(self, source_path, clone_path):
        path1 = source_path
        path2 = clone_path

        p = self.mount_a.run_shell(["find", path1])
        paths = p.stdout.getvalue().strip().split()

        # for each entry in source and clone (sink) verify certain inode attributes:
        # inode type, mode, ownership, [am]time.
        for source_path in paths:
            sink_entry = source_path[len(path1)+1:]
            sink_path = os.path.join(path2, sink_entry)

            # mode+type
            sval = int(self.mount_a.run_shell(['stat', '-c' '%f', source_path]).stdout.getvalue().strip(), 16)
            cval = int(self.mount_a.run_shell(['stat', '-c' '%f', sink_path]).stdout.getvalue().strip(), 16)
            self.assertEqual(sval, cval)

            # ownership
            sval = int(self.mount_a.run_shell(['stat', '-c' '%u', source_path]).stdout.getvalue().strip())
            cval = int(self.mount_a.run_shell(['stat', '-c' '%u', sink_path]).stdout.getvalue().strip())
            self.assertEqual(sval, cval)

            sval = int(self.mount_a.run_shell(['stat', '-c' '%g', source_path]).stdout.getvalue().strip())
            cval = int(self.mount_a.run_shell(['stat', '-c' '%g', sink_path]).stdout.getvalue().strip())
            self.assertEqual(sval, cval)

            # inode timestamps
            # do not check access as kclient will generally not update this like ceph-fuse will.
            sval = int(self.mount_a.run_shell(['stat', '-c' '%Y', source_path]).stdout.getvalue().strip())
            cval = int(self.mount_a.run_shell(['stat', '-c' '%Y', sink_path]).stdout.getvalue().strip())
            self.assertEqual(sval, cval)

    def _verify_clone_root(self, source_path, clone_path, clone, clone_group, clone_pool):
        # verifies following clone root attrs quota, data_pool and pool_namespace
        # remaining attributes of clone root are validated in _verify_clone_attrs

        clone_info = json.loads(self._get_subvolume_info(self.volname, clone, clone_group))

        # verify quota is inherited from source snapshot
        src_quota = self.mount_a.getfattr(source_path, "ceph.quota.max_bytes")
        # FIXME: kclient fails to get this quota value: https://tracker.ceph.com/issues/48075
        if isinstance(self.mount_a, FuseMount):
            self.assertEqual(clone_info["bytes_quota"], "infinite" if src_quota is None else int(src_quota))

        if clone_pool:
            # verify pool is set as per request
            self.assertEqual(clone_info["data_pool"], clone_pool)
        else:
            # verify pool and pool namespace are inherited from snapshot
            self.assertEqual(clone_info["data_pool"],
                             self.mount_a.getfattr(source_path, "ceph.dir.layout.pool"))
            self.assertEqual(clone_info["pool_namespace"],
                             self.mount_a.getfattr(source_path, "ceph.dir.layout.pool_namespace"))

    def _verify_clone(self, subvolume, snapshot, clone,
                      source_group=None, clone_group=None, clone_pool=None,
                      subvol_path=None, source_version=2, timo=120):
        # pass in subvol_path (subvolume path when snapshot was taken) when subvolume is removed
        # but snapshots are retained for clone verification
        path1 = self._get_subvolume_snapshot_path(subvolume, snapshot, source_group, subvol_path, source_version)
        path2 = self._get_subvolume_path(self.volname, clone, group_name=clone_group)

        check = 0
        # TODO: currently snapshot rentries are not stable if snapshot source entries
        #       are removed, https://tracker.ceph.com/issues/46747
        while check < timo and subvol_path is None:
            val1 = int(self.mount_a.getfattr(path1, "ceph.dir.rentries"))
            val2 = int(self.mount_a.getfattr(path2, "ceph.dir.rentries"))
            if val1 == val2:
                break
            check += 1
            time.sleep(1)
        self.assertTrue(check < timo)

        self._verify_clone_root(path1, path2, clone, clone_group, clone_pool)
        self._verify_clone_attrs(path1, path2)

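    # End-to-end sketch of how the verifiers above are meant to be combined
    # (assumed flow, mirroring the clone tests later in this file):
    #
    #   self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolume, snapshot)
    #   self._fs_cmd("subvolume", "snapshot", "clone", self.volname, subvolume, snapshot, clone)
    #   self._wait_for_clone_to_complete(clone)
    #   self._verify_clone(subvolume, snapshot, clone)
    #
    # _verify_clone() first waits for the recursive entry counts
    # (ceph.dir.rentries) of snapshot and clone to converge, then compares
    # clone-root attributes and per-inode attributes.
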
    def _generate_random_volume_name(self, count=1):
        n = self.volume_start
        volumes = [f"{TestVolumes.TEST_VOLUME_PREFIX}_{i:016}" for i in range(n, n+count)]
        self.volume_start += count
        return volumes[0] if count == 1 else volumes

    def _generate_random_subvolume_name(self, count=1):
        n = self.subvolume_start
        subvolumes = [f"{TestVolumes.TEST_SUBVOLUME_PREFIX}_{i:016}" for i in range(n, n+count)]
        self.subvolume_start += count
        return subvolumes[0] if count == 1 else subvolumes

    def _generate_random_group_name(self, count=1):
        n = self.group_start
        groups = [f"{TestVolumes.TEST_GROUP_PREFIX}_{i:016}" for i in range(n, n+count)]
        self.group_start += count
        return groups[0] if count == 1 else groups

    def _generate_random_snapshot_name(self, count=1):
        n = self.snapshot_start
        snaps = [f"{TestVolumes.TEST_SNAPSHOT_PREFIX}_{i:016}" for i in range(n, n+count)]
        self.snapshot_start += count
        return snaps[0] if count == 1 else snaps

    def _generate_random_clone_name(self, count=1):
        n = self.clone_start
        clones = [f"{TestVolumes.TEST_CLONE_PREFIX}_{i:016}" for i in range(n, n+count)]
        self.clone_start += count
        return clones[0] if count == 1 else clones

    def _enable_multi_fs(self):
        self._fs_cmd("flag", "set", "enable_multiple", "true", "--yes-i-really-mean-it")

    def _create_or_reuse_test_volume(self):
        result = json.loads(self._fs_cmd("volume", "ls"))
        if len(result) == 0:
            self.vol_created = True
            self.volname = self._generate_random_volume_name()
            self._fs_cmd("volume", "create", self.volname)
        else:
            self.volname = result[0]['name']

    def _get_subvolume_group_path(self, vol_name, group_name):
        args = ("subvolumegroup", "getpath", vol_name, group_name)
        path = self._fs_cmd(*args)
        # remove the leading '/', and trailing whitespaces
        return path[1:].rstrip()

    def _get_subvolume_path(self, vol_name, subvol_name, group_name=None):
        args = ["subvolume", "getpath", vol_name, subvol_name]
        if group_name:
            args.append(group_name)
        args = tuple(args)
        path = self._fs_cmd(*args)
        # remove the leading '/', and trailing whitespaces
        return path[1:].rstrip()

    def _get_subvolume_info(self, vol_name, subvol_name, group_name=None):
        args = ["subvolume", "info", vol_name, subvol_name]
        if group_name:
            args.append(group_name)
        args = tuple(args)
        subvol_md = self._fs_cmd(*args)
        return subvol_md

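    # Note: _get_subvolume_info() returns the raw JSON string emitted by
    # `ceph fs subvolume info`; callers are expected to json.loads() it, e.g.
    # (sketch):
    #
    #   subvol_info = json.loads(self._get_subvolume_info(self.volname, subvolume))
    #   self.assertEqual(subvol_info["bytes_quota"], "infinite")
    #
    # Keys such as "bytes_quota", "data_pool" and "pool_namespace" are
    # asserted on throughout the tests below.
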
    def _get_subvolume_snapshot_info(self, vol_name, subvol_name, snapname, group_name=None):
        args = ["subvolume", "snapshot", "info", vol_name, subvol_name, snapname]
        if group_name:
            args.append(group_name)
        args = tuple(args)
        snap_md = self._fs_cmd(*args)
        return snap_md

    def _delete_test_volume(self):
        self._fs_cmd("volume", "rm", self.volname, "--yes-i-really-mean-it")

    def _do_subvolume_pool_and_namespace_update(self, subvolume, pool=None, pool_namespace=None, subvolume_group=None):
        subvolpath = self._get_subvolume_path(self.volname, subvolume, group_name=subvolume_group)

        if pool is not None:
            self.mount_a.setfattr(subvolpath, 'ceph.dir.layout.pool', pool, sudo=True)

        if pool_namespace is not None:
            self.mount_a.setfattr(subvolpath, 'ceph.dir.layout.pool_namespace', pool_namespace, sudo=True)

    def _do_subvolume_attr_update(self, subvolume, uid, gid, mode, subvolume_group=None):
        subvolpath = self._get_subvolume_path(self.volname, subvolume, group_name=subvolume_group)

        # mode
        self.mount_a.run_shell(['chmod', mode, subvolpath], sudo=True)

        # ownership
        self.mount_a.run_shell(['chown', uid, subvolpath], sudo=True)
        self.mount_a.run_shell(['chgrp', gid, subvolpath], sudo=True)

    def _do_subvolume_io(self, subvolume, subvolume_group=None, create_dir=None,
                         number_of_files=DEFAULT_NUMBER_OF_FILES, file_size=DEFAULT_FILE_SIZE):
        # get subvolume path for IO
        args = ["subvolume", "getpath", self.volname, subvolume]
        if subvolume_group:
            args.append(subvolume_group)
        args = tuple(args)
        subvolpath = self._fs_cmd(*args)
        self.assertNotEqual(subvolpath, None)
        subvolpath = subvolpath[1:].rstrip()  # remove "/" prefix and any trailing newline

        io_path = subvolpath
        if create_dir:
            io_path = os.path.join(subvolpath, create_dir)
            self.mount_a.run_shell_payload(f"mkdir -p {io_path}")

        log.debug("filling subvolume {0} with {1} files each {2}MB size under directory {3}".format(subvolume, number_of_files, file_size, io_path))
        for i in range(number_of_files):
            filename = "{0}.{1}".format(TestVolumes.TEST_FILE_NAME_PREFIX, i)
            self.mount_a.write_n_mb(os.path.join(io_path, filename), file_size)

    def _do_subvolume_io_mixed(self, subvolume, subvolume_group=None):
        subvolpath = self._get_subvolume_path(self.volname, subvolume, group_name=subvolume_group)

        reg_file = "regfile.0"
        dir_path = os.path.join(subvolpath, "dir.0")
        sym_path1 = os.path.join(subvolpath, "sym.0")
        # this symlink's ownership would be changed
        sym_path2 = os.path.join(dir_path, "sym.0")

        self.mount_a.run_shell(["mkdir", dir_path])
        self.mount_a.run_shell(["ln", "-s", "./{}".format(reg_file), sym_path1])
        self.mount_a.run_shell(["ln", "-s", "./{}".format(reg_file), sym_path2])
        # flip ownership to nobody. assumption: nobody's id is 65534
        self.mount_a.run_shell(["chown", "-h", "65534:65534", sym_path2], sudo=True, omit_sudo=False)

    def _wait_for_trash_empty(self, timeout=30):
        # XXX: construct the trash dir path (note that there is no mgr
        # [sub]volume interface for this).
        trashdir = os.path.join("./", "volumes", "_deleting")
        self.mount_a.wait_for_dir_empty(trashdir, timeout=timeout)

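    # Background (assumed behaviour of mgr/volumes): `fs subvolume rm` does not
    # delete data synchronously; the subvolume directory is moved under
    # <fs-root>/volumes/_deleting/ and purged asynchronously. Hence tests call
    # _wait_for_trash_empty() after removals, which simply polls that directory
    # until it has no entries (or times out).
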
    def _assert_meta_location_and_version(self, vol_name, subvol_name, subvol_group=None, version=2, legacy=False):
        if legacy:
            subvol_path = self._get_subvolume_path(vol_name, subvol_name, group_name=subvol_group)
            m = md5()
            m.update(("/"+subvol_path).encode('utf-8'))
            meta_filename = "{0}.meta".format(m.digest().hex())
            metapath = os.path.join(".", "volumes", "_legacy", meta_filename)
        else:
            group = subvol_group if subvol_group is not None else '_nogroup'
            metapath = os.path.join(".", "volumes", group, subvol_name, ".meta")

        out = self.mount_a.run_shell(['cat', metapath], sudo=True)
        lines = out.stdout.getvalue().strip().split('\n')
        sv_version = -1
        for line in lines:
            if line == "version = " + str(version):
                sv_version = version
                break
        self.assertEqual(sv_version, version, "version expected was '{0}' but got '{1}' from meta file at '{2}'".format(
            version, sv_version, metapath))

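    # Worked example of the legacy branch above (path hypothetical): a legacy
    # subvolume at "volumes/grp/sv" has its metadata tracked in a sidecar file
    # under volumes/_legacy/, named by hashing the absolute subvolume path:
    #
    #   from hashlib import md5
    #   md5("/volumes/grp/sv".encode('utf-8')).digest().hex() + ".meta"
    #
    # whereas v1/v2 subvolumes keep a .meta file inside the subvolume base
    # directory itself.
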
    def _create_v1_subvolume(self, subvol_name, subvol_group=None, has_snapshot=True, subvol_type='subvolume', state='complete'):
        group = subvol_group if subvol_group is not None else '_nogroup'
        basepath = os.path.join("volumes", group, subvol_name)
        uuid_str = str(uuid.uuid4())
        createpath = os.path.join(basepath, uuid_str)
        self.mount_a.run_shell(['mkdir', '-p', createpath], sudo=True)

        # create a v1 snapshot, to prevent auto upgrades
        if has_snapshot:
            snappath = os.path.join(createpath, ".snap", "fake")
            self.mount_a.run_shell(['mkdir', '-p', snappath], sudo=True)

        # add required xattrs to subvolume
        default_pool = self.mount_a.getfattr(".", "ceph.dir.layout.pool")
        self.mount_a.setfattr(createpath, 'ceph.dir.layout.pool', default_pool, sudo=True)

        # create a v1 .meta file
        meta_contents = "[GLOBAL]\nversion = 1\ntype = {0}\npath = {1}\nstate = {2}\n".format(subvol_type, "/" + createpath, state)
        if state == 'pending':
            # add a fake clone source
            meta_contents = meta_contents + '[source]\nvolume = fake\nsubvolume = fake\nsnapshot = fake\n'
        meta_filepath1 = os.path.join(self.mount_a.mountpoint, basepath, ".meta")
        self.mount_a.client_remote.write_file(meta_filepath1, meta_contents, sudo=True)
        return createpath

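    # Resulting on-disk layout (sketch) for a v1 subvolume "sv" in the default
    # group, as emulated above:
    #
    #   volumes/_nogroup/sv/.meta             <- INI-style metadata, "version = 1"
    #   volumes/_nogroup/sv/<uuid>/           <- the subvolume incarnation
    #   volumes/_nogroup/sv/<uuid>/.snap/fake <- snapshot that blocks auto-upgrade
    #
    # Tests use this helper to exercise upgrade and compatibility paths against
    # pre-v2 subvolumes.
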
    def _update_fake_trash(self, subvol_name, subvol_group=None, trash_name='fake', create=True):
        group = subvol_group if subvol_group is not None else '_nogroup'
        trashpath = os.path.join("volumes", group, subvol_name, '.trash', trash_name)
        if create:
            self.mount_a.run_shell(['mkdir', '-p', trashpath], sudo=True)
        else:
            self.mount_a.run_shell(['rmdir', trashpath], sudo=True)

    def _configure_guest_auth(self, guest_mount, authid, key):
        """
        Set up auth credentials for a guest client.
        """
        # Create keyring file for the guest client.
        keyring_txt = dedent("""
        [client.{authid}]
        key = {key}

        """.format(authid=authid, key=key))

        guest_mount.client_id = authid
        guest_mount.client_remote.write_file(guest_mount.get_keyring_path(),
                                             keyring_txt, sudo=True)
        # Add a guest client section to the ceph config file.
        self.config_set("client.{0}".format(authid), "debug client", 20)
        self.config_set("client.{0}".format(authid), "debug objecter", 20)
        self.set_conf("client.{0}".format(authid),
                      "keyring", guest_mount.get_keyring_path())

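    # The keyring written above is the kind of credential a guest would
    # normally obtain via `ceph fs authorize <fs> client.<id> <path> rw` (or
    # `ceph auth get-or-create`); here the key is supplied by the caller and
    # only the client-side plumbing is set up: a keyring file plus a
    # [client.<id>] config section with debug logging and the keyring path, so
    # the guest mount can authenticate.
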
    def _auth_metadata_get(self, filedata):
        """
        Return a deserialized JSON object, or None
        """
        try:
            data = json.loads(filedata)
        except json.decoder.JSONDecodeError:
            data = None
        return data

    def setUp(self):
        super(TestVolumesHelper, self).setUp()
        self.volname = None
        self.vol_created = False
        self._enable_multi_fs()
        self._create_or_reuse_test_volume()
        self.config_set('mon', 'mon_allow_pool_delete', True)
        self.volume_start = random.randint(1, (1<<20))
        self.subvolume_start = random.randint(1, (1<<20))
        self.group_start = random.randint(1, (1<<20))
        self.snapshot_start = random.randint(1, (1<<20))
        self.clone_start = random.randint(1, (1<<20))

    def tearDown(self):
        if self.vol_created:
            self._delete_test_volume()
        super(TestVolumesHelper, self).tearDown()


class TestVolumes(TestVolumesHelper):
    """Tests for FS volume operations."""
    def test_volume_create(self):
        """
        That the volume can be created and then cleaned up.
        """
        volname = self._generate_random_volume_name()
        self._fs_cmd("volume", "create", volname)
        volumels = json.loads(self._fs_cmd("volume", "ls"))

        if not (volname in ([volume['name'] for volume in volumels])):
            raise RuntimeError("Error creating volume '{0}'".format(volname))
        else:
            # clean up
            self._fs_cmd("volume", "rm", volname, "--yes-i-really-mean-it")

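    # For reference, the mgr/volumes calls exercised above map to the CLI
    # (sketch):
    #
    #   $ ceph fs volume create <volname>
    #   $ ceph fs volume ls
    #   $ ceph fs volume rm <volname> --yes-i-really-mean-it
    #
    # `volume create` provisions a file system plus its data and metadata
    # pools; `volume rm` tears all of that down, hence the confirmation flag.
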
    def test_volume_ls(self):
        """
        That the existing and the newly created volumes can be listed and
        finally cleaned up.
        """
        vls = json.loads(self._fs_cmd("volume", "ls"))
        volumes = [volume['name'] for volume in vls]

        # create new volumes and add them to the existing list of volumes
        volumenames = self._generate_random_volume_name(2)
        for volumename in volumenames:
            self._fs_cmd("volume", "create", volumename)
        volumes.extend(volumenames)

        # list volumes
        try:
            volumels = json.loads(self._fs_cmd('volume', 'ls'))
            if len(volumels) == 0:
                raise RuntimeError("Expected the 'fs volume ls' command to list the created volumes.")
            else:
                volnames = [volume['name'] for volume in volumels]
                if collections.Counter(volnames) != collections.Counter(volumes):
                    raise RuntimeError("Error creating or listing volumes")
        finally:
            # clean up
            for volume in volumenames:
                self._fs_cmd("volume", "rm", volume, "--yes-i-really-mean-it")

    def test_volume_rm(self):
        """
        That the volume can only be removed when --yes-i-really-mean-it is used
        and verify that the deleted volume is not listed anymore.
        """
        for m in self.mounts:
            m.umount_wait()
        try:
            self._fs_cmd("volume", "rm", self.volname)
        except CommandFailedError as ce:
            if ce.exitstatus != errno.EPERM:
                raise RuntimeError("expected the 'fs volume rm' command to fail with EPERM, "
                                   "but it failed with {0}".format(ce.exitstatus))
            else:
                self._fs_cmd("volume", "rm", self.volname, "--yes-i-really-mean-it")

                # check if it's gone
                volumes = json.loads(self._fs_cmd("volume", "ls", "--format=json-pretty"))
                if (self.volname in [volume['name'] for volume in volumes]):
                    raise RuntimeError("Expected the 'fs volume rm' command to succeed. "
                                       "The volume {0} was not removed.".format(self.volname))
        else:
            raise RuntimeError("expected the 'fs volume rm' command to fail.")

    def test_volume_rm_arbitrary_pool_removal(self):
        """
        That the arbitrary pool added to the volume out of band is removed
        successfully on volume removal.
        """
        for m in self.mounts:
            m.umount_wait()
        new_pool = "new_pool"
        # add arbitrary data pool
        self.fs.add_data_pool(new_pool)
        vol_status = json.loads(self._fs_cmd("status", self.volname, "--format=json-pretty"))
        self._fs_cmd("volume", "rm", self.volname, "--yes-i-really-mean-it")

        # check if fs is gone
        volumes = json.loads(self._fs_cmd("volume", "ls", "--format=json-pretty"))
        volnames = [volume['name'] for volume in volumes]
        self.assertNotIn(self.volname, volnames)

        # check if osd pools are gone
        pools = json.loads(self._raw_cmd("osd", "pool", "ls", "--format=json-pretty"))
        for pool in vol_status["pools"]:
            self.assertNotIn(pool["name"], pools)

    def test_volume_rm_when_mon_delete_pool_false(self):
        """
        That the volume can only be removed when mon_allow_pool_delete is set
        to true and verify that the pools are removed after volume deletion.
        """
        for m in self.mounts:
            m.umount_wait()
        self.config_set('mon', 'mon_allow_pool_delete', False)
        try:
            self._fs_cmd("volume", "rm", self.volname, "--yes-i-really-mean-it")
        except CommandFailedError as ce:
            self.assertEqual(ce.exitstatus, errno.EPERM,
                             "expected the 'fs volume rm' command to fail with EPERM, "
                             "but it failed with {0}".format(ce.exitstatus))
        vol_status = json.loads(self._fs_cmd("status", self.volname, "--format=json-pretty"))
        self.config_set('mon', 'mon_allow_pool_delete', True)
        self._fs_cmd("volume", "rm", self.volname, "--yes-i-really-mean-it")

        # check if fs is gone
        volumes = json.loads(self._fs_cmd("volume", "ls", "--format=json-pretty"))
        volnames = [volume['name'] for volume in volumes]
        self.assertNotIn(self.volname, volnames,
                         "volume {0} exists after removal".format(self.volname))
        # check if pools are gone
        pools = json.loads(self._raw_cmd("osd", "pool", "ls", "--format=json-pretty"))
        for pool in vol_status["pools"]:
            self.assertNotIn(pool["name"], pools,
                             "pool {0} exists after volume removal".format(pool["name"]))

    def test_volume_rename(self):
        """
        That a volume, along with its file system and pools, can be renamed.
        """
        for m in self.mounts:
            m.umount_wait()
        oldvolname = self.volname
        newvolname = self._generate_random_volume_name()
        new_data_pool, new_metadata_pool = f"cephfs.{newvolname}.data", f"cephfs.{newvolname}.meta"
        self._fs_cmd("volume", "rename", oldvolname, newvolname,
                     "--yes-i-really-mean-it")
        volumels = json.loads(self._fs_cmd('volume', 'ls'))
        volnames = [volume['name'] for volume in volumels]
        # volume name changed
        self.assertIn(newvolname, volnames)
        self.assertNotIn(oldvolname, volnames)
        # pool names changed
        self.fs.get_pool_names(refresh=True)
        self.assertEqual(new_metadata_pool, self.fs.get_metadata_pool_name())
        self.assertEqual(new_data_pool, self.fs.get_data_pool_name())

    def test_volume_rename_idempotency(self):
        """
        That volume rename is idempotent.
        """
        for m in self.mounts:
            m.umount_wait()
        oldvolname = self.volname
        newvolname = self._generate_random_volume_name()
        new_data_pool, new_metadata_pool = f"cephfs.{newvolname}.data", f"cephfs.{newvolname}.meta"
        self._fs_cmd("volume", "rename", oldvolname, newvolname,
                     "--yes-i-really-mean-it")
        self._fs_cmd("volume", "rename", oldvolname, newvolname,
                     "--yes-i-really-mean-it")
        volumels = json.loads(self._fs_cmd('volume', 'ls'))
        volnames = [volume['name'] for volume in volumels]
        self.assertIn(newvolname, volnames)
        self.assertNotIn(oldvolname, volnames)
        self.fs.get_pool_names(refresh=True)
        self.assertEqual(new_metadata_pool, self.fs.get_metadata_pool_name())
        self.assertEqual(new_data_pool, self.fs.get_data_pool_name())

    def test_volume_rename_fails_without_confirmation_flag(self):
        """
        That renaming a volume fails without the --yes-i-really-mean-it flag.
        """
        newvolname = self._generate_random_volume_name()
        try:
            self._fs_cmd("volume", "rename", self.volname, newvolname)
        except CommandFailedError as ce:
            self.assertEqual(ce.exitstatus, errno.EPERM,
                             "invalid error code on renaming a FS volume without the "
                             "'--yes-i-really-mean-it' flag")
        else:
            self.fail("expected renaming of FS volume to fail without the "
                      "'--yes-i-really-mean-it' flag")

    def test_volume_rename_for_more_than_one_data_pool(self):
        """
        That renaming a volume with more than one data pool does not change
        the name of the data pools.
        """
        for m in self.mounts:
            m.umount_wait()
        self.fs.add_data_pool('another-data-pool')
        oldvolname = self.volname
        newvolname = self._generate_random_volume_name()
        self.fs.get_pool_names(refresh=True)
        orig_data_pool_names = list(self.fs.data_pools.values())
        new_metadata_pool = f"cephfs.{newvolname}.meta"
        self._fs_cmd("volume", "rename", self.volname, newvolname,
                     "--yes-i-really-mean-it")
        volumels = json.loads(self._fs_cmd('volume', 'ls'))
        volnames = [volume['name'] for volume in volumels]
        # volume name changed
        self.assertIn(newvolname, volnames)
        self.assertNotIn(oldvolname, volnames)
        self.fs.get_pool_names(refresh=True)
        # metadata pool name changed
        self.assertEqual(new_metadata_pool, self.fs.get_metadata_pool_name())
        # data pool names unchanged
        self.assertCountEqual(orig_data_pool_names, list(self.fs.data_pools.values()))


class TestSubvolumeGroups(TestVolumesHelper):
    """Tests for FS subvolume group operations."""
    def test_default_uid_gid_subvolume_group(self):
        group = self._generate_random_group_name()
        expected_uid = 0
        expected_gid = 0

        # create group
        self._fs_cmd("subvolumegroup", "create", self.volname, group)
        group_path = self._get_subvolume_group_path(self.volname, group)

        # check group's uid and gid
        stat = self.mount_a.stat(group_path)
        self.assertEqual(stat['st_uid'], expected_uid)
        self.assertEqual(stat['st_gid'], expected_gid)

        # remove group
        self._fs_cmd("subvolumegroup", "rm", self.volname, group)

    def test_nonexistent_subvolume_group_create(self):
        subvolume = self._generate_random_subvolume_name()
        group = "non_existent_group"

        # try creating a subvolume in a nonexistent group
        try:
            self._fs_cmd("subvolume", "create", self.volname, subvolume, "--group_name", group)
        except CommandFailedError as ce:
            if ce.exitstatus != errno.ENOENT:
                raise
        else:
            raise RuntimeError("expected the 'fs subvolume create' command to fail")

    def test_nonexistent_subvolume_group_rm(self):
        group = "non_existent_group"

        # try removing the nonexistent subvolume group
        try:
            self._fs_cmd("subvolumegroup", "rm", self.volname, group)
        except CommandFailedError as ce:
            if ce.exitstatus != errno.ENOENT:
                raise
        else:
            raise RuntimeError("expected the 'fs subvolumegroup rm' command to fail")

    def test_subvolume_group_create_with_auto_cleanup_on_fail(self):
        group = self._generate_random_group_name()
        data_pool = "invalid_pool"
        # create group with invalid data pool layout
        with self.assertRaises(CommandFailedError):
            self._fs_cmd("subvolumegroup", "create", self.volname, group, "--pool_layout", data_pool)

        # check whether group path is cleaned up
        try:
            self._fs_cmd("subvolumegroup", "getpath", self.volname, group)
        except CommandFailedError as ce:
            if ce.exitstatus != errno.ENOENT:
                raise
        else:
            raise RuntimeError("expected the 'fs subvolumegroup getpath' command to fail")

    def test_subvolume_group_create_with_desired_data_pool_layout(self):
        group1, group2 = self._generate_random_group_name(2)

        # create group
        self._fs_cmd("subvolumegroup", "create", self.volname, group1)
        group1_path = self._get_subvolume_group_path(self.volname, group1)

        default_pool = self.mount_a.getfattr(group1_path, "ceph.dir.layout.pool")
        new_pool = "new_pool"
        self.assertNotEqual(default_pool, new_pool)

        # add data pool
        newid = self.fs.add_data_pool(new_pool)

        # create group specifying the new data pool as its pool layout
        self._fs_cmd("subvolumegroup", "create", self.volname, group2,
                     "--pool_layout", new_pool)
        group2_path = self._get_subvolume_group_path(self.volname, group2)

        desired_pool = self.mount_a.getfattr(group2_path, "ceph.dir.layout.pool")
        try:
            self.assertEqual(desired_pool, new_pool)
        except AssertionError:
            self.assertEqual(int(desired_pool), newid)  # old kernel returns id

        self._fs_cmd("subvolumegroup", "rm", self.volname, group1)
        self._fs_cmd("subvolumegroup", "rm", self.volname, group2)

    def test_subvolume_group_create_with_desired_mode(self):
        group1, group2 = self._generate_random_group_name(2)
        # default mode
        expected_mode1 = "755"
        # desired mode
        expected_mode2 = "777"

        # create group
        self._fs_cmd("subvolumegroup", "create", self.volname, group2, f"--mode={expected_mode2}")
        self._fs_cmd("subvolumegroup", "create", self.volname, group1)

        group1_path = self._get_subvolume_group_path(self.volname, group1)
        group2_path = self._get_subvolume_group_path(self.volname, group2)
        volumes_path = os.path.dirname(group1_path)

        # check group's mode
        actual_mode1 = self.mount_a.run_shell(['stat', '-c' '%a', group1_path]).stdout.getvalue().strip()
        actual_mode2 = self.mount_a.run_shell(['stat', '-c' '%a', group2_path]).stdout.getvalue().strip()
        actual_mode3 = self.mount_a.run_shell(['stat', '-c' '%a', volumes_path]).stdout.getvalue().strip()
        self.assertEqual(actual_mode1, expected_mode1)
        self.assertEqual(actual_mode2, expected_mode2)
        self.assertEqual(actual_mode3, expected_mode1)

        self._fs_cmd("subvolumegroup", "rm", self.volname, group1)
        self._fs_cmd("subvolumegroup", "rm", self.volname, group2)

    def test_subvolume_group_create_with_desired_uid_gid(self):
        """
        That the subvolume group can be created with the desired uid and gid and that its
        uid and gid match the expected values.
        """
        uid = 1000
        gid = 1000

        # create subvolume group
        subvolgroupname = self._generate_random_group_name()
        self._fs_cmd("subvolumegroup", "create", self.volname, subvolgroupname, "--uid", str(uid), "--gid", str(gid))

        # make sure it exists
        subvolgrouppath = self._get_subvolume_group_path(self.volname, subvolgroupname)
        self.assertNotEqual(subvolgrouppath, None)

        # verify the uid and gid
        suid = int(self.mount_a.run_shell(['stat', '-c' '%u', subvolgrouppath]).stdout.getvalue().strip())
        sgid = int(self.mount_a.run_shell(['stat', '-c' '%g', subvolgrouppath]).stdout.getvalue().strip())
        self.assertEqual(uid, suid)
        self.assertEqual(gid, sgid)

        # remove group
        self._fs_cmd("subvolumegroup", "rm", self.volname, subvolgroupname)

    def test_subvolume_group_create_with_invalid_data_pool_layout(self):
        group = self._generate_random_group_name()
        data_pool = "invalid_pool"
        # create group with invalid data pool layout
        try:
            self._fs_cmd("subvolumegroup", "create", self.volname, group, "--pool_layout", data_pool)
        except CommandFailedError as ce:
            if ce.exitstatus != errno.EINVAL:
                raise
        else:
            raise RuntimeError("expected the 'fs subvolumegroup create' command to fail")

    def test_subvolume_group_ls(self):
        # tests the 'fs subvolumegroup ls' command

        subvolumegroups = []

        # create subvolumegroups
        subvolumegroups = self._generate_random_group_name(3)
        for groupname in subvolumegroups:
            self._fs_cmd("subvolumegroup", "create", self.volname, groupname)

        subvolumegroupls = json.loads(self._fs_cmd('subvolumegroup', 'ls', self.volname))
        if len(subvolumegroupls) == 0:
            raise RuntimeError("Expected the 'fs subvolumegroup ls' command to list the created subvolume groups")
        else:
            subvolgroupnames = [subvolumegroup['name'] for subvolumegroup in subvolumegroupls]
            if collections.Counter(subvolgroupnames) != collections.Counter(subvolumegroups):
                raise RuntimeError("Error creating or listing subvolume groups")

    def test_subvolume_group_ls_filter(self):
        # tests that the 'fs subvolumegroup ls' command filters out the '_deleting' directory

        subvolumegroups = []

        # create subvolumegroups
        subvolumegroups = self._generate_random_group_name(3)
        for groupname in subvolumegroups:
            self._fs_cmd("subvolumegroup", "create", self.volname, groupname)

        # create subvolume and remove. This creates '_deleting' directory.
        subvolume = self._generate_random_subvolume_name()
        self._fs_cmd("subvolume", "create", self.volname, subvolume)
        self._fs_cmd("subvolume", "rm", self.volname, subvolume)

        subvolumegroupls = json.loads(self._fs_cmd('subvolumegroup', 'ls', self.volname))
        subvolgroupnames = [subvolumegroup['name'] for subvolumegroup in subvolumegroupls]
        if "_deleting" in subvolgroupnames:
            self.fail("Listing subvolume groups listed '_deleting' directory")

    def test_subvolume_group_ls_for_nonexistent_volume(self):
        # tests the 'fs subvolumegroup ls' command when no subvolume group exists
        # prerequisite: we expect that the test volume is created and a subvolumegroup is NOT created

        # list subvolume groups
        subvolumegroupls = json.loads(self._fs_cmd('subvolumegroup', 'ls', self.volname))
        if len(subvolumegroupls) > 0:
            raise RuntimeError("Expected the 'fs subvolumegroup ls' command to output an empty list")

    def test_subvolumegroup_pin_distributed(self):
        self.fs.set_max_mds(2)
        status = self.fs.wait_for_daemons()
        self.config_set('mds', 'mds_export_ephemeral_distributed', True)

        group = "pinme"
        self._fs_cmd("subvolumegroup", "create", self.volname, group)
        self._fs_cmd("subvolumegroup", "pin", self.volname, group, "distributed", "True")
        subvolumes = self._generate_random_subvolume_name(50)
        for subvolume in subvolumes:
            self._fs_cmd("subvolume", "create", self.volname, subvolume, "--group_name", group)
        self._wait_distributed_subtrees(2 * 2, status=status, rank="all")

        # remove subvolumes
        for subvolume in subvolumes:
            self._fs_cmd("subvolume", "rm", self.volname, subvolume, group)

        # verify trash dir is clean
        self._wait_for_trash_empty()

    def test_subvolume_group_rm_force(self):
        # test removing a non-existing subvolume group with --force
        group = self._generate_random_group_name()
        try:
            self._fs_cmd("subvolumegroup", "rm", self.volname, group, "--force")
        except CommandFailedError:
            raise RuntimeError("expected the 'fs subvolumegroup rm --force' command to succeed")


class TestSubvolumes(TestVolumesHelper):
    """Tests for FS subvolume operations, except snapshot and snapshot clone."""
    def test_async_subvolume_rm(self):
        subvolumes = self._generate_random_subvolume_name(100)

        # create subvolumes
        for subvolume in subvolumes:
            self._fs_cmd("subvolume", "create", self.volname, subvolume, "--mode=777")
            self._do_subvolume_io(subvolume, number_of_files=10)

        self.mount_a.umount_wait()

        # remove subvolumes
        for subvolume in subvolumes:
            self._fs_cmd("subvolume", "rm", self.volname, subvolume)

        self.mount_a.mount_wait()

        # verify trash dir is clean
        self._wait_for_trash_empty(timeout=300)

    def test_default_uid_gid_subvolume(self):
        subvolume = self._generate_random_subvolume_name()
        expected_uid = 0
        expected_gid = 0

        # create subvolume
        self._fs_cmd("subvolume", "create", self.volname, subvolume)
        subvol_path = self._get_subvolume_path(self.volname, subvolume)

        # check subvolume's uid and gid
        stat = self.mount_a.stat(subvol_path)
        self.assertEqual(stat['st_uid'], expected_uid)
        self.assertEqual(stat['st_gid'], expected_gid)

        # remove subvolume
        self._fs_cmd("subvolume", "rm", self.volname, subvolume)

        # verify trash dir is clean
        self._wait_for_trash_empty()

    def test_nonexistent_subvolume_rm(self):
        # remove a non-existing subvolume
        subvolume = "non_existent_subvolume"

        # try removing the subvolume
        try:
            self._fs_cmd("subvolume", "rm", self.volname, subvolume)
        except CommandFailedError as ce:
            if ce.exitstatus != errno.ENOENT:
                raise
        else:
            raise RuntimeError("expected the 'fs subvolume rm' command to fail")

    def test_subvolume_create_and_rm(self):
        # create subvolume
        subvolume = self._generate_random_subvolume_name()
        self._fs_cmd("subvolume", "create", self.volname, subvolume)

        # make sure it exists
        subvolpath = self._fs_cmd("subvolume", "getpath", self.volname, subvolume)
        self.assertNotEqual(subvolpath, None)

        # remove subvolume
        self._fs_cmd("subvolume", "rm", self.volname, subvolume)
        # make sure it's gone
        try:
            self._fs_cmd("subvolume", "getpath", self.volname, subvolume)
        except CommandFailedError as ce:
            if ce.exitstatus != errno.ENOENT:
                raise
        else:
            raise RuntimeError("expected the 'fs subvolume getpath' command to fail. Subvolume not removed.")

        # verify trash dir is clean
        self._wait_for_trash_empty()

    def test_subvolume_create_and_rm_in_group(self):
        subvolume = self._generate_random_subvolume_name()
        group = self._generate_random_group_name()

        # create group
        self._fs_cmd("subvolumegroup", "create", self.volname, group)

        # create subvolume in group
        self._fs_cmd("subvolume", "create", self.volname, subvolume, "--group_name", group)

        # remove subvolume
        self._fs_cmd("subvolume", "rm", self.volname, subvolume, group)

        # verify trash dir is clean
        self._wait_for_trash_empty()

        # remove group
        self._fs_cmd("subvolumegroup", "rm", self.volname, group)

    def test_subvolume_create_idempotence(self):
        # create subvolume
        subvolume = self._generate_random_subvolume_name()
        self._fs_cmd("subvolume", "create", self.volname, subvolume)

        # try creating w/ same subvolume name -- should be idempotent
        self._fs_cmd("subvolume", "create", self.volname, subvolume)

        # remove subvolume
        self._fs_cmd("subvolume", "rm", self.volname, subvolume)

        # verify trash dir is clean
        self._wait_for_trash_empty()

    def test_subvolume_create_idempotence_resize(self):
        # create subvolume
        subvolume = self._generate_random_subvolume_name()
        self._fs_cmd("subvolume", "create", self.volname, subvolume)

        # try creating w/ same subvolume name with size -- should set quota
        self._fs_cmd("subvolume", "create", self.volname, subvolume, "1000000000")

        # get subvolume metadata
        subvol_info = json.loads(self._get_subvolume_info(self.volname, subvolume))
        self.assertEqual(subvol_info["bytes_quota"], 1000000000)

        # remove subvolume
        self._fs_cmd("subvolume", "rm", self.volname, subvolume)

        # verify trash dir is clean
        self._wait_for_trash_empty()

    def test_subvolume_create_idempotence_mode(self):
        # default mode
        default_mode = "755"

        # create subvolume
        subvolume = self._generate_random_subvolume_name()
        self._fs_cmd("subvolume", "create", self.volname, subvolume)

        subvol_path = self._get_subvolume_path(self.volname, subvolume)

        actual_mode_1 = self.mount_a.run_shell(['stat', '-c' '%a', subvol_path]).stdout.getvalue().strip()
        self.assertEqual(actual_mode_1, default_mode)

        # try creating w/ same subvolume name with --mode 777
        new_mode = "777"
        self._fs_cmd("subvolume", "create", self.volname, subvolume, "--mode", new_mode)

        actual_mode_2 = self.mount_a.run_shell(['stat', '-c' '%a', subvol_path]).stdout.getvalue().strip()
        self.assertEqual(actual_mode_2, new_mode)

        # remove subvolume
        self._fs_cmd("subvolume", "rm", self.volname, subvolume)

        # verify trash dir is clean
        self._wait_for_trash_empty()

    def test_subvolume_create_idempotence_without_passing_mode(self):
        # create subvolume
        desired_mode = "777"
        subvolume = self._generate_random_subvolume_name()
        self._fs_cmd("subvolume", "create", self.volname, subvolume, "--mode", desired_mode)

        subvol_path = self._get_subvolume_path(self.volname, subvolume)

        actual_mode_1 = self.mount_a.run_shell(['stat', '-c' '%a', subvol_path]).stdout.getvalue().strip()
        self.assertEqual(actual_mode_1, desired_mode)

        # default mode
        default_mode = "755"

        # try creating w/ same subvolume name without passing --mode argument
        self._fs_cmd("subvolume", "create", self.volname, subvolume)

        actual_mode_2 = self.mount_a.run_shell(['stat', '-c' '%a', subvol_path]).stdout.getvalue().strip()
        self.assertEqual(actual_mode_2, default_mode)

        # remove subvolume
        self._fs_cmd("subvolume", "rm", self.volname, subvolume)

        # verify trash dir is clean
        self._wait_for_trash_empty()

    def test_subvolume_create_isolated_namespace(self):
        """
        Create subvolume in separate rados namespace
        """

        # create subvolume
        subvolume = self._generate_random_subvolume_name()
        self._fs_cmd("subvolume", "create", self.volname, subvolume, "--namespace-isolated")

        # get subvolume metadata
        subvol_info = json.loads(self._get_subvolume_info(self.volname, subvolume))
        self.assertNotEqual(len(subvol_info), 0)
        self.assertEqual(subvol_info["pool_namespace"], "fsvolumens_" + subvolume)

        # remove subvolume
        self._fs_cmd("subvolume", "rm", self.volname, subvolume)

        # verify trash dir is clean
        self._wait_for_trash_empty()

    def test_subvolume_create_with_auto_cleanup_on_fail(self):
        subvolume = self._generate_random_subvolume_name()
        data_pool = "invalid_pool"
        # create subvolume with invalid data pool layout fails
        with self.assertRaises(CommandFailedError):
            self._fs_cmd("subvolume", "create", self.volname, subvolume, "--pool_layout", data_pool)

        # check whether subvol path is cleaned up
        try:
            self._fs_cmd("subvolume", "getpath", self.volname, subvolume)
        except CommandFailedError as ce:
            self.assertEqual(ce.exitstatus, errno.ENOENT, "invalid error code on getpath of non-existent subvolume")
        else:
            self.fail("expected the 'fs subvolume getpath' command to fail")

        # verify trash dir is clean
        self._wait_for_trash_empty()

    def test_subvolume_create_with_desired_data_pool_layout_in_group(self):
        subvol1, subvol2 = self._generate_random_subvolume_name(2)
        group = self._generate_random_group_name()

        # create group. this also helps set default pool layout for subvolumes
        # created within the group.
        self._fs_cmd("subvolumegroup", "create", self.volname, group)

        # create subvolume in group.
        self._fs_cmd("subvolume", "create", self.volname, subvol1, "--group_name", group)
        subvol1_path = self._get_subvolume_path(self.volname, subvol1, group_name=group)

        default_pool = self.mount_a.getfattr(subvol1_path, "ceph.dir.layout.pool")
        new_pool = "new_pool"
        self.assertNotEqual(default_pool, new_pool)

        # add data pool
        newid = self.fs.add_data_pool(new_pool)

        # create subvolume specifying the new data pool as its pool layout
        self._fs_cmd("subvolume", "create", self.volname, subvol2, "--group_name", group,
                     "--pool_layout", new_pool)
        subvol2_path = self._get_subvolume_path(self.volname, subvol2, group_name=group)

        desired_pool = self.mount_a.getfattr(subvol2_path, "ceph.dir.layout.pool")
        try:
            self.assertEqual(desired_pool, new_pool)
        except AssertionError:
            self.assertEqual(int(desired_pool), newid)  # old kernel returns id

        self._fs_cmd("subvolume", "rm", self.volname, subvol2, group)
        self._fs_cmd("subvolume", "rm", self.volname, subvol1, group)
        self._fs_cmd("subvolumegroup", "rm", self.volname, group)

        # verify trash dir is clean
        self._wait_for_trash_empty()

    def test_subvolume_create_with_desired_mode(self):
        subvol1 = self._generate_random_subvolume_name()

        # default mode
        default_mode = "755"
        # desired mode
        desired_mode = "777"

        self._fs_cmd("subvolume", "create", self.volname, subvol1, "--mode", "777")

        subvol1_path = self._get_subvolume_path(self.volname, subvol1)

        # check subvolumegroup's mode
        subvol_par_path = os.path.dirname(subvol1_path)
        group_path = os.path.dirname(subvol_par_path)
        actual_mode1 = self.mount_a.run_shell(['stat', '-c' '%a', group_path]).stdout.getvalue().strip()
        self.assertEqual(actual_mode1, default_mode)
        # check /volumes mode
        volumes_path = os.path.dirname(group_path)
        actual_mode2 = self.mount_a.run_shell(['stat', '-c' '%a', volumes_path]).stdout.getvalue().strip()
        self.assertEqual(actual_mode2, default_mode)
        # check subvolume's mode
        actual_mode3 = self.mount_a.run_shell(['stat', '-c' '%a', subvol1_path]).stdout.getvalue().strip()
        self.assertEqual(actual_mode3, desired_mode)

        self._fs_cmd("subvolume", "rm", self.volname, subvol1)

        # verify trash dir is clean
        self._wait_for_trash_empty()

    def test_subvolume_create_with_desired_mode_in_group(self):
        subvol1, subvol2, subvol3 = self._generate_random_subvolume_name(3)

        group = self._generate_random_group_name()
        # default mode
        expected_mode1 = "755"
        # desired mode
        expected_mode2 = "777"

        # create group
        self._fs_cmd("subvolumegroup", "create", self.volname, group)

        # create subvolume in group
        self._fs_cmd("subvolume", "create", self.volname, subvol1, "--group_name", group)
        self._fs_cmd("subvolume", "create", self.volname, subvol2, "--group_name", group, "--mode", "777")
        # check whether mode 0777 also works
        self._fs_cmd("subvolume", "create", self.volname, subvol3, "--group_name", group, "--mode", "0777")

        subvol1_path = self._get_subvolume_path(self.volname, subvol1, group_name=group)
        subvol2_path = self._get_subvolume_path(self.volname, subvol2, group_name=group)
        subvol3_path = self._get_subvolume_path(self.volname, subvol3, group_name=group)

        # check subvolume's mode
        actual_mode1 = self.mount_a.run_shell(['stat', '-c' '%a', subvol1_path]).stdout.getvalue().strip()
        actual_mode2 = self.mount_a.run_shell(['stat', '-c' '%a', subvol2_path]).stdout.getvalue().strip()
        actual_mode3 = self.mount_a.run_shell(['stat', '-c' '%a', subvol3_path]).stdout.getvalue().strip()
        self.assertEqual(actual_mode1, expected_mode1)
        self.assertEqual(actual_mode2, expected_mode2)
        self.assertEqual(actual_mode3, expected_mode2)

        self._fs_cmd("subvolume", "rm", self.volname, subvol1, group)
        self._fs_cmd("subvolume", "rm", self.volname, subvol2, group)
        self._fs_cmd("subvolume", "rm", self.volname, subvol3, group)
        self._fs_cmd("subvolumegroup", "rm", self.volname, group)

        # verify trash dir is clean
        self._wait_for_trash_empty()

    def test_subvolume_create_with_desired_uid_gid(self):
        """
        That the subvolume can be created with the desired uid and gid and that its
        uid and gid match the expected values.
        """
        uid = 1000
        gid = 1000

        # create subvolume
        subvolname = self._generate_random_subvolume_name()
        self._fs_cmd("subvolume", "create", self.volname, subvolname, "--uid", str(uid), "--gid", str(gid))

        # make sure it exists
        subvolpath = self._get_subvolume_path(self.volname, subvolname)
        self.assertNotEqual(subvolpath, None)

        # verify the uid and gid
        suid = int(self.mount_a.run_shell(['stat', '-c' '%u', subvolpath]).stdout.getvalue().strip())
        sgid = int(self.mount_a.run_shell(['stat', '-c' '%g', subvolpath]).stdout.getvalue().strip())
        self.assertEqual(uid, suid)
        self.assertEqual(gid, sgid)

        # remove subvolume
        self._fs_cmd("subvolume", "rm", self.volname, subvolname)

        # verify trash dir is clean
        self._wait_for_trash_empty()

    def test_subvolume_create_with_invalid_data_pool_layout(self):
        subvolume = self._generate_random_subvolume_name()
        data_pool = "invalid_pool"
        # create subvolume with invalid data pool layout
        try:
            self._fs_cmd("subvolume", "create", self.volname, subvolume, "--pool_layout", data_pool)
        except CommandFailedError as ce:
            self.assertEqual(ce.exitstatus, errno.EINVAL, "invalid error code on create of subvolume with invalid pool layout")
        else:
            self.fail("expected the 'fs subvolume create' command to fail")

        # verify trash dir is clean
        self._wait_for_trash_empty()

    def test_subvolume_create_with_invalid_size(self):
        # create subvolume with an invalid size -1
        subvolume = self._generate_random_subvolume_name()
        try:
            self._fs_cmd("subvolume", "create", self.volname, subvolume, "--size", "-1")
        except CommandFailedError as ce:
            self.assertEqual(ce.exitstatus, errno.EINVAL, "invalid error code on create of subvolume with invalid size")
        else:
            self.fail("expected the 'fs subvolume create' command to fail")

        # verify trash dir is clean
        self._wait_for_trash_empty()

    def test_subvolume_expand(self):
        """
        That a subvolume can be expanded in size and its quota matches the expected size.
        """

        # create subvolume
        subvolname = self._generate_random_subvolume_name()
        osize = self.DEFAULT_FILE_SIZE*1024*1024
        self._fs_cmd("subvolume", "create", self.volname, subvolname, "--size", str(osize))

        # make sure it exists
        subvolpath = self._get_subvolume_path(self.volname, subvolname)
        self.assertNotEqual(subvolpath, None)

        # expand the subvolume
        nsize = osize*2
        self._fs_cmd("subvolume", "resize", self.volname, subvolname, str(nsize))

        # verify the quota
        size = int(self.mount_a.getfattr(subvolpath, "ceph.quota.max_bytes"))
        self.assertEqual(size, nsize)

        # remove subvolume
        self._fs_cmd("subvolume", "rm", self.volname, subvolname)

        # verify trash dir is clean
        self._wait_for_trash_empty()

    def test_subvolume_info(self):
        # tests the 'fs subvolume info' command

        subvol_md = ["atime", "bytes_pcent", "bytes_quota", "bytes_used", "created_at", "ctime",
                     "data_pool", "gid", "mode", "mon_addrs", "mtime", "path", "pool_namespace",
                     "type", "uid", "features", "state"]

        # create subvolume
        subvolume = self._generate_random_subvolume_name()
        self._fs_cmd("subvolume", "create", self.volname, subvolume)

        # get subvolume metadata
        subvol_info = json.loads(self._get_subvolume_info(self.volname, subvolume))
        for md in subvol_md:
            self.assertIn(md, subvol_info, "'{0}' key not present in metadata of subvolume".format(md))

        self.assertEqual(subvol_info["bytes_pcent"], "undefined", "bytes_pcent should be set to undefined if quota is not set")
        self.assertEqual(subvol_info["bytes_quota"], "infinite", "bytes_quota should be set to infinite if quota is not set")
        self.assertEqual(subvol_info["pool_namespace"], "", "expected pool namespace to be empty")
        self.assertEqual(subvol_info["state"], "complete", "expected state to be complete")

        self.assertEqual(len(subvol_info["features"]), 3,
                         msg="expected 3 features, found '{0}' ({1})".format(len(subvol_info["features"]), subvol_info["features"]))
        for feature in ['snapshot-clone', 'snapshot-autoprotect', 'snapshot-retention']:
            self.assertIn(feature, subvol_info["features"], msg="expected feature '{0}' in subvolume".format(feature))

        nsize = self.DEFAULT_FILE_SIZE*1024*1024
        self._fs_cmd("subvolume", "resize", self.volname, subvolume, str(nsize))

        # get subvolume metadata after quota set
        subvol_info = json.loads(self._get_subvolume_info(self.volname, subvolume))
        for md in subvol_md:
            self.assertIn(md, subvol_info, "'{0}' key not present in metadata of subvolume".format(md))

        self.assertNotEqual(subvol_info["bytes_pcent"], "undefined", "bytes_pcent should not be set to undefined if quota is set")
        self.assertEqual(subvol_info["bytes_quota"], nsize, "bytes_quota should be set to '{0}'".format(nsize))
        self.assertEqual(subvol_info["type"], "subvolume", "type should be set to subvolume")
        self.assertEqual(subvol_info["state"], "complete", "expected state to be complete")

        self.assertEqual(len(subvol_info["features"]), 3,
                         msg="expected 3 features, found '{0}' ({1})".format(len(subvol_info["features"]), subvol_info["features"]))
        for feature in ['snapshot-clone', 'snapshot-autoprotect', 'snapshot-retention']:
            self.assertIn(feature, subvol_info["features"], msg="expected feature '{0}' in subvolume".format(feature))

        # remove subvolume
        self._fs_cmd("subvolume", "rm", self.volname, subvolume)

        # verify trash dir is clean
        self._wait_for_trash_empty()

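    # Illustrative (abridged, values hypothetical) `fs subvolume info` output
    # that the assertions above are checking against:
    #
    #   {
    #       "bytes_pcent": "undefined",   # "undefined" until a quota is set
    #       "bytes_quota": "infinite",    # byte count once a quota is set
    #       "data_pool": "cephfs.a.data",
    #       "features": ["snapshot-clone", "snapshot-autoprotect", "snapshot-retention"],
    #       "pool_namespace": "",
    #       "state": "complete",
    #       "type": "subvolume",
    #       ...
    #   }
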
eafe8130
TL
1283 def test_subvolume_ls(self):
1284 # tests the 'fs subvolume ls' command
1285
1286 subvolumes = []
1287
1288 # create subvolumes
92f5a8d4
TL
1289 subvolumes = self._generate_random_subvolume_name(3)
1290 for subvolume in subvolumes:
1291 self._fs_cmd("subvolume", "create", self.volname, subvolume)
eafe8130
TL
1292
1293 # list subvolumes
1294 subvolumels = json.loads(self._fs_cmd('subvolume', 'ls', self.volname))
1295 if len(subvolumels) == 0:
adb31ebb 1296 self.fail("Expected the 'fs subvolume ls' command to list the created subvolumes.")
eafe8130
TL
1297 else:
1298 subvolnames = [subvolume['name'] for subvolume in subvolumels]
1299 if collections.Counter(subvolnames) != collections.Counter(subvolumes):
adb31ebb
TL
1300 self.fail("Error creating or listing subvolumes")
1301
1302 # remove subvolume
1303 for subvolume in subvolumes:
1304 self._fs_cmd("subvolume", "rm", self.volname, subvolume)
1305
1306 # verify trash dir is clean
1307 self._wait_for_trash_empty()
eafe8130
TL
1308
1309 def test_subvolume_ls_for_nonexistent_default_group(self):
1310 # tests the 'fs subvolume ls' command when the default group '_nogroup' doesn't exist
1311 # prerequisite: we expect that the volume is created and the default group _nogroup is
1312 # NOT created (i.e. a subvolume without group is not created)
1313
1314 # list subvolumes
1315 subvolumels = json.loads(self._fs_cmd('subvolume', 'ls', self.volname))
1316 if len(subvolumels) > 0:
1317 raise RuntimeError("Expected the 'fs subvolume ls' command to output an empty list.")
1318
f67539c2 1319 def test_subvolume_marked(self):
92f5a8d4 1320 """
f67539c2 1321 ensure a subvolume is marked with the ceph.dir.subvolume xattr
92f5a8d4 1322 """
f67539c2 1323 subvolume = self._generate_random_subvolume_name()
92f5a8d4
TL
1324
1325 # create subvolume
f67539c2 1326 self._fs_cmd("subvolume", "create", self.volname, subvolume)
92f5a8d4 1327
f67539c2
TL
1328 # getpath
1329 subvolpath = self._get_subvolume_path(self.volname, subvolume)
92f5a8d4 1330
f67539c2
TL
1331 # a subdirectory of a subvolume cannot be moved outside the subvolume once it is marked
1332 # with the xattr ceph.dir.subvolume; test this by attempting to rename the subvol path
1333 # (incarnation) outside the subvolume
1334 dstpath = os.path.join(self.mount_a.mountpoint, 'volumes', '_nogroup', 'new_subvol_location')
1335 srcpath = os.path.join(self.mount_a.mountpoint, subvolpath)
1336 rename_script = dedent("""
1337 import os
1338 import errno
1339 try:
1340 os.rename("{src}", "{dst}")
1341 except OSError as e:
1342 if e.errno != errno.EXDEV:
1343 raise RuntimeError("invalid error code on renaming subvolume incarnation out of subvolume directory")
1344 else:
1345 raise RuntimeError("expected renaming subvolume incarnation out of subvolume directory to fail")
1346 """)
522d829b 1347 self.mount_a.run_python(rename_script.format(src=srcpath, dst=dstpath), sudo=True)
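# (The marker can also be observed directly; a sketch, assuming getfattr is
# available on the client:
#   self.mount_a.run_shell(['getfattr', '-n', 'ceph.dir.subvolume', subvolpath])
# which should report a value of 1 on the subvolume root.)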
f67539c2
TL
1348
1349 # remove subvolume
1350 self._fs_cmd("subvolume", "rm", self.volname, subvolume)
1351
1352 # verify trash dir is clean
1353 self._wait_for_trash_empty()
1354
1355 def test_subvolume_pin_export(self):
1356 self.fs.set_max_mds(2)
1357 status = self.fs.wait_for_daemons()
1358
1359 subvolume = self._generate_random_subvolume_name()
1360 self._fs_cmd("subvolume", "create", self.volname, subvolume)
1361 self._fs_cmd("subvolume", "pin", self.volname, subvolume, "export", "1")
1362 path = self._fs_cmd("subvolume", "getpath", self.volname, subvolume)
1363 path = os.path.dirname(path) # get subvolume path
1364
1365 self._get_subtrees(status=status, rank=1)
1366 self._wait_subtrees([(path, 1)], status=status)
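# ('export' pinning sets ceph.dir.pin on the subvolume directory, explicitly
# exporting that subtree to MDS rank 1; _wait_subtrees polls the subtree map
# until the expected path-to-rank mapping appears.)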
1367
1368 # remove subvolume
1369 self._fs_cmd("subvolume", "rm", self.volname, subvolume)
adb31ebb
TL
1370
1371 # verify trash dir is clean
1372 self._wait_for_trash_empty()
1373
cd265ab1
TL
1374 ### authorize operations
1375
1376 def test_authorize_deauthorize_legacy_subvolume(self):
1377 subvolume = self._generate_random_subvolume_name()
1378 group = self._generate_random_group_name()
1379 authid = "alice"
1380
1381 guest_mount = self.mount_b
1382 guest_mount.umount_wait()
1383
1384 # emulate an old-fashioned subvolume in a custom group
1385 createpath = os.path.join(".", "volumes", group, subvolume)
522d829b 1386 self.mount_a.run_shell(['mkdir', '-p', createpath], sudo=True)
cd265ab1
TL
1387
1388 # add required xattrs to subvolume
1389 default_pool = self.mount_a.getfattr(".", "ceph.dir.layout.pool")
522d829b 1390 self.mount_a.setfattr(createpath, 'ceph.dir.layout.pool', default_pool, sudo=True)
cd265ab1
TL
1391
1392 mount_path = os.path.join("/", "volumes", group, subvolume)
1393
1394 # authorize guest authID read-write access to subvolume
1395 key = self._fs_cmd("subvolume", "authorize", self.volname, subvolume, authid,
1396 "--group_name", group, "--tenant_id", "tenant_id")
1397
1398 # guest authID should exist
1399 existing_ids = [a['entity'] for a in self.auth_list()]
1400 self.assertIn("client.{0}".format(authid), existing_ids)
1401
1402 # configure credentials for guest client
1403 self._configure_guest_auth(guest_mount, authid, key)
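# ('key' is the cephx secret returned by 'fs subvolume authorize';
# _configure_guest_auth presumably installs it in a keyring so the guest
# mount below can authenticate as client.alice.)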
1404
1405 # mount the subvolume, and write to it
522d829b 1406 guest_mount.mount_wait(cephfs_mntpt=mount_path)
cd265ab1
TL
1407 guest_mount.write_n_mb("data.bin", 1)
1408
1409 # authorize guest authID read access to subvolume
1410 key = self._fs_cmd("subvolume", "authorize", self.volname, subvolume, authid,
1411 "--group_name", group, "--tenant_id", "tenant_id", "--access_level", "r")
1412
1413 # the guest client sees the change in access level to read-only only
1414 # after a remount of the subvolume.
1415 guest_mount.umount_wait()
522d829b 1416 guest_mount.mount_wait(cephfs_mntpt=mount_path)
cd265ab1
TL
1417
1418 # read existing content of the subvolume
1419 self.assertListEqual(guest_mount.ls(guest_mount.mountpoint), ["data.bin"])
1420 # cannot write into read-only subvolume
1421 with self.assertRaises(CommandFailedError):
1422 guest_mount.write_n_mb("rogue.bin", 1)
1423
1424 # cleanup
1425 guest_mount.umount_wait()
1426 self._fs_cmd("subvolume", "deauthorize", self.volname, subvolume, authid,
1427 "--group_name", group)
1428 # guest authID should no longer exist
1429 existing_ids = [a['entity'] for a in self.auth_list()]
1430 self.assertNotIn("client.{0}".format(authid), existing_ids)
1431 self._fs_cmd("subvolume", "rm", self.volname, subvolume, "--group_name", group)
1432 self._fs_cmd("subvolumegroup", "rm", self.volname, group)
1433
1434 def test_authorize_deauthorize_subvolume(self):
1435 subvolume = self._generate_random_subvolume_name()
1436 group = self._generate_random_group_name()
1437 authid = "alice"
1438
1439 guest_mount = self.mount_b
1440 guest_mount.umount_wait()
1441
1442 # create group
522d829b 1443 self._fs_cmd("subvolumegroup", "create", self.volname, group, "--mode=777")
cd265ab1
TL
1444
1445 # create subvolume in group
1446 self._fs_cmd("subvolume", "create", self.volname, subvolume, "--group_name", group)
1447 mount_path = self._fs_cmd("subvolume", "getpath", self.volname, subvolume,
1448 "--group_name", group).rstrip()
1449
1450 # authorize guest authID read-write access to subvolume
1451 key = self._fs_cmd("subvolume", "authorize", self.volname, subvolume, authid,
1452 "--group_name", group, "--tenant_id", "tenant_id")
1453
1454 # guest authID should exist
1455 existing_ids = [a['entity'] for a in self.auth_list()]
1456 self.assertIn("client.{0}".format(authid), existing_ids)
1457
1458 # configure credentials for guest client
1459 self._configure_guest_auth(guest_mount, authid, key)
1460
1461 # mount the subvolume, and write to it
522d829b 1462 guest_mount.mount_wait(cephfs_mntpt=mount_path)
cd265ab1
TL
1463 guest_mount.write_n_mb("data.bin", 1)
1464
1465 # authorize guest authID read access to subvolume
1466 key = self._fs_cmd("subvolume", "authorize", self.volname, subvolume, authid,
1467 "--group_name", group, "--tenant_id", "tenant_id", "--access_level", "r")
1468
1469 # the guest client sees the change in access level to read-only only
1470 # after a remount of the subvolume.
1471 guest_mount.umount_wait()
522d829b 1472 guest_mount.mount_wait(cephfs_mntpt=mount_path)
cd265ab1
TL
1473
1474 # read existing content of the subvolume
1475 self.assertListEqual(guest_mount.ls(guest_mount.mountpoint), ["data.bin"])
1476 # cannot write into read-only subvolume
1477 with self.assertRaises(CommandFailedError):
1478 guest_mount.write_n_mb("rogue.bin", 1)
1479
1480 # cleanup
1481 guest_mount.umount_wait()
1482 self._fs_cmd("subvolume", "deauthorize", self.volname, subvolume, authid,
1483 "--group_name", group)
1484 # guest authID should no longer exist
1485 existing_ids = [a['entity'] for a in self.auth_list()]
1486 self.assertNotIn("client.{0}".format(authid), existing_ids)
1487 self._fs_cmd("subvolume", "rm", self.volname, subvolume, "--group_name", group)
1488 self._fs_cmd("subvolumegroup", "rm", self.volname, group)
1489
cd265ab1
TL
1490 def test_multitenant_subvolumes(self):
1491 """
1492 That subvolume access can be restricted to a tenant.
1493
1494 That metadata used to enforce tenant isolation of
1495 subvolumes is stored as a two-way mapping between auth
1496 IDs and subvolumes that they're authorized to access.
1497 """
1498 subvolume = self._generate_random_subvolume_name()
1499 group = self._generate_random_group_name()
1500
1501 guest_mount = self.mount_b
1502
1503 # Guest clients belonging to different tenants, but using the same
1504 # auth ID.
1505 auth_id = "alice"
1506 guestclient_1 = {
1507 "auth_id": auth_id,
1508 "tenant_id": "tenant1",
1509 }
1510 guestclient_2 = {
1511 "auth_id": auth_id,
1512 "tenant_id": "tenant2",
1513 }
1514
1515 # create group
1516 self._fs_cmd("subvolumegroup", "create", self.volname, group)
1517
1518 # create subvolume in group
1519 self._fs_cmd("subvolume", "create", self.volname, subvolume, "--group_name", group)
1520
1521 # Check that subvolume metadata file is created on subvolume creation.
1522 subvol_metadata_filename = "_{0}:{1}.meta".format(group, subvolume)
1523 self.assertIn(subvol_metadata_filename, guest_mount.ls("volumes"))
1524
1525 # Authorize 'guestclient_1', using auth ID 'alice' and belonging to
1526 # 'tenant1', with 'rw' access to the volume.
1527 self._fs_cmd("subvolume", "authorize", self.volname, subvolume, guestclient_1["auth_id"],
1528 "--group_name", group, "--tenant_id", guestclient_1["tenant_id"])
1529
1530 # Check that the auth metadata file for auth ID 'alice' is
1531 # created on authorizing 'alice' access to the subvolume.
1532 auth_metadata_filename = "${0}.meta".format(guestclient_1["auth_id"])
1533 self.assertIn(auth_metadata_filename, guest_mount.ls("volumes"))
1534
1535 # Verify that the auth metadata file stores the tenant ID that the
1536 # auth ID belongs to, the auth ID's authorized access levels
1537 # for different subvolumes, versioning details, etc.
1538 expected_auth_metadata = {
1539 "version": 5,
1540 "compat_version": 6,
1541 "dirty": False,
1542 "tenant_id": "tenant1",
1543 "subvolumes": {
1544 "{0}/{1}".format(group,subvolume): {
1545 "dirty": False,
1546 "access_level": "rw"
1547 }
1548 }
1549 }
1550
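# The on-disk metadata version may be newer than what this test was written
# against, so it is compared with >= and dropped before the structural
# equality check below.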
1551 auth_metadata = self._auth_metadata_get(guest_mount.read_file("volumes/{0}".format(auth_metadata_filename)))
1552 self.assertGreaterEqual(auth_metadata["version"], expected_auth_metadata["version"])
1553 del expected_auth_metadata["version"]
1554 del auth_metadata["version"]
1555 self.assertEqual(expected_auth_metadata, auth_metadata)
1556
1557 # Verify that the subvolume metadata file stores info about auth IDs
1558 # and their access levels to the subvolume, versioning details, etc.
1559 expected_subvol_metadata = {
1560 "version": 1,
1561 "compat_version": 1,
1562 "auths": {
1563 "alice": {
1564 "dirty": False,
1565 "access_level": "rw"
1566 }
1567 }
1568 }
1569 subvol_metadata = self._auth_metadata_get(guest_mount.read_file("volumes/{0}".format(subvol_metadata_filename)))
1570
1571 self.assertGreaterEqual(subvol_metadata["version"], expected_subvol_metadata["version"])
1572 del expected_subvol_metadata["version"]
1573 del subvol_metadata["version"]
1574 self.assertEqual(expected_subvol_metadata, subvol_metadata)
1575
1576 # Cannot authorize 'guestclient_2' to access the volume.
1577 # It uses auth ID 'alice', which has already been used by
1578 # 'guestclient_1', belonging to another tenant, for accessing
1579 # the volume.
1580
1581 try:
1582 self._fs_cmd("subvolume", "authorize", self.volname, subvolume, guestclient_2["auth_id"],
1583 "--group_name", group, "--tenant_id", guestclient_2["tenant_id"])
1584 except CommandFailedError as ce:
1585 self.assertEqual(ce.exitstatus, errno.EPERM,
1586 "Invalid error code returned on authorize of subvolume with same auth_id but different tenant_id")
1587 else:
1588 self.fail("expected the 'fs subvolume authorize' command to fail")
1589
1590 # Check that the auth metadata file is cleaned up on removing
1591 # the auth ID's only access to a volume.
1592
1593 self._fs_cmd("subvolume", "deauthorize", self.volname, subvolume, auth_id,
1594 "--group_name", group)
1595 self.assertNotIn(auth_metadata_filename, guest_mount.ls("volumes"))
1596
1597 # Check that subvolume metadata file is cleaned up on subvolume deletion.
1598 self._fs_cmd("subvolume", "rm", self.volname, subvolume, "--group_name", group)
1599 self.assertNotIn(subvol_metadata_filename, guest_mount.ls("volumes"))
1600
1601 # clean up
1602 guest_mount.umount_wait()
1603 self._fs_cmd("subvolumegroup", "rm", self.volname, group)
1604
1605 def test_subvolume_authorized_list(self):
1606 subvolume = self._generate_random_subvolume_name()
1607 group = self._generate_random_group_name()
1608 authid1 = "alice"
1609 authid2 = "guest1"
1610 authid3 = "guest2"
1611
1612 # create group
1613 self._fs_cmd("subvolumegroup", "create", self.volname, group)
1614
1615 # create subvolume in group
1616 self._fs_cmd("subvolume", "create", self.volname, subvolume, "--group_name", group)
1617
1618 # authorize alice authID read-write access to subvolume
1619 self._fs_cmd("subvolume", "authorize", self.volname, subvolume, authid1,
1620 "--group_name", group)
1621 # authorize guest1 authID read-write access to subvolume
1622 self._fs_cmd("subvolume", "authorize", self.volname, subvolume, authid2,
1623 "--group_name", group)
1624 # authorize guest2 authID read access to subvolume
1625 self._fs_cmd("subvolume", "authorize", self.volname, subvolume, authid3,
1626 "--group_name", group, "--access_level", "r")
1627
1628 # list authorized-ids of the subvolume
1629 expected_auth_list = [{'alice': 'rw'}, {'guest1': 'rw'}, {'guest2': 'r'}]
1630 auth_list = json.loads(self._fs_cmd('subvolume', 'authorized_list', self.volname, subvolume, "--group_name", group))
1631 self.assertCountEqual(expected_auth_list, auth_list)
1632
1633 # cleanup
1634 self._fs_cmd("subvolume", "deauthorize", self.volname, subvolume, authid1,
1635 "--group_name", group)
1636 self._fs_cmd("subvolume", "deauthorize", self.volname, subvolume, authid2,
1637 "--group_name", group)
1638 self._fs_cmd("subvolume", "deauthorize", self.volname, subvolume, authid3,
1639 "--group_name", group)
1640 self._fs_cmd("subvolume", "rm", self.volname, subvolume, "--group_name", group)
1641 self._fs_cmd("subvolumegroup", "rm", self.volname, group)
1642
1643 def test_authorize_auth_id_not_created_by_mgr_volumes(self):
1644 """
1645 If the auth_id already exists and was not created by the mgr plugin,
1646 authorizing that auth_id is not allowed by default.
1647 """
1648
1649 subvolume = self._generate_random_subvolume_name()
1650 group = self._generate_random_group_name()
1651
1652 # Create auth_id
1653 self.fs.mon_manager.raw_cluster_cmd(
1654 "auth", "get-or-create", "client.guest1",
1655 "mds", "allow *",
1656 "osd", "allow rw",
1657 "mon", "allow *"
1658 )
1659
1660 auth_id = "guest1"
1661 guestclient_1 = {
1662 "auth_id": auth_id,
1663 "tenant_id": "tenant1",
1664 }
1665
1666 # create group
1667 self._fs_cmd("subvolumegroup", "create", self.volname, group)
1668
1669 # create subvolume in group
1670 self._fs_cmd("subvolume", "create", self.volname, subvolume, "--group_name", group)
1671
1672 try:
1673 self._fs_cmd("subvolume", "authorize", self.volname, subvolume, guestclient_1["auth_id"],
1674 "--group_name", group, "--tenant_id", guestclient_1["tenant_id"])
1675 except CommandFailedError as ce:
1676 self.assertEqual(ce.exitstatus, errno.EPERM,
1677 "Invalid error code returned on authorize of subvolume for auth_id created out of band")
1678 else:
1679 self.fail("expected the 'fs subvolume authorize' command to fail")
1680
1681 # clean up
1682 self.fs.mon_manager.raw_cluster_cmd("auth", "rm", "client.guest1")
1683 self._fs_cmd("subvolume", "rm", self.volname, subvolume, "--group_name", group)
1684 self._fs_cmd("subvolumegroup", "rm", self.volname, group)
1685
1686 def test_authorize_allow_existing_id_option(self):
1687 """
1688 If the auth_id already exists and was not created by mgr/volumes,
1689 authorizing it is not allowed by default but is
1690 allowed with the option allow_existing_id.
1691 """
1692
1693 subvolume = self._generate_random_subvolume_name()
1694 group = self._generate_random_group_name()
1695
1696 # Create auth_id
1697 self.fs.mon_manager.raw_cluster_cmd(
1698 "auth", "get-or-create", "client.guest1",
1699 "mds", "allow *",
1700 "osd", "allow rw",
1701 "mon", "allow *"
1702 )
1703
1704 auth_id = "guest1"
1705 guestclient_1 = {
1706 "auth_id": auth_id,
1707 "tenant_id": "tenant1",
1708 }
1709
1710 # create group
1711 self._fs_cmd("subvolumegroup", "create", self.volname, group)
1712
1713 # create subvolume in group
1714 self._fs_cmd("subvolume", "create", self.volname, subvolume, "--group_name", group)
1715
1716 # 'guestclient_1' cannot be authorized by default, as its auth ID
1717 # already exists and was not created by mgr/volumes; it is allowed
1718 # here with the option '--allow-existing-id'.
1719 self._fs_cmd("subvolume", "authorize", self.volname, subvolume, guestclient_1["auth_id"],
1720 "--group_name", group, "--tenant_id", guestclient_1["tenant_id"], "--allow-existing-id")
1721
1722 # clean up
1723 self._fs_cmd("subvolume", "deauthorize", self.volname, subvolume, auth_id,
1724 "--group_name", group)
1725 self.fs.mon_manager.raw_cluster_cmd("auth", "rm", "client.guest1")
1726 self._fs_cmd("subvolume", "rm", self.volname, subvolume, "--group_name", group)
1727 self._fs_cmd("subvolumegroup", "rm", self.volname, group)
1728
1729 def test_deauthorize_auth_id_after_out_of_band_update(self):
1730 """
1731 If an auth_id authorized by the mgr/volumes plugin is updated
1732 out of band, deauthorize should not delete the auth_id; it
1733 should only remove the caps associated with it.
1734 """
1735
1736 subvolume = self._generate_random_subvolume_name()
1737 group = self._generate_random_group_name()
1738
1739 auth_id = "guest1"
1740 guestclient_1 = {
1741 "auth_id": auth_id,
1742 "tenant_id": "tenant1",
1743 }
1744
1745 # create group
1746 self._fs_cmd("subvolumegroup", "create", self.volname, group)
1747
1748 # create subvolume in group
1749 self._fs_cmd("subvolume", "create", self.volname, subvolume, "--group_name", group)
1750
1751 # Authorize 'guestclient_1' to access the subvolume.
1752 self._fs_cmd("subvolume", "authorize", self.volname, subvolume, guestclient_1["auth_id"],
1753 "--group_name", group, "--tenant_id", guestclient_1["tenant_id"])
1754
1755 subvol_path = self._fs_cmd("subvolume", "getpath", self.volname, subvolume,
1756 "--group_name", group).rstrip()
1757
1758 # Update caps for guestclient_1 out of band
1759 out = self.fs.mon_manager.raw_cluster_cmd(
1760 "auth", "caps", "client.guest1",
1761 "mds", "allow rw path=/volumes/{0}, allow rw path={1}".format(group, subvol_path),
1762 "osd", "allow rw pool=cephfs_data",
1763 "mon", "allow r",
1764 "mgr", "allow *"
1765 )
1766
1767 # Deauthorize guestclient_1
1768 self._fs_cmd("subvolume", "deauthorize", self.volname, subvolume, auth_id, "--group_name", group)
1769
1770 # Validate the caps of guestclient_1 after deauthorize. It should not have deleted
1771 # guestclient_1. The mgr and mds caps that were updated out of band should still be present.
1772 out = json.loads(self.fs.mon_manager.raw_cluster_cmd("auth", "get", "client.guest1", "--format=json-pretty"))
1773
1774 self.assertEqual("client.guest1", out[0]["entity"])
1775 self.assertEqual("allow rw path=/volumes/{0}".format(group), out[0]["caps"]["mds"])
1776 self.assertEqual("allow *", out[0]["caps"]["mgr"])
1777 self.assertNotIn("osd", out[0]["caps"])
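# (deauthorize stripped only the caps tied to the subvolume -- the mds path
# cap on the subvolume and the osd pool caps -- while the out-of-band mds
# cap on the group path and the mgr cap survive.)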
1778
1779 # clean up
1780 out = self.fs.mon_manager.raw_cluster_cmd("auth", "rm", "client.guest1")
1781 self._fs_cmd("subvolume", "rm", self.volname, subvolume, "--group_name", group)
1782 self._fs_cmd("subvolumegroup", "rm", self.volname, group)
1783
1784 def test_recover_auth_metadata_during_authorize(self):
1785 """
1786 That auth metadata manager can recover from partial auth updates using
1787 metadata files, which store auth info and its update status info. This
1788 test validates the recovery during authorize.
1789 """
1790
1791 guest_mount = self.mount_b
1792
1793 subvolume = self._generate_random_subvolume_name()
1794 group = self._generate_random_group_name()
1795
1796 auth_id = "guest1"
1797 guestclient_1 = {
1798 "auth_id": auth_id,
1799 "tenant_id": "tenant1",
1800 }
1801
1802 # create group
1803 self._fs_cmd("subvolumegroup", "create", self.volname, group)
1804
1805 # create subvolume in group
1806 self._fs_cmd("subvolume", "create", self.volname, subvolume, "--group_name", group)
1807
1808 # Authorize 'guestclient_1' to access the subvolume.
1809 self._fs_cmd("subvolume", "authorize", self.volname, subvolume, guestclient_1["auth_id"],
1810 "--group_name", group, "--tenant_id", guestclient_1["tenant_id"])
1811
1812 # Check that the auth metadata file for auth ID 'guest1' is
1813 # created on authorizing 'guest1' access to the subvolume.
1814 auth_metadata_filename = "${0}.meta".format(guestclient_1["auth_id"])
1815 self.assertIn(auth_metadata_filename, guest_mount.ls("volumes"))
1816 expected_auth_metadata_content = self._auth_metadata_get(self.mount_a.read_file("volumes/{0}".format(auth_metadata_filename)))
1817
1818 # Induce partial auth update state by modifying the auth metadata file,
1819 # and then run authorize again.
522d829b 1820 guest_mount.run_shell(['sed', '-i', 's/false/true/g', 'volumes/{0}'.format(auth_metadata_filename)], sudo=True)
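# (The sed flips the 'dirty' flags from false to true, making the metadata
# look like an update that was interrupted midway; the authorize below
# should detect this and restore a consistent state.)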
cd265ab1
TL
1821
1822 # Authorize 'guestclient_1' to access the subvolume.
1823 self._fs_cmd("subvolume", "authorize", self.volname, subvolume, guestclient_1["auth_id"],
1824 "--group_name", group, "--tenant_id", guestclient_1["tenant_id"])
1825
1826 auth_metadata_content = self._auth_metadata_get(self.mount_a.read_file("volumes/{0}".format(auth_metadata_filename)))
1827 self.assertEqual(auth_metadata_content, expected_auth_metadata_content)
1828
1829 # clean up
1830 self._fs_cmd("subvolume", "deauthorize", self.volname, subvolume, auth_id, "--group_name", group)
1831 guest_mount.umount_wait()
1832 self.fs.mon_manager.raw_cluster_cmd("auth", "rm", "client.guest1")
1833 self._fs_cmd("subvolume", "rm", self.volname, subvolume, "--group_name", group)
1834 self._fs_cmd("subvolumegroup", "rm", self.volname, group)
1835
1836 def test_recover_auth_metadata_during_deauthorize(self):
1837 """
1838 That auth metadata manager can recover from partial auth updates using
1839 metadata files, which store auth info and its update status info. This
1840 test validates the recovery during deauthorize.
1841 """
1842
1843 guest_mount = self.mount_b
1844
1845 subvolume1, subvolume2 = self._generate_random_subvolume_name(2)
1846 group = self._generate_random_group_name()
1847
1848 guestclient_1 = {
1849 "auth_id": "guest1",
1850 "tenant_id": "tenant1",
1851 }
1852
1853 # create group
1854 self._fs_cmd("subvolumegroup", "create", self.volname, group)
1855
1856 # create subvolumes in group
1857 self._fs_cmd("subvolume", "create", self.volname, subvolume1, "--group_name", group)
1858 self._fs_cmd("subvolume", "create", self.volname, subvolume2, "--group_name", group)
1859
1860 # Authorize 'guestclient_1' to access the subvolume1.
1861 self._fs_cmd("subvolume", "authorize", self.volname, subvolume1, guestclient_1["auth_id"],
1862 "--group_name", group, "--tenant_id", guestclient_1["tenant_id"])
1863
1864 # Check that the auth metadata file for auth ID 'guest1' is
1865 # created on authorizing 'guest1' access to the subvolume1.
1866 auth_metadata_filename = "${0}.meta".format(guestclient_1["auth_id"])
1867 self.assertIn(auth_metadata_filename, guest_mount.ls("volumes"))
1868 expected_auth_metadata_content = self._auth_metadata_get(self.mount_a.read_file("volumes/{0}".format(auth_metadata_filename)))
1869
1870 # Authorize 'guestclient_1' to access the subvolume2.
1871 self._fs_cmd("subvolume", "authorize", self.volname, subvolume2, guestclient_1["auth_id"],
1872 "--group_name", group, "--tenant_id", guestclient_1["tenant_id"])
1873
1874 # Induce partial auth update state by modifying the auth metadata file,
1875 # and then run de-authorize.
522d829b 1876 guest_mount.run_shell(['sed', '-i', 's/false/true/g', 'volumes/{0}'.format(auth_metadata_filename)], sudo=True)
cd265ab1
TL
1877
1878 # Deauthorize 'guestclient_1' from accessing subvolume2.
1879 self._fs_cmd("subvolume", "deauthorize", self.volname, subvolume2, guestclient_1["auth_id"],
1880 "--group_name", group)
1881
1882 auth_metadata_content = self._auth_metadata_get(self.mount_a.read_file("volumes/{0}".format(auth_metadata_filename)))
1883 self.assertEqual(auth_metadata_content, expected_auth_metadata_content)
1884
1885 # clean up
1886 self._fs_cmd("subvolume", "deauthorize", self.volname, subvolume1, "guest1", "--group_name", group)
1887 guest_mount.umount_wait()
1888 self.fs.mon_manager.raw_cluster_cmd("auth", "rm", "client.guest1")
1889 self._fs_cmd("subvolume", "rm", self.volname, subvolume1, "--group_name", group)
1890 self._fs_cmd("subvolume", "rm", self.volname, subvolume2, "--group_name", group)
1891 self._fs_cmd("subvolumegroup", "rm", self.volname, group)
1892
1893 def test_update_old_style_auth_metadata_to_new_during_authorize(self):
1894 """
1895 CephVolumeClient stores the subvolume data in the auth metadata file under the
1896 'volumes' key, as there was no subvolume namespace. That doesn't make sense
1897 with mgr/volumes. This test validates the transparent update of the 'volumes'
1898 key to the 'subvolumes' key in the auth metadata file during authorize.
1899 """
1900
1901 guest_mount = self.mount_b
1902
1903 subvolume1, subvolume2 = self._generate_random_subvolume_name(2)
1904 group = self._generate_random_group_name()
1905
1906 auth_id = "guest1"
1907 guestclient_1 = {
1908 "auth_id": auth_id,
1909 "tenant_id": "tenant1",
1910 }
1911
1912 # create group
1913 self._fs_cmd("subvolumegroup", "create", self.volname, group)
1914
1915 # create subvolumes in group
1916 self._fs_cmd("subvolume", "create", self.volname, subvolume1, "--group_name", group)
1917 self._fs_cmd("subvolume", "create", self.volname, subvolume2, "--group_name", group)
1918
1919 # Authorize 'guestclient_1' to access the subvolume1.
1920 self._fs_cmd("subvolume", "authorize", self.volname, subvolume1, guestclient_1["auth_id"],
1921 "--group_name", group, "--tenant_id", guestclient_1["tenant_id"])
1922
1923 # Check that the auth metadata file for auth ID 'guest1' is
1924 # created on authorizing 'guest1' access to the subvolume1.
1925 auth_metadata_filename = "${0}.meta".format(guestclient_1["auth_id"])
1926 self.assertIn(auth_metadata_filename, guest_mount.ls("volumes"))
1927
1928 # Replace 'subvolumes' with 'volumes' to emulate an old-style auth-metadata file
522d829b 1929 guest_mount.run_shell(['sed', '-i', 's/subvolumes/volumes/g', 'volumes/{0}'.format(auth_metadata_filename)], sudo=True)
cd265ab1
TL
1930
1931 # Authorize 'guestclient_1' to access the subvolume2. This should transparently update 'volumes' to 'subvolumes'
1932 self._fs_cmd("subvolume", "authorize", self.volname, subvolume2, guestclient_1["auth_id"],
1933 "--group_name", group, "--tenant_id", guestclient_1["tenant_id"])
1934
1935 expected_auth_metadata = {
1936 "version": 5,
1937 "compat_version": 6,
1938 "dirty": False,
1939 "tenant_id": "tenant1",
1940 "subvolumes": {
1941 "{0}/{1}".format(group,subvolume1): {
1942 "dirty": False,
1943 "access_level": "rw"
1944 },
1945 "{0}/{1}".format(group,subvolume2): {
1946 "dirty": False,
1947 "access_level": "rw"
1948 }
1949 }
1950 }
1951
1952 auth_metadata = self._auth_metadata_get(guest_mount.read_file("volumes/{0}".format(auth_metadata_filename)))
1953
1954 self.assertGreaterEqual(auth_metadata["version"], expected_auth_metadata["version"])
1955 del expected_auth_metadata["version"]
1956 del auth_metadata["version"]
1957 self.assertEqual(expected_auth_metadata, auth_metadata)
1958
1959 # clean up
1960 self._fs_cmd("subvolume", "deauthorize", self.volname, subvolume1, auth_id, "--group_name", group)
1961 self._fs_cmd("subvolume", "deauthorize", self.volname, subvolume2, auth_id, "--group_name", group)
1962 guest_mount.umount_wait()
1963 self.fs.mon_manager.raw_cluster_cmd("auth", "rm", "client.guest1")
1964 self._fs_cmd("subvolume", "rm", self.volname, subvolume1, "--group_name", group)
1965 self._fs_cmd("subvolume", "rm", self.volname, subvolume2, "--group_name", group)
1966 self._fs_cmd("subvolumegroup", "rm", self.volname, group)
1967
1968 def test_update_old_style_auth_metadata_to_new_during_deauthorize(self):
1969 """
1970 CephVolumeClient stores the subvolume data in the auth metadata file under the
1971 'volumes' key, as there was no subvolume namespace. That doesn't make sense
1972 with mgr/volumes. This test validates the transparent update of the 'volumes'
1973 key to the 'subvolumes' key in the auth metadata file during deauthorize.
1974 """
1975
1976 guest_mount = self.mount_b
1977
1978 subvolume1, subvolume2 = self._generate_random_subvolume_name(2)
1979 group = self._generate_random_group_name()
1980
1981 auth_id = "guest1"
1982 guestclient_1 = {
1983 "auth_id": auth_id,
1984 "tenant_id": "tenant1",
1985 }
1986
1987 # create group
1988 self._fs_cmd("subvolumegroup", "create", self.volname, group)
1989
1990 # create subvolumes in group
1991 self._fs_cmd("subvolume", "create", self.volname, subvolume1, "--group_name", group)
1992 self._fs_cmd("subvolume", "create", self.volname, subvolume2, "--group_name", group)
1993
1994 # Authorize 'guestclient_1' to access the subvolume1.
1995 self._fs_cmd("subvolume", "authorize", self.volname, subvolume1, guestclient_1["auth_id"],
1996 "--group_name", group, "--tenant_id", guestclient_1["tenant_id"])
1997
1998 # Authorize 'guestclient_1' to access the subvolume2.
1999 self._fs_cmd("subvolume", "authorize", self.volname, subvolume2, guestclient_1["auth_id"],
2000 "--group_name", group, "--tenant_id", guestclient_1["tenant_id"])
2001
2002 # Check that the auth metadata file for auth ID 'guest1' is created.
2003 auth_metadata_filename = "${0}.meta".format(guestclient_1["auth_id"])
2004 self.assertIn(auth_metadata_filename, guest_mount.ls("volumes"))
2005
2006 # Replace 'subvolumes' with 'volumes' to emulate an old-style auth-metadata file
522d829b 2007 guest_mount.run_shell(['sed', '-i', 's/subvolumes/volumes/g', 'volumes/{0}'.format(auth_metadata_filename)], sudo=True)
cd265ab1
TL
2008
2009 # Deauthorize 'guestclient_1' from subvolume2. This should transparently update 'volumes' to 'subvolumes'
2010 self._fs_cmd("subvolume", "deauthorize", self.volname, subvolume2, auth_id, "--group_name", group)
2011
2012 expected_auth_metadata = {
2013 "version": 5,
2014 "compat_version": 6,
2015 "dirty": False,
2016 "tenant_id": "tenant1",
2017 "subvolumes": {
2018 "{0}/{1}".format(group,subvolume1): {
2019 "dirty": False,
2020 "access_level": "rw"
2021 }
2022 }
2023 }
2024
2025 auth_metadata = self._auth_metadata_get(guest_mount.read_file("volumes/{0}".format(auth_metadata_filename)))
2026
2027 self.assertGreaterEqual(auth_metadata["version"], expected_auth_metadata["version"])
2028 del expected_auth_metadata["version"]
2029 del auth_metadata["version"]
2030 self.assertEqual(expected_auth_metadata, auth_metadata)
2031
2032 # clean up
2033 self._fs_cmd("subvolume", "deauthorize", self.volname, subvolume1, auth_id, "--group_name", group)
2034 guest_mount.umount_wait()
2035 self.fs.mon_manager.raw_cluster_cmd("auth", "rm", "client.guest1")
2036 self._fs_cmd("subvolume", "rm", self.volname, subvolume1, "--group_name", group)
2037 self._fs_cmd("subvolume", "rm", self.volname, subvolume2, "--group_name", group)
2038 self._fs_cmd("subvolumegroup", "rm", self.volname, group)
2039
cd265ab1
TL
2040 def test_subvolume_evict_client(self):
2041 """
2042 That a subvolume client can be evicted based on the auth ID
2043 """
2044
2045 subvolumes = self._generate_random_subvolume_name(2)
2046 group = self._generate_random_group_name()
2047
2048 # create group
2049 self._fs_cmd("subvolumegroup", "create", self.volname, group)
2050
2051 # mounts[0] and mounts[1] will be used as guests to mount the subvolumes.
2052 for i in range(0, 2):
2053 self.mounts[i].umount_wait()
2054 guest_mounts = (self.mounts[0], self.mounts[1])
2055 auth_id = "guest"
2056 guestclient_1 = {
2057 "auth_id": auth_id,
2058 "tenant_id": "tenant1",
2059 }
2060
2061 # Create two subvolumes. Authorize 'guest' auth ID to mount the two
2062 # subvolumes. Mount the two subvolumes. Write data to the volumes.
2063 for i in range(2):
2064 # Create subvolume.
522d829b 2065 self._fs_cmd("subvolume", "create", self.volname, subvolumes[i], "--group_name", group, "--mode=777")
cd265ab1
TL
2066
2067 # authorize guest authID read-write access to subvolume
2068 key = self._fs_cmd("subvolume", "authorize", self.volname, subvolumes[i], guestclient_1["auth_id"],
2069 "--group_name", group, "--tenant_id", guestclient_1["tenant_id"])
2070
2071 mount_path = self._fs_cmd("subvolume", "getpath", self.volname, subvolumes[i],
2072 "--group_name", group).rstrip()
2073 # configure credentials for guest client
2074 self._configure_guest_auth(guest_mounts[i], auth_id, key)
2075
2076 # mount the subvolume, and write to it
522d829b 2077 guest_mounts[i].mount_wait(cephfs_mntpt=mount_path)
cd265ab1
TL
2078 guest_mounts[i].write_n_mb("data.bin", 1)
2079
2080 # Evict the guest client, guest_mounts[0], which is using auth ID 'guest'
2081 # and has mounted one subvolume.
2082 self._fs_cmd("subvolume", "evict", self.volname, subvolumes[0], auth_id, "--group_name", group)
2083
2084 # The evicted guest client, guest_mounts[0], should not be able to do
2085 # any more metadata ops. It should start failing all operations
2086 # when it sees that its own address is in the blocklist.
2087 try:
2088 guest_mounts[0].write_n_mb("rogue.bin", 1)
2089 except CommandFailedError:
2090 pass
2091 else:
2092 raise RuntimeError("post-eviction write should have failed!")
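# (For debugging, the blocklisted address could be inspected out of band,
# e.g. with 'ceph osd blocklist ls'; the test does not assert on that.)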
2093
2094 # Unmounting the blocklisted guest client should still work
2095 guest_mounts[0].umount_wait()
2096
2097 # Guest client guest_mounts[1], which uses the same auth ID 'guest' but
2098 # has mounted the other subvolume, should be able to use its subvolume
2099 # unaffected.
2100 guest_mounts[1].write_n_mb("data.bin.1", 1)
2101
2102 # Cleanup.
2103 guest_mounts[1].umount_wait()
2104 for i in range(2):
2105 self._fs_cmd("subvolume", "deauthorize", self.volname, subvolumes[i], auth_id, "--group_name", group)
2106 self._fs_cmd("subvolume", "rm", self.volname, subvolumes[i], "--group_name", group)
2107 self._fs_cmd("subvolumegroup", "rm", self.volname, group)
2108
f67539c2
TL
2109 def test_subvolume_pin_random(self):
2110 self.fs.set_max_mds(2)
2111 self.fs.wait_for_daemons()
2112 self.config_set('mds', 'mds_export_ephemeral_random', True)
1911f103
TL
2113
2114 subvolume = self._generate_random_subvolume_name()
1911f103 2115 self._fs_cmd("subvolume", "create", self.volname, subvolume)
f67539c2
TL
2116 self._fs_cmd("subvolume", "pin", self.volname, subvolume, "random", ".01")
2117 # no verification
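# (Random ephemeral pinning is probabilistic: with a fraction of .01, each
# descendant directory has a 1% chance of being ephemerally pinned, so the
# test only checks that the command is accepted.)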
1911f103 2118
f67539c2 2119 # remove subvolume
1911f103 2120 self._fs_cmd("subvolume", "rm", self.volname, subvolume)
1911f103
TL
2121
2122 # verify trash dir is clean
2123 self._wait_for_trash_empty()
2124
f67539c2
TL
2125 def test_subvolume_resize_fail_invalid_size(self):
2126 """
2127 That a subvolume cannot be resized to an invalid size and that the quota does not change
2128 """
1911f103 2129
f67539c2
TL
2130 osize = self.DEFAULT_FILE_SIZE*1024*1024
2131 # create subvolume
2132 subvolname = self._generate_random_subvolume_name()
2133 self._fs_cmd("subvolume", "create", self.volname, subvolname, "--size", str(osize))
81eedcae 2134
f67539c2
TL
2135 # make sure it exists
2136 subvolpath = self._get_subvolume_path(self.volname, subvolname)
2137 self.assertNotEqual(subvolpath, None)
81eedcae 2138
f67539c2
TL
2139 # try to resize the subvolume with an invalid size -10
2140 nsize = -10
2141 try:
2142 self._fs_cmd("subvolume", "resize", self.volname, subvolname, str(nsize))
2143 except CommandFailedError as ce:
2144 self.assertEqual(ce.exitstatus, errno.EINVAL, "invalid error code on resize of subvolume with invalid size")
2145 else:
2146 self.fail("expected the 'fs subvolume resize' command to fail")
81eedcae 2147
f67539c2
TL
2148 # verify the quota did not change
2149 size = int(self.mount_a.getfattr(subvolpath, "ceph.quota.max_bytes"))
2150 self.assertEqual(size, osize)
81eedcae
TL
2151
2152 # remove subvolume
f67539c2 2153 self._fs_cmd("subvolume", "rm", self.volname, subvolname)
81eedcae 2154
494da23a
TL
2155 # verify trash dir is clean
2156 self._wait_for_trash_empty()
2157
f67539c2
TL
2158 def test_subvolume_resize_fail_zero_size(self):
2159 """
2160 That a subvolume cannot be resized to a zero size and that the quota does not change
2161 """
81eedcae 2162
f67539c2
TL
2163 osize = self.DEFAULT_FILE_SIZE*1024*1024
2164 # create subvolume
2165 subvolname = self._generate_random_subvolume_name()
2166 self._fs_cmd("subvolume", "create", self.volname, subvolname, "--size", str(osize))
81eedcae 2167
f67539c2
TL
2168 # make sure it exists
2169 subvolpath = self._get_subvolume_path(self.volname, subvolname)
2170 self.assertNotEqual(subvolpath, None)
81eedcae 2171
f67539c2
TL
2172 # try to resize the subvolume with size 0
2173 nsize = 0
2174 try:
2175 self._fs_cmd("subvolume", "resize", self.volname, subvolname, str(nsize))
2176 except CommandFailedError as ce:
2177 self.assertEqual(ce.exitstatus, errno.EINVAL, "invalid error code on resize of subvolume with invalid size")
2178 else:
2179 self.fail("expected the 'fs subvolume resize' command to fail")
81eedcae 2180
f67539c2
TL
2181 # verify the quota did not change
2182 size = int(self.mount_a.getfattr(subvolpath, "ceph.quota.max_bytes"))
2183 self.assertEqual(size, osize)
81eedcae 2184
f67539c2
TL
2185 # remove subvolume
2186 self._fs_cmd("subvolume", "rm", self.volname, subvolname)
81eedcae 2187
f67539c2
TL
2188 # verify trash dir is clean
2189 self._wait_for_trash_empty()
81eedcae 2190
f67539c2
TL
2191 def test_subvolume_resize_quota_lt_used_size(self):
2192 """
2193 That a subvolume can be resized to a size smaller than the current used size
2194 and the resulting quota matches the expected size.
2195 """
81eedcae 2196
f67539c2
TL
2197 osize = self.DEFAULT_FILE_SIZE*1024*1024*20
2198 # create subvolume
2199 subvolname = self._generate_random_subvolume_name()
522d829b 2200 self._fs_cmd("subvolume", "create", self.volname, subvolname, "--size", str(osize), "--mode=777")
81eedcae 2201
f67539c2
TL
2202 # make sure it exists
2203 subvolpath = self._get_subvolume_path(self.volname, subvolname)
2204 self.assertNotEqual(subvolpath, None)
81eedcae 2205
f67539c2
TL
2206 # create one file of 10MB
2207 file_size=self.DEFAULT_FILE_SIZE*10
2208 number_of_files=1
2209 log.debug("filling subvolume {0} with {1} file of size {2}MB".format(subvolname,
2210 number_of_files,
2211 file_size))
2212 filename = "{0}.{1}".format(TestVolumes.TEST_FILE_NAME_PREFIX, self.DEFAULT_NUMBER_OF_FILES+1)
2213 self.mount_a.write_n_mb(os.path.join(subvolpath, filename), file_size)
81eedcae 2214
f67539c2
TL
2215 usedsize = int(self.mount_a.getfattr(subvolpath, "ceph.dir.rbytes"))
2216 susedsize = int(self.mount_a.run_shell(['stat', '-c' '%s', subvolpath]).stdout.getvalue().strip())
2217 if isinstance(self.mount_a, FuseMount):
2218 # kclient dir does not have size==rbytes
2219 self.assertEqual(usedsize, susedsize)
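# (ceph.dir.rbytes is the MDS-maintained recursive byte count; ceph-fuse
# reports it as the directory's st_size, while the kernel client does not,
# hence the FuseMount-only assertion.)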
81eedcae 2220
f67539c2
TL
2221 # shrink the subvolume
2222 nsize = usedsize // 2
2223 try:
2224 self._fs_cmd("subvolume", "resize", self.volname, subvolname, str(nsize))
2225 except CommandFailedError:
2226 self.fail("expected the 'fs subvolume resize' command to succeed")
81eedcae 2227
f67539c2
TL
2228 # verify the quota
2229 size = int(self.mount_a.getfattr(subvolpath, "ceph.quota.max_bytes"))
2230 self.assertEqual(size, nsize)
81eedcae 2231
f67539c2
TL
2232 # remove subvolume
2233 self._fs_cmd("subvolume", "rm", self.volname, subvolname)
81eedcae 2234
adb31ebb
TL
2235 # verify trash dir is clean
2236 self._wait_for_trash_empty()
2237
f67539c2 2238 def test_subvolume_resize_fail_quota_lt_used_size_no_shrink(self):
92f5a8d4 2239 """
f67539c2
TL
2240 That a subvolume cannot be resized to a size smaller than the current used size
2241 when --no_shrink is given, and that the quota does not change.
92f5a8d4 2242 """
92f5a8d4 2243
f67539c2
TL
2244 osize = self.DEFAULT_FILE_SIZE*1024*1024*20
2245 # create subvolume
2246 subvolname = self._generate_random_subvolume_name()
522d829b 2247 self._fs_cmd("subvolume", "create", self.volname, subvolname, "--size", str(osize), "--mode=777")
92f5a8d4
TL
2248
2249 # make sure it exists
f67539c2
TL
2250 subvolpath = self._get_subvolume_path(self.volname, subvolname)
2251 self.assertNotEqual(subvolpath, None)
81eedcae 2252
f67539c2
TL
2253 # create one file of 10MB
2254 file_size=self.DEFAULT_FILE_SIZE*10
2255 number_of_files=1
2256 log.debug("filling subvolume {0} with {1} file of size {2}MB".format(subvolname,
2257 number_of_files,
2258 file_size))
2259 filename = "{0}.{1}".format(TestVolumes.TEST_FILE_NAME_PREFIX, self.DEFAULT_NUMBER_OF_FILES+2)
2260 self.mount_a.write_n_mb(os.path.join(subvolpath, filename), file_size)
81eedcae 2261
f67539c2
TL
2262 usedsize = int(self.mount_a.getfattr(subvolpath, "ceph.dir.rbytes"))
2263 susedsize = int(self.mount_a.run_shell(['stat', '-c' '%s', subvolpath]).stdout.getvalue().strip())
2264 if isinstance(self.mount_a, FuseMount):
2265 # kclient dir does not have size==rbytes
2266 self.assertEqual(usedsize, susedsize)
81eedcae 2267
f67539c2
TL
2268 # shrink the subvolume
2269 nsize = usedsize // 2
2270 try:
2271 self._fs_cmd("subvolume", "resize", self.volname, subvolname, str(nsize), "--no_shrink")
2272 except CommandFailedError as ce:
2273 self.assertEqual(ce.exitstatus, errno.EINVAL, "invalid error code on shrink of subvolume with --no_shrink")
2274 else:
2275 self.fail("expected the 'fs subvolume resize' command to fail")
81eedcae 2276
f67539c2
TL
2277 # verify the quota did not change
2278 size = int(self.mount_a.getfattr(subvolpath, "ceph.quota.max_bytes"))
2279 self.assertEqual(size, osize)
81eedcae 2280
f67539c2
TL
2281 # remove subvolume
2282 self._fs_cmd("subvolume", "rm", self.volname, subvolname)
81eedcae 2283
adb31ebb
TL
2284 # verify trash dir is clean
2285 self._wait_for_trash_empty()
2286
f67539c2 2287 def test_subvolume_resize_expand_on_full_subvolume(self):
92f5a8d4 2288 """
f67539c2 2289 That the subvolume can be expanded from a full subvolume and future writes succeed.
92f5a8d4 2290 """
92f5a8d4 2291
f67539c2
TL
2292 osize = self.DEFAULT_FILE_SIZE*1024*1024*10
2293 # create subvolume of quota 10MB and make sure it exists
92f5a8d4 2294 subvolname = self._generate_random_subvolume_name()
522d829b 2295 self._fs_cmd("subvolume", "create", self.volname, subvolname, "--size", str(osize), "--mode=777")
92f5a8d4
TL
2296 subvolpath = self._get_subvolume_path(self.volname, subvolname)
2297 self.assertNotEqual(subvolpath, None)
2298
f67539c2
TL
2299 # create one file of size 10MB and write
2300 file_size=self.DEFAULT_FILE_SIZE*10
2301 number_of_files=1
2302 log.debug("filling subvolume {0} with {1} file of size {2}MB".format(subvolname,
2303 number_of_files,
2304 file_size))
2305 filename = "{0}.{1}".format(TestVolumes.TEST_FILE_NAME_PREFIX, self.DEFAULT_NUMBER_OF_FILES+3)
2306 self.mount_a.write_n_mb(os.path.join(subvolpath, filename), file_size)
2307
2308 # create a file of size 5MB and try write more
2309 file_size=file_size // 2
2310 number_of_files=1
2311 log.debug("filling subvolume {0} with {1} file of size {2}MB".format(subvolname,
2312 number_of_files,
2313 file_size))
2314 filename = "{0}.{1}".format(TestVolumes.TEST_FILE_NAME_PREFIX, self.DEFAULT_NUMBER_OF_FILES+4)
2315 try:
2316 self.mount_a.write_n_mb(os.path.join(subvolpath, filename), file_size)
2317 except CommandFailedError:
2318 # Not able to write. So expand the subvolume more and try writing the 5MB file again
2319 nsize = osize*2
2320 self._fs_cmd("subvolume", "resize", self.volname, subvolname, str(nsize))
2321 try:
2322 self.mount_a.write_n_mb(os.path.join(subvolpath, filename), file_size)
2323 except CommandFailedError:
2324 self.fail("expected filling subvolume {0} with {1} file of size {2}MB "
2325 "to succeed".format(subvolname, number_of_files, file_size))
2326 else:
2327 self.fail("expected filling subvolume {0} with {1} file of size {2}MB "
2328 "to fail".format(subvolname, number_of_files, file_size))
92f5a8d4
TL
2329
2330 # remove subvolume
2331 self._fs_cmd("subvolume", "rm", self.volname, subvolname)
2332
adb31ebb
TL
2333 # verify trash dir is clean
2334 self._wait_for_trash_empty()
2335
f67539c2
TL
2336 def test_subvolume_resize_infinite_size(self):
2337 """
2338 That a subvolume can be resized to an infinite size by unsetting its quota.
2339 """
81eedcae
TL
2340
2341 # create subvolume
f67539c2
TL
2342 subvolname = self._generate_random_subvolume_name()
2343 self._fs_cmd("subvolume", "create", self.volname, subvolname, "--size",
2344 str(self.DEFAULT_FILE_SIZE*1024*1024))
81eedcae 2345
f67539c2
TL
2346 # make sure it exists
2347 subvolpath = self._get_subvolume_path(self.volname, subvolname)
2348 self.assertNotEqual(subvolpath, None)
81eedcae 2349
f67539c2
TL
2350 # resize inf
2351 self._fs_cmd("subvolume", "resize", self.volname, subvolname, "inf")
2352
2353 # verify that the quota is None
2354 size = self.mount_a.getfattr(subvolpath, "ceph.quota.max_bytes")
2355 self.assertEqual(size, None)
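# (Resizing to 'inf' clears the quota; the getfattr helper returns None once
# ceph.quota.max_bytes is no longer set on the subvolume path.)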
81eedcae
TL
2356
2357 # remove subvolume
f67539c2 2358 self._fs_cmd("subvolume", "rm", self.volname, subvolname)
81eedcae 2359
494da23a
TL
2360 # verify trash dir is clean
2361 self._wait_for_trash_empty()
2362
f67539c2 2363 def test_subvolume_resize_infinite_size_future_writes(self):
e306af50 2364 """
f67539c2 2365 That a subvolume can be resized to an infinite size and the future writes succeed.
e306af50
TL
2366 """
2367
e306af50 2368 # create subvolume
f67539c2
TL
2369 subvolname = self._generate_random_subvolume_name()
2370 self._fs_cmd("subvolume", "create", self.volname, subvolname, "--size",
522d829b 2371 str(self.DEFAULT_FILE_SIZE*1024*1024*5), "--mode=777")
e306af50 2372
f67539c2
TL
2373 # make sure it exists
2374 subvolpath = self._get_subvolume_path(self.volname, subvolname)
2375 self.assertNotEqual(subvolpath, None)
e306af50 2376
f67539c2
TL
2377 # resize inf
2378 self._fs_cmd("subvolume", "resize", self.volname, subvolname, "inf")
e306af50 2379
f67539c2
TL
2380 # verify that the quota is None
2381 size = self.mount_a.getfattr(subvolpath, "ceph.quota.max_bytes")
2382 self.assertEqual(size, None)
e306af50 2383
f67539c2
TL
2384 # create one file of 10MB and try to write
2385 file_size=self.DEFAULT_FILE_SIZE*10
2386 number_of_files=1
2387 log.debug("filling subvolume {0} with {1} file of size {2}MB".format(subvolname,
2388 number_of_files,
2389 file_size))
2390 filename = "{0}.{1}".format(TestVolumes.TEST_FILE_NAME_PREFIX, self.DEFAULT_NUMBER_OF_FILES+5)
adb31ebb 2391
f67539c2
TL
2392 try:
2393 self.mount_a.write_n_mb(os.path.join(subvolpath, filename), file_size)
2394 except CommandFailedError:
2395 self.fail("expected filling subvolume {0} with {1} file of size {2}MB "
2396 "to succeed".format(subvolname, number_of_files, file_size))
e306af50
TL
2397
2398 # remove subvolume
f67539c2 2399 self._fs_cmd("subvolume", "rm", self.volname, subvolname)
e306af50
TL
2400
2401 # verify trash dir is clean
2402 self._wait_for_trash_empty()
2403
f67539c2
TL
2404 def test_subvolume_rm_force(self):
2405 # test removing non-existing subvolume with --force
81eedcae 2406 subvolume = self._generate_random_subvolume_name()
f67539c2
TL
2407 try:
2408 self._fs_cmd("subvolume", "rm", self.volname, subvolume, "--force")
2409 except CommandFailedError:
2410 self.fail("expected the 'fs subvolume rm --force' command to succeed")
2411
2412 def test_subvolume_shrink(self):
2413 """
2414 That a subvolume can be shrunk and its quota matches the expected size.
2415 """
81eedcae
TL
2416
2417 # create subvolume
f67539c2
TL
2418 subvolname = self._generate_random_subvolume_name()
2419 osize = self.DEFAULT_FILE_SIZE*1024*1024
2420 self._fs_cmd("subvolume", "create", self.volname, subvolname, "--size", str(osize))
81eedcae 2421
f67539c2
TL
2422 # make sure it exists
2423 subvolpath = self._get_subvolume_path(self.volname, subvolname)
2424 self.assertNotEqual(subvolpath, None)
81eedcae 2425
f67539c2
TL
2426 # shrink the subvolume
2427 nsize = osize // 2
2428 self._fs_cmd("subvolume", "resize", self.volname, subvolname, str(nsize))
81eedcae 2429
f67539c2
TL
2430 # verify the quota
2431 size = int(self.mount_a.getfattr(subvolpath, "ceph.quota.max_bytes"))
2432 self.assertEqual(size, nsize)
81eedcae
TL
2433
2434 # remove subvolume
f67539c2 2435 self._fs_cmd("subvolume", "rm", self.volname, subvolname)
81eedcae 2436
494da23a
TL
2437 # verify trash dir is clean
2438 self._wait_for_trash_empty()
2439
f67539c2
TL
2440
2441class TestSubvolumeGroupSnapshots(TestVolumesHelper):
2442 """Tests for FS subvolume group snapshot operations."""
2443 @unittest.skip("skipping subvolumegroup snapshot tests")
2444 def test_nonexistent_subvolume_group_snapshot_rm(self):
81eedcae 2445 subvolume = self._generate_random_subvolume_name()
f67539c2 2446 group = self._generate_random_group_name()
81eedcae
TL
2447 snapshot = self._generate_random_snapshot_name()
2448
f67539c2
TL
2449 # create group
2450 self._fs_cmd("subvolumegroup", "create", self.volname, group)
81eedcae 2451
f67539c2
TL
2452 # create subvolume in group
2453 self._fs_cmd("subvolume", "create", self.volname, subvolume, "--group_name", group)
2454
2455 # snapshot group
2456 self._fs_cmd("subvolumegroup", "snapshot", "create", self.volname, group, snapshot)
81eedcae
TL
2457
2458 # remove snapshot
f67539c2 2459 self._fs_cmd("subvolumegroup", "snapshot", "rm", self.volname, group, snapshot)
81eedcae 2460
f67539c2 2461 # remove snapshot again, expecting it to fail
81eedcae 2462 try:
f67539c2 2463 self._fs_cmd("subvolumegroup", "snapshot", "rm", self.volname, group, snapshot)
81eedcae
TL
2464 except CommandFailedError as ce:
2465 if ce.exitstatus != errno.ENOENT:
2466 raise
92f5a8d4 2467 else:
f67539c2 2468 raise RuntimeError("expected the 'fs subvolumegroup snapshot rm' command to fail")
81eedcae
TL
2469
2470 # remove subvolume
f67539c2 2471 self._fs_cmd("subvolume", "rm", self.volname, subvolume, group)
81eedcae 2472
494da23a
TL
2473 # verify trash dir is clean
2474 self._wait_for_trash_empty()
2475
f67539c2
TL
2476 # remove group
2477 self._fs_cmd("subvolumegroup", "rm", self.volname, group)
2478
2479 @unittest.skip("skipping subvolumegroup snapshot tests")
2480 def test_subvolume_group_snapshot_create_and_rm(self):
92f5a8d4 2481 subvolume = self._generate_random_subvolume_name()
f67539c2 2482 group = self._generate_random_group_name()
92f5a8d4
TL
2483 snapshot = self._generate_random_snapshot_name()
2484
f67539c2
TL
2485 # create group
2486 self._fs_cmd("subvolumegroup", "create", self.volname, group)
2487
2488 # create subvolume in group
2489 self._fs_cmd("subvolume", "create", self.volname, subvolume, "--group_name", group)
2490
2491 # snapshot group
2492 self._fs_cmd("subvolumegroup", "snapshot", "create", self.volname, group, snapshot)
2493
92f5a8d4 2494 # remove snapshot
f67539c2 2495 self._fs_cmd("subvolumegroup", "snapshot", "rm", self.volname, group, snapshot)
92f5a8d4 2496
f67539c2
TL
2497 # remove subvolume
2498 self._fs_cmd("subvolume", "rm", self.volname, subvolume, group)
2499
2500 # verify trash dir is clean
2501 self._wait_for_trash_empty()
2502
2503 # remove group
2504 self._fs_cmd("subvolumegroup", "rm", self.volname, group)
2505
2506 @unittest.skip("skipping subvolumegroup snapshot tests")
2507 def test_subvolume_group_snapshot_idempotence(self):
81eedcae
TL
2508 subvolume = self._generate_random_subvolume_name()
2509 group = self._generate_random_group_name()
2510 snapshot = self._generate_random_snapshot_name()
2511
2512 # create group
2513 self._fs_cmd("subvolumegroup", "create", self.volname, group)
2514
2515 # create subvolume in group
2516 self._fs_cmd("subvolume", "create", self.volname, subvolume, "--group_name", group)
2517
f67539c2
TL
2518 # snapshot group
2519 self._fs_cmd("subvolumegroup", "snapshot", "create", self.volname, group, snapshot)
2520
2521 # try creating snapshot w/ same snapshot name -- should be idempotent
2522 self._fs_cmd("subvolumegroup", "snapshot", "create", self.volname, group, snapshot)
81eedcae
TL
2523
2524 # remove snapshot
f67539c2 2525 self._fs_cmd("subvolumegroup", "snapshot", "rm", self.volname, group, snapshot)
81eedcae
TL
2526
2527 # remove subvolume
2528 self._fs_cmd("subvolume", "rm", self.volname, subvolume, group)
2529
494da23a
TL
2530 # verify trash dir is clean
2531 self._wait_for_trash_empty()
2532
81eedcae
TL
2533 # remove group
2534 self._fs_cmd("subvolumegroup", "rm", self.volname, group)
2535
f67539c2
TL
2536 @unittest.skip("skipping subvolumegroup snapshot tests")
2537 def test_subvolume_group_snapshot_ls(self):
2538 # tests the 'fs subvolumegroup snapshot ls' command
eafe8130
TL
2539
2540 snapshots = []
2541
f67539c2
TL
2542 # create group
2543 group = self._generate_random_group_name()
2544 self._fs_cmd("subvolumegroup", "create", self.volname, group)
eafe8130 2545
f67539c2 2546 # create subvolumegroup snapshots
92f5a8d4
TL
2547 snapshots = self._generate_random_snapshot_name(3)
2548 for snapshot in snapshots:
f67539c2 2549 self._fs_cmd("subvolumegroup", "snapshot", "create", self.volname, group, snapshot)
eafe8130 2550
f67539c2
TL
2551 subvolgrpsnapshotls = json.loads(self._fs_cmd('subvolumegroup', 'snapshot', 'ls', self.volname, group))
2552 if len(subvolgrpsnapshotls) == 0:
2553 raise RuntimeError("Expected the 'fs subvolumegroup snapshot ls' command to list the created subvolume group snapshots")
eafe8130 2554 else:
f67539c2 2555 snapshotnames = [snapshot['name'] for snapshot in subvolgrpsnapshotls]
eafe8130 2556 if collections.Counter(snapshotnames) != collections.Counter(snapshots):
f67539c2 2557 raise RuntimeError("Listed subvolume group snapshot names do not match the created snapshots")
adb31ebb 2558
f67539c2
TL
2559 @unittest.skip("skipping subvolumegroup snapshot tests")
2560 def test_subvolume_group_snapshot_rm_force(self):
2561 # test removing non-existing subvolume group snapshot with --force
2562 group = self._generate_random_group_name()
2563 snapshot = self._generate_random_snapshot_name()
2564 # remove snapshot
2565 try:
2566 self._fs_cmd("subvolumegroup", "snapshot", "rm", self.volname, group, snapshot, "--force")
2567 except CommandFailedError:
2568 raise RuntimeError("expected the 'fs subvolumegroup snapshot rm --force' command to succeed")
adb31ebb
TL
2569
2570 def test_subvolume_group_snapshot_unsupported_status(self):
2571 group = self._generate_random_group_name()
2572 snapshot = self._generate_random_snapshot_name()
eafe8130 2573
adb31ebb
TL
2574 # create group
2575 self._fs_cmd("subvolumegroup", "create", self.volname, group)
2576
2577 # snapshot group
2578 try:
2579 self._fs_cmd("subvolumegroup", "snapshot", "create", self.volname, group, snapshot)
2580 except CommandFailedError as ce:
2581 self.assertEqual(ce.exitstatus, errno.ENOSYS, "invalid error code on subvolumegroup snapshot create")
2582 else:
2583 self.fail("expected subvolumegroup snapshot create command to fail")
2584
2585 # remove group
2586 self._fs_cmd("subvolumegroup", "rm", self.volname, group)
2587
cd265ab1 2588
f67539c2
TL
2589class TestSubvolumeSnapshots(TestVolumesHelper):
2590 """Tests for FS subvolume snapshot operations."""
2591 def test_nonexistent_subvolume_snapshot_rm(self):
cd265ab1 2592 subvolume = self._generate_random_subvolume_name()
f67539c2 2593 snapshot = self._generate_random_snapshot_name()
cd265ab1 2594
f67539c2
TL
2595 # create subvolume
2596 self._fs_cmd("subvolume", "create", self.volname, subvolume)
cd265ab1 2597
f67539c2
TL
2598 # snapshot subvolume
2599 self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolume, snapshot)
cd265ab1 2600
f67539c2
TL
2601 # remove snapshot
2602 self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume, snapshot)
cd265ab1 2603
f67539c2
TL
2604 # remove snapshot again
2605 try:
2606 self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume, snapshot)
2607 except CommandFailedError as ce:
2608 if ce.exitstatus != errno.ENOENT:
2609 raise
2610 else:
2611 raise RuntimeError("expected the 'fs subvolume snapshot rm' command to fail")
cd265ab1 2612
f67539c2
TL
2613 # remove subvolume
2614 self._fs_cmd("subvolume", "rm", self.volname, subvolume)
cd265ab1 2615
f67539c2
TL
2616 # verify trash dir is clean
2617 self._wait_for_trash_empty()
2618
    def test_subvolume_snapshot_create_and_rm(self):
        subvolume = self._generate_random_subvolume_name()
        snapshot = self._generate_random_snapshot_name()

        # create subvolume
        self._fs_cmd("subvolume", "create", self.volname, subvolume)

        # snapshot subvolume
        self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolume, snapshot)

        # remove snapshot
        self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume, snapshot)

        # remove subvolume
        self._fs_cmd("subvolume", "rm", self.volname, subvolume)

        # verify trash dir is clean
        self._wait_for_trash_empty()

    def test_subvolume_snapshot_create_idempotence(self):
        subvolume = self._generate_random_subvolume_name()
        snapshot = self._generate_random_snapshot_name()

        # create subvolume
        self._fs_cmd("subvolume", "create", self.volname, subvolume)

        # snapshot subvolume
        self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolume, snapshot)

        # try creating w/ same subvolume snapshot name -- should be idempotent
        self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolume, snapshot)

        # remove snapshot
        self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume, snapshot)

        # remove subvolume
        self._fs_cmd("subvolume", "rm", self.volname, subvolume)

        # verify trash dir is clean
        self._wait_for_trash_empty()

    def test_subvolume_snapshot_info(self):
        """
        tests the 'fs subvolume snapshot info' command
        """

        snap_md = ["created_at", "data_pool", "has_pending_clones", "size"]

        subvolume = self._generate_random_subvolume_name()
        snapshot, snap_missing = self._generate_random_snapshot_name(2)

        # create subvolume
        self._fs_cmd("subvolume", "create", self.volname, subvolume, "--mode=777")

        # do some IO
        self._do_subvolume_io(subvolume, number_of_files=1)

        # snapshot subvolume
        self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolume, snapshot)

        snap_info = json.loads(self._get_subvolume_snapshot_info(self.volname, subvolume, snapshot))
        for md in snap_md:
            self.assertIn(md, snap_info, "'{0}' key not present in metadata of snapshot".format(md))
        self.assertEqual(snap_info["has_pending_clones"], "no")

        # snapshot info for non-existent snapshot
        try:
            self._get_subvolume_snapshot_info(self.volname, subvolume, snap_missing)
        except CommandFailedError as ce:
            self.assertEqual(ce.exitstatus, errno.ENOENT, "invalid error code on snapshot info of non-existent snapshot")
        else:
            self.fail("expected snapshot info of non-existent snapshot to fail")

        # remove snapshot
        self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume, snapshot)

        # remove subvolume
        self._fs_cmd("subvolume", "rm", self.volname, subvolume)

        # verify trash dir is clean
        self._wait_for_trash_empty()

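    # For reference, snapshot info returns JSON carrying (at least) the keys
    # asserted above. An illustrative sketch (field values are examples, not
    # captured output):
    #
    #   {
    #       "created_at": "2021-01-01 00:00:00.000000",
    #       "data_pool": "cephfs.a.data",
    #       "has_pending_clones": "no",
    #       "size": 1048576
    #   }
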
    def test_subvolume_snapshot_in_group(self):
        subvolume = self._generate_random_subvolume_name()
        group = self._generate_random_group_name()
        snapshot = self._generate_random_snapshot_name()

        # create group
        self._fs_cmd("subvolumegroup", "create", self.volname, group)

        # create subvolume in group
        self._fs_cmd("subvolume", "create", self.volname, subvolume, "--group_name", group)

        # snapshot subvolume in group
        self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolume, snapshot, group)

        # remove snapshot
        self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume, snapshot, group)

        # remove subvolume
        self._fs_cmd("subvolume", "rm", self.volname, subvolume, group)

        # verify trash dir is clean
        self._wait_for_trash_empty()

        # remove group
        self._fs_cmd("subvolumegroup", "rm", self.volname, group)

    def test_subvolume_snapshot_ls(self):
        # tests the 'fs subvolume snapshot ls' command

        snapshots = []

        # create subvolume
        subvolume = self._generate_random_subvolume_name()
        self._fs_cmd("subvolume", "create", self.volname, subvolume)

        # create subvolume snapshots
        snapshots = self._generate_random_snapshot_name(3)
        for snapshot in snapshots:
            self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolume, snapshot)

        subvolsnapshotls = json.loads(self._fs_cmd('subvolume', 'snapshot', 'ls', self.volname, subvolume))
        if len(subvolsnapshotls) == 0:
            self.fail("Expected the 'fs subvolume snapshot ls' command to list the created subvolume snapshots")
        else:
            snapshotnames = [snapshot['name'] for snapshot in subvolsnapshotls]
            if collections.Counter(snapshotnames) != collections.Counter(snapshots):
                self.fail("Error creating or listing subvolume snapshots")

        # remove snapshots
        for snapshot in snapshots:
            self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume, snapshot)

        # remove subvolume
        self._fs_cmd("subvolume", "rm", self.volname, subvolume)

        # verify trash dir is clean
        self._wait_for_trash_empty()

    def test_subvolume_inherited_snapshot_ls(self):
        # tests that the 'fs subvolume snapshot ls' command does not list
        # inherited snapshots created as part of a snapshot at the ancestral
        # (group) level

        snapshots = []
        subvolume = self._generate_random_subvolume_name()
        group = self._generate_random_group_name()
        snap_count = 3

        # create group
        self._fs_cmd("subvolumegroup", "create", self.volname, group)

        # create subvolume in group
        self._fs_cmd("subvolume", "create", self.volname, subvolume, "--group_name", group)

        # create subvolume snapshots
        snapshots = self._generate_random_snapshot_name(snap_count)
        for snapshot in snapshots:
            self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolume, snapshot, group)

        # create snapshots at the ancestral level
        ancestral_snappath1 = os.path.join(".", "volumes", group, ".snap", "ancestral_snap_1")
        ancestral_snappath2 = os.path.join(".", "volumes", group, ".snap", "ancestral_snap_2")
        self.mount_a.run_shell(['mkdir', '-p', ancestral_snappath1, ancestral_snappath2], sudo=True)

        subvolsnapshotls = json.loads(self._fs_cmd('subvolume', 'snapshot', 'ls', self.volname, subvolume, group))
        self.assertEqual(len(subvolsnapshotls), snap_count)

        # remove ancestral snapshots
        self.mount_a.run_shell(['rmdir', ancestral_snappath1, ancestral_snappath2], sudo=True)

        # remove snapshots
        for snapshot in snapshots:
            self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume, snapshot, group)

        # remove subvolume
        self._fs_cmd("subvolume", "rm", self.volname, subvolume, group)

        # verify trash dir is clean
        self._wait_for_trash_empty()

        # remove group
        self._fs_cmd("subvolumegroup", "rm", self.volname, group)

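    # Note on naming, relied upon by the next two tests: when a snapshot is
    # taken on a group directory, each subvolume under it inherits a snapshot
    # named "_<ancestral-snap-name>_<inode-number-of-group-dir>". For example,
    # an ancestral snapshot "ancestral_snap_1" on a group directory with inode
    # 1099511627776 surfaces inside the subvolume's .snap directory as
    # "_ancestral_snap_1_1099511627776" (the inode value is illustrative).
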
    def test_subvolume_inherited_snapshot_info(self):
        """
        tests that the 'fs subvolume snapshot info' command fails for
        inherited snapshots created as part of a snapshot at the ancestral
        (group) level
        """

        subvolume = self._generate_random_subvolume_name()
        group = self._generate_random_group_name()

        # create group
        self._fs_cmd("subvolumegroup", "create", self.volname, group)

        # create subvolume in group
        self._fs_cmd("subvolume", "create", self.volname, subvolume, "--group_name", group)

        # create snapshot at the ancestral level
        ancestral_snap_name = "ancestral_snap_1"
        ancestral_snappath1 = os.path.join(".", "volumes", group, ".snap", ancestral_snap_name)
        self.mount_a.run_shell(['mkdir', '-p', ancestral_snappath1], sudo=True)

        # validate existence of the inherited snapshot
        group_path = os.path.join(".", "volumes", group)
        inode_number_group_dir = int(self.mount_a.run_shell(['stat', '-c' '%i', group_path]).stdout.getvalue().strip())
        inherited_snap = "_{0}_{1}".format(ancestral_snap_name, inode_number_group_dir)
        inherited_snappath = os.path.join(".", "volumes", group, subvolume, ".snap", inherited_snap)
        self.mount_a.run_shell(['ls', inherited_snappath])

        # snapshot info on inherited snapshot -- should fail with EINVAL
        try:
            self._get_subvolume_snapshot_info(self.volname, subvolume, inherited_snap, group)
        except CommandFailedError as ce:
            self.assertEqual(ce.exitstatus, errno.EINVAL, "invalid error code on snapshot info of inherited snapshot")
        else:
            self.fail("expected snapshot info of inherited snapshot to fail")

        # remove ancestral snapshot
        self.mount_a.run_shell(['rmdir', ancestral_snappath1], sudo=True)

        # remove subvolume
        self._fs_cmd("subvolume", "rm", self.volname, subvolume, "--group_name", group)

        # verify trash dir is clean
        self._wait_for_trash_empty()

        # remove group
        self._fs_cmd("subvolumegroup", "rm", self.volname, group)

    def test_subvolume_inherited_snapshot_rm(self):
        """
        tests that the 'fs subvolume snapshot rm' command fails for
        inherited snapshots created as part of a snapshot at the ancestral
        (group) level
        """

        subvolume = self._generate_random_subvolume_name()
        group = self._generate_random_group_name()

        # create group
        self._fs_cmd("subvolumegroup", "create", self.volname, group)

        # create subvolume in group
        self._fs_cmd("subvolume", "create", self.volname, subvolume, "--group_name", group)

        # create snapshot at the ancestral level
        ancestral_snap_name = "ancestral_snap_1"
        ancestral_snappath1 = os.path.join(".", "volumes", group, ".snap", ancestral_snap_name)
        self.mount_a.run_shell(['mkdir', '-p', ancestral_snappath1], sudo=True)

        # validate existence of the inherited snapshot
        group_path = os.path.join(".", "volumes", group)
        inode_number_group_dir = int(self.mount_a.run_shell(['stat', '-c' '%i', group_path]).stdout.getvalue().strip())
        inherited_snap = "_{0}_{1}".format(ancestral_snap_name, inode_number_group_dir)
        inherited_snappath = os.path.join(".", "volumes", group, subvolume, ".snap", inherited_snap)
        self.mount_a.run_shell(['ls', inherited_snappath])

        # inherited snapshot should not be deletable
        try:
            self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume, inherited_snap, "--group_name", group)
        except CommandFailedError as ce:
            self.assertEqual(ce.exitstatus, errno.EINVAL, msg="invalid error code when removing inherited snapshot")
        else:
            self.fail("expected removing inherited snapshot to fail")

        # remove ancestral snapshot
        self.mount_a.run_shell(['rmdir', ancestral_snappath1], sudo=True)

        # remove subvolume
        self._fs_cmd("subvolume", "rm", self.volname, subvolume, group)

        # verify trash dir is clean
        self._wait_for_trash_empty()

        # remove group
        self._fs_cmd("subvolumegroup", "rm", self.volname, group)

    def test_subvolume_subvolumegroup_snapshot_name_conflict(self):
        """
        tests that creating a subvolume snapshot with the same name as an
        existing snapshot of its subvolumegroup fails
        """

        subvolume = self._generate_random_subvolume_name()
        group = self._generate_random_group_name()
        group_snapshot = self._generate_random_snapshot_name()

        # create group
        self._fs_cmd("subvolumegroup", "create", self.volname, group)

        # create subvolume in group
        self._fs_cmd("subvolume", "create", self.volname, subvolume, "--group_name", group)

        # create subvolumegroup snapshot
        group_snapshot_path = os.path.join(".", "volumes", group, ".snap", group_snapshot)
        self.mount_a.run_shell(['mkdir', '-p', group_snapshot_path], sudo=True)

        # validate existence of subvolumegroup snapshot
        self.mount_a.run_shell(['ls', group_snapshot_path])

        # creating a subvolume snapshot with its subvolumegroup's snapshot name should fail
        try:
            self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolume, group_snapshot, "--group_name", group)
        except CommandFailedError as ce:
            self.assertEqual(ce.exitstatus, errno.EINVAL, msg="invalid error code when creating subvolume snapshot with same name as subvolume group snapshot")
        else:
            self.fail("expected subvolume snapshot creation with same name as subvolumegroup snapshot to fail")

        # remove subvolumegroup snapshot
        self.mount_a.run_shell(['rmdir', group_snapshot_path], sudo=True)

        # remove subvolume
        self._fs_cmd("subvolume", "rm", self.volname, subvolume, group)

        # verify trash dir is clean
        self._wait_for_trash_empty()

        # remove group
        self._fs_cmd("subvolumegroup", "rm", self.volname, group)

    def test_subvolume_retain_snapshot_invalid_recreate(self):
        """
        ensure retained subvolume recreate does not leave any incarnations in the subvolume and trash
        """
        subvolume = self._generate_random_subvolume_name()
        snapshot = self._generate_random_snapshot_name()

        # create subvolume
        self._fs_cmd("subvolume", "create", self.volname, subvolume)

        # snapshot subvolume
        self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolume, snapshot)

        # remove with snapshot retention
        self._fs_cmd("subvolume", "rm", self.volname, subvolume, "--retain-snapshots")

        # recreate subvolume with an invalid pool
        data_pool = "invalid_pool"
        try:
            self._fs_cmd("subvolume", "create", self.volname, subvolume, "--pool_layout", data_pool)
        except CommandFailedError as ce:
            self.assertEqual(ce.exitstatus, errno.EINVAL, "invalid error code on recreate of subvolume with invalid poolname")
        else:
            self.fail("expected recreate of subvolume with invalid poolname to fail")

        # fetch info
        subvol_info = json.loads(self._fs_cmd("subvolume", "info", self.volname, subvolume))
        self.assertEqual(subvol_info["state"], "snapshot-retained",
                         msg="expected state to be 'snapshot-retained', found '{0}'".format(subvol_info["state"]))

        # getpath
        try:
            self._fs_cmd("subvolume", "getpath", self.volname, subvolume)
        except CommandFailedError as ce:
            self.assertEqual(ce.exitstatus, errno.ENOENT, "invalid error code on getpath of subvolume with retained snapshots")
        else:
            self.fail("expected getpath of subvolume with retained snapshots to fail")

        # remove snapshot (should remove volume)
        self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume, snapshot)

        # verify trash dir is clean
        self._wait_for_trash_empty()

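    # For reference, the retained-snapshot lifecycle exercised above and below
    # boils down to this CLI flow (illustrative sketch; names are placeholders):
    #
    #   ceph fs subvolume create <vol> <subvol>
    #   ceph fs subvolume snapshot create <vol> <subvol> <snap>
    #   ceph fs subvolume rm <vol> <subvol> --retain-snapshots
    #   ceph fs subvolume info <vol> <subvol>                 # state: "snapshot-retained"
    #   ceph fs subvolume snapshot rm <vol> <subvol> <snap>   # drops the subvolume
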
    def test_subvolume_retain_snapshot_recreate_subvolume(self):
        """
        ensure a retained subvolume can be recreated and further snapshotted
        """
        snap_md = ["created_at", "data_pool", "has_pending_clones", "size"]

        subvolume = self._generate_random_subvolume_name()
        snapshot1, snapshot2 = self._generate_random_snapshot_name(2)

        # create subvolume
        self._fs_cmd("subvolume", "create", self.volname, subvolume)

        # snapshot subvolume
        self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolume, snapshot1)

        # remove with snapshot retention
        self._fs_cmd("subvolume", "rm", self.volname, subvolume, "--retain-snapshots")

        # fetch info
        subvol_info = json.loads(self._fs_cmd("subvolume", "info", self.volname, subvolume))
        self.assertEqual(subvol_info["state"], "snapshot-retained",
                         msg="expected state to be 'snapshot-retained', found '{0}'".format(subvol_info["state"]))

        # recreate retained subvolume
        self._fs_cmd("subvolume", "create", self.volname, subvolume)

        # fetch info
        subvol_info = json.loads(self._fs_cmd("subvolume", "info", self.volname, subvolume))
        self.assertEqual(subvol_info["state"], "complete",
                         msg="expected state to be 'complete', found '{0}'".format(subvol_info["state"]))

        # snapshot info (older snapshot)
        snap_info = json.loads(self._get_subvolume_snapshot_info(self.volname, subvolume, snapshot1))
        for md in snap_md:
            self.assertIn(md, snap_info, "'{0}' key not present in metadata of snapshot".format(md))
        self.assertEqual(snap_info["has_pending_clones"], "no")

        # snap-create (new snapshot)
        self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolume, snapshot2)

        # remove with retain snapshots
        self._fs_cmd("subvolume", "rm", self.volname, subvolume, "--retain-snapshots")

        # list snapshots
        subvolsnapshotls = json.loads(self._fs_cmd('subvolume', 'snapshot', 'ls', self.volname, subvolume))
        self.assertEqual(len(subvolsnapshotls), 2, "Expected the 'fs subvolume snapshot ls' command to list the"
                         " created subvolume snapshots")
        snapshotnames = [snapshot['name'] for snapshot in subvolsnapshotls]
        for snap in [snapshot1, snapshot2]:
            self.assertIn(snap, snapshotnames, "Missing snapshot '{0}' in snapshot list".format(snap))

        # remove snapshots (should remove volume)
        self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume, snapshot1)
        self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume, snapshot2)

        # verify list subvolumes returns an empty list
        subvolumels = json.loads(self._fs_cmd('subvolume', 'ls', self.volname))
        self.assertEqual(len(subvolumels), 0)

        # verify trash dir is clean
        self._wait_for_trash_empty()

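    # Taken together, the retain-snapshot tests above and below exercise the
    # "state" field reported by 'fs subvolume info': a live subvolume reports
    # "complete", one removed with --retain-snapshots reports
    # "snapshot-retained", and removing the last retained snapshot deletes the
    # subvolume outright.
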
    def test_subvolume_retain_snapshot_with_snapshots(self):
        """
        ensure retain-snapshots based delete of a subvolume with snapshots retains the subvolume
        also test allowed and disallowed operations on a retained subvolume
        """
        snap_md = ["created_at", "data_pool", "has_pending_clones", "size"]

        subvolume = self._generate_random_subvolume_name()
        snapshot = self._generate_random_snapshot_name()

        # create subvolume
        self._fs_cmd("subvolume", "create", self.volname, subvolume)

        # snapshot subvolume
        self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolume, snapshot)

        # remove subvolume -- should fail with ENOTEMPTY since it has snapshots
        try:
            self._fs_cmd("subvolume", "rm", self.volname, subvolume)
        except CommandFailedError as ce:
            self.assertEqual(ce.exitstatus, errno.ENOTEMPTY, "invalid error code on rm of retained subvolume with snapshots")
        else:
            self.fail("expected rm of subvolume with retained snapshots to fail")

        # remove with snapshot retention
        self._fs_cmd("subvolume", "rm", self.volname, subvolume, "--retain-snapshots")

        # fetch info
        subvol_info = json.loads(self._fs_cmd("subvolume", "info", self.volname, subvolume))
        self.assertEqual(subvol_info["state"], "snapshot-retained",
                         msg="expected state to be 'snapshot-retained', found '{0}'".format(subvol_info["state"]))

        ## test allowed ops in retained state
        # ls
        subvolumes = json.loads(self._fs_cmd('subvolume', 'ls', self.volname))
        self.assertEqual(len(subvolumes), 1, "subvolume ls count mismatch, expected '1', found {0}".format(len(subvolumes)))
        self.assertEqual(subvolumes[0]['name'], subvolume,
                         "subvolume name mismatch in ls output, expected '{0}', found '{1}'".format(subvolume, subvolumes[0]['name']))

        # snapshot info
        snap_info = json.loads(self._get_subvolume_snapshot_info(self.volname, subvolume, snapshot))
        for md in snap_md:
            self.assertIn(md, snap_info, "'{0}' key not present in metadata of snapshot".format(md))
        self.assertEqual(snap_info["has_pending_clones"], "no")

        # rm --force (allowed but should fail)
        try:
            self._fs_cmd("subvolume", "rm", self.volname, subvolume, "--force")
        except CommandFailedError as ce:
            self.assertEqual(ce.exitstatus, errno.ENOTEMPTY, "invalid error code on rm of subvolume with retained snapshots")
        else:
            self.fail("expected rm of subvolume with retained snapshots to fail")

        # rm (allowed but should fail)
        try:
            self._fs_cmd("subvolume", "rm", self.volname, subvolume)
        except CommandFailedError as ce:
            self.assertEqual(ce.exitstatus, errno.ENOTEMPTY, "invalid error code on rm of subvolume with retained snapshots")
        else:
            self.fail("expected rm of subvolume with retained snapshots to fail")

        ## test disallowed ops
        # getpath
        try:
            self._fs_cmd("subvolume", "getpath", self.volname, subvolume)
        except CommandFailedError as ce:
            self.assertEqual(ce.exitstatus, errno.ENOENT, "invalid error code on getpath of subvolume with retained snapshots")
        else:
            self.fail("expected getpath of subvolume with retained snapshots to fail")

        # resize
        nsize = self.DEFAULT_FILE_SIZE*1024*1024
        try:
            self._fs_cmd("subvolume", "resize", self.volname, subvolume, str(nsize))
        except CommandFailedError as ce:
            self.assertEqual(ce.exitstatus, errno.ENOENT, "invalid error code on resize of subvolume with retained snapshots")
        else:
            self.fail("expected resize of subvolume with retained snapshots to fail")

        # snap-create
        try:
            self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolume, "fail")
        except CommandFailedError as ce:
            self.assertEqual(ce.exitstatus, errno.ENOENT, "invalid error code on snapshot create of subvolume with retained snapshots")
        else:
            self.fail("expected snapshot create of subvolume with retained snapshots to fail")

        # remove snapshot (should remove volume)
        self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume, snapshot)

        # verify list subvolumes returns an empty list
        subvolumels = json.loads(self._fs_cmd('subvolume', 'ls', self.volname))
        self.assertEqual(len(subvolumels), 0)

        # verify trash dir is clean
        self._wait_for_trash_empty()

    def test_subvolume_retain_snapshot_without_snapshots(self):
        """
        ensure retain-snapshots based delete of a subvolume with no snapshots deletes the subvolume
        """
        subvolume = self._generate_random_subvolume_name()

        # create subvolume
        self._fs_cmd("subvolume", "create", self.volname, subvolume)

        # remove with snapshot retention (should remove volume, no snapshots to retain)
        self._fs_cmd("subvolume", "rm", self.volname, subvolume, "--retain-snapshots")

        # verify list subvolumes returns an empty list
        subvolumels = json.loads(self._fs_cmd('subvolume', 'ls', self.volname))
        self.assertEqual(len(subvolumels), 0)

        # verify trash dir is clean
        self._wait_for_trash_empty()

    def test_subvolume_retain_snapshot_trash_busy_recreate(self):
        """
        ensure retained subvolume recreate fails if its trash is not yet purged
        """
        subvolume = self._generate_random_subvolume_name()
        snapshot = self._generate_random_snapshot_name()

        # create subvolume
        self._fs_cmd("subvolume", "create", self.volname, subvolume)

        # snapshot subvolume
        self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolume, snapshot)

        # remove with snapshot retention
        self._fs_cmd("subvolume", "rm", self.volname, subvolume, "--retain-snapshots")

        # fake a trash entry
        self._update_fake_trash(subvolume)

        # recreate subvolume
        try:
            self._fs_cmd("subvolume", "create", self.volname, subvolume)
        except CommandFailedError as ce:
            self.assertEqual(ce.exitstatus, errno.EAGAIN, "invalid error code on recreate of subvolume with purge pending")
        else:
            self.fail("expected recreate of subvolume with purge pending to fail")

        # clear fake trash entry
        self._update_fake_trash(subvolume, create=False)

        # recreate subvolume
        self._fs_cmd("subvolume", "create", self.volname, subvolume)

        # remove snapshot
        self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume, snapshot)

        # remove subvolume
        self._fs_cmd("subvolume", "rm", self.volname, subvolume)

        # verify trash dir is clean
        self._wait_for_trash_empty()

    def test_subvolume_rm_with_snapshots(self):
        subvolume = self._generate_random_subvolume_name()
        snapshot = self._generate_random_snapshot_name()

        # create subvolume
        self._fs_cmd("subvolume", "create", self.volname, subvolume)

        # snapshot subvolume
        self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolume, snapshot)

        # remove subvolume -- should fail with ENOTEMPTY since it has snapshots
        try:
            self._fs_cmd("subvolume", "rm", self.volname, subvolume)
        except CommandFailedError as ce:
            if ce.exitstatus != errno.ENOTEMPTY:
                raise RuntimeError("invalid error code returned when deleting subvolume with snapshots")
        else:
            raise RuntimeError("expected subvolume deletion to fail")

        # remove snapshot
        self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume, snapshot)

        # remove subvolume
        self._fs_cmd("subvolume", "rm", self.volname, subvolume)

        # verify trash dir is clean
        self._wait_for_trash_empty()

    def test_subvolume_snapshot_protect_unprotect_sanity(self):
        """
        Snapshot protect/unprotect commands are deprecated. This test exists to ensure that
        invoking the commands does not cause errors, until they are removed in a subsequent release.
        """
        subvolume = self._generate_random_subvolume_name()
        snapshot = self._generate_random_snapshot_name()
        clone = self._generate_random_clone_name()

        # create subvolume
        self._fs_cmd("subvolume", "create", self.volname, subvolume, "--mode=777")

        # do some IO
        self._do_subvolume_io(subvolume, number_of_files=64)

        # snapshot subvolume
        self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolume, snapshot)

        # now, protect snapshot
        self._fs_cmd("subvolume", "snapshot", "protect", self.volname, subvolume, snapshot)

        # schedule a clone
        self._fs_cmd("subvolume", "snapshot", "clone", self.volname, subvolume, snapshot, clone)

        # check clone status
        self._wait_for_clone_to_complete(clone)

        # now, unprotect snapshot
        self._fs_cmd("subvolume", "snapshot", "unprotect", self.volname, subvolume, snapshot)

        # verify clone
        self._verify_clone(subvolume, snapshot, clone)

        # remove snapshot
        self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume, snapshot)

        # remove subvolumes
        self._fs_cmd("subvolume", "rm", self.volname, subvolume)
        self._fs_cmd("subvolume", "rm", self.volname, clone)

        # verify trash dir is clean
        self._wait_for_trash_empty()

    def test_subvolume_snapshot_rm_force(self):
        # test removing a non-existent subvolume snapshot with --force
        subvolume = self._generate_random_subvolume_name()
        snapshot = self._generate_random_snapshot_name()

        # remove snapshot
        try:
            self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume, snapshot, "--force")
        except CommandFailedError:
            raise RuntimeError("expected the 'fs subvolume snapshot rm --force' command to succeed")


class TestSubvolumeSnapshotClones(TestVolumesHelper):
    """Tests for FS subvolume snapshot clone operations."""
    def test_clone_subvolume_info(self):
        # tests the 'fs subvolume info' command for a clone
        subvol_md = ["atime", "bytes_pcent", "bytes_quota", "bytes_used", "created_at", "ctime",
                     "data_pool", "gid", "mode", "mon_addrs", "mtime", "path", "pool_namespace",
                     "type", "uid"]

        subvolume = self._generate_random_subvolume_name()
        snapshot = self._generate_random_snapshot_name()
        clone = self._generate_random_clone_name()

        # create subvolume
        self._fs_cmd("subvolume", "create", self.volname, subvolume, "--mode=777")

        # do some IO
        self._do_subvolume_io(subvolume, number_of_files=1)

        # snapshot subvolume
        self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolume, snapshot)

        # schedule a clone
        self._fs_cmd("subvolume", "snapshot", "clone", self.volname, subvolume, snapshot, clone)

        # check clone status
        self._wait_for_clone_to_complete(clone)

        # remove snapshot
        self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume, snapshot)

        subvol_info = json.loads(self._get_subvolume_info(self.volname, clone))
        if len(subvol_info) == 0:
            raise RuntimeError("Expected the 'fs subvolume info' command to list metadata of subvolume")
        for md in subvol_md:
            if md not in subvol_info.keys():
                raise RuntimeError("%s not present in the metadata of subvolume" % md)
        if subvol_info["type"] != "clone":
            raise RuntimeError("type should be set to clone")

        # remove subvolumes
        self._fs_cmd("subvolume", "rm", self.volname, subvolume)
        self._fs_cmd("subvolume", "rm", self.volname, clone)

        # verify trash dir is clean
        self._wait_for_trash_empty()

    def test_non_clone_status(self):
        subvolume = self._generate_random_subvolume_name()

        # create subvolume
        self._fs_cmd("subvolume", "create", self.volname, subvolume)

        # clone status on a non-cloned subvolume -- should fail with ENOTSUP
        try:
            self._fs_cmd("clone", "status", self.volname, subvolume)
        except CommandFailedError as ce:
            if ce.exitstatus != errno.ENOTSUP:
                raise RuntimeError("invalid error code when fetching status of a non cloned subvolume")
        else:
            raise RuntimeError("expected fetching of clone status of a subvolume to fail")

        # remove subvolume
        self._fs_cmd("subvolume", "rm", self.volname, subvolume)

        # verify trash dir is clean
        self._wait_for_trash_empty()

    def test_subvolume_clone_inherit_snapshot_namespace_and_size(self):
        subvolume = self._generate_random_subvolume_name()
        snapshot = self._generate_random_snapshot_name()
        clone = self._generate_random_clone_name()
        osize = self.DEFAULT_FILE_SIZE*1024*1024*12

        # create subvolume, in an isolated namespace with a specified size
        self._fs_cmd("subvolume", "create", self.volname, subvolume, "--namespace-isolated", "--size", str(osize), "--mode=777")

        # do some IO
        self._do_subvolume_io(subvolume, number_of_files=8)

        # snapshot subvolume
        self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolume, snapshot)

        # create a pool different from current subvolume pool
        subvol_path = self._get_subvolume_path(self.volname, subvolume)
        default_pool = self.mount_a.getfattr(subvol_path, "ceph.dir.layout.pool")
        new_pool = "new_pool"
        self.assertNotEqual(default_pool, new_pool)
        self.fs.add_data_pool(new_pool)

        # update source subvolume pool
        self._do_subvolume_pool_and_namespace_update(subvolume, pool=new_pool, pool_namespace="")

        # schedule a clone, with NO --pool specification
        self._fs_cmd("subvolume", "snapshot", "clone", self.volname, subvolume, snapshot, clone)

        # check clone status
        self._wait_for_clone_to_complete(clone)

        # verify clone
        self._verify_clone(subvolume, snapshot, clone)

        # remove snapshot
        self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume, snapshot)

        # remove subvolumes
        self._fs_cmd("subvolume", "rm", self.volname, subvolume)
        self._fs_cmd("subvolume", "rm", self.volname, clone)

        # verify trash dir is clean
        self._wait_for_trash_empty()

    def test_subvolume_clone_inherit_quota_attrs(self):
        subvolume = self._generate_random_subvolume_name()
        snapshot = self._generate_random_snapshot_name()
        clone = self._generate_random_clone_name()
        osize = self.DEFAULT_FILE_SIZE*1024*1024*12

        # create subvolume with a specified size
        self._fs_cmd("subvolume", "create", self.volname, subvolume, "--mode=777", "--size", str(osize))

        # do some IO
        self._do_subvolume_io(subvolume, number_of_files=8)

        # get subvolume path
        subvolpath = self._get_subvolume_path(self.volname, subvolume)

        # set quota on number of files
        self.mount_a.setfattr(subvolpath, 'ceph.quota.max_files', "20", sudo=True)

        # snapshot subvolume
        self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolume, snapshot)

        # schedule a clone
        self._fs_cmd("subvolume", "snapshot", "clone", self.volname, subvolume, snapshot, clone)

        # check clone status
        self._wait_for_clone_to_complete(clone)

        # verify clone
        self._verify_clone(subvolume, snapshot, clone)

        # get clone path
        clonepath = self._get_subvolume_path(self.volname, clone)

        # verify quota max_files is inherited from source snapshot
        subvol_quota = self.mount_a.getfattr(subvolpath, "ceph.quota.max_files")
        clone_quota = self.mount_a.getfattr(clonepath, "ceph.quota.max_files")
        self.assertEqual(subvol_quota, clone_quota)

        # remove snapshot
        self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume, snapshot)

        # remove subvolumes
        self._fs_cmd("subvolume", "rm", self.volname, subvolume)
        self._fs_cmd("subvolume", "rm", self.volname, clone)

        # verify trash dir is clean
        self._wait_for_trash_empty()

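    # For reference, CephFS quotas ride on extended attributes, so the same
    # inheritance check can be made from a shell on a mounted client
    # (illustrative sketch; the mount point and paths are assumptions):
    #
    #   setfattr -n ceph.quota.max_files -v 20 /mnt/cephfs/<subvolume-path>
    #   getfattr -n ceph.quota.max_files /mnt/cephfs/<clone-path>
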
    def test_subvolume_clone_in_progress_getpath(self):
        subvolume = self._generate_random_subvolume_name()
        snapshot = self._generate_random_snapshot_name()
        clone = self._generate_random_clone_name()

        # create subvolume
        self._fs_cmd("subvolume", "create", self.volname, subvolume, "--mode=777")

        # do some IO
        self._do_subvolume_io(subvolume, number_of_files=64)

        # snapshot subvolume
        self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolume, snapshot)

        # Insert delay at the beginning of snapshot clone
        self.config_set('mgr', 'mgr/volumes/snapshot_clone_delay', 2)

        # schedule a clone
        self._fs_cmd("subvolume", "snapshot", "clone", self.volname, subvolume, snapshot, clone)

        # clone should not be accessible right now
        try:
            self._get_subvolume_path(self.volname, clone)
        except CommandFailedError as ce:
            if ce.exitstatus != errno.EAGAIN:
                raise RuntimeError("invalid error code when fetching path of a pending clone")
        else:
            raise RuntimeError("expected fetching path of a pending clone to fail")

        # check clone status
        self._wait_for_clone_to_complete(clone)

        # clone should be accessible now
        subvolpath = self._get_subvolume_path(self.volname, clone)
        self.assertNotEqual(subvolpath, None)

        # verify clone
        self._verify_clone(subvolume, snapshot, clone)

        # remove snapshot
        self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume, snapshot)

        # remove subvolumes
        self._fs_cmd("subvolume", "rm", self.volname, subvolume)
        self._fs_cmd("subvolume", "rm", self.volname, clone)

        # verify trash dir is clean
        self._wait_for_trash_empty()

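    # Note on the delay knob used by the in-progress tests: setting the mgr
    # config option 'mgr/volumes/snapshot_clone_delay' (seconds) postpones the
    # start of the clone copy, keeping the clone in a pending/in-progress
    # state long enough for the tests to probe it.
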
    def test_subvolume_clone_in_progress_snapshot_rm(self):
        subvolume = self._generate_random_subvolume_name()
        snapshot = self._generate_random_snapshot_name()
        clone = self._generate_random_clone_name()

        # create subvolume
        self._fs_cmd("subvolume", "create", self.volname, subvolume, "--mode=777")

        # do some IO
        self._do_subvolume_io(subvolume, number_of_files=64)

        # snapshot subvolume
        self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolume, snapshot)

        # Insert delay at the beginning of snapshot clone
        self.config_set('mgr', 'mgr/volumes/snapshot_clone_delay', 2)

        # schedule a clone
        self._fs_cmd("subvolume", "snapshot", "clone", self.volname, subvolume, snapshot, clone)

        # snapshot should not be deletable now
        try:
            self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume, snapshot)
        except CommandFailedError as ce:
            self.assertEqual(ce.exitstatus, errno.EAGAIN, msg="invalid error code when removing source snapshot of a clone")
        else:
            self.fail("expected removing source snapshot of a clone to fail")

        # check clone status
        self._wait_for_clone_to_complete(clone)

        # clone should be accessible now
        subvolpath = self._get_subvolume_path(self.volname, clone)
        self.assertNotEqual(subvolpath, None)

        # verify clone
        self._verify_clone(subvolume, snapshot, clone)

        # remove snapshot
        self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume, snapshot)

        # remove subvolumes
        self._fs_cmd("subvolume", "rm", self.volname, subvolume)
        self._fs_cmd("subvolume", "rm", self.volname, clone)

        # verify trash dir is clean
        self._wait_for_trash_empty()

    def test_subvolume_clone_in_progress_source(self):
        subvolume = self._generate_random_subvolume_name()
        snapshot = self._generate_random_snapshot_name()
        clone = self._generate_random_clone_name()

        # create subvolume
        self._fs_cmd("subvolume", "create", self.volname, subvolume, "--mode=777")

        # do some IO
        self._do_subvolume_io(subvolume, number_of_files=64)

        # snapshot subvolume
        self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolume, snapshot)

        # Insert delay at the beginning of snapshot clone
        self.config_set('mgr', 'mgr/volumes/snapshot_clone_delay', 2)

        # schedule a clone
        self._fs_cmd("subvolume", "snapshot", "clone", self.volname, subvolume, snapshot, clone)

        # verify clone source
        result = json.loads(self._fs_cmd("clone", "status", self.volname, clone))
        source = result['status']['source']
        self.assertEqual(source['volume'], self.volname)
        self.assertEqual(source['subvolume'], subvolume)
        self.assertEqual(source.get('group', None), None)
        self.assertEqual(source['snapshot'], snapshot)

        # check clone status
        self._wait_for_clone_to_complete(clone)

        # clone should be accessible now
        subvolpath = self._get_subvolume_path(self.volname, clone)
        self.assertNotEqual(subvolpath, None)

        # verify clone
        self._verify_clone(subvolume, snapshot, clone)

        # remove snapshot
        self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume, snapshot)

        # remove subvolumes
        self._fs_cmd("subvolume", "rm", self.volname, subvolume)
        self._fs_cmd("subvolume", "rm", self.volname, clone)

        # verify trash dir is clean
        self._wait_for_trash_empty()

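    # For reference, 'ceph fs clone status' returns JSON shaped roughly like
    # the sketch below (field values are illustrative, not captured output);
    # the test above only asserts on the 'source' sub-object:
    #
    #   {
    #       "status": {
    #           "state": "in-progress",
    #           "source": {
    #               "volume": "<volume>",
    #               "subvolume": "<subvolume>",
    #               "snapshot": "<snapshot>"
    #           }
    #       }
    #   }
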
    def test_subvolume_clone_retain_snapshot_with_snapshots(self):
        """
        retain snapshots of a cloned subvolume and check disallowed operations
        """
        subvolume = self._generate_random_subvolume_name()
        snapshot1, snapshot2 = self._generate_random_snapshot_name(2)
        clone = self._generate_random_clone_name()

        # create subvolume
        self._fs_cmd("subvolume", "create", self.volname, subvolume, "--mode=777")

        # store path for clone verification
        subvol1_path = self._get_subvolume_path(self.volname, subvolume)

        # do some IO
        self._do_subvolume_io(subvolume, number_of_files=16)

        # snapshot subvolume
        self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolume, snapshot1)

        # remove with snapshot retention
        self._fs_cmd("subvolume", "rm", self.volname, subvolume, "--retain-snapshots")

        # clone retained subvolume snapshot
        self._fs_cmd("subvolume", "snapshot", "clone", self.volname, subvolume, snapshot1, clone)

        # check clone status
        self._wait_for_clone_to_complete(clone)

        # verify clone
        self._verify_clone(subvolume, snapshot1, clone, subvol_path=subvol1_path)

        # create a snapshot on the clone
        self._fs_cmd("subvolume", "snapshot", "create", self.volname, clone, snapshot2)

        # retain a clone
        self._fs_cmd("subvolume", "rm", self.volname, clone, "--retain-snapshots")

        # list snapshots
        clonesnapshotls = json.loads(self._fs_cmd('subvolume', 'snapshot', 'ls', self.volname, clone))
        self.assertEqual(len(clonesnapshotls), 1, "Expected the 'fs subvolume snapshot ls' command to list the"
                         " created subvolume snapshots")
        snapshotnames = [snapshot['name'] for snapshot in clonesnapshotls]
        for snap in [snapshot2]:
            self.assertIn(snap, snapshotnames, "Missing snapshot '{0}' in snapshot list".format(snap))

        ## check disallowed operations on retained clone
        # clone-status
        try:
            self._fs_cmd("clone", "status", self.volname, clone)
        except CommandFailedError as ce:
            self.assertEqual(ce.exitstatus, errno.ENOENT, "invalid error code on clone status of clone with retained snapshots")
        else:
            self.fail("expected clone status of clone with retained snapshots to fail")

        # clone-cancel
        try:
            self._fs_cmd("clone", "cancel", self.volname, clone)
        except CommandFailedError as ce:
            self.assertEqual(ce.exitstatus, errno.ENOENT, "invalid error code on clone cancel of clone with retained snapshots")
        else:
            self.fail("expected clone cancel of clone with retained snapshots to fail")

        # remove snapshots (removes subvolumes as all are in retained state)
        self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume, snapshot1)
        self._fs_cmd("subvolume", "snapshot", "rm", self.volname, clone, snapshot2)

        # verify list subvolumes returns an empty list
        subvolumels = json.loads(self._fs_cmd('subvolume', 'ls', self.volname))
        self.assertEqual(len(subvolumels), 0)

        # verify trash dir is clean
        self._wait_for_trash_empty()

    def test_subvolume_retain_snapshot_clone(self):
        """
        clone a snapshot from a snapshot-retained subvolume
        """
        subvolume = self._generate_random_subvolume_name()
        snapshot = self._generate_random_snapshot_name()
        clone = self._generate_random_clone_name()

        # create subvolume
        self._fs_cmd("subvolume", "create", self.volname, subvolume, "--mode=777")

        # store path for clone verification
        subvol_path = self._get_subvolume_path(self.volname, subvolume)

        # do some IO
        self._do_subvolume_io(subvolume, number_of_files=16)

        # snapshot subvolume
        self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolume, snapshot)

        # remove with snapshot retention
        self._fs_cmd("subvolume", "rm", self.volname, subvolume, "--retain-snapshots")

        # clone retained subvolume snapshot
        self._fs_cmd("subvolume", "snapshot", "clone", self.volname, subvolume, snapshot, clone)

        # check clone status
        self._wait_for_clone_to_complete(clone)

        # verify clone
        self._verify_clone(subvolume, snapshot, clone, subvol_path=subvol_path)

        # remove snapshot (removes the retained subvolume)
        self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume, snapshot)

        # remove subvolume
        self._fs_cmd("subvolume", "rm", self.volname, clone)

        # verify list subvolumes returns an empty list
        subvolumels = json.loads(self._fs_cmd('subvolume', 'ls', self.volname))
        self.assertEqual(len(subvolumels), 0)

        # verify trash dir is clean
        self._wait_for_trash_empty()

    def test_subvolume_retain_snapshot_clone_from_newer_snapshot(self):
        """
        clone a subvolume from a recreated subvolume's latest snapshot
        """
        subvolume = self._generate_random_subvolume_name()
        snapshot1, snapshot2 = self._generate_random_snapshot_name(2)
        clone = self._generate_random_clone_name(1)

        # create subvolume
        self._fs_cmd("subvolume", "create", self.volname, subvolume, "--mode=777")

        # do some IO
        self._do_subvolume_io(subvolume, number_of_files=16)

        # snapshot subvolume
        self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolume, snapshot1)

        # remove with snapshot retention
        self._fs_cmd("subvolume", "rm", self.volname, subvolume, "--retain-snapshots")

        # recreate subvolume
        self._fs_cmd("subvolume", "create", self.volname, subvolume, "--mode=777")

        # get and store path for clone verification
        subvol2_path = self._get_subvolume_path(self.volname, subvolume)

        # do some IO
        self._do_subvolume_io(subvolume, number_of_files=16)

        # snapshot newer subvolume
        self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolume, snapshot2)

        # remove with snapshot retention
        self._fs_cmd("subvolume", "rm", self.volname, subvolume, "--retain-snapshots")

        # clone retained subvolume's newer snapshot
        self._fs_cmd("subvolume", "snapshot", "clone", self.volname, subvolume, snapshot2, clone)

        # check clone status
        self._wait_for_clone_to_complete(clone)

        # verify clone
        self._verify_clone(subvolume, snapshot2, clone, subvol_path=subvol2_path)

        # remove snapshots
        self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume, snapshot1)
        self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume, snapshot2)

        # remove subvolume
        self._fs_cmd("subvolume", "rm", self.volname, clone)

        # verify list subvolumes returns an empty list
        subvolumels = json.loads(self._fs_cmd('subvolume', 'ls', self.volname))
        self.assertEqual(len(subvolumels), 0)

        # verify trash dir is clean
        self._wait_for_trash_empty()

    def test_subvolume_retain_snapshot_recreate(self):
        """
        recreate a subvolume from one of its retained snapshots
        """
        subvolume = self._generate_random_subvolume_name()
        snapshot = self._generate_random_snapshot_name()

        # create subvolume
        self._fs_cmd("subvolume", "create", self.volname, subvolume, "--mode=777")

        # store path for clone verification
        subvol_path = self._get_subvolume_path(self.volname, subvolume)

        # do some IO
        self._do_subvolume_io(subvolume, number_of_files=16)

        # snapshot subvolume
        self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolume, snapshot)

        # remove with snapshot retention
        self._fs_cmd("subvolume", "rm", self.volname, subvolume, "--retain-snapshots")

        # recreate retained subvolume using its own snapshot to clone
        self._fs_cmd("subvolume", "snapshot", "clone", self.volname, subvolume, snapshot, subvolume)

        # check clone status
        self._wait_for_clone_to_complete(subvolume)

        # verify clone
        self._verify_clone(subvolume, snapshot, subvolume, subvol_path=subvol_path)

        # remove snapshot
        self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume, snapshot)

        # remove subvolume
        self._fs_cmd("subvolume", "rm", self.volname, subvolume)

        # verify list subvolumes returns an empty list
        subvolumels = json.loads(self._fs_cmd('subvolume', 'ls', self.volname))
        self.assertEqual(len(subvolumels), 0)

        # verify trash dir is clean
        self._wait_for_trash_empty()

    def test_subvolume_retain_snapshot_trash_busy_recreate_clone(self):
        """
        ensure retained clone recreate fails if its trash is not yet purged
        """
        subvolume = self._generate_random_subvolume_name()
        snapshot = self._generate_random_snapshot_name()
        clone = self._generate_random_clone_name()

        # create subvolume
        self._fs_cmd("subvolume", "create", self.volname, subvolume)

        # snapshot subvolume
        self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolume, snapshot)

        # clone subvolume snapshot
        self._fs_cmd("subvolume", "snapshot", "clone", self.volname, subvolume, snapshot, clone)

        # check clone status
        self._wait_for_clone_to_complete(clone)

        # snapshot clone
        self._fs_cmd("subvolume", "snapshot", "create", self.volname, clone, snapshot)

        # remove clone with snapshot retention
        self._fs_cmd("subvolume", "rm", self.volname, clone, "--retain-snapshots")

        # fake a trash entry
        self._update_fake_trash(clone)

        # clone subvolume snapshot (recreate)
        try:
            self._fs_cmd("subvolume", "snapshot", "clone", self.volname, subvolume, snapshot, clone)
        except CommandFailedError as ce:
            self.assertEqual(ce.exitstatus, errno.EAGAIN, "invalid error code on recreate of clone with purge pending")
        else:
            self.fail("expected recreate of clone with purge pending to fail")

        # clear fake trash entry
        self._update_fake_trash(clone, create=False)

        # recreate clone
        self._fs_cmd("subvolume", "snapshot", "clone", self.volname, subvolume, snapshot, clone)

        # check clone status
        self._wait_for_clone_to_complete(clone)

        # remove snapshots
        self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume, snapshot)
        self._fs_cmd("subvolume", "snapshot", "rm", self.volname, clone, snapshot)

        # remove subvolumes
        self._fs_cmd("subvolume", "rm", self.volname, subvolume)
        self._fs_cmd("subvolume", "rm", self.volname, clone)

        # verify trash dir is clean
        self._wait_for_trash_empty()

    def test_subvolume_snapshot_attr_clone(self):
        subvolume = self._generate_random_subvolume_name()
        snapshot = self._generate_random_snapshot_name()
        clone = self._generate_random_clone_name()

        # create subvolume
        self._fs_cmd("subvolume", "create", self.volname, subvolume, "--mode=777")

        # do some IO
        self._do_subvolume_io_mixed(subvolume)

        # snapshot subvolume
        self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolume, snapshot)

        # schedule a clone
        self._fs_cmd("subvolume", "snapshot", "clone", self.volname, subvolume, snapshot, clone)

        # check clone status
        self._wait_for_clone_to_complete(clone)

        # verify clone
        self._verify_clone(subvolume, snapshot, clone)

        # remove snapshot
        self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume, snapshot)

        # remove subvolumes
        self._fs_cmd("subvolume", "rm", self.volname, subvolume)
        self._fs_cmd("subvolume", "rm", self.volname, clone)

        # verify trash dir is clean
        self._wait_for_trash_empty()

    def test_subvolume_snapshot_clone(self):
        subvolume = self._generate_random_subvolume_name()
        snapshot = self._generate_random_snapshot_name()
        clone = self._generate_random_clone_name()

        # create subvolume
        self._fs_cmd("subvolume", "create", self.volname, subvolume, "--mode=777")

        # do some IO
        self._do_subvolume_io(subvolume, number_of_files=64)

        # snapshot subvolume
        self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolume, snapshot)

        # schedule a clone
        self._fs_cmd("subvolume", "snapshot", "clone", self.volname, subvolume, snapshot, clone)

        # check clone status
        self._wait_for_clone_to_complete(clone)

        # verify clone
        self._verify_clone(subvolume, snapshot, clone)

        # remove snapshot
        self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume, snapshot)

        # remove subvolumes
        self._fs_cmd("subvolume", "rm", self.volname, subvolume)
        self._fs_cmd("subvolume", "rm", self.volname, clone)

        # verify trash dir is clean
        self._wait_for_trash_empty()

    def test_subvolume_snapshot_clone_quota_exceeded(self):
        subvolume = self._generate_random_subvolume_name()
        snapshot = self._generate_random_snapshot_name()
        clone = self._generate_random_clone_name()

        # create subvolume with 20MB quota
        osize = self.DEFAULT_FILE_SIZE*1024*1024*20
        self._fs_cmd("subvolume", "create", self.volname, subvolume, "--mode=777", "--size", str(osize))

        # do IO, write 50 files of 1MB each to exceed quota. This mostly
        # succeeds as quota enforcement takes time.
        self._do_subvolume_io(subvolume, number_of_files=50)
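
        # Illustrative sketch (not part of the original test): the subvolume
        # "--size" is enforced via CephFS quotas, so the effective limit can
        # be read back through the "ceph.quota.max_bytes" vxattr on the
        # subvolume path, e.g.:
        #
        #   subvolpath = self._get_subvolume_path(self.volname, subvolume)
        #   quota = int(self.mount_a.getfattr(subvolpath, "ceph.quota.max_bytes"))
        #   self.assertEqual(quota, osize)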

        # snapshot subvolume
        self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolume, snapshot)

        # schedule a clone
        self._fs_cmd("subvolume", "snapshot", "clone", self.volname, subvolume, snapshot, clone)

        # check clone status
        self._wait_for_clone_to_complete(clone)

        # verify clone
        self._verify_clone(subvolume, snapshot, clone)

        # remove snapshot
        self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume, snapshot)

        # remove subvolumes
        self._fs_cmd("subvolume", "rm", self.volname, subvolume)
        self._fs_cmd("subvolume", "rm", self.volname, clone)

        # verify trash dir is clean
        self._wait_for_trash_empty()

    def test_subvolume_snapshot_in_complete_clone_rm(self):
        """
        Validates the removal of a clone when it is not in the 'complete|cancelled|failed' state.
        The forceful removal of a subvolume clone succeeds only if it's in any of the
        'complete|cancelled|failed' states. It fails with EAGAIN in any other state.
        """
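
        # For reference, a sketch of the clone lifecycle assumed here: a clone
        # starts out "pending", moves to "in-progress" once a cloner thread
        # picks it up, and ends up "complete", "canceled" (via 'fs clone
        # cancel') or "failed". Only the three terminal states permit forced
        # removal.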

        subvolume = self._generate_random_subvolume_name()
        snapshot = self._generate_random_snapshot_name()
        clone = self._generate_random_clone_name()

        # create subvolume
        self._fs_cmd("subvolume", "create", self.volname, subvolume, "--mode=777")

        # do some IO
        self._do_subvolume_io(subvolume, number_of_files=64)

        # snapshot subvolume
        self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolume, snapshot)

        # Insert delay at the beginning of snapshot clone
        self.config_set('mgr', 'mgr/volumes/snapshot_clone_delay', 2)

        # schedule a clone
        self._fs_cmd("subvolume", "snapshot", "clone", self.volname, subvolume, snapshot, clone)

        # Use --force since clone is not complete. Returns EAGAIN as the clone
        # is neither complete nor cancelled.
        try:
            self._fs_cmd("subvolume", "rm", self.volname, clone, "--force")
        except CommandFailedError as ce:
            if ce.exitstatus != errno.EAGAIN:
                raise RuntimeError("invalid error code when trying to remove an in-progress clone")
        else:
            raise RuntimeError("expected removal of an in-progress clone to fail")

        # cancel on-going clone
        self._fs_cmd("clone", "cancel", self.volname, clone)

        # verify canceled state
        self._check_clone_canceled(clone)

        # clone removal should succeed after cancel
        self._fs_cmd("subvolume", "rm", self.volname, clone, "--force")

        # remove snapshot
        self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume, snapshot)

        # remove subvolume
        self._fs_cmd("subvolume", "rm", self.volname, subvolume)

        # verify trash dir is clean
        self._wait_for_trash_empty()

    def test_subvolume_snapshot_clone_retain_suid_guid(self):
        subvolume = self._generate_random_subvolume_name()
        snapshot = self._generate_random_snapshot_name()
        clone = self._generate_random_clone_name()

        # create subvolume
        self._fs_cmd("subvolume", "create", self.volname, subvolume, "--mode=777")

        # create a file with the setuid and setgid bits set, along with the executable bit
        args = ["subvolume", "getpath", self.volname, subvolume]
        args = tuple(args)
        subvolpath = self._fs_cmd(*args)
        self.assertNotEqual(subvolpath, None)
        subvolpath = subvolpath[1:].rstrip() # remove "/" prefix and any trailing newline

        file_path = os.path.join(subvolpath, "test_suid_file")
        self.mount_a.run_shell(["touch", file_path])
        self.mount_a.run_shell(["chmod", "u+sx,g+sx", file_path])

        # snapshot subvolume
        self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolume, snapshot)

        # schedule a clone
        self._fs_cmd("subvolume", "snapshot", "clone", self.volname, subvolume, snapshot, clone)

        # check clone status
        self._wait_for_clone_to_complete(clone)

        # verify clone
        self._verify_clone(subvolume, snapshot, clone)

        # remove snapshot
        self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume, snapshot)

        # remove subvolumes
        self._fs_cmd("subvolume", "rm", self.volname, subvolume)
        self._fs_cmd("subvolume", "rm", self.volname, clone)

        # verify trash dir is clean
        self._wait_for_trash_empty()

    def test_subvolume_snapshot_clone_and_reclone(self):
        subvolume = self._generate_random_subvolume_name()
        snapshot = self._generate_random_snapshot_name()
        clone1, clone2 = self._generate_random_clone_name(2)

        # create subvolume
        self._fs_cmd("subvolume", "create", self.volname, subvolume, "--mode=777")

        # do some IO
        self._do_subvolume_io(subvolume, number_of_files=32)

        # snapshot subvolume
        self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolume, snapshot)

        # schedule a clone
        self._fs_cmd("subvolume", "snapshot", "clone", self.volname, subvolume, snapshot, clone1)

        # check clone status
        self._wait_for_clone_to_complete(clone1)

        # verify clone
        self._verify_clone(subvolume, snapshot, clone1)

        # remove snapshot
        self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume, snapshot)

        # now the clone is just like a normal subvolume -- snapshot the clone and fork
        # another clone. before that do some IO so it can be differentiated.
        self._do_subvolume_io(clone1, create_dir="data", number_of_files=32)

        # snapshot clone -- use same snap name
        self._fs_cmd("subvolume", "snapshot", "create", self.volname, clone1, snapshot)

        # schedule a clone
        self._fs_cmd("subvolume", "snapshot", "clone", self.volname, clone1, snapshot, clone2)

        # check clone status
        self._wait_for_clone_to_complete(clone2)

        # verify clone
        self._verify_clone(clone1, snapshot, clone2)

        # remove snapshot
        self._fs_cmd("subvolume", "snapshot", "rm", self.volname, clone1, snapshot)

        # remove subvolumes
        self._fs_cmd("subvolume", "rm", self.volname, subvolume)
        self._fs_cmd("subvolume", "rm", self.volname, clone1)
        self._fs_cmd("subvolume", "rm", self.volname, clone2)

        # verify trash dir is clean
        self._wait_for_trash_empty()

    def test_subvolume_snapshot_clone_cancel_in_progress(self):
        subvolume = self._generate_random_subvolume_name()
        snapshot = self._generate_random_snapshot_name()
        clone = self._generate_random_clone_name()

        # create subvolume
        self._fs_cmd("subvolume", "create", self.volname, subvolume, "--mode=777")

        # do some IO
        self._do_subvolume_io(subvolume, number_of_files=128)

        # snapshot subvolume
        self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolume, snapshot)

        # Insert delay at the beginning of snapshot clone
        self.config_set('mgr', 'mgr/volumes/snapshot_clone_delay', 2)

        # schedule a clone
        self._fs_cmd("subvolume", "snapshot", "clone", self.volname, subvolume, snapshot, clone)

        # cancel on-going clone
        self._fs_cmd("clone", "cancel", self.volname, clone)

        # verify canceled state
        self._check_clone_canceled(clone)

        # remove snapshot
        self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume, snapshot)

        # remove subvolumes
        self._fs_cmd("subvolume", "rm", self.volname, subvolume)
        self._fs_cmd("subvolume", "rm", self.volname, clone, "--force")

        # verify trash dir is clean
        self._wait_for_trash_empty()

    def test_subvolume_snapshot_clone_cancel_pending(self):
        """
        this test is a bit more involved compared to canceling an in-progress clone.
        we need to ensure that a to-be-canceled clone has not yet been picked up
        by the cloner threads. exploit the fact that clones are picked up in FCFS
        order and that there are four (4) cloner threads by default. when the number
        of cloner threads increases, this test _may_ start tripping -- so the number
        of clone operations would need to be bumped up accordingly.
        """
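
        # e.g. with the default four cloner threads and five queued clones,
        # clones[0:4] are picked up more or less immediately while clones[4:]
        # remain "pending" and can be canceled before any cloner thread gets
        # to them -- that is the window this test exploits.
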
        # default number of cloner threads
        NR_THREADS = 4
        # good enough for 4 threads
        NR_CLONES = 5
        # yes, 1 GiB -- we need the clone to run for some time
        FILE_SIZE_MB = 1024

        subvolume = self._generate_random_subvolume_name()
        snapshot = self._generate_random_snapshot_name()
        clones = self._generate_random_clone_name(NR_CLONES)

        # create subvolume
        self._fs_cmd("subvolume", "create", self.volname, subvolume, "--mode=777")

        # do some IO
        self._do_subvolume_io(subvolume, number_of_files=4, file_size=FILE_SIZE_MB)

        # snapshot subvolume
        self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolume, snapshot)

        # schedule clones
        for clone in clones:
            self._fs_cmd("subvolume", "snapshot", "clone", self.volname, subvolume, snapshot, clone)

        to_wait = clones[0:NR_THREADS]
        to_cancel = clones[NR_THREADS:]

        # cancel pending clones and verify
        for clone in to_cancel:
            status = json.loads(self._fs_cmd("clone", "status", self.volname, clone))
            self.assertEqual(status["status"]["state"], "pending")
            self._fs_cmd("clone", "cancel", self.volname, clone)
            self._check_clone_canceled(clone)

        # let's cancel on-going clones. handle the case where some of the clones
        # have _just_ completed
        for clone in list(to_wait):
            try:
                self._fs_cmd("clone", "cancel", self.volname, clone)
                to_cancel.append(clone)
                to_wait.remove(clone)
            except CommandFailedError as ce:
                if ce.exitstatus != errno.EINVAL:
                    raise RuntimeError("invalid error code when canceling an on-going clone")

        # remove snapshot
        self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume, snapshot)

        # remove subvolumes
        self._fs_cmd("subvolume", "rm", self.volname, subvolume)
        for clone in to_wait:
            self._fs_cmd("subvolume", "rm", self.volname, clone)
        for clone in to_cancel:
            self._fs_cmd("subvolume", "rm", self.volname, clone, "--force")

        # verify trash dir is clean
        self._wait_for_trash_empty()

    def test_subvolume_snapshot_clone_different_groups(self):
        subvolume = self._generate_random_subvolume_name()
        snapshot = self._generate_random_snapshot_name()
        clone = self._generate_random_clone_name()
        s_group, c_group = self._generate_random_group_name(2)

        # create groups
        self._fs_cmd("subvolumegroup", "create", self.volname, s_group)
        self._fs_cmd("subvolumegroup", "create", self.volname, c_group)

        # create subvolume
        self._fs_cmd("subvolume", "create", self.volname, subvolume, s_group, "--mode=777")

        # do some IO
        self._do_subvolume_io(subvolume, subvolume_group=s_group, number_of_files=32)

        # snapshot subvolume
        self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolume, snapshot, s_group)

        # schedule a clone
        self._fs_cmd("subvolume", "snapshot", "clone", self.volname, subvolume, snapshot, clone,
                     '--group_name', s_group, '--target_group_name', c_group)

        # check clone status
        self._wait_for_clone_to_complete(clone, clone_group=c_group)

        # verify clone
        self._verify_clone(subvolume, snapshot, clone, source_group=s_group, clone_group=c_group)

        # remove snapshot
        self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume, snapshot, s_group)

        # remove subvolumes
        self._fs_cmd("subvolume", "rm", self.volname, subvolume, s_group)
        self._fs_cmd("subvolume", "rm", self.volname, clone, c_group)

        # remove groups
        self._fs_cmd("subvolumegroup", "rm", self.volname, s_group)
        self._fs_cmd("subvolumegroup", "rm", self.volname, c_group)

        # verify trash dir is clean
        self._wait_for_trash_empty()

    def test_subvolume_snapshot_clone_fail_with_remove(self):
        subvolume = self._generate_random_subvolume_name()
        snapshot = self._generate_random_snapshot_name()
        clone1, clone2 = self._generate_random_clone_name(2)

        pool_capacity = 32 * 1024 * 1024
        # number of files required to fill up 99% of the pool
        nr_files = int((pool_capacity * 0.99) / (TestVolumes.DEFAULT_FILE_SIZE * 1024 * 1024))

        # create subvolume
        self._fs_cmd("subvolume", "create", self.volname, subvolume, "--mode=777")

        # do some IO
        self._do_subvolume_io(subvolume, number_of_files=nr_files)

        # snapshot subvolume
        self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolume, snapshot)

        # add data pool
        new_pool = "new_pool"
        self.fs.add_data_pool(new_pool)

        self.fs.mon_manager.raw_cluster_cmd("osd", "pool", "set-quota", new_pool,
                                            "max_bytes", "{0}".format(pool_capacity // 4))

        # schedule a clone
        self._fs_cmd("subvolume", "snapshot", "clone", self.volname, subvolume, snapshot, clone1, "--pool_layout", new_pool)

        # check clone status -- this should dramatically overshoot the pool quota
        self._wait_for_clone_to_complete(clone1)

        # verify clone
        self._verify_clone(subvolume, snapshot, clone1, clone_pool=new_pool)

        # wait a bit so that subsequent I/O will give pool full error
        time.sleep(120)

        # schedule a clone
        self._fs_cmd("subvolume", "snapshot", "clone", self.volname, subvolume, snapshot, clone2, "--pool_layout", new_pool)

        # check clone status
        self._wait_for_clone_to_fail(clone2)

        # remove snapshot
        self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume, snapshot)

        # remove subvolumes
        self._fs_cmd("subvolume", "rm", self.volname, subvolume)
        self._fs_cmd("subvolume", "rm", self.volname, clone1)
        try:
            self._fs_cmd("subvolume", "rm", self.volname, clone2)
        except CommandFailedError as ce:
            if ce.exitstatus != errno.EAGAIN:
                raise RuntimeError("invalid error code when trying to remove failed clone")
        else:
            raise RuntimeError("expected error when removing a failed clone")

        # ... and with force, failed clone can be removed
        self._fs_cmd("subvolume", "rm", self.volname, clone2, "--force")

        # verify trash dir is clean
        self._wait_for_trash_empty()

    def test_subvolume_snapshot_clone_on_existing_subvolumes(self):
        subvolume1, subvolume2 = self._generate_random_subvolume_name(2)
        snapshot = self._generate_random_snapshot_name()
        clone = self._generate_random_clone_name()

        # create subvolumes
        self._fs_cmd("subvolume", "create", self.volname, subvolume1, "--mode=777")
        self._fs_cmd("subvolume", "create", self.volname, subvolume2, "--mode=777")

        # do some IO
        self._do_subvolume_io(subvolume1, number_of_files=32)

        # snapshot subvolume
        self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolume1, snapshot)

        # schedule a clone with target as subvolume2
        try:
            self._fs_cmd("subvolume", "snapshot", "clone", self.volname, subvolume1, snapshot, subvolume2)
        except CommandFailedError as ce:
            if ce.exitstatus != errno.EEXIST:
                raise RuntimeError("invalid error code when cloning to existing subvolume")
        else:
            raise RuntimeError("expected cloning to fail if the target is an existing subvolume")

        self._fs_cmd("subvolume", "snapshot", "clone", self.volname, subvolume1, snapshot, clone)

        # schedule a clone with target as clone
        try:
            self._fs_cmd("subvolume", "snapshot", "clone", self.volname, subvolume1, snapshot, clone)
        except CommandFailedError as ce:
            if ce.exitstatus != errno.EEXIST:
                raise RuntimeError("invalid error code when cloning to existing clone")
        else:
            raise RuntimeError("expected cloning to fail if the target is an existing clone")

        # check clone status
        self._wait_for_clone_to_complete(clone)

        # verify clone
        self._verify_clone(subvolume1, snapshot, clone)

        # remove snapshot
        self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume1, snapshot)

        # remove subvolumes
        self._fs_cmd("subvolume", "rm", self.volname, subvolume1)
        self._fs_cmd("subvolume", "rm", self.volname, subvolume2)
        self._fs_cmd("subvolume", "rm", self.volname, clone)

        # verify trash dir is clean
        self._wait_for_trash_empty()

    def test_subvolume_snapshot_clone_pool_layout(self):
        subvolume = self._generate_random_subvolume_name()
        snapshot = self._generate_random_snapshot_name()
        clone = self._generate_random_clone_name()

        # add data pool
        new_pool = "new_pool"
        newid = self.fs.add_data_pool(new_pool)

        # create subvolume
        self._fs_cmd("subvolume", "create", self.volname, subvolume, "--mode=777")

        # do some IO
        self._do_subvolume_io(subvolume, number_of_files=32)

        # snapshot subvolume
        self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolume, snapshot)

        # schedule a clone
        self._fs_cmd("subvolume", "snapshot", "clone", self.volname, subvolume, snapshot, clone, "--pool_layout", new_pool)

        # check clone status
        self._wait_for_clone_to_complete(clone)

        # verify clone
        self._verify_clone(subvolume, snapshot, clone, clone_pool=new_pool)

        # remove snapshot
        self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume, snapshot)

        subvol_path = self._get_subvolume_path(self.volname, clone)
        desired_pool = self.mount_a.getfattr(subvol_path, "ceph.dir.layout.pool")
        try:
            self.assertEqual(desired_pool, new_pool)
        except AssertionError:
            self.assertEqual(int(desired_pool), newid) # old kernel returns id
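
        # ("ceph.dir.layout.pool" is the CephFS layout vxattr; newer kernels
        # resolve it to the pool name while older ones report the numeric
        # pool id, hence the fallback comparison above.)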

        # remove subvolumes
        self._fs_cmd("subvolume", "rm", self.volname, subvolume)
        self._fs_cmd("subvolume", "rm", self.volname, clone)

        # verify trash dir is clean
        self._wait_for_trash_empty()

    def test_subvolume_snapshot_clone_under_group(self):
        subvolume = self._generate_random_subvolume_name()
        snapshot = self._generate_random_snapshot_name()
        clone = self._generate_random_clone_name()
        group = self._generate_random_group_name()

        # create subvolume
        self._fs_cmd("subvolume", "create", self.volname, subvolume, "--mode=777")

        # do some IO
        self._do_subvolume_io(subvolume, number_of_files=32)

        # snapshot subvolume
        self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolume, snapshot)

        # create group
        self._fs_cmd("subvolumegroup", "create", self.volname, group)

        # schedule a clone
        self._fs_cmd("subvolume", "snapshot", "clone", self.volname, subvolume, snapshot, clone, '--target_group_name', group)

        # check clone status
        self._wait_for_clone_to_complete(clone, clone_group=group)

        # verify clone
        self._verify_clone(subvolume, snapshot, clone, clone_group=group)

        # remove snapshot
        self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume, snapshot)

        # remove subvolumes
        self._fs_cmd("subvolume", "rm", self.volname, subvolume)
        self._fs_cmd("subvolume", "rm", self.volname, clone, group)

        # remove group
        self._fs_cmd("subvolumegroup", "rm", self.volname, group)

        # verify trash dir is clean
        self._wait_for_trash_empty()

    def test_subvolume_snapshot_clone_with_attrs(self):
        subvolume = self._generate_random_subvolume_name()
        snapshot = self._generate_random_snapshot_name()
        clone = self._generate_random_clone_name()

        mode = "777"
        uid = "1000"
        gid = "1000"
        new_uid = "1001"
        new_gid = "1001"
        new_mode = "700"

        # create subvolume
        self._fs_cmd("subvolume", "create", self.volname, subvolume, "--mode", mode, "--uid", uid, "--gid", gid)

        # do some IO
        self._do_subvolume_io(subvolume, number_of_files=32)

        # snapshot subvolume
        self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolume, snapshot)

        # change subvolume attrs (to ensure clone picks up snapshot attrs)
        self._do_subvolume_attr_update(subvolume, new_uid, new_gid, new_mode)

        # schedule a clone
        self._fs_cmd("subvolume", "snapshot", "clone", self.volname, subvolume, snapshot, clone)

        # check clone status
        self._wait_for_clone_to_complete(clone)

        # verify clone
        self._verify_clone(subvolume, snapshot, clone)

        # remove snapshot
        self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume, snapshot)

        # remove subvolumes
        self._fs_cmd("subvolume", "rm", self.volname, subvolume)
        self._fs_cmd("subvolume", "rm", self.volname, clone)

        # verify trash dir is clean
        self._wait_for_trash_empty()

    def test_subvolume_snapshot_clone_with_upgrade(self):
        """
        yet another poor man's upgrade test -- rather than going through a full
        upgrade cycle, emulate an old-style subvolume by going through the wormhole
        and verify the clone operation.
        further ensure that the legacy volume is not updated to v2, but the clone is.
        """
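
        # (background: a "legacy" subvolume here is just a directory created
        # by hand under volumes/_nogroup with the layout xattr set, i.e. one
        # that predates the mgr/volumes metadata file; the test fabricates
        # one below instead of running an actual upgrade.)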
        subvolume = self._generate_random_subvolume_name()
        snapshot = self._generate_random_snapshot_name()
        clone = self._generate_random_clone_name()

        # emulate an old-fashioned subvolume
        createpath = os.path.join(".", "volumes", "_nogroup", subvolume)
        self.mount_a.run_shell_payload(f"mkdir -p -m 777 {createpath}", sudo=True)

        # add required xattrs to subvolume
        default_pool = self.mount_a.getfattr(".", "ceph.dir.layout.pool")
        self.mount_a.setfattr(createpath, 'ceph.dir.layout.pool', default_pool, sudo=True)

        # do some IO
        self._do_subvolume_io(subvolume, number_of_files=64)

        # snapshot subvolume
        self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolume, snapshot)

        # ensure metadata file is in legacy location, with required version v1
        self._assert_meta_location_and_version(self.volname, subvolume, version=1, legacy=True)

        # Insert delay at the beginning of snapshot clone
        self.config_set('mgr', 'mgr/volumes/snapshot_clone_delay', 2)

        # schedule a clone
        self._fs_cmd("subvolume", "snapshot", "clone", self.volname, subvolume, snapshot, clone)

        # snapshot should not be deletable now
        try:
            self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume, snapshot)
        except CommandFailedError as ce:
            self.assertEqual(ce.exitstatus, errno.EAGAIN, msg="invalid error code when removing source snapshot of a clone")
        else:
            self.fail("expected removing source snapshot of a clone to fail")

        # check clone status
        self._wait_for_clone_to_complete(clone)

        # verify clone
        self._verify_clone(subvolume, snapshot, clone, source_version=1)

        # remove snapshot
        self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume, snapshot)

        # ensure metadata file is in v2 location, with required version v2
        self._assert_meta_location_and_version(self.volname, clone)

        # remove subvolumes
        self._fs_cmd("subvolume", "rm", self.volname, subvolume)
        self._fs_cmd("subvolume", "rm", self.volname, clone)

        # verify trash dir is clean
        self._wait_for_trash_empty()

    def test_subvolume_snapshot_reconf_max_concurrent_clones(self):
        """
        Validate 'max_concurrent_clones' config option
        """

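        # (equivalent CLI, for reference: config_get/config_set here wrap the
        # standard "ceph config" interface, e.g.
        #   ceph config get mgr mgr/volumes/max_concurrent_clones
        #   ceph config set mgr mgr/volumes/max_concurrent_clones 6)
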
        # get the default number of cloner threads
        default_max_concurrent_clones = int(self.config_get('mgr', 'mgr/volumes/max_concurrent_clones'))
        self.assertEqual(default_max_concurrent_clones, 4)

        # Increase number of cloner threads
        self.config_set('mgr', 'mgr/volumes/max_concurrent_clones', 6)
        max_concurrent_clones = int(self.config_get('mgr', 'mgr/volumes/max_concurrent_clones'))
        self.assertEqual(max_concurrent_clones, 6)

        # Decrease number of cloner threads
        self.config_set('mgr', 'mgr/volumes/max_concurrent_clones', 2)
        max_concurrent_clones = int(self.config_get('mgr', 'mgr/volumes/max_concurrent_clones'))
        self.assertEqual(max_concurrent_clones, 2)

    def test_subvolume_snapshot_config_snapshot_clone_delay(self):
        """
        Validate 'snapshot_clone_delay' config option
        """

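        # ('snapshot_clone_delay' postpones the start of each clone by the
        # given number of seconds; other tests in this file set it to widen
        # the window for canceling or removing a clone that is still pending.)
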
        # get the default delay before starting the clone
        default_timeout = int(self.config_get('mgr', 'mgr/volumes/snapshot_clone_delay'))
        self.assertEqual(default_timeout, 0)

        # Insert delay of 2 seconds at the beginning of the snapshot clone
        self.config_set('mgr', 'mgr/volumes/snapshot_clone_delay', 2)
        default_timeout = int(self.config_get('mgr', 'mgr/volumes/snapshot_clone_delay'))
        self.assertEqual(default_timeout, 2)

        # Decrease number of cloner threads
        self.config_set('mgr', 'mgr/volumes/max_concurrent_clones', 2)
        max_concurrent_clones = int(self.config_get('mgr', 'mgr/volumes/max_concurrent_clones'))
        self.assertEqual(max_concurrent_clones, 2)

    def test_subvolume_under_group_snapshot_clone(self):
        subvolume = self._generate_random_subvolume_name()
        group = self._generate_random_group_name()
        snapshot = self._generate_random_snapshot_name()
        clone = self._generate_random_clone_name()

        # create group
        self._fs_cmd("subvolumegroup", "create", self.volname, group)

        # create subvolume
        self._fs_cmd("subvolume", "create", self.volname, subvolume, group, "--mode=777")

        # do some IO
        self._do_subvolume_io(subvolume, subvolume_group=group, number_of_files=32)

        # snapshot subvolume
        self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolume, snapshot, group)

        # schedule a clone
        self._fs_cmd("subvolume", "snapshot", "clone", self.volname, subvolume, snapshot, clone, '--group_name', group)

        # check clone status
        self._wait_for_clone_to_complete(clone)

        # verify clone
        self._verify_clone(subvolume, snapshot, clone, source_group=group)

        # remove snapshot
        self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume, snapshot, group)

        # remove subvolumes
        self._fs_cmd("subvolume", "rm", self.volname, subvolume, group)
        self._fs_cmd("subvolume", "rm", self.volname, clone)

        # remove group
        self._fs_cmd("subvolumegroup", "rm", self.volname, group)

        # verify trash dir is clean
        self._wait_for_trash_empty()


class TestMisc(TestVolumesHelper):
    """Miscellaneous tests related to FS volume, subvolume group, and subvolume operations."""
    def test_connection_expiration(self):
        # unmount any cephfs mounts
        for i in range(0, self.CLIENTS_REQUIRED):
            self.mounts[i].umount_wait()
        sessions = self._session_list()
        self.assertLessEqual(len(sessions), 1) # maybe mgr is already mounted

        # Get the mgr to definitely mount cephfs
        subvolume = self._generate_random_subvolume_name()
        self._fs_cmd("subvolume", "create", self.volname, subvolume)
        sessions = self._session_list()
        self.assertEqual(len(sessions), 1)

        # Now wait for the mgr to expire the connection:
        self.wait_until_evicted(sessions[0]['id'], timeout=90)

    def test_mgr_eviction(self):
        # unmount any cephfs mounts
        for i in range(0, self.CLIENTS_REQUIRED):
            self.mounts[i].umount_wait()
        sessions = self._session_list()
        self.assertLessEqual(len(sessions), 1) # maybe mgr is already mounted

        # Get the mgr to definitely mount cephfs
        subvolume = self._generate_random_subvolume_name()
        self._fs_cmd("subvolume", "create", self.volname, subvolume)
        sessions = self._session_list()
        self.assertEqual(len(sessions), 1)

        # Now fail the mgr, check the session was evicted
        mgr = self.mgr_cluster.get_active_id()
        self.mgr_cluster.mgr_fail(mgr)
        self.wait_until_evicted(sessions[0]['id'])

    def test_names_can_only_be_goodchars(self):
        """
        Test that creating volumes, subvolumes and subvolume groups fails when
        their names use characters outside [a-zA-Z0-9 -_.].
        """
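        # illustrative only: the allowed-name check amounts to matching
        # against a pattern like r'[a-zA-Z0-9 \-_.]+' (e.g. via
        # re.fullmatch), though the mgr side may implement it differently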
        volname, badname = 'testvol', 'abcd@#'

        with self.assertRaises(CommandFailedError):
            self._fs_cmd('volume', 'create', badname)
        self._fs_cmd('volume', 'create', volname)

        with self.assertRaises(CommandFailedError):
            self._fs_cmd('subvolumegroup', 'create', volname, badname)

        with self.assertRaises(CommandFailedError):
            self._fs_cmd('subvolume', 'create', volname, badname)
        self._fs_cmd('volume', 'rm', volname, '--yes-i-really-mean-it')

    def test_subvolume_ops_on_nonexistent_vol(self):
        # tests the fs subvolume operations on a nonexistent volume

        volname = "non_existent_subvolume"

        # try subvolume operations
        for op in ("create", "rm", "getpath", "info", "resize", "pin", "ls"):
            try:
                if op == "resize":
                    self._fs_cmd("subvolume", "resize", volname, "subvolname_1", "inf")
                elif op == "pin":
                    self._fs_cmd("subvolume", "pin", volname, "subvolname_1", "export", "1")
                elif op == "ls":
                    self._fs_cmd("subvolume", "ls", volname)
                else:
                    self._fs_cmd("subvolume", op, volname, "subvolume_1")
            except CommandFailedError as ce:
                self.assertEqual(ce.exitstatus, errno.ENOENT)
            else:
                self.fail("expected the 'fs subvolume {0}' command to fail".format(op))

        # try subvolume snapshot operations and clone create
        for op in ("create", "rm", "info", "protect", "unprotect", "ls", "clone"):
            try:
                if op == "ls":
                    self._fs_cmd("subvolume", "snapshot", op, volname, "subvolume_1")
                elif op == "clone":
                    self._fs_cmd("subvolume", "snapshot", op, volname, "subvolume_1", "snapshot_1", "clone_1")
                else:
                    self._fs_cmd("subvolume", "snapshot", op, volname, "subvolume_1", "snapshot_1")
            except CommandFailedError as ce:
                self.assertEqual(ce.exitstatus, errno.ENOENT)
            else:
                self.fail("expected the 'fs subvolume snapshot {0}' command to fail".format(op))

        # try clone status
        try:
            self._fs_cmd("clone", "status", volname, "clone_1")
        except CommandFailedError as ce:
            self.assertEqual(ce.exitstatus, errno.ENOENT)
        else:
            self.fail("expected the 'fs clone status' command to fail")

        # try subvolumegroup operations
        for op in ("create", "rm", "getpath", "pin", "ls"):
            try:
                if op == "pin":
                    self._fs_cmd("subvolumegroup", "pin", volname, "group_1", "export", "0")
                elif op == "ls":
                    self._fs_cmd("subvolumegroup", op, volname)
                else:
                    self._fs_cmd("subvolumegroup", op, volname, "group_1")
            except CommandFailedError as ce:
                self.assertEqual(ce.exitstatus, errno.ENOENT)
            else:
                self.fail("expected the 'fs subvolumegroup {0}' command to fail".format(op))

        # try subvolumegroup snapshot operations
        for op in ("create", "rm", "ls"):
            try:
                if op == "ls":
                    self._fs_cmd("subvolumegroup", "snapshot", op, volname, "group_1")
                else:
                    self._fs_cmd("subvolumegroup", "snapshot", op, volname, "group_1", "snapshot_1")
            except CommandFailedError as ce:
                self.assertEqual(ce.exitstatus, errno.ENOENT)
            else:
                self.fail("expected the 'fs subvolumegroup snapshot {0}' command to fail".format(op))

    def test_subvolume_upgrade_legacy_to_v1(self):
        """
        poor man's upgrade test -- rather than going through a full upgrade cycle,
        emulate subvolumes by going through the wormhole and verify they are
        accessible.
        further ensure that a legacy volume is not updated to v2.
        """
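
        # (what "auto-upgrade" means below: on first access through
        # mgr/volumes, a bare legacy directory gets a v1 metadata file
        # created for it; the assertions pin the metadata to the legacy
        # location with version=1 to show it is not taken further to v2.)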
        subvolume1, subvolume2 = self._generate_random_subvolume_name(2)
        group = self._generate_random_group_name()

        # emulate an old-fashioned subvolume -- one in the default group and
        # the other in a custom group
        createpath1 = os.path.join(".", "volumes", "_nogroup", subvolume1)
        self.mount_a.run_shell(['mkdir', '-p', createpath1], sudo=True)

        # create the second one inside a custom group (mkdir -p also creates
        # the group directory)
        createpath2 = os.path.join(".", "volumes", group, subvolume2)
        self.mount_a.run_shell(['mkdir', '-p', createpath2], sudo=True)

        # this would auto-upgrade on access without anyone noticing
        subvolpath1 = self._fs_cmd("subvolume", "getpath", self.volname, subvolume1)
        self.assertNotEqual(subvolpath1, None)
        subvolpath1 = subvolpath1.rstrip() # remove any trailing newline

        subvolpath2 = self._fs_cmd("subvolume", "getpath", self.volname, subvolume2, group)
        self.assertNotEqual(subvolpath2, None)
        subvolpath2 = subvolpath2.rstrip() # remove any trailing newline

        # and... the subvolume path returned should be what we created behind the scenes
        self.assertEqual(createpath1[1:], subvolpath1)
        self.assertEqual(createpath2[1:], subvolpath2)

        # ensure metadata file is in legacy location, with required version v1
        self._assert_meta_location_and_version(self.volname, subvolume1, version=1, legacy=True)
        self._assert_meta_location_and_version(self.volname, subvolume2, subvol_group=group, version=1, legacy=True)

        # remove subvolumes
        self._fs_cmd("subvolume", "rm", self.volname, subvolume1)
        self._fs_cmd("subvolume", "rm", self.volname, subvolume2, group)

        # verify trash dir is clean
        self._wait_for_trash_empty()

        # remove group
        self._fs_cmd("subvolumegroup", "rm", self.volname, group)

    def test_subvolume_no_upgrade_v1_sanity(self):
        """
        poor man's upgrade test -- theme continues...

        This test ensures that v1 subvolumes are retained as-is, due to a snapshot
        being present, and runs through a series of operations on the v1 subvolume
        to ensure they work as expected.
        """
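        # (context: the subvolume version is recorded in its metadata file;
        # _assert_meta_location_and_version() pins both the file's location
        # and the recorded version, so the checks below would catch an
        # unintended auto-upgrade.)
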
        subvol_md = ["atime", "bytes_pcent", "bytes_quota", "bytes_used", "created_at", "ctime",
                     "data_pool", "gid", "mode", "mon_addrs", "mtime", "path", "pool_namespace",
                     "type", "uid", "features", "state"]
        snap_md = ["created_at", "data_pool", "has_pending_clones", "size"]

        subvolume = self._generate_random_subvolume_name()
        snapshot = self._generate_random_snapshot_name()
        clone1, clone2 = self._generate_random_clone_name(2)
        mode = "777"
        uid = "1000"
        gid = "1000"

        # emulate a v1 subvolume -- in the default group
        subvolume_path = self._create_v1_subvolume(subvolume)

        # getpath
        subvolpath = self._get_subvolume_path(self.volname, subvolume)
        self.assertEqual(subvolpath, subvolume_path)

        # ls
        subvolumes = json.loads(self._fs_cmd('subvolume', 'ls', self.volname))
        self.assertEqual(len(subvolumes), 1, "subvolume ls count mismatch, expected '1', found {0}".format(len(subvolumes)))
        self.assertEqual(subvolumes[0]['name'], subvolume,
                         "subvolume name mismatch in ls output, expected '{0}', found '{1}'".format(subvolume, subvolumes[0]['name']))

        # info
        subvol_info = json.loads(self._get_subvolume_info(self.volname, subvolume))
        for md in subvol_md:
            self.assertIn(md, subvol_info, "'{0}' key not present in metadata of subvolume".format(md))

        self.assertEqual(subvol_info["state"], "complete",
                         msg="expected state to be 'complete', found '{0}'".format(subvol_info["state"]))
        self.assertEqual(len(subvol_info["features"]), 2,
                         msg="expected 2 features, found '{0}' ({1})".format(len(subvol_info["features"]), subvol_info["features"]))
        for feature in ['snapshot-clone', 'snapshot-autoprotect']:
            self.assertIn(feature, subvol_info["features"], msg="expected feature '{0}' in subvolume".format(feature))

        # resize
        nsize = self.DEFAULT_FILE_SIZE*1024*1024*10
        self._fs_cmd("subvolume", "resize", self.volname, subvolume, str(nsize))
        subvol_info = json.loads(self._get_subvolume_info(self.volname, subvolume))
        for md in subvol_md:
            self.assertIn(md, subvol_info, "'{0}' key not present in metadata of subvolume".format(md))
        self.assertEqual(subvol_info["bytes_quota"], nsize, "bytes_quota should be set to '{0}'".format(nsize))

        # create (idempotent) (change some attrs, to ensure attrs are preserved from the snapshot on clone)
        self._fs_cmd("subvolume", "create", self.volname, subvolume, "--mode", mode, "--uid", uid, "--gid", gid)

        # do some IO
        self._do_subvolume_io(subvolume, number_of_files=8)

        # snap-create
        self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolume, snapshot)

        # clone
        self._fs_cmd("subvolume", "snapshot", "clone", self.volname, subvolume, snapshot, clone1)

        # check clone status
        self._wait_for_clone_to_complete(clone1)

        # ensure clone is v2
        self._assert_meta_location_and_version(self.volname, clone1, version=2)

        # verify clone
        self._verify_clone(subvolume, snapshot, clone1, source_version=1)

        # clone (older snapshot)
        self._fs_cmd("subvolume", "snapshot", "clone", self.volname, subvolume, 'fake', clone2)

        # check clone status
        self._wait_for_clone_to_complete(clone2)

        # ensure clone is v2
        self._assert_meta_location_and_version(self.volname, clone2, version=2)

        # verify clone
        # TODO: rentries will mismatch till this is fixed https://tracker.ceph.com/issues/46747
        #self._verify_clone(subvolume, 'fake', clone2, source_version=1)

        # snap-info
        snap_info = json.loads(self._get_subvolume_snapshot_info(self.volname, subvolume, snapshot))
        for md in snap_md:
            self.assertIn(md, snap_info, "'{0}' key not present in metadata of snapshot".format(md))
        self.assertEqual(snap_info["has_pending_clones"], "no")

        # snap-ls
        subvol_snapshots = json.loads(self._fs_cmd('subvolume', 'snapshot', 'ls', self.volname, subvolume))
        self.assertEqual(len(subvol_snapshots), 2, "subvolume snapshot ls count mismatch, expected '2', found {0}".format(len(subvol_snapshots)))
        snapshotnames = [snapshot['name'] for snapshot in subvol_snapshots]
        for name in [snapshot, 'fake']:
            self.assertIn(name, snapshotnames, msg="expected snapshot '{0}' in subvolume snapshot ls".format(name))

        # snap-rm
        self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume, snapshot)
        self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume, "fake")

        # ensure volume is still at version 1
        self._assert_meta_location_and_version(self.volname, subvolume, version=1)

        # rm
        self._fs_cmd("subvolume", "rm", self.volname, subvolume)
        self._fs_cmd("subvolume", "rm", self.volname, clone1)
        self._fs_cmd("subvolume", "rm", self.volname, clone2)

        # verify trash dir is clean
        self._wait_for_trash_empty()

    def test_subvolume_no_upgrade_v1_to_v2(self):
        """
        poor man's upgrade test -- theme continues...
        ensure v1 to v2 upgrades are not done automatically due to various states of v1
        """
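
        # (the three v1 states exercised below, each of which blocks the
        # auto-upgrade: a snapshot present in the default group, a snapshot
        # present in a custom group, and a clone still in "pending" state.)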
        subvolume1, subvolume2, subvolume3 = self._generate_random_subvolume_name(3)
        group = self._generate_random_group_name()

        # emulate a v1 subvolume -- in the default group
        subvol1_path = self._create_v1_subvolume(subvolume1)

        # emulate a v1 subvolume -- in a custom group
        subvol2_path = self._create_v1_subvolume(subvolume2, subvol_group=group)

        # emulate a v1 subvolume -- in a clone pending state
        self._create_v1_subvolume(subvolume3, subvol_type='clone', has_snapshot=False, state='pending')

        # this would attempt auto-upgrade on access, but fail to do so as snapshots exist
        subvolpath1 = self._get_subvolume_path(self.volname, subvolume1)
        self.assertEqual(subvolpath1, subvol1_path)

        subvolpath2 = self._get_subvolume_path(self.volname, subvolume2, group_name=group)
        self.assertEqual(subvolpath2, subvol2_path)

        # this would attempt auto-upgrade on access, but fail to do so as the subvolume is not complete
        # use clone status, as only certain operations are allowed in pending state
        status = json.loads(self._fs_cmd("clone", "status", self.volname, subvolume3))
        self.assertEqual(status["status"]["state"], "pending")

        # remove snapshots
        self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume1, "fake")
        self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume2, "fake", group)

        # ensure metadata file is in v1 location, with version retained as v1
        self._assert_meta_location_and_version(self.volname, subvolume1, version=1)
        self._assert_meta_location_and_version(self.volname, subvolume2, subvol_group=group, version=1)

        # remove subvolumes
        self._fs_cmd("subvolume", "rm", self.volname, subvolume1)
        self._fs_cmd("subvolume", "rm", self.volname, subvolume2, group)
        try:
            self._fs_cmd("subvolume", "rm", self.volname, subvolume3)
        except CommandFailedError as ce:
            self.assertEqual(ce.exitstatus, errno.EAGAIN, "invalid error code on rm of subvolume undergoing clone")
        else:
            self.fail("expected rm of subvolume undergoing clone to fail")

        # ensure metadata file is in v1 location, with version retained as v1
        self._assert_meta_location_and_version(self.volname, subvolume3, version=1)
        self._fs_cmd("subvolume", "rm", self.volname, subvolume3, "--force")

        # verify list subvolumes returns an empty list
        subvolumels = json.loads(self._fs_cmd('subvolume', 'ls', self.volname))
        self.assertEqual(len(subvolumels), 0)

        # verify trash dir is clean
        self._wait_for_trash_empty()

    def test_subvolume_upgrade_v1_to_v2(self):
        """
        poor man's upgrade test -- theme continues...
        ensure v1 to v2 upgrades work
        """
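
        # (complement of the test above: with no snapshots and no pending
        # clones, access through mgr/volumes is expected to upgrade the
        # metadata to v2 in place.)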
        subvolume1, subvolume2 = self._generate_random_subvolume_name(2)
        group = self._generate_random_group_name()

        # emulate a v1 subvolume -- in the default group
        subvol1_path = self._create_v1_subvolume(subvolume1, has_snapshot=False)

        # emulate a v1 subvolume -- in a custom group
        subvol2_path = self._create_v1_subvolume(subvolume2, subvol_group=group, has_snapshot=False)

        # this would attempt auto-upgrade on access
        subvolpath1 = self._get_subvolume_path(self.volname, subvolume1)
        self.assertEqual(subvolpath1, subvol1_path)

        subvolpath2 = self._get_subvolume_path(self.volname, subvolume2, group_name=group)
        self.assertEqual(subvolpath2, subvol2_path)

        # ensure metadata file is in v2 location, with version retained as v2
        self._assert_meta_location_and_version(self.volname, subvolume1, version=2)
        self._assert_meta_location_and_version(self.volname, subvolume2, subvol_group=group, version=2)

        # remove subvolumes
        self._fs_cmd("subvolume", "rm", self.volname, subvolume1)
        self._fs_cmd("subvolume", "rm", self.volname, subvolume2, group)

        # verify trash dir is clean
        self._wait_for_trash_empty()