]> git.proxmox.com Git - ceph.git/blob - ceph/qa/tasks/cephfs/test_volumes.py
ed78775b687c0972e45fee935d28e552a9e25dcd
[ceph.git] / ceph / qa / tasks / cephfs / test_volumes.py
1 import os
2 import json
3 import time
4 import errno
5 import random
6 import logging
7 import collections
8 import uuid
9 import unittest
10 from hashlib import md5
11 from textwrap import dedent
12
13 from tasks.cephfs.cephfs_test_case import CephFSTestCase
14 from teuthology.exceptions import CommandFailedError
15 from teuthology.misc import sudo_write_file
16
17 log = logging.getLogger(__name__)
18
class TestVolumes(CephFSTestCase):
    """Tests for the mgr "fs volume/subvolume" interface (create/rm/resize/clone)."""

    # prefixes used when generating randomized test entity names
    TEST_VOLUME_PREFIX = "volume"
    TEST_SUBVOLUME_PREFIX = "subvolume"
    TEST_GROUP_PREFIX = "group"
    TEST_SNAPSHOT_PREFIX = "snapshot"
    TEST_CLONE_PREFIX = "clone"
    TEST_FILE_NAME_PREFIX = "subvolume_file"

    # for filling subvolume with data
    CLIENTS_REQUIRED = 1
    MDSS_REQUIRED = 2

    # io defaults
    DEFAULT_FILE_SIZE = 1  # MB
    DEFAULT_NUMBER_OF_FILES = 1024
34
35 def _fs_cmd(self, *args):
36 return self.mgr_cluster.mon_manager.raw_cluster_cmd("fs", *args)
37
38 def _raw_cmd(self, *args):
39 return self.mgr_cluster.mon_manager.raw_cluster_cmd(*args)
40
41 def __check_clone_state(self, state, clone, clone_group=None, timo=120):
42 check = 0
43 args = ["clone", "status", self.volname, clone]
44 if clone_group:
45 args.append(clone_group)
46 args = tuple(args)
47 while check < timo:
48 result = json.loads(self._fs_cmd(*args))
49 if result["status"]["state"] == state:
50 break
51 check += 1
52 time.sleep(1)
53 self.assertTrue(check < timo)
54
55 def _wait_for_clone_to_complete(self, clone, clone_group=None, timo=120):
56 self.__check_clone_state("complete", clone, clone_group, timo)
57
58 def _wait_for_clone_to_fail(self, clone, clone_group=None, timo=120):
59 self.__check_clone_state("failed", clone, clone_group, timo)
60
61 def _check_clone_canceled(self, clone, clone_group=None):
62 self.__check_clone_state("canceled", clone, clone_group, timo=1)
63
64 def _get_subvolume_snapshot_path(self, subvolume, snapshot, source_group, subvol_path, source_version):
65 if source_version == 2:
66 # v2
67 if subvol_path is not None:
68 (base_path, uuid_str) = os.path.split(subvol_path)
69 else:
70 (base_path, uuid_str) = os.path.split(self._get_subvolume_path(self.volname, subvolume, group_name=source_group))
71 return os.path.join(base_path, ".snap", snapshot, uuid_str)
72
73 # v1
74 base_path = self._get_subvolume_path(self.volname, subvolume, group_name=source_group)
75 return os.path.join(base_path, ".snap", snapshot)
76
77 def _verify_clone_attrs(self, source_path, clone_path):
78 path1 = source_path
79 path2 = clone_path
80
81 p = self.mount_a.run_shell(["find", path1])
82 paths = p.stdout.getvalue().strip().split()
83
84 # for each entry in source and clone (sink) verify certain inode attributes:
85 # inode type, mode, ownership, [am]time.
86 for source_path in paths:
87 sink_entry = source_path[len(path1)+1:]
88 sink_path = os.path.join(path2, sink_entry)
89
90 # mode+type
91 sval = int(self.mount_a.run_shell(['stat', '-c' '%f', source_path]).stdout.getvalue().strip(), 16)
92 cval = int(self.mount_a.run_shell(['stat', '-c' '%f', sink_path]).stdout.getvalue().strip(), 16)
93 self.assertEqual(sval, cval)
94
95 # ownership
96 sval = int(self.mount_a.run_shell(['stat', '-c' '%u', source_path]).stdout.getvalue().strip())
97 cval = int(self.mount_a.run_shell(['stat', '-c' '%u', sink_path]).stdout.getvalue().strip())
98 self.assertEqual(sval, cval)
99
100 sval = int(self.mount_a.run_shell(['stat', '-c' '%g', source_path]).stdout.getvalue().strip())
101 cval = int(self.mount_a.run_shell(['stat', '-c' '%g', sink_path]).stdout.getvalue().strip())
102 self.assertEqual(sval, cval)
103
104 # inode timestamps
105 sval = int(self.mount_a.run_shell(['stat', '-c' '%X', source_path]).stdout.getvalue().strip())
106 cval = int(self.mount_a.run_shell(['stat', '-c' '%X', sink_path]).stdout.getvalue().strip())
107 self.assertEqual(sval, cval)
108
109 sval = int(self.mount_a.run_shell(['stat', '-c' '%Y', source_path]).stdout.getvalue().strip())
110 cval = int(self.mount_a.run_shell(['stat', '-c' '%Y', sink_path]).stdout.getvalue().strip())
111 self.assertEqual(sval, cval)
112
113 def _verify_clone_root(self, source_path, clone_path, clone, clone_group, clone_pool):
114 # verifies following clone root attrs quota, data_pool and pool_namespace
115 # remaining attributes of clone root are validated in _verify_clone_attrs
116
117 clone_info = json.loads(self._get_subvolume_info(self.volname, clone, clone_group))
118
119 # verify quota is inherited from source snapshot
120 src_quota = self.mount_a.getfattr(source_path, "ceph.quota.max_bytes")
121 self.assertEqual(clone_info["bytes_quota"], "infinite" if src_quota is None else int(src_quota))
122
123 if clone_pool:
124 # verify pool is set as per request
125 self.assertEqual(clone_info["data_pool"], clone_pool)
126 else:
127 # verify pool and pool namespace are inherited from snapshot
128 self.assertEqual(clone_info["data_pool"],
129 self.mount_a.getfattr(source_path, "ceph.dir.layout.pool"))
130 self.assertEqual(clone_info["pool_namespace"],
131 self.mount_a.getfattr(source_path, "ceph.dir.layout.pool_namespace"))
132
    def _verify_clone(self, subvolume, snapshot, clone,
                      source_group=None, clone_group=None, clone_pool=None,
                      subvol_path=None, source_version=2, timo=120):
        """Verify a clone against its source snapshot: wait for rentries to
        converge, then check root attrs and per-entry inode attrs.

        Pass in subvol_path (subvolume path when snapshot was taken) when the
        subvolume is removed but snapshots are retained for clone verification.
        """
        path1 = self._get_subvolume_snapshot_path(subvolume, snapshot, source_group, subvol_path, source_version)
        path2 = self._get_subvolume_path(self.volname, clone, group_name=clone_group)

        check = 0
        # TODO: currently snapshot rentries are not stable if snapshot source entries
        # are removed, https://tracker.ceph.com/issues/46747
        # (hence the rentries poll is skipped entirely when subvol_path is given)
        while check < timo and subvol_path is None:
            val1 = int(self.mount_a.getfattr(path1, "ceph.dir.rentries"))
            val2 = int(self.mount_a.getfattr(path2, "ceph.dir.rentries"))
            if val1 == val2:
                break
            check += 1
            time.sleep(1)
        self.assertTrue(check < timo)

        self._verify_clone_root(path1, path2, clone, clone_group, clone_pool)
        self._verify_clone_attrs(path1, path2)
155
156 def _generate_random_volume_name(self, count=1):
157 n = self.volume_start
158 volumes = [f"{TestVolumes.TEST_VOLUME_PREFIX}_{i:016}" for i in range(n, n+count)]
159 self.volume_start += count
160 return volumes[0] if count == 1 else volumes
161
162 def _generate_random_subvolume_name(self, count=1):
163 n = self.subvolume_start
164 subvolumes = [f"{TestVolumes.TEST_SUBVOLUME_PREFIX}_{i:016}" for i in range(n, n+count)]
165 self.subvolume_start += count
166 return subvolumes[0] if count == 1 else subvolumes
167
168 def _generate_random_group_name(self, count=1):
169 n = self.group_start
170 groups = [f"{TestVolumes.TEST_GROUP_PREFIX}_{i:016}" for i in range(n, n+count)]
171 self.group_start += count
172 return groups[0] if count == 1 else groups
173
174 def _generate_random_snapshot_name(self, count=1):
175 n = self.snapshot_start
176 snaps = [f"{TestVolumes.TEST_SNAPSHOT_PREFIX}_{i:016}" for i in range(n, n+count)]
177 self.snapshot_start += count
178 return snaps[0] if count == 1 else snaps
179
180 def _generate_random_clone_name(self, count=1):
181 n = self.clone_start
182 clones = [f"{TestVolumes.TEST_CLONE_PREFIX}_{i:016}" for i in range(n, n+count)]
183 self.clone_start += count
184 return clones[0] if count == 1 else clones
185
186 def _enable_multi_fs(self):
187 self._fs_cmd("flag", "set", "enable_multiple", "true", "--yes-i-really-mean-it")
188
189 def _create_or_reuse_test_volume(self):
190 result = json.loads(self._fs_cmd("volume", "ls"))
191 if len(result) == 0:
192 self.vol_created = True
193 self.volname = self._generate_random_volume_name()
194 self._fs_cmd("volume", "create", self.volname)
195 else:
196 self.volname = result[0]['name']
197
198 def _get_subvolume_group_path(self, vol_name, group_name):
199 args = ("subvolumegroup", "getpath", vol_name, group_name)
200 path = self._fs_cmd(*args)
201 # remove the leading '/', and trailing whitespaces
202 return path[1:].rstrip()
203
204 def _get_subvolume_path(self, vol_name, subvol_name, group_name=None):
205 args = ["subvolume", "getpath", vol_name, subvol_name]
206 if group_name:
207 args.append(group_name)
208 args = tuple(args)
209 path = self._fs_cmd(*args)
210 # remove the leading '/', and trailing whitespaces
211 return path[1:].rstrip()
212
213 def _get_subvolume_info(self, vol_name, subvol_name, group_name=None):
214 args = ["subvolume", "info", vol_name, subvol_name]
215 if group_name:
216 args.append(group_name)
217 args = tuple(args)
218 subvol_md = self._fs_cmd(*args)
219 return subvol_md
220
221 def _get_subvolume_snapshot_info(self, vol_name, subvol_name, snapname, group_name=None):
222 args = ["subvolume", "snapshot", "info", vol_name, subvol_name, snapname]
223 if group_name:
224 args.append(group_name)
225 args = tuple(args)
226 snap_md = self._fs_cmd(*args)
227 return snap_md
228
229 def _delete_test_volume(self):
230 self._fs_cmd("volume", "rm", self.volname, "--yes-i-really-mean-it")
231
232 def _do_subvolume_pool_and_namespace_update(self, subvolume, pool=None, pool_namespace=None, subvolume_group=None):
233 subvolpath = self._get_subvolume_path(self.volname, subvolume, group_name=subvolume_group)
234
235 if pool is not None:
236 self.mount_a.setfattr(subvolpath, 'ceph.dir.layout.pool', pool)
237
238 if pool_namespace is not None:
239 self.mount_a.setfattr(subvolpath, 'ceph.dir.layout.pool_namespace', pool_namespace)
240
241 def _do_subvolume_attr_update(self, subvolume, uid, gid, mode, subvolume_group=None):
242 subvolpath = self._get_subvolume_path(self.volname, subvolume, group_name=subvolume_group)
243
244 # mode
245 self.mount_a.run_shell(['chmod', mode, subvolpath])
246
247 # ownership
248 self.mount_a.run_shell(['chown', uid, subvolpath])
249 self.mount_a.run_shell(['chgrp', gid, subvolpath])
250
251 def _do_subvolume_io(self, subvolume, subvolume_group=None, create_dir=None,
252 number_of_files=DEFAULT_NUMBER_OF_FILES, file_size=DEFAULT_FILE_SIZE):
253 # get subvolume path for IO
254 args = ["subvolume", "getpath", self.volname, subvolume]
255 if subvolume_group:
256 args.append(subvolume_group)
257 args = tuple(args)
258 subvolpath = self._fs_cmd(*args)
259 self.assertNotEqual(subvolpath, None)
260 subvolpath = subvolpath[1:].rstrip() # remove "/" prefix and any trailing newline
261
262 io_path = subvolpath
263 if create_dir:
264 io_path = os.path.join(subvolpath, create_dir)
265 self.mount_a.run_shell(["mkdir", "-p", io_path])
266
267 log.debug("filling subvolume {0} with {1} files each {2}MB size under directory {3}".format(subvolume, number_of_files, file_size, io_path))
268 for i in range(number_of_files):
269 filename = "{0}.{1}".format(TestVolumes.TEST_FILE_NAME_PREFIX, i)
270 self.mount_a.write_n_mb(os.path.join(io_path, filename), file_size)
271
    def _do_subvolume_io_mixed(self, subvolume, subvolume_group=None):
        """Populate a subvolume with a mix of inode types (dir, symlinks) for
        clone attribute verification.

        NOTE(review): the regular-file write is commented out, so both symlinks
        below dangle (they point at "./regfile.0" which is never created) —
        presumably intentional for exercising symlink handling; confirm.
        """
        subvolpath = self._get_subvolume_path(self.volname, subvolume, group_name=subvolume_group)

        reg_file = "regfile.0"
        reg_path = os.path.join(subvolpath, reg_file)
        dir_path = os.path.join(subvolpath, "dir.0")
        sym_path1 = os.path.join(subvolpath, "sym.0")
        # this symlink's ownership would be changed
        sym_path2 = os.path.join(dir_path, "sym.0")

        #self.mount_a.write_n_mb(reg_path, TestVolumes.DEFAULT_FILE_SIZE)
        self.mount_a.run_shell(["sudo", "mkdir", dir_path], omit_sudo=False)
        self.mount_a.run_shell(["sudo", "ln", "-s", "./{}".format(reg_file), sym_path1], omit_sudo=False)
        self.mount_a.run_shell(["sudo", "ln", "-s", "./{}".format(reg_file), sym_path2], omit_sudo=False)
        # flip ownership to nobody. assumption: nobody's id is 65534
        self.mount_a.run_shell(["sudo", "chown", "-h", "65534:65534", sym_path2], omit_sudo=False)
288
289 def _wait_for_trash_empty(self, timeout=30):
290 # XXX: construct the trash dir path (note that there is no mgr
291 # [sub]volume interface for this).
292 trashdir = os.path.join("./", "volumes", "_deleting")
293 self.mount_a.wait_for_dir_empty(trashdir, timeout=timeout)
294
295 def _assert_meta_location_and_version(self, vol_name, subvol_name, subvol_group=None, version=2, legacy=False):
296 if legacy:
297 subvol_path = self._get_subvolume_path(vol_name, subvol_name, group_name=subvol_group)
298 m = md5()
299 m.update(("/"+subvol_path).encode('utf-8'))
300 meta_filename = "{0}.meta".format(m.digest().hex())
301 metapath = os.path.join(".", "volumes", "_legacy", meta_filename)
302 else:
303 group = subvol_group if subvol_group is not None else '_nogroup'
304 metapath = os.path.join(".", "volumes", group, subvol_name, ".meta")
305
306 out = self.mount_a.run_shell(['cat', metapath])
307 lines = out.stdout.getvalue().strip().split('\n')
308 sv_version = -1
309 for line in lines:
310 if line == "version = " + str(version):
311 sv_version = version
312 break
313 self.assertEqual(sv_version, version, "version expected was '{0}' but got '{1}' from meta file at '{2}'".format(
314 version, sv_version, metapath))
315
    def _create_v1_subvolume(self, subvol_name, subvol_group=None, has_snapshot=True, subvol_type='subvolume', state='complete'):
        """Fabricate an on-disk v1-format subvolume (bypassing the mgr interface)
        and return its incarnation path.

        A v1 snapshot is created by default to prevent automatic upgrade of the
        subvolume to v2 by the volumes plugin.
        """
        group = subvol_group if subvol_group is not None else '_nogroup'
        basepath = os.path.join("volumes", group, subvol_name)
        uuid_str = str(uuid.uuid4())
        createpath = os.path.join(basepath, uuid_str)
        self.mount_a.run_shell(['mkdir', '-p', createpath])

        # create a v1 snapshot, to prevent auto upgrades
        if has_snapshot:
            snappath = os.path.join(createpath, ".snap", "fake")
            self.mount_a.run_shell(['mkdir', '-p', snappath])

        # add required xattrs to subvolume
        default_pool = self.mount_a.getfattr(".", "ceph.dir.layout.pool")
        self.mount_a.setfattr(createpath, 'ceph.dir.layout.pool', default_pool)

        # create a v1 .meta file
        meta_contents = "[GLOBAL]\nversion = 1\ntype = {0}\npath = {1}\nstate = {2}\n".format(subvol_type, "/" + createpath, state)
        if state == 'pending':
            # add a fake clone source
            meta_contents = meta_contents + '[source]\nvolume = fake\nsubvolume = fake\nsnapshot = fake\n'
        # write the metadata through the remote with root privileges
        meta_filepath1 = os.path.join(self.mount_a.mountpoint, basepath, ".meta")
        sudo_write_file(self.mount_a.client_remote, meta_filepath1, meta_contents)
        return createpath
340
341 def _update_fake_trash(self, subvol_name, subvol_group=None, trash_name='fake', create=True):
342 group = subvol_group if subvol_group is not None else '_nogroup'
343 trashpath = os.path.join("volumes", group, subvol_name, '.trash', trash_name)
344 if create:
345 self.mount_a.run_shell(['mkdir', '-p', trashpath])
346 else:
347 self.mount_a.run_shell(['rmdir', trashpath])
348
    def setUp(self):
        """Create/reuse a test volume and seed the randomized name counters."""
        super(TestVolumes, self).setUp()
        self.volname = None
        self.vol_created = False
        self._enable_multi_fs()
        self._create_or_reuse_test_volume()
        # volume removal during tests requires pool deletion to be allowed
        self.config_set('mon', 'mon_allow_pool_delete', True)
        # random starting offsets keep generated names unique across runs
        self.volume_start = random.randint(1, (1<<20))
        self.subvolume_start = random.randint(1, (1<<20))
        self.group_start = random.randint(1, (1<<20))
        self.snapshot_start = random.randint(1, (1<<20))
        self.clone_start = random.randint(1, (1<<20))
361
362 def tearDown(self):
363 if self.vol_created:
364 self._delete_test_volume()
365 super(TestVolumes, self).tearDown()
366
    def test_connection_expiration(self):
        """That the mgr's idle CephFS connection is evicted after expiry."""
        # unmount any cephfs mounts
        self.mount_a.umount_wait()
        sessions = self._session_list()
        self.assertLessEqual(len(sessions), 1) # maybe mgr is already mounted

        # Get the mgr to definitely mount cephfs
        subvolume = self._generate_random_subvolume_name()
        self._fs_cmd("subvolume", "create", self.volname, subvolume)
        sessions = self._session_list()
        self.assertEqual(len(sessions), 1)

        # Now wait for the mgr to expire the connection:
        self.wait_until_evicted(sessions[0]['id'], timeout=90)
381
382 def test_volume_create(self):
383 """
384 That the volume can be created and then cleans up
385 """
386 volname = self._generate_random_volume_name()
387 self._fs_cmd("volume", "create", volname)
388 volumels = json.loads(self._fs_cmd("volume", "ls"))
389
390 if not (volname in ([volume['name'] for volume in volumels])):
391 raise RuntimeError("Error creating volume '{0}'".format(volname))
392 else:
393 # clean up
394 self._fs_cmd("volume", "rm", volname, "--yes-i-really-mean-it")
395
    def test_volume_ls(self):
        """
        That the existing and the newly created volumes can be listed and
        finally cleans up.
        """
        vls = json.loads(self._fs_cmd("volume", "ls"))
        volumes = [volume['name'] for volume in vls]

        #create new volumes and add it to the existing list of volumes
        volumenames = self._generate_random_volume_name(3)
        for volumename in volumenames:
            self._fs_cmd("volume", "create", volumename)
        volumes.extend(volumenames)

        # list volumes
        try:
            volumels = json.loads(self._fs_cmd('volume', 'ls'))
            if len(volumels) == 0:
                raise RuntimeError("Expected the 'fs volume ls' command to list the created volumes.")
            else:
                volnames = [volume['name'] for volume in volumels]
                # compare as multisets — ordering is irrelevant, duplicates are not
                if collections.Counter(volnames) != collections.Counter(volumes):
                    raise RuntimeError("Error creating or listing volumes")
        finally:
            # clean up
            for volume in volumenames:
                self._fs_cmd("volume", "rm", volume, "--yes-i-really-mean-it")
423
    def test_volume_rm(self):
        """
        That the volume can only be removed when --yes-i-really-mean-it is used
        and verify that the deleted volume is not listed anymore.
        """
        # unmount clients so removal is not blocked by open sessions
        for m in self.mounts:
            m.umount_wait()
        try:
            self._fs_cmd("volume", "rm", self.volname)
        except CommandFailedError as ce:
            # removal without the confirmation flag must fail with EPERM
            if ce.exitstatus != errno.EPERM:
                raise RuntimeError("expected the 'fs volume rm' command to fail with EPERM, "
                                   "but it failed with {0}".format(ce.exitstatus))
            else:
                # retry with the confirmation flag; this must succeed
                self._fs_cmd("volume", "rm", self.volname, "--yes-i-really-mean-it")

                #check if it's gone
                volumes = json.loads(self._fs_cmd("volume", "ls", "--format=json-pretty"))
                if (self.volname in [volume['name'] for volume in volumes]):
                    raise RuntimeError("Expected the 'fs volume rm' command to succeed. "
                                       "The volume {0} not removed.".format(self.volname))
        else:
            # the unconfirmed removal unexpectedly succeeded
            raise RuntimeError("expected the 'fs volume rm' command to fail.")
447
    def test_subvolume_marked(self):
        """
        ensure a subvolume is marked with the ceph.dir.subvolume xattr
        """
        subvolume = self._generate_random_subvolume_name()

        # create subvolume
        self._fs_cmd("subvolume", "create", self.volname, subvolume)

        # getpath
        subvolpath = self._get_subvolume_path(self.volname, subvolume)

        # subdirectory of a subvolume cannot be moved outside the subvolume once marked with
        # the xattr ceph.dir.subvolume, hence test by attempting to rename subvol path (incarnation)
        # outside the subvolume
        dstpath = os.path.join(self.mount_a.mountpoint, 'volumes', '_nogroup', 'new_subvol_location')
        srcpath = os.path.join(self.mount_a.mountpoint, subvolpath)
        # the rename must fail with EXDEV (cross-boundary move rejected by the MDS)
        rename_script = dedent("""
            import os
            import errno
            try:
                os.rename("{src}", "{dst}")
            except OSError as e:
                if e.errno != errno.EXDEV:
                    raise RuntimeError("invalid error code on renaming subvolume incarnation out of subvolume directory")
            else:
                raise RuntimeError("expected renaming subvolume incarnation out of subvolume directory to fail")
            """)
        self.mount_a.run_python(rename_script.format(src=srcpath, dst=dstpath))

        # remove subvolume
        self._fs_cmd("subvolume", "rm", self.volname, subvolume)

        # verify trash dir is clean
        self._wait_for_trash_empty()
483
    def test_volume_rm_arbitrary_pool_removal(self):
        """
        That the arbitrary pool added to the volume out of band is removed
        successfully on volume removal.
        """
        # unmount clients so removal is not blocked by open sessions
        for m in self.mounts:
            m.umount_wait()
        new_pool = "new_pool"
        # add arbitrary data pool
        self.fs.add_data_pool(new_pool)
        # capture the pool list before removal for later comparison
        vol_status = json.loads(self._fs_cmd("status", self.volname, "--format=json-pretty"))
        self._fs_cmd("volume", "rm", self.volname, "--yes-i-really-mean-it")

        #check if fs is gone
        volumes = json.loads(self._fs_cmd("volume", "ls", "--format=json-pretty"))
        volnames = [volume['name'] for volume in volumes]
        self.assertNotIn(self.volname, volnames)

        #check if osd pools are gone
        pools = json.loads(self._raw_cmd("osd", "pool", "ls", "--format=json-pretty"))
        for pool in vol_status["pools"]:
            self.assertNotIn(pool["name"], pools)
506
    def test_volume_rm_when_mon_delete_pool_false(self):
        """
        That the volume can only be removed when mon_allow_pool_delete is set
        to true and verify that the pools are removed after volume deletion.
        """
        # unmount clients so removal is not blocked by open sessions
        for m in self.mounts:
            m.umount_wait()
        self.config_set('mon', 'mon_allow_pool_delete', False)
        try:
            self._fs_cmd("volume", "rm", self.volname, "--yes-i-really-mean-it")
        except CommandFailedError as ce:
            # with pool deletion disallowed, volume removal must fail with EPERM
            self.assertEqual(ce.exitstatus, errno.EPERM,
                             "expected the 'fs volume rm' command to fail with EPERM, "
                             "but it failed with {0}".format(ce.exitstatus))
        # capture the pool list, re-allow deletion, and remove for real
        vol_status = json.loads(self._fs_cmd("status", self.volname, "--format=json-pretty"))
        self.config_set('mon', 'mon_allow_pool_delete', True)
        self._fs_cmd("volume", "rm", self.volname, "--yes-i-really-mean-it")

        #check if fs is gone
        volumes = json.loads(self._fs_cmd("volume", "ls", "--format=json-pretty"))
        volnames = [volume['name'] for volume in volumes]
        self.assertNotIn(self.volname, volnames,
                         "volume {0} exists after removal".format(self.volname))
        #check if pools are gone
        pools = json.loads(self._raw_cmd("osd", "pool", "ls", "--format=json-pretty"))
        for pool in vol_status["pools"]:
            self.assertNotIn(pool["name"], pools,
                             "pool {0} exists after volume removal".format(pool["name"]))
535
536 ### basic subvolume operations
537
    def test_subvolume_create_and_rm(self):
        """That a subvolume can be created, located via getpath, and removed."""
        # create subvolume
        subvolume = self._generate_random_subvolume_name()
        self._fs_cmd("subvolume", "create", self.volname, subvolume)

        # make sure it exists
        subvolpath = self._fs_cmd("subvolume", "getpath", self.volname, subvolume)
        self.assertNotEqual(subvolpath, None)

        # remove subvolume
        self._fs_cmd("subvolume", "rm", self.volname, subvolume)
        # make sure its gone — getpath must now fail with ENOENT
        try:
            self._fs_cmd("subvolume", "getpath", self.volname, subvolume)
        except CommandFailedError as ce:
            if ce.exitstatus != errno.ENOENT:
                raise
        else:
            raise RuntimeError("expected the 'fs subvolume getpath' command to fail. Subvolume not removed.")

        # verify trash dir is clean
        self._wait_for_trash_empty()
560
    def test_subvolume_expand(self):
        """
        That a subvolume can be expanded in size and its quota matches the expected size.
        """

        # create subvolume
        subvolname = self._generate_random_subvolume_name()
        osize = self.DEFAULT_FILE_SIZE*1024*1024
        self._fs_cmd("subvolume", "create", self.volname, subvolname, "--size", str(osize))

        # make sure it exists
        subvolpath = self._get_subvolume_path(self.volname, subvolname)
        self.assertNotEqual(subvolpath, None)

        # expand the subvolume to double the original size
        nsize = osize*2
        self._fs_cmd("subvolume", "resize", self.volname, subvolname, str(nsize))

        # verify the quota xattr reflects the new size
        size = int(self.mount_a.getfattr(subvolpath, "ceph.quota.max_bytes"))
        self.assertEqual(size, nsize)

        # remove subvolume
        self._fs_cmd("subvolume", "rm", self.volname, subvolname)

        # verify trash dir is clean
        self._wait_for_trash_empty()
588
    def test_subvolume_shrink(self):
        """
        That a subvolume can be shrinked in size and its quota matches the expected size.
        """

        # create subvolume
        subvolname = self._generate_random_subvolume_name()
        osize = self.DEFAULT_FILE_SIZE*1024*1024
        self._fs_cmd("subvolume", "create", self.volname, subvolname, "--size", str(osize))

        # make sure it exists
        subvolpath = self._get_subvolume_path(self.volname, subvolname)
        self.assertNotEqual(subvolpath, None)

        # shrink the subvolume to half the original size
        nsize = osize // 2
        self._fs_cmd("subvolume", "resize", self.volname, subvolname, str(nsize))

        # verify the quota xattr reflects the new size
        size = int(self.mount_a.getfattr(subvolpath, "ceph.quota.max_bytes"))
        self.assertEqual(size, nsize)

        # remove subvolume
        self._fs_cmd("subvolume", "rm", self.volname, subvolname)

        # verify trash dir is clean
        self._wait_for_trash_empty()
616
    def test_subvolume_resize_fail_invalid_size(self):
        """
        That a subvolume cannot be resized to an invalid size and the quota did not change
        """

        osize = self.DEFAULT_FILE_SIZE*1024*1024
        # create subvolume
        subvolname = self._generate_random_subvolume_name()
        self._fs_cmd("subvolume", "create", self.volname, subvolname, "--size", str(osize))

        # make sure it exists
        subvolpath = self._get_subvolume_path(self.volname, subvolname)
        self.assertNotEqual(subvolpath, None)

        # try to resize the subvolume with an invalid size -10
        nsize = -10
        try:
            self._fs_cmd("subvolume", "resize", self.volname, subvolname, str(nsize))
        except CommandFailedError as ce:
            # negative size must be rejected with EINVAL
            self.assertEqual(ce.exitstatus, errno.EINVAL, "invalid error code on resize of subvolume with invalid size")
        else:
            self.fail("expected the 'fs subvolume resize' command to fail")

        # verify the quota did not change
        size = int(self.mount_a.getfattr(subvolpath, "ceph.quota.max_bytes"))
        self.assertEqual(size, osize)

        # remove subvolume
        self._fs_cmd("subvolume", "rm", self.volname, subvolname)

        # verify trash dir is clean
        self._wait_for_trash_empty()
649
    def test_subvolume_resize_fail_zero_size(self):
        """
        That a subvolume cannot be resized to a zero size and the quota did not change
        """

        osize = self.DEFAULT_FILE_SIZE*1024*1024
        # create subvolume
        subvolname = self._generate_random_subvolume_name()
        self._fs_cmd("subvolume", "create", self.volname, subvolname, "--size", str(osize))

        # make sure it exists
        subvolpath = self._get_subvolume_path(self.volname, subvolname)
        self.assertNotEqual(subvolpath, None)

        # try to resize the subvolume with size 0
        nsize = 0
        try:
            self._fs_cmd("subvolume", "resize", self.volname, subvolname, str(nsize))
        except CommandFailedError as ce:
            # zero size must be rejected with EINVAL
            self.assertEqual(ce.exitstatus, errno.EINVAL, "invalid error code on resize of subvolume with invalid size")
        else:
            self.fail("expected the 'fs subvolume resize' command to fail")

        # verify the quota did not change
        size = int(self.mount_a.getfattr(subvolpath, "ceph.quota.max_bytes"))
        self.assertEqual(size, osize)

        # remove subvolume
        self._fs_cmd("subvolume", "rm", self.volname, subvolname)

        # verify trash dir is clean
        self._wait_for_trash_empty()
682
    def test_subvolume_resize_quota_lt_used_size(self):
        """
        That a subvolume can be resized to a size smaller than the current used size
        and the resulting quota matches the expected size.
        """

        osize = self.DEFAULT_FILE_SIZE*1024*1024*20
        # create subvolume
        subvolname = self._generate_random_subvolume_name()
        self._fs_cmd("subvolume", "create", self.volname, subvolname, "--size", str(osize))

        # make sure it exists
        subvolpath = self._get_subvolume_path(self.volname, subvolname)
        self.assertNotEqual(subvolpath, None)

        # create one file of 10MB
        file_size=self.DEFAULT_FILE_SIZE*10
        number_of_files=1
        log.debug("filling subvolume {0} with {1} file of size {2}MB".format(subvolname,
                                                                             number_of_files,
                                                                             file_size))
        filename = "{0}.{1}".format(TestVolumes.TEST_FILE_NAME_PREFIX, self.DEFAULT_NUMBER_OF_FILES+1)
        self.mount_a.write_n_mb(os.path.join(subvolpath, filename), file_size)

        # sanity: recursive byte count matches stat of the subvolume root
        usedsize = int(self.mount_a.getfattr(subvolpath, "ceph.dir.rbytes"))
        susedsize = int(self.mount_a.run_shell(['stat', '-c' '%s', subvolpath]).stdout.getvalue().strip())
        self.assertEqual(usedsize, susedsize)

        # shrink the subvolume below the used size — allowed without --no_shrink
        nsize = usedsize // 2
        try:
            self._fs_cmd("subvolume", "resize", self.volname, subvolname, str(nsize))
        except CommandFailedError:
            self.fail("expected the 'fs subvolume resize' command to succeed")

        # verify the quota
        size = int(self.mount_a.getfattr(subvolpath, "ceph.quota.max_bytes"))
        self.assertEqual(size, nsize)

        # remove subvolume
        self._fs_cmd("subvolume", "rm", self.volname, subvolname)

        # verify trash dir is clean
        self._wait_for_trash_empty()
727
728
    def test_subvolume_resize_fail_quota_lt_used_size_no_shrink(self):
        """
        That a subvolume cannot be resized to a size smaller than the current used size
        when --no_shrink is given and the quota did not change.
        """

        osize = self.DEFAULT_FILE_SIZE*1024*1024*20
        # create subvolume
        subvolname = self._generate_random_subvolume_name()
        self._fs_cmd("subvolume", "create", self.volname, subvolname, "--size", str(osize))

        # make sure it exists
        subvolpath = self._get_subvolume_path(self.volname, subvolname)
        self.assertNotEqual(subvolpath, None)

        # create one file of 10MB
        file_size=self.DEFAULT_FILE_SIZE*10
        number_of_files=1
        log.debug("filling subvolume {0} with {1} file of size {2}MB".format(subvolname,
                                                                             number_of_files,
                                                                             file_size))
        filename = "{0}.{1}".format(TestVolumes.TEST_FILE_NAME_PREFIX, self.DEFAULT_NUMBER_OF_FILES+2)
        self.mount_a.write_n_mb(os.path.join(subvolpath, filename), file_size)

        # sanity: recursive byte count matches stat of the subvolume root
        usedsize = int(self.mount_a.getfattr(subvolpath, "ceph.dir.rbytes"))
        susedsize = int(self.mount_a.run_shell(['stat', '-c' '%s', subvolpath]).stdout.getvalue().strip())
        self.assertEqual(usedsize, susedsize)

        # attempt to shrink below used size with --no_shrink — must be rejected
        nsize = usedsize // 2
        try:
            self._fs_cmd("subvolume", "resize", self.volname, subvolname, str(nsize), "--no_shrink")
        except CommandFailedError as ce:
            self.assertEqual(ce.exitstatus, errno.EINVAL, "invalid error code on resize of subvolume with invalid size")
        else:
            self.fail("expected the 'fs subvolume resize' command to fail")

        # verify the quota did not change
        size = int(self.mount_a.getfattr(subvolpath, "ceph.quota.max_bytes"))
        self.assertEqual(size, osize)

        # remove subvolume
        self._fs_cmd("subvolume", "rm", self.volname, subvolname)

        # verify trash dir is clean
        self._wait_for_trash_empty()
775
    def test_subvolume_resize_expand_on_full_subvolume(self):
        """
        That the subvolume can be expanded from a full subvolume and future writes succeed.
        """

        osize = self.DEFAULT_FILE_SIZE*1024*1024*10
        # create subvolume of quota 10MB and make sure it exists
        subvolname = self._generate_random_subvolume_name()
        self._fs_cmd("subvolume", "create", self.volname, subvolname, "--size", str(osize))
        subvolpath = self._get_subvolume_path(self.volname, subvolname)
        self.assertNotEqual(subvolpath, None)

        # create one file of size 10MB and write — fills the subvolume to quota
        file_size=self.DEFAULT_FILE_SIZE*10
        number_of_files=1
        log.debug("filling subvolume {0} with {1} file of size {2}MB".format(subvolname,
                                                                             number_of_files,
                                                                             file_size))
        filename = "{0}.{1}".format(TestVolumes.TEST_FILE_NAME_PREFIX, self.DEFAULT_NUMBER_OF_FILES+3)
        self.mount_a.write_n_mb(os.path.join(subvolpath, filename), file_size)

        # create a file of size 5MB and try write more — expected to hit the quota
        file_size=file_size // 2
        number_of_files=1
        log.debug("filling subvolume {0} with {1} file of size {2}MB".format(subvolname,
                                                                             number_of_files,
                                                                             file_size))
        filename = "{0}.{1}".format(TestVolumes.TEST_FILE_NAME_PREFIX, self.DEFAULT_NUMBER_OF_FILES+4)
        try:
            self.mount_a.write_n_mb(os.path.join(subvolpath, filename), file_size)
        except CommandFailedError:
            # Not able to write. So expand the subvolume more and try writing the 5MB file again
            nsize = osize*2
            self._fs_cmd("subvolume", "resize", self.volname, subvolname, str(nsize))
            try:
                self.mount_a.write_n_mb(os.path.join(subvolpath, filename), file_size)
            except CommandFailedError:
                self.fail("expected filling subvolume {0} with {1} file of size {2}MB"
                          "to succeed".format(subvolname, number_of_files, file_size))
        else:
            # the write on a full subvolume should not have succeeded
            self.fail("expected filling subvolume {0} with {1} file of size {2}MB"
                      "to fail".format(subvolname, number_of_files, file_size))

        # remove subvolume
        self._fs_cmd("subvolume", "rm", self.volname, subvolname)

        # verify trash dir is clean
        self._wait_for_trash_empty()
824
825 def test_subvolume_create_idempotence(self):
826 # create subvolume
827 subvolume = self._generate_random_subvolume_name()
828 self._fs_cmd("subvolume", "create", self.volname, subvolume)
829
830 # try creating w/ same subvolume name -- should be idempotent
831 self._fs_cmd("subvolume", "create", self.volname, subvolume)
832
833 # remove subvolume
834 self._fs_cmd("subvolume", "rm", self.volname, subvolume)
835
836 # verify trash dir is clean
837 self._wait_for_trash_empty()
838
839 def test_subvolume_create_idempotence_resize(self):
840 # create subvolume
841 subvolume = self._generate_random_subvolume_name()
842 self._fs_cmd("subvolume", "create", self.volname, subvolume)
843
844 # try creating w/ same subvolume name with size -- should set quota
845 self._fs_cmd("subvolume", "create", self.volname, subvolume, "1000000000")
846
847 # get subvolume metadata
848 subvol_info = json.loads(self._get_subvolume_info(self.volname, subvolume))
849 self.assertEqual(subvol_info["bytes_quota"], 1000000000)
850
851 # remove subvolume
852 self._fs_cmd("subvolume", "rm", self.volname, subvolume)
853
854 # verify trash dir is clean
855 self._wait_for_trash_empty()
856
857 def test_subvolume_pin_export(self):
858 self.fs.set_max_mds(2)
859 status = self.fs.wait_for_daemons()
860
861 subvolume = self._generate_random_subvolume_name()
862 self._fs_cmd("subvolume", "create", self.volname, subvolume)
863 self._fs_cmd("subvolume", "pin", self.volname, subvolume, "export", "1")
864 path = self._fs_cmd("subvolume", "getpath", self.volname, subvolume)
865 path = os.path.dirname(path) # get subvolume path
866
867 self._get_subtrees(status=status, rank=1)
868 self._wait_subtrees([(path, 1)], status=status)
869
870 # remove subvolume
871 self._fs_cmd("subvolume", "rm", self.volname, subvolume)
872
873 # verify trash dir is clean
874 self._wait_for_trash_empty()
875
876 def test_subvolumegroup_pin_distributed(self):
877 self.fs.set_max_mds(2)
878 status = self.fs.wait_for_daemons()
879 self.config_set('mds', 'mds_export_ephemeral_distributed', True)
880
881 group = "pinme"
882 self._fs_cmd("subvolumegroup", "create", self.volname, group)
883 self._fs_cmd("subvolumegroup", "pin", self.volname, group, "distributed", "True")
884 # (no effect on distribution) pin the group directory to 0 so rank 0 has all subtree bounds visible
885 self._fs_cmd("subvolumegroup", "pin", self.volname, group, "export", "0")
886 subvolumes = self._generate_random_subvolume_name(10)
887 for subvolume in subvolumes:
888 self._fs_cmd("subvolume", "create", self.volname, subvolume, "--group_name", group)
889 self._wait_distributed_subtrees(10, status=status)
890
891 # remove subvolumes
892 for subvolume in subvolumes:
893 self._fs_cmd("subvolume", "rm", self.volname, subvolume, group)
894
895 # verify trash dir is clean
896 self._wait_for_trash_empty()
897
898 def test_subvolume_pin_random(self):
899 self.fs.set_max_mds(2)
900 self.fs.wait_for_daemons()
901 self.config_set('mds', 'mds_export_ephemeral_random', True)
902
903 subvolume = self._generate_random_subvolume_name()
904 self._fs_cmd("subvolume", "create", self.volname, subvolume)
905 self._fs_cmd("subvolume", "pin", self.volname, subvolume, "random", ".01")
906 # no verification
907
908 # remove subvolume
909 self._fs_cmd("subvolume", "rm", self.volname, subvolume)
910
911 # verify trash dir is clean
912 self._wait_for_trash_empty()
913
914 def test_subvolume_create_isolated_namespace(self):
915 """
916 Create subvolume in separate rados namespace
917 """
918
919 # create subvolume
920 subvolume = self._generate_random_subvolume_name()
921 self._fs_cmd("subvolume", "create", self.volname, subvolume, "--namespace-isolated")
922
923 # get subvolume metadata
924 subvol_info = json.loads(self._get_subvolume_info(self.volname, subvolume))
925 self.assertNotEqual(len(subvol_info), 0)
926 self.assertEqual(subvol_info["pool_namespace"], "fsvolumens_" + subvolume)
927
928 # remove subvolumes
929 self._fs_cmd("subvolume", "rm", self.volname, subvolume)
930
931 # verify trash dir is clean
932 self._wait_for_trash_empty()
933
934 def test_subvolume_create_with_invalid_data_pool_layout(self):
935 subvolume = self._generate_random_subvolume_name()
936 data_pool = "invalid_pool"
937 # create subvolume with invalid data pool layout
938 try:
939 self._fs_cmd("subvolume", "create", self.volname, subvolume, "--pool_layout", data_pool)
940 except CommandFailedError as ce:
941 self.assertEqual(ce.exitstatus, errno.EINVAL, "invalid error code on create of subvolume with invalid pool layout")
942 else:
943 self.fail("expected the 'fs subvolume create' command to fail")
944
945 # verify trash dir is clean
946 self._wait_for_trash_empty()
947
948 def test_subvolume_rm_force(self):
949 # test removing non-existing subvolume with --force
950 subvolume = self._generate_random_subvolume_name()
951 try:
952 self._fs_cmd("subvolume", "rm", self.volname, subvolume, "--force")
953 except CommandFailedError:
954 self.fail("expected the 'fs subvolume rm --force' command to succeed")
955
956 def test_subvolume_create_with_auto_cleanup_on_fail(self):
957 subvolume = self._generate_random_subvolume_name()
958 data_pool = "invalid_pool"
959 # create subvolume with invalid data pool layout fails
960 with self.assertRaises(CommandFailedError):
961 self._fs_cmd("subvolume", "create", self.volname, subvolume, "--pool_layout", data_pool)
962
963 # check whether subvol path is cleaned up
964 try:
965 self._fs_cmd("subvolume", "getpath", self.volname, subvolume)
966 except CommandFailedError as ce:
967 self.assertEqual(ce.exitstatus, errno.ENOENT, "invalid error code on getpath of non-existent subvolume")
968 else:
969 self.fail("expected the 'fs subvolume getpath' command to fail")
970
971 # verify trash dir is clean
972 self._wait_for_trash_empty()
973
974 def test_subvolume_create_with_invalid_size(self):
975 # create subvolume with an invalid size -1
976 subvolume = self._generate_random_subvolume_name()
977 try:
978 self._fs_cmd("subvolume", "create", self.volname, subvolume, "--size", "-1")
979 except CommandFailedError as ce:
980 self.assertEqual(ce.exitstatus, errno.EINVAL, "invalid error code on create of subvolume with invalid size")
981 else:
982 self.fail("expected the 'fs subvolume create' command to fail")
983
984 # verify trash dir is clean
985 self._wait_for_trash_empty()
986
987 def test_nonexistent_subvolume_rm(self):
988 # remove non-existing subvolume
989 subvolume = "non_existent_subvolume"
990
991 # try, remove subvolume
992 try:
993 self._fs_cmd("subvolume", "rm", self.volname, subvolume)
994 except CommandFailedError as ce:
995 if ce.exitstatus != errno.ENOENT:
996 raise
997 else:
998 raise RuntimeError("expected the 'fs subvolume rm' command to fail")
999
1000 def test_nonexistent_subvolume_group_create(self):
1001 subvolume = self._generate_random_subvolume_name()
1002 group = "non_existent_group"
1003
1004 # try, creating subvolume in a nonexistent group
1005 try:
1006 self._fs_cmd("subvolume", "create", self.volname, subvolume, "--group_name", group)
1007 except CommandFailedError as ce:
1008 if ce.exitstatus != errno.ENOENT:
1009 raise
1010 else:
1011 raise RuntimeError("expected the 'fs subvolume create' command to fail")
1012
1013 def test_default_uid_gid_subvolume(self):
1014 subvolume = self._generate_random_subvolume_name()
1015 expected_uid = 0
1016 expected_gid = 0
1017
1018 # create subvolume
1019 self._fs_cmd("subvolume", "create", self.volname, subvolume)
1020 subvol_path = self._get_subvolume_path(self.volname, subvolume)
1021
1022 # check subvolume's uid and gid
1023 stat = self.mount_a.stat(subvol_path)
1024 self.assertEqual(stat['st_uid'], expected_uid)
1025 self.assertEqual(stat['st_gid'], expected_gid)
1026
1027 # remove subvolume
1028 self._fs_cmd("subvolume", "rm", self.volname, subvolume)
1029
1030 # verify trash dir is clean
1031 self._wait_for_trash_empty()
1032
1033 def test_subvolume_ls(self):
1034 # tests the 'fs subvolume ls' command
1035
1036 subvolumes = []
1037
1038 # create subvolumes
1039 subvolumes = self._generate_random_subvolume_name(3)
1040 for subvolume in subvolumes:
1041 self._fs_cmd("subvolume", "create", self.volname, subvolume)
1042
1043 # list subvolumes
1044 subvolumels = json.loads(self._fs_cmd('subvolume', 'ls', self.volname))
1045 if len(subvolumels) == 0:
1046 self.fail("Expected the 'fs subvolume ls' command to list the created subvolumes.")
1047 else:
1048 subvolnames = [subvolume['name'] for subvolume in subvolumels]
1049 if collections.Counter(subvolnames) != collections.Counter(subvolumes):
1050 self.fail("Error creating or listing subvolumes")
1051
1052 # remove subvolume
1053 for subvolume in subvolumes:
1054 self._fs_cmd("subvolume", "rm", self.volname, subvolume)
1055
1056 # verify trash dir is clean
1057 self._wait_for_trash_empty()
1058
1059 def test_subvolume_ls_for_notexistent_default_group(self):
1060 # tests the 'fs subvolume ls' command when the default group '_nogroup' doesn't exist
1061 # prerequisite: we expect that the volume is created and the default group _nogroup is
1062 # NOT created (i.e. a subvolume without group is not created)
1063
1064 # list subvolumes
1065 subvolumels = json.loads(self._fs_cmd('subvolume', 'ls', self.volname))
1066 if len(subvolumels) > 0:
1067 raise RuntimeError("Expected the 'fs subvolume ls' command to output an empty list.")
1068
1069 def test_subvolume_resize_infinite_size(self):
1070 """
1071 That a subvolume can be resized to an infinite size by unsetting its quota.
1072 """
1073
1074 # create subvolume
1075 subvolname = self._generate_random_subvolume_name()
1076 self._fs_cmd("subvolume", "create", self.volname, subvolname, "--size",
1077 str(self.DEFAULT_FILE_SIZE*1024*1024))
1078
1079 # make sure it exists
1080 subvolpath = self._get_subvolume_path(self.volname, subvolname)
1081 self.assertNotEqual(subvolpath, None)
1082
1083 # resize inf
1084 self._fs_cmd("subvolume", "resize", self.volname, subvolname, "inf")
1085
1086 # verify that the quota is None
1087 size = self.mount_a.getfattr(subvolpath, "ceph.quota.max_bytes")
1088 self.assertEqual(size, None)
1089
1090 # remove subvolume
1091 self._fs_cmd("subvolume", "rm", self.volname, subvolname)
1092
1093 # verify trash dir is clean
1094 self._wait_for_trash_empty()
1095
1096 def test_subvolume_resize_infinite_size_future_writes(self):
1097 """
1098 That a subvolume can be resized to an infinite size and the future writes succeed.
1099 """
1100
1101 # create subvolume
1102 subvolname = self._generate_random_subvolume_name()
1103 self._fs_cmd("subvolume", "create", self.volname, subvolname, "--size",
1104 str(self.DEFAULT_FILE_SIZE*1024*1024*5))
1105
1106 # make sure it exists
1107 subvolpath = self._get_subvolume_path(self.volname, subvolname)
1108 self.assertNotEqual(subvolpath, None)
1109
1110 # resize inf
1111 self._fs_cmd("subvolume", "resize", self.volname, subvolname, "inf")
1112
1113 # verify that the quota is None
1114 size = self.mount_a.getfattr(subvolpath, "ceph.quota.max_bytes")
1115 self.assertEqual(size, None)
1116
1117 # create one file of 10MB and try to write
1118 file_size=self.DEFAULT_FILE_SIZE*10
1119 number_of_files=1
1120 log.debug("filling subvolume {0} with {1} file of size {2}MB".format(subvolname,
1121 number_of_files,
1122 file_size))
1123 filename = "{0}.{1}".format(TestVolumes.TEST_FILE_NAME_PREFIX, self.DEFAULT_NUMBER_OF_FILES+5)
1124
1125 try:
1126 self.mount_a.write_n_mb(os.path.join(subvolpath, filename), file_size)
1127 except CommandFailedError:
1128 self.fail("expected filling subvolume {0} with {1} file of size {2}MB "
1129 "to succeed".format(subvolname, number_of_files, file_size))
1130
1131 # remove subvolume
1132 self._fs_cmd("subvolume", "rm", self.volname, subvolname)
1133
1134 # verify trash dir is clean
1135 self._wait_for_trash_empty()
1136
1137 def test_subvolume_info(self):
1138 # tests the 'fs subvolume info' command
1139
1140 subvol_md = ["atime", "bytes_pcent", "bytes_quota", "bytes_used", "created_at", "ctime",
1141 "data_pool", "gid", "mode", "mon_addrs", "mtime", "path", "pool_namespace",
1142 "type", "uid", "features", "state"]
1143
1144 # create subvolume
1145 subvolume = self._generate_random_subvolume_name()
1146 self._fs_cmd("subvolume", "create", self.volname, subvolume)
1147
1148 # get subvolume metadata
1149 subvol_info = json.loads(self._get_subvolume_info(self.volname, subvolume))
1150 for md in subvol_md:
1151 self.assertIn(md, subvol_info, "'{0}' key not present in metadata of subvolume".format(md))
1152
1153 self.assertEqual(subvol_info["bytes_pcent"], "undefined", "bytes_pcent should be set to undefined if quota is not set")
1154 self.assertEqual(subvol_info["bytes_quota"], "infinite", "bytes_quota should be set to infinite if quota is not set")
1155 self.assertEqual(subvol_info["pool_namespace"], "", "expected pool namespace to be empty")
1156 self.assertEqual(subvol_info["state"], "complete", "expected state to be complete")
1157
1158 self.assertEqual(len(subvol_info["features"]), 3,
1159 msg="expected 3 features, found '{0}' ({1})".format(len(subvol_info["features"]), subvol_info["features"]))
1160 for feature in ['snapshot-clone', 'snapshot-autoprotect', 'snapshot-retention']:
1161 self.assertIn(feature, subvol_info["features"], msg="expected feature '{0}' in subvolume".format(feature))
1162
1163 nsize = self.DEFAULT_FILE_SIZE*1024*1024
1164 self._fs_cmd("subvolume", "resize", self.volname, subvolume, str(nsize))
1165
1166 # get subvolume metadata after quota set
1167 subvol_info = json.loads(self._get_subvolume_info(self.volname, subvolume))
1168 for md in subvol_md:
1169 self.assertIn(md, subvol_info, "'{0}' key not present in metadata of subvolume".format(md))
1170
1171 self.assertNotEqual(subvol_info["bytes_pcent"], "undefined", "bytes_pcent should not be set to undefined if quota is not set")
1172 self.assertEqual(subvol_info["bytes_quota"], nsize, "bytes_quota should be set to '{0}'".format(nsize))
1173 self.assertEqual(subvol_info["type"], "subvolume", "type should be set to subvolume")
1174 self.assertEqual(subvol_info["state"], "complete", "expected state to be complete")
1175
1176 self.assertEqual(len(subvol_info["features"]), 3,
1177 msg="expected 3 features, found '{0}' ({1})".format(len(subvol_info["features"]), subvol_info["features"]))
1178 for feature in ['snapshot-clone', 'snapshot-autoprotect', 'snapshot-retention']:
1179 self.assertIn(feature, subvol_info["features"], msg="expected feature '{0}' in subvolume".format(feature))
1180
1181 # remove subvolumes
1182 self._fs_cmd("subvolume", "rm", self.volname, subvolume)
1183
1184 # verify trash dir is clean
1185 self._wait_for_trash_empty()
1186
    def test_clone_subvolume_info(self):
        """Verify 'fs subvolume info' output for a cloned subvolume."""

        # tests the 'fs subvolume info' command for a clone
        # keys expected in the info output of a clone (no "features"/"state" here)
        subvol_md = ["atime", "bytes_pcent", "bytes_quota", "bytes_used", "created_at", "ctime",
                     "data_pool", "gid", "mode", "mon_addrs", "mtime", "path", "pool_namespace",
                     "type", "uid"]

        subvolume = self._generate_random_subvolume_name()
        snapshot = self._generate_random_snapshot_name()
        clone = self._generate_random_clone_name()

        # create subvolume
        self._fs_cmd("subvolume", "create", self.volname, subvolume)

        # do some IO so the snapshot/clone carries real data
        self._do_subvolume_io(subvolume, number_of_files=1)

        # snapshot subvolume
        self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolume, snapshot)

        # schedule a clone
        self._fs_cmd("subvolume", "snapshot", "clone", self.volname, subvolume, snapshot, clone)

        # check clone status -- wait until the clone state reaches "complete"
        self._wait_for_clone_to_complete(clone)

        # remove snapshot (safe only after the clone completed)
        self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume, snapshot)

        subvol_info = json.loads(self._get_subvolume_info(self.volname, clone))
        if len(subvol_info) == 0:
            raise RuntimeError("Expected the 'fs subvolume info' command to list metadata of subvolume")
        for md in subvol_md:
            if md not in subvol_info.keys():
                raise RuntimeError("%s not present in the metadata of subvolume" % md)
        # a cloned subvolume must report type "clone", not "subvolume"
        if subvol_info["type"] != "clone":
            raise RuntimeError("type should be set to clone")

        # remove subvolumes
        self._fs_cmd("subvolume", "rm", self.volname, subvolume)
        self._fs_cmd("subvolume", "rm", self.volname, clone)

        # verify trash dir is clean
        self._wait_for_trash_empty()
1231
1232
1233 ### subvolume group operations
1234
1235 def test_subvolume_create_and_rm_in_group(self):
1236 subvolume = self._generate_random_subvolume_name()
1237 group = self._generate_random_group_name()
1238
1239 # create group
1240 self._fs_cmd("subvolumegroup", "create", self.volname, group)
1241
1242 # create subvolume in group
1243 self._fs_cmd("subvolume", "create", self.volname, subvolume, "--group_name", group)
1244
1245 # remove subvolume
1246 self._fs_cmd("subvolume", "rm", self.volname, subvolume, group)
1247
1248 # verify trash dir is clean
1249 self._wait_for_trash_empty()
1250
1251 # remove group
1252 self._fs_cmd("subvolumegroup", "rm", self.volname, group)
1253
1254 def test_subvolume_group_create_with_desired_data_pool_layout(self):
1255 group1, group2 = self._generate_random_group_name(2)
1256
1257 # create group
1258 self._fs_cmd("subvolumegroup", "create", self.volname, group1)
1259 group1_path = self._get_subvolume_group_path(self.volname, group1)
1260
1261 default_pool = self.mount_a.getfattr(group1_path, "ceph.dir.layout.pool")
1262 new_pool = "new_pool"
1263 self.assertNotEqual(default_pool, new_pool)
1264
1265 # add data pool
1266 self.fs.add_data_pool(new_pool)
1267
1268 # create group specifying the new data pool as its pool layout
1269 self._fs_cmd("subvolumegroup", "create", self.volname, group2,
1270 "--pool_layout", new_pool)
1271 group2_path = self._get_subvolume_group_path(self.volname, group2)
1272
1273 desired_pool = self.mount_a.getfattr(group2_path, "ceph.dir.layout.pool")
1274 self.assertEqual(desired_pool, new_pool)
1275
1276 self._fs_cmd("subvolumegroup", "rm", self.volname, group1)
1277 self._fs_cmd("subvolumegroup", "rm", self.volname, group2)
1278
1279 def test_subvolume_group_create_with_invalid_data_pool_layout(self):
1280 group = self._generate_random_group_name()
1281 data_pool = "invalid_pool"
1282 # create group with invalid data pool layout
1283 try:
1284 self._fs_cmd("subvolumegroup", "create", self.volname, group, "--pool_layout", data_pool)
1285 except CommandFailedError as ce:
1286 if ce.exitstatus != errno.EINVAL:
1287 raise
1288 else:
1289 raise RuntimeError("expected the 'fs subvolumegroup create' command to fail")
1290
1291 def test_subvolume_group_rm_force(self):
1292 # test removing non-existing subvolume group with --force
1293 group = self._generate_random_group_name()
1294 try:
1295 self._fs_cmd("subvolumegroup", "rm", self.volname, group, "--force")
1296 except CommandFailedError:
1297 raise RuntimeError("expected the 'fs subvolumegroup rm --force' command to succeed")
1298
1299 def test_subvolume_group_create_with_auto_cleanup_on_fail(self):
1300 group = self._generate_random_group_name()
1301 data_pool = "invalid_pool"
1302 # create group with invalid data pool layout
1303 with self.assertRaises(CommandFailedError):
1304 self._fs_cmd("subvolumegroup", "create", self.volname, group, "--pool_layout", data_pool)
1305
1306 # check whether group path is cleaned up
1307 try:
1308 self._fs_cmd("subvolumegroup", "getpath", self.volname, group)
1309 except CommandFailedError as ce:
1310 if ce.exitstatus != errno.ENOENT:
1311 raise
1312 else:
1313 raise RuntimeError("expected the 'fs subvolumegroup getpath' command to fail")
1314
1315 def test_subvolume_create_with_desired_data_pool_layout_in_group(self):
1316 subvol1, subvol2 = self._generate_random_subvolume_name(2)
1317 group = self._generate_random_group_name()
1318
1319 # create group. this also helps set default pool layout for subvolumes
1320 # created within the group.
1321 self._fs_cmd("subvolumegroup", "create", self.volname, group)
1322
1323 # create subvolume in group.
1324 self._fs_cmd("subvolume", "create", self.volname, subvol1, "--group_name", group)
1325 subvol1_path = self._get_subvolume_path(self.volname, subvol1, group_name=group)
1326
1327 default_pool = self.mount_a.getfattr(subvol1_path, "ceph.dir.layout.pool")
1328 new_pool = "new_pool"
1329 self.assertNotEqual(default_pool, new_pool)
1330
1331 # add data pool
1332 self.fs.add_data_pool(new_pool)
1333
1334 # create subvolume specifying the new data pool as its pool layout
1335 self._fs_cmd("subvolume", "create", self.volname, subvol2, "--group_name", group,
1336 "--pool_layout", new_pool)
1337 subvol2_path = self._get_subvolume_path(self.volname, subvol2, group_name=group)
1338
1339 desired_pool = self.mount_a.getfattr(subvol2_path, "ceph.dir.layout.pool")
1340 self.assertEqual(desired_pool, new_pool)
1341
1342 self._fs_cmd("subvolume", "rm", self.volname, subvol2, group)
1343 self._fs_cmd("subvolume", "rm", self.volname, subvol1, group)
1344 self._fs_cmd("subvolumegroup", "rm", self.volname, group)
1345
1346 # verify trash dir is clean
1347 self._wait_for_trash_empty()
1348
1349 def test_subvolume_group_create_with_desired_mode(self):
1350 group1, group2 = self._generate_random_group_name(2)
1351 # default mode
1352 expected_mode1 = "755"
1353 # desired mode
1354 expected_mode2 = "777"
1355
1356 # create group
1357 self._fs_cmd("subvolumegroup", "create", self.volname, group1)
1358 self._fs_cmd("subvolumegroup", "create", self.volname, group2, "--mode", "777")
1359
1360 group1_path = self._get_subvolume_group_path(self.volname, group1)
1361 group2_path = self._get_subvolume_group_path(self.volname, group2)
1362
1363 # check group's mode
1364 actual_mode1 = self.mount_a.run_shell(['stat', '-c' '%a', group1_path]).stdout.getvalue().strip()
1365 actual_mode2 = self.mount_a.run_shell(['stat', '-c' '%a', group2_path]).stdout.getvalue().strip()
1366 self.assertEqual(actual_mode1, expected_mode1)
1367 self.assertEqual(actual_mode2, expected_mode2)
1368
1369 self._fs_cmd("subvolumegroup", "rm", self.volname, group1)
1370 self._fs_cmd("subvolumegroup", "rm", self.volname, group2)
1371
1372 def test_subvolume_group_create_with_desired_uid_gid(self):
1373 """
1374 That the subvolume group can be created with the desired uid and gid and its uid and gid matches the
1375 expected values.
1376 """
1377 uid = 1000
1378 gid = 1000
1379
1380 # create subvolume group
1381 subvolgroupname = self._generate_random_group_name()
1382 self._fs_cmd("subvolumegroup", "create", self.volname, subvolgroupname, "--uid", str(uid), "--gid", str(gid))
1383
1384 # make sure it exists
1385 subvolgrouppath = self._get_subvolume_group_path(self.volname, subvolgroupname)
1386 self.assertNotEqual(subvolgrouppath, None)
1387
1388 # verify the uid and gid
1389 suid = int(self.mount_a.run_shell(['stat', '-c' '%u', subvolgrouppath]).stdout.getvalue().strip())
1390 sgid = int(self.mount_a.run_shell(['stat', '-c' '%g', subvolgrouppath]).stdout.getvalue().strip())
1391 self.assertEqual(uid, suid)
1392 self.assertEqual(gid, sgid)
1393
1394 # remove group
1395 self._fs_cmd("subvolumegroup", "rm", self.volname, subvolgroupname)
1396
1397 def test_subvolume_create_with_desired_mode_in_group(self):
1398 subvol1, subvol2, subvol3 = self._generate_random_subvolume_name(3)
1399
1400 group = self._generate_random_group_name()
1401 # default mode
1402 expected_mode1 = "755"
1403 # desired mode
1404 expected_mode2 = "777"
1405
1406 # create group
1407 self._fs_cmd("subvolumegroup", "create", self.volname, group)
1408
1409 # create subvolume in group
1410 self._fs_cmd("subvolume", "create", self.volname, subvol1, "--group_name", group)
1411 self._fs_cmd("subvolume", "create", self.volname, subvol2, "--group_name", group, "--mode", "777")
1412 # check whether mode 0777 also works
1413 self._fs_cmd("subvolume", "create", self.volname, subvol3, "--group_name", group, "--mode", "0777")
1414
1415 subvol1_path = self._get_subvolume_path(self.volname, subvol1, group_name=group)
1416 subvol2_path = self._get_subvolume_path(self.volname, subvol2, group_name=group)
1417 subvol3_path = self._get_subvolume_path(self.volname, subvol3, group_name=group)
1418
1419 # check subvolume's mode
1420 actual_mode1 = self.mount_a.run_shell(['stat', '-c' '%a', subvol1_path]).stdout.getvalue().strip()
1421 actual_mode2 = self.mount_a.run_shell(['stat', '-c' '%a', subvol2_path]).stdout.getvalue().strip()
1422 actual_mode3 = self.mount_a.run_shell(['stat', '-c' '%a', subvol3_path]).stdout.getvalue().strip()
1423 self.assertEqual(actual_mode1, expected_mode1)
1424 self.assertEqual(actual_mode2, expected_mode2)
1425 self.assertEqual(actual_mode3, expected_mode2)
1426
1427 self._fs_cmd("subvolume", "rm", self.volname, subvol1, group)
1428 self._fs_cmd("subvolume", "rm", self.volname, subvol2, group)
1429 self._fs_cmd("subvolume", "rm", self.volname, subvol3, group)
1430 self._fs_cmd("subvolumegroup", "rm", self.volname, group)
1431
1432 # verify trash dir is clean
1433 self._wait_for_trash_empty()
1434
1435 def test_subvolume_create_with_desired_uid_gid(self):
1436 """
1437 That the subvolume can be created with the desired uid and gid and its uid and gid matches the
1438 expected values.
1439 """
1440 uid = 1000
1441 gid = 1000
1442
1443 # create subvolume
1444 subvolname = self._generate_random_subvolume_name()
1445 self._fs_cmd("subvolume", "create", self.volname, subvolname, "--uid", str(uid), "--gid", str(gid))
1446
1447 # make sure it exists
1448 subvolpath = self._get_subvolume_path(self.volname, subvolname)
1449 self.assertNotEqual(subvolpath, None)
1450
1451 # verify the uid and gid
1452 suid = int(self.mount_a.run_shell(['stat', '-c' '%u', subvolpath]).stdout.getvalue().strip())
1453 sgid = int(self.mount_a.run_shell(['stat', '-c' '%g', subvolpath]).stdout.getvalue().strip())
1454 self.assertEqual(uid, suid)
1455 self.assertEqual(gid, sgid)
1456
1457 # remove subvolume
1458 self._fs_cmd("subvolume", "rm", self.volname, subvolname)
1459
1460 # verify trash dir is clean
1461 self._wait_for_trash_empty()
1462
1463 def test_nonexistent_subvolume_group_rm(self):
1464 group = "non_existent_group"
1465
1466 # try, remove subvolume group
1467 try:
1468 self._fs_cmd("subvolumegroup", "rm", self.volname, group)
1469 except CommandFailedError as ce:
1470 if ce.exitstatus != errno.ENOENT:
1471 raise
1472 else:
1473 raise RuntimeError("expected the 'fs subvolumegroup rm' command to fail")
1474
1475 def test_default_uid_gid_subvolume_group(self):
1476 group = self._generate_random_group_name()
1477 expected_uid = 0
1478 expected_gid = 0
1479
1480 # create group
1481 self._fs_cmd("subvolumegroup", "create", self.volname, group)
1482 group_path = self._get_subvolume_group_path(self.volname, group)
1483
1484 # check group's uid and gid
1485 stat = self.mount_a.stat(group_path)
1486 self.assertEqual(stat['st_uid'], expected_uid)
1487 self.assertEqual(stat['st_gid'], expected_gid)
1488
1489 # remove group
1490 self._fs_cmd("subvolumegroup", "rm", self.volname, group)
1491
1492 def test_subvolume_group_ls(self):
1493 # tests the 'fs subvolumegroup ls' command
1494
1495 subvolumegroups = []
1496
1497 #create subvolumegroups
1498 subvolumegroups = self._generate_random_group_name(3)
1499 for groupname in subvolumegroups:
1500 self._fs_cmd("subvolumegroup", "create", self.volname, groupname)
1501
1502 subvolumegroupls = json.loads(self._fs_cmd('subvolumegroup', 'ls', self.volname))
1503 if len(subvolumegroupls) == 0:
1504 raise RuntimeError("Expected the 'fs subvolumegroup ls' command to list the created subvolume groups")
1505 else:
1506 subvolgroupnames = [subvolumegroup['name'] for subvolumegroup in subvolumegroupls]
1507 if collections.Counter(subvolgroupnames) != collections.Counter(subvolumegroups):
1508 raise RuntimeError("Error creating or listing subvolume groups")
1509
1510 def test_subvolume_group_ls_for_nonexistent_volume(self):
1511 # tests the 'fs subvolumegroup ls' command when /volume doesn't exist
1512 # prerequisite: we expect that the test volume is created and a subvolumegroup is NOT created
1513
1514 # list subvolume groups
1515 subvolumegroupls = json.loads(self._fs_cmd('subvolumegroup', 'ls', self.volname))
1516 if len(subvolumegroupls) > 0:
1517 raise RuntimeError("Expected the 'fs subvolumegroup ls' command to output an empty list")
1518
1519 ### snapshot operations
1520
1521 def test_subvolume_snapshot_create_and_rm(self):
1522 subvolume = self._generate_random_subvolume_name()
1523 snapshot = self._generate_random_snapshot_name()
1524
1525 # create subvolume
1526 self._fs_cmd("subvolume", "create", self.volname, subvolume)
1527
1528 # snapshot subvolume
1529 self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolume, snapshot)
1530
1531 # remove snapshot
1532 self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume, snapshot)
1533
1534 # remove subvolume
1535 self._fs_cmd("subvolume", "rm", self.volname, subvolume)
1536
1537 # verify trash dir is clean
1538 self._wait_for_trash_empty()
1539
1540 def test_subvolume_snapshot_info(self):
1541
1542 """
1543 tests the 'fs subvolume snapshot info' command
1544 """
1545
1546 snap_md = ["created_at", "data_pool", "has_pending_clones", "size"]
1547
1548 subvolume = self._generate_random_subvolume_name()
1549 snapshot, snap_missing = self._generate_random_snapshot_name(2)
1550
1551 # create subvolume
1552 self._fs_cmd("subvolume", "create", self.volname, subvolume)
1553
1554 # do some IO
1555 self._do_subvolume_io(subvolume, number_of_files=1)
1556
1557 # snapshot subvolume
1558 self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolume, snapshot)
1559
1560 snap_info = json.loads(self._get_subvolume_snapshot_info(self.volname, subvolume, snapshot))
1561 for md in snap_md:
1562 self.assertIn(md, snap_info, "'{0}' key not present in metadata of snapshot".format(md))
1563 self.assertEqual(snap_info["has_pending_clones"], "no")
1564
1565 # snapshot info for non-existent snapshot
1566 try:
1567 self._get_subvolume_snapshot_info(self.volname, subvolume, snap_missing)
1568 except CommandFailedError as ce:
1569 self.assertEqual(ce.exitstatus, errno.ENOENT, "invalid error code on snapshot info of non-existent snapshot")
1570 else:
1571 self.fail("expected snapshot info of non-existent snapshot to fail")
1572
1573 # remove snapshot
1574 self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume, snapshot)
1575
1576 # remove subvolume
1577 self._fs_cmd("subvolume", "rm", self.volname, subvolume)
1578
1579 # verify trash dir is clean
1580 self._wait_for_trash_empty()
1581
1582 def test_subvolume_snapshot_create_idempotence(self):
1583 subvolume = self._generate_random_subvolume_name()
1584 snapshot = self._generate_random_snapshot_name()
1585
1586 # create subvolume
1587 self._fs_cmd("subvolume", "create", self.volname, subvolume)
1588
1589 # snapshot subvolume
1590 self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolume, snapshot)
1591
1592 # try creating w/ same subvolume snapshot name -- should be idempotent
1593 self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolume, snapshot)
1594
1595 # remove snapshot
1596 self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume, snapshot)
1597
1598 # remove subvolume
1599 self._fs_cmd("subvolume", "rm", self.volname, subvolume)
1600
1601 # verify trash dir is clean
1602 self._wait_for_trash_empty()
1603
1604 def test_nonexistent_subvolume_snapshot_rm(self):
1605 subvolume = self._generate_random_subvolume_name()
1606 snapshot = self._generate_random_snapshot_name()
1607
1608 # create subvolume
1609 self._fs_cmd("subvolume", "create", self.volname, subvolume)
1610
1611 # snapshot subvolume
1612 self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolume, snapshot)
1613
1614 # remove snapshot
1615 self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume, snapshot)
1616
1617 # remove snapshot again
1618 try:
1619 self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume, snapshot)
1620 except CommandFailedError as ce:
1621 if ce.exitstatus != errno.ENOENT:
1622 raise
1623 else:
1624 raise RuntimeError("expected the 'fs subvolume snapshot rm' command to fail")
1625
1626 # remove subvolume
1627 self._fs_cmd("subvolume", "rm", self.volname, subvolume)
1628
1629 # verify trash dir is clean
1630 self._wait_for_trash_empty()
1631
1632 def test_subvolume_snapshot_rm_force(self):
1633 # test removing non existing subvolume snapshot with --force
1634 subvolume = self._generate_random_subvolume_name()
1635 snapshot = self._generate_random_snapshot_name()
1636
1637 # remove snapshot
1638 try:
1639 self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume, snapshot, "--force")
1640 except CommandFailedError:
1641 raise RuntimeError("expected the 'fs subvolume snapshot rm --force' command to succeed")
1642
1643 def test_subvolume_snapshot_in_group(self):
1644 subvolume = self._generate_random_subvolume_name()
1645 group = self._generate_random_group_name()
1646 snapshot = self._generate_random_snapshot_name()
1647
1648 # create group
1649 self._fs_cmd("subvolumegroup", "create", self.volname, group)
1650
1651 # create subvolume in group
1652 self._fs_cmd("subvolume", "create", self.volname, subvolume, "--group_name", group)
1653
1654 # snapshot subvolume in group
1655 self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolume, snapshot, group)
1656
1657 # remove snapshot
1658 self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume, snapshot, group)
1659
1660 # remove subvolume
1661 self._fs_cmd("subvolume", "rm", self.volname, subvolume, group)
1662
1663 # verify trash dir is clean
1664 self._wait_for_trash_empty()
1665
1666 # remove group
1667 self._fs_cmd("subvolumegroup", "rm", self.volname, group)
1668
1669 def test_subvolume_snapshot_ls(self):
1670 # tests the 'fs subvolume snapshot ls' command
1671
1672 snapshots = []
1673
1674 # create subvolume
1675 subvolume = self._generate_random_subvolume_name()
1676 self._fs_cmd("subvolume", "create", self.volname, subvolume)
1677
1678 # create subvolume snapshots
1679 snapshots = self._generate_random_snapshot_name(3)
1680 for snapshot in snapshots:
1681 self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolume, snapshot)
1682
1683 subvolsnapshotls = json.loads(self._fs_cmd('subvolume', 'snapshot', 'ls', self.volname, subvolume))
1684 if len(subvolsnapshotls) == 0:
1685 self.fail("Expected the 'fs subvolume snapshot ls' command to list the created subvolume snapshots")
1686 else:
1687 snapshotnames = [snapshot['name'] for snapshot in subvolsnapshotls]
1688 if collections.Counter(snapshotnames) != collections.Counter(snapshots):
1689 self.fail("Error creating or listing subvolume snapshots")
1690
1691 # remove snapshot
1692 for snapshot in snapshots:
1693 self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume, snapshot)
1694
1695 # remove subvolume
1696 self._fs_cmd("subvolume", "rm", self.volname, subvolume)
1697
1698 # verify trash dir is clean
1699 self._wait_for_trash_empty()
1700
1701 def test_subvolume_group_snapshot_unsupported_status(self):
1702 group = self._generate_random_group_name()
1703 snapshot = self._generate_random_snapshot_name()
1704
1705 # create group
1706 self._fs_cmd("subvolumegroup", "create", self.volname, group)
1707
1708 # snapshot group
1709 try:
1710 self._fs_cmd("subvolumegroup", "snapshot", "create", self.volname, group, snapshot)
1711 except CommandFailedError as ce:
1712 self.assertEqual(ce.exitstatus, errno.ENOSYS, "invalid error code on subvolumegroup snapshot create")
1713 else:
1714 self.fail("expected subvolumegroup snapshot create command to fail")
1715
1716 # remove group
1717 self._fs_cmd("subvolumegroup", "rm", self.volname, group)
1718
1719 @unittest.skip("skipping subvolumegroup snapshot tests")
1720 def test_subvolume_group_snapshot_create_and_rm(self):
1721 subvolume = self._generate_random_subvolume_name()
1722 group = self._generate_random_group_name()
1723 snapshot = self._generate_random_snapshot_name()
1724
1725 # create group
1726 self._fs_cmd("subvolumegroup", "create", self.volname, group)
1727
1728 # create subvolume in group
1729 self._fs_cmd("subvolume", "create", self.volname, subvolume, "--group_name", group)
1730
1731 # snapshot group
1732 self._fs_cmd("subvolumegroup", "snapshot", "create", self.volname, group, snapshot)
1733
1734 # remove snapshot
1735 self._fs_cmd("subvolumegroup", "snapshot", "rm", self.volname, group, snapshot)
1736
1737 # remove subvolume
1738 self._fs_cmd("subvolume", "rm", self.volname, subvolume, group)
1739
1740 # verify trash dir is clean
1741 self._wait_for_trash_empty()
1742
1743 # remove group
1744 self._fs_cmd("subvolumegroup", "rm", self.volname, group)
1745
1746 @unittest.skip("skipping subvolumegroup snapshot tests")
1747 def test_subvolume_group_snapshot_idempotence(self):
1748 subvolume = self._generate_random_subvolume_name()
1749 group = self._generate_random_group_name()
1750 snapshot = self._generate_random_snapshot_name()
1751
1752 # create group
1753 self._fs_cmd("subvolumegroup", "create", self.volname, group)
1754
1755 # create subvolume in group
1756 self._fs_cmd("subvolume", "create", self.volname, subvolume, "--group_name", group)
1757
1758 # snapshot group
1759 self._fs_cmd("subvolumegroup", "snapshot", "create", self.volname, group, snapshot)
1760
1761 # try creating snapshot w/ same snapshot name -- shoule be idempotent
1762 self._fs_cmd("subvolumegroup", "snapshot", "create", self.volname, group, snapshot)
1763
1764 # remove snapshot
1765 self._fs_cmd("subvolumegroup", "snapshot", "rm", self.volname, group, snapshot)
1766
1767 # remove subvolume
1768 self._fs_cmd("subvolume", "rm", self.volname, subvolume, group)
1769
1770 # verify trash dir is clean
1771 self._wait_for_trash_empty()
1772
1773 # remove group
1774 self._fs_cmd("subvolumegroup", "rm", self.volname, group)
1775
1776 @unittest.skip("skipping subvolumegroup snapshot tests")
1777 def test_nonexistent_subvolume_group_snapshot_rm(self):
1778 subvolume = self._generate_random_subvolume_name()
1779 group = self._generate_random_group_name()
1780 snapshot = self._generate_random_snapshot_name()
1781
1782 # create group
1783 self._fs_cmd("subvolumegroup", "create", self.volname, group)
1784
1785 # create subvolume in group
1786 self._fs_cmd("subvolume", "create", self.volname, subvolume, "--group_name", group)
1787
1788 # snapshot group
1789 self._fs_cmd("subvolumegroup", "snapshot", "create", self.volname, group, snapshot)
1790
1791 # remove snapshot
1792 self._fs_cmd("subvolumegroup", "snapshot", "rm", self.volname, group, snapshot)
1793
1794 # remove snapshot
1795 try:
1796 self._fs_cmd("subvolumegroup", "snapshot", "rm", self.volname, group, snapshot)
1797 except CommandFailedError as ce:
1798 if ce.exitstatus != errno.ENOENT:
1799 raise
1800 else:
1801 raise RuntimeError("expected the 'fs subvolumegroup snapshot rm' command to fail")
1802
1803 # remove subvolume
1804 self._fs_cmd("subvolume", "rm", self.volname, subvolume, group)
1805
1806 # verify trash dir is clean
1807 self._wait_for_trash_empty()
1808
1809 # remove group
1810 self._fs_cmd("subvolumegroup", "rm", self.volname, group)
1811
1812 @unittest.skip("skipping subvolumegroup snapshot tests")
1813 def test_subvolume_group_snapshot_rm_force(self):
1814 # test removing non-existing subvolume group snapshot with --force
1815 group = self._generate_random_group_name()
1816 snapshot = self._generate_random_snapshot_name()
1817 # remove snapshot
1818 try:
1819 self._fs_cmd("subvolumegroup", "snapshot", "rm", self.volname, group, snapshot, "--force")
1820 except CommandFailedError:
1821 raise RuntimeError("expected the 'fs subvolumegroup snapshot rm --force' command to succeed")
1822
1823 @unittest.skip("skipping subvolumegroup snapshot tests")
1824 def test_subvolume_group_snapshot_ls(self):
1825 # tests the 'fs subvolumegroup snapshot ls' command
1826
1827 snapshots = []
1828
1829 # create group
1830 group = self._generate_random_group_name()
1831 self._fs_cmd("subvolumegroup", "create", self.volname, group)
1832
1833 # create subvolumegroup snapshots
1834 snapshots = self._generate_random_snapshot_name(3)
1835 for snapshot in snapshots:
1836 self._fs_cmd("subvolumegroup", "snapshot", "create", self.volname, group, snapshot)
1837
1838 subvolgrpsnapshotls = json.loads(self._fs_cmd('subvolumegroup', 'snapshot', 'ls', self.volname, group))
1839 if len(subvolgrpsnapshotls) == 0:
1840 raise RuntimeError("Expected the 'fs subvolumegroup snapshot ls' command to list the created subvolume group snapshots")
1841 else:
1842 snapshotnames = [snapshot['name'] for snapshot in subvolgrpsnapshotls]
1843 if collections.Counter(snapshotnames) != collections.Counter(snapshots):
1844 raise RuntimeError("Error creating or listing subvolume group snapshots")
1845
1846 def test_async_subvolume_rm(self):
1847 subvolumes = self._generate_random_subvolume_name(100)
1848
1849 # create subvolumes
1850 for subvolume in subvolumes:
1851 self._fs_cmd("subvolume", "create", self.volname, subvolume)
1852 self._do_subvolume_io(subvolume, number_of_files=10)
1853
1854 self.mount_a.umount_wait()
1855
1856 # remove subvolumes
1857 for subvolume in subvolumes:
1858 self._fs_cmd("subvolume", "rm", self.volname, subvolume)
1859
1860 self.mount_a.mount_wait()
1861
1862 # verify trash dir is clean
1863 self._wait_for_trash_empty(timeout=300)
1864
1865 def test_mgr_eviction(self):
1866 # unmount any cephfs mounts
1867 self.mount_a.umount_wait()
1868 sessions = self._session_list()
1869 self.assertLessEqual(len(sessions), 1) # maybe mgr is already mounted
1870
1871 # Get the mgr to definitely mount cephfs
1872 subvolume = self._generate_random_subvolume_name()
1873 self._fs_cmd("subvolume", "create", self.volname, subvolume)
1874 sessions = self._session_list()
1875 self.assertEqual(len(sessions), 1)
1876
1877 # Now fail the mgr, check the session was evicted
1878 mgr = self.mgr_cluster.get_active_id()
1879 self.mgr_cluster.mgr_fail(mgr)
1880 self.wait_until_evicted(sessions[0]['id'])
1881
1882 def test_subvolume_upgrade_legacy_to_v1(self):
1883 """
1884 poor man's upgrade test -- rather than going through a full upgrade cycle,
1885 emulate subvolumes by going through the wormhole and verify if they are
1886 accessible.
1887 further ensure that a legacy volume is not updated to v2.
1888 """
1889 subvolume1, subvolume2 = self._generate_random_subvolume_name(2)
1890 group = self._generate_random_group_name()
1891
1892 # emulate a old-fashioned subvolume -- one in the default group and
1893 # the other in a custom group
1894 createpath1 = os.path.join(".", "volumes", "_nogroup", subvolume1)
1895 self.mount_a.run_shell(['mkdir', '-p', createpath1])
1896
1897 # create group
1898 createpath2 = os.path.join(".", "volumes", group, subvolume2)
1899 self.mount_a.run_shell(['mkdir', '-p', createpath2])
1900
1901 # this would auto-upgrade on access without anyone noticing
1902 subvolpath1 = self._fs_cmd("subvolume", "getpath", self.volname, subvolume1)
1903 self.assertNotEqual(subvolpath1, None)
1904 subvolpath1 = subvolpath1.rstrip() # remove "/" prefix and any trailing newline
1905
1906 subvolpath2 = self._fs_cmd("subvolume", "getpath", self.volname, subvolume2, group)
1907 self.assertNotEqual(subvolpath2, None)
1908 subvolpath2 = subvolpath2.rstrip() # remove "/" prefix and any trailing newline
1909
1910 # and... the subvolume path returned should be what we created behind the scene
1911 self.assertEqual(createpath1[1:], subvolpath1)
1912 self.assertEqual(createpath2[1:], subvolpath2)
1913
1914 # ensure metadata file is in legacy location, with required version v1
1915 self._assert_meta_location_and_version(self.volname, subvolume1, version=1, legacy=True)
1916 self._assert_meta_location_and_version(self.volname, subvolume2, subvol_group=group, version=1, legacy=True)
1917
1918 # remove subvolume
1919 self._fs_cmd("subvolume", "rm", self.volname, subvolume1)
1920 self._fs_cmd("subvolume", "rm", self.volname, subvolume2, group)
1921
1922 # verify trash dir is clean
1923 self._wait_for_trash_empty()
1924
1925 # remove group
1926 self._fs_cmd("subvolumegroup", "rm", self.volname, group)
1927
1928 def test_subvolume_no_upgrade_v1_sanity(self):
1929 """
1930 poor man's upgrade test -- theme continues...
1931
1932 This test is to ensure v1 subvolumes are retained as is, due to a snapshot being present, and runs through
1933 a series of operations on the v1 subvolume to ensure they work as expected.
1934 """
1935 subvol_md = ["atime", "bytes_pcent", "bytes_quota", "bytes_used", "created_at", "ctime",
1936 "data_pool", "gid", "mode", "mon_addrs", "mtime", "path", "pool_namespace",
1937 "type", "uid", "features", "state"]
1938 snap_md = ["created_at", "data_pool", "has_pending_clones", "size"]
1939
1940 subvolume = self._generate_random_subvolume_name()
1941 snapshot = self._generate_random_snapshot_name()
1942 clone1, clone2 = self._generate_random_clone_name(2)
1943 mode = "777"
1944 uid = "1000"
1945 gid = "1000"
1946
1947 # emulate a v1 subvolume -- in the default group
1948 subvolume_path = self._create_v1_subvolume(subvolume)
1949
1950 # getpath
1951 subvolpath = self._get_subvolume_path(self.volname, subvolume)
1952 self.assertEqual(subvolpath, subvolume_path)
1953
1954 # ls
1955 subvolumes = json.loads(self._fs_cmd('subvolume', 'ls', self.volname))
1956 self.assertEqual(len(subvolumes), 1, "subvolume ls count mismatch, expected '1', found {0}".format(len(subvolumes)))
1957 self.assertEqual(subvolumes[0]['name'], subvolume,
1958 "subvolume name mismatch in ls output, expected '{0}', found '{1}'".format(subvolume, subvolumes[0]['name']))
1959
1960 # info
1961 subvol_info = json.loads(self._get_subvolume_info(self.volname, subvolume))
1962 for md in subvol_md:
1963 self.assertIn(md, subvol_info, "'{0}' key not present in metadata of subvolume".format(md))
1964
1965 self.assertEqual(subvol_info["state"], "complete",
1966 msg="expected state to be 'complete', found '{0}".format(subvol_info["state"]))
1967 self.assertEqual(len(subvol_info["features"]), 2,
1968 msg="expected 1 feature, found '{0}' ({1})".format(len(subvol_info["features"]), subvol_info["features"]))
1969 for feature in ['snapshot-clone', 'snapshot-autoprotect']:
1970 self.assertIn(feature, subvol_info["features"], msg="expected feature '{0}' in subvolume".format(feature))
1971
1972 # resize
1973 nsize = self.DEFAULT_FILE_SIZE*1024*1024*10
1974 self._fs_cmd("subvolume", "resize", self.volname, subvolume, str(nsize))
1975 subvol_info = json.loads(self._get_subvolume_info(self.volname, subvolume))
1976 for md in subvol_md:
1977 self.assertIn(md, subvol_info, "'{0}' key not present in metadata of subvolume".format(md))
1978 self.assertEqual(subvol_info["bytes_quota"], nsize, "bytes_quota should be set to '{0}'".format(nsize))
1979
1980 # create (idempotent) (change some attrs, to ensure attrs are preserved from the snapshot on clone)
1981 self._fs_cmd("subvolume", "create", self.volname, subvolume, "--mode", mode, "--uid", uid, "--gid", gid)
1982
1983 # do some IO
1984 self._do_subvolume_io(subvolume, number_of_files=8)
1985
1986 # snap-create
1987 self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolume, snapshot)
1988
1989 # clone
1990 self._fs_cmd("subvolume", "snapshot", "clone", self.volname, subvolume, snapshot, clone1)
1991
1992 # check clone status
1993 self._wait_for_clone_to_complete(clone1)
1994
1995 # ensure clone is v2
1996 self._assert_meta_location_and_version(self.volname, clone1, version=2)
1997
1998 # verify clone
1999 self._verify_clone(subvolume, snapshot, clone1, source_version=1)
2000
2001 # clone (older snapshot)
2002 self._fs_cmd("subvolume", "snapshot", "clone", self.volname, subvolume, 'fake', clone2)
2003
2004 # check clone status
2005 self._wait_for_clone_to_complete(clone2)
2006
2007 # ensure clone is v2
2008 self._assert_meta_location_and_version(self.volname, clone2, version=2)
2009
2010 # verify clone
2011 # TODO: rentries will mismatch till this is fixed https://tracker.ceph.com/issues/46747
2012 #self._verify_clone(subvolume, 'fake', clone2, source_version=1)
2013
2014 # snap-info
2015 snap_info = json.loads(self._get_subvolume_snapshot_info(self.volname, subvolume, snapshot))
2016 for md in snap_md:
2017 self.assertIn(md, snap_info, "'{0}' key not present in metadata of snapshot".format(md))
2018 self.assertEqual(snap_info["has_pending_clones"], "no")
2019
2020 # snap-ls
2021 subvol_snapshots = json.loads(self._fs_cmd('subvolume', 'snapshot', 'ls', self.volname, subvolume))
2022 self.assertEqual(len(subvol_snapshots), 2, "subvolume ls count mismatch, expected 2', found {0}".format(len(subvol_snapshots)))
2023 snapshotnames = [snapshot['name'] for snapshot in subvol_snapshots]
2024 for name in [snapshot, 'fake']:
2025 self.assertIn(name, snapshotnames, msg="expected snapshot '{0}' in subvolume snapshot ls".format(name))
2026
2027 # snap-rm
2028 self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume, snapshot)
2029 self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume, "fake")
2030
2031 # ensure volume is still at version 1
2032 self._assert_meta_location_and_version(self.volname, subvolume, version=1)
2033
2034 # rm
2035 self._fs_cmd("subvolume", "rm", self.volname, subvolume)
2036 self._fs_cmd("subvolume", "rm", self.volname, clone1)
2037 self._fs_cmd("subvolume", "rm", self.volname, clone2)
2038
2039 # verify trash dir is clean
2040 self._wait_for_trash_empty()
2041
2042 def test_subvolume_no_upgrade_v1_to_v2(self):
2043 """
2044 poor man's upgrade test -- theme continues...
2045 ensure v1 to v2 upgrades are not done automatically due to various states of v1
2046 """
2047 subvolume1, subvolume2, subvolume3 = self._generate_random_subvolume_name(3)
2048 group = self._generate_random_group_name()
2049
2050 # emulate a v1 subvolume -- in the default group
2051 subvol1_path = self._create_v1_subvolume(subvolume1)
2052
2053 # emulate a v1 subvolume -- in a custom group
2054 subvol2_path = self._create_v1_subvolume(subvolume2, subvol_group=group)
2055
2056 # emulate a v1 subvolume -- in a clone pending state
2057 self._create_v1_subvolume(subvolume3, subvol_type='clone', has_snapshot=False, state='pending')
2058
2059 # this would attempt auto-upgrade on access, but fail to do so as snapshots exist
2060 subvolpath1 = self._get_subvolume_path(self.volname, subvolume1)
2061 self.assertEqual(subvolpath1, subvol1_path)
2062
2063 subvolpath2 = self._get_subvolume_path(self.volname, subvolume2, group_name=group)
2064 self.assertEqual(subvolpath2, subvol2_path)
2065
2066 # this would attempt auto-upgrade on access, but fail to do so as volume is not complete
2067 # use clone status, as only certain operations are allowed in pending state
2068 status = json.loads(self._fs_cmd("clone", "status", self.volname, subvolume3))
2069 self.assertEqual(status["status"]["state"], "pending")
2070
2071 # remove snapshot
2072 self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume1, "fake")
2073 self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume2, "fake", group)
2074
2075 # ensure metadata file is in v1 location, with version retained as v1
2076 self._assert_meta_location_and_version(self.volname, subvolume1, version=1)
2077 self._assert_meta_location_and_version(self.volname, subvolume2, subvol_group=group, version=1)
2078
2079 # remove subvolume
2080 self._fs_cmd("subvolume", "rm", self.volname, subvolume1)
2081 self._fs_cmd("subvolume", "rm", self.volname, subvolume2, group)
2082 try:
2083 self._fs_cmd("subvolume", "rm", self.volname, subvolume3)
2084 except CommandFailedError as ce:
2085 self.assertEqual(ce.exitstatus, errno.EAGAIN, "invalid error code on rm of subvolume undergoing clone")
2086 else:
2087 self.fail("expected rm of subvolume undergoing clone to fail")
2088
2089 # ensure metadata file is in v1 location, with version retained as v1
2090 self._assert_meta_location_and_version(self.volname, subvolume3, version=1)
2091 self._fs_cmd("subvolume", "rm", self.volname, subvolume3, "--force")
2092
2093 # verify list subvolumes returns an empty list
2094 subvolumels = json.loads(self._fs_cmd('subvolume', 'ls', self.volname))
2095 self.assertEqual(len(subvolumels), 0)
2096
2097 # verify trash dir is clean
2098 self._wait_for_trash_empty()
2099
2100 def test_subvolume_upgrade_v1_to_v2(self):
2101 """
2102 poor man's upgrade test -- theme continues...
2103 ensure v1 to v2 upgrades work
2104 """
2105 subvolume1, subvolume2 = self._generate_random_subvolume_name(2)
2106 group = self._generate_random_group_name()
2107
2108 # emulate a v1 subvolume -- in the default group
2109 subvol1_path = self._create_v1_subvolume(subvolume1, has_snapshot=False)
2110
2111 # emulate a v1 subvolume -- in a custom group
2112 subvol2_path = self._create_v1_subvolume(subvolume2, subvol_group=group, has_snapshot=False)
2113
2114 # this would attempt auto-upgrade on access
2115 subvolpath1 = self._get_subvolume_path(self.volname, subvolume1)
2116 self.assertEqual(subvolpath1, subvol1_path)
2117
2118 subvolpath2 = self._get_subvolume_path(self.volname, subvolume2, group_name=group)
2119 self.assertEqual(subvolpath2, subvol2_path)
2120
2121 # ensure metadata file is in v2 location, with version retained as v2
2122 self._assert_meta_location_and_version(self.volname, subvolume1, version=2)
2123 self._assert_meta_location_and_version(self.volname, subvolume2, subvol_group=group, version=2)
2124
2125 # remove subvolume
2126 self._fs_cmd("subvolume", "rm", self.volname, subvolume1)
2127 self._fs_cmd("subvolume", "rm", self.volname, subvolume2, group)
2128
2129 # verify trash dir is clean
2130 self._wait_for_trash_empty()
2131
2132 def test_subvolume_rm_with_snapshots(self):
2133 subvolume = self._generate_random_subvolume_name()
2134 snapshot = self._generate_random_snapshot_name()
2135
2136 # create subvolume
2137 self._fs_cmd("subvolume", "create", self.volname, subvolume)
2138
2139 # snapshot subvolume
2140 self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolume, snapshot)
2141
2142 # remove subvolume -- should fail with ENOTEMPTY since it has snapshots
2143 try:
2144 self._fs_cmd("subvolume", "rm", self.volname, subvolume)
2145 except CommandFailedError as ce:
2146 if ce.exitstatus != errno.ENOTEMPTY:
2147 raise RuntimeError("invalid error code returned when deleting subvolume with snapshots")
2148 else:
2149 raise RuntimeError("expected subvolume deletion to fail")
2150
2151 # remove snapshot
2152 self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume, snapshot)
2153
2154 # remove subvolume
2155 self._fs_cmd("subvolume", "rm", self.volname, subvolume)
2156
2157 # verify trash dir is clean
2158 self._wait_for_trash_empty()
2159
2160 def test_subvolume_retain_snapshot_without_snapshots(self):
2161 """
2162 ensure retain snapshots based delete of a subvolume with no snapshots, deletes the subbvolume
2163 """
2164 subvolume = self._generate_random_subvolume_name()
2165
2166 # create subvolume
2167 self._fs_cmd("subvolume", "create", self.volname, subvolume)
2168
2169 # remove with snapshot retention (should remove volume, no snapshots to retain)
2170 self._fs_cmd("subvolume", "rm", self.volname, subvolume, "--retain-snapshots")
2171
2172 # verify list subvolumes returns an empty list
2173 subvolumels = json.loads(self._fs_cmd('subvolume', 'ls', self.volname))
2174 self.assertEqual(len(subvolumels), 0)
2175
2176 # verify trash dir is clean
2177 self._wait_for_trash_empty()
2178
2179 def test_subvolume_retain_snapshot_with_snapshots(self):
2180 """
2181 ensure retain snapshots based delete of a subvolume with snapshots retains the subvolume
2182 also test allowed and dis-allowed operations on a retained subvolume
2183 """
2184 snap_md = ["created_at", "data_pool", "has_pending_clones", "size"]
2185
2186 subvolume = self._generate_random_subvolume_name()
2187 snapshot = self._generate_random_snapshot_name()
2188
2189 # create subvolume
2190 self._fs_cmd("subvolume", "create", self.volname, subvolume)
2191
2192 # snapshot subvolume
2193 self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolume, snapshot)
2194
2195 # remove subvolume -- should fail with ENOTEMPTY since it has snapshots
2196 try:
2197 self._fs_cmd("subvolume", "rm", self.volname, subvolume)
2198 except CommandFailedError as ce:
2199 self.assertEqual(ce.exitstatus, errno.ENOTEMPTY, "invalid error code on rm of retained subvolume with snapshots")
2200 else:
2201 self.fail("expected rm of subvolume with retained snapshots to fail")
2202
2203 # remove with snapshot retention
2204 self._fs_cmd("subvolume", "rm", self.volname, subvolume, "--retain-snapshots")
2205
2206 # fetch info
2207 subvol_info = json.loads(self._fs_cmd("subvolume", "info", self.volname, subvolume))
2208 self.assertEqual(subvol_info["state"], "snapshot-retained",
2209 msg="expected state to be 'snapshot-retained', found '{0}".format(subvol_info["state"]))
2210
2211 ## test allowed ops in retained state
2212 # ls
2213 subvolumes = json.loads(self._fs_cmd('subvolume', 'ls', self.volname))
2214 self.assertEqual(len(subvolumes), 1, "subvolume ls count mismatch, expected '1', found {0}".format(len(subvolumes)))
2215 self.assertEqual(subvolumes[0]['name'], subvolume,
2216 "subvolume name mismatch in ls output, expected '{0}', found '{1}'".format(subvolume, subvolumes[0]['name']))
2217
2218 # snapshot info
2219 snap_info = json.loads(self._get_subvolume_snapshot_info(self.volname, subvolume, snapshot))
2220 for md in snap_md:
2221 self.assertIn(md, snap_info, "'{0}' key not present in metadata of snapshot".format(md))
2222 self.assertEqual(snap_info["has_pending_clones"], "no")
2223
2224 # rm --force (allowed but should fail)
2225 try:
2226 self._fs_cmd("subvolume", "rm", self.volname, subvolume, "--force")
2227 except CommandFailedError as ce:
2228 self.assertEqual(ce.exitstatus, errno.ENOTEMPTY, "invalid error code on rm of subvolume with retained snapshots")
2229 else:
2230 self.fail("expected rm of subvolume with retained snapshots to fail")
2231
2232 # rm (allowed but should fail)
2233 try:
2234 self._fs_cmd("subvolume", "rm", self.volname, subvolume)
2235 except CommandFailedError as ce:
2236 self.assertEqual(ce.exitstatus, errno.ENOTEMPTY, "invalid error code on rm of subvolume with retained snapshots")
2237 else:
2238 self.fail("expected rm of subvolume with retained snapshots to fail")
2239
2240 ## test disallowed ops
2241 # getpath
2242 try:
2243 self._fs_cmd("subvolume", "getpath", self.volname, subvolume)
2244 except CommandFailedError as ce:
2245 self.assertEqual(ce.exitstatus, errno.ENOENT, "invalid error code on getpath of subvolume with retained snapshots")
2246 else:
2247 self.fail("expected getpath of subvolume with retained snapshots to fail")
2248
2249 # resize
2250 nsize = self.DEFAULT_FILE_SIZE*1024*1024
2251 try:
2252 self._fs_cmd("subvolume", "resize", self.volname, subvolume, str(nsize))
2253 except CommandFailedError as ce:
2254 self.assertEqual(ce.exitstatus, errno.ENOENT, "invalid error code on resize of subvolume with retained snapshots")
2255 else:
2256 self.fail("expected resize of subvolume with retained snapshots to fail")
2257
2258 # snap-create
2259 try:
2260 self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolume, "fail")
2261 except CommandFailedError as ce:
2262 self.assertEqual(ce.exitstatus, errno.ENOENT, "invalid error code on snapshot create of subvolume with retained snapshots")
2263 else:
2264 self.fail("expected snapshot create of subvolume with retained snapshots to fail")
2265
2266 # remove snapshot (should remove volume)
2267 self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume, snapshot)
2268
2269 # verify list subvolumes returns an empty list
2270 subvolumels = json.loads(self._fs_cmd('subvolume', 'ls', self.volname))
2271 self.assertEqual(len(subvolumels), 0)
2272
2273 # verify trash dir is clean
2274 self._wait_for_trash_empty()
2275
2276 def test_subvolume_retain_snapshot_invalid_recreate(self):
2277 """
2278 ensure retained subvolume recreate does not leave any incarnations in the subvolume and trash
2279 """
2280 subvolume = self._generate_random_subvolume_name()
2281 snapshot = self._generate_random_snapshot_name()
2282
2283 # create subvolume
2284 self._fs_cmd("subvolume", "create", self.volname, subvolume)
2285
2286 # snapshot subvolume
2287 self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolume, snapshot)
2288
2289 # remove with snapshot retention
2290 self._fs_cmd("subvolume", "rm", self.volname, subvolume, "--retain-snapshots")
2291
2292 # recreate subvolume with an invalid pool
2293 data_pool = "invalid_pool"
2294 try:
2295 self._fs_cmd("subvolume", "create", self.volname, subvolume, "--pool_layout", data_pool)
2296 except CommandFailedError as ce:
2297 self.assertEqual(ce.exitstatus, errno.EINVAL, "invalid error code on recreate of subvolume with invalid poolname")
2298 else:
2299 self.fail("expected recreate of subvolume with invalid poolname to fail")
2300
2301 # fetch info
2302 subvol_info = json.loads(self._fs_cmd("subvolume", "info", self.volname, subvolume))
2303 self.assertEqual(subvol_info["state"], "snapshot-retained",
2304 msg="expected state to be 'snapshot-retained', found '{0}".format(subvol_info["state"]))
2305
2306 # getpath
2307 try:
2308 self._fs_cmd("subvolume", "getpath", self.volname, subvolume)
2309 except CommandFailedError as ce:
2310 self.assertEqual(ce.exitstatus, errno.ENOENT, "invalid error code on getpath of subvolume with retained snapshots")
2311 else:
2312 self.fail("expected getpath of subvolume with retained snapshots to fail")
2313
2314 # remove snapshot (should remove volume)
2315 self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume, snapshot)
2316
2317 # verify trash dir is clean
2318 self._wait_for_trash_empty()
2319
2320 def test_subvolume_retain_snapshot_trash_busy_recreate(self):
2321 """
2322 ensure retained subvolume recreate fails if its trash is not yet purged
2323 """
2324 subvolume = self._generate_random_subvolume_name()
2325 snapshot = self._generate_random_snapshot_name()
2326
2327 # create subvolume
2328 self._fs_cmd("subvolume", "create", self.volname, subvolume)
2329
2330 # snapshot subvolume
2331 self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolume, snapshot)
2332
2333 # remove with snapshot retention
2334 self._fs_cmd("subvolume", "rm", self.volname, subvolume, "--retain-snapshots")
2335
2336 # fake a trash entry
2337 self._update_fake_trash(subvolume)
2338
2339 # recreate subvolume
2340 try:
2341 self._fs_cmd("subvolume", "create", self.volname, subvolume)
2342 except CommandFailedError as ce:
2343 self.assertEqual(ce.exitstatus, errno.EAGAIN, "invalid error code on recreate of subvolume with purge pending")
2344 else:
2345 self.fail("expected recreate of subvolume with purge pending to fail")
2346
2347 # clear fake trash entry
2348 self._update_fake_trash(subvolume, create=False)
2349
2350 # recreate subvolume
2351 self._fs_cmd("subvolume", "create", self.volname, subvolume)
2352
2353 # remove snapshot
2354 self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume, snapshot)
2355
2356 # remove subvolume
2357 self._fs_cmd("subvolume", "rm", self.volname, subvolume)
2358
2359 # verify trash dir is clean
2360 self._wait_for_trash_empty()
2361
2362 def test_subvolume_retain_snapshot_trash_busy_recreate_clone(self):
2363 """
2364 ensure retained clone recreate fails if its trash is not yet purged
2365 """
2366 subvolume = self._generate_random_subvolume_name()
2367 snapshot = self._generate_random_snapshot_name()
2368 clone = self._generate_random_clone_name()
2369
2370 # create subvolume
2371 self._fs_cmd("subvolume", "create", self.volname, subvolume)
2372
2373 # snapshot subvolume
2374 self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolume, snapshot)
2375
2376 # clone subvolume snapshot
2377 self._fs_cmd("subvolume", "snapshot", "clone", self.volname, subvolume, snapshot, clone)
2378
2379 # check clone status
2380 self._wait_for_clone_to_complete(clone)
2381
2382 # snapshot clone
2383 self._fs_cmd("subvolume", "snapshot", "create", self.volname, clone, snapshot)
2384
2385 # remove clone with snapshot retention
2386 self._fs_cmd("subvolume", "rm", self.volname, clone, "--retain-snapshots")
2387
2388 # fake a trash entry
2389 self._update_fake_trash(clone)
2390
2391 # clone subvolume snapshot (recreate)
2392 try:
2393 self._fs_cmd("subvolume", "snapshot", "clone", self.volname, subvolume, snapshot, clone)
2394 except CommandFailedError as ce:
2395 self.assertEqual(ce.exitstatus, errno.EAGAIN, "invalid error code on recreate of clone with purge pending")
2396 else:
2397 self.fail("expected recreate of clone with purge pending to fail")
2398
2399 # clear fake trash entry
2400 self._update_fake_trash(clone, create=False)
2401
2402 # recreate subvolume
2403 self._fs_cmd("subvolume", "snapshot", "clone", self.volname, subvolume, snapshot, clone)
2404
2405 # check clone status
2406 self._wait_for_clone_to_complete(clone)
2407
2408 # remove snapshot
2409 self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume, snapshot)
2410 self._fs_cmd("subvolume", "snapshot", "rm", self.volname, clone, snapshot)
2411
2412 # remove subvolume
2413 self._fs_cmd("subvolume", "rm", self.volname, subvolume)
2414 self._fs_cmd("subvolume", "rm", self.volname, clone)
2415
2416 # verify trash dir is clean
2417 self._wait_for_trash_empty()
2418
2419 def test_subvolume_retain_snapshot_recreate_subvolume(self):
2420 """
2421 ensure a retained subvolume can be recreated and further snapshotted
2422 """
2423 snap_md = ["created_at", "data_pool", "has_pending_clones", "size"]
2424
2425 subvolume = self._generate_random_subvolume_name()
2426 snapshot1, snapshot2 = self._generate_random_snapshot_name(2)
2427
2428 # create subvolume
2429 self._fs_cmd("subvolume", "create", self.volname, subvolume)
2430
2431 # snapshot subvolume
2432 self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolume, snapshot1)
2433
2434 # remove with snapshot retention
2435 self._fs_cmd("subvolume", "rm", self.volname, subvolume, "--retain-snapshots")
2436
2437 # fetch info
2438 subvol_info = json.loads(self._fs_cmd("subvolume", "info", self.volname, subvolume))
2439 self.assertEqual(subvol_info["state"], "snapshot-retained",
2440 msg="expected state to be 'snapshot-retained', found '{0}".format(subvol_info["state"]))
2441
2442 # recreate retained subvolume
2443 self._fs_cmd("subvolume", "create", self.volname, subvolume)
2444
2445 # fetch info
2446 subvol_info = json.loads(self._fs_cmd("subvolume", "info", self.volname, subvolume))
2447 self.assertEqual(subvol_info["state"], "complete",
2448 msg="expected state to be 'snapshot-retained', found '{0}".format(subvol_info["state"]))
2449
2450 # snapshot info (older snapshot)
2451 snap_info = json.loads(self._get_subvolume_snapshot_info(self.volname, subvolume, snapshot1))
2452 for md in snap_md:
2453 self.assertIn(md, snap_info, "'{0}' key not present in metadata of snapshot".format(md))
2454 self.assertEqual(snap_info["has_pending_clones"], "no")
2455
2456 # snap-create (new snapshot)
2457 self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolume, snapshot2)
2458
2459 # remove with retain snapshots
2460 self._fs_cmd("subvolume", "rm", self.volname, subvolume, "--retain-snapshots")
2461
2462 # list snapshots
2463 subvolsnapshotls = json.loads(self._fs_cmd('subvolume', 'snapshot', 'ls', self.volname, subvolume))
2464 self.assertEqual(len(subvolsnapshotls), 2, "Expected the 'fs subvolume snapshot ls' command to list the"
2465 " created subvolume snapshots")
2466 snapshotnames = [snapshot['name'] for snapshot in subvolsnapshotls]
2467 for snap in [snapshot1, snapshot2]:
2468 self.assertIn(snap, snapshotnames, "Missing snapshot '{0}' in snapshot list".format(snap))
2469
2470 # remove snapshots (should remove volume)
2471 self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume, snapshot1)
2472 self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume, snapshot2)
2473
2474 # verify list subvolumes returns an empty list
2475 subvolumels = json.loads(self._fs_cmd('subvolume', 'ls', self.volname))
2476 self.assertEqual(len(subvolumels), 0)
2477
2478 # verify trash dir is clean
2479 self._wait_for_trash_empty()
2480
2481 def test_subvolume_retain_snapshot_clone(self):
2482 """
2483 clone a snapshot from a snapshot retained subvolume
2484 """
2485 subvolume = self._generate_random_subvolume_name()
2486 snapshot = self._generate_random_snapshot_name()
2487 clone = self._generate_random_clone_name()
2488
2489 # create subvolume
2490 self._fs_cmd("subvolume", "create", self.volname, subvolume)
2491
2492 # store path for clone verification
2493 subvol_path = self._get_subvolume_path(self.volname, subvolume)
2494
2495 # do some IO
2496 self._do_subvolume_io(subvolume, number_of_files=16)
2497
2498 # snapshot subvolume
2499 self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolume, snapshot)
2500
2501 # remove with snapshot retention
2502 self._fs_cmd("subvolume", "rm", self.volname, subvolume, "--retain-snapshots")
2503
2504 # clone retained subvolume snapshot
2505 self._fs_cmd("subvolume", "snapshot", "clone", self.volname, subvolume, snapshot, clone)
2506
2507 # check clone status
2508 self._wait_for_clone_to_complete(clone)
2509
2510 # verify clone
2511 self._verify_clone(subvolume, snapshot, clone, subvol_path=subvol_path)
2512
2513 # remove snapshots (removes retained volume)
2514 self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume, snapshot)
2515
2516 # remove subvolume
2517 self._fs_cmd("subvolume", "rm", self.volname, clone)
2518
2519 # verify list subvolumes returns an empty list
2520 subvolumels = json.loads(self._fs_cmd('subvolume', 'ls', self.volname))
2521 self.assertEqual(len(subvolumels), 0)
2522
2523 # verify trash dir is clean
2524 self._wait_for_trash_empty()
2525
2526 def test_subvolume_retain_snapshot_recreate(self):
2527 """
2528 recreate a subvolume from one of its retained snapshots
2529 """
2530 subvolume = self._generate_random_subvolume_name()
2531 snapshot = self._generate_random_snapshot_name()
2532
2533 # create subvolume
2534 self._fs_cmd("subvolume", "create", self.volname, subvolume)
2535
2536 # store path for clone verification
2537 subvol_path = self._get_subvolume_path(self.volname, subvolume)
2538
2539 # do some IO
2540 self._do_subvolume_io(subvolume, number_of_files=16)
2541
2542 # snapshot subvolume
2543 self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolume, snapshot)
2544
2545 # remove with snapshot retention
2546 self._fs_cmd("subvolume", "rm", self.volname, subvolume, "--retain-snapshots")
2547
2548 # recreate retained subvolume using its own snapshot to clone
2549 self._fs_cmd("subvolume", "snapshot", "clone", self.volname, subvolume, snapshot, subvolume)
2550
2551 # check clone status
2552 self._wait_for_clone_to_complete(subvolume)
2553
2554 # verify clone
2555 self._verify_clone(subvolume, snapshot, subvolume, subvol_path=subvol_path)
2556
2557 # remove snapshot
2558 self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume, snapshot)
2559
2560 # remove subvolume
2561 self._fs_cmd("subvolume", "rm", self.volname, subvolume)
2562
2563 # verify list subvolumes returns an empty list
2564 subvolumels = json.loads(self._fs_cmd('subvolume', 'ls', self.volname))
2565 self.assertEqual(len(subvolumels), 0)
2566
2567 # verify trash dir is clean
2568 self._wait_for_trash_empty()
2569
2570 def test_subvolume_clone_retain_snapshot_with_snapshots(self):
2571 """
2572 retain snapshots of a cloned subvolume and check disallowed operations
2573 """
2574 subvolume = self._generate_random_subvolume_name()
2575 snapshot1, snapshot2 = self._generate_random_snapshot_name(2)
2576 clone = self._generate_random_clone_name()
2577
2578 # create subvolume
2579 self._fs_cmd("subvolume", "create", self.volname, subvolume)
2580
2581 # store path for clone verification
2582 subvol1_path = self._get_subvolume_path(self.volname, subvolume)
2583
2584 # do some IO
2585 self._do_subvolume_io(subvolume, number_of_files=16)
2586
2587 # snapshot subvolume
2588 self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolume, snapshot1)
2589
2590 # remove with snapshot retention
2591 self._fs_cmd("subvolume", "rm", self.volname, subvolume, "--retain-snapshots")
2592
2593 # clone retained subvolume snapshot
2594 self._fs_cmd("subvolume", "snapshot", "clone", self.volname, subvolume, snapshot1, clone)
2595
2596 # check clone status
2597 self._wait_for_clone_to_complete(clone)
2598
2599 # verify clone
2600 self._verify_clone(subvolume, snapshot1, clone, subvol_path=subvol1_path)
2601
2602 # create a snapshot on the clone
2603 self._fs_cmd("subvolume", "snapshot", "create", self.volname, clone, snapshot2)
2604
2605 # retain a clone
2606 self._fs_cmd("subvolume", "rm", self.volname, clone, "--retain-snapshots")
2607
2608 # list snapshots
2609 clonesnapshotls = json.loads(self._fs_cmd('subvolume', 'snapshot', 'ls', self.volname, clone))
2610 self.assertEqual(len(clonesnapshotls), 1, "Expected the 'fs subvolume snapshot ls' command to list the"
2611 " created subvolume snapshots")
2612 snapshotnames = [snapshot['name'] for snapshot in clonesnapshotls]
2613 for snap in [snapshot2]:
2614 self.assertIn(snap, snapshotnames, "Missing snapshot '{0}' in snapshot list".format(snap))
2615
2616 ## check disallowed operations on retained clone
2617 # clone-status
2618 try:
2619 self._fs_cmd("clone", "status", self.volname, clone)
2620 except CommandFailedError as ce:
2621 self.assertEqual(ce.exitstatus, errno.ENOENT, "invalid error code on clone status of clone with retained snapshots")
2622 else:
2623 self.fail("expected clone status of clone with retained snapshots to fail")
2624
2625 # clone-cancel
2626 try:
2627 self._fs_cmd("clone", "cancel", self.volname, clone)
2628 except CommandFailedError as ce:
2629 self.assertEqual(ce.exitstatus, errno.ENOENT, "invalid error code on clone cancel of clone with retained snapshots")
2630 else:
2631 self.fail("expected clone cancel of clone with retained snapshots to fail")
2632
2633 # remove snapshots (removes subvolumes as all are in retained state)
2634 self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume, snapshot1)
2635 self._fs_cmd("subvolume", "snapshot", "rm", self.volname, clone, snapshot2)
2636
2637 # verify list subvolumes returns an empty list
2638 subvolumels = json.loads(self._fs_cmd('subvolume', 'ls', self.volname))
2639 self.assertEqual(len(subvolumels), 0)
2640
2641 # verify trash dir is clean
2642 self._wait_for_trash_empty()
2643
2644 def test_subvolume_retain_snapshot_clone_from_newer_snapshot(self):
2645 """
2646 clone a subvolume from recreated subvolume's latest snapshot
2647 """
2648 subvolume = self._generate_random_subvolume_name()
2649 snapshot1, snapshot2 = self._generate_random_snapshot_name(2)
2650 clone = self._generate_random_clone_name(1)
2651
2652 # create subvolume
2653 self._fs_cmd("subvolume", "create", self.volname, subvolume)
2654
2655 # do some IO
2656 self._do_subvolume_io(subvolume, number_of_files=16)
2657
2658 # snapshot subvolume
2659 self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolume, snapshot1)
2660
2661 # remove with snapshot retention
2662 self._fs_cmd("subvolume", "rm", self.volname, subvolume, "--retain-snapshots")
2663
2664 # recreate subvolume
2665 self._fs_cmd("subvolume", "create", self.volname, subvolume)
2666
2667 # get and store path for clone verification
2668 subvol2_path = self._get_subvolume_path(self.volname, subvolume)
2669
2670 # do some IO
2671 self._do_subvolume_io(subvolume, number_of_files=16)
2672
2673 # snapshot newer subvolume
2674 self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolume, snapshot2)
2675
2676 # remove with snapshot retention
2677 self._fs_cmd("subvolume", "rm", self.volname, subvolume, "--retain-snapshots")
2678
2679 # clone retained subvolume's newer snapshot
2680 self._fs_cmd("subvolume", "snapshot", "clone", self.volname, subvolume, snapshot2, clone)
2681
2682 # check clone status
2683 self._wait_for_clone_to_complete(clone)
2684
2685 # verify clone
2686 self._verify_clone(subvolume, snapshot2, clone, subvol_path=subvol2_path)
2687
2688 # remove snapshot
2689 self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume, snapshot1)
2690 self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume, snapshot2)
2691
2692 # remove subvolume
2693 self._fs_cmd("subvolume", "rm", self.volname, clone)
2694
2695 # verify list subvolumes returns an empty list
2696 subvolumels = json.loads(self._fs_cmd('subvolume', 'ls', self.volname))
2697 self.assertEqual(len(subvolumels), 0)
2698
2699 # verify trash dir is clean
2700 self._wait_for_trash_empty()
2701
2702 def test_subvolume_snapshot_protect_unprotect_sanity(self):
2703 """
2704 Snapshot protect/unprotect commands are deprecated. This test exists to ensure that
2705 invoking the command does not cause errors, till they are removed from a subsequent release.
2706 """
2707 subvolume = self._generate_random_subvolume_name()
2708 snapshot = self._generate_random_snapshot_name()
2709 clone = self._generate_random_clone_name()
2710
2711 # create subvolume
2712 self._fs_cmd("subvolume", "create", self.volname, subvolume)
2713
2714 # do some IO
2715 self._do_subvolume_io(subvolume, number_of_files=64)
2716
2717 # snapshot subvolume
2718 self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolume, snapshot)
2719
2720 # now, protect snapshot
2721 self._fs_cmd("subvolume", "snapshot", "protect", self.volname, subvolume, snapshot)
2722
2723 # schedule a clone
2724 self._fs_cmd("subvolume", "snapshot", "clone", self.volname, subvolume, snapshot, clone)
2725
2726 # check clone status
2727 self._wait_for_clone_to_complete(clone)
2728
2729 # now, unprotect snapshot
2730 self._fs_cmd("subvolume", "snapshot", "unprotect", self.volname, subvolume, snapshot)
2731
2732 # verify clone
2733 self._verify_clone(subvolume, snapshot, clone)
2734
2735 # remove snapshot
2736 self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume, snapshot)
2737
2738 # remove subvolumes
2739 self._fs_cmd("subvolume", "rm", self.volname, subvolume)
2740 self._fs_cmd("subvolume", "rm", self.volname, clone)
2741
2742 # verify trash dir is clean
2743 self._wait_for_trash_empty()
2744
2745 def test_subvolume_snapshot_clone(self):
2746 subvolume = self._generate_random_subvolume_name()
2747 snapshot = self._generate_random_snapshot_name()
2748 clone = self._generate_random_clone_name()
2749
2750 # create subvolume
2751 self._fs_cmd("subvolume", "create", self.volname, subvolume)
2752
2753 # do some IO
2754 self._do_subvolume_io(subvolume, number_of_files=64)
2755
2756 # snapshot subvolume
2757 self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolume, snapshot)
2758
2759 # schedule a clone
2760 self._fs_cmd("subvolume", "snapshot", "clone", self.volname, subvolume, snapshot, clone)
2761
2762 # check clone status
2763 self._wait_for_clone_to_complete(clone)
2764
2765 # verify clone
2766 self._verify_clone(subvolume, snapshot, clone)
2767
2768 # remove snapshot
2769 self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume, snapshot)
2770
2771 # remove subvolumes
2772 self._fs_cmd("subvolume", "rm", self.volname, subvolume)
2773 self._fs_cmd("subvolume", "rm", self.volname, clone)
2774
2775 # verify trash dir is clean
2776 self._wait_for_trash_empty()
2777
2778 def test_subvolume_snapshot_reconf_max_concurrent_clones(self):
2779 """
2780 Validate 'max_concurrent_clones' config option
2781 """
2782
2783 # get the default number of cloner threads
2784 default_max_concurrent_clones = int(self.config_get('mgr', 'mgr/volumes/max_concurrent_clones'))
2785 self.assertEqual(default_max_concurrent_clones, 4)
2786
2787 # Increase number of cloner threads
2788 self.config_set('mgr', 'mgr/volumes/max_concurrent_clones', 6)
2789 max_concurrent_clones = int(self.config_get('mgr', 'mgr/volumes/max_concurrent_clones'))
2790 self.assertEqual(max_concurrent_clones, 6)
2791
2792 # Decrease number of cloner threads
2793 self.config_set('mgr', 'mgr/volumes/max_concurrent_clones', 2)
2794 max_concurrent_clones = int(self.config_get('mgr', 'mgr/volumes/max_concurrent_clones'))
2795 self.assertEqual(max_concurrent_clones, 2)
2796
2797 def test_subvolume_snapshot_clone_pool_layout(self):
2798 subvolume = self._generate_random_subvolume_name()
2799 snapshot = self._generate_random_snapshot_name()
2800 clone = self._generate_random_clone_name()
2801
2802 # add data pool
2803 new_pool = "new_pool"
2804 self.fs.add_data_pool(new_pool)
2805
2806 # create subvolume
2807 self._fs_cmd("subvolume", "create", self.volname, subvolume)
2808
2809 # do some IO
2810 self._do_subvolume_io(subvolume, number_of_files=32)
2811
2812 # snapshot subvolume
2813 self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolume, snapshot)
2814
2815 # schedule a clone
2816 self._fs_cmd("subvolume", "snapshot", "clone", self.volname, subvolume, snapshot, clone, "--pool_layout", new_pool)
2817
2818 # check clone status
2819 self._wait_for_clone_to_complete(clone)
2820
2821 # verify clone
2822 self._verify_clone(subvolume, snapshot, clone, clone_pool=new_pool)
2823
2824 # remove snapshot
2825 self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume, snapshot)
2826
2827 subvol_path = self._get_subvolume_path(self.volname, clone)
2828 desired_pool = self.mount_a.getfattr(subvol_path, "ceph.dir.layout.pool")
2829 self.assertEqual(desired_pool, new_pool)
2830
2831 # remove subvolumes
2832 self._fs_cmd("subvolume", "rm", self.volname, subvolume)
2833 self._fs_cmd("subvolume", "rm", self.volname, clone)
2834
2835 # verify trash dir is clean
2836 self._wait_for_trash_empty()
2837
2838 def test_subvolume_snapshot_clone_with_attrs(self):
2839 subvolume = self._generate_random_subvolume_name()
2840 snapshot = self._generate_random_snapshot_name()
2841 clone = self._generate_random_clone_name()
2842
2843 mode = "777"
2844 uid = "1000"
2845 gid = "1000"
2846 new_uid = "1001"
2847 new_gid = "1001"
2848 new_mode = "700"
2849
2850 # create subvolume
2851 self._fs_cmd("subvolume", "create", self.volname, subvolume, "--mode", mode, "--uid", uid, "--gid", gid)
2852
2853 # do some IO
2854 self._do_subvolume_io(subvolume, number_of_files=32)
2855
2856 # snapshot subvolume
2857 self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolume, snapshot)
2858
2859 # change subvolume attrs (to ensure clone picks up snapshot attrs)
2860 self._do_subvolume_attr_update(subvolume, new_uid, new_gid, new_mode)
2861
2862 # schedule a clone
2863 self._fs_cmd("subvolume", "snapshot", "clone", self.volname, subvolume, snapshot, clone)
2864
2865 # check clone status
2866 self._wait_for_clone_to_complete(clone)
2867
2868 # verify clone
2869 self._verify_clone(subvolume, snapshot, clone)
2870
2871 # remove snapshot
2872 self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume, snapshot)
2873
2874 # remove subvolumes
2875 self._fs_cmd("subvolume", "rm", self.volname, subvolume)
2876 self._fs_cmd("subvolume", "rm", self.volname, clone)
2877
2878 # verify trash dir is clean
2879 self._wait_for_trash_empty()
2880
2881 def test_subvolume_clone_inherit_snapshot_namespace_and_size(self):
2882 subvolume = self._generate_random_subvolume_name()
2883 snapshot = self._generate_random_snapshot_name()
2884 clone = self._generate_random_clone_name()
2885 osize = self.DEFAULT_FILE_SIZE*1024*1024*12
2886
2887 # create subvolume, in an isolated namespace with a specified size
2888 self._fs_cmd("subvolume", "create", self.volname, subvolume, "--namespace-isolated", "--size", str(osize))
2889
2890 # do some IO
2891 self._do_subvolume_io(subvolume, number_of_files=8)
2892
2893 # snapshot subvolume
2894 self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolume, snapshot)
2895
2896 # create a pool different from current subvolume pool
2897 subvol_path = self._get_subvolume_path(self.volname, subvolume)
2898 default_pool = self.mount_a.getfattr(subvol_path, "ceph.dir.layout.pool")
2899 new_pool = "new_pool"
2900 self.assertNotEqual(default_pool, new_pool)
2901 self.fs.add_data_pool(new_pool)
2902
2903 # update source subvolume pool
2904 self._do_subvolume_pool_and_namespace_update(subvolume, pool=new_pool, pool_namespace="")
2905
2906 # schedule a clone, with NO --pool specification
2907 self._fs_cmd("subvolume", "snapshot", "clone", self.volname, subvolume, snapshot, clone)
2908
2909 # check clone status
2910 self._wait_for_clone_to_complete(clone)
2911
2912 # verify clone
2913 self._verify_clone(subvolume, snapshot, clone)
2914
2915 # remove snapshot
2916 self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume, snapshot)
2917
2918 # remove subvolumes
2919 self._fs_cmd("subvolume", "rm", self.volname, subvolume)
2920 self._fs_cmd("subvolume", "rm", self.volname, clone)
2921
2922 # verify trash dir is clean
2923 self._wait_for_trash_empty()
2924
2925 def test_subvolume_snapshot_clone_and_reclone(self):
2926 subvolume = self._generate_random_subvolume_name()
2927 snapshot = self._generate_random_snapshot_name()
2928 clone1, clone2 = self._generate_random_clone_name(2)
2929
2930 # create subvolume
2931 self._fs_cmd("subvolume", "create", self.volname, subvolume)
2932
2933 # do some IO
2934 self._do_subvolume_io(subvolume, number_of_files=32)
2935
2936 # snapshot subvolume
2937 self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolume, snapshot)
2938
2939 # schedule a clone
2940 self._fs_cmd("subvolume", "snapshot", "clone", self.volname, subvolume, snapshot, clone1)
2941
2942 # check clone status
2943 self._wait_for_clone_to_complete(clone1)
2944
2945 # verify clone
2946 self._verify_clone(subvolume, snapshot, clone1)
2947
2948 # remove snapshot
2949 self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume, snapshot)
2950
2951 # now the clone is just like a normal subvolume -- snapshot the clone and fork
2952 # another clone. before that do some IO so it's can be differentiated.
2953 self._do_subvolume_io(clone1, create_dir="data", number_of_files=32)
2954
2955 # snapshot clone -- use same snap name
2956 self._fs_cmd("subvolume", "snapshot", "create", self.volname, clone1, snapshot)
2957
2958 # schedule a clone
2959 self._fs_cmd("subvolume", "snapshot", "clone", self.volname, clone1, snapshot, clone2)
2960
2961 # check clone status
2962 self._wait_for_clone_to_complete(clone2)
2963
2964 # verify clone
2965 self._verify_clone(clone1, snapshot, clone2)
2966
2967 # remove snapshot
2968 self._fs_cmd("subvolume", "snapshot", "rm", self.volname, clone1, snapshot)
2969
2970 # remove subvolumes
2971 self._fs_cmd("subvolume", "rm", self.volname, subvolume)
2972 self._fs_cmd("subvolume", "rm", self.volname, clone1)
2973 self._fs_cmd("subvolume", "rm", self.volname, clone2)
2974
2975 # verify trash dir is clean
2976 self._wait_for_trash_empty()
2977
2978 def test_subvolume_snapshot_clone_under_group(self):
2979 subvolume = self._generate_random_subvolume_name()
2980 snapshot = self._generate_random_snapshot_name()
2981 clone = self._generate_random_clone_name()
2982 group = self._generate_random_group_name()
2983
2984 # create subvolume
2985 self._fs_cmd("subvolume", "create", self.volname, subvolume)
2986
2987 # do some IO
2988 self._do_subvolume_io(subvolume, number_of_files=32)
2989
2990 # snapshot subvolume
2991 self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolume, snapshot)
2992
2993 # create group
2994 self._fs_cmd("subvolumegroup", "create", self.volname, group)
2995
2996 # schedule a clone
2997 self._fs_cmd("subvolume", "snapshot", "clone", self.volname, subvolume, snapshot, clone, '--target_group_name', group)
2998
2999 # check clone status
3000 self._wait_for_clone_to_complete(clone, clone_group=group)
3001
3002 # verify clone
3003 self._verify_clone(subvolume, snapshot, clone, clone_group=group)
3004
3005 # remove snapshot
3006 self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume, snapshot)
3007
3008 # remove subvolumes
3009 self._fs_cmd("subvolume", "rm", self.volname, subvolume)
3010 self._fs_cmd("subvolume", "rm", self.volname, clone, group)
3011
3012 # remove group
3013 self._fs_cmd("subvolumegroup", "rm", self.volname, group)
3014
3015 # verify trash dir is clean
3016 self._wait_for_trash_empty()
3017
3018 def test_subvolume_under_group_snapshot_clone(self):
3019 subvolume = self._generate_random_subvolume_name()
3020 group = self._generate_random_group_name()
3021 snapshot = self._generate_random_snapshot_name()
3022 clone = self._generate_random_clone_name()
3023
3024 # create group
3025 self._fs_cmd("subvolumegroup", "create", self.volname, group)
3026
3027 # create subvolume
3028 self._fs_cmd("subvolume", "create", self.volname, subvolume, group)
3029
3030 # do some IO
3031 self._do_subvolume_io(subvolume, subvolume_group=group, number_of_files=32)
3032
3033 # snapshot subvolume
3034 self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolume, snapshot, group)
3035
3036 # schedule a clone
3037 self._fs_cmd("subvolume", "snapshot", "clone", self.volname, subvolume, snapshot, clone, '--group_name', group)
3038
3039 # check clone status
3040 self._wait_for_clone_to_complete(clone)
3041
3042 # verify clone
3043 self._verify_clone(subvolume, snapshot, clone, source_group=group)
3044
3045 # remove snapshot
3046 self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume, snapshot, group)
3047
3048 # remove subvolumes
3049 self._fs_cmd("subvolume", "rm", self.volname, subvolume, group)
3050 self._fs_cmd("subvolume", "rm", self.volname, clone)
3051
3052 # remove group
3053 self._fs_cmd("subvolumegroup", "rm", self.volname, group)
3054
3055 # verify trash dir is clean
3056 self._wait_for_trash_empty()
3057
3058 def test_subvolume_snapshot_clone_different_groups(self):
3059 subvolume = self._generate_random_subvolume_name()
3060 snapshot = self._generate_random_snapshot_name()
3061 clone = self._generate_random_clone_name()
3062 s_group, c_group = self._generate_random_group_name(2)
3063
3064 # create groups
3065 self._fs_cmd("subvolumegroup", "create", self.volname, s_group)
3066 self._fs_cmd("subvolumegroup", "create", self.volname, c_group)
3067
3068 # create subvolume
3069 self._fs_cmd("subvolume", "create", self.volname, subvolume, s_group)
3070
3071 # do some IO
3072 self._do_subvolume_io(subvolume, subvolume_group=s_group, number_of_files=32)
3073
3074 # snapshot subvolume
3075 self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolume, snapshot, s_group)
3076
3077 # schedule a clone
3078 self._fs_cmd("subvolume", "snapshot", "clone", self.volname, subvolume, snapshot, clone,
3079 '--group_name', s_group, '--target_group_name', c_group)
3080
3081 # check clone status
3082 self._wait_for_clone_to_complete(clone, clone_group=c_group)
3083
3084 # verify clone
3085 self._verify_clone(subvolume, snapshot, clone, source_group=s_group, clone_group=c_group)
3086
3087 # remove snapshot
3088 self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume, snapshot, s_group)
3089
3090 # remove subvolumes
3091 self._fs_cmd("subvolume", "rm", self.volname, subvolume, s_group)
3092 self._fs_cmd("subvolume", "rm", self.volname, clone, c_group)
3093
3094 # remove groups
3095 self._fs_cmd("subvolumegroup", "rm", self.volname, s_group)
3096 self._fs_cmd("subvolumegroup", "rm", self.volname, c_group)
3097
3098 # verify trash dir is clean
3099 self._wait_for_trash_empty()
3100
    def test_subvolume_snapshot_clone_with_upgrade(self):
        """
        yet another poor man's upgrade test -- rather than going through a full
        upgrade cycle, emulate old types subvolumes by going through the wormhole
        and verify clone operation.
        further ensure that a legacy volume is not updated to v2, but clone is.
        """
        subvolume = self._generate_random_subvolume_name()
        snapshot = self._generate_random_snapshot_name()
        clone = self._generate_random_clone_name()

        # emulate a old-fashioned subvolume: create the directory tree by hand
        # under volumes/_nogroup instead of via 'fs subvolume create'
        createpath = os.path.join(".", "volumes", "_nogroup", subvolume)
        self.mount_a.run_shell(['mkdir', '-p', createpath])

        # add required xattrs to subvolume: a legacy subvolume needs at least
        # a pool layout to be recognized; reuse the root's pool
        default_pool = self.mount_a.getfattr(".", "ceph.dir.layout.pool")
        self.mount_a.setfattr(createpath, 'ceph.dir.layout.pool', default_pool)

        # do some IO
        self._do_subvolume_io(subvolume, number_of_files=64)

        # snapshot subvolume
        self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolume, snapshot)

        # ensure metadata file is in legacy location, with required version v1
        # (i.e. the hand-made subvolume was adopted as v1, not upgraded to v2)
        self._assert_meta_location_and_version(self.volname, subvolume, version=1, legacy=True)

        # schedule a clone
        self._fs_cmd("subvolume", "snapshot", "clone", self.volname, subvolume, snapshot, clone)

        # snapshot should not be deletable now: removing a snapshot with an
        # in-flight clone must be refused with EAGAIN
        try:
            self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume, snapshot)
        except CommandFailedError as ce:
            self.assertEqual(ce.exitstatus, errno.EAGAIN, msg="invalid error code when removing source snapshot of a clone")
        else:
            self.fail("expected removing source snapshot of a clone to fail")

        # check clone status
        self._wait_for_clone_to_complete(clone)

        # verify clone (source_version=1 tells the checker the source is legacy)
        self._verify_clone(subvolume, snapshot, clone, source_version=1)

        # remove snapshot
        self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume, snapshot)

        # ensure metadata file is in v2 location, with required version v2
        # (the clone, unlike its legacy source, is created as a v2 subvolume)
        self._assert_meta_location_and_version(self.volname, clone)

        # remove subvolumes
        self._fs_cmd("subvolume", "rm", self.volname, subvolume)
        self._fs_cmd("subvolume", "rm", self.volname, clone)

        # verify trash dir is clean
        self._wait_for_trash_empty()
3158
3159 def test_subvolume_clone_in_progress_getpath(self):
3160 subvolume = self._generate_random_subvolume_name()
3161 snapshot = self._generate_random_snapshot_name()
3162 clone = self._generate_random_clone_name()
3163
3164 # create subvolume
3165 self._fs_cmd("subvolume", "create", self.volname, subvolume)
3166
3167 # do some IO
3168 self._do_subvolume_io(subvolume, number_of_files=64)
3169
3170 # snapshot subvolume
3171 self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolume, snapshot)
3172
3173 # schedule a clone
3174 self._fs_cmd("subvolume", "snapshot", "clone", self.volname, subvolume, snapshot, clone)
3175
3176 # clone should not be accessible right now
3177 try:
3178 self._get_subvolume_path(self.volname, clone)
3179 except CommandFailedError as ce:
3180 if ce.exitstatus != errno.EAGAIN:
3181 raise RuntimeError("invalid error code when fetching path of an pending clone")
3182 else:
3183 raise RuntimeError("expected fetching path of an pending clone to fail")
3184
3185 # check clone status
3186 self._wait_for_clone_to_complete(clone)
3187
3188 # clone should be accessible now
3189 subvolpath = self._get_subvolume_path(self.volname, clone)
3190 self.assertNotEqual(subvolpath, None)
3191
3192 # verify clone
3193 self._verify_clone(subvolume, snapshot, clone)
3194
3195 # remove snapshot
3196 self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume, snapshot)
3197
3198 # remove subvolumes
3199 self._fs_cmd("subvolume", "rm", self.volname, subvolume)
3200 self._fs_cmd("subvolume", "rm", self.volname, clone)
3201
3202 # verify trash dir is clean
3203 self._wait_for_trash_empty()
3204
3205 def test_subvolume_clone_in_progress_snapshot_rm(self):
3206 subvolume = self._generate_random_subvolume_name()
3207 snapshot = self._generate_random_snapshot_name()
3208 clone = self._generate_random_clone_name()
3209
3210 # create subvolume
3211 self._fs_cmd("subvolume", "create", self.volname, subvolume)
3212
3213 # do some IO
3214 self._do_subvolume_io(subvolume, number_of_files=64)
3215
3216 # snapshot subvolume
3217 self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolume, snapshot)
3218
3219 # schedule a clone
3220 self._fs_cmd("subvolume", "snapshot", "clone", self.volname, subvolume, snapshot, clone)
3221
3222 # snapshot should not be deletable now
3223 try:
3224 self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume, snapshot)
3225 except CommandFailedError as ce:
3226 self.assertEqual(ce.exitstatus, errno.EAGAIN, msg="invalid error code when removing source snapshot of a clone")
3227 else:
3228 self.fail("expected removing source snapshot of a clone to fail")
3229
3230 # check clone status
3231 self._wait_for_clone_to_complete(clone)
3232
3233 # clone should be accessible now
3234 subvolpath = self._get_subvolume_path(self.volname, clone)
3235 self.assertNotEqual(subvolpath, None)
3236
3237 # verify clone
3238 self._verify_clone(subvolume, snapshot, clone)
3239
3240 # remove snapshot
3241 self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume, snapshot)
3242
3243 # remove subvolumes
3244 self._fs_cmd("subvolume", "rm", self.volname, subvolume)
3245 self._fs_cmd("subvolume", "rm", self.volname, clone)
3246
3247 # verify trash dir is clean
3248 self._wait_for_trash_empty()
3249
3250 def test_subvolume_clone_in_progress_source(self):
3251 subvolume = self._generate_random_subvolume_name()
3252 snapshot = self._generate_random_snapshot_name()
3253 clone = self._generate_random_clone_name()
3254
3255 # create subvolume
3256 self._fs_cmd("subvolume", "create", self.volname, subvolume)
3257
3258 # do some IO
3259 self._do_subvolume_io(subvolume, number_of_files=64)
3260
3261 # snapshot subvolume
3262 self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolume, snapshot)
3263
3264 # schedule a clone
3265 self._fs_cmd("subvolume", "snapshot", "clone", self.volname, subvolume, snapshot, clone)
3266
3267 # verify clone source
3268 result = json.loads(self._fs_cmd("clone", "status", self.volname, clone))
3269 source = result['status']['source']
3270 self.assertEqual(source['volume'], self.volname)
3271 self.assertEqual(source['subvolume'], subvolume)
3272 self.assertEqual(source.get('group', None), None)
3273 self.assertEqual(source['snapshot'], snapshot)
3274
3275 # check clone status
3276 self._wait_for_clone_to_complete(clone)
3277
3278 # clone should be accessible now
3279 subvolpath = self._get_subvolume_path(self.volname, clone)
3280 self.assertNotEqual(subvolpath, None)
3281
3282 # verify clone
3283 self._verify_clone(subvolume, snapshot, clone)
3284
3285 # remove snapshot
3286 self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume, snapshot)
3287
3288 # remove subvolumes
3289 self._fs_cmd("subvolume", "rm", self.volname, subvolume)
3290 self._fs_cmd("subvolume", "rm", self.volname, clone)
3291
3292 # verify trash dir is clean
3293 self._wait_for_trash_empty()
3294
3295 def test_non_clone_status(self):
3296 subvolume = self._generate_random_subvolume_name()
3297
3298 # create subvolume
3299 self._fs_cmd("subvolume", "create", self.volname, subvolume)
3300
3301 try:
3302 self._fs_cmd("clone", "status", self.volname, subvolume)
3303 except CommandFailedError as ce:
3304 if ce.exitstatus != errno.ENOTSUP:
3305 raise RuntimeError("invalid error code when fetching status of a non cloned subvolume")
3306 else:
3307 raise RuntimeError("expected fetching of clone status of a subvolume to fail")
3308
3309 # remove subvolume
3310 self._fs_cmd("subvolume", "rm", self.volname, subvolume)
3311
3312 # verify trash dir is clean
3313 self._wait_for_trash_empty()
3314
3315 def test_subvolume_snapshot_clone_on_existing_subvolumes(self):
3316 subvolume1, subvolume2 = self._generate_random_subvolume_name(2)
3317 snapshot = self._generate_random_snapshot_name()
3318 clone = self._generate_random_clone_name()
3319
3320 # create subvolumes
3321 self._fs_cmd("subvolume", "create", self.volname, subvolume1)
3322 self._fs_cmd("subvolume", "create", self.volname, subvolume2)
3323
3324 # do some IO
3325 self._do_subvolume_io(subvolume1, number_of_files=32)
3326
3327 # snapshot subvolume
3328 self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolume1, snapshot)
3329
3330 # schedule a clone with target as subvolume2
3331 try:
3332 self._fs_cmd("subvolume", "snapshot", "clone", self.volname, subvolume1, snapshot, subvolume2)
3333 except CommandFailedError as ce:
3334 if ce.exitstatus != errno.EEXIST:
3335 raise RuntimeError("invalid error code when cloning to existing subvolume")
3336 else:
3337 raise RuntimeError("expected cloning to fail if the target is an existing subvolume")
3338
3339 self._fs_cmd("subvolume", "snapshot", "clone", self.volname, subvolume1, snapshot, clone)
3340
3341 # schedule a clone with target as clone
3342 try:
3343 self._fs_cmd("subvolume", "snapshot", "clone", self.volname, subvolume1, snapshot, clone)
3344 except CommandFailedError as ce:
3345 if ce.exitstatus != errno.EEXIST:
3346 raise RuntimeError("invalid error code when cloning to existing clone")
3347 else:
3348 raise RuntimeError("expected cloning to fail if the target is an existing clone")
3349
3350 # check clone status
3351 self._wait_for_clone_to_complete(clone)
3352
3353 # verify clone
3354 self._verify_clone(subvolume1, snapshot, clone)
3355
3356 # remove snapshot
3357 self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume1, snapshot)
3358
3359 # remove subvolumes
3360 self._fs_cmd("subvolume", "rm", self.volname, subvolume1)
3361 self._fs_cmd("subvolume", "rm", self.volname, subvolume2)
3362 self._fs_cmd("subvolume", "rm", self.volname, clone)
3363
3364 # verify trash dir is clean
3365 self._wait_for_trash_empty()
3366
3367 def test_subvolume_snapshot_clone_fail_with_remove(self):
3368 subvolume = self._generate_random_subvolume_name()
3369 snapshot = self._generate_random_snapshot_name()
3370 clone1, clone2 = self._generate_random_clone_name(2)
3371
3372 pool_capacity = 32 * 1024 * 1024
3373 # number of files required to fill up 99% of the pool
3374 nr_files = int((pool_capacity * 0.99) / (TestVolumes.DEFAULT_FILE_SIZE * 1024 * 1024))
3375
3376 # create subvolume
3377 self._fs_cmd("subvolume", "create", self.volname, subvolume)
3378
3379 # do some IO
3380 self._do_subvolume_io(subvolume, number_of_files=nr_files)
3381
3382 # snapshot subvolume
3383 self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolume, snapshot)
3384
3385 # add data pool
3386 new_pool = "new_pool"
3387 self.fs.add_data_pool(new_pool)
3388
3389 self.fs.mon_manager.raw_cluster_cmd("osd", "pool", "set-quota", new_pool,
3390 "max_bytes", "{0}".format(pool_capacity // 4))
3391
3392 # schedule a clone
3393 self._fs_cmd("subvolume", "snapshot", "clone", self.volname, subvolume, snapshot, clone1, "--pool_layout", new_pool)
3394
3395 # check clone status -- this should dramatically overshoot the pool quota
3396 self._wait_for_clone_to_complete(clone1)
3397
3398 # verify clone
3399 self._verify_clone(subvolume, snapshot, clone1, clone_pool=new_pool)
3400
3401 # wait a bit so that subsequent I/O will give pool full error
3402 time.sleep(120)
3403
3404 # schedule a clone
3405 self._fs_cmd("subvolume", "snapshot", "clone", self.volname, subvolume, snapshot, clone2, "--pool_layout", new_pool)
3406
3407 # check clone status
3408 self._wait_for_clone_to_fail(clone2)
3409
3410 # remove snapshot
3411 self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume, snapshot)
3412
3413 # remove subvolumes
3414 self._fs_cmd("subvolume", "rm", self.volname, subvolume)
3415 self._fs_cmd("subvolume", "rm", self.volname, clone1)
3416 try:
3417 self._fs_cmd("subvolume", "rm", self.volname, clone2)
3418 except CommandFailedError as ce:
3419 if ce.exitstatus != errno.EAGAIN:
3420 raise RuntimeError("invalid error code when trying to remove failed clone")
3421 else:
3422 raise RuntimeError("expected error when removing a failed clone")
3423
3424 # ... and with force, failed clone can be removed
3425 self._fs_cmd("subvolume", "rm", self.volname, clone2, "--force")
3426
3427 # verify trash dir is clean
3428 self._wait_for_trash_empty()
3429
3430 def test_subvolume_snapshot_attr_clone(self):
3431 subvolume = self._generate_random_subvolume_name()
3432 snapshot = self._generate_random_snapshot_name()
3433 clone = self._generate_random_clone_name()
3434
3435 # create subvolume
3436 self._fs_cmd("subvolume", "create", self.volname, subvolume)
3437
3438 # do some IO
3439 self._do_subvolume_io_mixed(subvolume)
3440
3441 # snapshot subvolume
3442 self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolume, snapshot)
3443
3444 # schedule a clone
3445 self._fs_cmd("subvolume", "snapshot", "clone", self.volname, subvolume, snapshot, clone)
3446
3447 # check clone status
3448 self._wait_for_clone_to_complete(clone)
3449
3450 # verify clone
3451 self._verify_clone(subvolume, snapshot, clone)
3452
3453 # remove snapshot
3454 self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume, snapshot)
3455
3456 # remove subvolumes
3457 self._fs_cmd("subvolume", "rm", self.volname, subvolume)
3458 self._fs_cmd("subvolume", "rm", self.volname, clone)
3459
3460 # verify trash dir is clean
3461 self._wait_for_trash_empty()
3462
3463 def test_subvolume_snapshot_clone_cancel_in_progress(self):
3464 subvolume = self._generate_random_subvolume_name()
3465 snapshot = self._generate_random_snapshot_name()
3466 clone = self._generate_random_clone_name()
3467
3468 # create subvolume
3469 self._fs_cmd("subvolume", "create", self.volname, subvolume)
3470
3471 # do some IO
3472 self._do_subvolume_io(subvolume, number_of_files=128)
3473
3474 # snapshot subvolume
3475 self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolume, snapshot)
3476
3477 # schedule a clone
3478 self._fs_cmd("subvolume", "snapshot", "clone", self.volname, subvolume, snapshot, clone)
3479
3480 # cancel on-going clone
3481 self._fs_cmd("clone", "cancel", self.volname, clone)
3482
3483 # verify canceled state
3484 self._check_clone_canceled(clone)
3485
3486 # remove snapshot
3487 self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume, snapshot)
3488
3489 # remove subvolumes
3490 self._fs_cmd("subvolume", "rm", self.volname, subvolume)
3491 self._fs_cmd("subvolume", "rm", self.volname, clone, "--force")
3492
3493 # verify trash dir is clean
3494 self._wait_for_trash_empty()
3495
3496 def test_subvolume_snapshot_clone_cancel_pending(self):
3497 """
3498 this test is a bit more involved compared to canceling an in-progress clone.
3499 we'd need to ensure that a to-be canceled clone has still not been picked up
3500 by cloner threads. exploit the fact that clones are picked up in an FCFS
3501 fashion and there are four (4) cloner threads by default. When the number of
3502 cloner threads increase, this test _may_ start tripping -- so, the number of
3503 clone operations would need to be jacked up.
3504 """
3505 # default number of clone threads
3506 NR_THREADS = 4
3507 # good enough for 4 threads
3508 NR_CLONES = 5
3509 # yeh, 1gig -- we need the clone to run for sometime
3510 FILE_SIZE_MB = 1024
3511
3512 subvolume = self._generate_random_subvolume_name()
3513 snapshot = self._generate_random_snapshot_name()
3514 clones = self._generate_random_clone_name(NR_CLONES)
3515
3516 # create subvolume
3517 self._fs_cmd("subvolume", "create", self.volname, subvolume)
3518
3519 # do some IO
3520 self._do_subvolume_io(subvolume, number_of_files=4, file_size=FILE_SIZE_MB)
3521
3522 # snapshot subvolume
3523 self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolume, snapshot)
3524
3525 # schedule clones
3526 for clone in clones:
3527 self._fs_cmd("subvolume", "snapshot", "clone", self.volname, subvolume, snapshot, clone)
3528
3529 to_wait = clones[0:NR_THREADS]
3530 to_cancel = clones[NR_THREADS:]
3531
3532 # cancel pending clones and verify
3533 for clone in to_cancel:
3534 status = json.loads(self._fs_cmd("clone", "status", self.volname, clone))
3535 self.assertEqual(status["status"]["state"], "pending")
3536 self._fs_cmd("clone", "cancel", self.volname, clone)
3537 self._check_clone_canceled(clone)
3538
3539 # let's cancel on-going clones. handle the case where some of the clones
3540 # _just_ complete
3541 for clone in list(to_wait):
3542 try:
3543 self._fs_cmd("clone", "cancel", self.volname, clone)
3544 to_cancel.append(clone)
3545 to_wait.remove(clone)
3546 except CommandFailedError as ce:
3547 if ce.exitstatus != errno.EINVAL:
3548 raise RuntimeError("invalid error code when cancelling on-going clone")
3549
3550 # remove snapshot
3551 self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume, snapshot)
3552
3553 # remove subvolumes
3554 self._fs_cmd("subvolume", "rm", self.volname, subvolume)
3555 for clone in to_wait:
3556 self._fs_cmd("subvolume", "rm", self.volname, clone)
3557 for clone in to_cancel:
3558 self._fs_cmd("subvolume", "rm", self.volname, clone, "--force")
3559
3560 # verify trash dir is clean
3561 self._wait_for_trash_empty()