import collections
import errno
import json
import logging
import os
import random
import time
import uuid
from hashlib import md5
from io import StringIO
from textwrap import dedent

from tasks.cephfs.cephfs_test_case import CephFSTestCase
from tasks.cephfs.fuse_mount import FuseMount
from teuthology.exceptions import CommandFailedError
# Module-level logger for this test module.
log = logging.getLogger(__name__)
class TestVolumesHelper(CephFSTestCase):
    """Helper class for testing FS volume, subvolume group and subvolume operations."""
    # Prefixes used when generating unique names for test entities.
    TEST_VOLUME_PREFIX = "volume"
    TEST_SUBVOLUME_PREFIX = "subvolume"
    TEST_GROUP_PREFIX = "group"
    TEST_SNAPSHOT_PREFIX = "snapshot"
    TEST_CLONE_PREFIX = "clone"
    TEST_FILE_NAME_PREFIX = "subvolume_file"

    # for filling subvolume with data
    # NOTE(review): original lines 28 and 30-33 are missing from this
    # extraction (likely more class-level configuration) — confirm upstream.
    DEFAULT_FILE_SIZE = 1  # MB
    DEFAULT_NUMBER_OF_FILES = 1024
37 def _fs_cmd(self
, *args
):
38 return self
.mgr_cluster
.mon_manager
.raw_cluster_cmd("fs", *args
)
40 def _raw_cmd(self
, *args
):
41 return self
.mgr_cluster
.mon_manager
.raw_cluster_cmd(*args
)
43 def __check_clone_state(self
, state
, clone
, clone_group
=None, timo
=120):
45 args
= ["clone", "status", self
.volname
, clone
]
47 args
.append(clone_group
)
50 result
= json
.loads(self
._fs
_cmd
(*args
))
51 if result
["status"]["state"] == state
:
55 self
.assertTrue(check
< timo
)
57 def _get_clone_status(self
, clone
, clone_group
=None):
58 args
= ["clone", "status", self
.volname
, clone
]
60 args
.append(clone_group
)
62 result
= json
.loads(self
._fs
_cmd
(*args
))
65 def _wait_for_clone_to_complete(self
, clone
, clone_group
=None, timo
=120):
66 self
.__check
_clone
_state
("complete", clone
, clone_group
, timo
)
68 def _wait_for_clone_to_fail(self
, clone
, clone_group
=None, timo
=120):
69 self
.__check
_clone
_state
("failed", clone
, clone_group
, timo
)
71 def _wait_for_clone_to_be_in_progress(self
, clone
, clone_group
=None, timo
=120):
72 self
.__check
_clone
_state
("in-progress", clone
, clone_group
, timo
)
74 def _check_clone_canceled(self
, clone
, clone_group
=None):
75 self
.__check
_clone
_state
("canceled", clone
, clone_group
, timo
=1)
77 def _get_subvolume_snapshot_path(self
, subvolume
, snapshot
, source_group
, subvol_path
, source_version
):
78 if source_version
== 2:
80 if subvol_path
is not None:
81 (base_path
, uuid_str
) = os
.path
.split(subvol_path
)
83 (base_path
, uuid_str
) = os
.path
.split(self
._get
_subvolume
_path
(self
.volname
, subvolume
, group_name
=source_group
))
84 return os
.path
.join(base_path
, ".snap", snapshot
, uuid_str
)
87 base_path
= self
._get
_subvolume
_path
(self
.volname
, subvolume
, group_name
=source_group
)
88 return os
.path
.join(base_path
, ".snap", snapshot
)
90 def _verify_clone_attrs(self
, source_path
, clone_path
):
94 p
= self
.mount_a
.run_shell(["find", path1
])
95 paths
= p
.stdout
.getvalue().strip().split()
97 # for each entry in source and clone (sink) verify certain inode attributes:
98 # inode type, mode, ownership, [am]time.
99 for source_path
in paths
:
100 sink_entry
= source_path
[len(path1
)+1:]
101 sink_path
= os
.path
.join(path2
, sink_entry
)
104 sval
= int(self
.mount_a
.run_shell(['stat', '-c' '%f', source_path
]).stdout
.getvalue().strip(), 16)
105 cval
= int(self
.mount_a
.run_shell(['stat', '-c' '%f', sink_path
]).stdout
.getvalue().strip(), 16)
106 self
.assertEqual(sval
, cval
)
109 sval
= int(self
.mount_a
.run_shell(['stat', '-c' '%u', source_path
]).stdout
.getvalue().strip())
110 cval
= int(self
.mount_a
.run_shell(['stat', '-c' '%u', sink_path
]).stdout
.getvalue().strip())
111 self
.assertEqual(sval
, cval
)
113 sval
= int(self
.mount_a
.run_shell(['stat', '-c' '%g', source_path
]).stdout
.getvalue().strip())
114 cval
= int(self
.mount_a
.run_shell(['stat', '-c' '%g', sink_path
]).stdout
.getvalue().strip())
115 self
.assertEqual(sval
, cval
)
118 # do not check access as kclient will generally not update this like ceph-fuse will.
119 sval
= int(self
.mount_a
.run_shell(['stat', '-c' '%Y', source_path
]).stdout
.getvalue().strip())
120 cval
= int(self
.mount_a
.run_shell(['stat', '-c' '%Y', sink_path
]).stdout
.getvalue().strip())
121 self
.assertEqual(sval
, cval
)
123 def _verify_clone_root(self
, source_path
, clone_path
, clone
, clone_group
, clone_pool
):
124 # verifies following clone root attrs quota, data_pool and pool_namespace
125 # remaining attributes of clone root are validated in _verify_clone_attrs
127 clone_info
= json
.loads(self
._get
_subvolume
_info
(self
.volname
, clone
, clone_group
))
129 # verify quota is inherited from source snapshot
130 src_quota
= self
.mount_a
.getfattr(source_path
, "ceph.quota.max_bytes")
131 # FIXME: kclient fails to get this quota value: https://tracker.ceph.com/issues/48075
132 if isinstance(self
.mount_a
, FuseMount
):
133 self
.assertEqual(clone_info
["bytes_quota"], "infinite" if src_quota
is None else int(src_quota
))
136 # verify pool is set as per request
137 self
.assertEqual(clone_info
["data_pool"], clone_pool
)
139 # verify pool and pool namespace are inherited from snapshot
140 self
.assertEqual(clone_info
["data_pool"],
141 self
.mount_a
.getfattr(source_path
, "ceph.dir.layout.pool"))
142 self
.assertEqual(clone_info
["pool_namespace"],
143 self
.mount_a
.getfattr(source_path
, "ceph.dir.layout.pool_namespace"))
145 def _verify_clone(self
, subvolume
, snapshot
, clone
,
146 source_group
=None, clone_group
=None, clone_pool
=None,
147 subvol_path
=None, source_version
=2, timo
=120):
148 # pass in subvol_path (subvolume path when snapshot was taken) when subvolume is removed
149 # but snapshots are retained for clone verification
150 path1
= self
._get
_subvolume
_snapshot
_path
(subvolume
, snapshot
, source_group
, subvol_path
, source_version
)
151 path2
= self
._get
_subvolume
_path
(self
.volname
, clone
, group_name
=clone_group
)
154 # TODO: currently snapshot rentries are not stable if snapshot source entries
155 # are removed, https://tracker.ceph.com/issues/46747
156 while check
< timo
and subvol_path
is None:
157 val1
= int(self
.mount_a
.getfattr(path1
, "ceph.dir.rentries"))
158 val2
= int(self
.mount_a
.getfattr(path2
, "ceph.dir.rentries"))
163 self
.assertTrue(check
< timo
)
165 self
._verify
_clone
_root
(path1
, path2
, clone
, clone_group
, clone_pool
)
166 self
._verify
_clone
_attrs
(path1
, path2
)
168 def _generate_random_volume_name(self
, count
=1):
169 n
= self
.volume_start
170 volumes
= [f
"{TestVolumes.TEST_VOLUME_PREFIX}_{i:016}" for i
in range(n
, n
+count
)]
171 self
.volume_start
+= count
172 return volumes
[0] if count
== 1 else volumes
174 def _generate_random_subvolume_name(self
, count
=1):
175 n
= self
.subvolume_start
176 subvolumes
= [f
"{TestVolumes.TEST_SUBVOLUME_PREFIX}_{i:016}" for i
in range(n
, n
+count
)]
177 self
.subvolume_start
+= count
178 return subvolumes
[0] if count
== 1 else subvolumes
180 def _generate_random_group_name(self
, count
=1):
182 groups
= [f
"{TestVolumes.TEST_GROUP_PREFIX}_{i:016}" for i
in range(n
, n
+count
)]
183 self
.group_start
+= count
184 return groups
[0] if count
== 1 else groups
186 def _generate_random_snapshot_name(self
, count
=1):
187 n
= self
.snapshot_start
188 snaps
= [f
"{TestVolumes.TEST_SNAPSHOT_PREFIX}_{i:016}" for i
in range(n
, n
+count
)]
189 self
.snapshot_start
+= count
190 return snaps
[0] if count
== 1 else snaps
192 def _generate_random_clone_name(self
, count
=1):
194 clones
= [f
"{TestVolumes.TEST_CLONE_PREFIX}_{i:016}" for i
in range(n
, n
+count
)]
195 self
.clone_start
+= count
196 return clones
[0] if count
== 1 else clones
    def _enable_multi_fs(self):
        # Allow more than one filesystem in the cluster (needed by these tests).
        self._fs_cmd("flag", "set", "enable_multiple", "true", "--yes-i-really-mean-it")
201 def _create_or_reuse_test_volume(self
):
202 result
= json
.loads(self
._fs
_cmd
("volume", "ls"))
204 self
.vol_created
= True
205 self
.volname
= self
._generate
_random
_volume
_name
()
206 self
._fs
_cmd
("volume", "create", self
.volname
)
208 self
.volname
= result
[0]['name']
210 def _get_volume_info(self
, vol_name
, human_readable
=False):
212 args
= ["volume", "info", vol_name
, human_readable
]
214 args
= ["volume", "info", vol_name
]
216 vol_md
= self
._fs
_cmd
(*args
)
219 def _get_subvolume_group_path(self
, vol_name
, group_name
):
220 args
= ("subvolumegroup", "getpath", vol_name
, group_name
)
221 path
= self
._fs
_cmd
(*args
)
222 # remove the leading '/', and trailing whitespaces
223 return path
[1:].rstrip()
225 def _get_subvolume_group_info(self
, vol_name
, group_name
):
226 args
= ["subvolumegroup", "info", vol_name
, group_name
]
228 group_md
= self
._fs
_cmd
(*args
)
231 def _get_subvolume_path(self
, vol_name
, subvol_name
, group_name
=None):
232 args
= ["subvolume", "getpath", vol_name
, subvol_name
]
234 args
.append(group_name
)
236 path
= self
._fs
_cmd
(*args
)
237 # remove the leading '/', and trailing whitespaces
238 return path
[1:].rstrip()
240 def _get_subvolume_info(self
, vol_name
, subvol_name
, group_name
=None):
241 args
= ["subvolume", "info", vol_name
, subvol_name
]
243 args
.append(group_name
)
245 subvol_md
= self
._fs
_cmd
(*args
)
248 def _get_subvolume_snapshot_info(self
, vol_name
, subvol_name
, snapname
, group_name
=None):
249 args
= ["subvolume", "snapshot", "info", vol_name
, subvol_name
, snapname
]
251 args
.append(group_name
)
253 snap_md
= self
._fs
_cmd
(*args
)
    def _delete_test_volume(self):
        # Remove the volume created for this test run.
        self._fs_cmd("volume", "rm", self.volname, "--yes-i-really-mean-it")
259 def _do_subvolume_pool_and_namespace_update(self
, subvolume
, pool
=None, pool_namespace
=None, subvolume_group
=None):
260 subvolpath
= self
._get
_subvolume
_path
(self
.volname
, subvolume
, group_name
=subvolume_group
)
263 self
.mount_a
.setfattr(subvolpath
, 'ceph.dir.layout.pool', pool
, sudo
=True)
265 if pool_namespace
is not None:
266 self
.mount_a
.setfattr(subvolpath
, 'ceph.dir.layout.pool_namespace', pool_namespace
, sudo
=True)
268 def _do_subvolume_attr_update(self
, subvolume
, uid
, gid
, mode
, subvolume_group
=None):
269 subvolpath
= self
._get
_subvolume
_path
(self
.volname
, subvolume
, group_name
=subvolume_group
)
272 self
.mount_a
.run_shell(['sudo', 'chmod', mode
, subvolpath
], omit_sudo
=False)
275 self
.mount_a
.run_shell(['sudo', 'chown', uid
, subvolpath
], omit_sudo
=False)
276 self
.mount_a
.run_shell(['sudo', 'chgrp', gid
, subvolpath
], omit_sudo
=False)
278 def _do_subvolume_io(self
, subvolume
, subvolume_group
=None, create_dir
=None,
279 number_of_files
=DEFAULT_NUMBER_OF_FILES
, file_size
=DEFAULT_FILE_SIZE
):
280 # get subvolume path for IO
281 args
= ["subvolume", "getpath", self
.volname
, subvolume
]
283 args
.append(subvolume_group
)
285 subvolpath
= self
._fs
_cmd
(*args
)
286 self
.assertNotEqual(subvolpath
, None)
287 subvolpath
= subvolpath
[1:].rstrip() # remove "/" prefix and any trailing newline
291 io_path
= os
.path
.join(subvolpath
, create_dir
)
292 self
.mount_a
.run_shell_payload(f
"mkdir -p {io_path}")
294 log
.debug("filling subvolume {0} with {1} files each {2}MB size under directory {3}".format(subvolume
, number_of_files
, file_size
, io_path
))
295 for i
in range(number_of_files
):
296 filename
= "{0}.{1}".format(TestVolumes
.TEST_FILE_NAME_PREFIX
, i
)
297 self
.mount_a
.write_n_mb(os
.path
.join(io_path
, filename
), file_size
)
299 def _do_subvolume_io_mixed(self
, subvolume
, subvolume_group
=None):
300 subvolpath
= self
._get
_subvolume
_path
(self
.volname
, subvolume
, group_name
=subvolume_group
)
302 reg_file
= "regfile.0"
303 dir_path
= os
.path
.join(subvolpath
, "dir.0")
304 sym_path1
= os
.path
.join(subvolpath
, "sym.0")
305 # this symlink's ownership would be changed
306 sym_path2
= os
.path
.join(dir_path
, "sym.0")
308 self
.mount_a
.run_shell(["mkdir", dir_path
])
309 self
.mount_a
.run_shell(["ln", "-s", "./{}".format(reg_file
), sym_path1
])
310 self
.mount_a
.run_shell(["ln", "-s", "./{}".format(reg_file
), sym_path2
])
311 # flip ownership to nobody. assumption: nobody's id is 65534
312 self
.mount_a
.run_shell(["sudo", "chown", "-h", "65534:65534", sym_path2
], omit_sudo
=False)
314 def _wait_for_trash_empty(self
, timeout
=60):
315 # XXX: construct the trash dir path (note that there is no mgr
316 # [sub]volume interface for this).
317 trashdir
= os
.path
.join("./", "volumes", "_deleting")
318 self
.mount_a
.wait_for_dir_empty(trashdir
, timeout
=timeout
)
320 def _wait_for_subvol_trash_empty(self
, subvol
, group
="_nogroup", timeout
=30):
321 trashdir
= os
.path
.join("./", "volumes", group
, subvol
, ".trash")
323 self
.mount_a
.wait_for_dir_empty(trashdir
, timeout
=timeout
)
324 except CommandFailedError
as ce
:
325 if ce
.exitstatus
!= errno
.ENOENT
:
330 def _assert_meta_location_and_version(self
, vol_name
, subvol_name
, subvol_group
=None, version
=2, legacy
=False):
332 subvol_path
= self
._get
_subvolume
_path
(vol_name
, subvol_name
, group_name
=subvol_group
)
334 m
.update(("/"+subvol_path
).encode('utf-8'))
335 meta_filename
= "{0}.meta".format(m
.digest().hex())
336 metapath
= os
.path
.join(".", "volumes", "_legacy", meta_filename
)
338 group
= subvol_group
if subvol_group
is not None else '_nogroup'
339 metapath
= os
.path
.join(".", "volumes", group
, subvol_name
, ".meta")
341 out
= self
.mount_a
.run_shell(['sudo', 'cat', metapath
], omit_sudo
=False)
342 lines
= out
.stdout
.getvalue().strip().split('\n')
345 if line
== "version = " + str(version
):
348 self
.assertEqual(sv_version
, version
, "version expected was '{0}' but got '{1}' from meta file at '{2}'".format(
349 version
, sv_version
, metapath
))
351 def _create_v1_subvolume(self
, subvol_name
, subvol_group
=None, has_snapshot
=True, subvol_type
='subvolume', state
='complete'):
352 group
= subvol_group
if subvol_group
is not None else '_nogroup'
353 basepath
= os
.path
.join("volumes", group
, subvol_name
)
354 uuid_str
= str(uuid
.uuid4())
355 createpath
= os
.path
.join(basepath
, uuid_str
)
356 self
.mount_a
.run_shell(['sudo', 'mkdir', '-p', createpath
], omit_sudo
=False)
358 # create a v1 snapshot, to prevent auto upgrades
360 snappath
= os
.path
.join(createpath
, ".snap", "fake")
361 self
.mount_a
.run_shell(['sudo', 'mkdir', '-p', snappath
], omit_sudo
=False)
363 # add required xattrs to subvolume
364 default_pool
= self
.mount_a
.getfattr(".", "ceph.dir.layout.pool")
365 self
.mount_a
.setfattr(createpath
, 'ceph.dir.layout.pool', default_pool
, sudo
=True)
367 # create a v1 .meta file
368 meta_contents
= "[GLOBAL]\nversion = 1\ntype = {0}\npath = {1}\nstate = {2}\n".format(subvol_type
, "/" + createpath
, state
)
369 if state
== 'pending':
370 # add a fake clone source
371 meta_contents
= meta_contents
+ '[source]\nvolume = fake\nsubvolume = fake\nsnapshot = fake\n'
372 meta_filepath1
= os
.path
.join(self
.mount_a
.mountpoint
, basepath
, ".meta")
373 self
.mount_a
.client_remote
.write_file(meta_filepath1
, meta_contents
, sudo
=True)
376 def _update_fake_trash(self
, subvol_name
, subvol_group
=None, trash_name
='fake', create
=True):
377 group
= subvol_group
if subvol_group
is not None else '_nogroup'
378 trashpath
= os
.path
.join("volumes", group
, subvol_name
, '.trash', trash_name
)
380 self
.mount_a
.run_shell(['sudo', 'mkdir', '-p', trashpath
], omit_sudo
=False)
382 self
.mount_a
.run_shell(['sudo', 'rmdir', trashpath
], omit_sudo
=False)
    def _configure_guest_auth(self, guest_mount, authid, key):
        """
        Set up auth credentials for a guest client.
        """
        # Create keyring file for the guest client.
        # NOTE(review): the keyring template body (original lines 390-392,
        # presumably a [client.{authid}] section with the key) is missing
        # from this extraction; the dedent literal below is incomplete —
        # restore from upstream before use.
        keyring_txt = dedent("""
        """.format(authid=authid,key=key))

        guest_mount.client_id = authid
        guest_mount.client_remote.write_file(guest_mount.get_keyring_path(),
                                             keyring_txt, sudo=True)
        # Add a guest client section to the ceph config file.
        self.config_set("client.{0}".format(authid), "debug client", 20)
        self.config_set("client.{0}".format(authid), "debug objecter", 20)
        self.set_conf("client.{0}".format(authid),
                      "keyring", guest_mount.get_keyring_path())
404 def _auth_metadata_get(self
, filedata
):
406 Return a deserialized JSON object, or None
409 data
= json
.loads(filedata
)
410 except json
.decoder
.JSONDecodeError
:
        # NOTE(review): the enclosing 'def setUp(self):' line (original 414)
        # is missing from this extraction; these are the setUp body statements.
        super(TestVolumesHelper, self).setUp()
        # track whether this run created the volume (checked on teardown)
        self.vol_created = False
        self._enable_multi_fs()
        self._create_or_reuse_test_volume()
        self.config_set('mon', 'mon_allow_pool_delete', True)
        # randomize the name counters so generated names don't collide
        self.volume_start = random.randint(1, (1<<20))
        self.subvolume_start = random.randint(1, (1<<20))
        self.group_start = random.randint(1, (1<<20))
        self.snapshot_start = random.randint(1, (1<<20))
        self.clone_start = random.randint(1, (1<<20))
        # NOTE(review): the enclosing 'def tearDown(self):' line and original
        # lines 427-428 (likely a guard on self.vol_created) are missing from
        # this extraction; these are the remaining tearDown body statements.
        self._delete_test_volume()
        super(TestVolumesHelper, self).tearDown()
class TestVolumes(TestVolumesHelper):
    """Tests for FS volume operations."""
435 def test_volume_create(self
):
437 That the volume can be created and then cleans up
439 volname
= self
._generate
_random
_volume
_name
()
440 self
._fs
_cmd
("volume", "create", volname
)
441 volumels
= json
.loads(self
._fs
_cmd
("volume", "ls"))
443 if not (volname
in ([volume
['name'] for volume
in volumels
])):
444 raise RuntimeError("Error creating volume '{0}'".format(volname
))
446 # check that the pools were created with the correct config
447 pool_details
= json
.loads(self
._raw
_cmd
("osd", "pool", "ls", "detail", "--format=json"))
449 for pool
in pool_details
:
450 pool_flags
[pool
["pool_id"]] = pool
["flags_names"].split(",")
452 volume_details
= json
.loads(self
._fs
_cmd
("get", volname
, "--format=json"))
453 for data_pool_id
in volume_details
['mdsmap']['data_pools']:
454 self
.assertIn("bulk", pool_flags
[data_pool_id
])
455 meta_pool_id
= volume_details
['mdsmap']['metadata_pool']
456 self
.assertNotIn("bulk", pool_flags
[meta_pool_id
])
459 self
._fs
_cmd
("volume", "rm", volname
, "--yes-i-really-mean-it")
461 def test_volume_ls(self
):
463 That the existing and the newly created volumes can be listed and
466 vls
= json
.loads(self
._fs
_cmd
("volume", "ls"))
467 volumes
= [volume
['name'] for volume
in vls
]
469 #create new volumes and add it to the existing list of volumes
470 volumenames
= self
._generate
_random
_volume
_name
(2)
471 for volumename
in volumenames
:
472 self
._fs
_cmd
("volume", "create", volumename
)
473 volumes
.extend(volumenames
)
477 volumels
= json
.loads(self
._fs
_cmd
('volume', 'ls'))
478 if len(volumels
) == 0:
479 raise RuntimeError("Expected the 'fs volume ls' command to list the created volumes.")
481 volnames
= [volume
['name'] for volume
in volumels
]
482 if collections
.Counter(volnames
) != collections
.Counter(volumes
):
483 raise RuntimeError("Error creating or listing volumes")
486 for volume
in volumenames
:
487 self
._fs
_cmd
("volume", "rm", volume
, "--yes-i-really-mean-it")
489 def test_volume_rm(self
):
491 That the volume can only be removed when --yes-i-really-mean-it is used
492 and verify that the deleted volume is not listed anymore.
494 for m
in self
.mounts
:
497 self
._fs
_cmd
("volume", "rm", self
.volname
)
498 except CommandFailedError
as ce
:
499 if ce
.exitstatus
!= errno
.EPERM
:
500 raise RuntimeError("expected the 'fs volume rm' command to fail with EPERM, "
501 "but it failed with {0}".format(ce
.exitstatus
))
503 self
._fs
_cmd
("volume", "rm", self
.volname
, "--yes-i-really-mean-it")
506 volumes
= json
.loads(self
._fs
_cmd
("volume", "ls", "--format=json-pretty"))
507 if (self
.volname
in [volume
['name'] for volume
in volumes
]):
508 raise RuntimeError("Expected the 'fs volume rm' command to succeed. "
509 "The volume {0} not removed.".format(self
.volname
))
511 raise RuntimeError("expected the 'fs volume rm' command to fail.")
513 def test_volume_rm_arbitrary_pool_removal(self
):
515 That the arbitrary pool added to the volume out of band is removed
516 successfully on volume removal.
518 for m
in self
.mounts
:
520 new_pool
= "new_pool"
521 # add arbitrary data pool
522 self
.fs
.add_data_pool(new_pool
)
523 vol_status
= json
.loads(self
._fs
_cmd
("status", self
.volname
, "--format=json-pretty"))
524 self
._fs
_cmd
("volume", "rm", self
.volname
, "--yes-i-really-mean-it")
527 volumes
= json
.loads(self
._fs
_cmd
("volume", "ls", "--format=json-pretty"))
528 volnames
= [volume
['name'] for volume
in volumes
]
529 self
.assertNotIn(self
.volname
, volnames
)
531 #check if osd pools are gone
532 pools
= json
.loads(self
._raw
_cmd
("osd", "pool", "ls", "--format=json-pretty"))
533 for pool
in vol_status
["pools"]:
534 self
.assertNotIn(pool
["name"], pools
)
536 def test_volume_rm_when_mon_delete_pool_false(self
):
538 That the volume can only be removed when mon_allowd_pool_delete is set
539 to true and verify that the pools are removed after volume deletion.
541 for m
in self
.mounts
:
543 self
.config_set('mon', 'mon_allow_pool_delete', False)
545 self
._fs
_cmd
("volume", "rm", self
.volname
, "--yes-i-really-mean-it")
546 except CommandFailedError
as ce
:
547 self
.assertEqual(ce
.exitstatus
, errno
.EPERM
,
548 "expected the 'fs volume rm' command to fail with EPERM, "
549 "but it failed with {0}".format(ce
.exitstatus
))
550 vol_status
= json
.loads(self
._fs
_cmd
("status", self
.volname
, "--format=json-pretty"))
551 self
.config_set('mon', 'mon_allow_pool_delete', True)
552 self
._fs
_cmd
("volume", "rm", self
.volname
, "--yes-i-really-mean-it")
555 volumes
= json
.loads(self
._fs
_cmd
("volume", "ls", "--format=json-pretty"))
556 volnames
= [volume
['name'] for volume
in volumes
]
557 self
.assertNotIn(self
.volname
, volnames
,
558 "volume {0} exists after removal".format(self
.volname
))
559 #check if pools are gone
560 pools
= json
.loads(self
._raw
_cmd
("osd", "pool", "ls", "--format=json-pretty"))
561 for pool
in vol_status
["pools"]:
562 self
.assertNotIn(pool
["name"], pools
,
563 "pool {0} exists after volume removal".format(pool
["name"]))
565 def test_volume_rename(self
):
567 That volume, its file system and pools, can be renamed.
569 for m
in self
.mounts
:
571 oldvolname
= self
.volname
572 newvolname
= self
._generate
_random
_volume
_name
()
573 new_data_pool
, new_metadata_pool
= f
"cephfs.{newvolname}.data", f
"cephfs.{newvolname}.meta"
574 self
._fs
_cmd
("volume", "rename", oldvolname
, newvolname
,
575 "--yes-i-really-mean-it")
576 volumels
= json
.loads(self
._fs
_cmd
('volume', 'ls'))
577 volnames
= [volume
['name'] for volume
in volumels
]
578 # volume name changed
579 self
.assertIn(newvolname
, volnames
)
580 self
.assertNotIn(oldvolname
, volnames
)
582 self
.fs
.get_pool_names(refresh
=True)
583 self
.assertEqual(new_metadata_pool
, self
.fs
.get_metadata_pool_name())
584 self
.assertEqual(new_data_pool
, self
.fs
.get_data_pool_name())
586 def test_volume_rename_idempotency(self
):
588 That volume rename is idempotent.
590 for m
in self
.mounts
:
592 oldvolname
= self
.volname
593 newvolname
= self
._generate
_random
_volume
_name
()
594 new_data_pool
, new_metadata_pool
= f
"cephfs.{newvolname}.data", f
"cephfs.{newvolname}.meta"
595 self
._fs
_cmd
("volume", "rename", oldvolname
, newvolname
,
596 "--yes-i-really-mean-it")
597 self
._fs
_cmd
("volume", "rename", oldvolname
, newvolname
,
598 "--yes-i-really-mean-it")
599 volumels
= json
.loads(self
._fs
_cmd
('volume', 'ls'))
600 volnames
= [volume
['name'] for volume
in volumels
]
601 self
.assertIn(newvolname
, volnames
)
602 self
.assertNotIn(oldvolname
, volnames
)
603 self
.fs
.get_pool_names(refresh
=True)
604 self
.assertEqual(new_metadata_pool
, self
.fs
.get_metadata_pool_name())
605 self
.assertEqual(new_data_pool
, self
.fs
.get_data_pool_name())
607 def test_volume_rename_fails_without_confirmation_flag(self
):
609 That renaming volume fails without --yes-i-really-mean-it flag.
611 newvolname
= self
._generate
_random
_volume
_name
()
613 self
._fs
_cmd
("volume", "rename", self
.volname
, newvolname
)
614 except CommandFailedError
as ce
:
615 self
.assertEqual(ce
.exitstatus
, errno
.EPERM
,
616 "invalid error code on renaming a FS volume without the "
617 "'--yes-i-really-mean-it' flag")
619 self
.fail("expected renaming of FS volume to fail without the "
620 "'--yes-i-really-mean-it' flag")
622 def test_volume_rename_for_more_than_one_data_pool(self
):
624 That renaming a volume with more than one data pool does not change
625 the name of the data pools.
627 for m
in self
.mounts
:
629 self
.fs
.add_data_pool('another-data-pool')
630 oldvolname
= self
.volname
631 newvolname
= self
._generate
_random
_volume
_name
()
632 self
.fs
.get_pool_names(refresh
=True)
633 orig_data_pool_names
= list(self
.fs
.data_pools
.values())
634 new_metadata_pool
= f
"cephfs.{newvolname}.meta"
635 self
._fs
_cmd
("volume", "rename", self
.volname
, newvolname
,
636 "--yes-i-really-mean-it")
637 volumels
= json
.loads(self
._fs
_cmd
('volume', 'ls'))
638 volnames
= [volume
['name'] for volume
in volumels
]
639 # volume name changed
640 self
.assertIn(newvolname
, volnames
)
641 self
.assertNotIn(oldvolname
, volnames
)
642 self
.fs
.get_pool_names(refresh
=True)
643 # metadata pool name changed
644 self
.assertEqual(new_metadata_pool
, self
.fs
.get_metadata_pool_name())
645 # data pool names unchanged
646 self
.assertCountEqual(orig_data_pool_names
, list(self
.fs
.data_pools
.values()))
648 def test_volume_info(self
):
650 Tests the 'fs volume info' command
652 vol_fields
= ["pools", "used_size", "pending_subvolume_deletions", "mon_addrs"]
653 group
= self
._generate
_random
_group
_name
()
654 # create subvolumegroup
655 self
._fs
_cmd
("subvolumegroup", "create", self
.volname
, group
)
656 # get volume metadata
657 vol_info
= json
.loads(self
._get
_volume
_info
(self
.volname
))
658 for md
in vol_fields
:
659 self
.assertIn(md
, vol_info
,
660 f
"'{md}' key not present in metadata of volume")
661 self
.assertEqual(vol_info
["used_size"], 0,
662 "Size should be zero when volumes directory is empty")
664 def test_volume_info_pending_subvol_deletions(self
):
666 Tests the pending_subvolume_deletions in 'fs volume info' command
668 subvolname
= self
._generate
_random
_subvolume
_name
()
670 self
._fs
_cmd
("subvolume", "create", self
.volname
, subvolname
, "--mode=777")
671 # create 3K zero byte files
672 self
._do
_subvolume
_io
(subvolname
, number_of_files
=3000, file_size
=0)
673 # Delete the subvolume
674 self
._fs
_cmd
("subvolume", "rm", self
.volname
, subvolname
)
675 # get volume metadata
676 vol_info
= json
.loads(self
._get
_volume
_info
(self
.volname
))
677 self
.assertNotEqual(vol_info
['pending_subvolume_deletions'], 0,
678 "pending_subvolume_deletions should be 1")
679 # verify trash dir is clean
680 self
._wait
_for
_trash
_empty
()
682 def test_volume_info_without_subvolumegroup(self
):
684 Tests the 'fs volume info' command without subvolume group
686 vol_fields
= ["pools", "mon_addrs"]
687 # get volume metadata
688 vol_info
= json
.loads(self
._get
_volume
_info
(self
.volname
))
689 for md
in vol_fields
:
690 self
.assertIn(md
, vol_info
,
691 f
"'{md}' key not present in metadata of volume")
692 self
.assertNotIn("used_size", vol_info
,
693 "'used_size' should not be present in absence of subvolumegroup")
694 self
.assertNotIn("pending_subvolume_deletions", vol_info
,
695 "'pending_subvolume_deletions' should not be present in absence"
696 " of subvolumegroup")
698 def test_volume_info_with_human_readable_flag(self
):
700 Tests the 'fs volume info --human_readable' command
702 vol_fields
= ["pools", "used_size", "pending_subvolume_deletions", "mon_addrs"]
703 group
= self
._generate
_random
_group
_name
()
704 # create subvolumegroup
705 self
._fs
_cmd
("subvolumegroup", "create", self
.volname
, group
)
706 # get volume metadata
707 vol_info
= json
.loads(self
._get
_volume
_info
(self
.volname
, "--human_readable"))
708 for md
in vol_fields
:
709 self
.assertIn(md
, vol_info
,
710 f
"'{md}' key not present in metadata of volume")
711 units
= [' ', 'k', 'M', 'G', 'T', 'P', 'E']
712 assert vol_info
["used_size"][-1] in units
, "unit suffix in used_size is absent"
713 assert vol_info
["pools"]["data"][0]["avail"][-1] in units
, "unit suffix in avail data is absent"
714 assert vol_info
["pools"]["data"][0]["used"][-1] in units
, "unit suffix in used data is absent"
715 assert vol_info
["pools"]["metadata"][0]["avail"][-1] in units
, "unit suffix in avail metadata is absent"
716 assert vol_info
["pools"]["metadata"][0]["used"][-1] in units
, "unit suffix in used metadata is absent"
717 self
.assertEqual(int(vol_info
["used_size"]), 0,
718 "Size should be zero when volumes directory is empty")
720 def test_volume_info_with_human_readable_flag_without_subvolumegroup(self
):
722 Tests the 'fs volume info --human_readable' command without subvolume group
724 vol_fields
= ["pools", "mon_addrs"]
725 # get volume metadata
726 vol_info
= json
.loads(self
._get
_volume
_info
(self
.volname
, "--human_readable"))
727 for md
in vol_fields
:
728 self
.assertIn(md
, vol_info
,
729 f
"'{md}' key not present in metadata of volume")
730 units
= [' ', 'k', 'M', 'G', 'T', 'P', 'E']
731 assert vol_info
["pools"]["data"][0]["avail"][-1] in units
, "unit suffix in avail data is absent"
732 assert vol_info
["pools"]["data"][0]["used"][-1] in units
, "unit suffix in used data is absent"
733 assert vol_info
["pools"]["metadata"][0]["avail"][-1] in units
, "unit suffix in avail metadata is absent"
734 assert vol_info
["pools"]["metadata"][0]["used"][-1] in units
, "unit suffix in used metadata is absent"
735 self
.assertNotIn("used_size", vol_info
,
736 "'used_size' should not be present in absence of subvolumegroup")
737 self
.assertNotIn("pending_subvolume_deletions", vol_info
,
738 "'pending_subvolume_deletions' should not be present in absence"
739 " of subvolumegroup")
class TestSubvolumeGroups(TestVolumesHelper):
    """Tests for FS subvolume group operations."""
744 def test_default_uid_gid_subvolume_group(self
):
745 group
= self
._generate
_random
_group
_name
()
750 self
._fs
_cmd
("subvolumegroup", "create", self
.volname
, group
)
751 group_path
= self
._get
_subvolume
_group
_path
(self
.volname
, group
)
753 # check group's uid and gid
754 stat
= self
.mount_a
.stat(group_path
)
755 self
.assertEqual(stat
['st_uid'], expected_uid
)
756 self
.assertEqual(stat
['st_gid'], expected_gid
)
759 self
._fs
_cmd
("subvolumegroup", "rm", self
.volname
, group
)
761 def test_nonexistent_subvolume_group_create(self
):
762 subvolume
= self
._generate
_random
_subvolume
_name
()
763 group
= "non_existent_group"
765 # try, creating subvolume in a nonexistent group
767 self
._fs
_cmd
("subvolume", "create", self
.volname
, subvolume
, "--group_name", group
)
768 except CommandFailedError
as ce
:
769 if ce
.exitstatus
!= errno
.ENOENT
:
772 raise RuntimeError("expected the 'fs subvolume create' command to fail")
774 def test_nonexistent_subvolume_group_rm(self
):
775 group
= "non_existent_group"
777 # try, remove subvolume group
779 self
._fs
_cmd
("subvolumegroup", "rm", self
.volname
, group
)
780 except CommandFailedError
as ce
:
781 if ce
.exitstatus
!= errno
.ENOENT
:
784 raise RuntimeError("expected the 'fs subvolumegroup rm' command to fail")
786 def test_subvolume_group_create_with_auto_cleanup_on_fail(self
):
787 group
= self
._generate
_random
_group
_name
()
788 data_pool
= "invalid_pool"
789 # create group with invalid data pool layout
790 with self
.assertRaises(CommandFailedError
):
791 self
._fs
_cmd
("subvolumegroup", "create", self
.volname
, group
, "--pool_layout", data_pool
)
793 # check whether group path is cleaned up
795 self
._fs
_cmd
("subvolumegroup", "getpath", self
.volname
, group
)
796 except CommandFailedError
as ce
:
797 if ce
.exitstatus
!= errno
.ENOENT
:
800 raise RuntimeError("expected the 'fs subvolumegroup getpath' command to fail")
def test_subvolume_group_create_with_desired_data_pool_layout(self):
    """A group created with --pool_layout must land on the requested data pool."""
    group1, group2 = self._generate_random_group_name(2)

    # create a group with the default layout as a reference point
    self._fs_cmd("subvolumegroup", "create", self.volname, group1)
    group1_path = self._get_subvolume_group_path(self.volname, group1)

    default_pool = self.mount_a.getfattr(group1_path, "ceph.dir.layout.pool")
    new_pool = "new_pool"
    self.assertNotEqual(default_pool, new_pool)

    # add a fresh data pool to the filesystem
    newid = self.fs.add_data_pool(new_pool)

    # create group specifying the new data pool as its pool layout
    self._fs_cmd("subvolumegroup", "create", self.volname, group2,
                 "--pool_layout", new_pool)
    group2_path = self._get_subvolume_group_path(self.volname, group2)

    desired_pool = self.mount_a.getfattr(group2_path, "ceph.dir.layout.pool")
    try:
        self.assertEqual(desired_pool, new_pool)
    except AssertionError:
        self.assertEqual(int(desired_pool), newid) # old kernel returns id

    # clean up both groups
    self._fs_cmd("subvolumegroup", "rm", self.volname, group1)
    self._fs_cmd("subvolumegroup", "rm", self.volname, group2)
def test_subvolume_group_create_with_desired_mode(self):
    """A group created with --mode gets that mode; default mode is 755 and the
    parent 'volumes' directory keeps the default mode as well."""
    group1, group2 = self._generate_random_group_name(2)
    # default mode
    expected_mode1 = "755"
    # desired mode
    expected_mode2 = "777"

    # create one group with an explicit mode and one with the default
    self._fs_cmd("subvolumegroup", "create", self.volname, group2, f"--mode={expected_mode2}")
    self._fs_cmd("subvolumegroup", "create", self.volname, group1)

    group1_path = self._get_subvolume_group_path(self.volname, group1)
    group2_path = self._get_subvolume_group_path(self.volname, group2)
    volumes_path = os.path.dirname(group1_path)

    # check group modes (and that the shared parent was not chmod'ed)
    actual_mode1 = self.mount_a.run_shell(['stat', '-c' '%a', group1_path]).stdout.getvalue().strip()
    actual_mode2 = self.mount_a.run_shell(['stat', '-c' '%a', group2_path]).stdout.getvalue().strip()
    actual_mode3 = self.mount_a.run_shell(['stat', '-c' '%a', volumes_path]).stdout.getvalue().strip()
    self.assertEqual(actual_mode1, expected_mode1)
    self.assertEqual(actual_mode2, expected_mode2)
    self.assertEqual(actual_mode3, expected_mode1)

    # clean up
    self._fs_cmd("subvolumegroup", "rm", self.volname, group1)
    self._fs_cmd("subvolumegroup", "rm", self.volname, group2)
def test_subvolume_group_create_with_desired_uid_gid(self):
    """
    That the subvolume group can be created with the desired uid and gid and its uid and gid matches the
    expected values.
    """
    # NOTE(review): the original assignments were lost in extraction; upstream uses 1000 -- confirm
    uid = 1000
    gid = 1000

    # create subvolume group
    subvolgroupname = self._generate_random_group_name()
    self._fs_cmd("subvolumegroup", "create", self.volname, subvolgroupname, "--uid", str(uid), "--gid", str(gid))

    # make sure it exists
    subvolgrouppath = self._get_subvolume_group_path(self.volname, subvolgroupname)
    self.assertNotEqual(subvolgrouppath, None)

    # verify the uid and gid
    suid = int(self.mount_a.run_shell(['stat', '-c' '%u', subvolgrouppath]).stdout.getvalue().strip())
    sgid = int(self.mount_a.run_shell(['stat', '-c' '%g', subvolgrouppath]).stdout.getvalue().strip())
    self.assertEqual(uid, suid)
    self.assertEqual(gid, sgid)

    # remove group
    self._fs_cmd("subvolumegroup", "rm", self.volname, subvolgroupname)
def test_subvolume_group_create_with_invalid_data_pool_layout(self):
    """Creating a group on a nonexistent data pool must fail with EINVAL."""
    group = self._generate_random_group_name()
    data_pool = "invalid_pool"

    # create group with invalid data pool layout
    try:
        self._fs_cmd("subvolumegroup", "create", self.volname, group, "--pool_layout", data_pool)
    except CommandFailedError as ce:
        # only EINVAL is the expected failure; anything else is a real error
        if ce.exitstatus != errno.EINVAL:
            raise
    else:
        raise RuntimeError("expected the 'fs subvolumegroup create' command to fail")
def test_subvolume_group_create_with_size(self):
    """Creating a group with a size argument must set the byte quota accordingly."""
    # create group with size -- should set quota
    group = self._generate_random_group_name()
    self._fs_cmd("subvolumegroup", "create", self.volname, group, "1000000000")

    # get group metadata and verify the quota was applied
    group_info = json.loads(self._get_subvolume_group_info(self.volname, group))
    self.assertEqual(group_info["bytes_quota"], 1000000000)

    # remove group
    self._fs_cmd("subvolumegroup", "rm", self.volname, group)
def test_subvolume_group_info(self):
    # tests the 'fs subvolumegroup info' command

    # every key that 'subvolumegroup info' is expected to report
    group_md = ["atime", "bytes_pcent", "bytes_quota", "bytes_used", "created_at", "ctime",
                "data_pool", "gid", "mode", "mon_addrs", "mtime", "uid"]

    # create group
    group = self._generate_random_group_name()
    self._fs_cmd("subvolumegroup", "create", self.volname, group)

    # get group metadata (no quota set yet)
    group_info = json.loads(self._get_subvolume_group_info(self.volname, group))
    for md in group_md:
        self.assertIn(md, group_info, "'{0}' key not present in metadata of group".format(md))

    self.assertEqual(group_info["bytes_pcent"], "undefined", "bytes_pcent should be set to undefined if quota is not set")
    self.assertEqual(group_info["bytes_quota"], "infinite", "bytes_quota should be set to infinite if quota is not set")
    self.assertEqual(group_info["uid"], 0)
    self.assertEqual(group_info["gid"], 0)

    # set a quota via resize
    nsize = self.DEFAULT_FILE_SIZE*1024*1024
    self._fs_cmd("subvolumegroup", "resize", self.volname, group, str(nsize))

    # get group metadata after quota set
    group_info = json.loads(self._get_subvolume_group_info(self.volname, group))
    for md in group_md:
        self.assertIn(md, group_info, "'{0}' key not present in metadata of subvolume".format(md))

    self.assertNotEqual(group_info["bytes_pcent"], "undefined", "bytes_pcent should not be set to undefined if quota is set")
    self.assertEqual(group_info["bytes_quota"], nsize, "bytes_quota should be set to '{0}'".format(nsize))

    # remove group
    self._fs_cmd("subvolumegroup", "rm", self.volname, group)
def test_subvolume_group_create_idempotence(self):
    """Creating an already-existing group must succeed (idempotent)."""
    group = self._generate_random_group_name()
    self._fs_cmd("subvolumegroup", "create", self.volname, group)

    # try creating w/ same subvolume group name -- should be idempotent
    self._fs_cmd("subvolumegroup", "create", self.volname, group)

    # remove group
    self._fs_cmd("subvolumegroup", "rm", self.volname, group)
def test_subvolume_group_create_idempotence_mode(self):
    """Re-creating an existing group with --mode must update the group's mode."""
    group = self._generate_random_group_name()
    self._fs_cmd("subvolumegroup", "create", self.volname, group)

    # try creating w/ same subvolume group name with mode -- should set mode
    self._fs_cmd("subvolumegroup", "create", self.volname, group, "--mode=766")

    group_path = self._get_subvolume_group_path(self.volname, group)

    # check subvolumegroup's mode
    mode = self.mount_a.run_shell(['stat', '-c' '%a', group_path]).stdout.getvalue().strip()
    self.assertEqual(mode, "766")

    # remove group
    self._fs_cmd("subvolumegroup", "rm", self.volname, group)
def test_subvolume_group_create_idempotence_uid_gid(self):
    """Re-creating an existing group with --uid/--gid must update its ownership."""
    # NOTE(review): the original assignments were lost in extraction; upstream uses 1000 -- confirm
    desired_uid = 1000
    desired_gid = 1000

    group = self._generate_random_group_name()
    self._fs_cmd("subvolumegroup", "create", self.volname, group)

    # try creating w/ same subvolume group name with uid/gid -- should set uid/gid
    self._fs_cmd("subvolumegroup", "create", self.volname, group, "--uid", str(desired_uid), "--gid", str(desired_gid))

    group_path = self._get_subvolume_group_path(self.volname, group)

    # verify the uid and gid
    actual_uid = int(self.mount_a.run_shell(['stat', '-c' '%u', group_path]).stdout.getvalue().strip())
    actual_gid = int(self.mount_a.run_shell(['stat', '-c' '%g', group_path]).stdout.getvalue().strip())
    self.assertEqual(desired_uid, actual_uid)
    self.assertEqual(desired_gid, actual_gid)

    # remove group
    self._fs_cmd("subvolumegroup", "rm", self.volname, group)
def test_subvolume_group_create_idempotence_data_pool(self):
    """Re-creating an existing group with --pool_layout must move it to the new pool."""
    group = self._generate_random_group_name()
    self._fs_cmd("subvolumegroup", "create", self.volname, group)

    group_path = self._get_subvolume_group_path(self.volname, group)

    default_pool = self.mount_a.getfattr(group_path, "ceph.dir.layout.pool")
    new_pool = "new_pool"
    self.assertNotEqual(default_pool, new_pool)

    # add a fresh data pool to the filesystem
    newid = self.fs.add_data_pool(new_pool)

    # try creating w/ same subvolume group name with new data pool -- should set pool
    self._fs_cmd("subvolumegroup", "create", self.volname, group, "--pool_layout", new_pool)
    desired_pool = self.mount_a.getfattr(group_path, "ceph.dir.layout.pool")
    try:
        self.assertEqual(desired_pool, new_pool)
    except AssertionError:
        self.assertEqual(int(desired_pool), newid) # old kernel returns id

    # remove group
    self._fs_cmd("subvolumegroup", "rm", self.volname, group)
def test_subvolume_group_create_idempotence_resize(self):
    """Re-creating an existing group with a size must apply the quota."""
    group = self._generate_random_group_name()
    self._fs_cmd("subvolumegroup", "create", self.volname, group)

    # try creating w/ same subvolume name with size -- should set quota
    self._fs_cmd("subvolumegroup", "create", self.volname, group, "1000000000")

    # get group metadata and verify the quota
    group_info = json.loads(self._get_subvolume_group_info(self.volname, group))
    self.assertEqual(group_info["bytes_quota"], 1000000000)

    # remove group
    self._fs_cmd("subvolumegroup", "rm", self.volname, group)
def test_subvolume_group_quota_mds_path_restriction_to_group_path(self):
    """
    Tests subvolumegroup quota enforcement with mds path restriction set to group.
    For quota to be enforced, read permission needs to be provided to the parent
    of the directory on which quota is set. Please see the tracker comment [1]
    [1] https://tracker.ceph.com/issues/55090#note-8
    """
    osize = self.DEFAULT_FILE_SIZE*1024*1024*100
    # create group with 100MB quota
    group = self._generate_random_group_name()
    self._fs_cmd("subvolumegroup", "create", self.volname, group,
                 "--size", str(osize), "--mode=777")

    # make sure it exists
    grouppath = self._get_subvolume_group_path(self.volname, group)
    self.assertNotEqual(grouppath, None)

    # create subvolume under the group
    subvolname = self._generate_random_subvolume_name()
    self._fs_cmd("subvolume", "create", self.volname, subvolname,
                 "--group_name", group, "--mode=777")

    # make sure it exists
    subvolpath = self._get_subvolume_path(self.volname, subvolname, group_name=group)
    self.assertNotEqual(subvolpath, None)

    # create an auth id whose mds caps are restricted to the volumes tree
    # NOTE(review): mgr/mon cap lines were lost in extraction; reconstructed from upstream -- confirm
    authid = "client.guest1"
    user = json.loads(self.fs.mon_manager.raw_cluster_cmd(
        "auth", "get-or-create", authid,
        "mds", "allow rw path=/volumes",
        "mgr", "allow rw",
        "osd", "allow rw tag cephfs *=*",
        "mon", "allow r",
        "--format=json-pretty"
        ))

    # Prepare guest_mount with new authid
    guest_mount = self.mount_b
    guest_mount.umount_wait()

    # configure credentials for guest client
    self._configure_guest_auth(guest_mount, "guest1", user[0]["key"])

    # mount the subvolume
    mount_path = os.path.join("/", subvolpath)
    guest_mount.mount_wait(cephfs_mntpt=mount_path)

    # create 99 files of 1MB
    guest_mount.run_shell_payload("mkdir -p dir1")
    for i in range(99):
        filename = "{0}.{1}".format(TestVolumes.TEST_FILE_NAME_PREFIX, i)
        guest_mount.write_n_mb(os.path.join("dir1", filename), self.DEFAULT_FILE_SIZE)

    try:
        # write two files of 1MB file to exceed the quota
        guest_mount.run_shell_payload("mkdir -p dir2")
        for i in range(2):
            filename = "{0}.{1}".format(TestVolumes.TEST_FILE_NAME_PREFIX, i)
            guest_mount.write_n_mb(os.path.join("dir2", filename), self.DEFAULT_FILE_SIZE)
        # For quota to be enforced
        # NOTE(review): sleep duration reconstructed from upstream -- confirm
        time.sleep(60)
        # create 400 files of 1MB to exceed quota
        for i in range(400):
            filename = "{0}.{1}".format(TestVolumes.TEST_FILE_NAME_PREFIX, i)
            guest_mount.write_n_mb(os.path.join("dir2", filename), self.DEFAULT_FILE_SIZE)
            # Sometimes quota enforcement takes time.
            if i == 200:
                time.sleep(60)
    except CommandFailedError:
        pass
    else:
        self.fail(f"expected filling subvolume {subvolname} with 400 files of size 1MB to fail")

    # cleanup the guest mount
    guest_mount.umount_wait()

    # Delete the subvolume
    self._fs_cmd("subvolume", "rm", self.volname, subvolname, "--group_name", group)

    # Delete the subvolumegroup
    self._fs_cmd("subvolumegroup", "rm", self.volname, group)

    # verify trash dir is clean
    self._wait_for_trash_empty()
def test_subvolume_group_quota_mds_path_restriction_to_subvolume_path(self):
    """
    Tests subvolumegroup quota enforcement with mds path restriction set to subvolume path
    The quota should not be enforced because of the fourth limitation mentioned at
    https://docs.ceph.com/en/latest/cephfs/quota/#limitations
    """
    osize = self.DEFAULT_FILE_SIZE*1024*1024*100
    # create group with 100MB quota
    group = self._generate_random_group_name()
    self._fs_cmd("subvolumegroup", "create", self.volname, group,
                 "--size", str(osize), "--mode=777")

    # make sure it exists
    grouppath = self._get_subvolume_group_path(self.volname, group)
    self.assertNotEqual(grouppath, None)

    # create subvolume under the group
    subvolname = self._generate_random_subvolume_name()
    self._fs_cmd("subvolume", "create", self.volname, subvolname,
                 "--group_name", group, "--mode=777")

    # make sure it exists
    subvolpath = self._get_subvolume_path(self.volname, subvolname, group_name=group)
    self.assertNotEqual(subvolpath, None)

    mount_path = os.path.join("/", subvolpath)

    # create an auth id whose mds caps are restricted to the subvolume path only
    # NOTE(review): mgr/mon cap lines were lost in extraction; reconstructed from upstream -- confirm
    authid = "client.guest1"
    user = json.loads(self.fs.mon_manager.raw_cluster_cmd(
        "auth", "get-or-create", authid,
        "mds", f"allow rw path={mount_path}",
        "mgr", "allow rw",
        "osd", "allow rw tag cephfs *=*",
        "mon", "allow r",
        "--format=json-pretty"
        ))

    # Prepare guest_mount with new authid
    guest_mount = self.mount_b
    guest_mount.umount_wait()

    # configure credentials for guest client
    self._configure_guest_auth(guest_mount, "guest1", user[0]["key"])

    # mount the subvolume
    guest_mount.mount_wait(cephfs_mntpt=mount_path)

    # create 99 files of 1MB to exceed quota
    guest_mount.run_shell_payload("mkdir -p dir1")
    for i in range(99):
        filename = "{0}.{1}".format(TestVolumes.TEST_FILE_NAME_PREFIX, i)
        guest_mount.write_n_mb(os.path.join("dir1", filename), self.DEFAULT_FILE_SIZE)

    try:
        # write two files of 1MB file to exceed the quota
        guest_mount.run_shell_payload("mkdir -p dir2")
        for i in range(2):
            filename = "{0}.{1}".format(TestVolumes.TEST_FILE_NAME_PREFIX, i)
            guest_mount.write_n_mb(os.path.join("dir2", filename), self.DEFAULT_FILE_SIZE)
        # For quota to be enforced
        # NOTE(review): sleep duration reconstructed from upstream -- confirm
        time.sleep(60)
        # create 400 files of 1MB to exceed quota
        for i in range(400):
            filename = "{0}.{1}".format(TestVolumes.TEST_FILE_NAME_PREFIX, i)
            guest_mount.write_n_mb(os.path.join("dir2", filename), self.DEFAULT_FILE_SIZE)
            # Sometimes quota enforcement takes time.
            if i == 200:
                time.sleep(60)
    except CommandFailedError:
        self.fail(f"Quota should not be enforced, expected filling subvolume {subvolname} with 400 files of size 1MB to succeed")

    # cleanup the guest mount
    guest_mount.umount_wait()

    # Delete the subvolume
    self._fs_cmd("subvolume", "rm", self.volname, subvolname, "--group_name", group)

    # Delete the subvolumegroup
    self._fs_cmd("subvolumegroup", "rm", self.volname, group)

    # verify trash dir is clean
    self._wait_for_trash_empty()
def test_subvolume_group_quota_exceeded_subvolume_removal(self):
    """
    Tests subvolume removal if it's group quota is exceeded
    """
    osize = self.DEFAULT_FILE_SIZE*1024*1024*100
    # create group with 100MB quota
    group = self._generate_random_group_name()
    self._fs_cmd("subvolumegroup", "create", self.volname, group,
                 "--size", str(osize), "--mode=777")

    # make sure it exists
    grouppath = self._get_subvolume_group_path(self.volname, group)
    self.assertNotEqual(grouppath, None)

    # create subvolume under the group
    subvolname = self._generate_random_subvolume_name()
    self._fs_cmd("subvolume", "create", self.volname, subvolname,
                 "--group_name", group, "--mode=777")

    # make sure it exists
    subvolpath = self._get_subvolume_path(self.volname, subvolname, group_name=group)
    self.assertNotEqual(subvolpath, None)

    # create 99 files of 1MB to exceed quota
    self._do_subvolume_io(subvolname, subvolume_group=group, number_of_files=99)

    try:
        # write two files of 1MB file to exceed the quota
        self._do_subvolume_io(subvolname, subvolume_group=group, create_dir='dir1', number_of_files=2)
        # For quota to be enforced
        # NOTE(review): sleep duration reconstructed from upstream -- confirm
        time.sleep(20)
        # create 400 files of 1MB to exceed quota
        self._do_subvolume_io(subvolname, subvolume_group=group, create_dir='dir1', number_of_files=400)
    except CommandFailedError:
        # Delete subvolume when group quota is exceeded
        self._fs_cmd("subvolume", "rm", self.volname, subvolname, "--group_name", group)
    else:
        self.fail(f"expected filling subvolume {subvolname} with 400 files of size 1MB to fail")

    # remove group
    self._fs_cmd("subvolumegroup", "rm", self.volname, group)

    # verify trash dir is clean
    self._wait_for_trash_empty()
def test_subvolume_group_quota_exceeded_subvolume_removal_retained_snaps(self):
    """
    Tests retained snapshot subvolume removal if it's group quota is exceeded
    """
    group = self._generate_random_group_name()
    subvolname = self._generate_random_subvolume_name()
    snapshot1, snapshot2 = self._generate_random_snapshot_name(2)

    osize = self.DEFAULT_FILE_SIZE*1024*1024*100
    # create group with 100MB quota
    self._fs_cmd("subvolumegroup", "create", self.volname, group,
                 "--size", str(osize), "--mode=777")

    # make sure it exists
    grouppath = self._get_subvolume_group_path(self.volname, group)
    self.assertNotEqual(grouppath, None)

    # create subvolume under the group
    self._fs_cmd("subvolume", "create", self.volname, subvolname,
                 "--group_name", group, "--mode=777")

    # make sure it exists
    subvolpath = self._get_subvolume_path(self.volname, subvolname, group_name=group)
    self.assertNotEqual(subvolpath, None)

    # create 99 files of 1MB to exceed quota
    self._do_subvolume_io(subvolname, subvolume_group=group, number_of_files=99)

    # snapshot subvolume
    self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolname, snapshot1, "--group_name", group)
    self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolname, snapshot2, "--group_name", group)

    try:
        # write two files of 1MB file to exceed the quota
        self._do_subvolume_io(subvolname, subvolume_group=group, create_dir='dir1', number_of_files=2)
        # For quota to be enforced
        # NOTE(review): sleep duration reconstructed from upstream -- confirm
        time.sleep(20)
        # create 400 files of 1MB to exceed quota
        self._do_subvolume_io(subvolname, subvolume_group=group, number_of_files=400)
    except CommandFailedError:
        # remove with snapshot retention
        self._fs_cmd("subvolume", "rm", self.volname, subvolname, "--group_name", group, "--retain-snapshots")
        # remove snapshot1
        self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolname, snapshot1, "--group_name", group)
        # remove snapshot2 (should remove volume)
        self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolname, snapshot2, "--group_name", group)
        # verify subvolume trash is clean
        self._wait_for_subvol_trash_empty(subvolname, group=group)
    else:
        self.fail(f"expected filling subvolume {subvolname} with 400 files of size 1MB to fail")

    # remove group
    self._fs_cmd("subvolumegroup", "rm", self.volname, group)

    # verify trash dir is clean
    self._wait_for_trash_empty()
def test_subvolume_group_quota_subvolume_removal(self):
    """
    Tests subvolume removal if it's group quota is set.
    """
    # create group with size -- should set quota
    group = self._generate_random_group_name()
    self._fs_cmd("subvolumegroup", "create", self.volname, group, "1000000000")

    # create subvolume under the group
    subvolname = self._generate_random_subvolume_name()
    self._fs_cmd("subvolume", "create", self.volname, subvolname, "--group_name", group)

    # remove subvolume -- must not be blocked by the group quota
    try:
        self._fs_cmd("subvolume", "rm", self.volname, subvolname, "--group_name", group)
    except CommandFailedError:
        self.fail("expected the 'fs subvolume rm' command to succeed if group quota is set")

    # remove subvolumegroup
    self._fs_cmd("subvolumegroup", "rm", self.volname, group)

    # verify trash dir is clean
    self._wait_for_trash_empty()
def test_subvolume_group_quota_legacy_subvolume_removal(self):
    """
    Tests legacy subvolume removal if it's group quota is set.
    """
    subvolume = self._generate_random_subvolume_name()
    group = self._generate_random_group_name()

    # emulate a old-fashioned subvolume -- in a custom group
    createpath1 = os.path.join(".", "volumes", group, subvolume)
    self.mount_a.run_shell(['sudo', 'mkdir', '-p', createpath1], omit_sudo=False)

    # this would auto-upgrade on access without anyone noticing
    subvolpath1 = self._fs_cmd("subvolume", "getpath", self.volname, subvolume, "--group-name", group)
    self.assertNotEqual(subvolpath1, None)
    subvolpath1 = subvolpath1.rstrip() # remove "/" prefix and any trailing newline

    # and... the subvolume path returned should be what we created behind the scene
    self.assertEqual(createpath1[1:], subvolpath1)

    # Set subvolumegroup quota on idempotent subvolumegroup creation
    self._fs_cmd("subvolumegroup", "create", self.volname, group, "1000000000")

    # remove subvolume -- must not be blocked by the group quota
    try:
        self._fs_cmd("subvolume", "rm", self.volname, subvolume, "--group_name", group)
    except CommandFailedError:
        self.fail("expected the 'fs subvolume rm' command to succeed if group quota is set")

    # remove subvolumegroup
    self._fs_cmd("subvolumegroup", "rm", self.volname, group)

    # verify trash dir is clean
    self._wait_for_trash_empty()
def test_subvolume_group_quota_v1_subvolume_removal(self):
    """
    Tests v1 subvolume removal if it's group quota is set.
    """
    subvolume = self._generate_random_subvolume_name()
    group = self._generate_random_group_name()

    # emulate a v1 subvolume -- in a custom group
    self._create_v1_subvolume(subvolume, subvol_group=group, has_snapshot=False)

    # Set subvolumegroup quota on idempotent subvolumegroup creation
    self._fs_cmd("subvolumegroup", "create", self.volname, group, "1000000000")

    # remove subvolume -- must not be blocked by the group quota
    try:
        self._fs_cmd("subvolume", "rm", self.volname, subvolume, "--group_name", group)
    except CommandFailedError:
        self.fail("expected the 'fs subvolume rm' command to succeed if group quota is set")

    # remove subvolumegroup
    self._fs_cmd("subvolumegroup", "rm", self.volname, group)

    # verify trash dir is clean
    self._wait_for_trash_empty()
def test_subvolume_group_resize_fail_invalid_size(self):
    """
    That a subvolume group cannot be resized to an invalid size and the quota did not change
    """
    osize = self.DEFAULT_FILE_SIZE*1024*1024
    # create group with 1MB quota
    group = self._generate_random_group_name()
    self._fs_cmd("subvolumegroup", "create", self.volname, group, "--size", str(osize))

    # make sure it exists
    grouppath = self._get_subvolume_group_path(self.volname, group)
    self.assertNotEqual(grouppath, None)

    # try to resize the subvolume with an invalid size -10
    # NOTE(review): the original assignment was lost in extraction; upstream uses -10 -- confirm
    nsize = -10
    try:
        self._fs_cmd("subvolumegroup", "resize", self.volname, group, str(nsize))
    except CommandFailedError as ce:
        self.assertEqual(ce.exitstatus, errno.EINVAL,
                         "invalid error code on resize of subvolume group with invalid size")
    else:
        self.fail("expected the 'fs subvolumegroup resize' command to fail")

    # verify the quota did not change
    size = int(self.mount_a.getfattr(grouppath, "ceph.quota.max_bytes"))
    self.assertEqual(size, osize)

    # remove group
    self._fs_cmd("subvolumegroup", "rm", self.volname, group)
def test_subvolume_group_resize_fail_zero_size(self):
    """
    That a subvolume group cannot be resized to a zero size and the quota did not change
    """
    osize = self.DEFAULT_FILE_SIZE*1024*1024
    # create group with 1MB quota
    group = self._generate_random_group_name()
    self._fs_cmd("subvolumegroup", "create", self.volname, group, "--size", str(osize))

    # make sure it exists
    grouppath = self._get_subvolume_group_path(self.volname, group)
    self.assertNotEqual(grouppath, None)

    # try to resize the subvolume group with size 0
    nsize = 0
    try:
        self._fs_cmd("subvolumegroup", "resize", self.volname, group, str(nsize))
    except CommandFailedError as ce:
        self.assertEqual(ce.exitstatus, errno.EINVAL,
                         "invalid error code on resize of subvolume group with invalid size")
    else:
        self.fail("expected the 'fs subvolumegroup resize' command to fail")

    # verify the quota did not change
    size = int(self.mount_a.getfattr(grouppath, "ceph.quota.max_bytes"))
    self.assertEqual(size, osize)

    # remove group
    self._fs_cmd("subvolumegroup", "rm", self.volname, group)
def test_subvolume_group_resize_quota_lt_used_size(self):
    """
    That a subvolume group can be resized to a size smaller than the current used size
    and the resulting quota matches the expected size.
    """
    osize = self.DEFAULT_FILE_SIZE*1024*1024*20
    # create group with 20MB quota
    group = self._generate_random_group_name()
    self._fs_cmd("subvolumegroup", "create", self.volname, group,
                 "--size", str(osize), "--mode=777")

    # make sure it exists
    grouppath = self._get_subvolume_group_path(self.volname, group)
    self.assertNotEqual(grouppath, None)

    # create subvolume under the group
    subvolname = self._generate_random_subvolume_name()
    self._fs_cmd("subvolume", "create", self.volname, subvolname,
                 "--group_name", group, "--mode=777")

    # make sure it exists
    subvolpath = self._get_subvolume_path(self.volname, subvolname, group_name=group)
    self.assertNotEqual(subvolpath, None)

    # create one file of 10MB
    file_size=self.DEFAULT_FILE_SIZE*10
    number_of_files=1
    log.debug("filling subvolume {0} with {1} file of size {2}MB".format(subvolname,
                                                                         number_of_files,
                                                                         file_size))
    filename = "{0}.{1}".format(TestVolumes.TEST_FILE_NAME_PREFIX, self.DEFAULT_NUMBER_OF_FILES+1)
    self.mount_a.write_n_mb(os.path.join(subvolpath, filename), file_size)

    usedsize = int(self.mount_a.getfattr(subvolpath, "ceph.dir.rbytes"))

    # shrink the subvolume group below the used size -- allowed without --no_shrink
    nsize = usedsize // 2
    try:
        self._fs_cmd("subvolumegroup", "resize", self.volname, group, str(nsize))
    except CommandFailedError:
        self.fail("expected the 'fs subvolumegroup resize' command to succeed")

    # verify the quota matches the requested size
    size = int(self.mount_a.getfattr(grouppath, "ceph.quota.max_bytes"))
    self.assertEqual(size, nsize)

    # remove subvolume and group
    self._fs_cmd("subvolume", "rm", self.volname, subvolname, "--group_name", group)
    self._fs_cmd("subvolumegroup", "rm", self.volname, group)

    # verify trash dir is clean
    self._wait_for_trash_empty()
def test_subvolume_group_resize_fail_quota_lt_used_size_no_shrink(self):
    """
    That a subvolume group cannot be resized to a size smaller than the current used size
    when --no_shrink is given and the quota did not change.
    """
    osize = self.DEFAULT_FILE_SIZE*1024*1024*20
    # create group with 20MB quota
    group = self._generate_random_group_name()
    self._fs_cmd("subvolumegroup", "create", self.volname, group,
                 "--size", str(osize), "--mode=777")

    # make sure it exists
    grouppath = self._get_subvolume_group_path(self.volname, group)
    self.assertNotEqual(grouppath, None)

    # create subvolume under the group
    subvolname = self._generate_random_subvolume_name()
    self._fs_cmd("subvolume", "create", self.volname, subvolname,
                 "--group_name", group, "--mode=777")

    # make sure it exists
    subvolpath = self._get_subvolume_path(self.volname, subvolname, group_name=group)
    self.assertNotEqual(subvolpath, None)

    # create one file of 10MB
    file_size=self.DEFAULT_FILE_SIZE*10
    number_of_files=1
    log.debug("filling subvolume {0} with {1} file of size {2}MB".format(subvolname,
                                                                         number_of_files,
                                                                         file_size))
    filename = "{0}.{1}".format(TestVolumes.TEST_FILE_NAME_PREFIX, self.DEFAULT_NUMBER_OF_FILES+2)
    self.mount_a.write_n_mb(os.path.join(subvolpath, filename), file_size)

    usedsize = int(self.mount_a.getfattr(grouppath, "ceph.dir.rbytes"))

    # try to shrink below used size with --no_shrink -- must fail with EINVAL
    nsize = usedsize // 2
    try:
        self._fs_cmd("subvolumegroup", "resize", self.volname, group, str(nsize), "--no_shrink")
    except CommandFailedError as ce:
        self.assertEqual(ce.exitstatus, errno.EINVAL, "invalid error code on resize of subvolumegroup with quota less than used")
    else:
        self.fail("expected the 'fs subvolumegroup resize' command to fail")

    # verify the quota did not change
    size = int(self.mount_a.getfattr(grouppath, "ceph.quota.max_bytes"))
    self.assertEqual(size, osize)

    # remove subvolume and group
    self._fs_cmd("subvolume", "rm", self.volname, subvolname, "--group_name", group)
    self._fs_cmd("subvolumegroup", "rm", self.volname, group)

    # verify trash dir is clean
    self._wait_for_trash_empty()
def test_subvolume_group_resize_expand_on_full_subvolume(self):
    """
    That the subvolume group can be expanded after it is full and future write succeed
    """
    osize = self.DEFAULT_FILE_SIZE*1024*1024*100
    # create group with 100MB quota
    group = self._generate_random_group_name()
    self._fs_cmd("subvolumegroup", "create", self.volname, group,
                 "--size", str(osize), "--mode=777")

    # make sure it exists
    grouppath = self._get_subvolume_group_path(self.volname, group)
    self.assertNotEqual(grouppath, None)

    # create subvolume under the group
    subvolname = self._generate_random_subvolume_name()
    self._fs_cmd("subvolume", "create", self.volname, subvolname,
                 "--group_name", group, "--mode=777")

    # make sure it exists
    subvolpath = self._get_subvolume_path(self.volname, subvolname, group_name=group)
    self.assertNotEqual(subvolpath, None)

    # create 99 files of 1MB
    self._do_subvolume_io(subvolname, subvolume_group=group, number_of_files=99)

    try:
        # write two files of 1MB file to exceed the quota
        self._do_subvolume_io(subvolname, subvolume_group=group, create_dir='dir1', number_of_files=2)
        # For quota to be enforced
        # NOTE(review): the original sleep duration was lost in transit -- confirm upstream
        import time
        time.sleep(60)
        # create 500 files of 1MB
        self._do_subvolume_io(subvolname, subvolume_group=group, create_dir='dir1', number_of_files=500)
    except CommandFailedError:
        # Not able to write. So expand the subvolumegroup more and try writing the files again
        # NOTE(review): the original expansion expression was dropped in transit; any
        # quota comfortably larger than the used size suffices -- confirm upstream
        nsize = osize*2
        self._fs_cmd("subvolumegroup", "resize", self.volname, group, str(nsize))
        try:
            self._do_subvolume_io(subvolname, subvolume_group=group, create_dir='dir1', number_of_files=500)
        except CommandFailedError:
            self.fail("expected filling subvolume {0} with 500 files of size 1MB "
                      "to succeed".format(subvolname))
    else:
        self.fail("expected filling subvolume {0} with 500 files of size 1MB "
                  "to fail".format(subvolname))

    # remove subvolume and group
    self._fs_cmd("subvolume", "rm", self.volname, subvolname, "--group_name", group)
    self._fs_cmd("subvolumegroup", "rm", self.volname, group)

    # verify trash dir is clean
    self._wait_for_trash_empty()
def test_subvolume_group_resize_infinite_size(self):
    """
    That a subvolume group can be resized to an infinite size by unsetting its quota.
    """
    # create group with a finite (1MB) quota to start from
    osize = self.DEFAULT_FILE_SIZE*1024*1024
    group = self._generate_random_group_name()
    self._fs_cmd("subvolumegroup", "create", self.volname, group,
                 "--size", str(osize))

    # make sure it exists
    grouppath = self._get_subvolume_group_path(self.volname, group)
    self.assertNotEqual(grouppath, None)

    # resize to infinite size ("inf" unsets the quota xattr)
    self._fs_cmd("subvolumegroup", "resize", self.volname, group, "inf")

    # verify that the quota is None
    size = self.mount_a.getfattr(grouppath, "ceph.quota.max_bytes")
    self.assertEqual(size, None)

    # remove subvolume group
    self._fs_cmd("subvolumegroup", "rm", self.volname, group)
def test_subvolume_group_resize_infinite_size_future_writes(self):
    """
    That a subvolume group can be resized to an infinite size and the future writes succeed.
    """
    osize = self.DEFAULT_FILE_SIZE*1024*1024*5
    # create group with 5MB quota
    group = self._generate_random_group_name()
    self._fs_cmd("subvolumegroup", "create", self.volname, group,
                 "--size", str(osize), "--mode=777")

    # make sure it exists
    grouppath = self._get_subvolume_group_path(self.volname, group)
    self.assertNotEqual(grouppath, None)

    # create subvolume under the group
    subvolname = self._generate_random_subvolume_name()
    self._fs_cmd("subvolume", "create", self.volname, subvolname,
                 "--group_name", group, "--mode=777")

    # make sure it exists
    subvolpath = self._get_subvolume_path(self.volname, subvolname, group_name=group)
    self.assertNotEqual(subvolpath, None)

    # create 4 files of 1MB
    self._do_subvolume_io(subvolname, subvolume_group=group, number_of_files=4)

    try:
        # write two files of 1MB file to exceed the quota
        self._do_subvolume_io(subvolname, subvolume_group=group, create_dir='dir1', number_of_files=2)
        # For quota to be enforced
        # NOTE(review): the original sleep duration was lost in transit -- confirm upstream
        import time
        time.sleep(60)
        # create 500 files of 1MB
        self._do_subvolume_io(subvolname, subvolume_group=group, create_dir='dir1', number_of_files=500)
    except CommandFailedError:
        # Not able to write. So resize subvolumegroup to 'inf' and try writing the files again
        self._fs_cmd("subvolumegroup", "resize", self.volname, group, "inf")
        try:
            self._do_subvolume_io(subvolname, subvolume_group=group, create_dir='dir1', number_of_files=500)
        except CommandFailedError:
            self.fail("expected filling subvolume {0} with 500 files of size 1MB "
                      "to succeed".format(subvolname))
    else:
        self.fail("expected filling subvolume {0} with 500 files of size 1MB "
                  "to fail".format(subvolname))

    # verify that the quota is None
    size = self.mount_a.getfattr(grouppath, "ceph.quota.max_bytes")
    self.assertEqual(size, None)

    # remove subvolume and group
    self._fs_cmd("subvolume", "rm", self.volname, subvolname, "--group_name", group)
    self._fs_cmd("subvolumegroup", "rm", self.volname, group)

    # verify trash dir is clean
    self._wait_for_trash_empty()
def test_subvolume_group_ls(self):
    # tests the 'fs subvolumegroup ls' command

    subvolumegroups = []

    #create subvolumegroups
    subvolumegroups = self._generate_random_group_name(3)
    for groupname in subvolumegroups:
        self._fs_cmd("subvolumegroup", "create", self.volname, groupname)

    # 'ls' must report every group just created
    subvolumegroupls = json.loads(self._fs_cmd('subvolumegroup', 'ls', self.volname))
    if len(subvolumegroupls) == 0:
        raise RuntimeError("Expected the 'fs subvolumegroup ls' command to list the created subvolume groups")
    # order-insensitive comparison of listed names vs. created names
    subvolgroupnames = [subvolumegroup['name'] for subvolumegroup in subvolumegroupls]
    if collections.Counter(subvolgroupnames) != collections.Counter(subvolumegroups):
        raise RuntimeError("Error creating or listing subvolume groups")
def test_subvolume_group_ls_filter(self):
    # tests the 'fs subvolumegroup ls' command filters '_deleting' directory

    subvolumegroups = []

    #create subvolumegroup
    subvolumegroups = self._generate_random_group_name(3)
    for groupname in subvolumegroups:
        self._fs_cmd("subvolumegroup", "create", self.volname, groupname)

    # create subvolume and remove. This creates '_deleting' directory.
    subvolume = self._generate_random_subvolume_name()
    self._fs_cmd("subvolume", "create", self.volname, subvolume)
    self._fs_cmd("subvolume", "rm", self.volname, subvolume)

    # the internal '_deleting' trash directory must not show up in the listing
    subvolumegroupls = json.loads(self._fs_cmd('subvolumegroup', 'ls', self.volname))
    subvolgroupnames = [subvolumegroup['name'] for subvolumegroup in subvolumegroupls]
    if "_deleting" in subvolgroupnames:
        self.fail("Listing subvolume groups listed '_deleting' directory")
def test_subvolume_group_ls_filter_internal_directories(self):
    # tests the 'fs subvolumegroup ls' command filters internal directories
    # eg: '_deleting', '_nogroup', '_index', "_legacy"

    subvolumegroups = self._generate_random_group_name(3)
    subvolume = self._generate_random_subvolume_name()
    snapshot = self._generate_random_snapshot_name()
    clone = self._generate_random_clone_name()

    #create subvolumegroups
    for groupname in subvolumegroups:
        self._fs_cmd("subvolumegroup", "create", self.volname, groupname)

    # create subvolume which will create '_nogroup' directory
    self._fs_cmd("subvolume", "create", self.volname, subvolume)

    # snapshot the subvolume so it can be cloned
    self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolume, snapshot)

    # clone snapshot which will create '_index' directory
    self._fs_cmd("subvolume", "snapshot", "clone", self.volname, subvolume, snapshot, clone)

    # wait for clone to complete
    self._wait_for_clone_to_complete(clone)

    # remove the snapshot now that the clone is done with it
    self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume, snapshot)

    # remove subvolume which will create '_deleting' directory
    self._fs_cmd("subvolume", "rm", self.volname, subvolume)

    # list subvolumegroups
    ret = json.loads(self._fs_cmd('subvolumegroup', 'ls', self.volname))
    self.assertEqual(len(ret), len(subvolumegroups))

    # only the explicitly created groups may appear; none of the internal
    # directories created above should leak into the listing
    ret_list = [subvolumegroup['name'] for subvolumegroup in ret]
    self.assertEqual(len(ret_list), len(subvolumegroups))

    self.assertEqual(all(elem in subvolumegroups for elem in ret_list), True)

    # cleanup: remove the clone and the groups
    self._fs_cmd("subvolume", "rm", self.volname, clone)
    for groupname in subvolumegroups:
        self._fs_cmd("subvolumegroup", "rm", self.volname, groupname)
def test_subvolume_group_ls_for_nonexistent_volume(self):
    # tests the 'fs subvolumegroup ls' command when /volume doesn't exist
    # prerequisite: we expect that the test volume is created and a subvolumegroup is NOT created

    # list subvolume groups
    subvolumegroupls = json.loads(self._fs_cmd('subvolumegroup', 'ls', self.volname))
    if len(subvolumegroupls) > 0:
        raise RuntimeError("Expected the 'fs subvolumegroup ls' command to output an empty list")
def test_subvolumegroup_pin_distributed(self):
    # That ephemeral distributed pinning of a subvolume group spreads its
    # subvolume subtrees across the active MDS ranks.
    self.fs.set_max_mds(2)
    status = self.fs.wait_for_daemons()
    self.config_set('mds', 'mds_export_ephemeral_distributed', True)

    # NOTE(review): the group-name assignment was dropped in transit; upstream
    # uses a fixed name here -- confirm against the original test
    group = "pinme"
    self._fs_cmd("subvolumegroup", "create", self.volname, group)
    self._fs_cmd("subvolumegroup", "pin", self.volname, group, "distributed", "True")
    subvolumes = self._generate_random_subvolume_name(50)
    for subvolume in subvolumes:
        self._fs_cmd("subvolume", "create", self.volname, subvolume, "--group_name", group)
    # 2 ranks * 2 expected subtrees per rank
    self._wait_distributed_subtrees(2 * 2, status=status, rank="all")

    # remove subvolumes
    for subvolume in subvolumes:
        self._fs_cmd("subvolume", "rm", self.volname, subvolume, group)

    # verify trash dir is clean
    self._wait_for_trash_empty()
def test_subvolume_group_rm_force(self):
    # test removing non-existing subvolume group with --force
    group = self._generate_random_group_name()
    # --force must make removal of a non-existent group a no-op, not an error
    try:
        self._fs_cmd("subvolumegroup", "rm", self.volname, group, "--force")
    except CommandFailedError:
        raise RuntimeError("expected the 'fs subvolumegroup rm --force' command to succeed")
def test_subvolume_group_exists_with_subvolumegroup_and_no_subvolume(self):
    """Test the presence of any subvolumegroup when only subvolumegroup is present"""
    group = self._generate_random_group_name()
    # create subvolumegroup
    self._fs_cmd("subvolumegroup", "create", self.volname, group)
    ret = self._fs_cmd("subvolumegroup", "exist", self.volname)
    self.assertEqual(ret.strip('\n'), "subvolumegroup exists")
    # delete subvolumegroup
    self._fs_cmd("subvolumegroup", "rm", self.volname, group)
    ret = self._fs_cmd("subvolumegroup", "exist", self.volname)
    self.assertEqual(ret.strip('\n'), "no subvolumegroup exists")
def test_subvolume_group_exists_with_no_subvolumegroup_and_subvolume(self):
    """Test the presence of any subvolumegroup when no subvolumegroup is present"""
    ret = self._fs_cmd("subvolumegroup", "exist", self.volname)
    self.assertEqual(ret.strip('\n'), "no subvolumegroup exists")
def test_subvolume_group_exists_with_subvolumegroup_and_subvolume(self):
    """Test the presence of any subvolume when subvolumegroup
    and subvolume both are present"""
    group = self._generate_random_group_name()
    subvolume = self._generate_random_subvolume_name(2)
    # create subvolumegroup
    self._fs_cmd("subvolumegroup", "create", self.volname, group)
    # create subvolume in group
    self._fs_cmd("subvolume", "create", self.volname, subvolume[0], "--group_name", group)
    # create a second subvolume outside any group
    self._fs_cmd("subvolume", "create", self.volname, subvolume[1])
    ret = self._fs_cmd("subvolumegroup", "exist", self.volname)
    self.assertEqual(ret.strip('\n'), "subvolumegroup exists")
    # delete subvolume in group
    self._fs_cmd("subvolume", "rm", self.volname, subvolume[0], "--group_name", group)
    ret = self._fs_cmd("subvolumegroup", "exist", self.volname)
    self.assertEqual(ret.strip('\n'), "subvolumegroup exists")
    # delete the ungrouped subvolume; the (now empty) group still exists
    self._fs_cmd("subvolume", "rm", self.volname, subvolume[1])
    ret = self._fs_cmd("subvolumegroup", "exist", self.volname)
    self.assertEqual(ret.strip('\n'), "subvolumegroup exists")
    # delete subvolumegroup
    self._fs_cmd("subvolumegroup", "rm", self.volname, group)
    ret = self._fs_cmd("subvolumegroup", "exist", self.volname)
    self.assertEqual(ret.strip('\n'), "no subvolumegroup exists")
def test_subvolume_group_exists_without_subvolumegroup_and_with_subvolume(self):
    """Test the presence of any subvolume when subvolume is present
    but no subvolumegroup is present"""
    subvolume = self._generate_random_subvolume_name()
    # create subvolume (goes into the internal '_nogroup' directory, which
    # must not count as a subvolumegroup)
    self._fs_cmd("subvolume", "create", self.volname, subvolume)
    ret = self._fs_cmd("subvolumegroup", "exist", self.volname)
    self.assertEqual(ret.strip('\n'), "no subvolumegroup exists")
    # delete subvolume
    self._fs_cmd("subvolume", "rm", self.volname, subvolume)
    ret = self._fs_cmd("subvolumegroup", "exist", self.volname)
    self.assertEqual(ret.strip('\n'), "no subvolumegroup exists")
1873 class TestSubvolumes(TestVolumesHelper
):
1874 """Tests for FS subvolume operations, except snapshot and snapshot clone."""
def test_async_subvolume_rm(self):
    # That many subvolumes with data can be removed while the client is
    # unmounted, and the asynchronous trash purge eventually empties.
    subvolumes = self._generate_random_subvolume_name(100)

    # create subvolumes and fill each with a little data
    for subvolume in subvolumes:
        self._fs_cmd("subvolume", "create", self.volname, subvolume, "--mode=777")
        self._do_subvolume_io(subvolume, number_of_files=10)

    self.mount_a.umount_wait()

    # remove subvolumes while the mount is down
    for subvolume in subvolumes:
        self._fs_cmd("subvolume", "rm", self.volname, subvolume)

    self.mount_a.mount_wait()

    # verify trash dir is clean; generous timeout as 100 subvolumes are purged
    self._wait_for_trash_empty(timeout=300)
def test_default_uid_gid_subvolume(self):
    # That a subvolume created without --uid/--gid gets the default ownership.
    subvolume = self._generate_random_subvolume_name()
    # NOTE(review): the expected uid/gid constants were dropped in transit;
    # upstream expects root ownership (0/0) by default -- confirm
    expected_uid = 0
    expected_gid = 0

    # create subvolume
    self._fs_cmd("subvolume", "create", self.volname, subvolume)
    subvol_path = self._get_subvolume_path(self.volname, subvolume)

    # check subvolume's uid and gid
    stat = self.mount_a.stat(subvol_path)
    self.assertEqual(stat['st_uid'], expected_uid)
    self.assertEqual(stat['st_gid'], expected_gid)

    # remove subvolume
    self._fs_cmd("subvolume", "rm", self.volname, subvolume)

    # verify trash dir is clean
    self._wait_for_trash_empty()
def test_nonexistent_subvolume_rm(self):
    # remove non-existing subvolume
    subvolume = "non_existent_subvolume"

    # try, remove subvolume -- must fail with ENOENT; any other error is a bug
    try:
        self._fs_cmd("subvolume", "rm", self.volname, subvolume)
    except CommandFailedError as ce:
        if ce.exitstatus != errno.ENOENT:
            raise
    else:
        raise RuntimeError("expected the 'fs subvolume rm' command to fail")
def test_subvolume_create_and_rm(self):
    # create subvolume
    subvolume = self._generate_random_subvolume_name()
    self._fs_cmd("subvolume", "create", self.volname, subvolume)

    # make sure it exists
    subvolpath = self._fs_cmd("subvolume", "getpath", self.volname, subvolume)
    self.assertNotEqual(subvolpath, None)

    # remove subvolume
    self._fs_cmd("subvolume", "rm", self.volname, subvolume)
    # make sure its gone -- getpath must now fail with ENOENT
    try:
        self._fs_cmd("subvolume", "getpath", self.volname, subvolume)
    except CommandFailedError as ce:
        if ce.exitstatus != errno.ENOENT:
            raise
    else:
        raise RuntimeError("expected the 'fs subvolume getpath' command to fail. Subvolume not removed.")

    # verify trash dir is clean
    self._wait_for_trash_empty()
def test_subvolume_create_and_rm_in_group(self):
    # That a subvolume can be created inside a group and removed from it.
    subvolume = self._generate_random_subvolume_name()
    group = self._generate_random_group_name()

    # create the group first
    self._fs_cmd("subvolumegroup", "create", self.volname, group)

    # create subvolume in group
    self._fs_cmd("subvolume", "create", self.volname, subvolume, "--group_name", group)

    # remove subvolume (group name passed positionally)
    self._fs_cmd("subvolume", "rm", self.volname, subvolume, group)

    # verify trash dir is clean
    self._wait_for_trash_empty()

    # remove the now-empty group
    self._fs_cmd("subvolumegroup", "rm", self.volname, group)
def test_subvolume_create_idempotence(self):
    # create subvolume
    subvolume = self._generate_random_subvolume_name()
    self._fs_cmd("subvolume", "create", self.volname, subvolume)

    # try creating w/ same subvolume name -- should be idempotent
    self._fs_cmd("subvolume", "create", self.volname, subvolume)

    # remove subvolume
    self._fs_cmd("subvolume", "rm", self.volname, subvolume)

    # verify trash dir is clean
    self._wait_for_trash_empty()
def test_subvolume_create_idempotence_resize(self):
    # create subvolume
    subvolume = self._generate_random_subvolume_name()
    self._fs_cmd("subvolume", "create", self.volname, subvolume)

    # try creating w/ same subvolume name with size -- should set quota
    self._fs_cmd("subvolume", "create", self.volname, subvolume, "1000000000")

    # get subvolume metadata
    subvol_info = json.loads(self._get_subvolume_info(self.volname, subvolume))
    # the idempotent re-create must have applied the quota
    self.assertEqual(subvol_info["bytes_quota"], 1000000000)

    # remove subvolume
    self._fs_cmd("subvolume", "rm", self.volname, subvolume)

    # verify trash dir is clean
    self._wait_for_trash_empty()
def test_subvolume_create_idempotence_mode(self):
    # That an idempotent re-create with --mode updates the subvolume's mode.
    # default mode
    default_mode = "755"

    # create subvolume without a mode; it gets the default
    subvolume = self._generate_random_subvolume_name()
    self._fs_cmd("subvolume", "create", self.volname, subvolume)

    subvol_path = self._get_subvolume_path(self.volname, subvolume)

    actual_mode_1 = self.mount_a.run_shell(['stat', '-c' '%a', subvol_path]).stdout.getvalue().strip()
    self.assertEqual(actual_mode_1, default_mode)

    # try creating w/ same subvolume name with --mode 777
    # NOTE(review): the new_mode assignment was dropped in transit; "777"
    # follows from the comment above -- confirm
    new_mode = "777"
    self._fs_cmd("subvolume", "create", self.volname, subvolume, "--mode", new_mode)

    actual_mode_2 = self.mount_a.run_shell(['stat', '-c' '%a', subvol_path]).stdout.getvalue().strip()
    self.assertEqual(actual_mode_2, new_mode)

    # remove subvolume
    self._fs_cmd("subvolume", "rm", self.volname, subvolume)

    # verify trash dir is clean
    self._wait_for_trash_empty()
def test_subvolume_create_idempotence_without_passing_mode(self):
    # That an idempotent re-create WITHOUT --mode resets the mode to default.
    desired_mode = "777"
    subvolume = self._generate_random_subvolume_name()
    self._fs_cmd("subvolume", "create", self.volname, subvolume, "--mode", desired_mode)

    subvol_path = self._get_subvolume_path(self.volname, subvolume)

    actual_mode_1 = self.mount_a.run_shell(['stat', '-c' '%a', subvol_path]).stdout.getvalue().strip()
    self.assertEqual(actual_mode_1, desired_mode)

    # mode expected after a create without --mode
    default_mode = "755"

    # try creating w/ same subvolume name without passing --mode argument
    self._fs_cmd("subvolume", "create", self.volname, subvolume)

    actual_mode_2 = self.mount_a.run_shell(['stat', '-c' '%a', subvol_path]).stdout.getvalue().strip()
    self.assertEqual(actual_mode_2, default_mode)

    # remove subvolume
    self._fs_cmd("subvolume", "rm", self.volname, subvolume)

    # verify trash dir is clean
    self._wait_for_trash_empty()
def test_subvolume_create_isolated_namespace(self):
    """
    Create subvolume in separate rados namespace
    """
    # create subvolume with an isolated rados namespace
    subvolume = self._generate_random_subvolume_name()
    self._fs_cmd("subvolume", "create", self.volname, subvolume, "--namespace-isolated")

    # get subvolume metadata
    subvol_info = json.loads(self._get_subvolume_info(self.volname, subvolume))
    self.assertNotEqual(len(subvol_info), 0)
    # namespace is derived from the subvolume name with the fixed prefix
    self.assertEqual(subvol_info["pool_namespace"], "fsvolumens_" + subvolume)

    # remove subvolume
    self._fs_cmd("subvolume", "rm", self.volname, subvolume)

    # verify trash dir is clean
    self._wait_for_trash_empty()
def test_subvolume_create_with_auto_cleanup_on_fail(self):
    # That a failed subvolume create leaves no partial subvolume behind.
    subvolume = self._generate_random_subvolume_name()
    data_pool = "invalid_pool"
    # create subvolume with invalid data pool layout fails
    with self.assertRaises(CommandFailedError):
        self._fs_cmd("subvolume", "create", self.volname, subvolume, "--pool_layout", data_pool)

    # check whether subvol path is cleaned up
    try:
        self._fs_cmd("subvolume", "getpath", self.volname, subvolume)
    except CommandFailedError as ce:
        self.assertEqual(ce.exitstatus, errno.ENOENT, "invalid error code on getpath of non-existent subvolume")
    else:
        self.fail("expected the 'fs subvolume getpath' command to fail")

    # verify trash dir is clean
    self._wait_for_trash_empty()
def test_subvolume_create_with_desired_data_pool_layout_in_group(self):
    # That a subvolume honours an explicit --pool_layout inside a group.
    subvol1, subvol2 = self._generate_random_subvolume_name(2)
    group = self._generate_random_group_name()

    # create group. this also helps set default pool layout for subvolumes
    # created within the group.
    self._fs_cmd("subvolumegroup", "create", self.volname, group)

    # create subvolume in group.
    self._fs_cmd("subvolume", "create", self.volname, subvol1, "--group_name", group)
    subvol1_path = self._get_subvolume_path(self.volname, subvol1, group_name=group)

    default_pool = self.mount_a.getfattr(subvol1_path, "ceph.dir.layout.pool")
    new_pool = "new_pool"
    self.assertNotEqual(default_pool, new_pool)

    # add the new data pool to the fs
    newid = self.fs.add_data_pool(new_pool)

    # create subvolume specifying the new data pool as its pool layout
    self._fs_cmd("subvolume", "create", self.volname, subvol2, "--group_name", group,
                 "--pool_layout", new_pool)
    subvol2_path = self._get_subvolume_path(self.volname, subvol2, group_name=group)

    desired_pool = self.mount_a.getfattr(subvol2_path, "ceph.dir.layout.pool")
    try:
        self.assertEqual(desired_pool, new_pool)
    except AssertionError:
        self.assertEqual(int(desired_pool), newid) # old kernel returns id

    self._fs_cmd("subvolume", "rm", self.volname, subvol2, group)
    self._fs_cmd("subvolume", "rm", self.volname, subvol1, group)
    self._fs_cmd("subvolumegroup", "rm", self.volname, group)

    # verify trash dir is clean
    self._wait_for_trash_empty()
def test_subvolume_create_with_desired_mode(self):
    # That --mode applies to the subvolume itself while its parent
    # directories keep the default mode.
    subvol1 = self._generate_random_subvolume_name()

    # mode expected on /volumes and on the group directory
    default_mode = "755"
    # mode requested for the subvolume
    desired_mode = "777"

    self._fs_cmd("subvolume", "create", self.volname, subvol1, "--mode", "777")

    subvol1_path = self._get_subvolume_path(self.volname, subvol1)

    # check subvolumegroup's mode
    subvol_par_path = os.path.dirname(subvol1_path)
    group_path = os.path.dirname(subvol_par_path)
    actual_mode1 = self.mount_a.run_shell(['stat', '-c' '%a', group_path]).stdout.getvalue().strip()
    self.assertEqual(actual_mode1, default_mode)
    # check /volumes mode
    volumes_path = os.path.dirname(group_path)
    actual_mode2 = self.mount_a.run_shell(['stat', '-c' '%a', volumes_path]).stdout.getvalue().strip()
    self.assertEqual(actual_mode2, default_mode)
    # check subvolume's mode
    actual_mode3 = self.mount_a.run_shell(['stat', '-c' '%a', subvol1_path]).stdout.getvalue().strip()
    self.assertEqual(actual_mode3, desired_mode)

    self._fs_cmd("subvolume", "rm", self.volname, subvol1)

    # verify trash dir is clean
    self._wait_for_trash_empty()
def test_subvolume_create_with_desired_mode_in_group(self):
    # That --mode (with or without a leading 0) applies to subvolumes
    # created inside a group.
    subvol1, subvol2, subvol3 = self._generate_random_subvolume_name(3)

    group = self._generate_random_group_name()
    # default mode (no --mode given)
    expected_mode1 = "755"
    # explicitly requested mode
    expected_mode2 = "777"

    # create the group
    self._fs_cmd("subvolumegroup", "create", self.volname, group)

    # create subvolume in group
    self._fs_cmd("subvolume", "create", self.volname, subvol1, "--group_name", group)
    self._fs_cmd("subvolume", "create", self.volname, subvol2, "--group_name", group, "--mode", "777")
    # check whether mode 0777 also works
    self._fs_cmd("subvolume", "create", self.volname, subvol3, "--group_name", group, "--mode", "0777")

    subvol1_path = self._get_subvolume_path(self.volname, subvol1, group_name=group)
    subvol2_path = self._get_subvolume_path(self.volname, subvol2, group_name=group)
    subvol3_path = self._get_subvolume_path(self.volname, subvol3, group_name=group)

    # check subvolume's mode
    actual_mode1 = self.mount_a.run_shell(['stat', '-c' '%a', subvol1_path]).stdout.getvalue().strip()
    actual_mode2 = self.mount_a.run_shell(['stat', '-c' '%a', subvol2_path]).stdout.getvalue().strip()
    actual_mode3 = self.mount_a.run_shell(['stat', '-c' '%a', subvol3_path]).stdout.getvalue().strip()
    self.assertEqual(actual_mode1, expected_mode1)
    self.assertEqual(actual_mode2, expected_mode2)
    self.assertEqual(actual_mode3, expected_mode2)

    self._fs_cmd("subvolume", "rm", self.volname, subvol1, group)
    self._fs_cmd("subvolume", "rm", self.volname, subvol2, group)
    self._fs_cmd("subvolume", "rm", self.volname, subvol3, group)
    self._fs_cmd("subvolumegroup", "rm", self.volname, group)

    # verify trash dir is clean
    self._wait_for_trash_empty()
def test_subvolume_create_with_desired_uid_gid(self):
    """
    That the subvolume can be created with the desired uid and gid and its uid and gid matches the
    expected values.
    """
    # NOTE(review): the uid/gid constants were dropped in transit; 1000/1000
    # matches the upstream test -- confirm
    uid = 1000
    gid = 1000

    # create subvolume
    subvolname = self._generate_random_subvolume_name()
    self._fs_cmd("subvolume", "create", self.volname, subvolname, "--uid", str(uid), "--gid", str(gid))

    # make sure it exists
    subvolpath = self._get_subvolume_path(self.volname, subvolname)
    self.assertNotEqual(subvolpath, None)

    # verify the uid and gid
    suid = int(self.mount_a.run_shell(['stat', '-c' '%u', subvolpath]).stdout.getvalue().strip())
    sgid = int(self.mount_a.run_shell(['stat', '-c' '%g', subvolpath]).stdout.getvalue().strip())
    self.assertEqual(uid, suid)
    self.assertEqual(gid, sgid)

    # remove subvolume
    self._fs_cmd("subvolume", "rm", self.volname, subvolname)

    # verify trash dir is clean
    self._wait_for_trash_empty()
def test_subvolume_create_with_invalid_data_pool_layout(self):
    # That creating a subvolume with a bogus pool layout fails with EINVAL.
    subvolume = self._generate_random_subvolume_name()
    data_pool = "invalid_pool"
    # create subvolume with invalid data pool layout
    try:
        self._fs_cmd("subvolume", "create", self.volname, subvolume, "--pool_layout", data_pool)
    except CommandFailedError as ce:
        self.assertEqual(ce.exitstatus, errno.EINVAL, "invalid error code on create of subvolume with invalid pool layout")
    else:
        self.fail("expected the 'fs subvolume create' command to fail")

    # verify trash dir is clean
    self._wait_for_trash_empty()
def test_subvolume_create_with_invalid_size(self):
    # create subvolume with an invalid size -1; must fail with EINVAL
    subvolume = self._generate_random_subvolume_name()
    try:
        self._fs_cmd("subvolume", "create", self.volname, subvolume, "--size", "-1")
    except CommandFailedError as ce:
        self.assertEqual(ce.exitstatus, errno.EINVAL, "invalid error code on create of subvolume with invalid size")
    else:
        self.fail("expected the 'fs subvolume create' command to fail")

    # verify trash dir is clean
    self._wait_for_trash_empty()
def test_subvolume_create_and_ls_providing_group_as_nogroup(self):
    """
    That a 'subvolume create' and 'subvolume ls' should throw
    permission denied error if option --group=_nogroup is provided.
    """
    subvolname = self._generate_random_subvolume_name()

    # try to create subvolume providing --group_name=_nogroup option
    try:
        self._fs_cmd("subvolume", "create", self.volname, subvolname, "--group_name", "_nogroup")
    except CommandFailedError as ce:
        self.assertEqual(ce.exitstatus, errno.EPERM)
    else:
        self.fail("expected the 'fs subvolume create' command to fail")

    # create subvolume normally (no group)
    self._fs_cmd("subvolume", "create", self.volname, subvolname)

    # try to list subvolumes providing --group_name=_nogroup option
    try:
        self._fs_cmd("subvolume", "ls", self.volname, "--group_name", "_nogroup")
    except CommandFailedError as ce:
        self.assertEqual(ce.exitstatus, errno.EPERM)
    else:
        self.fail("expected the 'fs subvolume ls' command to fail")

    # plain ls (no group) must still succeed
    self._fs_cmd("subvolume", "ls", self.volname)

    # remove subvolume
    self._fs_cmd("subvolume", "rm", self.volname, subvolname)

    # verify trash dir is clean.
    self._wait_for_trash_empty()
def test_subvolume_expand(self):
    """
    That a subvolume can be expanded in size and its quota matches the expected size.
    """
    # create subvolume
    subvolname = self._generate_random_subvolume_name()
    osize = self.DEFAULT_FILE_SIZE*1024*1024
    self._fs_cmd("subvolume", "create", self.volname, subvolname, "--size", str(osize))

    # make sure it exists
    subvolpath = self._get_subvolume_path(self.volname, subvolname)
    self.assertNotEqual(subvolpath, None)

    # expand the subvolume
    # NOTE(review): the original new-size expression was dropped in transit;
    # doubling the original size matches the upstream test -- confirm
    nsize = osize*2
    self._fs_cmd("subvolume", "resize", self.volname, subvolname, str(nsize))

    # verify the quota matches the requested size
    size = int(self.mount_a.getfattr(subvolpath, "ceph.quota.max_bytes"))
    self.assertEqual(size, nsize)

    # remove subvolume
    self._fs_cmd("subvolume", "rm", self.volname, subvolname)

    # verify trash dir is clean
    self._wait_for_trash_empty()
def test_subvolume_info(self):
    # tests the 'fs subvolume info' command

    # every key the info output is expected to carry
    subvol_md = ["atime", "bytes_pcent", "bytes_quota", "bytes_used", "created_at", "ctime",
                 "data_pool", "gid", "mode", "mon_addrs", "mtime", "path", "pool_namespace",
                 "type", "uid", "features", "state"]

    # create subvolume (no quota)
    subvolume = self._generate_random_subvolume_name()
    self._fs_cmd("subvolume", "create", self.volname, subvolume)

    # get subvolume metadata
    subvol_info = json.loads(self._get_subvolume_info(self.volname, subvolume))
    for md in subvol_md:
        self.assertIn(md, subvol_info, "'{0}' key not present in metadata of subvolume".format(md))

    self.assertEqual(subvol_info["bytes_pcent"], "undefined", "bytes_pcent should be set to undefined if quota is not set")
    self.assertEqual(subvol_info["bytes_quota"], "infinite", "bytes_quota should be set to infinite if quota is not set")
    self.assertEqual(subvol_info["pool_namespace"], "", "expected pool namespace to be empty")
    self.assertEqual(subvol_info["state"], "complete", "expected state to be complete")

    self.assertEqual(len(subvol_info["features"]), 3,
                     msg="expected 3 features, found '{0}' ({1})".format(len(subvol_info["features"]), subvol_info["features"]))
    for feature in ['snapshot-clone', 'snapshot-autoprotect', 'snapshot-retention']:
        self.assertIn(feature, subvol_info["features"], msg="expected feature '{0}' in subvolume".format(feature))

    # set a quota via resize, then re-check the metadata
    nsize = self.DEFAULT_FILE_SIZE*1024*1024
    self._fs_cmd("subvolume", "resize", self.volname, subvolume, str(nsize))

    # get subvolume metadata after quota set
    subvol_info = json.loads(self._get_subvolume_info(self.volname, subvolume))
    for md in subvol_md:
        self.assertIn(md, subvol_info, "'{0}' key not present in metadata of subvolume".format(md))

    self.assertNotEqual(subvol_info["bytes_pcent"], "undefined", "bytes_pcent should not be set to undefined if quota is not set")
    self.assertEqual(subvol_info["bytes_quota"], nsize, "bytes_quota should be set to '{0}'".format(nsize))
    self.assertEqual(subvol_info["type"], "subvolume", "type should be set to subvolume")
    self.assertEqual(subvol_info["state"], "complete", "expected state to be complete")

    self.assertEqual(len(subvol_info["features"]), 3,
                     msg="expected 3 features, found '{0}' ({1})".format(len(subvol_info["features"]), subvol_info["features"]))
    for feature in ['snapshot-clone', 'snapshot-autoprotect', 'snapshot-retention']:
        self.assertIn(feature, subvol_info["features"], msg="expected feature '{0}' in subvolume".format(feature))

    # remove subvolume
    self._fs_cmd("subvolume", "rm", self.volname, subvolume)

    # verify trash dir is clean
    self._wait_for_trash_empty()
2364 def test_subvolume_ls(self
):
2365 # tests the 'fs subvolume ls' command
2370 subvolumes
= self
._generate
_random
_subvolume
_name
(3)
2371 for subvolume
in subvolumes
:
2372 self
._fs
_cmd
("subvolume", "create", self
.volname
, subvolume
)
2375 subvolumels
= json
.loads(self
._fs
_cmd
('subvolume', 'ls', self
.volname
))
2376 if len(subvolumels
) == 0:
2377 self
.fail("Expected the 'fs subvolume ls' command to list the created subvolumes.")
2379 subvolnames
= [subvolume
['name'] for subvolume
in subvolumels
]
2380 if collections
.Counter(subvolnames
) != collections
.Counter(subvolumes
):
2381 self
.fail("Error creating or listing subvolumes")
2384 for subvolume
in subvolumes
:
2385 self
._fs
_cmd
("subvolume", "rm", self
.volname
, subvolume
)
2387 # verify trash dir is clean
2388 self
._wait
_for
_trash
_empty
()
2390 def test_subvolume_ls_with_groupname_as_internal_directory(self
):
2391 # tests the 'fs subvolume ls' command when the default groupname as internal directories
2392 # Eg: '_nogroup', '_legacy', '_deleting', '_index'.
2393 # Expecting 'fs subvolume ls' will be fail with errno EINVAL for '_legacy', '_deleting', '_index'
2394 # Expecting 'fs subvolume ls' will be fail with errno EPERM for '_nogroup'
2396 # try to list subvolumes providing --group_name=_nogroup option
2398 self
._fs
_cmd
("subvolume", "ls", self
.volname
, "--group_name", "_nogroup")
2399 except CommandFailedError
as ce
:
2400 self
.assertEqual(ce
.exitstatus
, errno
.EPERM
)
2402 self
.fail("expected the 'fs subvolume ls' command to fail with error 'EPERM' for _nogroup")
2404 # try to list subvolumes providing --group_name=_legacy option
2406 self
._fs
_cmd
("subvolume", "ls", self
.volname
, "--group_name", "_legacy")
2407 except CommandFailedError
as ce
:
2408 self
.assertEqual(ce
.exitstatus
, errno
.EINVAL
)
2410 self
.fail("expected the 'fs subvolume ls' command to fail with error 'EINVAL' for _legacy")
2412 # try to list subvolumes providing --group_name=_deleting option
2414 self
._fs
_cmd
("subvolume", "ls", self
.volname
, "--group_name", "_deleting")
2415 except CommandFailedError
as ce
:
2416 self
.assertEqual(ce
.exitstatus
, errno
.EINVAL
)
2418 self
.fail("expected the 'fs subvolume ls' command to fail with error 'EINVAL' for _deleting")
2420 # try to list subvolumes providing --group_name=_index option
2422 self
._fs
_cmd
("subvolume", "ls", self
.volname
, "--group_name", "_index")
2423 except CommandFailedError
as ce
:
2424 self
.assertEqual(ce
.exitstatus
, errno
.EINVAL
)
2426 self
.fail("expected the 'fs subvolume ls' command to fail with error 'EINVAL' for _index")
2428 def test_subvolume_ls_for_notexistent_default_group(self
):
2429 # tests the 'fs subvolume ls' command when the default group '_nogroup' doesn't exist
2430 # prerequisite: we expect that the volume is created and the default group _nogroup is
2431 # NOT created (i.e. a subvolume without group is not created)
2434 subvolumels
= json
.loads(self
._fs
_cmd
('subvolume', 'ls', self
.volname
))
2435 if len(subvolumels
) > 0:
2436 raise RuntimeError("Expected the 'fs subvolume ls' command to output an empty list.")
2438 def test_subvolume_marked(self
):
2440 ensure a subvolume is marked with the ceph.dir.subvolume xattr
2442 subvolume
= self
._generate
_random
_subvolume
_name
()
2445 self
._fs
_cmd
("subvolume", "create", self
.volname
, subvolume
)
2448 subvolpath
= self
._get
_subvolume
_path
(self
.volname
, subvolume
)
2450 # subdirectory of a subvolume cannot be moved outside the subvolume once marked with
2451 # the xattr ceph.dir.subvolume, hence test by attempting to rename subvol path (incarnation)
2452 # outside the subvolume
2453 dstpath
= os
.path
.join(self
.mount_a
.mountpoint
, 'volumes', '_nogroup', 'new_subvol_location')
2454 srcpath
= os
.path
.join(self
.mount_a
.mountpoint
, subvolpath
)
2455 rename_script
= dedent("""
2459 os.rename("{src}", "{dst}")
2460 except OSError as e:
2461 if e.errno != errno.EXDEV:
2462 raise RuntimeError("invalid error code on renaming subvolume incarnation out of subvolume directory")
2464 raise RuntimeError("expected renaming subvolume incarnation out of subvolume directory to fail")
2466 self
.mount_a
.run_python(rename_script
.format(src
=srcpath
, dst
=dstpath
), sudo
=True)
2469 self
._fs
_cmd
("subvolume", "rm", self
.volname
, subvolume
)
2471 # verify trash dir is clean
2472 self
._wait
_for
_trash
_empty
()
2474 def test_subvolume_pin_export(self
):
2475 self
.fs
.set_max_mds(2)
2476 status
= self
.fs
.wait_for_daemons()
2478 subvolume
= self
._generate
_random
_subvolume
_name
()
2479 self
._fs
_cmd
("subvolume", "create", self
.volname
, subvolume
)
2480 self
._fs
_cmd
("subvolume", "pin", self
.volname
, subvolume
, "export", "1")
2481 path
= self
._fs
_cmd
("subvolume", "getpath", self
.volname
, subvolume
)
2482 path
= os
.path
.dirname(path
) # get subvolume path
2484 self
._get
_subtrees
(status
=status
, rank
=1)
2485 self
._wait
_subtrees
([(path
, 1)], status
=status
)
2488 self
._fs
_cmd
("subvolume", "rm", self
.volname
, subvolume
)
2490 # verify trash dir is clean
2491 self
._wait
_for
_trash
_empty
()
2493 ### authorize operations
2495 def test_authorize_deauthorize_legacy_subvolume(self
):
2496 subvolume
= self
._generate
_random
_subvolume
_name
()
2497 group
= self
._generate
_random
_group
_name
()
2500 guest_mount
= self
.mount_b
2501 guest_mount
.umount_wait()
2503 # emulate a old-fashioned subvolume in a custom group
2504 createpath
= os
.path
.join(".", "volumes", group
, subvolume
)
2505 self
.mount_a
.run_shell(['sudo', 'mkdir', '-p', createpath
], omit_sudo
=False)
2507 # add required xattrs to subvolume
2508 default_pool
= self
.mount_a
.getfattr(".", "ceph.dir.layout.pool")
2509 self
.mount_a
.setfattr(createpath
, 'ceph.dir.layout.pool', default_pool
, sudo
=True)
2511 mount_path
= os
.path
.join("/", "volumes", group
, subvolume
)
2513 # authorize guest authID read-write access to subvolume
2514 key
= self
._fs
_cmd
("subvolume", "authorize", self
.volname
, subvolume
, authid
,
2515 "--group_name", group
, "--tenant_id", "tenant_id")
2517 # guest authID should exist
2518 existing_ids
= [a
['entity'] for a
in self
.auth_list()]
2519 self
.assertIn("client.{0}".format(authid
), existing_ids
)
2521 # configure credentials for guest client
2522 self
._configure
_guest
_auth
(guest_mount
, authid
, key
)
2524 # mount the subvolume, and write to it
2525 guest_mount
.mount_wait(cephfs_mntpt
=mount_path
)
2526 guest_mount
.write_n_mb("data.bin", 1)
2528 # authorize guest authID read access to subvolume
2529 key
= self
._fs
_cmd
("subvolume", "authorize", self
.volname
, subvolume
, authid
,
2530 "--group_name", group
, "--tenant_id", "tenant_id", "--access_level", "r")
2532 # guest client sees the change in access level to read only after a
2533 # remount of the subvolume.
2534 guest_mount
.umount_wait()
2535 guest_mount
.mount_wait(cephfs_mntpt
=mount_path
)
2537 # read existing content of the subvolume
2538 self
.assertListEqual(guest_mount
.ls(guest_mount
.mountpoint
), ["data.bin"])
2539 # cannot write into read-only subvolume
2540 with self
.assertRaises(CommandFailedError
):
2541 guest_mount
.write_n_mb("rogue.bin", 1)
2544 guest_mount
.umount_wait()
2545 self
._fs
_cmd
("subvolume", "deauthorize", self
.volname
, subvolume
, authid
,
2546 "--group_name", group
)
2547 # guest authID should no longer exist
2548 existing_ids
= [a
['entity'] for a
in self
.auth_list()]
2549 self
.assertNotIn("client.{0}".format(authid
), existing_ids
)
2550 self
._fs
_cmd
("subvolume", "rm", self
.volname
, subvolume
, "--group_name", group
)
2551 self
._fs
_cmd
("subvolumegroup", "rm", self
.volname
, group
)
2553 def test_authorize_deauthorize_subvolume(self
):
2554 subvolume
= self
._generate
_random
_subvolume
_name
()
2555 group
= self
._generate
_random
_group
_name
()
2558 guest_mount
= self
.mount_b
2559 guest_mount
.umount_wait()
2562 self
._fs
_cmd
("subvolumegroup", "create", self
.volname
, group
, "--mode=777")
2564 # create subvolume in group
2565 self
._fs
_cmd
("subvolume", "create", self
.volname
, subvolume
, "--group_name", group
)
2566 mount_path
= self
._fs
_cmd
("subvolume", "getpath", self
.volname
, subvolume
,
2567 "--group_name", group
).rstrip()
2569 # authorize guest authID read-write access to subvolume
2570 key
= self
._fs
_cmd
("subvolume", "authorize", self
.volname
, subvolume
, authid
,
2571 "--group_name", group
, "--tenant_id", "tenant_id")
2573 # guest authID should exist
2574 existing_ids
= [a
['entity'] for a
in self
.auth_list()]
2575 self
.assertIn("client.{0}".format(authid
), existing_ids
)
2577 # configure credentials for guest client
2578 self
._configure
_guest
_auth
(guest_mount
, authid
, key
)
2580 # mount the subvolume, and write to it
2581 guest_mount
.mount_wait(cephfs_mntpt
=mount_path
)
2582 guest_mount
.write_n_mb("data.bin", 1)
2584 # authorize guest authID read access to subvolume
2585 key
= self
._fs
_cmd
("subvolume", "authorize", self
.volname
, subvolume
, authid
,
2586 "--group_name", group
, "--tenant_id", "tenant_id", "--access_level", "r")
2588 # guest client sees the change in access level to read only after a
2589 # remount of the subvolume.
2590 guest_mount
.umount_wait()
2591 guest_mount
.mount_wait(cephfs_mntpt
=mount_path
)
2593 # read existing content of the subvolume
2594 self
.assertListEqual(guest_mount
.ls(guest_mount
.mountpoint
), ["data.bin"])
2595 # cannot write into read-only subvolume
2596 with self
.assertRaises(CommandFailedError
):
2597 guest_mount
.write_n_mb("rogue.bin", 1)
2600 guest_mount
.umount_wait()
2601 self
._fs
_cmd
("subvolume", "deauthorize", self
.volname
, subvolume
, authid
,
2602 "--group_name", group
)
2603 # guest authID should no longer exist
2604 existing_ids
= [a
['entity'] for a
in self
.auth_list()]
2605 self
.assertNotIn("client.{0}".format(authid
), existing_ids
)
2606 self
._fs
_cmd
("subvolume", "rm", self
.volname
, subvolume
, "--group_name", group
)
2607 self
._fs
_cmd
("subvolumegroup", "rm", self
.volname
, group
)
2609 def test_multitenant_subvolumes(self
):
2611 That subvolume access can be restricted to a tenant.
2613 That metadata used to enforce tenant isolation of
2614 subvolumes is stored as a two-way mapping between auth
2615 IDs and subvolumes that they're authorized to access.
2617 subvolume
= self
._generate
_random
_subvolume
_name
()
2618 group
= self
._generate
_random
_group
_name
()
2620 guest_mount
= self
.mount_b
2622 # Guest clients belonging to different tenants, but using the same
2627 "tenant_id": "tenant1",
2631 "tenant_id": "tenant2",
2635 self
._fs
_cmd
("subvolumegroup", "create", self
.volname
, group
)
2637 # create subvolume in group
2638 self
._fs
_cmd
("subvolume", "create", self
.volname
, subvolume
, "--group_name", group
)
2640 # Check that subvolume metadata file is created on subvolume creation.
2641 subvol_metadata_filename
= "_{0}:{1}.meta".format(group
, subvolume
)
2642 self
.assertIn(subvol_metadata_filename
, guest_mount
.ls("volumes"))
2644 # Authorize 'guestclient_1', using auth ID 'alice' and belonging to
2645 # 'tenant1', with 'rw' access to the volume.
2646 self
._fs
_cmd
("subvolume", "authorize", self
.volname
, subvolume
, guestclient_1
["auth_id"],
2647 "--group_name", group
, "--tenant_id", guestclient_1
["tenant_id"])
2649 # Check that auth metadata file for auth ID 'alice', is
2650 # created on authorizing 'alice' access to the subvolume.
2651 auth_metadata_filename
= "${0}.meta".format(guestclient_1
["auth_id"])
2652 self
.assertIn(auth_metadata_filename
, guest_mount
.ls("volumes"))
2654 # Verify that the auth metadata file stores the tenant ID that the
2655 # auth ID belongs to, the auth ID's authorized access levels
2656 # for different subvolumes, versioning details, etc.
2657 expected_auth_metadata
= {
2659 "compat_version": 6,
2661 "tenant_id": "tenant1",
2663 "{0}/{1}".format(group
,subvolume
): {
2665 "access_level": "rw"
2670 auth_metadata
= self
._auth
_metadata
_get
(guest_mount
.read_file("volumes/{0}".format(auth_metadata_filename
)))
2671 self
.assertGreaterEqual(auth_metadata
["version"], expected_auth_metadata
["version"])
2672 del expected_auth_metadata
["version"]
2673 del auth_metadata
["version"]
2674 self
.assertEqual(expected_auth_metadata
, auth_metadata
)
2676 # Verify that the subvolume metadata file stores info about auth IDs
2677 # and their access levels to the subvolume, versioning details, etc.
2678 expected_subvol_metadata
= {
2680 "compat_version": 1,
2684 "access_level": "rw"
2688 subvol_metadata
= self
._auth
_metadata
_get
(guest_mount
.read_file("volumes/{0}".format(subvol_metadata_filename
)))
2690 self
.assertGreaterEqual(subvol_metadata
["version"], expected_subvol_metadata
["version"])
2691 del expected_subvol_metadata
["version"]
2692 del subvol_metadata
["version"]
2693 self
.assertEqual(expected_subvol_metadata
, subvol_metadata
)
2695 # Cannot authorize 'guestclient_2' to access the volume.
2696 # It uses auth ID 'alice', which has already been used by a
2697 # 'guestclient_1' belonging to an another tenant for accessing
2701 self
._fs
_cmd
("subvolume", "authorize", self
.volname
, subvolume
, guestclient_2
["auth_id"],
2702 "--group_name", group
, "--tenant_id", guestclient_2
["tenant_id"])
2703 except CommandFailedError
as ce
:
2704 self
.assertEqual(ce
.exitstatus
, errno
.EPERM
,
2705 "Invalid error code returned on authorize of subvolume with same auth_id but different tenant_id")
2707 self
.fail("expected the 'fs subvolume authorize' command to fail")
2709 # Check that auth metadata file is cleaned up on removing
2710 # auth ID's only access to a volume.
2712 self
._fs
_cmd
("subvolume", "deauthorize", self
.volname
, subvolume
, auth_id
,
2713 "--group_name", group
)
2714 self
.assertNotIn(auth_metadata_filename
, guest_mount
.ls("volumes"))
2716 # Check that subvolume metadata file is cleaned up on subvolume deletion.
2717 self
._fs
_cmd
("subvolume", "rm", self
.volname
, subvolume
, "--group_name", group
)
2718 self
.assertNotIn(subvol_metadata_filename
, guest_mount
.ls("volumes"))
2721 guest_mount
.umount_wait()
2722 self
._fs
_cmd
("subvolumegroup", "rm", self
.volname
, group
)
2724 def test_subvolume_authorized_list(self
):
2725 subvolume
= self
._generate
_random
_subvolume
_name
()
2726 group
= self
._generate
_random
_group
_name
()
2732 self
._fs
_cmd
("subvolumegroup", "create", self
.volname
, group
)
2734 # create subvolume in group
2735 self
._fs
_cmd
("subvolume", "create", self
.volname
, subvolume
, "--group_name", group
)
2737 # authorize alice authID read-write access to subvolume
2738 self
._fs
_cmd
("subvolume", "authorize", self
.volname
, subvolume
, authid1
,
2739 "--group_name", group
)
2740 # authorize guest1 authID read-write access to subvolume
2741 self
._fs
_cmd
("subvolume", "authorize", self
.volname
, subvolume
, authid2
,
2742 "--group_name", group
)
2743 # authorize guest2 authID read access to subvolume
2744 self
._fs
_cmd
("subvolume", "authorize", self
.volname
, subvolume
, authid3
,
2745 "--group_name", group
, "--access_level", "r")
2747 # list authorized-ids of the subvolume
2748 expected_auth_list
= [{'alice': 'rw'}, {'guest1': 'rw'}, {'guest2': 'r'}]
2749 auth_list
= json
.loads(self
._fs
_cmd
('subvolume', 'authorized_list', self
.volname
, subvolume
, "--group_name", group
))
2750 self
.assertCountEqual(expected_auth_list
, auth_list
)
2753 self
._fs
_cmd
("subvolume", "deauthorize", self
.volname
, subvolume
, authid1
,
2754 "--group_name", group
)
2755 self
._fs
_cmd
("subvolume", "deauthorize", self
.volname
, subvolume
, authid2
,
2756 "--group_name", group
)
2757 self
._fs
_cmd
("subvolume", "deauthorize", self
.volname
, subvolume
, authid3
,
2758 "--group_name", group
)
2759 self
._fs
_cmd
("subvolume", "rm", self
.volname
, subvolume
, "--group_name", group
)
2760 self
._fs
_cmd
("subvolumegroup", "rm", self
.volname
, group
)
2762 def test_authorize_auth_id_not_created_by_mgr_volumes(self
):
2764 If the auth_id already exists and is not created by mgr plugin,
2765 it's not allowed to authorize the auth-id by default.
2768 subvolume
= self
._generate
_random
_subvolume
_name
()
2769 group
= self
._generate
_random
_group
_name
()
2772 self
.fs
.mon_manager
.raw_cluster_cmd(
2773 "auth", "get-or-create", "client.guest1",
2782 "tenant_id": "tenant1",
2786 self
._fs
_cmd
("subvolumegroup", "create", self
.volname
, group
)
2788 # create subvolume in group
2789 self
._fs
_cmd
("subvolume", "create", self
.volname
, subvolume
, "--group_name", group
)
2792 self
._fs
_cmd
("subvolume", "authorize", self
.volname
, subvolume
, guestclient_1
["auth_id"],
2793 "--group_name", group
, "--tenant_id", guestclient_1
["tenant_id"])
2794 except CommandFailedError
as ce
:
2795 self
.assertEqual(ce
.exitstatus
, errno
.EPERM
,
2796 "Invalid error code returned on authorize of subvolume for auth_id created out of band")
2798 self
.fail("expected the 'fs subvolume authorize' command to fail")
2801 self
.fs
.mon_manager
.raw_cluster_cmd("auth", "rm", "client.guest1")
2802 self
._fs
_cmd
("subvolume", "rm", self
.volname
, subvolume
, "--group_name", group
)
2803 self
._fs
_cmd
("subvolumegroup", "rm", self
.volname
, group
)
2805 def test_authorize_allow_existing_id_option(self
):
2807 If the auth_id already exists and is not created by mgr volumes,
2808 it's not allowed to authorize the auth-id by default but is
2809 allowed with option allow_existing_id.
2812 subvolume
= self
._generate
_random
_subvolume
_name
()
2813 group
= self
._generate
_random
_group
_name
()
2816 self
.fs
.mon_manager
.raw_cluster_cmd(
2817 "auth", "get-or-create", "client.guest1",
2826 "tenant_id": "tenant1",
2830 self
._fs
_cmd
("subvolumegroup", "create", self
.volname
, group
)
2832 # create subvolume in group
2833 self
._fs
_cmd
("subvolume", "create", self
.volname
, subvolume
, "--group_name", group
)
2835 # Cannot authorize 'guestclient_1' to access the volume by default,
2836 # which already exists and not created by mgr volumes but is allowed
2837 # with option 'allow_existing_id'.
2838 self
._fs
_cmd
("subvolume", "authorize", self
.volname
, subvolume
, guestclient_1
["auth_id"],
2839 "--group_name", group
, "--tenant_id", guestclient_1
["tenant_id"], "--allow-existing-id")
2842 self
._fs
_cmd
("subvolume", "deauthorize", self
.volname
, subvolume
, auth_id
,
2843 "--group_name", group
)
2844 self
.fs
.mon_manager
.raw_cluster_cmd("auth", "rm", "client.guest1")
2845 self
._fs
_cmd
("subvolume", "rm", self
.volname
, subvolume
, "--group_name", group
)
2846 self
._fs
_cmd
("subvolumegroup", "rm", self
.volname
, group
)
2848 def test_deauthorize_auth_id_after_out_of_band_update(self
):
2850 If the auth_id authorized by mgr/volumes plugin is updated
2851 out of band, the auth_id should not be deleted after a
2852 deauthorize. It should only remove caps associated with it.
2855 subvolume
= self
._generate
_random
_subvolume
_name
()
2856 group
= self
._generate
_random
_group
_name
()
2861 "tenant_id": "tenant1",
2865 self
._fs
_cmd
("subvolumegroup", "create", self
.volname
, group
)
2867 # create subvolume in group
2868 self
._fs
_cmd
("subvolume", "create", self
.volname
, subvolume
, "--group_name", group
)
2870 # Authorize 'guestclient_1' to access the subvolume.
2871 self
._fs
_cmd
("subvolume", "authorize", self
.volname
, subvolume
, guestclient_1
["auth_id"],
2872 "--group_name", group
, "--tenant_id", guestclient_1
["tenant_id"])
2874 subvol_path
= self
._fs
_cmd
("subvolume", "getpath", self
.volname
, subvolume
,
2875 "--group_name", group
).rstrip()
2877 # Update caps for guestclient_1 out of band
2878 out
= self
.fs
.mon_manager
.raw_cluster_cmd(
2879 "auth", "caps", "client.guest1",
2880 "mds", "allow rw path=/volumes/{0}, allow rw path={1}".format(group
, subvol_path
),
2881 "osd", "allow rw pool=cephfs_data",
2886 # Deauthorize guestclient_1
2887 self
._fs
_cmd
("subvolume", "deauthorize", self
.volname
, subvolume
, auth_id
, "--group_name", group
)
2889 # Validate the caps of guestclient_1 after deauthorize. It should not have deleted
2890 # guestclient_1. The mgr and mds caps should be present which was updated out of band.
2891 out
= json
.loads(self
.fs
.mon_manager
.raw_cluster_cmd("auth", "get", "client.guest1", "--format=json-pretty"))
2893 self
.assertEqual("client.guest1", out
[0]["entity"])
2894 self
.assertEqual("allow rw path=/volumes/{0}".format(group
), out
[0]["caps"]["mds"])
2895 self
.assertEqual("allow *", out
[0]["caps"]["mgr"])
2896 self
.assertNotIn("osd", out
[0]["caps"])
2899 out
= self
.fs
.mon_manager
.raw_cluster_cmd("auth", "rm", "client.guest1")
2900 self
._fs
_cmd
("subvolume", "rm", self
.volname
, subvolume
, "--group_name", group
)
2901 self
._fs
_cmd
("subvolumegroup", "rm", self
.volname
, group
)
2903 def test_recover_auth_metadata_during_authorize(self
):
2905 That auth metadata manager can recover from partial auth updates using
2906 metadata files, which store auth info and its update status info. This
2907 test validates the recovery during authorize.
2910 guest_mount
= self
.mount_b
2912 subvolume
= self
._generate
_random
_subvolume
_name
()
2913 group
= self
._generate
_random
_group
_name
()
2918 "tenant_id": "tenant1",
2922 self
._fs
_cmd
("subvolumegroup", "create", self
.volname
, group
)
2924 # create subvolume in group
2925 self
._fs
_cmd
("subvolume", "create", self
.volname
, subvolume
, "--group_name", group
)
2927 # Authorize 'guestclient_1' to access the subvolume.
2928 self
._fs
_cmd
("subvolume", "authorize", self
.volname
, subvolume
, guestclient_1
["auth_id"],
2929 "--group_name", group
, "--tenant_id", guestclient_1
["tenant_id"])
2931 # Check that auth metadata file for auth ID 'guest1', is
2932 # created on authorizing 'guest1' access to the subvolume.
2933 auth_metadata_filename
= "${0}.meta".format(guestclient_1
["auth_id"])
2934 self
.assertIn(auth_metadata_filename
, guest_mount
.ls("volumes"))
2935 expected_auth_metadata_content
= self
._auth
_metadata
_get
(self
.mount_a
.read_file("volumes/{0}".format(auth_metadata_filename
)))
2937 # Induce partial auth update state by modifying the auth metadata file,
2938 # and then run authorize again.
2939 guest_mount
.run_shell(['sudo', 'sed', '-i', 's/false/true/g', 'volumes/{0}'.format(auth_metadata_filename
)], omit_sudo
=False)
2941 # Authorize 'guestclient_1' to access the subvolume.
2942 self
._fs
_cmd
("subvolume", "authorize", self
.volname
, subvolume
, guestclient_1
["auth_id"],
2943 "--group_name", group
, "--tenant_id", guestclient_1
["tenant_id"])
2945 auth_metadata_content
= self
._auth
_metadata
_get
(self
.mount_a
.read_file("volumes/{0}".format(auth_metadata_filename
)))
2946 self
.assertEqual(auth_metadata_content
, expected_auth_metadata_content
)
2949 self
._fs
_cmd
("subvolume", "deauthorize", self
.volname
, subvolume
, auth_id
, "--group_name", group
)
2950 guest_mount
.umount_wait()
2951 self
.fs
.mon_manager
.raw_cluster_cmd("auth", "rm", "client.guest1")
2952 self
._fs
_cmd
("subvolume", "rm", self
.volname
, subvolume
, "--group_name", group
)
2953 self
._fs
_cmd
("subvolumegroup", "rm", self
.volname
, group
)
2955 def test_recover_auth_metadata_during_deauthorize(self
):
2957 That auth metadata manager can recover from partial auth updates using
2958 metadata files, which store auth info and its update status info. This
2959 test validates the recovery during deauthorize.
2962 guest_mount
= self
.mount_b
2964 subvolume1
, subvolume2
= self
._generate
_random
_subvolume
_name
(2)
2965 group
= self
._generate
_random
_group
_name
()
2968 "auth_id": "guest1",
2969 "tenant_id": "tenant1",
2973 self
._fs
_cmd
("subvolumegroup", "create", self
.volname
, group
)
2975 # create subvolumes in group
2976 self
._fs
_cmd
("subvolume", "create", self
.volname
, subvolume1
, "--group_name", group
)
2977 self
._fs
_cmd
("subvolume", "create", self
.volname
, subvolume2
, "--group_name", group
)
2979 # Authorize 'guestclient_1' to access the subvolume1.
2980 self
._fs
_cmd
("subvolume", "authorize", self
.volname
, subvolume1
, guestclient_1
["auth_id"],
2981 "--group_name", group
, "--tenant_id", guestclient_1
["tenant_id"])
2983 # Check that auth metadata file for auth ID 'guest1', is
2984 # created on authorizing 'guest1' access to the subvolume1.
2985 auth_metadata_filename
= "${0}.meta".format(guestclient_1
["auth_id"])
2986 self
.assertIn(auth_metadata_filename
, guest_mount
.ls("volumes"))
2987 expected_auth_metadata_content
= self
._auth
_metadata
_get
(self
.mount_a
.read_file("volumes/{0}".format(auth_metadata_filename
)))
2989 # Authorize 'guestclient_1' to access the subvolume2.
2990 self
._fs
_cmd
("subvolume", "authorize", self
.volname
, subvolume2
, guestclient_1
["auth_id"],
2991 "--group_name", group
, "--tenant_id", guestclient_1
["tenant_id"])
2993 # Induce partial auth update state by modifying the auth metadata file,
2994 # and then run de-authorize.
2995 guest_mount
.run_shell(['sudo', 'sed', '-i', 's/false/true/g', 'volumes/{0}'.format(auth_metadata_filename
)], omit_sudo
=False)
2997 # Deauthorize 'guestclient_1' to access the subvolume2.
2998 self
._fs
_cmd
("subvolume", "deauthorize", self
.volname
, subvolume2
, guestclient_1
["auth_id"],
2999 "--group_name", group
)
3001 auth_metadata_content
= self
._auth
_metadata
_get
(self
.mount_a
.read_file("volumes/{0}".format(auth_metadata_filename
)))
3002 self
.assertEqual(auth_metadata_content
, expected_auth_metadata_content
)
3005 self
._fs
_cmd
("subvolume", "deauthorize", self
.volname
, subvolume1
, "guest1", "--group_name", group
)
3006 guest_mount
.umount_wait()
3007 self
.fs
.mon_manager
.raw_cluster_cmd("auth", "rm", "client.guest1")
3008 self
._fs
_cmd
("subvolume", "rm", self
.volname
, subvolume1
, "--group_name", group
)
3009 self
._fs
_cmd
("subvolume", "rm", self
.volname
, subvolume2
, "--group_name", group
)
3010 self
._fs
_cmd
("subvolumegroup", "rm", self
.volname
, group
)
3012 def test_update_old_style_auth_metadata_to_new_during_authorize(self
):
3014 CephVolumeClient stores the subvolume data in auth metadata file with
3015 'volumes' key as there was no subvolume namespace. It doesn't makes sense
3016 with mgr/volumes. This test validates the transparent update of 'volumes'
3017 key to 'subvolumes' key in auth metadata file during authorize.
3020 guest_mount
= self
.mount_b
3022 subvolume1
, subvolume2
= self
._generate
_random
_subvolume
_name
(2)
3023 group
= self
._generate
_random
_group
_name
()
3028 "tenant_id": "tenant1",
3032 self
._fs
_cmd
("subvolumegroup", "create", self
.volname
, group
)
3034 # create subvolumes in group
3035 self
._fs
_cmd
("subvolume", "create", self
.volname
, subvolume1
, "--group_name", group
)
3036 self
._fs
_cmd
("subvolume", "create", self
.volname
, subvolume2
, "--group_name", group
)
3038 # Authorize 'guestclient_1' to access the subvolume1.
3039 self
._fs
_cmd
("subvolume", "authorize", self
.volname
, subvolume1
, guestclient_1
["auth_id"],
3040 "--group_name", group
, "--tenant_id", guestclient_1
["tenant_id"])
3042 # Check that auth metadata file for auth ID 'guest1', is
3043 # created on authorizing 'guest1' access to the subvolume1.
3044 auth_metadata_filename
= "${0}.meta".format(guestclient_1
["auth_id"])
3045 self
.assertIn(auth_metadata_filename
, guest_mount
.ls("volumes"))
3047 # Replace 'subvolumes' to 'volumes', old style auth-metadata file
3048 guest_mount
.run_shell(['sudo', 'sed', '-i', 's/subvolumes/volumes/g', 'volumes/{0}'.format(auth_metadata_filename
)], omit_sudo
=False)
3050 # Authorize 'guestclient_1' to access the subvolume2. This should transparently update 'volumes' to 'subvolumes'
3051 self
._fs
_cmd
("subvolume", "authorize", self
.volname
, subvolume2
, guestclient_1
["auth_id"],
3052 "--group_name", group
, "--tenant_id", guestclient_1
["tenant_id"])
3054 expected_auth_metadata
= {
3056 "compat_version": 6,
3058 "tenant_id": "tenant1",
3060 "{0}/{1}".format(group
,subvolume1
): {
3062 "access_level": "rw"
3064 "{0}/{1}".format(group
,subvolume2
): {
3066 "access_level": "rw"
3071 auth_metadata
= self
._auth
_metadata
_get
(guest_mount
.read_file("volumes/{0}".format(auth_metadata_filename
)))
3073 self
.assertGreaterEqual(auth_metadata
["version"], expected_auth_metadata
["version"])
3074 del expected_auth_metadata
["version"]
3075 del auth_metadata
["version"]
3076 self
.assertEqual(expected_auth_metadata
, auth_metadata
)
3079 self
._fs
_cmd
("subvolume", "deauthorize", self
.volname
, subvolume1
, auth_id
, "--group_name", group
)
3080 self
._fs
_cmd
("subvolume", "deauthorize", self
.volname
, subvolume2
, auth_id
, "--group_name", group
)
3081 guest_mount
.umount_wait()
3082 self
.fs
.mon_manager
.raw_cluster_cmd("auth", "rm", "client.guest1")
3083 self
._fs
_cmd
("subvolume", "rm", self
.volname
, subvolume1
, "--group_name", group
)
3084 self
._fs
_cmd
("subvolume", "rm", self
.volname
, subvolume2
, "--group_name", group
)
3085 self
._fs
_cmd
("subvolumegroup", "rm", self
.volname
, group
)
3087 def test_update_old_style_auth_metadata_to_new_during_deauthorize(self
):
3089 CephVolumeClient stores the subvolume data in auth metadata file with
3090 'volumes' key as there was no subvolume namespace. It doesn't makes sense
3091 with mgr/volumes. This test validates the transparent update of 'volumes'
3092 key to 'subvolumes' key in auth metadata file during deauthorize.
3095 guest_mount
= self
.mount_b
3097 subvolume1
, subvolume2
= self
._generate
_random
_subvolume
_name
(2)
3098 group
= self
._generate
_random
_group
_name
()
3103 "tenant_id": "tenant1",
3107 self
._fs
_cmd
("subvolumegroup", "create", self
.volname
, group
)
3109 # create subvolumes in group
3110 self
._fs
_cmd
("subvolume", "create", self
.volname
, subvolume1
, "--group_name", group
)
3111 self
._fs
_cmd
("subvolume", "create", self
.volname
, subvolume2
, "--group_name", group
)
3113 # Authorize 'guestclient_1' to access the subvolume1.
3114 self
._fs
_cmd
("subvolume", "authorize", self
.volname
, subvolume1
, guestclient_1
["auth_id"],
3115 "--group_name", group
, "--tenant_id", guestclient_1
["tenant_id"])
3117 # Authorize 'guestclient_1' to access the subvolume2.
3118 self
._fs
_cmd
("subvolume", "authorize", self
.volname
, subvolume2
, guestclient_1
["auth_id"],
3119 "--group_name", group
, "--tenant_id", guestclient_1
["tenant_id"])
3121 # Check that auth metadata file for auth ID 'guest1', is created.
3122 auth_metadata_filename
= "${0}.meta".format(guestclient_1
["auth_id"])
3123 self
.assertIn(auth_metadata_filename
, guest_mount
.ls("volumes"))
3125 # Replace 'subvolumes' to 'volumes', old style auth-metadata file
3126 guest_mount
.run_shell(['sudo', 'sed', '-i', 's/subvolumes/volumes/g', 'volumes/{0}'.format(auth_metadata_filename
)], omit_sudo
=False)
3128 # Deauthorize 'guestclient_1' to access the subvolume2. This should update 'volumes' to subvolumes'
3129 self
._fs
_cmd
("subvolume", "deauthorize", self
.volname
, subvolume2
, auth_id
, "--group_name", group
)
3131 expected_auth_metadata
= {
3133 "compat_version": 6,
3135 "tenant_id": "tenant1",
3137 "{0}/{1}".format(group
,subvolume1
): {
3139 "access_level": "rw"
3144 auth_metadata
= self
._auth
_metadata
_get
(guest_mount
.read_file("volumes/{0}".format(auth_metadata_filename
)))
3146 self
.assertGreaterEqual(auth_metadata
["version"], expected_auth_metadata
["version"])
3147 del expected_auth_metadata
["version"]
3148 del auth_metadata
["version"]
3149 self
.assertEqual(expected_auth_metadata
, auth_metadata
)
3152 self
._fs
_cmd
("subvolume", "deauthorize", self
.volname
, subvolume1
, auth_id
, "--group_name", group
)
3153 guest_mount
.umount_wait()
3154 self
.fs
.mon_manager
.raw_cluster_cmd("auth", "rm", "client.guest1")
3155 self
._fs
_cmd
("subvolume", "rm", self
.volname
, subvolume1
, "--group_name", group
)
3156 self
._fs
_cmd
("subvolume", "rm", self
.volname
, subvolume2
, "--group_name", group
)
3157 self
._fs
_cmd
("subvolumegroup", "rm", self
.volname
, group
)
3159 def test_subvolume_evict_client(self
):
3161 That a subvolume client can be evicted based on the auth ID
3164 subvolumes
= self
._generate
_random
_subvolume
_name
(2)
3165 group
= self
._generate
_random
_group
_name
()
3168 self
._fs
_cmd
("subvolumegroup", "create", self
.volname
, group
)
3170 # mounts[0] and mounts[1] would be used as guests to mount the volumes/shares.
3171 for i
in range(0, 2):
3172 self
.mounts
[i
].umount_wait()
3173 guest_mounts
= (self
.mounts
[0], self
.mounts
[1])
3177 "tenant_id": "tenant1",
3180 # Create two subvolumes. Authorize 'guest' auth ID to mount the two
3181 # subvolumes. Mount the two subvolumes. Write data to the volumes.
3184 self
._fs
_cmd
("subvolume", "create", self
.volname
, subvolumes
[i
], "--group_name", group
, "--mode=777")
3186 # authorize guest authID read-write access to subvolume
3187 key
= self
._fs
_cmd
("subvolume", "authorize", self
.volname
, subvolumes
[i
], guestclient_1
["auth_id"],
3188 "--group_name", group
, "--tenant_id", guestclient_1
["tenant_id"])
3190 mount_path
= self
._fs
_cmd
("subvolume", "getpath", self
.volname
, subvolumes
[i
],
3191 "--group_name", group
).rstrip()
3192 # configure credentials for guest client
3193 self
._configure
_guest
_auth
(guest_mounts
[i
], auth_id
, key
)
3195 # mount the subvolume, and write to it
3196 guest_mounts
[i
].mount_wait(cephfs_mntpt
=mount_path
)
3197 guest_mounts
[i
].write_n_mb("data.bin", 1)
3199 # Evict client, guest_mounts[0], using auth ID 'guest' and has mounted
3201 self
._fs
_cmd
("subvolume", "evict", self
.volname
, subvolumes
[0], auth_id
, "--group_name", group
)
3203 # Evicted guest client, guest_mounts[0], should not be able to do
3204 # anymore metadata ops. It should start failing all operations
3205 # when it sees that its own address is in the blocklist.
3207 guest_mounts
[0].write_n_mb("rogue.bin", 1)
3208 except CommandFailedError
:
3211 raise RuntimeError("post-eviction write should have failed!")
3213 # The blocklisted guest client should now be unmountable
3214 guest_mounts
[0].umount_wait()
3216 # Guest client, guest_mounts[1], using the same auth ID 'guest', but
3217 # has mounted the other volume, should be able to use its volume
3219 guest_mounts
[1].write_n_mb("data.bin.1", 1)
3222 guest_mounts
[1].umount_wait()
3224 self
._fs
_cmd
("subvolume", "deauthorize", self
.volname
, subvolumes
[i
], auth_id
, "--group_name", group
)
3225 self
._fs
_cmd
("subvolume", "rm", self
.volname
, subvolumes
[i
], "--group_name", group
)
3226 self
._fs
_cmd
("subvolumegroup", "rm", self
.volname
, group
)
3228 def test_subvolume_pin_random(self
):
3229 self
.fs
.set_max_mds(2)
3230 self
.fs
.wait_for_daemons()
3231 self
.config_set('mds', 'mds_export_ephemeral_random', True)
3233 subvolume
= self
._generate
_random
_subvolume
_name
()
3234 self
._fs
_cmd
("subvolume", "create", self
.volname
, subvolume
)
3235 self
._fs
_cmd
("subvolume", "pin", self
.volname
, subvolume
, "random", ".01")
3239 self
._fs
_cmd
("subvolume", "rm", self
.volname
, subvolume
)
3241 # verify trash dir is clean
3242 self
._wait
_for
_trash
_empty
()
3244 def test_subvolume_resize_fail_invalid_size(self
):
3246 That a subvolume cannot be resized to an invalid size and the quota did not change
3249 osize
= self
.DEFAULT_FILE_SIZE
*1024*1024
3251 subvolname
= self
._generate
_random
_subvolume
_name
()
3252 self
._fs
_cmd
("subvolume", "create", self
.volname
, subvolname
, "--size", str(osize
))
3254 # make sure it exists
3255 subvolpath
= self
._get
_subvolume
_path
(self
.volname
, subvolname
)
3256 self
.assertNotEqual(subvolpath
, None)
3258 # try to resize the subvolume with an invalid size -10
3261 self
._fs
_cmd
("subvolume", "resize", self
.volname
, subvolname
, str(nsize
))
3262 except CommandFailedError
as ce
:
3263 self
.assertEqual(ce
.exitstatus
, errno
.EINVAL
, "invalid error code on resize of subvolume with invalid size")
3265 self
.fail("expected the 'fs subvolume resize' command to fail")
3267 # verify the quota did not change
3268 size
= int(self
.mount_a
.getfattr(subvolpath
, "ceph.quota.max_bytes"))
3269 self
.assertEqual(size
, osize
)
3272 self
._fs
_cmd
("subvolume", "rm", self
.volname
, subvolname
)
3274 # verify trash dir is clean
3275 self
._wait
_for
_trash
_empty
()
3277 def test_subvolume_resize_fail_zero_size(self
):
3279 That a subvolume cannot be resized to a zero size and the quota did not change
3282 osize
= self
.DEFAULT_FILE_SIZE
*1024*1024
3284 subvolname
= self
._generate
_random
_subvolume
_name
()
3285 self
._fs
_cmd
("subvolume", "create", self
.volname
, subvolname
, "--size", str(osize
))
3287 # make sure it exists
3288 subvolpath
= self
._get
_subvolume
_path
(self
.volname
, subvolname
)
3289 self
.assertNotEqual(subvolpath
, None)
3291 # try to resize the subvolume with size 0
3294 self
._fs
_cmd
("subvolume", "resize", self
.volname
, subvolname
, str(nsize
))
3295 except CommandFailedError
as ce
:
3296 self
.assertEqual(ce
.exitstatus
, errno
.EINVAL
, "invalid error code on resize of subvolume with invalid size")
3298 self
.fail("expected the 'fs subvolume resize' command to fail")
3300 # verify the quota did not change
3301 size
= int(self
.mount_a
.getfattr(subvolpath
, "ceph.quota.max_bytes"))
3302 self
.assertEqual(size
, osize
)
3305 self
._fs
_cmd
("subvolume", "rm", self
.volname
, subvolname
)
3307 # verify trash dir is clean
3308 self
._wait
_for
_trash
_empty
()
3310 def test_subvolume_resize_quota_lt_used_size(self
):
3312 That a subvolume can be resized to a size smaller than the current used size
3313 and the resulting quota matches the expected size.
3316 osize
= self
.DEFAULT_FILE_SIZE
*1024*1024*20
3318 subvolname
= self
._generate
_random
_subvolume
_name
()
3319 self
._fs
_cmd
("subvolume", "create", self
.volname
, subvolname
, "--size", str(osize
), "--mode=777")
3321 # make sure it exists
3322 subvolpath
= self
._get
_subvolume
_path
(self
.volname
, subvolname
)
3323 self
.assertNotEqual(subvolpath
, None)
3325 # create one file of 10MB
3326 file_size
=self
.DEFAULT_FILE_SIZE
*10
3328 log
.debug("filling subvolume {0} with {1} file of size {2}MB".format(subvolname
,
3331 filename
= "{0}.{1}".format(TestVolumes
.TEST_FILE_NAME_PREFIX
, self
.DEFAULT_NUMBER_OF_FILES
+1)
3332 self
.mount_a
.write_n_mb(os
.path
.join(subvolpath
, filename
), file_size
)
3334 usedsize
= int(self
.mount_a
.getfattr(subvolpath
, "ceph.dir.rbytes"))
3335 susedsize
= int(self
.mount_a
.run_shell(['stat', '-c' '%s', subvolpath
]).stdout
.getvalue().strip())
3336 if isinstance(self
.mount_a
, FuseMount
):
3337 # kclient dir does not have size==rbytes
3338 self
.assertEqual(usedsize
, susedsize
)
3340 # shrink the subvolume
3341 nsize
= usedsize
// 2
3343 self
._fs
_cmd
("subvolume", "resize", self
.volname
, subvolname
, str(nsize
))
3344 except CommandFailedError
:
3345 self
.fail("expected the 'fs subvolume resize' command to succeed")
3348 size
= int(self
.mount_a
.getfattr(subvolpath
, "ceph.quota.max_bytes"))
3349 self
.assertEqual(size
, nsize
)
3352 self
._fs
_cmd
("subvolume", "rm", self
.volname
, subvolname
)
3354 # verify trash dir is clean
3355 self
._wait
_for
_trash
_empty
()
3357 def test_subvolume_resize_fail_quota_lt_used_size_no_shrink(self
):
3359 That a subvolume cannot be resized to a size smaller than the current used size
3360 when --no_shrink is given and the quota did not change.
3363 osize
= self
.DEFAULT_FILE_SIZE
*1024*1024*20
3365 subvolname
= self
._generate
_random
_subvolume
_name
()
3366 self
._fs
_cmd
("subvolume", "create", self
.volname
, subvolname
, "--size", str(osize
), "--mode=777")
3368 # make sure it exists
3369 subvolpath
= self
._get
_subvolume
_path
(self
.volname
, subvolname
)
3370 self
.assertNotEqual(subvolpath
, None)
3372 # create one file of 10MB
3373 file_size
=self
.DEFAULT_FILE_SIZE
*10
3375 log
.debug("filling subvolume {0} with {1} file of size {2}MB".format(subvolname
,
3378 filename
= "{0}.{1}".format(TestVolumes
.TEST_FILE_NAME_PREFIX
, self
.DEFAULT_NUMBER_OF_FILES
+2)
3379 self
.mount_a
.write_n_mb(os
.path
.join(subvolpath
, filename
), file_size
)
3381 usedsize
= int(self
.mount_a
.getfattr(subvolpath
, "ceph.dir.rbytes"))
3382 susedsize
= int(self
.mount_a
.run_shell(['stat', '-c' '%s', subvolpath
]).stdout
.getvalue().strip())
3383 if isinstance(self
.mount_a
, FuseMount
):
3384 # kclient dir does not have size==rbytes
3385 self
.assertEqual(usedsize
, susedsize
)
3387 # shrink the subvolume
3388 nsize
= usedsize
// 2
3390 self
._fs
_cmd
("subvolume", "resize", self
.volname
, subvolname
, str(nsize
), "--no_shrink")
3391 except CommandFailedError
as ce
:
3392 self
.assertEqual(ce
.exitstatus
, errno
.EINVAL
, "invalid error code on resize of subvolume with invalid size")
3394 self
.fail("expected the 'fs subvolume resize' command to fail")
3396 # verify the quota did not change
3397 size
= int(self
.mount_a
.getfattr(subvolpath
, "ceph.quota.max_bytes"))
3398 self
.assertEqual(size
, osize
)
3401 self
._fs
_cmd
("subvolume", "rm", self
.volname
, subvolname
)
3403 # verify trash dir is clean
3404 self
._wait
_for
_trash
_empty
()
3406 def test_subvolume_resize_expand_on_full_subvolume(self
):
3408 That the subvolume can be expanded from a full subvolume and future writes succeed.
3411 osize
= self
.DEFAULT_FILE_SIZE
*1024*1024*10
3412 # create subvolume of quota 10MB and make sure it exists
3413 subvolname
= self
._generate
_random
_subvolume
_name
()
3414 self
._fs
_cmd
("subvolume", "create", self
.volname
, subvolname
, "--size", str(osize
), "--mode=777")
3415 subvolpath
= self
._get
_subvolume
_path
(self
.volname
, subvolname
)
3416 self
.assertNotEqual(subvolpath
, None)
3418 # create one file of size 10MB and write
3419 file_size
=self
.DEFAULT_FILE_SIZE
*10
3421 log
.debug("filling subvolume {0} with {1} file of size {2}MB".format(subvolname
,
3424 filename
= "{0}.{1}".format(TestVolumes
.TEST_FILE_NAME_PREFIX
, self
.DEFAULT_NUMBER_OF_FILES
+3)
3425 self
.mount_a
.write_n_mb(os
.path
.join(subvolpath
, filename
), file_size
)
3427 # create a file of size 5MB and try write more
3428 file_size
=file_size
// 2
3430 log
.debug("filling subvolume {0} with {1} file of size {2}MB".format(subvolname
,
3433 filename
= "{0}.{1}".format(TestVolumes
.TEST_FILE_NAME_PREFIX
, self
.DEFAULT_NUMBER_OF_FILES
+4)
3435 self
.mount_a
.write_n_mb(os
.path
.join(subvolpath
, filename
), file_size
)
3436 except CommandFailedError
:
3437 # Not able to write. So expand the subvolume more and try writing the 5MB file again
3439 self
._fs
_cmd
("subvolume", "resize", self
.volname
, subvolname
, str(nsize
))
3441 self
.mount_a
.write_n_mb(os
.path
.join(subvolpath
, filename
), file_size
)
3442 except CommandFailedError
:
3443 self
.fail("expected filling subvolume {0} with {1} file of size {2}MB"
3444 "to succeed".format(subvolname
, number_of_files
, file_size
))
3446 self
.fail("expected filling subvolume {0} with {1} file of size {2}MB"
3447 "to fail".format(subvolname
, number_of_files
, file_size
))
3450 self
._fs
_cmd
("subvolume", "rm", self
.volname
, subvolname
)
3452 # verify trash dir is clean
3453 self
._wait
_for
_trash
_empty
()
3455 def test_subvolume_resize_infinite_size(self
):
3457 That a subvolume can be resized to an infinite size by unsetting its quota.
3461 subvolname
= self
._generate
_random
_subvolume
_name
()
3462 self
._fs
_cmd
("subvolume", "create", self
.volname
, subvolname
, "--size",
3463 str(self
.DEFAULT_FILE_SIZE
*1024*1024))
3465 # make sure it exists
3466 subvolpath
= self
._get
_subvolume
_path
(self
.volname
, subvolname
)
3467 self
.assertNotEqual(subvolpath
, None)
3470 self
._fs
_cmd
("subvolume", "resize", self
.volname
, subvolname
, "inf")
3472 # verify that the quota is None
3473 size
= self
.mount_a
.getfattr(subvolpath
, "ceph.quota.max_bytes")
3474 self
.assertEqual(size
, None)
3477 self
._fs
_cmd
("subvolume", "rm", self
.volname
, subvolname
)
3479 # verify trash dir is clean
3480 self
._wait
_for
_trash
_empty
()
3482 def test_subvolume_resize_infinite_size_future_writes(self
):
3484 That a subvolume can be resized to an infinite size and the future writes succeed.
3488 subvolname
= self
._generate
_random
_subvolume
_name
()
3489 self
._fs
_cmd
("subvolume", "create", self
.volname
, subvolname
, "--size",
3490 str(self
.DEFAULT_FILE_SIZE
*1024*1024*5), "--mode=777")
3492 # make sure it exists
3493 subvolpath
= self
._get
_subvolume
_path
(self
.volname
, subvolname
)
3494 self
.assertNotEqual(subvolpath
, None)
3497 self
._fs
_cmd
("subvolume", "resize", self
.volname
, subvolname
, "inf")
3499 # verify that the quota is None
3500 size
= self
.mount_a
.getfattr(subvolpath
, "ceph.quota.max_bytes")
3501 self
.assertEqual(size
, None)
3503 # create one file of 10MB and try to write
3504 file_size
=self
.DEFAULT_FILE_SIZE
*10
3506 log
.debug("filling subvolume {0} with {1} file of size {2}MB".format(subvolname
,
3509 filename
= "{0}.{1}".format(TestVolumes
.TEST_FILE_NAME_PREFIX
, self
.DEFAULT_NUMBER_OF_FILES
+5)
3512 self
.mount_a
.write_n_mb(os
.path
.join(subvolpath
, filename
), file_size
)
3513 except CommandFailedError
:
3514 self
.fail("expected filling subvolume {0} with {1} file of size {2}MB "
3515 "to succeed".format(subvolname
, number_of_files
, file_size
))
3518 self
._fs
_cmd
("subvolume", "rm", self
.volname
, subvolname
)
3520 # verify trash dir is clean
3521 self
._wait
_for
_trash
_empty
()
3523 def test_subvolume_rm_force(self
):
3524 # test removing non-existing subvolume with --force
3525 subvolume
= self
._generate
_random
_subvolume
_name
()
3527 self
._fs
_cmd
("subvolume", "rm", self
.volname
, subvolume
, "--force")
3528 except CommandFailedError
:
3529 self
.fail("expected the 'fs subvolume rm --force' command to succeed")
3531 def test_subvolume_exists_with_subvolumegroup_and_subvolume(self
):
3532 """Test the presence of any subvolume by specifying the name of subvolumegroup"""
3534 group
= self
._generate
_random
_group
_name
()
3535 subvolume1
= self
._generate
_random
_subvolume
_name
()
3536 # create subvolumegroup
3537 self
._fs
_cmd
("subvolumegroup", "create", self
.volname
, group
)
3538 # create subvolume in group
3539 self
._fs
_cmd
("subvolume", "create", self
.volname
, subvolume1
, "--group_name", group
)
3540 ret
= self
._fs
_cmd
("subvolume", "exist", self
.volname
, "--group_name", group
)
3541 self
.assertEqual(ret
.strip('\n'), "subvolume exists")
3542 # delete subvolume in group
3543 self
._fs
_cmd
("subvolume", "rm", self
.volname
, subvolume1
, "--group_name", group
)
3544 ret
= self
._fs
_cmd
("subvolume", "exist", self
.volname
, "--group_name", group
)
3545 self
.assertEqual(ret
.strip('\n'), "no subvolume exists")
3546 # delete subvolumegroup
3547 self
._fs
_cmd
("subvolumegroup", "rm", self
.volname
, group
)
3549 def test_subvolume_exists_with_subvolumegroup_and_no_subvolume(self
):
3550 """Test the presence of any subvolume specifying the name
3551 of subvolumegroup and no subvolumes"""
3553 group
= self
._generate
_random
_group
_name
()
3554 # create subvolumegroup
3555 self
._fs
_cmd
("subvolumegroup", "create", self
.volname
, group
)
3556 ret
= self
._fs
_cmd
("subvolume", "exist", self
.volname
, "--group_name", group
)
3557 self
.assertEqual(ret
.strip('\n'), "no subvolume exists")
3558 # delete subvolumegroup
3559 self
._fs
_cmd
("subvolumegroup", "rm", self
.volname
, group
)
3561 def test_subvolume_exists_without_subvolumegroup_and_with_subvolume(self
):
3562 """Test the presence of any subvolume without specifying the name
3563 of subvolumegroup"""
3565 subvolume1
= self
._generate
_random
_subvolume
_name
()
3567 self
._fs
_cmd
("subvolume", "create", self
.volname
, subvolume1
)
3568 ret
= self
._fs
_cmd
("subvolume", "exist", self
.volname
)
3569 self
.assertEqual(ret
.strip('\n'), "subvolume exists")
3571 self
._fs
_cmd
("subvolume", "rm", self
.volname
, subvolume1
)
3572 ret
= self
._fs
_cmd
("subvolume", "exist", self
.volname
)
3573 self
.assertEqual(ret
.strip('\n'), "no subvolume exists")
3575 def test_subvolume_exists_without_subvolumegroup_and_without_subvolume(self
):
3576 """Test the presence of any subvolume without any subvolumegroup
3577 and without any subvolume"""
3579 ret
= self
._fs
_cmd
("subvolume", "exist", self
.volname
)
3580 self
.assertEqual(ret
.strip('\n'), "no subvolume exists")
3582 def test_subvolume_shrink(self
):
3584 That a subvolume can be shrinked in size and its quota matches the expected size.
3588 subvolname
= self
._generate
_random
_subvolume
_name
()
3589 osize
= self
.DEFAULT_FILE_SIZE
*1024*1024
3590 self
._fs
_cmd
("subvolume", "create", self
.volname
, subvolname
, "--size", str(osize
))
3592 # make sure it exists
3593 subvolpath
= self
._get
_subvolume
_path
(self
.volname
, subvolname
)
3594 self
.assertNotEqual(subvolpath
, None)
3596 # shrink the subvolume
3598 self
._fs
_cmd
("subvolume", "resize", self
.volname
, subvolname
, str(nsize
))
3601 size
= int(self
.mount_a
.getfattr(subvolpath
, "ceph.quota.max_bytes"))
3602 self
.assertEqual(size
, nsize
)
3605 self
._fs
_cmd
("subvolume", "rm", self
.volname
, subvolname
)
3607 # verify trash dir is clean
3608 self
._wait
_for
_trash
_empty
()
3610 def test_subvolume_retain_snapshot_rm_idempotency(self
):
3612 ensure subvolume deletion of a subvolume which is already deleted with retain snapshots option passes.
3613 After subvolume deletion with retain snapshots, the subvolume exists until the trash directory (resides inside subvolume)
3614 is cleaned up. The subvolume deletion issued while the trash directory is not empty, should pass and should
3615 not error out with EAGAIN.
3617 subvolume
= self
._generate
_random
_subvolume
_name
()
3618 snapshot
= self
._generate
_random
_snapshot
_name
()
3621 self
._fs
_cmd
("subvolume", "create", self
.volname
, subvolume
, "--mode=777")
3624 self
._do
_subvolume
_io
(subvolume
, number_of_files
=256)
3626 # snapshot subvolume
3627 self
._fs
_cmd
("subvolume", "snapshot", "create", self
.volname
, subvolume
, snapshot
)
3629 # remove with snapshot retention
3630 self
._fs
_cmd
("subvolume", "rm", self
.volname
, subvolume
, "--retain-snapshots")
3632 # remove snapshots (removes retained volume)
3633 self
._fs
_cmd
("subvolume", "snapshot", "rm", self
.volname
, subvolume
, snapshot
)
3635 # remove subvolume (check idempotency)
3637 self
._fs
_cmd
("subvolume", "rm", self
.volname
, subvolume
)
3638 except CommandFailedError
as ce
:
3639 if ce
.exitstatus
!= errno
.ENOENT
:
3640 self
.fail(f
"expected subvolume rm to pass with error: {os.strerror(ce.exitstatus)}")
3642 # verify trash dir is clean
3643 self
._wait
_for
_trash
_empty
()
3646 def test_subvolume_user_metadata_set(self
):
3647 subvolname
= self
._generate
_random
_subvolume
_name
()
3648 group
= self
._generate
_random
_group
_name
()
3651 self
._fs
_cmd
("subvolumegroup", "create", self
.volname
, group
)
3653 # create subvolume in group.
3654 self
._fs
_cmd
("subvolume", "create", self
.volname
, subvolname
, "--group_name", group
)
3656 # set metadata for subvolume.
3660 self
._fs
_cmd
("subvolume", "metadata", "set", self
.volname
, subvolname
, key
, value
, "--group_name", group
)
3661 except CommandFailedError
:
3662 self
.fail("expected the 'fs subvolume metadata set' command to succeed")
3664 self
._fs
_cmd
("subvolume", "rm", self
.volname
, subvolname
, group
)
3665 self
._fs
_cmd
("subvolumegroup", "rm", self
.volname
, group
)
3667 # verify trash dir is clean.
3668 self
._wait
_for
_trash
_empty
()
3670 def test_subvolume_user_metadata_set_idempotence(self
):
3671 subvolname
= self
._generate
_random
_subvolume
_name
()
3672 group
= self
._generate
_random
_group
_name
()
3675 self
._fs
_cmd
("subvolumegroup", "create", self
.volname
, group
)
3677 # create subvolume in group.
3678 self
._fs
_cmd
("subvolume", "create", self
.volname
, subvolname
, "--group_name", group
)
3680 # set metadata for subvolume.
3684 self
._fs
_cmd
("subvolume", "metadata", "set", self
.volname
, subvolname
, key
, value
, "--group_name", group
)
3685 except CommandFailedError
:
3686 self
.fail("expected the 'fs subvolume metadata set' command to succeed")
3688 # set same metadata again for subvolume.
3690 self
._fs
_cmd
("subvolume", "metadata", "set", self
.volname
, subvolname
, key
, value
, "--group_name", group
)
3691 except CommandFailedError
:
3692 self
.fail("expected the 'fs subvolume metadata set' command to succeed because it is idempotent operation")
3694 self
._fs
_cmd
("subvolume", "rm", self
.volname
, subvolname
, group
)
3695 self
._fs
_cmd
("subvolumegroup", "rm", self
.volname
, group
)
3697 # verify trash dir is clean.
3698 self
._wait
_for
_trash
_empty
()
3700 def test_subvolume_user_metadata_get(self
):
3701 subvolname
= self
._generate
_random
_subvolume
_name
()
3702 group
= self
._generate
_random
_group
_name
()
3705 self
._fs
_cmd
("subvolumegroup", "create", self
.volname
, group
)
3707 # create subvolume in group.
3708 self
._fs
_cmd
("subvolume", "create", self
.volname
, subvolname
, "--group_name", group
)
3710 # set metadata for subvolume.
3713 self
._fs
_cmd
("subvolume", "metadata", "set", self
.volname
, subvolname
, key
, value
, "--group_name", group
)
3715 # get value for specified key.
3717 ret
= self
._fs
_cmd
("subvolume", "metadata", "get", self
.volname
, subvolname
, key
, "--group_name", group
)
3718 except CommandFailedError
:
3719 self
.fail("expected the 'fs subvolume metadata get' command to succeed")
3721 # remove '\n' from returned value.
3722 ret
= ret
.strip('\n')
3724 # match received value with expected value.
3725 self
.assertEqual(value
, ret
)
3727 self
._fs
_cmd
("subvolume", "rm", self
.volname
, subvolname
, group
)
3728 self
._fs
_cmd
("subvolumegroup", "rm", self
.volname
, group
)
3730 # verify trash dir is clean.
3731 self
._wait
_for
_trash
_empty
()
3733 def test_subvolume_user_metadata_get_for_nonexisting_key(self
):
3734 subvolname
= self
._generate
_random
_subvolume
_name
()
3735 group
= self
._generate
_random
_group
_name
()
3738 self
._fs
_cmd
("subvolumegroup", "create", self
.volname
, group
)
3740 # create subvolume in group.
3741 self
._fs
_cmd
("subvolume", "create", self
.volname
, subvolname
, "--group_name", group
)
3743 # set metadata for subvolume.
3746 self
._fs
_cmd
("subvolume", "metadata", "set", self
.volname
, subvolname
, key
, value
, "--group_name", group
)
3748 # try to get value for nonexisting key
3749 # Expecting ENOENT exit status because key does not exist
3751 self
._fs
_cmd
("subvolume", "metadata", "get", self
.volname
, subvolname
, "key_nonexist", "--group_name", group
)
3752 except CommandFailedError
as e
:
3753 self
.assertEqual(e
.exitstatus
, errno
.ENOENT
)
3755 self
.fail("Expected ENOENT because 'key_nonexist' does not exist")
3757 self
._fs
_cmd
("subvolume", "rm", self
.volname
, subvolname
, group
)
3758 self
._fs
_cmd
("subvolumegroup", "rm", self
.volname
, group
)
3760 # verify trash dir is clean.
3761 self
._wait
_for
_trash
_empty
()
3763 def test_subvolume_user_metadata_get_for_nonexisting_section(self
):
3764 subvolname
= self
._generate
_random
_subvolume
_name
()
3765 group
= self
._generate
_random
_group
_name
()
3768 self
._fs
_cmd
("subvolumegroup", "create", self
.volname
, group
)
3770 # create subvolume in group.
3771 self
._fs
_cmd
("subvolume", "create", self
.volname
, subvolname
, "--group_name", group
)
3773 # try to get value for nonexisting key (as section does not exist)
3774 # Expecting ENOENT exit status because key does not exist
3776 self
._fs
_cmd
("subvolume", "metadata", "get", self
.volname
, subvolname
, "key", "--group_name", group
)
3777 except CommandFailedError
as e
:
3778 self
.assertEqual(e
.exitstatus
, errno
.ENOENT
)
3780 self
.fail("Expected ENOENT because section does not exist")
3782 self
._fs
_cmd
("subvolume", "rm", self
.volname
, subvolname
, group
)
3783 self
._fs
_cmd
("subvolumegroup", "rm", self
.volname
, group
)
3785 # verify trash dir is clean.
3786 self
._wait
_for
_trash
_empty
()
3788 def test_subvolume_user_metadata_update(self
):
3789 subvolname
= self
._generate
_random
_subvolume
_name
()
3790 group
= self
._generate
_random
_group
_name
()
3793 self
._fs
_cmd
("subvolumegroup", "create", self
.volname
, group
)
3795 # create subvolume in group.
3796 self
._fs
_cmd
("subvolume", "create", self
.volname
, subvolname
, "--group_name", group
)
3798 # set metadata for subvolume.
3801 self
._fs
_cmd
("subvolume", "metadata", "set", self
.volname
, subvolname
, key
, value
, "--group_name", group
)
3803 # update metadata against key.
3804 new_value
= "new_value"
3805 self
._fs
_cmd
("subvolume", "metadata", "set", self
.volname
, subvolname
, key
, new_value
, "--group_name", group
)
3807 # get metadata for specified key of subvolume.
3809 ret
= self
._fs
_cmd
("subvolume", "metadata", "get", self
.volname
, subvolname
, key
, "--group_name", group
)
3810 except CommandFailedError
:
3811 self
.fail("expected the 'fs subvolume metadata get' command to succeed")
3813 # remove '\n' from returned value.
3814 ret
= ret
.strip('\n')
3816 # match received value with expected value.
3817 self
.assertEqual(new_value
, ret
)
3819 self
._fs
_cmd
("subvolume", "rm", self
.volname
, subvolname
, group
)
3820 self
._fs
_cmd
("subvolumegroup", "rm", self
.volname
, group
)
3822 # verify trash dir is clean.
3823 self
._wait
_for
_trash
_empty
()
3825 def test_subvolume_user_metadata_list(self
):
3826 subvolname
= self
._generate
_random
_subvolume
_name
()
3827 group
= self
._generate
_random
_group
_name
()
3830 self
._fs
_cmd
("subvolumegroup", "create", self
.volname
, group
)
3832 # create subvolume in group.
3833 self
._fs
_cmd
("subvolume", "create", self
.volname
, subvolname
, "--group_name", group
)
3835 # set metadata for subvolume.
3836 input_metadata_dict
= {f
'key_{i}' : f
'value_{i}' for i
in range(3)}
3838 for k
, v
in input_metadata_dict
.items():
3839 self
._fs
_cmd
("subvolume", "metadata", "set", self
.volname
, subvolname
, k
, v
, "--group_name", group
)
3843 ret
= self
._fs
_cmd
("subvolume", "metadata", "ls", self
.volname
, subvolname
, "--group_name", group
)
3844 except CommandFailedError
:
3845 self
.fail("expected the 'fs subvolume metadata ls' command to succeed")
3847 ret_dict
= json
.loads(ret
)
3849 # compare output with expected output
3850 self
.assertDictEqual(input_metadata_dict
, ret_dict
)
3852 self
._fs
_cmd
("subvolume", "rm", self
.volname
, subvolname
, group
)
3853 self
._fs
_cmd
("subvolumegroup", "rm", self
.volname
, group
)
3855 # verify trash dir is clean.
3856 self
._wait
_for
_trash
_empty
()
3858 def test_subvolume_user_metadata_list_if_no_metadata_set(self
):
3859 subvolname
= self
._generate
_random
_subvolume
_name
()
3860 group
= self
._generate
_random
_group
_name
()
3863 self
._fs
_cmd
("subvolumegroup", "create", self
.volname
, group
)
3865 # create subvolume in group.
3866 self
._fs
_cmd
("subvolume", "create", self
.volname
, subvolname
, "--group_name", group
)
3870 ret
= self
._fs
_cmd
("subvolume", "metadata", "ls", self
.volname
, subvolname
, "--group_name", group
)
3871 except CommandFailedError
:
3872 self
.fail("expected the 'fs subvolume metadata ls' command to succeed")
3874 # remove '\n' from returned value.
3875 ret
= ret
.strip('\n')
3877 # compare output with expected output
3878 # expecting empty json/dictionary
3879 self
.assertEqual(ret
, "{}")
3881 self
._fs
_cmd
("subvolume", "rm", self
.volname
, subvolname
, group
)
3882 self
._fs
_cmd
("subvolumegroup", "rm", self
.volname
, group
)
3884 # verify trash dir is clean.
3885 self
._wait
_for
_trash
_empty
()
3887 def test_subvolume_user_metadata_remove(self
):
3888 subvolname
= self
._generate
_random
_subvolume
_name
()
3889 group
= self
._generate
_random
_group
_name
()
3892 self
._fs
_cmd
("subvolumegroup", "create", self
.volname
, group
)
3894 # create subvolume in group.
3895 self
._fs
_cmd
("subvolume", "create", self
.volname
, subvolname
, "--group_name", group
)
3897 # set metadata for subvolume.
3900 self
._fs
_cmd
("subvolume", "metadata", "set", self
.volname
, subvolname
, key
, value
, "--group_name", group
)
3902 # remove metadata against specified key.
3904 self
._fs
_cmd
("subvolume", "metadata", "rm", self
.volname
, subvolname
, key
, "--group_name", group
)
3905 except CommandFailedError
:
3906 self
.fail("expected the 'fs subvolume metadata rm' command to succeed")
3908 # confirm key is removed by again fetching metadata
3910 self
._fs
_cmd
("subvolume", "metadata", "get", self
.volname
, subvolname
, key
, "--group_name", group
)
3911 except CommandFailedError
as e
:
3912 self
.assertEqual(e
.exitstatus
, errno
.ENOENT
)
3914 self
.fail("Expected ENOENT because key does not exist")
3916 self
._fs
_cmd
("subvolume", "rm", self
.volname
, subvolname
, group
)
3917 self
._fs
_cmd
("subvolumegroup", "rm", self
.volname
, group
)
3919 # verify trash dir is clean.
3920 self
._wait
_for
_trash
_empty
()
3922 def test_subvolume_user_metadata_remove_for_nonexisting_key(self
):
3923 subvolname
= self
._generate
_random
_subvolume
_name
()
3924 group
= self
._generate
_random
_group
_name
()
3927 self
._fs
_cmd
("subvolumegroup", "create", self
.volname
, group
)
3929 # create subvolume in group.
3930 self
._fs
_cmd
("subvolume", "create", self
.volname
, subvolname
, "--group_name", group
)
3932 # set metadata for subvolume.
3935 self
._fs
_cmd
("subvolume", "metadata", "set", self
.volname
, subvolname
, key
, value
, "--group_name", group
)
3937 # try to remove value for nonexisting key
3938 # Expecting ENOENT exit status because key does not exist
3940 self
._fs
_cmd
("subvolume", "metadata", "rm", self
.volname
, subvolname
, "key_nonexist", "--group_name", group
)
3941 except CommandFailedError
as e
:
3942 self
.assertEqual(e
.exitstatus
, errno
.ENOENT
)
3944 self
.fail("Expected ENOENT because 'key_nonexist' does not exist")
3946 self
._fs
_cmd
("subvolume", "rm", self
.volname
, subvolname
, group
)
3947 self
._fs
_cmd
("subvolumegroup", "rm", self
.volname
, group
)
3949 # verify trash dir is clean.
3950 self
._wait
_for
_trash
_empty
()
def test_subvolume_user_metadata_remove_for_nonexisting_section(self):
    """Removing a key when no metadata section exists must fail with ENOENT."""
    subvolname = self._generate_random_subvolume_name()
    group = self._generate_random_group_name()

    # create group.
    self._fs_cmd("subvolumegroup", "create", self.volname, group)

    # create subvolume in group.
    self._fs_cmd("subvolume", "create", self.volname, subvolname, "--group_name", group)

    # try to remove value for nonexisting key (as section does not exist)
    # Expecting ENOENT exit status because key does not exist
    try:
        self._fs_cmd("subvolume", "metadata", "rm", self.volname, subvolname, "key", "--group_name", group)
    except CommandFailedError as e:
        self.assertEqual(e.exitstatus, errno.ENOENT)
    else:
        self.fail("Expected ENOENT because section does not exist")

    self._fs_cmd("subvolume", "rm", self.volname, subvolname, group)
    self._fs_cmd("subvolumegroup", "rm", self.volname, group)

    # verify trash dir is clean.
    self._wait_for_trash_empty()
def test_subvolume_user_metadata_remove_force(self):
    """'metadata rm --force' removes an existing key; a subsequent get sees ENOENT."""
    subvolname = self._generate_random_subvolume_name()
    group = self._generate_random_group_name()

    # create group.
    self._fs_cmd("subvolumegroup", "create", self.volname, group)

    # create subvolume in group.
    self._fs_cmd("subvolume", "create", self.volname, subvolname, "--group_name", group)

    # set metadata for subvolume.
    # NOTE(review): key/value assignments were lost in extraction; representative
    # values restored -- confirm against upstream.
    key = "test_key"
    value = "test_value"
    self._fs_cmd("subvolume", "metadata", "set", self.volname, subvolname, key, value, "--group_name", group)

    # remove metadata against specified key with --force option.
    try:
        self._fs_cmd("subvolume", "metadata", "rm", self.volname, subvolname, key, "--group_name", group, "--force")
    except CommandFailedError:
        self.fail("expected the 'fs subvolume metadata rm' command to succeed")

    # confirm key is removed by again fetching metadata
    try:
        self._fs_cmd("subvolume", "metadata", "get", self.volname, subvolname, key, "--group_name", group)
    except CommandFailedError as e:
        self.assertEqual(e.exitstatus, errno.ENOENT)
    else:
        self.fail("Expected ENOENT because key does not exist")

    self._fs_cmd("subvolume", "rm", self.volname, subvolname, group)
    self._fs_cmd("subvolumegroup", "rm", self.volname, group)

    # verify trash dir is clean.
    self._wait_for_trash_empty()
def test_subvolume_user_metadata_remove_force_for_nonexisting_key(self):
    """'metadata rm --force' on an already-removed key must succeed silently."""
    subvolname = self._generate_random_subvolume_name()
    group = self._generate_random_group_name()

    # create group.
    self._fs_cmd("subvolumegroup", "create", self.volname, group)

    # create subvolume in group.
    self._fs_cmd("subvolume", "create", self.volname, subvolname, "--group_name", group)

    # set metadata for subvolume.
    # NOTE(review): key/value assignments were lost in extraction; representative
    # values restored -- confirm against upstream.
    key = "test_key"
    value = "test_value"
    self._fs_cmd("subvolume", "metadata", "set", self.volname, subvolname, key, value, "--group_name", group)

    # remove metadata against specified key.
    try:
        self._fs_cmd("subvolume", "metadata", "rm", self.volname, subvolname, key, "--group_name", group)
    except CommandFailedError:
        self.fail("expected the 'fs subvolume metadata rm' command to succeed")

    # confirm key is removed by again fetching metadata
    try:
        self._fs_cmd("subvolume", "metadata", "get", self.volname, subvolname, key, "--group_name", group)
    except CommandFailedError as e:
        self.assertEqual(e.exitstatus, errno.ENOENT)
    else:
        self.fail("Expected ENOENT because key does not exist")

    # again remove metadata against already removed key with --force option.
    try:
        self._fs_cmd("subvolume", "metadata", "rm", self.volname, subvolname, key, "--group_name", group, "--force")
    except CommandFailedError:
        self.fail("expected the 'fs subvolume metadata rm' (with --force) command to succeed")

    self._fs_cmd("subvolume", "rm", self.volname, subvolname, group)
    self._fs_cmd("subvolumegroup", "rm", self.volname, group)

    # verify trash dir is clean.
    self._wait_for_trash_empty()
def test_subvolume_user_metadata_set_and_get_for_legacy_subvolume(self):
    """Metadata set/get must also work on a legacy (manually created) subvolume."""
    subvolname = self._generate_random_subvolume_name()
    group = self._generate_random_group_name()

    # emulate a old-fashioned subvolume in a custom group
    createpath = os.path.join(".", "volumes", group, subvolname)
    self.mount_a.run_shell(['sudo', 'mkdir', '-p', createpath], omit_sudo=False)

    # set metadata for subvolume.
    # NOTE(review): key/value assignments were lost in extraction; representative
    # values restored -- confirm against upstream.
    key = "test_key"
    value = "test_value"
    try:
        self._fs_cmd("subvolume", "metadata", "set", self.volname, subvolname, key, value, "--group_name", group)
    except CommandFailedError:
        self.fail("expected the 'fs subvolume metadata set' command to succeed")

    # get value for specified key.
    try:
        ret = self._fs_cmd("subvolume", "metadata", "get", self.volname, subvolname, key, "--group_name", group)
    except CommandFailedError:
        self.fail("expected the 'fs subvolume metadata get' command to succeed")

    # remove '\n' from returned value.
    ret = ret.strip('\n')

    # match received value with expected value.
    self.assertEqual(value, ret)

    self._fs_cmd("subvolume", "rm", self.volname, subvolname, group)
    self._fs_cmd("subvolumegroup", "rm", self.volname, group)

    # verify trash dir is clean.
    self._wait_for_trash_empty()
def test_subvolume_user_metadata_list_and_remove_for_legacy_subvolume(self):
    """Metadata ls/rm must also work on a legacy (manually created) subvolume."""
    subvolname = self._generate_random_subvolume_name()
    group = self._generate_random_group_name()

    # emulate a old-fashioned subvolume in a custom group
    createpath = os.path.join(".", "volumes", group, subvolname)
    self.mount_a.run_shell(['sudo', 'mkdir', '-p', createpath], omit_sudo=False)

    # set metadata for subvolume.
    input_metadata_dict = {f'key_{i}' : f'value_{i}' for i in range(3)}

    for k, v in input_metadata_dict.items():
        self._fs_cmd("subvolume", "metadata", "set", self.volname, subvolname, k, v, "--group_name", group)

    # list metadata
    try:
        ret = self._fs_cmd("subvolume", "metadata", "ls", self.volname, subvolname, "--group_name", group)
    except CommandFailedError:
        self.fail("expected the 'fs subvolume metadata ls' command to succeed")

    ret_dict = json.loads(ret)

    # compare output with expected output
    self.assertDictEqual(input_metadata_dict, ret_dict)

    # remove metadata against specified key.
    try:
        self._fs_cmd("subvolume", "metadata", "rm", self.volname, subvolname, "key_1", "--group_name", group)
    except CommandFailedError:
        self.fail("expected the 'fs subvolume metadata rm' command to succeed")

    # confirm key is removed by again fetching metadata
    try:
        self._fs_cmd("subvolume", "metadata", "get", self.volname, subvolname, "key_1", "--group_name", group)
    except CommandFailedError as e:
        self.assertEqual(e.exitstatus, errno.ENOENT)
    else:
        self.fail("Expected ENOENT because key_1 does not exist")

    self._fs_cmd("subvolume", "rm", self.volname, subvolname, group)
    self._fs_cmd("subvolumegroup", "rm", self.volname, group)

    # verify trash dir is clean.
    self._wait_for_trash_empty()
class TestSubvolumeGroupSnapshots(TestVolumesHelper):
    """Tests for FS subvolume group snapshot operations."""
    @unittest.skip("skipping subvolumegroup snapshot tests")
    def test_nonexistent_subvolume_group_snapshot_rm(self):
        """Removing a group snapshot twice must fail with ENOENT the second time."""
        subvolume = self._generate_random_subvolume_name()
        group = self._generate_random_group_name()
        snapshot = self._generate_random_snapshot_name()

        # create group
        self._fs_cmd("subvolumegroup", "create", self.volname, group)

        # create subvolume in group
        self._fs_cmd("subvolume", "create", self.volname, subvolume, "--group_name", group)

        # snapshot group
        self._fs_cmd("subvolumegroup", "snapshot", "create", self.volname, group, snapshot)

        # remove snapshot
        self._fs_cmd("subvolumegroup", "snapshot", "rm", self.volname, group, snapshot)

        # remove snapshot again -- must fail with ENOENT
        try:
            self._fs_cmd("subvolumegroup", "snapshot", "rm", self.volname, group, snapshot)
        except CommandFailedError as ce:
            # any error code other than ENOENT is unexpected
            if ce.exitstatus != errno.ENOENT:
                raise
        else:
            raise RuntimeError("expected the 'fs subvolumegroup snapshot rm' command to fail")

        # remove subvolume
        self._fs_cmd("subvolume", "rm", self.volname, subvolume, group)

        # verify trash dir is clean
        self._wait_for_trash_empty()

        # remove group
        self._fs_cmd("subvolumegroup", "rm", self.volname, group)

    @unittest.skip("skipping subvolumegroup snapshot tests")
    def test_subvolume_group_snapshot_create_and_rm(self):
        """Basic create/remove round trip for a subvolume group snapshot."""
        subvolume = self._generate_random_subvolume_name()
        group = self._generate_random_group_name()
        snapshot = self._generate_random_snapshot_name()

        # create group
        self._fs_cmd("subvolumegroup", "create", self.volname, group)

        # create subvolume in group
        self._fs_cmd("subvolume", "create", self.volname, subvolume, "--group_name", group)

        # snapshot group
        self._fs_cmd("subvolumegroup", "snapshot", "create", self.volname, group, snapshot)

        # remove snapshot
        self._fs_cmd("subvolumegroup", "snapshot", "rm", self.volname, group, snapshot)

        # remove subvolume
        self._fs_cmd("subvolume", "rm", self.volname, subvolume, group)

        # verify trash dir is clean
        self._wait_for_trash_empty()

        # remove group
        self._fs_cmd("subvolumegroup", "rm", self.volname, group)

    @unittest.skip("skipping subvolumegroup snapshot tests")
    def test_subvolume_group_snapshot_idempotence(self):
        """Creating the same group snapshot twice is idempotent."""
        subvolume = self._generate_random_subvolume_name()
        group = self._generate_random_group_name()
        snapshot = self._generate_random_snapshot_name()

        # create group
        self._fs_cmd("subvolumegroup", "create", self.volname, group)

        # create subvolume in group
        self._fs_cmd("subvolume", "create", self.volname, subvolume, "--group_name", group)

        # snapshot group
        self._fs_cmd("subvolumegroup", "snapshot", "create", self.volname, group, snapshot)

        # try creating snapshot w/ same snapshot name -- should be idempotent
        self._fs_cmd("subvolumegroup", "snapshot", "create", self.volname, group, snapshot)

        # remove snapshot
        self._fs_cmd("subvolumegroup", "snapshot", "rm", self.volname, group, snapshot)

        # remove subvolume
        self._fs_cmd("subvolume", "rm", self.volname, subvolume, group)

        # verify trash dir is clean
        self._wait_for_trash_empty()

        # remove group
        self._fs_cmd("subvolumegroup", "rm", self.volname, group)

    @unittest.skip("skipping subvolumegroup snapshot tests")
    def test_subvolume_group_snapshot_ls(self):
        # tests the 'fs subvolumegroup snapshot ls' command

        snapshots = []

        # create group
        group = self._generate_random_group_name()
        self._fs_cmd("subvolumegroup", "create", self.volname, group)

        # create subvolumegroup snapshots
        snapshots = self._generate_random_snapshot_name(3)
        for snapshot in snapshots:
            self._fs_cmd("subvolumegroup", "snapshot", "create", self.volname, group, snapshot)

        subvolgrpsnapshotls = json.loads(self._fs_cmd('subvolumegroup', 'snapshot', 'ls', self.volname, group))
        if len(subvolgrpsnapshotls) == 0:
            raise RuntimeError("Expected the 'fs subvolumegroup snapshot ls' command to list the created subvolume group snapshots")
        else:
            # listed names must match the created set exactly (order-insensitive)
            snapshotnames = [snapshot['name'] for snapshot in subvolgrpsnapshotls]
            if collections.Counter(snapshotnames) != collections.Counter(snapshots):
                raise RuntimeError("Error creating or listing subvolume group snapshots")

    @unittest.skip("skipping subvolumegroup snapshot tests")
    def test_subvolume_group_snapshot_rm_force(self):
        # test removing non-existing subvolume group snapshot with --force
        group = self._generate_random_group_name()
        snapshot = self._generate_random_snapshot_name()

        # remove snapshot -- with --force this must succeed even though nothing exists
        try:
            self._fs_cmd("subvolumegroup", "snapshot", "rm", self.volname, group, snapshot, "--force")
        except CommandFailedError:
            raise RuntimeError("expected the 'fs subvolumegroup snapshot rm --force' command to succeed")

    def test_subvolume_group_snapshot_unsupported_status(self):
        """Group snapshot create is currently unsupported and must fail with ENOSYS."""
        group = self._generate_random_group_name()
        snapshot = self._generate_random_snapshot_name()

        # create group
        self._fs_cmd("subvolumegroup", "create", self.volname, group)

        # snapshot group
        try:
            self._fs_cmd("subvolumegroup", "snapshot", "create", self.volname, group, snapshot)
        except CommandFailedError as ce:
            self.assertEqual(ce.exitstatus, errno.ENOSYS, "invalid error code on subvolumegroup snapshot create")
        else:
            self.fail("expected subvolumegroup snapshot create command to fail")

        # remove group
        self._fs_cmd("subvolumegroup", "rm", self.volname, group)
4280 class TestSubvolumeSnapshots(TestVolumesHelper
):
4281 """Tests for FS subvolume snapshot operations."""
def test_nonexistent_subvolume_snapshot_rm(self):
    """Removing a subvolume snapshot twice must fail with ENOENT the second time."""
    subvolume = self._generate_random_subvolume_name()
    snapshot = self._generate_random_snapshot_name()

    # create subvolume
    self._fs_cmd("subvolume", "create", self.volname, subvolume)

    # snapshot subvolume
    self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolume, snapshot)

    # remove snapshot
    self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume, snapshot)

    # remove snapshot again
    try:
        self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume, snapshot)
    except CommandFailedError as ce:
        # any error code other than ENOENT is unexpected
        if ce.exitstatus != errno.ENOENT:
            raise
    else:
        raise RuntimeError("expected the 'fs subvolume snapshot rm' command to fail")

    # remove subvolume
    self._fs_cmd("subvolume", "rm", self.volname, subvolume)

    # verify trash dir is clean
    self._wait_for_trash_empty()
def test_subvolume_snapshot_create_and_rm(self):
    """Basic create/remove round trip for a subvolume snapshot."""
    subvolume = self._generate_random_subvolume_name()
    snapshot = self._generate_random_snapshot_name()

    # create subvolume
    self._fs_cmd("subvolume", "create", self.volname, subvolume)

    # snapshot subvolume
    self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolume, snapshot)

    # remove snapshot
    self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume, snapshot)

    # remove subvolume
    self._fs_cmd("subvolume", "rm", self.volname, subvolume)

    # verify trash dir is clean
    self._wait_for_trash_empty()
def test_subvolume_snapshot_create_idempotence(self):
    """Creating the same subvolume snapshot twice is idempotent."""
    subvolume = self._generate_random_subvolume_name()
    snapshot = self._generate_random_snapshot_name()

    # create subvolume
    self._fs_cmd("subvolume", "create", self.volname, subvolume)

    # snapshot subvolume
    self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolume, snapshot)

    # try creating w/ same subvolume snapshot name -- should be idempotent
    self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolume, snapshot)

    # remove snapshot
    self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume, snapshot)

    # remove subvolume
    self._fs_cmd("subvolume", "rm", self.volname, subvolume)

    # verify trash dir is clean
    self._wait_for_trash_empty()
def test_subvolume_snapshot_info(self):
    """
    tests the 'fs subvolume snapshot info' command
    """

    # keys every snapshot-info blob must carry
    snap_md = ["created_at", "data_pool", "has_pending_clones"]

    subvolume = self._generate_random_subvolume_name()
    snapshot, snap_missing = self._generate_random_snapshot_name(2)

    # create subvolume
    self._fs_cmd("subvolume", "create", self.volname, subvolume, "--mode=777")

    # do some IO so the snapshot has content
    self._do_subvolume_io(subvolume, number_of_files=1)

    # snapshot subvolume
    self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolume, snapshot)

    snap_info = json.loads(self._get_subvolume_snapshot_info(self.volname, subvolume, snapshot))
    for md in snap_md:
        self.assertIn(md, snap_info, "'{0}' key not present in metadata of snapshot".format(md))
    self.assertEqual(snap_info["has_pending_clones"], "no")

    # snapshot info for non-existent snapshot
    try:
        self._get_subvolume_snapshot_info(self.volname, subvolume, snap_missing)
    except CommandFailedError as ce:
        self.assertEqual(ce.exitstatus, errno.ENOENT, "invalid error code on snapshot info of non-existent snapshot")
    else:
        self.fail("expected snapshot info of non-existent snapshot to fail")

    # remove snapshot
    self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume, snapshot)

    # remove subvolume
    self._fs_cmd("subvolume", "rm", self.volname, subvolume)

    # verify trash dir is clean
    self._wait_for_trash_empty()
def test_subvolume_snapshot_in_group(self):
    """Snapshot create/remove works for a subvolume inside a non-default group."""
    subvolume = self._generate_random_subvolume_name()
    group = self._generate_random_group_name()
    snapshot = self._generate_random_snapshot_name()

    # create group
    self._fs_cmd("subvolumegroup", "create", self.volname, group)

    # create subvolume in group
    self._fs_cmd("subvolume", "create", self.volname, subvolume, "--group_name", group)

    # snapshot subvolume in group
    self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolume, snapshot, group)

    # remove snapshot
    self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume, snapshot, group)

    # remove subvolume
    self._fs_cmd("subvolume", "rm", self.volname, subvolume, group)

    # verify trash dir is clean
    self._wait_for_trash_empty()

    # remove group
    self._fs_cmd("subvolumegroup", "rm", self.volname, group)
def test_subvolume_snapshot_ls(self):
    # tests the 'fs subvolume snapshot ls' command

    snapshots = []

    # create subvolume
    subvolume = self._generate_random_subvolume_name()
    self._fs_cmd("subvolume", "create", self.volname, subvolume)

    # create subvolume snapshots
    snapshots = self._generate_random_snapshot_name(3)
    for snapshot in snapshots:
        self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolume, snapshot)

    subvolsnapshotls = json.loads(self._fs_cmd('subvolume', 'snapshot', 'ls', self.volname, subvolume))
    if len(subvolsnapshotls) == 0:
        self.fail("Expected the 'fs subvolume snapshot ls' command to list the created subvolume snapshots")
    else:
        # listed names must match the created set exactly (order-insensitive)
        snapshotnames = [snapshot['name'] for snapshot in subvolsnapshotls]
        if collections.Counter(snapshotnames) != collections.Counter(snapshots):
            self.fail("Error creating or listing subvolume snapshots")

    # remove snapshot
    for snapshot in snapshots:
        self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume, snapshot)

    # remove subvolume
    self._fs_cmd("subvolume", "rm", self.volname, subvolume)

    # verify trash dir is clean
    self._wait_for_trash_empty()
def test_subvolume_inherited_snapshot_ls(self):
    # tests the scenario where 'fs subvolume snapshot ls' command
    # should not list inherited snapshots created as part of snapshot
    # at ancestral level

    snapshots = []
    subvolume = self._generate_random_subvolume_name()
    group = self._generate_random_group_name()
    # NOTE(review): snap_count assignment was lost in extraction; restored as 3 --
    # confirm against upstream.
    snap_count = 3

    # create group
    self._fs_cmd("subvolumegroup", "create", self.volname, group)

    # create subvolume in group
    self._fs_cmd("subvolume", "create", self.volname, subvolume, "--group_name", group)

    # create subvolume snapshots
    snapshots = self._generate_random_snapshot_name(snap_count)
    for snapshot in snapshots:
        self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolume, snapshot, group)

    # Create snapshot at ancestral level
    ancestral_snappath1 = os.path.join(".", "volumes", group, ".snap", "ancestral_snap_1")
    ancestral_snappath2 = os.path.join(".", "volumes", group, ".snap", "ancestral_snap_2")
    self.mount_a.run_shell(['sudo', 'mkdir', '-p', ancestral_snappath1, ancestral_snappath2], omit_sudo=False)

    # inherited (ancestral) snapshots must not show up in the listing
    subvolsnapshotls = json.loads(self._fs_cmd('subvolume', 'snapshot', 'ls', self.volname, subvolume, group))
    self.assertEqual(len(subvolsnapshotls), snap_count)

    # remove ancestral snapshots
    self.mount_a.run_shell(['sudo', 'rmdir', ancestral_snappath1, ancestral_snappath2], omit_sudo=False)

    # remove snapshot
    for snapshot in snapshots:
        self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume, snapshot, group)

    # remove subvolume
    self._fs_cmd("subvolume", "rm", self.volname, subvolume, group)

    # verify trash dir is clean
    self._wait_for_trash_empty()

    # remove group
    self._fs_cmd("subvolumegroup", "rm", self.volname, group)
def test_subvolume_inherited_snapshot_info(self):
    """
    tests the scenario where 'fs subvolume snapshot info' command
    should fail for inherited snapshots created as part of snapshot
    at ancestral level
    """
    subvolume = self._generate_random_subvolume_name()
    group = self._generate_random_group_name()

    # create group
    self._fs_cmd("subvolumegroup", "create", self.volname, group)

    # create subvolume in group
    self._fs_cmd("subvolume", "create", self.volname, subvolume, "--group_name", group)

    # Create snapshot at ancestral level
    ancestral_snap_name = "ancestral_snap_1"
    ancestral_snappath1 = os.path.join(".", "volumes", group, ".snap", ancestral_snap_name)
    self.mount_a.run_shell(['sudo', 'mkdir', '-p', ancestral_snappath1], omit_sudo=False)

    # Validate existence of inherited snapshot
    group_path = os.path.join(".", "volumes", group)
    # note: '-c' '%i' is adjacent-literal concatenation, i.e. a single '-c%i' argument
    inode_number_group_dir = int(self.mount_a.run_shell(['stat', '-c' '%i', group_path]).stdout.getvalue().strip())
    inherited_snap = "_{0}_{1}".format(ancestral_snap_name, inode_number_group_dir)
    inherited_snappath = os.path.join(".", "volumes", group, subvolume,".snap", inherited_snap)
    self.mount_a.run_shell(['ls', inherited_snappath])

    # snapshot info on inherited snapshot
    try:
        self._get_subvolume_snapshot_info(self.volname, subvolume, inherited_snap, group)
    except CommandFailedError as ce:
        self.assertEqual(ce.exitstatus, errno.EINVAL, "invalid error code on snapshot info of inherited snapshot")
    else:
        self.fail("expected snapshot info of inherited snapshot to fail")

    # remove ancestral snapshots
    self.mount_a.run_shell(['sudo', 'rmdir', ancestral_snappath1], omit_sudo=False)

    # remove subvolume
    self._fs_cmd("subvolume", "rm", self.volname, subvolume, "--group_name", group)

    # verify trash dir is clean
    self._wait_for_trash_empty()

    # remove group
    self._fs_cmd("subvolumegroup", "rm", self.volname, group)
def test_subvolume_inherited_snapshot_rm(self):
    """
    tests the scenario where 'fs subvolume snapshot rm' command
    should fail for inherited snapshots created as part of snapshot
    at ancestral level
    """
    subvolume = self._generate_random_subvolume_name()
    group = self._generate_random_group_name()

    # create group
    self._fs_cmd("subvolumegroup", "create", self.volname, group)

    # create subvolume in group
    self._fs_cmd("subvolume", "create", self.volname, subvolume, "--group_name", group)

    # Create snapshot at ancestral level
    ancestral_snap_name = "ancestral_snap_1"
    ancestral_snappath1 = os.path.join(".", "volumes", group, ".snap", ancestral_snap_name)
    self.mount_a.run_shell(['sudo', 'mkdir', '-p', ancestral_snappath1], omit_sudo=False)

    # Validate existence of inherited snap
    group_path = os.path.join(".", "volumes", group)
    # note: '-c' '%i' is adjacent-literal concatenation, i.e. a single '-c%i' argument
    inode_number_group_dir = int(self.mount_a.run_shell(['stat', '-c' '%i', group_path]).stdout.getvalue().strip())
    inherited_snap = "_{0}_{1}".format(ancestral_snap_name, inode_number_group_dir)
    inherited_snappath = os.path.join(".", "volumes", group, subvolume,".snap", inherited_snap)
    self.mount_a.run_shell(['ls', inherited_snappath])

    # inherited snapshot should not be deletable
    try:
        self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume, inherited_snap, "--group_name", group)
    except CommandFailedError as ce:
        self.assertEqual(ce.exitstatus, errno.EINVAL, msg="invalid error code when removing inherited snapshot")
    else:
        # typo "inheirted" fixed in this failure message
        self.fail("expected removing inherited snapshot to fail")

    # remove ancestral snapshots
    self.mount_a.run_shell(['sudo', 'rmdir', ancestral_snappath1], omit_sudo=False)

    # remove subvolume
    self._fs_cmd("subvolume", "rm", self.volname, subvolume, group)

    # verify trash dir is clean
    self._wait_for_trash_empty()

    # remove group
    self._fs_cmd("subvolumegroup", "rm", self.volname, group)
def test_subvolume_subvolumegroup_snapshot_name_conflict(self):
    """
    tests the scenario where creation of subvolume snapshot name
    with same name as it's subvolumegroup snapshot name. This should
    fail.
    """
    subvolume = self._generate_random_subvolume_name()
    group = self._generate_random_group_name()
    group_snapshot = self._generate_random_snapshot_name()

    # create group
    self._fs_cmd("subvolumegroup", "create", self.volname, group)

    # create subvolume in group
    self._fs_cmd("subvolume", "create", self.volname, subvolume, "--group_name", group)

    # Create subvolumegroup snapshot
    group_snapshot_path = os.path.join(".", "volumes", group, ".snap", group_snapshot)
    self.mount_a.run_shell(['sudo', 'mkdir', '-p', group_snapshot_path], omit_sudo=False)

    # Validate existence of subvolumegroup snapshot
    self.mount_a.run_shell(['ls', group_snapshot_path])

    # Creation of subvolume snapshot with it's subvolumegroup snapshot name should fail
    try:
        self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolume, group_snapshot, "--group_name", group)
    except CommandFailedError as ce:
        self.assertEqual(ce.exitstatus, errno.EINVAL, msg="invalid error code when creating subvolume snapshot with same name as subvolume group snapshot")
    else:
        self.fail("expected subvolume snapshot creation with same name as subvolumegroup snapshot to fail")

    # remove subvolumegroup snapshot
    self.mount_a.run_shell(['sudo', 'rmdir', group_snapshot_path], omit_sudo=False)

    # remove subvolume
    self._fs_cmd("subvolume", "rm", self.volname, subvolume, group)

    # verify trash dir is clean
    self._wait_for_trash_empty()

    # remove group
    self._fs_cmd("subvolumegroup", "rm", self.volname, group)
def test_subvolume_retain_snapshot_invalid_recreate(self):
    """
    ensure retained subvolume recreate does not leave any incarnations in the subvolume and trash
    """
    subvolume = self._generate_random_subvolume_name()
    snapshot = self._generate_random_snapshot_name()

    # create subvolume
    self._fs_cmd("subvolume", "create", self.volname, subvolume)

    # snapshot subvolume
    self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolume, snapshot)

    # remove with snapshot retention
    self._fs_cmd("subvolume", "rm", self.volname, subvolume, "--retain-snapshots")

    # recreate subvolume with an invalid pool
    data_pool = "invalid_pool"
    try:
        self._fs_cmd("subvolume", "create", self.volname, subvolume, "--pool_layout", data_pool)
    except CommandFailedError as ce:
        self.assertEqual(ce.exitstatus, errno.EINVAL, "invalid error code on recreate of subvolume with invalid poolname")
    else:
        self.fail("expected recreate of subvolume with invalid poolname to fail")

    # fetch info -- state must still be snapshot-retained
    subvol_info = json.loads(self._fs_cmd("subvolume", "info", self.volname, subvolume))
    self.assertEqual(subvol_info["state"], "snapshot-retained",
                     msg="expected state to be 'snapshot-retained', found '{0}'".format(subvol_info["state"]))

    # getpath of a retained (dataless) subvolume must fail
    try:
        self._fs_cmd("subvolume", "getpath", self.volname, subvolume)
    except CommandFailedError as ce:
        self.assertEqual(ce.exitstatus, errno.ENOENT, "invalid error code on getpath of subvolume with retained snapshots")
    else:
        self.fail("expected getpath of subvolume with retained snapshots to fail")

    # remove snapshot (should remove volume)
    self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume, snapshot)

    # verify trash dir is clean
    self._wait_for_trash_empty()
def test_subvolume_retain_snapshot_recreate_subvolume(self):
    """
    ensure a retained subvolume can be recreated and further snapshotted
    """
    snap_md = ["created_at", "data_pool", "has_pending_clones"]

    subvolume = self._generate_random_subvolume_name()
    snapshot1, snapshot2 = self._generate_random_snapshot_name(2)

    # create subvolume
    self._fs_cmd("subvolume", "create", self.volname, subvolume)

    # snapshot subvolume
    self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolume, snapshot1)

    # remove with snapshot retention
    self._fs_cmd("subvolume", "rm", self.volname, subvolume, "--retain-snapshots")

    # fetch info
    subvol_info = json.loads(self._fs_cmd("subvolume", "info", self.volname, subvolume))
    self.assertEqual(subvol_info["state"], "snapshot-retained",
                     msg="expected state to be 'snapshot-retained', found '{0}'".format(subvol_info["state"]))

    # recreate retained subvolume
    self._fs_cmd("subvolume", "create", self.volname, subvolume)

    # fetch info
    subvol_info = json.loads(self._fs_cmd("subvolume", "info", self.volname, subvolume))
    # message fixed: the assertion checks for 'complete', not 'snapshot-retained'
    self.assertEqual(subvol_info["state"], "complete",
                     msg="expected state to be 'complete', found '{0}'".format(subvol_info["state"]))

    # snapshot info (older snapshot)
    snap_info = json.loads(self._get_subvolume_snapshot_info(self.volname, subvolume, snapshot1))
    for md in snap_md:
        self.assertIn(md, snap_info, "'{0}' key not present in metadata of snapshot".format(md))
    self.assertEqual(snap_info["has_pending_clones"], "no")

    # snap-create (new snapshot)
    self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolume, snapshot2)

    # remove with retain snapshots
    self._fs_cmd("subvolume", "rm", self.volname, subvolume, "--retain-snapshots")

    # list snapshots -- both the old and the new snapshot must be present
    subvolsnapshotls = json.loads(self._fs_cmd('subvolume', 'snapshot', 'ls', self.volname, subvolume))
    self.assertEqual(len(subvolsnapshotls), 2, "Expected the 'fs subvolume snapshot ls' command to list the"
                     " created subvolume snapshots")
    snapshotnames = [snapshot['name'] for snapshot in subvolsnapshotls]
    for snap in [snapshot1, snapshot2]:
        self.assertIn(snap, snapshotnames, "Missing snapshot '{0}' in snapshot list".format(snap))

    # remove snapshots (should remove volume)
    self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume, snapshot1)
    self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume, snapshot2)

    # verify list subvolumes returns an empty list
    subvolumels = json.loads(self._fs_cmd('subvolume', 'ls', self.volname))
    self.assertEqual(len(subvolumels), 0)

    # verify trash dir is clean
    self._wait_for_trash_empty()
4742 def test_subvolume_retain_snapshot_with_snapshots(self
):
4744 ensure retain snapshots based delete of a subvolume with snapshots retains the subvolume
4745 also test allowed and dis-allowed operations on a retained subvolume
4747 snap_md
= ["created_at", "data_pool", "has_pending_clones"]
4749 subvolume
= self
._generate
_random
_subvolume
_name
()
4750 snapshot
= self
._generate
_random
_snapshot
_name
()
4753 self
._fs
_cmd
("subvolume", "create", self
.volname
, subvolume
)
4755 # snapshot subvolume
4756 self
._fs
_cmd
("subvolume", "snapshot", "create", self
.volname
, subvolume
, snapshot
)
4758 # remove subvolume -- should fail with ENOTEMPTY since it has snapshots
4760 self
._fs
_cmd
("subvolume", "rm", self
.volname
, subvolume
)
4761 except CommandFailedError
as ce
:
4762 self
.assertEqual(ce
.exitstatus
, errno
.ENOTEMPTY
, "invalid error code on rm of retained subvolume with snapshots")
4764 self
.fail("expected rm of subvolume with retained snapshots to fail")
4766 # remove with snapshot retention
4767 self
._fs
_cmd
("subvolume", "rm", self
.volname
, subvolume
, "--retain-snapshots")
4770 subvol_info
= json
.loads(self
._fs
_cmd
("subvolume", "info", self
.volname
, subvolume
))
4771 self
.assertEqual(subvol_info
["state"], "snapshot-retained",
4772 msg
="expected state to be 'snapshot-retained', found '{0}".format(subvol_info
["state"]))
4774 ## test allowed ops in retained state
4776 subvolumes
= json
.loads(self
._fs
_cmd
('subvolume', 'ls', self
.volname
))
4777 self
.assertEqual(len(subvolumes
), 1, "subvolume ls count mismatch, expected '1', found {0}".format(len(subvolumes
)))
4778 self
.assertEqual(subvolumes
[0]['name'], subvolume
,
4779 "subvolume name mismatch in ls output, expected '{0}', found '{1}'".format(subvolume
, subvolumes
[0]['name']))
4782 snap_info
= json
.loads(self
._get
_subvolume
_snapshot
_info
(self
.volname
, subvolume
, snapshot
))
4784 self
.assertIn(md
, snap_info
, "'{0}' key not present in metadata of snapshot".format(md
))
4785 self
.assertEqual(snap_info
["has_pending_clones"], "no")
4787 # rm --force (allowed but should fail)
4789 self
._fs
_cmd
("subvolume", "rm", self
.volname
, subvolume
, "--force")
4790 except CommandFailedError
as ce
:
4791 self
.assertEqual(ce
.exitstatus
, errno
.ENOTEMPTY
, "invalid error code on rm of subvolume with retained snapshots")
4793 self
.fail("expected rm of subvolume with retained snapshots to fail")
4795 # rm (allowed but should fail)
4797 self
._fs
_cmd
("subvolume", "rm", self
.volname
, subvolume
)
4798 except CommandFailedError
as ce
:
4799 self
.assertEqual(ce
.exitstatus
, errno
.ENOTEMPTY
, "invalid error code on rm of subvolume with retained snapshots")
4801 self
.fail("expected rm of subvolume with retained snapshots to fail")
4803 ## test disallowed ops
4806 self
._fs
_cmd
("subvolume", "getpath", self
.volname
, subvolume
)
4807 except CommandFailedError
as ce
:
4808 self
.assertEqual(ce
.exitstatus
, errno
.ENOENT
, "invalid error code on getpath of subvolume with retained snapshots")
4810 self
.fail("expected getpath of subvolume with retained snapshots to fail")
4813 nsize
= self
.DEFAULT_FILE_SIZE
*1024*1024
4815 self
._fs
_cmd
("subvolume", "resize", self
.volname
, subvolume
, str(nsize
))
4816 except CommandFailedError
as ce
:
4817 self
.assertEqual(ce
.exitstatus
, errno
.ENOENT
, "invalid error code on resize of subvolume with retained snapshots")
4819 self
.fail("expected resize of subvolume with retained snapshots to fail")
4823 self
._fs
_cmd
("subvolume", "snapshot", "create", self
.volname
, subvolume
, "fail")
4824 except CommandFailedError
as ce
:
4825 self
.assertEqual(ce
.exitstatus
, errno
.ENOENT
, "invalid error code on snapshot create of subvolume with retained snapshots")
4827 self
.fail("expected snapshot create of subvolume with retained snapshots to fail")
4829 # remove snapshot (should remove volume)
4830 self
._fs
_cmd
("subvolume", "snapshot", "rm", self
.volname
, subvolume
, snapshot
)
4832 # verify list subvolumes returns an empty list
4833 subvolumels
= json
.loads(self
._fs
_cmd
('subvolume', 'ls', self
.volname
))
4834 self
.assertEqual(len(subvolumels
), 0)
4836 # verify trash dir is clean
4837 self
._wait
_for
_trash
_empty
()
4839 def test_subvolume_retain_snapshot_without_snapshots(self
):
4841 ensure retain snapshots based delete of a subvolume with no snapshots, deletes the subbvolume
4843 subvolume
= self
._generate
_random
_subvolume
_name
()
4846 self
._fs
_cmd
("subvolume", "create", self
.volname
, subvolume
)
4848 # remove with snapshot retention (should remove volume, no snapshots to retain)
4849 self
._fs
_cmd
("subvolume", "rm", self
.volname
, subvolume
, "--retain-snapshots")
4851 # verify list subvolumes returns an empty list
4852 subvolumels
= json
.loads(self
._fs
_cmd
('subvolume', 'ls', self
.volname
))
4853 self
.assertEqual(len(subvolumels
), 0)
4855 # verify trash dir is clean
4856 self
._wait
_for
_trash
_empty
()
4858 def test_subvolume_retain_snapshot_trash_busy_recreate(self
):
4860 ensure retained subvolume recreate fails if its trash is not yet purged
4862 subvolume
= self
._generate
_random
_subvolume
_name
()
4863 snapshot
= self
._generate
_random
_snapshot
_name
()
4866 self
._fs
_cmd
("subvolume", "create", self
.volname
, subvolume
)
4868 # snapshot subvolume
4869 self
._fs
_cmd
("subvolume", "snapshot", "create", self
.volname
, subvolume
, snapshot
)
4871 # remove with snapshot retention
4872 self
._fs
_cmd
("subvolume", "rm", self
.volname
, subvolume
, "--retain-snapshots")
4874 # fake a trash entry
4875 self
._update
_fake
_trash
(subvolume
)
4877 # recreate subvolume
4879 self
._fs
_cmd
("subvolume", "create", self
.volname
, subvolume
)
4880 except CommandFailedError
as ce
:
4881 self
.assertEqual(ce
.exitstatus
, errno
.EAGAIN
, "invalid error code on recreate of subvolume with purge pending")
4883 self
.fail("expected recreate of subvolume with purge pending to fail")
4885 # clear fake trash entry
4886 self
._update
_fake
_trash
(subvolume
, create
=False)
4888 # recreate subvolume
4889 self
._fs
_cmd
("subvolume", "create", self
.volname
, subvolume
)
4892 self
._fs
_cmd
("subvolume", "snapshot", "rm", self
.volname
, subvolume
, snapshot
)
4895 self
._fs
_cmd
("subvolume", "rm", self
.volname
, subvolume
)
4897 # verify trash dir is clean
4898 self
._wait
_for
_trash
_empty
()
4900 def test_subvolume_rm_with_snapshots(self
):
4901 subvolume
= self
._generate
_random
_subvolume
_name
()
4902 snapshot
= self
._generate
_random
_snapshot
_name
()
4905 self
._fs
_cmd
("subvolume", "create", self
.volname
, subvolume
)
4907 # snapshot subvolume
4908 self
._fs
_cmd
("subvolume", "snapshot", "create", self
.volname
, subvolume
, snapshot
)
4910 # remove subvolume -- should fail with ENOTEMPTY since it has snapshots
4912 self
._fs
_cmd
("subvolume", "rm", self
.volname
, subvolume
)
4913 except CommandFailedError
as ce
:
4914 if ce
.exitstatus
!= errno
.ENOTEMPTY
:
4915 raise RuntimeError("invalid error code returned when deleting subvolume with snapshots")
4917 raise RuntimeError("expected subvolume deletion to fail")
4920 self
._fs
_cmd
("subvolume", "snapshot", "rm", self
.volname
, subvolume
, snapshot
)
4923 self
._fs
_cmd
("subvolume", "rm", self
.volname
, subvolume
)
4925 # verify trash dir is clean
4926 self
._wait
_for
_trash
_empty
()
4928 def test_subvolume_snapshot_protect_unprotect_sanity(self
):
4930 Snapshot protect/unprotect commands are deprecated. This test exists to ensure that
4931 invoking the command does not cause errors, till they are removed from a subsequent release.
4933 subvolume
= self
._generate
_random
_subvolume
_name
()
4934 snapshot
= self
._generate
_random
_snapshot
_name
()
4935 clone
= self
._generate
_random
_clone
_name
()
4938 self
._fs
_cmd
("subvolume", "create", self
.volname
, subvolume
, "--mode=777")
4941 self
._do
_subvolume
_io
(subvolume
, number_of_files
=64)
4943 # snapshot subvolume
4944 self
._fs
_cmd
("subvolume", "snapshot", "create", self
.volname
, subvolume
, snapshot
)
4946 # now, protect snapshot
4947 self
._fs
_cmd
("subvolume", "snapshot", "protect", self
.volname
, subvolume
, snapshot
)
4950 self
._fs
_cmd
("subvolume", "snapshot", "clone", self
.volname
, subvolume
, snapshot
, clone
)
4952 # check clone status
4953 self
._wait
_for
_clone
_to
_complete
(clone
)
4955 # now, unprotect snapshot
4956 self
._fs
_cmd
("subvolume", "snapshot", "unprotect", self
.volname
, subvolume
, snapshot
)
4959 self
._verify
_clone
(subvolume
, snapshot
, clone
)
4962 self
._fs
_cmd
("subvolume", "snapshot", "rm", self
.volname
, subvolume
, snapshot
)
4965 self
._fs
_cmd
("subvolume", "rm", self
.volname
, subvolume
)
4966 self
._fs
_cmd
("subvolume", "rm", self
.volname
, clone
)
4968 # verify trash dir is clean
4969 self
._wait
_for
_trash
_empty
()
4971 def test_subvolume_snapshot_rm_force(self
):
4972 # test removing non existing subvolume snapshot with --force
4973 subvolume
= self
._generate
_random
_subvolume
_name
()
4974 snapshot
= self
._generate
_random
_snapshot
_name
()
4978 self
._fs
_cmd
("subvolume", "snapshot", "rm", self
.volname
, subvolume
, snapshot
, "--force")
4979 except CommandFailedError
:
4980 raise RuntimeError("expected the 'fs subvolume snapshot rm --force' command to succeed")
4982 def test_subvolume_snapshot_metadata_set(self
):
4984 Set custom metadata for subvolume snapshot.
4986 subvolname
= self
._generate
_random
_subvolume
_name
()
4987 group
= self
._generate
_random
_group
_name
()
4988 snapshot
= self
._generate
_random
_snapshot
_name
()
4991 self
._fs
_cmd
("subvolumegroup", "create", self
.volname
, group
)
4993 # create subvolume in group.
4994 self
._fs
_cmd
("subvolume", "create", self
.volname
, subvolname
, group
)
4996 # snapshot subvolume
4997 self
._fs
_cmd
("subvolume", "snapshot", "create", self
.volname
, subvolname
, snapshot
, group
)
4999 # set metadata for snapshot.
5003 self
._fs
_cmd
("subvolume", "snapshot", "metadata", "set", self
.volname
, subvolname
, snapshot
, key
, value
, group
)
5004 except CommandFailedError
:
5005 self
.fail("expected the 'fs subvolume snapshot metadata set' command to succeed")
5007 self
._fs
_cmd
("subvolume", "snapshot", "rm", self
.volname
, subvolname
, snapshot
, group
)
5008 self
._fs
_cmd
("subvolume", "rm", self
.volname
, subvolname
, group
)
5009 self
._fs
_cmd
("subvolumegroup", "rm", self
.volname
, group
)
5011 # verify trash dir is clean.
5012 self
._wait
_for
_trash
_empty
()
5014 def test_subvolume_snapshot_metadata_set_idempotence(self
):
5016 Set custom metadata for subvolume snapshot (Idempotency).
5018 subvolname
= self
._generate
_random
_subvolume
_name
()
5019 group
= self
._generate
_random
_group
_name
()
5020 snapshot
= self
._generate
_random
_snapshot
_name
()
5023 self
._fs
_cmd
("subvolumegroup", "create", self
.volname
, group
)
5025 # create subvolume in group.
5026 self
._fs
_cmd
("subvolume", "create", self
.volname
, subvolname
, group
)
5028 # snapshot subvolume
5029 self
._fs
_cmd
("subvolume", "snapshot", "create", self
.volname
, subvolname
, snapshot
, group
)
5031 # set metadata for snapshot.
5035 self
._fs
_cmd
("subvolume", "snapshot", "metadata", "set", self
.volname
, subvolname
, snapshot
, key
, value
, group
)
5036 except CommandFailedError
:
5037 self
.fail("expected the 'fs subvolume snapshot metadata set' command to succeed")
5039 # set same metadata again for subvolume.
5041 self
._fs
_cmd
("subvolume", "snapshot", "metadata", "set", self
.volname
, subvolname
, snapshot
, key
, value
, group
)
5042 except CommandFailedError
:
5043 self
.fail("expected the 'fs subvolume snapshot metadata set' command to succeed because it is idempotent operation")
5045 # get value for specified key.
5047 ret
= self
._fs
_cmd
("subvolume", "snapshot", "metadata", "get", self
.volname
, subvolname
, snapshot
, key
, group
)
5048 except CommandFailedError
:
5049 self
.fail("expected the 'fs subvolume snapshot metadata get' command to succeed")
5051 # remove '\n' from returned value.
5052 ret
= ret
.strip('\n')
5054 # match received value with expected value.
5055 self
.assertEqual(value
, ret
)
5057 self
._fs
_cmd
("subvolume", "snapshot", "rm", self
.volname
, subvolname
, snapshot
, group
)
5058 self
._fs
_cmd
("subvolume", "rm", self
.volname
, subvolname
, group
)
5059 self
._fs
_cmd
("subvolumegroup", "rm", self
.volname
, group
)
5061 # verify trash dir is clean.
5062 self
._wait
_for
_trash
_empty
()
5064 def test_subvolume_snapshot_metadata_get(self
):
5066 Get custom metadata for a specified key in subvolume snapshot metadata.
5068 subvolname
= self
._generate
_random
_subvolume
_name
()
5069 group
= self
._generate
_random
_group
_name
()
5070 snapshot
= self
._generate
_random
_snapshot
_name
()
5073 self
._fs
_cmd
("subvolumegroup", "create", self
.volname
, group
)
5075 # create subvolume in group.
5076 self
._fs
_cmd
("subvolume", "create", self
.volname
, subvolname
, group
)
5078 # snapshot subvolume
5079 self
._fs
_cmd
("subvolume", "snapshot", "create", self
.volname
, subvolname
, snapshot
, group
)
5081 # set metadata for snapshot.
5084 self
._fs
_cmd
("subvolume", "snapshot", "metadata", "set", self
.volname
, subvolname
, snapshot
, key
, value
, group
)
5086 # get value for specified key.
5088 ret
= self
._fs
_cmd
("subvolume", "snapshot", "metadata", "get", self
.volname
, subvolname
, snapshot
, key
, group
)
5089 except CommandFailedError
:
5090 self
.fail("expected the 'fs subvolume snapshot metadata get' command to succeed")
5092 # remove '\n' from returned value.
5093 ret
= ret
.strip('\n')
5095 # match received value with expected value.
5096 self
.assertEqual(value
, ret
)
5098 self
._fs
_cmd
("subvolume", "snapshot", "rm", self
.volname
, subvolname
, snapshot
, group
)
5099 self
._fs
_cmd
("subvolume", "rm", self
.volname
, subvolname
, group
)
5100 self
._fs
_cmd
("subvolumegroup", "rm", self
.volname
, group
)
5102 # verify trash dir is clean.
5103 self
._wait
_for
_trash
_empty
()
5105 def test_subvolume_snapshot_metadata_get_for_nonexisting_key(self
):
5107 Get custom metadata for subvolume snapshot if specified key not exist in metadata.
5109 subvolname
= self
._generate
_random
_subvolume
_name
()
5110 group
= self
._generate
_random
_group
_name
()
5111 snapshot
= self
._generate
_random
_snapshot
_name
()
5114 self
._fs
_cmd
("subvolumegroup", "create", self
.volname
, group
)
5116 # create subvolume in group.
5117 self
._fs
_cmd
("subvolume", "create", self
.volname
, subvolname
, group
)
5119 # snapshot subvolume
5120 self
._fs
_cmd
("subvolume", "snapshot", "create", self
.volname
, subvolname
, snapshot
, group
)
5122 # set metadata for snapshot.
5125 self
._fs
_cmd
("subvolume", "snapshot", "metadata", "set", self
.volname
, subvolname
, snapshot
, key
, value
, group
)
5127 # try to get value for nonexisting key
5128 # Expecting ENOENT exit status because key does not exist
5130 self
._fs
_cmd
("subvolume", "snapshot", "metadata", "get", self
.volname
, subvolname
, snapshot
, "key_nonexist", group
)
5131 except CommandFailedError
as e
:
5132 self
.assertEqual(e
.exitstatus
, errno
.ENOENT
)
5134 self
.fail("Expected ENOENT because 'key_nonexist' does not exist")
5136 self
._fs
_cmd
("subvolume", "snapshot", "rm", self
.volname
, subvolname
, snapshot
, group
)
5137 self
._fs
_cmd
("subvolume", "rm", self
.volname
, subvolname
, group
)
5138 self
._fs
_cmd
("subvolumegroup", "rm", self
.volname
, group
)
5140 # verify trash dir is clean.
5141 self
._wait
_for
_trash
_empty
()
5143 def test_subvolume_snapshot_metadata_get_for_nonexisting_section(self
):
5145 Get custom metadata for subvolume snapshot if metadata is not added for subvolume snapshot.
5147 subvolname
= self
._generate
_random
_subvolume
_name
()
5148 group
= self
._generate
_random
_group
_name
()
5149 snapshot
= self
._generate
_random
_snapshot
_name
()
5152 self
._fs
_cmd
("subvolumegroup", "create", self
.volname
, group
)
5154 # create subvolume in group.
5155 self
._fs
_cmd
("subvolume", "create", self
.volname
, subvolname
, group
)
5157 # snapshot subvolume
5158 self
._fs
_cmd
("subvolume", "snapshot", "create", self
.volname
, subvolname
, snapshot
, group
)
5160 # try to get value for nonexisting key (as section does not exist)
5161 # Expecting ENOENT exit status because key does not exist
5163 self
._fs
_cmd
("subvolume", "snapshot", "metadata", "get", self
.volname
, subvolname
, snapshot
, "key", group
)
5164 except CommandFailedError
as e
:
5165 self
.assertEqual(e
.exitstatus
, errno
.ENOENT
)
5167 self
.fail("Expected ENOENT because section does not exist")
5169 self
._fs
_cmd
("subvolume", "snapshot", "rm", self
.volname
, subvolname
, snapshot
, group
)
5170 self
._fs
_cmd
("subvolume", "rm", self
.volname
, subvolname
, group
)
5171 self
._fs
_cmd
("subvolumegroup", "rm", self
.volname
, group
)
5173 # verify trash dir is clean.
5174 self
._wait
_for
_trash
_empty
()
5176 def test_subvolume_snapshot_metadata_update(self
):
5178 Update custom metadata for a specified key in subvolume snapshot metadata.
5180 subvolname
= self
._generate
_random
_subvolume
_name
()
5181 group
= self
._generate
_random
_group
_name
()
5182 snapshot
= self
._generate
_random
_snapshot
_name
()
5185 self
._fs
_cmd
("subvolumegroup", "create", self
.volname
, group
)
5187 # create subvolume in group.
5188 self
._fs
_cmd
("subvolume", "create", self
.volname
, subvolname
, group
)
5190 # snapshot subvolume
5191 self
._fs
_cmd
("subvolume", "snapshot", "create", self
.volname
, subvolname
, snapshot
, group
)
5193 # set metadata for snapshot.
5196 self
._fs
_cmd
("subvolume", "snapshot", "metadata", "set", self
.volname
, subvolname
, snapshot
, key
, value
, group
)
5198 # update metadata against key.
5199 new_value
= "new_value"
5200 self
._fs
_cmd
("subvolume", "snapshot", "metadata", "set", self
.volname
, subvolname
, snapshot
, key
, new_value
, group
)
5202 # get metadata for specified key of snapshot.
5204 ret
= self
._fs
_cmd
("subvolume", "snapshot", "metadata", "get", self
.volname
, subvolname
, snapshot
, key
, group
)
5205 except CommandFailedError
:
5206 self
.fail("expected the 'fs subvolume snapshot metadata get' command to succeed")
5208 # remove '\n' from returned value.
5209 ret
= ret
.strip('\n')
5211 # match received value with expected value.
5212 self
.assertEqual(new_value
, ret
)
5214 self
._fs
_cmd
("subvolume", "snapshot", "rm", self
.volname
, subvolname
, snapshot
, group
)
5215 self
._fs
_cmd
("subvolume", "rm", self
.volname
, subvolname
, group
)
5216 self
._fs
_cmd
("subvolumegroup", "rm", self
.volname
, group
)
5218 # verify trash dir is clean.
5219 self
._wait
_for
_trash
_empty
()
5221 def test_subvolume_snapshot_metadata_list(self
):
5223 List custom metadata for subvolume snapshot.
5225 subvolname
= self
._generate
_random
_subvolume
_name
()
5226 group
= self
._generate
_random
_group
_name
()
5227 snapshot
= self
._generate
_random
_snapshot
_name
()
5230 self
._fs
_cmd
("subvolumegroup", "create", self
.volname
, group
)
5232 # create subvolume in group.
5233 self
._fs
_cmd
("subvolume", "create", self
.volname
, subvolname
, group
)
5235 # snapshot subvolume
5236 self
._fs
_cmd
("subvolume", "snapshot", "create", self
.volname
, subvolname
, snapshot
, group
)
5238 # set metadata for subvolume.
5239 input_metadata_dict
= {f
'key_{i}' : f
'value_{i}' for i
in range(3)}
5241 for k
, v
in input_metadata_dict
.items():
5242 self
._fs
_cmd
("subvolume", "snapshot", "metadata", "set", self
.volname
, subvolname
, snapshot
, k
, v
, group
)
5246 ret_dict
= json
.loads(self
._fs
_cmd
("subvolume", "snapshot", "metadata", "ls", self
.volname
, subvolname
, snapshot
, group
))
5247 except CommandFailedError
:
5248 self
.fail("expected the 'fs subvolume snapshot metadata ls' command to succeed")
5250 # compare output with expected output
5251 self
.assertDictEqual(input_metadata_dict
, ret_dict
)
5253 self
._fs
_cmd
("subvolume", "snapshot", "rm", self
.volname
, subvolname
, snapshot
, group
)
5254 self
._fs
_cmd
("subvolume", "rm", self
.volname
, subvolname
, group
)
5255 self
._fs
_cmd
("subvolumegroup", "rm", self
.volname
, group
)
5257 # verify trash dir is clean.
5258 self
._wait
_for
_trash
_empty
()
5260 def test_subvolume_snapshot_metadata_list_if_no_metadata_set(self
):
5262 List custom metadata for subvolume snapshot if metadata is not added for subvolume snapshot.
5264 subvolname
= self
._generate
_random
_subvolume
_name
()
5265 group
= self
._generate
_random
_group
_name
()
5266 snapshot
= self
._generate
_random
_snapshot
_name
()
5269 self
._fs
_cmd
("subvolumegroup", "create", self
.volname
, group
)
5271 # create subvolume in group.
5272 self
._fs
_cmd
("subvolume", "create", self
.volname
, subvolname
, group
)
5274 # snapshot subvolume
5275 self
._fs
_cmd
("subvolume", "snapshot", "create", self
.volname
, subvolname
, snapshot
, group
)
5279 ret_dict
= json
.loads(self
._fs
_cmd
("subvolume", "snapshot", "metadata", "ls", self
.volname
, subvolname
, snapshot
, group
))
5280 except CommandFailedError
:
5281 self
.fail("expected the 'fs subvolume snapshot metadata ls' command to succeed")
5283 # compare output with expected output
5285 self
.assertDictEqual(ret_dict
, empty_dict
)
5287 self
._fs
_cmd
("subvolume", "snapshot", "rm", self
.volname
, subvolname
, snapshot
, group
)
5288 self
._fs
_cmd
("subvolume", "rm", self
.volname
, subvolname
, group
)
5289 self
._fs
_cmd
("subvolumegroup", "rm", self
.volname
, group
)
5291 # verify trash dir is clean.
5292 self
._wait
_for
_trash
_empty
()
5294 def test_subvolume_snapshot_metadata_remove(self
):
5296 Remove custom metadata for a specified key in subvolume snapshot metadata.
5298 subvolname
= self
._generate
_random
_subvolume
_name
()
5299 group
= self
._generate
_random
_group
_name
()
5300 snapshot
= self
._generate
_random
_snapshot
_name
()
5303 self
._fs
_cmd
("subvolumegroup", "create", self
.volname
, group
)
5305 # create subvolume in group.
5306 self
._fs
_cmd
("subvolume", "create", self
.volname
, subvolname
, group
)
5308 # snapshot subvolume
5309 self
._fs
_cmd
("subvolume", "snapshot", "create", self
.volname
, subvolname
, snapshot
, group
)
5311 # set metadata for snapshot.
5314 self
._fs
_cmd
("subvolume", "snapshot", "metadata", "set", self
.volname
, subvolname
, snapshot
, key
, value
, group
)
5316 # remove metadata against specified key.
5318 self
._fs
_cmd
("subvolume", "snapshot", "metadata", "rm", self
.volname
, subvolname
, snapshot
, key
, group
)
5319 except CommandFailedError
:
5320 self
.fail("expected the 'fs subvolume snapshot metadata rm' command to succeed")
5322 # confirm key is removed by again fetching metadata
5324 self
._fs
_cmd
("subvolume", "snapshot", "metadata", "get", self
.volname
, subvolname
, key
, snapshot
, group
)
5325 except CommandFailedError
as e
:
5326 self
.assertEqual(e
.exitstatus
, errno
.ENOENT
)
5328 self
.fail("Expected ENOENT because key does not exist")
5330 self
._fs
_cmd
("subvolume", "snapshot", "rm", self
.volname
, subvolname
, snapshot
, group
)
5331 self
._fs
_cmd
("subvolume", "rm", self
.volname
, subvolname
, group
)
5332 self
._fs
_cmd
("subvolumegroup", "rm", self
.volname
, group
)
5334 # verify trash dir is clean.
5335 self
._wait
_for
_trash
_empty
()
5337 def test_subvolume_snapshot_metadata_remove_for_nonexisting_key(self
):
5339 Remove custom metadata for subvolume snapshot if specified key not exist in metadata.
5341 subvolname
= self
._generate
_random
_subvolume
_name
()
5342 group
= self
._generate
_random
_group
_name
()
5343 snapshot
= self
._generate
_random
_snapshot
_name
()
5346 self
._fs
_cmd
("subvolumegroup", "create", self
.volname
, group
)
5348 # create subvolume in group.
5349 self
._fs
_cmd
("subvolume", "create", self
.volname
, subvolname
, group
)
5351 # snapshot subvolume
5352 self
._fs
_cmd
("subvolume", "snapshot", "create", self
.volname
, subvolname
, snapshot
, group
)
5354 # set metadata for snapshot.
5357 self
._fs
_cmd
("subvolume", "snapshot", "metadata", "set", self
.volname
, subvolname
, snapshot
, key
, value
, group
)
5359 # try to remove value for nonexisting key
5360 # Expecting ENOENT exit status because key does not exist
5362 self
._fs
_cmd
("subvolume", "snapshot", "metadata", "rm", self
.volname
, subvolname
, snapshot
, "key_nonexist", group
)
5363 except CommandFailedError
as e
:
5364 self
.assertEqual(e
.exitstatus
, errno
.ENOENT
)
5366 self
.fail("Expected ENOENT because 'key_nonexist' does not exist")
5368 self
._fs
_cmd
("subvolume", "snapshot", "rm", self
.volname
, subvolname
, snapshot
, group
)
5369 self
._fs
_cmd
("subvolume", "rm", self
.volname
, subvolname
, group
)
5370 self
._fs
_cmd
("subvolumegroup", "rm", self
.volname
, group
)
5372 # verify trash dir is clean.
5373 self
._wait
_for
_trash
_empty
()
5375 def test_subvolume_snapshot_metadata_remove_for_nonexisting_section(self
):
5377 Remove custom metadata for subvolume snapshot if metadata is not added for subvolume snapshot.
5379 subvolname
= self
._generate
_random
_subvolume
_name
()
5380 group
= self
._generate
_random
_group
_name
()
5381 snapshot
= self
._generate
_random
_snapshot
_name
()
5384 self
._fs
_cmd
("subvolumegroup", "create", self
.volname
, group
)
5386 # create subvolume in group.
5387 self
._fs
_cmd
("subvolume", "create", self
.volname
, subvolname
, group
)
5389 # snapshot subvolume
5390 self
._fs
_cmd
("subvolume", "snapshot", "create", self
.volname
, subvolname
, snapshot
, group
)
5392 # try to remove value for nonexisting key (as section does not exist)
5393 # Expecting ENOENT exit status because key does not exist
5395 self
._fs
_cmd
("subvolume", "snapshot", "metadata", "rm", self
.volname
, subvolname
, snapshot
, "key", group
)
5396 except CommandFailedError
as e
:
5397 self
.assertEqual(e
.exitstatus
, errno
.ENOENT
)
5399 self
.fail("Expected ENOENT because section does not exist")
5401 self
._fs
_cmd
("subvolume", "snapshot", "rm", self
.volname
, subvolname
, snapshot
, group
)
5402 self
._fs
_cmd
("subvolume", "rm", self
.volname
, subvolname
, group
)
5403 self
._fs
_cmd
("subvolumegroup", "rm", self
.volname
, group
)
5405 # verify trash dir is clean.
5406 self
._wait
_for
_trash
_empty
()
5408 def test_subvolume_snapshot_metadata_remove_force(self
):
5410 Forcefully remove custom metadata for a specified key in subvolume snapshot metadata.
5412 subvolname
= self
._generate
_random
_subvolume
_name
()
5413 group
= self
._generate
_random
_group
_name
()
5414 snapshot
= self
._generate
_random
_snapshot
_name
()
5417 self
._fs
_cmd
("subvolumegroup", "create", self
.volname
, group
)
5419 # create subvolume in group.
5420 self
._fs
_cmd
("subvolume", "create", self
.volname
, subvolname
, group
)
5422 # snapshot subvolume
5423 self
._fs
_cmd
("subvolume", "snapshot", "create", self
.volname
, subvolname
, snapshot
, group
)
5425 # set metadata for snapshot.
5428 self
._fs
_cmd
("subvolume", "snapshot", "metadata", "set", self
.volname
, subvolname
, snapshot
, key
, value
, group
)
5430 # remove metadata against specified key with --force option.
5432 self
._fs
_cmd
("subvolume", "snapshot", "metadata", "rm", self
.volname
, subvolname
, snapshot
, key
, group
, "--force")
5433 except CommandFailedError
:
5434 self
.fail("expected the 'fs subvolume snapshot metadata rm' command to succeed")
5436 # confirm key is removed by again fetching metadata
5438 self
._fs
_cmd
("subvolume", "snapshot", "metadata", "get", self
.volname
, subvolname
, snapshot
, key
, group
)
5439 except CommandFailedError
as e
:
5440 self
.assertEqual(e
.exitstatus
, errno
.ENOENT
)
5442 self
.fail("Expected ENOENT because key does not exist")
5444 self
._fs
_cmd
("subvolume", "snapshot", "rm", self
.volname
, subvolname
, snapshot
, group
)
5445 self
._fs
_cmd
("subvolume", "rm", self
.volname
, subvolname
, group
)
5446 self
._fs
_cmd
("subvolumegroup", "rm", self
.volname
, group
)
5448 # verify trash dir is clean.
5449 self
._wait
_for
_trash
_empty
()
5451 def test_subvolume_snapshot_metadata_remove_force_for_nonexisting_key(self
):
5453 Forcefully remove custom metadata for subvolume snapshot if specified key not exist in metadata.
5455 subvolname
= self
._generate
_random
_subvolume
_name
()
5456 group
= self
._generate
_random
_group
_name
()
5457 snapshot
= self
._generate
_random
_snapshot
_name
()
5460 self
._fs
_cmd
("subvolumegroup", "create", self
.volname
, group
)
5462 # create subvolume in group.
5463 self
._fs
_cmd
("subvolume", "create", self
.volname
, subvolname
, group
)
5465 # snapshot subvolume
5466 self
._fs
_cmd
("subvolume", "snapshot", "create", self
.volname
, subvolname
, snapshot
, group
)
5468 # set metadata for snapshot.
5471 self
._fs
_cmd
("subvolume", "snapshot", "metadata", "set", self
.volname
, subvolname
, snapshot
, key
, value
, group
)
5473 # remove metadata against specified key.
5475 self
._fs
_cmd
("subvolume", "snapshot", "metadata", "rm", self
.volname
, subvolname
, snapshot
, key
, group
)
5476 except CommandFailedError
:
5477 self
.fail("expected the 'fs subvolume snapshot metadata rm' command to succeed")
5479 # confirm key is removed by again fetching metadata
5481 self
._fs
_cmd
("subvolume", "snapshot", "metadata", "get", self
.volname
, subvolname
, snapshot
, key
, group
)
5482 except CommandFailedError
as e
:
5483 self
.assertEqual(e
.exitstatus
, errno
.ENOENT
)
5485 self
.fail("Expected ENOENT because key does not exist")
5487 # again remove metadata against already removed key with --force option.
5489 self
._fs
_cmd
("subvolume", "snapshot", "metadata", "rm", self
.volname
, subvolname
, snapshot
, key
, group
, "--force")
5490 except CommandFailedError
:
5491 self
.fail("expected the 'fs subvolume snapshot metadata rm' (with --force) command to succeed")
5493 self
._fs
_cmd
("subvolume", "snapshot", "rm", self
.volname
, subvolname
, snapshot
, group
)
5494 self
._fs
_cmd
("subvolume", "rm", self
.volname
, subvolname
, group
)
5495 self
._fs
_cmd
("subvolumegroup", "rm", self
.volname
, group
)
5497 # verify trash dir is clean.
5498 self
._wait
_for
_trash
_empty
()
5500 def test_subvolume_snapshot_metadata_after_snapshot_remove(self
):
5502 Verify metadata removal of subvolume snapshot after snapshot removal.
5504 subvolname
= self
._generate
_random
_subvolume
_name
()
5505 group
= self
._generate
_random
_group
_name
()
5506 snapshot
= self
._generate
_random
_snapshot
_name
()
5509 self
._fs
_cmd
("subvolumegroup", "create", self
.volname
, group
)
5511 # create subvolume in group.
5512 self
._fs
_cmd
("subvolume", "create", self
.volname
, subvolname
, group
)
5514 # snapshot subvolume
5515 self
._fs
_cmd
("subvolume", "snapshot", "create", self
.volname
, subvolname
, snapshot
, group
)
5517 # set metadata for snapshot.
5520 self
._fs
_cmd
("subvolume", "snapshot", "metadata", "set", self
.volname
, subvolname
, snapshot
, key
, value
, group
)
5522 # get value for specified key.
5523 ret
= self
._fs
_cmd
("subvolume", "snapshot", "metadata", "get", self
.volname
, subvolname
, snapshot
, key
, group
)
5525 # remove '\n' from returned value.
5526 ret
= ret
.strip('\n')
5528 # match received value with expected value.
5529 self
.assertEqual(value
, ret
)
5531 # remove subvolume snapshot.
5532 self
._fs
_cmd
("subvolume", "snapshot", "rm", self
.volname
, subvolname
, snapshot
, group
)
5534 # try to get metadata after removing snapshot.
5535 # Expecting error ENOENT with error message of snapshot does not exist
5536 cmd_ret
= self
.mgr_cluster
.mon_manager
.run_cluster_cmd(
5537 args
=["fs", "subvolume", "snapshot", "metadata", "get", self
.volname
, subvolname
, snapshot
, key
, group
],
5538 check_status
=False, stdout
=StringIO(), stderr
=StringIO())
5539 self
.assertEqual(cmd_ret
.returncode
, errno
.ENOENT
, "Expecting ENOENT error")
5540 self
.assertIn(f
"snapshot '{snapshot}' does not exist", cmd_ret
.stderr
.getvalue(),
5541 f
"Expecting message: snapshot '{snapshot}' does not exist ")
5543 # confirm metadata is removed by searching section name in .meta file
5544 meta_path
= os
.path
.join(".", "volumes", group
, subvolname
, ".meta")
5545 section_name
= "SNAP_METADATA_" + snapshot
5548 self
.mount_a
.run_shell(f
"sudo grep {section_name} {meta_path}", omit_sudo
=False)
5549 except CommandFailedError
as e
:
5550 self
.assertNotEqual(e
.exitstatus
, 0)
5552 self
.fail("Expected non-zero exist status because section should not exist")
5554 self
._fs
_cmd
("subvolume", "rm", self
.volname
, subvolname
, group
)
5555 self
._fs
_cmd
("subvolumegroup", "rm", self
.volname
, group
)
5557 # verify trash dir is clean.
5558 self
._wait
_for
_trash
_empty
()
    def test_clean_stale_subvolume_snapshot_metadata(self):
        """
        Validate cleaning of stale subvolume snapshot metadata.

        Simulates staleness by saving the subvolume's .meta config before the
        snapshot is removed and copying it back afterwards, then checks that
        any subsequent subvolume operation scrubs the leftover
        SNAP_METADATA_<snapshot> section.
        """
        subvolname = self._generate_random_subvolume_name()
        group = self._generate_random_group_name()
        snapshot = self._generate_random_snapshot_name()

        # create group.
        self._fs_cmd("subvolumegroup", "create", self.volname, group)

        # create subvolume in group.
        self._fs_cmd("subvolume", "create", self.volname, subvolname, group)

        # snapshot subvolume
        self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolname, snapshot, group)

        # set metadata for snapshot.
        # NOTE(review): the exact key/value literals were not visible in the
        # reviewed chunk — any non-empty strings exercise the code path; confirm
        # against the original file.
        key = "test_meta"
        value = "test_meta_value"
        try:
            self._fs_cmd("subvolume", "snapshot", "metadata", "set", self.volname, subvolname, snapshot, key, value, group)
        except CommandFailedError:
            self.fail("expected the 'fs subvolume snapshot metadata set' command to succeed")

        # save the subvolume config file.
        meta_path = os.path.join(".", "volumes", group, subvolname, ".meta")
        tmp_meta_path = os.path.join(".", "volumes", group, subvolname, ".meta.stale_snap_section")
        self.mount_a.run_shell(['sudo', 'cp', '-p', meta_path, tmp_meta_path], omit_sudo=False)

        # Delete snapshot, this would remove user snap metadata
        self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolname, snapshot, group)

        # Copy back saved subvolume config file. This would have stale snapshot metadata
        self.mount_a.run_shell(['sudo', 'cp', '-p', tmp_meta_path, meta_path], omit_sudo=False)

        # Verify that it has stale snapshot metadata
        section_name = "SNAP_METADATA_" + snapshot
        try:
            self.mount_a.run_shell(f"sudo grep {section_name} {meta_path}", omit_sudo=False)
        except CommandFailedError:
            self.fail("Expected grep cmd to succeed because stale snapshot metadata exist")

        # Do any subvolume operation to clean the stale snapshot metadata
        _ = json.loads(self._get_subvolume_info(self.volname, subvolname, group))

        # Verify that the stale snapshot metadata is cleaned: grep must now fail.
        try:
            self.mount_a.run_shell(f"sudo grep {section_name} {meta_path}", omit_sudo=False)
        except CommandFailedError as e:
            self.assertNotEqual(e.exitstatus, 0)
        else:
            self.fail("Expected non-zero exist status because section should not exist")

        self._fs_cmd("subvolume", "rm", self.volname, subvolname, group)
        self._fs_cmd("subvolumegroup", "rm", self.volname, group)

        # verify trash dir is clean.
        self._wait_for_trash_empty()
        # Clean tmp config file
        self.mount_a.run_shell(['sudo', 'rm', '-f', tmp_meta_path], omit_sudo=False)
5623 class TestSubvolumeSnapshotClones(TestVolumesHelper
):
5624 """ Tests for FS subvolume snapshot clone operations."""
    def test_clone_subvolume_info(self):
        # tests the 'fs subvolume info' command for a clone
        # NOTE(review): the tail of this list was not visible in the reviewed
        # chunk; "type"/"uid" reconstructed — confirm against the original file.
        subvol_md = ["atime", "bytes_pcent", "bytes_quota", "bytes_used", "created_at", "ctime",
                     "data_pool", "gid", "mode", "mon_addrs", "mtime", "path", "pool_namespace",
                     "type", "uid"]

        subvolume = self._generate_random_subvolume_name()
        snapshot = self._generate_random_snapshot_name()
        clone = self._generate_random_clone_name()

        # create subvolume
        self._fs_cmd("subvolume", "create", self.volname, subvolume, "--mode=777")

        # do some IO so the clone has data to copy
        self._do_subvolume_io(subvolume, number_of_files=1)

        # snapshot subvolume
        self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolume, snapshot)

        # schedule a clone
        self._fs_cmd("subvolume", "snapshot", "clone", self.volname, subvolume, snapshot, clone)

        # check clone status
        self._wait_for_clone_to_complete(clone)

        # remove source snapshot
        self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume, snapshot)

        # the clone's info must carry every expected metadata field and be typed "clone"
        subvol_info = json.loads(self._get_subvolume_info(self.volname, clone))
        if len(subvol_info) == 0:
            raise RuntimeError("Expected the 'fs subvolume info' command to list metadata of subvolume")
        for md in subvol_md:
            if md not in subvol_info.keys():
                raise RuntimeError("%s not present in the metadata of subvolume" % md)
        if subvol_info["type"] != "clone":
            raise RuntimeError("type should be set to clone")

        # remove subvolumes
        self._fs_cmd("subvolume", "rm", self.volname, subvolume)
        self._fs_cmd("subvolume", "rm", self.volname, clone)

        # verify trash dir is clean
        self._wait_for_trash_empty()
    def test_subvolume_snapshot_info_without_snapshot_clone(self):
        """
        Verify subvolume snapshot info output without cloning snapshot.
        If no clone is performed then path /volumes/_index/clone/{track_id}
        should not exist.
        """
        subvolume = self._generate_random_subvolume_name()
        snapshot = self._generate_random_snapshot_name()

        # create subvolume
        self._fs_cmd("subvolume", "create", self.volname, subvolume, "--mode=777")

        # snapshot subvolume
        self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolume, snapshot)

        # list snapshot info
        result = json.loads(self._fs_cmd("subvolume", "snapshot", "info", self.volname, subvolume, snapshot))

        # verify snapshot info: no pending clones, and the clone-related keys
        # must be absent entirely when nothing was ever cloned
        self.assertEqual(result['has_pending_clones'], "no")
        self.assertFalse('orphan_clones_count' in result)
        self.assertFalse('pending_clones' in result)

        # remove snapshot, subvolume, clone
        self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume, snapshot)
        self._fs_cmd("subvolume", "rm", self.volname, subvolume)

        # verify trash dir is clean
        self._wait_for_trash_empty()
    def test_subvolume_snapshot_info_if_no_clone_pending(self):
        """
        Verify subvolume snapshot info output if no clone is in pending state.
        """
        subvolume = self._generate_random_subvolume_name()
        snapshot = self._generate_random_snapshot_name()
        clone_list = [f'clone_{i}' for i in range(3)]

        # create subvolume
        self._fs_cmd("subvolume", "create", self.volname, subvolume, "--mode=777")

        # snapshot subvolume
        self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolume, snapshot)

        # schedule clones
        for clone in clone_list:
            self._fs_cmd("subvolume", "snapshot", "clone", self.volname, subvolume, snapshot, clone)

        # check clones status
        for clone in clone_list:
            self._wait_for_clone_to_complete(clone)

        # list snapshot info
        result = json.loads(self._fs_cmd("subvolume", "snapshot", "info", self.volname, subvolume, snapshot))

        # verify snapshot info: all clones completed, so the clone-related
        # keys must be absent
        self.assertEqual(result['has_pending_clones'], "no")
        self.assertFalse('orphan_clones_count' in result)
        self.assertFalse('pending_clones' in result)

        # remove snapshot, subvolume, clone
        self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume, snapshot)
        self._fs_cmd("subvolume", "rm", self.volname, subvolume)
        for clone in clone_list:
            self._fs_cmd("subvolume", "rm", self.volname, clone)

        # verify trash dir is clean
        self._wait_for_trash_empty()
    def test_subvolume_snapshot_info_if_clone_pending_for_no_group(self):
        """
        Verify subvolume snapshot info output if clones are in pending state.
        Clones are not specified for particular target_group. Hence target_group
        should not be in the output as we don't show _nogroup (default group)
        """
        subvolume = self._generate_random_subvolume_name()
        snapshot = self._generate_random_snapshot_name()
        clone_list = [f'clone_{i}' for i in range(3)]

        # create subvolume
        self._fs_cmd("subvolume", "create", self.volname, subvolume, "--mode=777")

        # snapshot subvolume
        self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolume, snapshot)

        # insert delay at the beginning of snapshot clone so the clones are
        # still pending when we query the snapshot info below
        self.config_set('mgr', 'mgr/volumes/snapshot_clone_delay', 5)

        # schedule clones
        for clone in clone_list:
            self._fs_cmd("subvolume", "snapshot", "clone", self.volname, subvolume, snapshot, clone)

        # list snapshot info
        result = json.loads(self._fs_cmd("subvolume", "snapshot", "info", self.volname, subvolume, snapshot))

        # verify snapshot info: pending entries carry only "name" (no
        # "target_group" key for the default group)
        expected_clone_list = []
        for clone in clone_list:
            expected_clone_list.append({"name": clone})
        self.assertEqual(result['has_pending_clones'], "yes")
        self.assertFalse('orphan_clones_count' in result)
        self.assertListEqual(result['pending_clones'], expected_clone_list)
        self.assertEqual(len(result['pending_clones']), 3)

        # check clones status
        for clone in clone_list:
            self._wait_for_clone_to_complete(clone)

        # remove snapshot, subvolume, clone
        self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume, snapshot)
        self._fs_cmd("subvolume", "rm", self.volname, subvolume)
        for clone in clone_list:
            self._fs_cmd("subvolume", "rm", self.volname, clone)

        # verify trash dir is clean
        self._wait_for_trash_empty()
    def test_subvolume_snapshot_info_if_clone_pending_for_target_group(self):
        """
        Verify subvolume snapshot info output if clones are in pending state.
        Clones are not specified for target_group.
        """
        subvolume = self._generate_random_subvolume_name()
        snapshot = self._generate_random_snapshot_name()
        clone = self._generate_random_clone_name()
        group = self._generate_random_group_name()
        target_group = self._generate_random_group_name()

        # create groups
        self._fs_cmd("subvolumegroup", "create", self.volname, group)
        self._fs_cmd("subvolumegroup", "create", self.volname, target_group)

        # create subvolume in group
        self._fs_cmd("subvolume", "create", self.volname, subvolume, group, "--mode=777")

        # snapshot subvolume
        self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolume, snapshot, group)

        # insert delay at the beginning of snapshot clone so it is still
        # pending when we query the snapshot info below
        self.config_set('mgr', 'mgr/volumes/snapshot_clone_delay', 5)

        # schedule a clone into an explicit (non-default) target group
        self._fs_cmd("subvolume", "snapshot", "clone", self.volname, subvolume, snapshot, clone,
                     "--group_name", group, "--target_group_name", target_group)

        # list snapshot info
        result = json.loads(self._fs_cmd("subvolume", "snapshot", "info", self.volname, subvolume, snapshot, "--group_name", group))

        # verify snapshot info: the pending entry must carry the explicit
        # target group alongside the clone name
        expected_clone_list = [{"name": clone, "target_group": target_group}]
        self.assertEqual(result['has_pending_clones'], "yes")
        self.assertFalse('orphan_clones_count' in result)
        self.assertListEqual(result['pending_clones'], expected_clone_list)
        self.assertEqual(len(result['pending_clones']), 1)

        # check clone status
        self._wait_for_clone_to_complete(clone, clone_group=target_group)

        # remove snapshot and subvolumes
        self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume, snapshot, group)
        self._fs_cmd("subvolume", "rm", self.volname, subvolume, group)
        self._fs_cmd("subvolume", "rm", self.volname, clone, target_group)

        # remove groups
        self._fs_cmd("subvolumegroup", "rm", self.volname, group)
        self._fs_cmd("subvolumegroup", "rm", self.volname, target_group)

        # verify trash dir is clean
        self._wait_for_trash_empty()
    def test_subvolume_snapshot_info_if_orphan_clone(self):
        """
        Verify subvolume snapshot info output if orphan clones exists.
        Orphan clones should not list under pending clones.
        orphan_clones_count should display correct count of orphan clones
        """
        subvolume = self._generate_random_subvolume_name()
        snapshot = self._generate_random_snapshot_name()
        clone_list = [f'clone_{i}' for i in range(3)]

        # create subvolume
        self._fs_cmd("subvolume", "create", self.volname, subvolume, "--mode=777")

        # snapshot subvolume
        self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolume, snapshot)

        # insert delay at the beginning of snapshot clone, long enough for the
        # track-file removal below to happen while clones are still pending
        self.config_set('mgr', 'mgr/volumes/snapshot_clone_delay', 15)

        # schedule clones
        for clone in clone_list:
            self._fs_cmd("subvolume", "snapshot", "clone", self.volname, subvolume, snapshot, clone)

        # remove track file for third clone to make it orphan
        meta_path = os.path.join(".", "volumes", "_nogroup", subvolume, ".meta")
        pending_clones_result = self.mount_a.run_shell(['sudo', 'grep', 'clone snaps', '-A3', meta_path], omit_sudo=False, stdout=StringIO(), stderr=StringIO())
        # grep -A3 output: header line, then one "track_id = clone" line per
        # clone; line index 3 is the third clone's entry
        third_clone_track_id = pending_clones_result.stdout.getvalue().splitlines()[3].split(" = ")[0]
        third_clone_track_path = os.path.join(".", "volumes", "_index", "clone", third_clone_track_id)
        self.mount_a.run_shell(f"sudo rm -f {third_clone_track_path}", omit_sudo=False)

        # list snapshot info
        result = json.loads(self._fs_cmd("subvolume", "snapshot", "info", self.volname, subvolume, snapshot))

        # verify snapshot info: two clones still pending, the orphaned one is
        # counted separately
        expected_clone_list = []
        for i in range(len(clone_list)-1):
            expected_clone_list.append({"name": clone_list[i]})
        self.assertEqual(result['has_pending_clones'], "yes")
        self.assertEqual(result['orphan_clones_count'], 1)
        self.assertListEqual(result['pending_clones'], expected_clone_list)
        self.assertEqual(len(result['pending_clones']), 2)

        # check clones status (only the two non-orphaned clones complete)
        for i in range(len(clone_list)-1):
            self._wait_for_clone_to_complete(clone_list[i])

        # list snapshot info after cloning completion
        res = json.loads(self._fs_cmd("subvolume", "snapshot", "info", self.volname, subvolume, snapshot))

        # verify snapshot info (has_pending_clones should be no)
        self.assertEqual(res['has_pending_clones'], "no")
    def test_non_clone_status(self):
        """'clone status' on a plain (non-cloned) subvolume must fail with ENOTSUP."""
        subvolume = self._generate_random_subvolume_name()

        # create subvolume
        self._fs_cmd("subvolume", "create", self.volname, subvolume)

        try:
            self._fs_cmd("clone", "status", self.volname, subvolume)
        except CommandFailedError as ce:
            if ce.exitstatus != errno.ENOTSUP:
                raise RuntimeError("invalid error code when fetching status of a non cloned subvolume")
        else:
            raise RuntimeError("expected fetching of clone status of a subvolume to fail")

        # remove subvolume
        self._fs_cmd("subvolume", "rm", self.volname, subvolume)

        # verify trash dir is clean
        self._wait_for_trash_empty()
    def test_subvolume_clone_inherit_snapshot_namespace_and_size(self):
        """A clone must inherit the pool namespace and size captured in the
        snapshot, not the source subvolume's current (later-updated) values."""
        subvolume = self._generate_random_subvolume_name()
        snapshot = self._generate_random_snapshot_name()
        clone = self._generate_random_clone_name()
        osize = self.DEFAULT_FILE_SIZE*1024*1024*12

        # create subvolume, in an isolated namespace with a specified size
        self._fs_cmd("subvolume", "create", self.volname, subvolume, "--namespace-isolated", "--size", str(osize), "--mode=777")

        # do some IO
        self._do_subvolume_io(subvolume, number_of_files=8)

        # snapshot subvolume
        self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolume, snapshot)

        # create a pool different from current subvolume pool
        subvol_path = self._get_subvolume_path(self.volname, subvolume)
        default_pool = self.mount_a.getfattr(subvol_path, "ceph.dir.layout.pool")
        new_pool = "new_pool"
        self.assertNotEqual(default_pool, new_pool)
        self.fs.add_data_pool(new_pool)

        # update source subvolume pool (after the snapshot was taken)
        self._do_subvolume_pool_and_namespace_update(subvolume, pool=new_pool, pool_namespace="")

        # schedule a clone, with NO --pool specification
        self._fs_cmd("subvolume", "snapshot", "clone", self.volname, subvolume, snapshot, clone)

        # check clone status
        self._wait_for_clone_to_complete(clone)

        # verify clone
        self._verify_clone(subvolume, snapshot, clone)

        # remove snapshot
        self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume, snapshot)

        # remove subvolumes
        self._fs_cmd("subvolume", "rm", self.volname, subvolume)
        self._fs_cmd("subvolume", "rm", self.volname, clone)

        # verify trash dir is clean
        self._wait_for_trash_empty()
    def test_subvolume_clone_inherit_quota_attrs(self):
        """A clone must inherit quota attributes (ceph.quota.max_files) from
        the source snapshot."""
        subvolume = self._generate_random_subvolume_name()
        snapshot = self._generate_random_snapshot_name()
        clone = self._generate_random_clone_name()
        osize = self.DEFAULT_FILE_SIZE*1024*1024*12

        # create subvolume with a specified size
        self._fs_cmd("subvolume", "create", self.volname, subvolume, "--mode=777", "--size", str(osize))

        # do some IO
        self._do_subvolume_io(subvolume, number_of_files=8)

        # get subvolume path
        subvolpath = self._get_subvolume_path(self.volname, subvolume)

        # set quota on number of files
        self.mount_a.setfattr(subvolpath, 'ceph.quota.max_files', "20", sudo=True)

        # snapshot subvolume
        self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolume, snapshot)

        # schedule a clone
        self._fs_cmd("subvolume", "snapshot", "clone", self.volname, subvolume, snapshot, clone)

        # check clone status
        self._wait_for_clone_to_complete(clone)

        # verify clone
        self._verify_clone(subvolume, snapshot, clone)

        # get subvolume path
        clonepath = self._get_subvolume_path(self.volname, clone)

        # verify quota max_files is inherited from source snapshot
        subvol_quota = self.mount_a.getfattr(subvolpath, "ceph.quota.max_files")
        clone_quota = self.mount_a.getfattr(clonepath, "ceph.quota.max_files")
        self.assertEqual(subvol_quota, clone_quota)

        # remove snapshot
        self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume, snapshot)

        # remove subvolumes
        self._fs_cmd("subvolume", "rm", self.volname, subvolume)
        self._fs_cmd("subvolume", "rm", self.volname, clone)

        # verify trash dir is clean
        self._wait_for_trash_empty()
    def test_subvolume_clone_in_progress_getpath(self):
        """'subvolume getpath' on an in-progress clone must fail with EAGAIN
        until the clone completes."""
        subvolume = self._generate_random_subvolume_name()
        snapshot = self._generate_random_snapshot_name()
        clone = self._generate_random_clone_name()

        # create subvolume
        self._fs_cmd("subvolume", "create", self.volname, subvolume, "--mode=777")

        # do some IO
        self._do_subvolume_io(subvolume, number_of_files=64)

        # snapshot subvolume
        self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolume, snapshot)

        # Insert delay at the beginning of snapshot clone so the clone is
        # still pending when we query its path below
        self.config_set('mgr', 'mgr/volumes/snapshot_clone_delay', 2)

        # schedule a clone
        self._fs_cmd("subvolume", "snapshot", "clone", self.volname, subvolume, snapshot, clone)

        # clone should not be accessible right now
        try:
            self._get_subvolume_path(self.volname, clone)
        except CommandFailedError as ce:
            if ce.exitstatus != errno.EAGAIN:
                raise RuntimeError("invalid error code when fetching path of an pending clone")
        else:
            raise RuntimeError("expected fetching path of an pending clone to fail")

        # check clone status
        self._wait_for_clone_to_complete(clone)

        # clone should be accessible now
        subvolpath = self._get_subvolume_path(self.volname, clone)
        self.assertNotEqual(subvolpath, None)

        # verify clone
        self._verify_clone(subvolume, snapshot, clone)

        # remove snapshot
        self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume, snapshot)

        # remove subvolumes
        self._fs_cmd("subvolume", "rm", self.volname, subvolume)
        self._fs_cmd("subvolume", "rm", self.volname, clone)

        # verify trash dir is clean
        self._wait_for_trash_empty()
    def test_subvolume_clone_in_progress_snapshot_rm(self):
        """The source snapshot of an in-progress clone must not be removable
        (EAGAIN) until the clone completes."""
        subvolume = self._generate_random_subvolume_name()
        snapshot = self._generate_random_snapshot_name()
        clone = self._generate_random_clone_name()

        # create subvolume
        self._fs_cmd("subvolume", "create", self.volname, subvolume, "--mode=777")

        # do some IO
        self._do_subvolume_io(subvolume, number_of_files=64)

        # snapshot subvolume
        self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolume, snapshot)

        # Insert delay at the beginning of snapshot clone so the clone is
        # still pending when we attempt the snapshot removal below
        self.config_set('mgr', 'mgr/volumes/snapshot_clone_delay', 2)

        # schedule a clone
        self._fs_cmd("subvolume", "snapshot", "clone", self.volname, subvolume, snapshot, clone)

        # snapshot should not be deletable now
        try:
            self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume, snapshot)
        except CommandFailedError as ce:
            self.assertEqual(ce.exitstatus, errno.EAGAIN, msg="invalid error code when removing source snapshot of a clone")
        else:
            self.fail("expected removing source snapshot of a clone to fail")

        # check clone status
        self._wait_for_clone_to_complete(clone)

        # clone should be accessible now
        subvolpath = self._get_subvolume_path(self.volname, clone)
        self.assertNotEqual(subvolpath, None)

        # verify clone
        self._verify_clone(subvolume, snapshot, clone)

        # remove snapshot
        self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume, snapshot)

        # remove subvolumes
        self._fs_cmd("subvolume", "rm", self.volname, subvolume)
        self._fs_cmd("subvolume", "rm", self.volname, clone)

        # verify trash dir is clean
        self._wait_for_trash_empty()
    def test_subvolume_clone_in_progress_source(self):
        """'clone status' of an in-progress clone must report its source
        volume/subvolume/snapshot (and no group for the default group)."""
        subvolume = self._generate_random_subvolume_name()
        snapshot = self._generate_random_snapshot_name()
        clone = self._generate_random_clone_name()

        # create subvolume
        self._fs_cmd("subvolume", "create", self.volname, subvolume, "--mode=777")

        # do some IO
        self._do_subvolume_io(subvolume, number_of_files=64)

        # snapshot subvolume
        self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolume, snapshot)

        # Insert delay at the beginning of snapshot clone so the clone is
        # still in progress when we query its status below
        self.config_set('mgr', 'mgr/volumes/snapshot_clone_delay', 2)

        # schedule a clone
        self._fs_cmd("subvolume", "snapshot", "clone", self.volname, subvolume, snapshot, clone)

        # verify clone source
        result = json.loads(self._fs_cmd("clone", "status", self.volname, clone))
        source = result['status']['source']
        self.assertEqual(source['volume'], self.volname)
        self.assertEqual(source['subvolume'], subvolume)
        self.assertEqual(source.get('group', None), None)
        self.assertEqual(source['snapshot'], snapshot)

        # check clone status
        self._wait_for_clone_to_complete(clone)

        # clone should be accessible now
        subvolpath = self._get_subvolume_path(self.volname, clone)
        self.assertNotEqual(subvolpath, None)

        # verify clone
        self._verify_clone(subvolume, snapshot, clone)

        # remove snapshot
        self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume, snapshot)

        # remove subvolumes
        self._fs_cmd("subvolume", "rm", self.volname, subvolume)
        self._fs_cmd("subvolume", "rm", self.volname, clone)

        # verify trash dir is clean
        self._wait_for_trash_empty()
    def test_subvolume_clone_retain_snapshot_with_snapshots(self):
        """
        retain snapshots of a cloned subvolume and check disallowed operations
        """
        subvolume = self._generate_random_subvolume_name()
        snapshot1, snapshot2 = self._generate_random_snapshot_name(2)
        clone = self._generate_random_clone_name()

        # create subvolume
        self._fs_cmd("subvolume", "create", self.volname, subvolume, "--mode=777")

        # store path for clone verification (needed because the source
        # subvolume is removed before the clone is verified)
        subvol1_path = self._get_subvolume_path(self.volname, subvolume)

        # do some IO
        self._do_subvolume_io(subvolume, number_of_files=16)

        # snapshot subvolume
        self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolume, snapshot1)

        # remove with snapshot retention
        self._fs_cmd("subvolume", "rm", self.volname, subvolume, "--retain-snapshots")

        # clone retained subvolume snapshot
        self._fs_cmd("subvolume", "snapshot", "clone", self.volname, subvolume, snapshot1, clone)

        # check clone status
        self._wait_for_clone_to_complete(clone)

        # verify clone
        self._verify_clone(subvolume, snapshot1, clone, subvol_path=subvol1_path)

        # create a snapshot on the clone
        self._fs_cmd("subvolume", "snapshot", "create", self.volname, clone, snapshot2)

        # remove the clone with snapshot retention
        self._fs_cmd("subvolume", "rm", self.volname, clone, "--retain-snapshots")

        # list snapshots of the retained clone
        clonesnapshotls = json.loads(self._fs_cmd('subvolume', 'snapshot', 'ls', self.volname, clone))
        self.assertEqual(len(clonesnapshotls), 1, "Expected the 'fs subvolume snapshot ls' command to list the"
                         " created subvolume snapshots")
        snapshotnames = [snapshot['name'] for snapshot in clonesnapshotls]
        for snap in [snapshot2]:
            self.assertIn(snap, snapshotnames, "Missing snapshot '{0}' in snapshot list".format(snap))

        ## check disallowed operations on retained clone

        # clone status must fail with ENOENT on a snapshot-retained clone
        try:
            self._fs_cmd("clone", "status", self.volname, clone)
        except CommandFailedError as ce:
            self.assertEqual(ce.exitstatus, errno.ENOENT, "invalid error code on clone status of clone with retained snapshots")
        else:
            self.fail("expected clone status of clone with retained snapshots to fail")

        # clone cancel must likewise fail with ENOENT
        try:
            self._fs_cmd("clone", "cancel", self.volname, clone)
        except CommandFailedError as ce:
            self.assertEqual(ce.exitstatus, errno.ENOENT, "invalid error code on clone cancel of clone with retained snapshots")
        else:
            self.fail("expected clone cancel of clone with retained snapshots to fail")

        # remove snapshots (removes subvolumes as all are in retained state)
        self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume, snapshot1)
        self._fs_cmd("subvolume", "snapshot", "rm", self.volname, clone, snapshot2)

        # verify list subvolumes returns an empty list
        subvolumels = json.loads(self._fs_cmd('subvolume', 'ls', self.volname))
        self.assertEqual(len(subvolumels), 0)

        # verify trash dir is clean
        self._wait_for_trash_empty()
    def test_subvolume_retain_snapshot_clone(self):
        """
        clone a snapshot from a snapshot retained subvolume
        """
        subvolume = self._generate_random_subvolume_name()
        snapshot = self._generate_random_snapshot_name()
        clone = self._generate_random_clone_name()

        # create subvolume
        self._fs_cmd("subvolume", "create", self.volname, subvolume, "--mode=777")

        # store path for clone verification (the source subvolume is removed
        # below, so its path must be captured now)
        subvol_path = self._get_subvolume_path(self.volname, subvolume)

        # do some IO
        self._do_subvolume_io(subvolume, number_of_files=16)

        # snapshot subvolume
        self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolume, snapshot)

        # remove with snapshot retention
        self._fs_cmd("subvolume", "rm", self.volname, subvolume, "--retain-snapshots")

        # clone retained subvolume snapshot
        self._fs_cmd("subvolume", "snapshot", "clone", self.volname, subvolume, snapshot, clone)

        # check clone status
        self._wait_for_clone_to_complete(clone)

        # verify clone
        self._verify_clone(subvolume, snapshot, clone, subvol_path=subvol_path)

        # remove snapshots (removes retained volume)
        self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume, snapshot)

        # remove subvolume
        self._fs_cmd("subvolume", "rm", self.volname, clone)

        # verify list subvolumes returns an empty list
        subvolumels = json.loads(self._fs_cmd('subvolume', 'ls', self.volname))
        self.assertEqual(len(subvolumels), 0)

        # verify trash dir is clean
        self._wait_for_trash_empty()
    def test_subvolume_retain_snapshot_clone_from_newer_snapshot(self):
        """
        clone a subvolume from recreated subvolume's latest snapshot
        """
        subvolume = self._generate_random_subvolume_name()
        snapshot1, snapshot2 = self._generate_random_snapshot_name(2)
        clone = self._generate_random_clone_name(1)

        # create subvolume
        self._fs_cmd("subvolume", "create", self.volname, subvolume, "--mode=777")

        # do some IO
        self._do_subvolume_io(subvolume, number_of_files=16)

        # snapshot subvolume
        self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolume, snapshot1)

        # remove with snapshot retention
        self._fs_cmd("subvolume", "rm", self.volname, subvolume, "--retain-snapshots")

        # recreate subvolume
        self._fs_cmd("subvolume", "create", self.volname, subvolume, "--mode=777")

        # get and store path for clone verification (the recreated subvolume
        # is removed again below)
        subvol2_path = self._get_subvolume_path(self.volname, subvolume)

        # do some IO
        self._do_subvolume_io(subvolume, number_of_files=16)

        # snapshot newer subvolume
        self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolume, snapshot2)

        # remove with snapshot retention
        self._fs_cmd("subvolume", "rm", self.volname, subvolume, "--retain-snapshots")

        # clone retained subvolume's newer snapshot
        self._fs_cmd("subvolume", "snapshot", "clone", self.volname, subvolume, snapshot2, clone)

        # check clone status
        self._wait_for_clone_to_complete(clone)

        # verify clone against the recreated (newer) subvolume's data
        self._verify_clone(subvolume, snapshot2, clone, subvol_path=subvol2_path)

        # remove snapshots (removes retained volume)
        self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume, snapshot1)
        self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume, snapshot2)

        # remove subvolume
        self._fs_cmd("subvolume", "rm", self.volname, clone)

        # verify list subvolumes returns an empty list
        subvolumels = json.loads(self._fs_cmd('subvolume', 'ls', self.volname))
        self.assertEqual(len(subvolumels), 0)

        # verify trash dir is clean
        self._wait_for_trash_empty()
    def test_subvolume_retain_snapshot_recreate(self):
        """
        recreate a subvolume from one of its retained snapshots
        """
        subvolume = self._generate_random_subvolume_name()
        snapshot = self._generate_random_snapshot_name()

        # create subvolume
        self._fs_cmd("subvolume", "create", self.volname, subvolume, "--mode=777")

        # store path for clone verification (the subvolume is removed below)
        subvol_path = self._get_subvolume_path(self.volname, subvolume)

        # do some IO
        self._do_subvolume_io(subvolume, number_of_files=16)

        # snapshot subvolume
        self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolume, snapshot)

        # remove with snapshot retention
        self._fs_cmd("subvolume", "rm", self.volname, subvolume, "--retain-snapshots")

        # recreate retained subvolume using its own snapshot to clone
        # (clone target name == source subvolume name)
        self._fs_cmd("subvolume", "snapshot", "clone", self.volname, subvolume, snapshot, subvolume)

        # check clone status
        self._wait_for_clone_to_complete(subvolume)

        # verify clone
        self._verify_clone(subvolume, snapshot, subvolume, subvol_path=subvol_path)

        # remove snapshot
        self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume, snapshot)

        # remove subvolume
        self._fs_cmd("subvolume", "rm", self.volname, subvolume)

        # verify list subvolumes returns an empty list
        subvolumels = json.loads(self._fs_cmd('subvolume', 'ls', self.volname))
        self.assertEqual(len(subvolumels), 0)

        # verify trash dir is clean
        self._wait_for_trash_empty()
    def test_subvolume_retain_snapshot_trash_busy_recreate_clone(self):
        """
        ensure retained clone recreate fails if its trash is not yet purged
        """
        subvolume = self._generate_random_subvolume_name()
        snapshot = self._generate_random_snapshot_name()
        clone = self._generate_random_clone_name()

        # create subvolume
        self._fs_cmd("subvolume", "create", self.volname, subvolume)

        # snapshot subvolume
        self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolume, snapshot)

        # clone subvolume snapshot
        self._fs_cmd("subvolume", "snapshot", "clone", self.volname, subvolume, snapshot, clone)

        # check clone status
        self._wait_for_clone_to_complete(clone)

        # snapshot the clone (same snapshot name, different subvolume)
        self._fs_cmd("subvolume", "snapshot", "create", self.volname, clone, snapshot)

        # remove clone with snapshot retention
        self._fs_cmd("subvolume", "rm", self.volname, clone, "--retain-snapshots")

        # fake a trash entry so the clone's trash appears unpurged
        self._update_fake_trash(clone)

        # clone subvolume snapshot (recreate) — must fail with EAGAIN while
        # the trash entry exists
        try:
            self._fs_cmd("subvolume", "snapshot", "clone", self.volname, subvolume, snapshot, clone)
        except CommandFailedError as ce:
            self.assertEqual(ce.exitstatus, errno.EAGAIN, "invalid error code on recreate of clone with purge pending")
        else:
            self.fail("expected recreate of clone with purge pending to fail")

        # clear fake trash entry
        self._update_fake_trash(clone, create=False)

        # recreate subvolume
        self._fs_cmd("subvolume", "snapshot", "clone", self.volname, subvolume, snapshot, clone)

        # check clone status
        self._wait_for_clone_to_complete(clone)

        # remove snapshots
        self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume, snapshot)
        self._fs_cmd("subvolume", "snapshot", "rm", self.volname, clone, snapshot)

        # remove subvolumes
        self._fs_cmd("subvolume", "rm", self.volname, subvolume)
        self._fs_cmd("subvolume", "rm", self.volname, clone)

        # verify trash dir is clean
        self._wait_for_trash_empty()
6428 def test_subvolume_snapshot_attr_clone(self
):
6429 subvolume
= self
._generate
_random
_subvolume
_name
()
6430 snapshot
= self
._generate
_random
_snapshot
_name
()
6431 clone
= self
._generate
_random
_clone
_name
()
6434 self
._fs
_cmd
("subvolume", "create", self
.volname
, subvolume
, "--mode=777")
6437 self
._do
_subvolume
_io
_mixed
(subvolume
)
6439 # snapshot subvolume
6440 self
._fs
_cmd
("subvolume", "snapshot", "create", self
.volname
, subvolume
, snapshot
)
6443 self
._fs
_cmd
("subvolume", "snapshot", "clone", self
.volname
, subvolume
, snapshot
, clone
)
6445 # check clone status
6446 self
._wait
_for
_clone
_to
_complete
(clone
)
6449 self
._verify
_clone
(subvolume
, snapshot
, clone
)
6452 self
._fs
_cmd
("subvolume", "snapshot", "rm", self
.volname
, subvolume
, snapshot
)
6455 self
._fs
_cmd
("subvolume", "rm", self
.volname
, subvolume
)
6456 self
._fs
_cmd
("subvolume", "rm", self
.volname
, clone
)
6458 # verify trash dir is clean
6459 self
._wait
_for
_trash
_empty
()
6461 def test_clone_failure_status_pending_in_progress_complete(self
):
6463 ensure failure status is not shown when clone is not in failed/cancelled state
6465 subvolume
= self
._generate
_random
_subvolume
_name
()
6466 snapshot
= self
._generate
_random
_snapshot
_name
()
6467 clone1
= self
._generate
_random
_clone
_name
()
6470 self
._fs
_cmd
("subvolume", "create", self
.volname
, subvolume
, "--mode=777")
6473 self
._do
_subvolume
_io
(subvolume
, number_of_files
=200)
6475 # snapshot subvolume
6476 self
._fs
_cmd
("subvolume", "snapshot", "create", self
.volname
, subvolume
, snapshot
)
6478 # Insert delay at the beginning of snapshot clone
6479 self
.config_set('mgr', 'mgr/volumes/snapshot_clone_delay', 5)
6482 self
._fs
_cmd
("subvolume", "snapshot", "clone", self
.volname
, subvolume
, snapshot
, clone1
)
6484 # pending clone shouldn't show failure status
6485 clone1_result
= self
._get
_clone
_status
(clone1
)
6487 clone1_result
["status"]["failure"]["errno"]
6488 except KeyError as e
:
6489 self
.assertEqual(str(e
), "'failure'")
6491 self
.fail("clone status shouldn't show failure for pending clone")
6493 # check clone1 to be in-progress
6494 self
._wait
_for
_clone
_to
_be
_in
_progress
(clone1
)
6496 # in-progress clone1 shouldn't show failure status
6497 clone1_result
= self
._get
_clone
_status
(clone1
)
6499 clone1_result
["status"]["failure"]["errno"]
6500 except KeyError as e
:
6501 self
.assertEqual(str(e
), "'failure'")
6503 self
.fail("clone status shouldn't show failure for in-progress clone")
6505 # wait for clone1 to complete
6506 self
._wait
_for
_clone
_to
_complete
(clone1
)
6508 # complete clone1 shouldn't show failure status
6509 clone1_result
= self
._get
_clone
_status
(clone1
)
6511 clone1_result
["status"]["failure"]["errno"]
6512 except KeyError as e
:
6513 self
.assertEqual(str(e
), "'failure'")
6515 self
.fail("clone status shouldn't show failure for complete clone")
6518 self
._fs
_cmd
("subvolume", "snapshot", "rm", self
.volname
, subvolume
, snapshot
)
6521 self
._fs
_cmd
("subvolume", "rm", self
.volname
, subvolume
)
6522 self
._fs
_cmd
("subvolume", "rm", self
.volname
, clone1
)
6524 # verify trash dir is clean
6525 self
._wait
_for
_trash
_empty
()
    def test_clone_failure_status_failed(self):
        """
        ensure failure status is shown when clone is in failed state and validate the reason
        """
        subvolume = self._generate_random_subvolume_name()
        snapshot = self._generate_random_snapshot_name()
        clone1 = self._generate_random_clone_name()

        # create subvolume
        self._fs_cmd("subvolume", "create", self.volname, subvolume, "--mode=777")

        # do some IO
        self._do_subvolume_io(subvolume, number_of_files=200)

        # snapshot subvolume
        self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolume, snapshot)

        # Insert delay at the beginning of snapshot clone
        self.config_set('mgr', 'mgr/volumes/snapshot_clone_delay', 5)

        # schedule a clone1
        self._fs_cmd("subvolume", "snapshot", "clone", self.volname, subvolume, snapshot, clone1)

        # remove snapshot from backend to force the clone failure.
        snappath = os.path.join(".", "volumes", "_nogroup", subvolume, ".snap", snapshot)
        self.mount_a.run_shell(['sudo', 'rmdir', snappath], omit_sudo=False)

        # wait for clone1 to fail.
        self._wait_for_clone_to_fail(clone1)

        # check clone1 status: errno "2" is ENOENT (missing snapshot)
        clone1_result = self._get_clone_status(clone1)
        self.assertEqual(clone1_result["status"]["state"], "failed")
        self.assertEqual(clone1_result["status"]["failure"]["errno"], "2")
        self.assertEqual(clone1_result["status"]["failure"]["error_msg"], "snapshot '{0}' does not exist".format(snapshot))

        # clone removal should succeed after failure, remove clone1
        self._fs_cmd("subvolume", "rm", self.volname, clone1, "--force")

        # remove subvolume
        self._fs_cmd("subvolume", "rm", self.volname, subvolume)

        # verify trash dir is clean
        self._wait_for_trash_empty()
6572 def test_clone_failure_status_pending_cancelled(self
):
6574 ensure failure status is shown when clone is cancelled during pending state and validate the reason
6576 subvolume
= self
._generate
_random
_subvolume
_name
()
6577 snapshot
= self
._generate
_random
_snapshot
_name
()
6578 clone1
= self
._generate
_random
_clone
_name
()
6581 self
._fs
_cmd
("subvolume", "create", self
.volname
, subvolume
, "--mode=777")
6584 self
._do
_subvolume
_io
(subvolume
, number_of_files
=200)
6586 # snapshot subvolume
6587 self
._fs
_cmd
("subvolume", "snapshot", "create", self
.volname
, subvolume
, snapshot
)
6589 # Insert delay at the beginning of snapshot clone
6590 self
.config_set('mgr', 'mgr/volumes/snapshot_clone_delay', 5)
6593 self
._fs
_cmd
("subvolume", "snapshot", "clone", self
.volname
, subvolume
, snapshot
, clone1
)
6595 # cancel pending clone1
6596 self
._fs
_cmd
("clone", "cancel", self
.volname
, clone1
)
6598 # check clone1 status
6599 clone1_result
= self
._get
_clone
_status
(clone1
)
6600 self
.assertEqual(clone1_result
["status"]["state"], "canceled")
6601 self
.assertEqual(clone1_result
["status"]["failure"]["errno"], "4")
6602 self
.assertEqual(clone1_result
["status"]["failure"]["error_msg"], "user interrupted clone operation")
6604 # clone removal should succeed with force after cancelled, remove clone1
6605 self
._fs
_cmd
("subvolume", "rm", self
.volname
, clone1
, "--force")
6608 self
._fs
_cmd
("subvolume", "snapshot", "rm", self
.volname
, subvolume
, snapshot
)
6611 self
._fs
_cmd
("subvolume", "rm", self
.volname
, subvolume
)
6613 # verify trash dir is clean
6614 self
._wait
_for
_trash
_empty
()
    def test_clone_failure_status_in_progress_cancelled(self):
        """
        ensure failure status is shown when clone is cancelled during in-progress state and validate the reason
        """
        subvolume = self._generate_random_subvolume_name()
        snapshot = self._generate_random_snapshot_name()
        clone1 = self._generate_random_clone_name()

        # create subvolume
        self._fs_cmd("subvolume", "create", self.volname, subvolume, "--mode=777")

        # do some IO
        self._do_subvolume_io(subvolume, number_of_files=200)

        # snapshot subvolume
        self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolume, snapshot)

        # Insert delay at the beginning of snapshot clone
        self.config_set('mgr', 'mgr/volumes/snapshot_clone_delay', 5)

        # schedule a clone1
        self._fs_cmd("subvolume", "snapshot", "clone", self.volname, subvolume, snapshot, clone1)

        # wait for clone1 to be in-progress
        self._wait_for_clone_to_be_in_progress(clone1)

        # cancel in-progress clone1
        self._fs_cmd("clone", "cancel", self.volname, clone1)

        # check clone1 status: errno "4" with a user-interrupt message
        clone1_result = self._get_clone_status(clone1)
        self.assertEqual(clone1_result["status"]["state"], "canceled")
        self.assertEqual(clone1_result["status"]["failure"]["errno"], "4")
        self.assertEqual(clone1_result["status"]["failure"]["error_msg"], "user interrupted clone operation")

        # clone removal should succeed with force after cancelled, remove clone1
        self._fs_cmd("subvolume", "rm", self.volname, clone1, "--force")

        # remove snapshot
        self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume, snapshot)

        # remove subvolume
        self._fs_cmd("subvolume", "rm", self.volname, subvolume)

        # verify trash dir is clean
        self._wait_for_trash_empty()
6663 def test_subvolume_snapshot_clone(self
):
6664 subvolume
= self
._generate
_random
_subvolume
_name
()
6665 snapshot
= self
._generate
_random
_snapshot
_name
()
6666 clone
= self
._generate
_random
_clone
_name
()
6669 self
._fs
_cmd
("subvolume", "create", self
.volname
, subvolume
, "--mode=777")
6672 self
._do
_subvolume
_io
(subvolume
, number_of_files
=64)
6674 # snapshot subvolume
6675 self
._fs
_cmd
("subvolume", "snapshot", "create", self
.volname
, subvolume
, snapshot
)
6678 self
._fs
_cmd
("subvolume", "snapshot", "clone", self
.volname
, subvolume
, snapshot
, clone
)
6680 # check clone status
6681 self
._wait
_for
_clone
_to
_complete
(clone
)
6684 self
._verify
_clone
(subvolume
, snapshot
, clone
)
6687 self
._fs
_cmd
("subvolume", "snapshot", "rm", self
.volname
, subvolume
, snapshot
)
6690 self
._fs
_cmd
("subvolume", "rm", self
.volname
, subvolume
)
6691 self
._fs
_cmd
("subvolume", "rm", self
.volname
, clone
)
6693 # verify trash dir is clean
6694 self
._wait
_for
_trash
_empty
()
    def test_subvolume_snapshot_clone_quota_exceeded(self):
        """Clone a snapshot of a subvolume whose quota has been (mostly) exceeded."""
        subvolume = self._generate_random_subvolume_name()
        snapshot = self._generate_random_snapshot_name()
        clone = self._generate_random_clone_name()

        # create subvolume with 20MB quota
        osize = self.DEFAULT_FILE_SIZE*1024*1024*20
        self._fs_cmd("subvolume", "create", self.volname, subvolume,"--mode=777", "--size", str(osize))

        # do IO, write 50 files of 1MB each to exceed quota. This mostly succeeds as quota enforcement takes time.
        try:
            self._do_subvolume_io(subvolume, number_of_files=50)
        except CommandFailedError:
            # ignore quota enforcement error -- deliberate best-effort IO
            pass

        # snapshot subvolume
        self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolume, snapshot)

        # schedule a clone
        self._fs_cmd("subvolume", "snapshot", "clone", self.volname, subvolume, snapshot, clone)

        # check clone status
        self._wait_for_clone_to_complete(clone)

        # verify clone
        self._verify_clone(subvolume, snapshot, clone)

        # remove snapshot
        self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume, snapshot)

        # remove subvolumes
        self._fs_cmd("subvolume", "rm", self.volname, subvolume)
        self._fs_cmd("subvolume", "rm", self.volname, clone)

        # verify trash dir is clean
        self._wait_for_trash_empty()
    def test_subvolume_snapshot_in_complete_clone_rm(self):
        """
        Validates the removal of clone when it is not in 'complete|cancelled|failed' state.
        The forceful removal of subvolume clone succeeds only if it's in any of the
        'complete|cancelled|failed' states. It fails with EAGAIN in any other states.
        """
        subvolume = self._generate_random_subvolume_name()
        snapshot = self._generate_random_snapshot_name()
        clone = self._generate_random_clone_name()

        # create subvolume
        self._fs_cmd("subvolume", "create", self.volname, subvolume, "--mode=777")

        # do some IO
        self._do_subvolume_io(subvolume, number_of_files=64)

        # snapshot subvolume
        self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolume, snapshot)

        # Insert delay at the beginning of snapshot clone
        self.config_set('mgr', 'mgr/volumes/snapshot_clone_delay', 2)

        # schedule a clone
        self._fs_cmd("subvolume", "snapshot", "clone", self.volname, subvolume, snapshot, clone)

        # Use --force since clone is not complete. Returns EAGAIN as clone is not either complete or cancelled.
        try:
            self._fs_cmd("subvolume", "rm", self.volname, clone, "--force")
        except CommandFailedError as ce:
            if ce.exitstatus != errno.EAGAIN:
                raise RuntimeError("invalid error code when trying to remove failed clone")
        else:
            raise RuntimeError("expected error when removing a failed clone")

        # cancel on-going clone
        self._fs_cmd("clone", "cancel", self.volname, clone)

        # verify canceled state
        self._check_clone_canceled(clone)

        # clone removal should succeed after cancel
        self._fs_cmd("subvolume", "rm", self.volname, clone, "--force")

        # remove snapshot
        self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume, snapshot)

        # remove subvolume
        self._fs_cmd("subvolume", "rm", self.volname, subvolume)

        # verify trash dir is clean
        self._wait_for_trash_empty()
6787 def test_subvolume_snapshot_clone_retain_suid_guid(self
):
6788 subvolume
= self
._generate
_random
_subvolume
_name
()
6789 snapshot
= self
._generate
_random
_snapshot
_name
()
6790 clone
= self
._generate
_random
_clone
_name
()
6793 self
._fs
_cmd
("subvolume", "create", self
.volname
, subvolume
, "--mode=777")
6795 # Create a file with suid, guid bits set along with executable bit.
6796 args
= ["subvolume", "getpath", self
.volname
, subvolume
]
6798 subvolpath
= self
._fs
_cmd
(*args
)
6799 self
.assertNotEqual(subvolpath
, None)
6800 subvolpath
= subvolpath
[1:].rstrip() # remove "/" prefix and any trailing newline
6802 file_path
= subvolpath
6803 file_path
= os
.path
.join(subvolpath
, "test_suid_file")
6804 self
.mount_a
.run_shell(["touch", file_path
])
6805 self
.mount_a
.run_shell(["chmod", "u+sx,g+sx", file_path
])
6807 # snapshot subvolume
6808 self
._fs
_cmd
("subvolume", "snapshot", "create", self
.volname
, subvolume
, snapshot
)
6811 self
._fs
_cmd
("subvolume", "snapshot", "clone", self
.volname
, subvolume
, snapshot
, clone
)
6813 # check clone status
6814 self
._wait
_for
_clone
_to
_complete
(clone
)
6817 self
._verify
_clone
(subvolume
, snapshot
, clone
)
6820 self
._fs
_cmd
("subvolume", "snapshot", "rm", self
.volname
, subvolume
, snapshot
)
6823 self
._fs
_cmd
("subvolume", "rm", self
.volname
, subvolume
)
6824 self
._fs
_cmd
("subvolume", "rm", self
.volname
, clone
)
6826 # verify trash dir is clean
6827 self
._wait
_for
_trash
_empty
()
    def test_subvolume_snapshot_clone_and_reclone(self):
        """Clone a snapshot, then snapshot the clone and clone that again."""
        subvolume = self._generate_random_subvolume_name()
        snapshot = self._generate_random_snapshot_name()
        clone1, clone2 = self._generate_random_clone_name(2)

        # create subvolume
        self._fs_cmd("subvolume", "create", self.volname, subvolume, "--mode=777")

        # do some IO
        self._do_subvolume_io(subvolume, number_of_files=32)

        # snapshot subvolume
        self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolume, snapshot)

        # schedule a clone
        self._fs_cmd("subvolume", "snapshot", "clone", self.volname, subvolume, snapshot, clone1)

        # check clone status
        self._wait_for_clone_to_complete(clone1)

        # verify clone
        self._verify_clone(subvolume, snapshot, clone1)

        # remove snapshot
        self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume, snapshot)

        # now the clone is just like a normal subvolume -- snapshot the clone and fork
        # another clone. before that do some IO so it can be differentiated.
        self._do_subvolume_io(clone1, create_dir="data", number_of_files=32)

        # snapshot clone -- use same snap name
        self._fs_cmd("subvolume", "snapshot", "create", self.volname, clone1, snapshot)

        # schedule a clone of the clone
        self._fs_cmd("subvolume", "snapshot", "clone", self.volname, clone1, snapshot, clone2)

        # check clone status
        self._wait_for_clone_to_complete(clone2)

        # verify clone
        self._verify_clone(clone1, snapshot, clone2)

        # remove snapshot
        self._fs_cmd("subvolume", "snapshot", "rm", self.volname, clone1, snapshot)

        # remove subvolumes
        self._fs_cmd("subvolume", "rm", self.volname, subvolume)
        self._fs_cmd("subvolume", "rm", self.volname, clone1)
        self._fs_cmd("subvolume", "rm", self.volname, clone2)

        # verify trash dir is clean
        self._wait_for_trash_empty()
6882 def test_subvolume_snapshot_clone_cancel_in_progress(self
):
6883 subvolume
= self
._generate
_random
_subvolume
_name
()
6884 snapshot
= self
._generate
_random
_snapshot
_name
()
6885 clone
= self
._generate
_random
_clone
_name
()
6888 self
._fs
_cmd
("subvolume", "create", self
.volname
, subvolume
, "--mode=777")
6891 self
._do
_subvolume
_io
(subvolume
, number_of_files
=128)
6893 # snapshot subvolume
6894 self
._fs
_cmd
("subvolume", "snapshot", "create", self
.volname
, subvolume
, snapshot
)
6896 # Insert delay at the beginning of snapshot clone
6897 self
.config_set('mgr', 'mgr/volumes/snapshot_clone_delay', 2)
6900 self
._fs
_cmd
("subvolume", "snapshot", "clone", self
.volname
, subvolume
, snapshot
, clone
)
6902 # cancel on-going clone
6903 self
._fs
_cmd
("clone", "cancel", self
.volname
, clone
)
6905 # verify canceled state
6906 self
._check
_clone
_canceled
(clone
)
6909 self
._fs
_cmd
("subvolume", "snapshot", "rm", self
.volname
, subvolume
, snapshot
)
6912 self
._fs
_cmd
("subvolume", "rm", self
.volname
, subvolume
)
6913 self
._fs
_cmd
("subvolume", "rm", self
.volname
, clone
, "--force")
6915 # verify trash dir is clean
6916 self
._wait
_for
_trash
_empty
()
    def test_subvolume_snapshot_clone_cancel_pending(self):
        """
        this test is a bit more involved compared to canceling an in-progress clone.
        we'd need to ensure that a to-be canceled clone has still not been picked up
        by cloner threads. exploit the fact that clones are picked up in an FCFS
        fashion and there are four (4) cloner threads by default. When the number of
        cloner threads increase, this test _may_ start tripping -- so, the number of
        clone operations would need to be jacked up.
        """
        # default number of clone threads
        NR_THREADS = 4
        # good enough for 4 threads
        NR_CLONES = 5
        # yeh, 1gig -- we need the clone to run for sometime
        FILE_SIZE_MB = 1024

        subvolume = self._generate_random_subvolume_name()
        snapshot = self._generate_random_snapshot_name()
        clones = self._generate_random_clone_name(NR_CLONES)

        # create subvolume
        self._fs_cmd("subvolume", "create", self.volname, subvolume, "--mode=777")

        # do some IO
        self._do_subvolume_io(subvolume, number_of_files=4, file_size=FILE_SIZE_MB)

        # snapshot subvolume
        self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolume, snapshot)

        # schedule all clones; the first NR_THREADS get picked up immediately,
        # the remainder stay pending
        for clone in clones:
            self._fs_cmd("subvolume", "snapshot", "clone", self.volname, subvolume, snapshot, clone)

        to_wait = clones[0:NR_THREADS]
        to_cancel = clones[NR_THREADS:]

        # cancel pending clones and verify
        for clone in to_cancel:
            status = json.loads(self._fs_cmd("clone", "status", self.volname, clone))
            self.assertEqual(status["status"]["state"], "pending")
            self._fs_cmd("clone", "cancel", self.volname, clone)
            self._check_clone_canceled(clone)

        # let's cancel on-going clones. handle the case where some of the clones
        # _just_ completed (cancel then returns EINVAL)
        for clone in list(to_wait):
            try:
                self._fs_cmd("clone", "cancel", self.volname, clone)
                to_cancel.append(clone)
                to_wait.remove(clone)
            except CommandFailedError as ce:
                if ce.exitstatus != errno.EINVAL:
                    raise RuntimeError("invalid error code when cancelling on-going clone")

        # remove snapshot
        self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume, snapshot)

        # remove subvolumes; cancelled clones need --force
        self._fs_cmd("subvolume", "rm", self.volname, subvolume)
        for clone in to_wait:
            self._fs_cmd("subvolume", "rm", self.volname, clone)
        for clone in to_cancel:
            self._fs_cmd("subvolume", "rm", self.volname, clone, "--force")

        # verify trash dir is clean
        self._wait_for_trash_empty()
    def test_subvolume_snapshot_clone_different_groups(self):
        """Clone a snapshot from a subvolume in one group into a different group."""
        subvolume = self._generate_random_subvolume_name()
        snapshot = self._generate_random_snapshot_name()
        clone = self._generate_random_clone_name()
        s_group, c_group = self._generate_random_group_name(2)

        # create source and target groups
        self._fs_cmd("subvolumegroup", "create", self.volname, s_group)
        self._fs_cmd("subvolumegroup", "create", self.volname, c_group)

        # create subvolume in the source group
        self._fs_cmd("subvolume", "create", self.volname, subvolume, s_group, "--mode=777")

        # do some IO
        self._do_subvolume_io(subvolume, subvolume_group=s_group, number_of_files=32)

        # snapshot subvolume
        self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolume, snapshot, s_group)

        # schedule a clone into the target group
        self._fs_cmd("subvolume", "snapshot", "clone", self.volname, subvolume, snapshot, clone,
                     '--group_name', s_group, '--target_group_name', c_group)

        # check clone status
        self._wait_for_clone_to_complete(clone, clone_group=c_group)

        # verify clone
        self._verify_clone(subvolume, snapshot, clone, source_group=s_group, clone_group=c_group)

        # remove snapshot
        self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume, snapshot, s_group)

        # remove subvolumes
        self._fs_cmd("subvolume", "rm", self.volname, subvolume, s_group)
        self._fs_cmd("subvolume", "rm", self.volname, clone, c_group)

        # remove groups
        self._fs_cmd("subvolumegroup", "rm", self.volname, s_group)
        self._fs_cmd("subvolumegroup", "rm", self.volname, c_group)

        # verify trash dir is clean
        self._wait_for_trash_empty()
7028 def test_subvolume_snapshot_clone_fail_with_remove(self
):
7029 subvolume
= self
._generate
_random
_subvolume
_name
()
7030 snapshot
= self
._generate
_random
_snapshot
_name
()
7031 clone1
, clone2
= self
._generate
_random
_clone
_name
(2)
7033 pool_capacity
= 32 * 1024 * 1024
7034 # number of files required to fill up 99% of the pool
7035 nr_files
= int((pool_capacity
* 0.99) / (TestVolumes
.DEFAULT_FILE_SIZE
* 1024 * 1024))
7038 self
._fs
_cmd
("subvolume", "create", self
.volname
, subvolume
, "--mode=777")
7041 self
._do
_subvolume
_io
(subvolume
, number_of_files
=nr_files
)
7043 # snapshot subvolume
7044 self
._fs
_cmd
("subvolume", "snapshot", "create", self
.volname
, subvolume
, snapshot
)
7047 new_pool
= "new_pool"
7048 self
.fs
.add_data_pool(new_pool
)
7050 self
.fs
.mon_manager
.raw_cluster_cmd("osd", "pool", "set-quota", new_pool
,
7051 "max_bytes", "{0}".format(pool_capacity
// 4))
7054 self
._fs
_cmd
("subvolume", "snapshot", "clone", self
.volname
, subvolume
, snapshot
, clone1
, "--pool_layout", new_pool
)
7056 # check clone status -- this should dramatically overshoot the pool quota
7057 self
._wait
_for
_clone
_to
_complete
(clone1
)
7060 self
._verify
_clone
(subvolume
, snapshot
, clone1
, clone_pool
=new_pool
)
7062 # wait a bit so that subsequent I/O will give pool full error
7066 self
._fs
_cmd
("subvolume", "snapshot", "clone", self
.volname
, subvolume
, snapshot
, clone2
, "--pool_layout", new_pool
)
7068 # check clone status
7069 self
._wait
_for
_clone
_to
_fail
(clone2
)
7072 self
._fs
_cmd
("subvolume", "snapshot", "rm", self
.volname
, subvolume
, snapshot
)
7075 self
._fs
_cmd
("subvolume", "rm", self
.volname
, subvolume
)
7076 self
._fs
_cmd
("subvolume", "rm", self
.volname
, clone1
)
7078 self
._fs
_cmd
("subvolume", "rm", self
.volname
, clone2
)
7079 except CommandFailedError
as ce
:
7080 if ce
.exitstatus
!= errno
.EAGAIN
:
7081 raise RuntimeError("invalid error code when trying to remove failed clone")
7083 raise RuntimeError("expected error when removing a failed clone")
7085 # ... and with force, failed clone can be removed
7086 self
._fs
_cmd
("subvolume", "rm", self
.volname
, clone2
, "--force")
7088 # verify trash dir is clean
7089 self
._wait
_for
_trash
_empty
()
    def test_subvolume_snapshot_clone_on_existing_subvolumes(self):
        """Cloning onto an existing subvolume or clone name must fail with EEXIST."""
        subvolume1, subvolume2 = self._generate_random_subvolume_name(2)
        snapshot = self._generate_random_snapshot_name()
        clone = self._generate_random_clone_name()

        # create subvolumes
        self._fs_cmd("subvolume", "create", self.volname, subvolume1, "--mode=777")
        self._fs_cmd("subvolume", "create", self.volname, subvolume2, "--mode=777")

        # do some IO
        self._do_subvolume_io(subvolume1, number_of_files=32)

        # snapshot subvolume
        self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolume1, snapshot)

        # schedule a clone with target as subvolume2
        try:
            self._fs_cmd("subvolume", "snapshot", "clone", self.volname, subvolume1, snapshot, subvolume2)
        except CommandFailedError as ce:
            if ce.exitstatus != errno.EEXIST:
                raise RuntimeError("invalid error code when cloning to existing subvolume")
        else:
            raise RuntimeError("expected cloning to fail if the target is an existing subvolume")

        self._fs_cmd("subvolume", "snapshot", "clone", self.volname, subvolume1, snapshot, clone)

        # schedule a clone with target as clone
        try:
            self._fs_cmd("subvolume", "snapshot", "clone", self.volname, subvolume1, snapshot, clone)
        except CommandFailedError as ce:
            if ce.exitstatus != errno.EEXIST:
                raise RuntimeError("invalid error code when cloning to existing clone")
        else:
            raise RuntimeError("expected cloning to fail if the target is an existing clone")

        # check clone status
        self._wait_for_clone_to_complete(clone)

        # verify clone
        self._verify_clone(subvolume1, snapshot, clone)

        # remove snapshot
        self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume1, snapshot)

        # remove subvolumes
        self._fs_cmd("subvolume", "rm", self.volname, subvolume1)
        self._fs_cmd("subvolume", "rm", self.volname, subvolume2)
        self._fs_cmd("subvolume", "rm", self.volname, clone)

        # verify trash dir is clean
        self._wait_for_trash_empty()
    def test_subvolume_snapshot_clone_pool_layout(self):
        """Clone with an explicit --pool_layout and verify the clone lands on that pool."""
        subvolume = self._generate_random_subvolume_name()
        snapshot = self._generate_random_snapshot_name()
        clone = self._generate_random_clone_name()

        # add data pool
        new_pool = "new_pool"
        newid = self.fs.add_data_pool(new_pool)

        # create subvolume
        self._fs_cmd("subvolume", "create", self.volname, subvolume, "--mode=777")

        # do some IO
        self._do_subvolume_io(subvolume, number_of_files=32)

        # snapshot subvolume
        self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolume, snapshot)

        # schedule a clone onto the new pool
        self._fs_cmd("subvolume", "snapshot", "clone", self.volname, subvolume, snapshot, clone, "--pool_layout", new_pool)

        # check clone status
        self._wait_for_clone_to_complete(clone)

        # verify clone
        self._verify_clone(subvolume, snapshot, clone, clone_pool=new_pool)

        # remove snapshot
        self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume, snapshot)

        subvol_path = self._get_subvolume_path(self.volname, clone)
        desired_pool = self.mount_a.getfattr(subvol_path, "ceph.dir.layout.pool")
        try:
            self.assertEqual(desired_pool, new_pool)
        except AssertionError:
            self.assertEqual(int(desired_pool), newid) # old kernel returns id

        # remove subvolumes
        self._fs_cmd("subvolume", "rm", self.volname, subvolume)
        self._fs_cmd("subvolume", "rm", self.volname, clone)

        # verify trash dir is clean
        self._wait_for_trash_empty()
7187 def test_subvolume_snapshot_clone_under_group(self
):
7188 subvolume
= self
._generate
_random
_subvolume
_name
()
7189 snapshot
= self
._generate
_random
_snapshot
_name
()
7190 clone
= self
._generate
_random
_clone
_name
()
7191 group
= self
._generate
_random
_group
_name
()
7194 self
._fs
_cmd
("subvolume", "create", self
.volname
, subvolume
, "--mode=777")
7197 self
._do
_subvolume
_io
(subvolume
, number_of_files
=32)
7199 # snapshot subvolume
7200 self
._fs
_cmd
("subvolume", "snapshot", "create", self
.volname
, subvolume
, snapshot
)
7203 self
._fs
_cmd
("subvolumegroup", "create", self
.volname
, group
)
7206 self
._fs
_cmd
("subvolume", "snapshot", "clone", self
.volname
, subvolume
, snapshot
, clone
, '--target_group_name', group
)
7208 # check clone status
7209 self
._wait
_for
_clone
_to
_complete
(clone
, clone_group
=group
)
7212 self
._verify
_clone
(subvolume
, snapshot
, clone
, clone_group
=group
)
7215 self
._fs
_cmd
("subvolume", "snapshot", "rm", self
.volname
, subvolume
, snapshot
)
7218 self
._fs
_cmd
("subvolume", "rm", self
.volname
, subvolume
)
7219 self
._fs
_cmd
("subvolume", "rm", self
.volname
, clone
, group
)
7222 self
._fs
_cmd
("subvolumegroup", "rm", self
.volname
, group
)
7224 # verify trash dir is clean
7225 self
._wait
_for
_trash
_empty
()
    def test_subvolume_snapshot_clone_with_attrs(self):
        """Ensure a clone picks up the snapshot's attrs, not the source's later ones."""
        subvolume = self._generate_random_subvolume_name()
        snapshot = self._generate_random_snapshot_name()
        clone = self._generate_random_clone_name()

        # attributes used at creation time
        # NOTE(review): these assignments were lost in extraction; values
        # reconstructed from upstream -- confirm against the original file
        mode = "777"
        uid = "1000"
        gid = "1000"
        # attributes applied after the snapshot is taken
        new_uid = "1001"
        new_gid = "1001"
        new_mode = "700"

        # create subvolume
        self._fs_cmd("subvolume", "create", self.volname, subvolume, "--mode", mode, "--uid", uid, "--gid", gid)

        # do some IO
        self._do_subvolume_io(subvolume, number_of_files=32)

        # snapshot subvolume
        self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolume, snapshot)

        # change subvolume attrs (to ensure clone picks up snapshot attrs)
        self._do_subvolume_attr_update(subvolume, new_uid, new_gid, new_mode)

        # schedule a clone
        self._fs_cmd("subvolume", "snapshot", "clone", self.volname, subvolume, snapshot, clone)

        # check clone status
        self._wait_for_clone_to_complete(clone)

        # verify clone
        self._verify_clone(subvolume, snapshot, clone)

        # remove snapshot
        self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume, snapshot)

        # remove subvolumes
        self._fs_cmd("subvolume", "rm", self.volname, subvolume)
        self._fs_cmd("subvolume", "rm", self.volname, clone)

        # verify trash dir is clean
        self._wait_for_trash_empty()
    def test_subvolume_snapshot_clone_with_upgrade(self):
        """
        yet another poor man's upgrade test -- rather than going through a full
        upgrade cycle, emulate old types subvolumes by going through the wormhole
        and verify clone operation.
        further ensure that a legacy volume is not updated to v2, but clone is.
        """
        subvolume = self._generate_random_subvolume_name()
        snapshot = self._generate_random_snapshot_name()
        clone = self._generate_random_clone_name()

        # emulate a old-fashioned subvolume by creating its directory by hand
        createpath = os.path.join(".", "volumes", "_nogroup", subvolume)
        self.mount_a.run_shell_payload(f"sudo mkdir -p -m 777 {createpath}", omit_sudo=False)

        # add required xattrs to subvolume
        default_pool = self.mount_a.getfattr(".", "ceph.dir.layout.pool")
        self.mount_a.setfattr(createpath, 'ceph.dir.layout.pool', default_pool, sudo=True)

        # do some IO
        self._do_subvolume_io(subvolume, number_of_files=64)

        # snapshot subvolume
        self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolume, snapshot)

        # ensure metadata file is in legacy location, with required version v1
        self._assert_meta_location_and_version(self.volname, subvolume, version=1, legacy=True)

        # Insert delay at the beginning of snapshot clone
        self.config_set('mgr', 'mgr/volumes/snapshot_clone_delay', 2)

        # schedule a clone
        self._fs_cmd("subvolume", "snapshot", "clone", self.volname, subvolume, snapshot, clone)

        # snapshot should not be deletable now (clone in flight -> EAGAIN)
        try:
            self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume, snapshot)
        except CommandFailedError as ce:
            self.assertEqual(ce.exitstatus, errno.EAGAIN, msg="invalid error code when removing source snapshot of a clone")
        else:
            self.fail("expected removing source snapshot of a clone to fail")

        # check clone status
        self._wait_for_clone_to_complete(clone)

        # verify clone
        self._verify_clone(subvolume, snapshot, clone, source_version=1)

        # remove snapshot
        self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume, snapshot)

        # ensure metadata file is in v2 location, with required version v2
        self._assert_meta_location_and_version(self.volname, clone)

        # remove subvolumes
        self._fs_cmd("subvolume", "rm", self.volname, subvolume)
        self._fs_cmd("subvolume", "rm", self.volname, clone)

        # verify trash dir is clean
        self._wait_for_trash_empty()
7331 def test_subvolume_snapshot_reconf_max_concurrent_clones(self
):
7333 Validate 'max_concurrent_clones' config option
7336 # get the default number of cloner threads
7337 default_max_concurrent_clones
= int(self
.config_get('mgr', 'mgr/volumes/max_concurrent_clones'))
7338 self
.assertEqual(default_max_concurrent_clones
, 4)
7340 # Increase number of cloner threads
7341 self
.config_set('mgr', 'mgr/volumes/max_concurrent_clones', 6)
7342 max_concurrent_clones
= int(self
.config_get('mgr', 'mgr/volumes/max_concurrent_clones'))
7343 self
.assertEqual(max_concurrent_clones
, 6)
7345 # Decrease number of cloner threads
7346 self
.config_set('mgr', 'mgr/volumes/max_concurrent_clones', 2)
7347 max_concurrent_clones
= int(self
.config_get('mgr', 'mgr/volumes/max_concurrent_clones'))
7348 self
.assertEqual(max_concurrent_clones
, 2)
7350 def test_subvolume_snapshot_config_snapshot_clone_delay(self
):
7352 Validate 'snapshot_clone_delay' config option
7355 # get the default delay before starting the clone
7356 default_timeout
= int(self
.config_get('mgr', 'mgr/volumes/snapshot_clone_delay'))
7357 self
.assertEqual(default_timeout
, 0)
7359 # Insert delay of 2 seconds at the beginning of the snapshot clone
7360 self
.config_set('mgr', 'mgr/volumes/snapshot_clone_delay', 2)
7361 default_timeout
= int(self
.config_get('mgr', 'mgr/volumes/snapshot_clone_delay'))
7362 self
.assertEqual(default_timeout
, 2)
7364 # Decrease number of cloner threads
7365 self
.config_set('mgr', 'mgr/volumes/max_concurrent_clones', 2)
7366 max_concurrent_clones
= int(self
.config_get('mgr', 'mgr/volumes/max_concurrent_clones'))
7367 self
.assertEqual(max_concurrent_clones
, 2)
7369 def test_subvolume_under_group_snapshot_clone(self
):
7370 subvolume
= self
._generate
_random
_subvolume
_name
()
7371 group
= self
._generate
_random
_group
_name
()
7372 snapshot
= self
._generate
_random
_snapshot
_name
()
7373 clone
= self
._generate
_random
_clone
_name
()
7376 self
._fs
_cmd
("subvolumegroup", "create", self
.volname
, group
)
7379 self
._fs
_cmd
("subvolume", "create", self
.volname
, subvolume
, group
, "--mode=777")
7382 self
._do
_subvolume
_io
(subvolume
, subvolume_group
=group
, number_of_files
=32)
7384 # snapshot subvolume
7385 self
._fs
_cmd
("subvolume", "snapshot", "create", self
.volname
, subvolume
, snapshot
, group
)
7388 self
._fs
_cmd
("subvolume", "snapshot", "clone", self
.volname
, subvolume
, snapshot
, clone
, '--group_name', group
)
7390 # check clone status
7391 self
._wait
_for
_clone
_to
_complete
(clone
)
7394 self
._verify
_clone
(subvolume
, snapshot
, clone
, source_group
=group
)
7397 self
._fs
_cmd
("subvolume", "snapshot", "rm", self
.volname
, subvolume
, snapshot
, group
)
7400 self
._fs
_cmd
("subvolume", "rm", self
.volname
, subvolume
, group
)
7401 self
._fs
_cmd
("subvolume", "rm", self
.volname
, clone
)
7404 self
._fs
_cmd
("subvolumegroup", "rm", self
.volname
, group
)
7406 # verify trash dir is clean
7407 self
._wait
_for
_trash
_empty
()
class TestMisc(TestVolumesHelper):
    """Miscellaneous tests related to FS volume, subvolume group, and subvolume operations."""
    def test_connection_expiration(self):
        # unmount any cephfs mounts
        for i in range(0, self.CLIENTS_REQUIRED):
            self.mounts[i].umount_wait()
        sessions = self._session_list()
        self.assertLessEqual(len(sessions), 1) # maybe mgr is already mounted

        # Get the mgr to definitely mount cephfs
        subvolume = self._generate_random_subvolume_name()
        self._fs_cmd("subvolume", "create", self.volname, subvolume)
        sessions = self._session_list()
        self.assertEqual(len(sessions), 1)

        # Now wait for the mgr to expire the connection:
        self.wait_until_evicted(sessions[0]['id'], timeout=90)

    def test_mgr_eviction(self):
        # unmount any cephfs mounts
        for i in range(0, self.CLIENTS_REQUIRED):
            self.mounts[i].umount_wait()
        sessions = self._session_list()
        self.assertLessEqual(len(sessions), 1) # maybe mgr is already mounted

        # Get the mgr to definitely mount cephfs
        subvolume = self._generate_random_subvolume_name()
        self._fs_cmd("subvolume", "create", self.volname, subvolume)
        sessions = self._session_list()
        self.assertEqual(len(sessions), 1)

        # Now fail the mgr, check the session was evicted
        mgr = self.mgr_cluster.get_active_id()
        self.mgr_cluster.mgr_fail(mgr)
        self.wait_until_evicted(sessions[0]['id'])

    def test_names_can_only_be_goodchars(self):
        """
        Test the creating vols, subvols subvolgroups fails when their names uses
        characters beyond [a-zA-Z0-9 -_.].
        """
        volname, badname = 'testvol', 'abcd@#'

        with self.assertRaises(CommandFailedError):
            self._fs_cmd('volume', 'create', badname)
        self._fs_cmd('volume', 'create', volname)

        with self.assertRaises(CommandFailedError):
            self._fs_cmd('subvolumegroup', 'create', volname, badname)

        with self.assertRaises(CommandFailedError):
            self._fs_cmd('subvolume', 'create', volname, badname)
        self._fs_cmd('volume', 'rm', volname, '--yes-i-really-mean-it')

    def test_subvolume_ops_on_nonexistent_vol(self):
        # tests the fs subvolume operations on non existing volume

        volname = "non_existent_subvolume"

        # try subvolume operations
        for op in ("create", "rm", "getpath", "info", "resize", "pin", "ls"):
            try:
                if op == "resize":
                    self._fs_cmd("subvolume", "resize", volname, "subvolname_1", "inf")
                elif op == "pin":
                    self._fs_cmd("subvolume", "pin", volname, "subvolname_1", "export", "1")
                elif op == "ls":
                    self._fs_cmd("subvolume", "ls", volname)
                else:
                    self._fs_cmd("subvolume", op, volname, "subvolume_1")
            except CommandFailedError as ce:
                self.assertEqual(ce.exitstatus, errno.ENOENT)
            else:
                self.fail("expected the 'fs subvolume {0}' command to fail".format(op))

        # try subvolume snapshot operations and clone create
        for op in ("create", "rm", "info", "protect", "unprotect", "ls", "clone"):
            try:
                if op == "ls":
                    self._fs_cmd("subvolume", "snapshot", op, volname, "subvolume_1")
                elif op == "clone":
                    self._fs_cmd("subvolume", "snapshot", op, volname, "subvolume_1", "snapshot_1", "clone_1")
                else:
                    self._fs_cmd("subvolume", "snapshot", op, volname, "subvolume_1", "snapshot_1")
            except CommandFailedError as ce:
                self.assertEqual(ce.exitstatus, errno.ENOENT)
            else:
                self.fail("expected the 'fs subvolume snapshot {0}' command to fail".format(op))

        # try, clone status
        try:
            self._fs_cmd("clone", "status", volname, "clone_1")
        except CommandFailedError as ce:
            self.assertEqual(ce.exitstatus, errno.ENOENT)
        else:
            self.fail("expected the 'fs clone status' command to fail")

        # try subvolumegroup operations
        for op in ("create", "rm", "getpath", "pin", "ls"):
            try:
                if op == "pin":
                    self._fs_cmd("subvolumegroup", "pin", volname, "group_1", "export", "0")
                elif op == "ls":
                    self._fs_cmd("subvolumegroup", op, volname)
                else:
                    self._fs_cmd("subvolumegroup", op, volname, "group_1")
            except CommandFailedError as ce:
                self.assertEqual(ce.exitstatus, errno.ENOENT)
            else:
                self.fail("expected the 'fs subvolumegroup {0}' command to fail".format(op))

        # try subvolumegroup snapshot operations
        for op in ("create", "rm", "ls"):
            try:
                if op == "ls":
                    self._fs_cmd("subvolumegroup", "snapshot", op, volname, "group_1")
                else:
                    self._fs_cmd("subvolumegroup", "snapshot", op, volname, "group_1", "snapshot_1")
            except CommandFailedError as ce:
                self.assertEqual(ce.exitstatus, errno.ENOENT)
            else:
                self.fail("expected the 'fs subvolumegroup snapshot {0}' command to fail".format(op))

    def test_subvolume_upgrade_legacy_to_v1(self):
        """
        poor man's upgrade test -- rather than going through a full upgrade cycle,
        emulate subvolumes by going through the wormhole and verify if they are
        accessible.
        further ensure that a legacy volume is not updated to v2.
        """
        subvolume1, subvolume2 = self._generate_random_subvolume_name(2)
        group = self._generate_random_group_name()

        # emulate a old-fashioned subvolume -- one in the default group and
        # the other in a custom group
        createpath1 = os.path.join(".", "volumes", "_nogroup", subvolume1)
        self.mount_a.run_shell(['sudo', 'mkdir', '-p', createpath1], omit_sudo=False)

        # create group
        createpath2 = os.path.join(".", "volumes", group, subvolume2)
        self.mount_a.run_shell(['sudo', 'mkdir', '-p', createpath2], omit_sudo=False)

        # this would auto-upgrade on access without anyone noticing
        subvolpath1 = self._fs_cmd("subvolume", "getpath", self.volname, subvolume1)
        self.assertNotEqual(subvolpath1, None)
        subvolpath1 = subvolpath1.rstrip() # remove "/" prefix and any trailing newline

        subvolpath2 = self._fs_cmd("subvolume", "getpath", self.volname, subvolume2, group)
        self.assertNotEqual(subvolpath2, None)
        subvolpath2 = subvolpath2.rstrip() # remove "/" prefix and any trailing newline

        # and... the subvolume path returned should be what we created behind the scene
        self.assertEqual(createpath1[1:], subvolpath1)
        self.assertEqual(createpath2[1:], subvolpath2)

        # ensure metadata file is in legacy location, with required version v1
        self._assert_meta_location_and_version(self.volname, subvolume1, version=1, legacy=True)
        self._assert_meta_location_and_version(self.volname, subvolume2, subvol_group=group, version=1, legacy=True)

        # remove subvolume
        self._fs_cmd("subvolume", "rm", self.volname, subvolume1)
        self._fs_cmd("subvolume", "rm", self.volname, subvolume2, group)

        # verify trash dir is clean
        self._wait_for_trash_empty()

        # remove group
        self._fs_cmd("subvolumegroup", "rm", self.volname, group)

    def test_subvolume_no_upgrade_v1_sanity(self):
        """
        poor man's upgrade test -- theme continues...

        This test is to ensure v1 subvolumes are retained as is, due to a snapshot being present, and runs through
        a series of operations on the v1 subvolume to ensure they work as expected.
        """
        subvol_md = ["atime", "bytes_pcent", "bytes_quota", "bytes_used", "created_at", "ctime",
                     "data_pool", "gid", "mode", "mon_addrs", "mtime", "path", "pool_namespace",
                     "type", "uid", "features", "state"]
        snap_md = ["created_at", "data_pool", "has_pending_clones"]

        subvolume = self._generate_random_subvolume_name()
        snapshot = self._generate_random_snapshot_name()
        clone1, clone2 = self._generate_random_clone_name(2)
        mode = "777"
        uid  = "1000"
        gid  = "1000"

        # emulate a v1 subvolume -- in the default group
        subvolume_path = self._create_v1_subvolume(subvolume)

        # getpath
        subvolpath = self._get_subvolume_path(self.volname, subvolume)
        self.assertEqual(subvolpath, subvolume_path)

        # ls
        subvolumes = json.loads(self._fs_cmd('subvolume', 'ls', self.volname))
        self.assertEqual(len(subvolumes), 1, "subvolume ls count mismatch, expected '1', found {0}".format(len(subvolumes)))
        self.assertEqual(subvolumes[0]['name'], subvolume,
                         "subvolume name mismatch in ls output, expected '{0}', found '{1}'".format(subvolume, subvolumes[0]['name']))

        # info
        subvol_info = json.loads(self._get_subvolume_info(self.volname, subvolume))
        for md in subvol_md:
            self.assertIn(md, subvol_info, "'{0}' key not present in metadata of subvolume".format(md))

        # NOTE: closing quote added to the message format string (was "found '{0}")
        self.assertEqual(subvol_info["state"], "complete",
                         msg="expected state to be 'complete', found '{0}'".format(subvol_info["state"]))
        # message fixed: two features are asserted, not one
        self.assertEqual(len(subvol_info["features"]), 2,
                         msg="expected 2 features, found '{0}' ({1})".format(len(subvol_info["features"]), subvol_info["features"]))
        for feature in ['snapshot-clone', 'snapshot-autoprotect']:
            self.assertIn(feature, subvol_info["features"], msg="expected feature '{0}' in subvolume".format(feature))

        # resize
        nsize = self.DEFAULT_FILE_SIZE*1024*1024*10
        self._fs_cmd("subvolume", "resize", self.volname, subvolume, str(nsize))
        subvol_info = json.loads(self._get_subvolume_info(self.volname, subvolume))
        for md in subvol_md:
            self.assertIn(md, subvol_info, "'{0}' key not present in metadata of subvolume".format(md))
        self.assertEqual(subvol_info["bytes_quota"], nsize, "bytes_quota should be set to '{0}'".format(nsize))

        # create (idempotent) (change some attrs, to ensure attrs are preserved from the snapshot on clone)
        self._fs_cmd("subvolume", "create", self.volname, subvolume, "--mode", mode, "--uid", uid, "--gid", gid)

        # do some IO
        self._do_subvolume_io(subvolume, number_of_files=8)

        # snap-create
        self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolume, snapshot)

        # clone
        self._fs_cmd("subvolume", "snapshot", "clone", self.volname, subvolume, snapshot, clone1)

        # check clone status
        self._wait_for_clone_to_complete(clone1)

        # ensure clone is v2
        self._assert_meta_location_and_version(self.volname, clone1, version=2)

        # verify clone
        self._verify_clone(subvolume, snapshot, clone1, source_version=1)

        # clone (older snapshot)
        self._fs_cmd("subvolume", "snapshot", "clone", self.volname, subvolume, 'fake', clone2)

        # check clone status
        self._wait_for_clone_to_complete(clone2)

        # ensure clone is v2
        self._assert_meta_location_and_version(self.volname, clone2, version=2)

        # verify clone
        # TODO: rentries will mismatch till this is fixed https://tracker.ceph.com/issues/46747
        #self._verify_clone(subvolume, 'fake', clone2, source_version=1)

        # snap-info
        snap_info = json.loads(self._get_subvolume_snapshot_info(self.volname, subvolume, snapshot))
        for md in snap_md:
            self.assertIn(md, snap_info, "'{0}' key not present in metadata of snapshot".format(md))
        self.assertEqual(snap_info["has_pending_clones"], "no")

        # snap-ls
        subvol_snapshots = json.loads(self._fs_cmd('subvolume', 'snapshot', 'ls', self.volname, subvolume))
        self.assertEqual(len(subvol_snapshots), 2, "subvolume ls count mismatch, expected '2', found {0}".format(len(subvol_snapshots)))
        snapshotnames = [snapshot['name'] for snapshot in subvol_snapshots]
        for name in [snapshot, 'fake']:
            self.assertIn(name, snapshotnames, msg="expected snapshot '{0}' in subvolume snapshot ls".format(name))

        # snap-rm
        self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume, snapshot)
        self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume, "fake")

        # ensure volume is still at version 1
        self._assert_meta_location_and_version(self.volname, subvolume, version=1)

        # rm
        self._fs_cmd("subvolume", "rm", self.volname, subvolume)
        self._fs_cmd("subvolume", "rm", self.volname, clone1)
        self._fs_cmd("subvolume", "rm", self.volname, clone2)

        # verify trash dir is clean
        self._wait_for_trash_empty()

    def test_subvolume_no_upgrade_v1_to_v2(self):
        """
        poor man's upgrade test -- theme continues...
        ensure v1 to v2 upgrades are not done automatically due to various states of v1
        """
        subvolume1, subvolume2, subvolume3 = self._generate_random_subvolume_name(3)
        group = self._generate_random_group_name()

        # emulate a v1 subvolume -- in the default group
        subvol1_path = self._create_v1_subvolume(subvolume1)

        # emulate a v1 subvolume -- in a custom group
        subvol2_path = self._create_v1_subvolume(subvolume2, subvol_group=group)

        # emulate a v1 subvolume -- in a clone pending state
        self._create_v1_subvolume(subvolume3, subvol_type='clone', has_snapshot=False, state='pending')

        # this would attempt auto-upgrade on access, but fail to do so as snapshots exist
        subvolpath1 = self._get_subvolume_path(self.volname, subvolume1)
        self.assertEqual(subvolpath1, subvol1_path)

        subvolpath2 = self._get_subvolume_path(self.volname, subvolume2, group_name=group)
        self.assertEqual(subvolpath2, subvol2_path)

        # this would attempt auto-upgrade on access, but fail to do so as volume is not complete
        # use clone status, as only certain operations are allowed in pending state
        status = json.loads(self._fs_cmd("clone", "status", self.volname, subvolume3))
        self.assertEqual(status["status"]["state"], "pending")

        # remove snapshot
        self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume1, "fake")
        self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume2, "fake", group)

        # ensure metadata file is in v1 location, with version retained as v1
        self._assert_meta_location_and_version(self.volname, subvolume1, version=1)
        self._assert_meta_location_and_version(self.volname, subvolume2, subvol_group=group, version=1)

        # remove subvolume
        self._fs_cmd("subvolume", "rm", self.volname, subvolume1)
        self._fs_cmd("subvolume", "rm", self.volname, subvolume2, group)
        try:
            self._fs_cmd("subvolume", "rm", self.volname, subvolume3)
        except CommandFailedError as ce:
            self.assertEqual(ce.exitstatus, errno.EAGAIN, "invalid error code on rm of subvolume undergoing clone")
        else:
            self.fail("expected rm of subvolume undergoing clone to fail")

        # ensure metadata file is in v1 location, with version retained as v1
        self._assert_meta_location_and_version(self.volname, subvolume3, version=1)
        self._fs_cmd("subvolume", "rm", self.volname, subvolume3, "--force")

        # verify list subvolumes returns an empty list
        subvolumels = json.loads(self._fs_cmd('subvolume', 'ls', self.volname))
        self.assertEqual(len(subvolumels), 0)

        # verify trash dir is clean
        self._wait_for_trash_empty()

    def test_subvolume_upgrade_v1_to_v2(self):
        """
        poor man's upgrade test -- theme continues...
        ensure v1 to v2 upgrades work
        """
        subvolume1, subvolume2 = self._generate_random_subvolume_name(2)
        group = self._generate_random_group_name()

        # emulate a v1 subvolume -- in the default group
        subvol1_path = self._create_v1_subvolume(subvolume1, has_snapshot=False)

        # emulate a v1 subvolume -- in a custom group
        subvol2_path = self._create_v1_subvolume(subvolume2, subvol_group=group, has_snapshot=False)

        # this would attempt auto-upgrade on access
        subvolpath1 = self._get_subvolume_path(self.volname, subvolume1)
        self.assertEqual(subvolpath1, subvol1_path)

        subvolpath2 = self._get_subvolume_path(self.volname, subvolume2, group_name=group)
        self.assertEqual(subvolpath2, subvol2_path)

        # ensure metadata file is in v2 location, with version retained as v2
        self._assert_meta_location_and_version(self.volname, subvolume1, version=2)
        self._assert_meta_location_and_version(self.volname, subvolume2, subvol_group=group, version=2)

        # remove subvolume
        self._fs_cmd("subvolume", "rm", self.volname, subvolume1)
        self._fs_cmd("subvolume", "rm", self.volname, subvolume2, group)

        # verify trash dir is clean
        self._wait_for_trash_empty()

    def test_malicious_metafile_on_legacy_to_v1_upgrade(self):
        """
        Validate handcrafted .meta file on legacy subvol root doesn't break the system
        on legacy subvol upgrade to v1
        poor man's upgrade test -- theme continues...
        """
        subvol1, subvol2 = self._generate_random_subvolume_name(2)

        # emulate a old-fashioned subvolume in the default group
        createpath1 = os.path.join(".", "volumes", "_nogroup", subvol1)
        self.mount_a.run_shell(['sudo', 'mkdir', '-p', createpath1], omit_sudo=False)

        # add required xattrs to subvolume
        default_pool = self.mount_a.getfattr(".", "ceph.dir.layout.pool")
        self.mount_a.setfattr(createpath1, 'ceph.dir.layout.pool', default_pool, sudo=True)

        # create v2 subvolume
        self._fs_cmd("subvolume", "create", self.volname, subvol2)

        # Create malicious .meta file in legacy subvolume root. Copy v2 subvolume
        # .meta into legacy subvol1's root
        subvol2_metapath = os.path.join(".", "volumes", "_nogroup", subvol2, ".meta")
        self.mount_a.run_shell(['sudo', 'cp', subvol2_metapath, createpath1], omit_sudo=False)

        # Upgrade legacy subvol1 to v1
        subvolpath1 = self._fs_cmd("subvolume", "getpath", self.volname, subvol1)
        self.assertNotEqual(subvolpath1, None)
        subvolpath1 = subvolpath1.rstrip()

        # the subvolume path returned should not be of subvol2 from handcrafted
        # .meta file
        self.assertEqual(createpath1[1:], subvolpath1)

        # ensure metadata file is in legacy location, with required version v1
        self._assert_meta_location_and_version(self.volname, subvol1, version=1, legacy=True)

        # Authorize alice authID read-write access to subvol1. Verify it authorizes subvol1 path and not subvol2
        # path whose '.meta' file is copied to subvol1 root
        authid1 = "alice"
        self._fs_cmd("subvolume", "authorize", self.volname, subvol1, authid1)

        # Validate that the mds path added is of subvol1 and not of subvol2
        out = json.loads(self.fs.mon_manager.raw_cluster_cmd("auth", "get", "client.alice", "--format=json-pretty"))
        self.assertEqual("client.alice", out[0]["entity"])
        self.assertEqual("allow rw path={0}".format(createpath1[1:]), out[0]["caps"]["mds"])

        # remove subvolume
        self._fs_cmd("subvolume", "rm", self.volname, subvol1)
        self._fs_cmd("subvolume", "rm", self.volname, subvol2)

        # verify trash dir is clean
        self._wait_for_trash_empty()

    def test_binary_metafile_on_legacy_to_v1_upgrade(self):
        """
        Validate binary .meta file on legacy subvol root doesn't break the system
        on legacy subvol upgrade to v1
        poor man's upgrade test -- theme continues...
        """
        subvol = self._generate_random_subvolume_name()
        group = self._generate_random_group_name()

        # emulate a old-fashioned subvolume -- in a custom group
        createpath = os.path.join(".", "volumes", group, subvol)
        self.mount_a.run_shell(['sudo', 'mkdir', '-p', createpath], omit_sudo=False)

        # add required xattrs to subvolume
        default_pool = self.mount_a.getfattr(".", "ceph.dir.layout.pool")
        self.mount_a.setfattr(createpath, 'ceph.dir.layout.pool', default_pool, sudo=True)

        # Create unparseable binary .meta file on legacy subvol's root
        meta_contents = os.urandom(4096)
        meta_filepath = os.path.join(self.mount_a.mountpoint, createpath, ".meta")
        self.mount_a.client_remote.write_file(meta_filepath, meta_contents, sudo=True)

        # Upgrade legacy subvol to v1
        subvolpath = self._fs_cmd("subvolume", "getpath", self.volname, subvol, group)
        self.assertNotEqual(subvolpath, None)
        subvolpath = subvolpath.rstrip()

        # The legacy subvolume path should be returned for subvol.
        # Should ignore unparseable binary .meta file in subvol's root
        self.assertEqual(createpath[1:], subvolpath)

        # ensure metadata file is in legacy location, with required version v1
        self._assert_meta_location_and_version(self.volname, subvol, subvol_group=group, version=1, legacy=True)

        # remove subvolume
        self._fs_cmd("subvolume", "rm", self.volname, subvol, group)

        # verify trash dir is clean
        self._wait_for_trash_empty()

        # remove group
        self._fs_cmd("subvolumegroup", "rm", self.volname, group)

    def test_unparseable_metafile_on_legacy_to_v1_upgrade(self):
        """
        Validate unparseable text .meta file on legacy subvol root doesn't break the system
        on legacy subvol upgrade to v1
        poor man's upgrade test -- theme continues...
        """
        subvol = self._generate_random_subvolume_name()
        group = self._generate_random_group_name()

        # emulate a old-fashioned subvolume -- in a custom group
        createpath = os.path.join(".", "volumes", group, subvol)
        self.mount_a.run_shell(['sudo', 'mkdir', '-p', createpath], omit_sudo=False)

        # add required xattrs to subvolume
        default_pool = self.mount_a.getfattr(".", "ceph.dir.layout.pool")
        self.mount_a.setfattr(createpath, 'ceph.dir.layout.pool', default_pool, sudo=True)

        # Create unparseable text .meta file on legacy subvol's root
        meta_contents = "unparseable config\nfile ...\nunparseable config\nfile ...\n"
        meta_filepath = os.path.join(self.mount_a.mountpoint, createpath, ".meta")
        self.mount_a.client_remote.write_file(meta_filepath, meta_contents, sudo=True)

        # Upgrade legacy subvol to v1
        subvolpath = self._fs_cmd("subvolume", "getpath", self.volname, subvol, group)
        self.assertNotEqual(subvolpath, None)
        subvolpath = subvolpath.rstrip()

        # The legacy subvolume path should be returned for subvol.
        # Should ignore unparseable binary .meta file in subvol's root
        self.assertEqual(createpath[1:], subvolpath)

        # ensure metadata file is in legacy location, with required version v1
        self._assert_meta_location_and_version(self.volname, subvol, subvol_group=group, version=1, legacy=True)

        # remove subvolume
        self._fs_cmd("subvolume", "rm", self.volname, subvol, group)

        # verify trash dir is clean
        self._wait_for_trash_empty()

        # remove group
        self._fs_cmd("subvolumegroup", "rm", self.volname, group)
class TestPerModuleFinsherThread(TestVolumesHelper):
    """
    Per module finisher thread tests related to mgr/volume cmds.
    This is used in conjunction with check_counter with min val being 4
    as four subvolume cmds are run
    """
    def test_volumes_module_finisher_thread(self):
        subvol1, subvol2, subvol3 = self._generate_random_subvolume_name(3)
        group = self._generate_random_group_name()

        # create group
        self._fs_cmd("subvolumegroup", "create", self.volname, group)

        # create subvolumes in group
        self._fs_cmd("subvolume", "create", self.volname, subvol1, "--group_name", group)
        self._fs_cmd("subvolume", "create", self.volname, subvol2, "--group_name", group)
        self._fs_cmd("subvolume", "create", self.volname, subvol3, "--group_name", group)

        # clean up the subvolumes and the group
        self._fs_cmd("subvolume", "rm", self.volname, subvol1, group)
        self._fs_cmd("subvolume", "rm", self.volname, subvol2, group)
        self._fs_cmd("subvolume", "rm", self.volname, subvol3, group)
        self._fs_cmd("subvolumegroup", "rm", self.volname, group)

        # verify trash dir is clean
        self._wait_for_trash_empty()