10 from hashlib
import md5
11 from textwrap
import dedent
12 from io
import StringIO
14 from tasks
.cephfs
.cephfs_test_case
import CephFSTestCase
15 from tasks
.cephfs
.fuse_mount
import FuseMount
16 from teuthology
.exceptions
import CommandFailedError
18 log
= logging
.getLogger(__name__
)
20 class TestVolumesHelper(CephFSTestCase
):
21 """Helper class for testing FS volume, subvolume group and subvolume operations."""
22 TEST_VOLUME_PREFIX
= "volume"
23 TEST_SUBVOLUME_PREFIX
="subvolume"
24 TEST_GROUP_PREFIX
="group"
25 TEST_SNAPSHOT_PREFIX
="snapshot"
26 TEST_CLONE_PREFIX
="clone"
27 TEST_FILE_NAME_PREFIX
="subvolume_file"
29 # for filling subvolume with data
34 DEFAULT_FILE_SIZE
= 1 # MB
35 DEFAULT_NUMBER_OF_FILES
= 1024
def _fs_cmd(self, *args):
    """Run a `ceph fs ...` command via the mon manager and return its output."""
    mon_manager = self.mgr_cluster.mon_manager
    return mon_manager.raw_cluster_cmd("fs", *args)
def _raw_cmd(self, *args):
    """Run an arbitrary raw `ceph ...` command via the mon manager."""
    mon_manager = self.mgr_cluster.mon_manager
    return mon_manager.raw_cluster_cmd(*args)
43 def __check_clone_state(self
, state
, clone
, clone_group
=None, timo
=120):
45 args
= ["clone", "status", self
.volname
, clone
]
47 args
.append(clone_group
)
50 result
= json
.loads(self
._fs
_cmd
(*args
))
51 if result
["status"]["state"] == state
:
55 self
.assertTrue(check
< timo
)
57 def _get_clone_status(self
, clone
, clone_group
=None):
58 args
= ["clone", "status", self
.volname
, clone
]
60 args
.append(clone_group
)
62 result
= json
.loads(self
._fs
_cmd
(*args
))
def _wait_for_clone_to_complete(self, clone, clone_group=None, timo=120):
    """Block until `clone` reports the 'complete' state, or fail after `timo` checks."""
    self.__check_clone_state("complete", clone,
                             clone_group=clone_group, timo=timo)
def _wait_for_clone_to_fail(self, clone, clone_group=None, timo=120):
    """Block until `clone` reports the 'failed' state, or fail after `timo` checks."""
    self.__check_clone_state("failed", clone,
                             clone_group=clone_group, timo=timo)
def _wait_for_clone_to_be_in_progress(self, clone, clone_group=None, timo=120):
    """Block until `clone` reports the 'in-progress' state, or fail after `timo` checks."""
    self.__check_clone_state("in-progress", clone,
                             clone_group=clone_group, timo=timo)
def _check_clone_canceled(self, clone, clone_group=None):
    """Assert that `clone` is already 'canceled' (single status check, timo=1)."""
    self.__check_clone_state("canceled", clone,
                             clone_group=clone_group, timo=1)
77 def _get_subvolume_snapshot_path(self
, subvolume
, snapshot
, source_group
, subvol_path
, source_version
):
78 if source_version
== 2:
80 if subvol_path
is not None:
81 (base_path
, uuid_str
) = os
.path
.split(subvol_path
)
83 (base_path
, uuid_str
) = os
.path
.split(self
._get
_subvolume
_path
(self
.volname
, subvolume
, group_name
=source_group
))
84 return os
.path
.join(base_path
, ".snap", snapshot
, uuid_str
)
87 base_path
= self
._get
_subvolume
_path
(self
.volname
, subvolume
, group_name
=source_group
)
88 return os
.path
.join(base_path
, ".snap", snapshot
)
90 def _verify_clone_attrs(self
, source_path
, clone_path
):
94 p
= self
.mount_a
.run_shell(["find", path1
])
95 paths
= p
.stdout
.getvalue().strip().split()
97 # for each entry in source and clone (sink) verify certain inode attributes:
98 # inode type, mode, ownership, [am]time.
99 for source_path
in paths
:
100 sink_entry
= source_path
[len(path1
)+1:]
101 sink_path
= os
.path
.join(path2
, sink_entry
)
104 sval
= int(self
.mount_a
.run_shell(['stat', '-c' '%f', source_path
]).stdout
.getvalue().strip(), 16)
105 cval
= int(self
.mount_a
.run_shell(['stat', '-c' '%f', sink_path
]).stdout
.getvalue().strip(), 16)
106 self
.assertEqual(sval
, cval
)
109 sval
= int(self
.mount_a
.run_shell(['stat', '-c' '%u', source_path
]).stdout
.getvalue().strip())
110 cval
= int(self
.mount_a
.run_shell(['stat', '-c' '%u', sink_path
]).stdout
.getvalue().strip())
111 self
.assertEqual(sval
, cval
)
113 sval
= int(self
.mount_a
.run_shell(['stat', '-c' '%g', source_path
]).stdout
.getvalue().strip())
114 cval
= int(self
.mount_a
.run_shell(['stat', '-c' '%g', sink_path
]).stdout
.getvalue().strip())
115 self
.assertEqual(sval
, cval
)
118 # do not check access as kclient will generally not update this like ceph-fuse will.
119 sval
= int(self
.mount_a
.run_shell(['stat', '-c' '%Y', source_path
]).stdout
.getvalue().strip())
120 cval
= int(self
.mount_a
.run_shell(['stat', '-c' '%Y', sink_path
]).stdout
.getvalue().strip())
121 self
.assertEqual(sval
, cval
)
123 def _verify_clone_root(self
, source_path
, clone_path
, clone
, clone_group
, clone_pool
):
124 # verifies following clone root attrs quota, data_pool and pool_namespace
125 # remaining attributes of clone root are validated in _verify_clone_attrs
127 clone_info
= json
.loads(self
._get
_subvolume
_info
(self
.volname
, clone
, clone_group
))
129 # verify quota is inherited from source snapshot
130 src_quota
= self
.mount_a
.getfattr(source_path
, "ceph.quota.max_bytes")
131 # FIXME: kclient fails to get this quota value: https://tracker.ceph.com/issues/48075
132 if isinstance(self
.mount_a
, FuseMount
):
133 self
.assertEqual(clone_info
["bytes_quota"], "infinite" if src_quota
is None else int(src_quota
))
136 # verify pool is set as per request
137 self
.assertEqual(clone_info
["data_pool"], clone_pool
)
139 # verify pool and pool namespace are inherited from snapshot
140 self
.assertEqual(clone_info
["data_pool"],
141 self
.mount_a
.getfattr(source_path
, "ceph.dir.layout.pool"))
142 self
.assertEqual(clone_info
["pool_namespace"],
143 self
.mount_a
.getfattr(source_path
, "ceph.dir.layout.pool_namespace"))
145 def _verify_clone(self
, subvolume
, snapshot
, clone
,
146 source_group
=None, clone_group
=None, clone_pool
=None,
147 subvol_path
=None, source_version
=2, timo
=120):
148 # pass in subvol_path (subvolume path when snapshot was taken) when subvolume is removed
149 # but snapshots are retained for clone verification
150 path1
= self
._get
_subvolume
_snapshot
_path
(subvolume
, snapshot
, source_group
, subvol_path
, source_version
)
151 path2
= self
._get
_subvolume
_path
(self
.volname
, clone
, group_name
=clone_group
)
154 # TODO: currently snapshot rentries are not stable if snapshot source entries
155 # are removed, https://tracker.ceph.com/issues/46747
156 while check
< timo
and subvol_path
is None:
157 val1
= int(self
.mount_a
.getfattr(path1
, "ceph.dir.rentries"))
158 val2
= int(self
.mount_a
.getfattr(path2
, "ceph.dir.rentries"))
163 self
.assertTrue(check
< timo
)
165 self
._verify
_clone
_root
(path1
, path2
, clone
, clone_group
, clone_pool
)
166 self
._verify
_clone
_attrs
(path1
, path2
)
def _generate_random_volume_name(self, count=1):
    """Return `count` fresh volume names (a single string when count == 1).

    Names are sequential from `self.volume_start`, which is advanced so a
    later call in the same test never repeats a name.
    """
    start = self.volume_start
    # Look the prefix up via `self` (inherited class attribute) instead of
    # naming the TestVolumes subclass from this helper base class — same
    # value, but the helper no longer depends on a specific subclass.
    volumes = [f"{self.TEST_VOLUME_PREFIX}_{i:016}" for i in range(start, start + count)]
    self.volume_start += count
    return volumes[0] if count == 1 else volumes
def _generate_random_subvolume_name(self, count=1):
    """Return `count` fresh subvolume names (a single string when count == 1).

    Names are sequential from `self.subvolume_start`, which is advanced so a
    later call in the same test never repeats a name.
    """
    start = self.subvolume_start
    # Look the prefix up via `self` (inherited class attribute) instead of
    # naming the TestVolumes subclass from this helper base class.
    subvolumes = [f"{self.TEST_SUBVOLUME_PREFIX}_{i:016}" for i in range(start, start + count)]
    self.subvolume_start += count
    return subvolumes[0] if count == 1 else subvolumes
180 def _generate_random_group_name(self
, count
=1):
182 groups
= [f
"{TestVolumes.TEST_GROUP_PREFIX}_{i:016}" for i
in range(n
, n
+count
)]
183 self
.group_start
+= count
184 return groups
[0] if count
== 1 else groups
def _generate_random_snapshot_name(self, count=1):
    """Return `count` fresh snapshot names (a single string when count == 1).

    Names are sequential from `self.snapshot_start`, which is advanced so a
    later call in the same test never repeats a name.
    """
    start = self.snapshot_start
    # Look the prefix up via `self` (inherited class attribute) instead of
    # naming the TestVolumes subclass from this helper base class.
    snaps = [f"{self.TEST_SNAPSHOT_PREFIX}_{i:016}" for i in range(start, start + count)]
    self.snapshot_start += count
    return snaps[0] if count == 1 else snaps
192 def _generate_random_clone_name(self
, count
=1):
194 clones
= [f
"{TestVolumes.TEST_CLONE_PREFIX}_{i:016}" for i
in range(n
, n
+count
)]
195 self
.clone_start
+= count
196 return clones
[0] if count
== 1 else clones
def _enable_multi_fs(self):
    """Allow more than one file system to exist on this cluster."""
    self._fs_cmd("flag", "set", "enable_multiple", "true",
                 "--yes-i-really-mean-it")
201 def _create_or_reuse_test_volume(self
):
202 result
= json
.loads(self
._fs
_cmd
("volume", "ls"))
204 self
.vol_created
= True
205 self
.volname
= self
._generate
_random
_volume
_name
()
206 self
._fs
_cmd
("volume", "create", self
.volname
)
208 self
.volname
= result
[0]['name']
def _get_subvolume_group_path(self, vol_name, group_name):
    """Return the mount-relative path of a subvolume group.

    The raw `fs subvolumegroup getpath` output carries a leading '/' and
    trailing whitespace; both are stripped before returning.
    """
    raw = self._fs_cmd("subvolumegroup", "getpath", vol_name, group_name)
    return raw[1:].rstrip()
def _get_subvolume_path(self, vol_name, subvol_name, group_name=None):
    """Return the mount-relative path of a subvolume, optionally within a group."""
    cmd = ["subvolume", "getpath", vol_name, subvol_name]
    if group_name is not None:
        cmd.append(group_name)
    raw = self._fs_cmd(*cmd)
    # drop the leading '/' and any trailing whitespace
    return raw[1:].rstrip()
225 def _get_subvolume_info(self
, vol_name
, subvol_name
, group_name
=None):
226 args
= ["subvolume", "info", vol_name
, subvol_name
]
228 args
.append(group_name
)
230 subvol_md
= self
._fs
_cmd
(*args
)
233 def _get_subvolume_snapshot_info(self
, vol_name
, subvol_name
, snapname
, group_name
=None):
234 args
= ["subvolume", "snapshot", "info", vol_name
, subvol_name
, snapname
]
236 args
.append(group_name
)
238 snap_md
= self
._fs
_cmd
(*args
)
def _delete_test_volume(self):
    """Remove the volume this test run created or reused."""
    self._fs_cmd("volume", "rm", self.volname,
                 "--yes-i-really-mean-it")
244 def _do_subvolume_pool_and_namespace_update(self
, subvolume
, pool
=None, pool_namespace
=None, subvolume_group
=None):
245 subvolpath
= self
._get
_subvolume
_path
(self
.volname
, subvolume
, group_name
=subvolume_group
)
248 self
.mount_a
.setfattr(subvolpath
, 'ceph.dir.layout.pool', pool
, sudo
=True)
250 if pool_namespace
is not None:
251 self
.mount_a
.setfattr(subvolpath
, 'ceph.dir.layout.pool_namespace', pool_namespace
, sudo
=True)
def _do_subvolume_attr_update(self, subvolume, uid, gid, mode, subvolume_group=None):
    """Change mode and ownership of a subvolume's root directory through the mount."""
    path = self._get_subvolume_path(self.volname, subvolume,
                                    group_name=subvolume_group)

    # permission bits first, then ownership (uid, then gid)
    self.mount_a.run_shell(['chmod', mode, path], sudo=True)
    self.mount_a.run_shell(['chown', uid, path], sudo=True)
    self.mount_a.run_shell(['chgrp', gid, path], sudo=True)
263 def _do_subvolume_io(self
, subvolume
, subvolume_group
=None, create_dir
=None,
264 number_of_files
=DEFAULT_NUMBER_OF_FILES
, file_size
=DEFAULT_FILE_SIZE
):
265 # get subvolume path for IO
266 args
= ["subvolume", "getpath", self
.volname
, subvolume
]
268 args
.append(subvolume_group
)
270 subvolpath
= self
._fs
_cmd
(*args
)
271 self
.assertNotEqual(subvolpath
, None)
272 subvolpath
= subvolpath
[1:].rstrip() # remove "/" prefix and any trailing newline
276 io_path
= os
.path
.join(subvolpath
, create_dir
)
277 self
.mount_a
.run_shell_payload(f
"mkdir -p {io_path}")
279 log
.debug("filling subvolume {0} with {1} files each {2}MB size under directory {3}".format(subvolume
, number_of_files
, file_size
, io_path
))
280 for i
in range(number_of_files
):
281 filename
= "{0}.{1}".format(TestVolumes
.TEST_FILE_NAME_PREFIX
, i
)
282 self
.mount_a
.write_n_mb(os
.path
.join(io_path
, filename
), file_size
)
def _do_subvolume_io_mixed(self, subvolume, subvolume_group=None):
    """Populate a subvolume with a mixed set of entries: a directory plus two symlinks."""
    root = self._get_subvolume_path(self.volname, subvolume,
                                    group_name=subvolume_group)

    target = "regfile.0"
    subdir = os.path.join(root, "dir.0")
    link_top = os.path.join(root, "sym.0")
    # ownership of this nested symlink gets flipped below
    link_nested = os.path.join(subdir, "sym.0")

    self.mount_a.run_shell(["mkdir", subdir])
    for link in (link_top, link_nested):
        self.mount_a.run_shell(["ln", "-s", "./{}".format(target), link])
    # flip ownership to nobody. assumption: nobody's id is 65534
    self.mount_a.run_shell(["chown", "-h", "65534:65534", link_nested],
                           sudo=True, omit_sudo=False)
def _wait_for_trash_empty(self, timeout=30):
    """Wait until the volume trash directory has been purged.

    There is no mgr [sub]volume interface for the trash, so the path is
    built by hand relative to the mount root.
    """
    trashdir = os.path.join("./", "volumes", "_deleting")
    self.mount_a.wait_for_dir_empty(trashdir, timeout=timeout)
305 def _assert_meta_location_and_version(self
, vol_name
, subvol_name
, subvol_group
=None, version
=2, legacy
=False):
307 subvol_path
= self
._get
_subvolume
_path
(vol_name
, subvol_name
, group_name
=subvol_group
)
309 m
.update(("/"+subvol_path
).encode('utf-8'))
310 meta_filename
= "{0}.meta".format(m
.digest().hex())
311 metapath
= os
.path
.join(".", "volumes", "_legacy", meta_filename
)
313 group
= subvol_group
if subvol_group
is not None else '_nogroup'
314 metapath
= os
.path
.join(".", "volumes", group
, subvol_name
, ".meta")
316 out
= self
.mount_a
.run_shell(['cat', metapath
], sudo
=True)
317 lines
= out
.stdout
.getvalue().strip().split('\n')
320 if line
== "version = " + str(version
):
323 self
.assertEqual(sv_version
, version
, "version expected was '{0}' but got '{1}' from meta file at '{2}'".format(
324 version
, sv_version
, metapath
))
326 def _create_v1_subvolume(self
, subvol_name
, subvol_group
=None, has_snapshot
=True, subvol_type
='subvolume', state
='complete'):
327 group
= subvol_group
if subvol_group
is not None else '_nogroup'
328 basepath
= os
.path
.join("volumes", group
, subvol_name
)
329 uuid_str
= str(uuid
.uuid4())
330 createpath
= os
.path
.join(basepath
, uuid_str
)
331 self
.mount_a
.run_shell(['mkdir', '-p', createpath
], sudo
=True)
333 # create a v1 snapshot, to prevent auto upgrades
335 snappath
= os
.path
.join(createpath
, ".snap", "fake")
336 self
.mount_a
.run_shell(['mkdir', '-p', snappath
], sudo
=True)
338 # add required xattrs to subvolume
339 default_pool
= self
.mount_a
.getfattr(".", "ceph.dir.layout.pool")
340 self
.mount_a
.setfattr(createpath
, 'ceph.dir.layout.pool', default_pool
, sudo
=True)
342 # create a v1 .meta file
343 meta_contents
= "[GLOBAL]\nversion = 1\ntype = {0}\npath = {1}\nstate = {2}\n".format(subvol_type
, "/" + createpath
, state
)
344 if state
== 'pending':
345 # add a fake clone source
346 meta_contents
= meta_contents
+ '[source]\nvolume = fake\nsubvolume = fake\nsnapshot = fake\n'
347 meta_filepath1
= os
.path
.join(self
.mount_a
.mountpoint
, basepath
, ".meta")
348 self
.mount_a
.client_remote
.write_file(meta_filepath1
, meta_contents
, sudo
=True)
def _update_fake_trash(self, subvol_name, subvol_group=None, trash_name='fake', create=True):
    """Create (or, with create=False, remove) a fake entry in a subvolume's trash dir."""
    group = subvol_group if subvol_group is not None else '_nogroup'
    trashpath = os.path.join("volumes", group, subvol_name, '.trash', trash_name)
    if create:
        self.mount_a.run_shell(['mkdir', '-p', trashpath], sudo=True)
    else:
        self.mount_a.run_shell(['rmdir', trashpath], sudo=True)
359 def _configure_guest_auth(self
, guest_mount
, authid
, key
):
361 Set up auth credentials for a guest client.
363 # Create keyring file for the guest client.
364 keyring_txt
= dedent("""
368 """.format(authid
=authid
,key
=key
))
370 guest_mount
.client_id
= authid
371 guest_mount
.client_remote
.write_file(guest_mount
.get_keyring_path(),
372 keyring_txt
, sudo
=True)
373 # Add a guest client section to the ceph config file.
374 self
.config_set("client.{0}".format(authid
), "debug client", 20)
375 self
.config_set("client.{0}".format(authid
), "debug objecter", 20)
376 self
.set_conf("client.{0}".format(authid
),
377 "keyring", guest_mount
.get_keyring_path())
379 def _auth_metadata_get(self
, filedata
):
381 Return a deserialized JSON object, or None
384 data
= json
.loads(filedata
)
385 except json
.decoder
.JSONDecodeError
:
390 super(TestVolumesHelper
, self
).setUp()
392 self
.vol_created
= False
393 self
._enable
_multi
_fs
()
394 self
._create
_or
_reuse
_test
_volume
()
395 self
.config_set('mon', 'mon_allow_pool_delete', True)
396 self
.volume_start
= random
.randint(1, (1<<20))
397 self
.subvolume_start
= random
.randint(1, (1<<20))
398 self
.group_start
= random
.randint(1, (1<<20))
399 self
.snapshot_start
= random
.randint(1, (1<<20))
400 self
.clone_start
= random
.randint(1, (1<<20))
404 self
._delete
_test
_volume
()
405 super(TestVolumesHelper
, self
).tearDown()
408 class TestVolumes(TestVolumesHelper
):
409 """Tests for FS volume operations."""
def test_volume_create(self):
    """
    That the volume can be created and then cleans up
    """
    volname = self._generate_random_volume_name()
    self._fs_cmd("volume", "create", volname)

    # the new name must show up in `fs volume ls`
    volumels = json.loads(self._fs_cmd("volume", "ls"))
    listed = [volume['name'] for volume in volumels]
    if volname not in listed:
        raise RuntimeError("Error creating volume '{0}'".format(volname))

    # clean up
    self._fs_cmd("volume", "rm", volname, "--yes-i-really-mean-it")
def test_volume_ls(self):
    """
    That the existing and the newly created volumes can be listed and
    then cleaned up.
    """
    vls = json.loads(self._fs_cmd("volume", "ls"))
    volumes = [volume['name'] for volume in vls]

    # create new volumes and add them to the expected list
    volumenames = self._generate_random_volume_name(2)
    for name in volumenames:
        self._fs_cmd("volume", "create", name)
    volumes.extend(volumenames)

    # list volumes again and compare
    volumels = json.loads(self._fs_cmd('volume', 'ls'))
    if len(volumels) == 0:
        raise RuntimeError("Expected the 'fs volume ls' command to list the created volumes.")
    volnames = [volume['name'] for volume in volumels]
    # order-insensitive, multiplicity-sensitive comparison
    if collections.Counter(volnames) != collections.Counter(volumes):
        raise RuntimeError("Error creating or listing volumes")

    # clean up the volumes created by this test
    for name in volumenames:
        self._fs_cmd("volume", "rm", name, "--yes-i-really-mean-it")
452 def test_volume_rm(self
):
454 That the volume can only be removed when --yes-i-really-mean-it is used
455 and verify that the deleted volume is not listed anymore.
457 for m
in self
.mounts
:
460 self
._fs
_cmd
("volume", "rm", self
.volname
)
461 except CommandFailedError
as ce
:
462 if ce
.exitstatus
!= errno
.EPERM
:
463 raise RuntimeError("expected the 'fs volume rm' command to fail with EPERM, "
464 "but it failed with {0}".format(ce
.exitstatus
))
466 self
._fs
_cmd
("volume", "rm", self
.volname
, "--yes-i-really-mean-it")
469 volumes
= json
.loads(self
._fs
_cmd
("volume", "ls", "--format=json-pretty"))
470 if (self
.volname
in [volume
['name'] for volume
in volumes
]):
471 raise RuntimeError("Expected the 'fs volume rm' command to succeed. "
472 "The volume {0} not removed.".format(self
.volname
))
474 raise RuntimeError("expected the 'fs volume rm' command to fail.")
476 def test_volume_rm_arbitrary_pool_removal(self
):
478 That the arbitrary pool added to the volume out of band is removed
479 successfully on volume removal.
481 for m
in self
.mounts
:
483 new_pool
= "new_pool"
484 # add arbitrary data pool
485 self
.fs
.add_data_pool(new_pool
)
486 vol_status
= json
.loads(self
._fs
_cmd
("status", self
.volname
, "--format=json-pretty"))
487 self
._fs
_cmd
("volume", "rm", self
.volname
, "--yes-i-really-mean-it")
490 volumes
= json
.loads(self
._fs
_cmd
("volume", "ls", "--format=json-pretty"))
491 volnames
= [volume
['name'] for volume
in volumes
]
492 self
.assertNotIn(self
.volname
, volnames
)
494 #check if osd pools are gone
495 pools
= json
.loads(self
._raw
_cmd
("osd", "pool", "ls", "--format=json-pretty"))
496 for pool
in vol_status
["pools"]:
497 self
.assertNotIn(pool
["name"], pools
)
499 def test_volume_rm_when_mon_delete_pool_false(self
):
501 That the volume can only be removed when mon_allowd_pool_delete is set
502 to true and verify that the pools are removed after volume deletion.
504 for m
in self
.mounts
:
506 self
.config_set('mon', 'mon_allow_pool_delete', False)
508 self
._fs
_cmd
("volume", "rm", self
.volname
, "--yes-i-really-mean-it")
509 except CommandFailedError
as ce
:
510 self
.assertEqual(ce
.exitstatus
, errno
.EPERM
,
511 "expected the 'fs volume rm' command to fail with EPERM, "
512 "but it failed with {0}".format(ce
.exitstatus
))
513 vol_status
= json
.loads(self
._fs
_cmd
("status", self
.volname
, "--format=json-pretty"))
514 self
.config_set('mon', 'mon_allow_pool_delete', True)
515 self
._fs
_cmd
("volume", "rm", self
.volname
, "--yes-i-really-mean-it")
518 volumes
= json
.loads(self
._fs
_cmd
("volume", "ls", "--format=json-pretty"))
519 volnames
= [volume
['name'] for volume
in volumes
]
520 self
.assertNotIn(self
.volname
, volnames
,
521 "volume {0} exists after removal".format(self
.volname
))
522 #check if pools are gone
523 pools
= json
.loads(self
._raw
_cmd
("osd", "pool", "ls", "--format=json-pretty"))
524 for pool
in vol_status
["pools"]:
525 self
.assertNotIn(pool
["name"], pools
,
526 "pool {0} exists after volume removal".format(pool
["name"]))
528 def test_volume_rename(self
):
530 That volume, its file system and pools, can be renamed.
532 for m
in self
.mounts
:
534 oldvolname
= self
.volname
535 newvolname
= self
._generate
_random
_volume
_name
()
536 new_data_pool
, new_metadata_pool
= f
"cephfs.{newvolname}.data", f
"cephfs.{newvolname}.meta"
537 self
._fs
_cmd
("volume", "rename", oldvolname
, newvolname
,
538 "--yes-i-really-mean-it")
539 volumels
= json
.loads(self
._fs
_cmd
('volume', 'ls'))
540 volnames
= [volume
['name'] for volume
in volumels
]
541 # volume name changed
542 self
.assertIn(newvolname
, volnames
)
543 self
.assertNotIn(oldvolname
, volnames
)
545 self
.fs
.get_pool_names(refresh
=True)
546 self
.assertEqual(new_metadata_pool
, self
.fs
.get_metadata_pool_name())
547 self
.assertEqual(new_data_pool
, self
.fs
.get_data_pool_name())
549 def test_volume_rename_idempotency(self
):
551 That volume rename is idempotent.
553 for m
in self
.mounts
:
555 oldvolname
= self
.volname
556 newvolname
= self
._generate
_random
_volume
_name
()
557 new_data_pool
, new_metadata_pool
= f
"cephfs.{newvolname}.data", f
"cephfs.{newvolname}.meta"
558 self
._fs
_cmd
("volume", "rename", oldvolname
, newvolname
,
559 "--yes-i-really-mean-it")
560 self
._fs
_cmd
("volume", "rename", oldvolname
, newvolname
,
561 "--yes-i-really-mean-it")
562 volumels
= json
.loads(self
._fs
_cmd
('volume', 'ls'))
563 volnames
= [volume
['name'] for volume
in volumels
]
564 self
.assertIn(newvolname
, volnames
)
565 self
.assertNotIn(oldvolname
, volnames
)
566 self
.fs
.get_pool_names(refresh
=True)
567 self
.assertEqual(new_metadata_pool
, self
.fs
.get_metadata_pool_name())
568 self
.assertEqual(new_data_pool
, self
.fs
.get_data_pool_name())
570 def test_volume_rename_fails_without_confirmation_flag(self
):
572 That renaming volume fails without --yes-i-really-mean-it flag.
574 newvolname
= self
._generate
_random
_volume
_name
()
576 self
._fs
_cmd
("volume", "rename", self
.volname
, newvolname
)
577 except CommandFailedError
as ce
:
578 self
.assertEqual(ce
.exitstatus
, errno
.EPERM
,
579 "invalid error code on renaming a FS volume without the "
580 "'--yes-i-really-mean-it' flag")
582 self
.fail("expected renaming of FS volume to fail without the "
583 "'--yes-i-really-mean-it' flag")
585 def test_volume_rename_for_more_than_one_data_pool(self
):
587 That renaming a volume with more than one data pool does not change
588 the name of the data pools.
590 for m
in self
.mounts
:
592 self
.fs
.add_data_pool('another-data-pool')
593 oldvolname
= self
.volname
594 newvolname
= self
._generate
_random
_volume
_name
()
595 self
.fs
.get_pool_names(refresh
=True)
596 orig_data_pool_names
= list(self
.fs
.data_pools
.values())
597 new_metadata_pool
= f
"cephfs.{newvolname}.meta"
598 self
._fs
_cmd
("volume", "rename", self
.volname
, newvolname
,
599 "--yes-i-really-mean-it")
600 volumels
= json
.loads(self
._fs
_cmd
('volume', 'ls'))
601 volnames
= [volume
['name'] for volume
in volumels
]
602 # volume name changed
603 self
.assertIn(newvolname
, volnames
)
604 self
.assertNotIn(oldvolname
, volnames
)
605 self
.fs
.get_pool_names(refresh
=True)
606 # metadata pool name changed
607 self
.assertEqual(new_metadata_pool
, self
.fs
.get_metadata_pool_name())
608 # data pool names unchanged
609 self
.assertCountEqual(orig_data_pool_names
, list(self
.fs
.data_pools
.values()))
612 class TestSubvolumeGroups(TestVolumesHelper
):
613 """Tests for FS subvolume group operations."""
614 def test_default_uid_gid_subvolume_group(self
):
615 group
= self
._generate
_random
_group
_name
()
620 self
._fs
_cmd
("subvolumegroup", "create", self
.volname
, group
)
621 group_path
= self
._get
_subvolume
_group
_path
(self
.volname
, group
)
623 # check group's uid and gid
624 stat
= self
.mount_a
.stat(group_path
)
625 self
.assertEqual(stat
['st_uid'], expected_uid
)
626 self
.assertEqual(stat
['st_gid'], expected_gid
)
629 self
._fs
_cmd
("subvolumegroup", "rm", self
.volname
, group
)
631 def test_nonexistent_subvolume_group_create(self
):
632 subvolume
= self
._generate
_random
_subvolume
_name
()
633 group
= "non_existent_group"
635 # try, creating subvolume in a nonexistent group
637 self
._fs
_cmd
("subvolume", "create", self
.volname
, subvolume
, "--group_name", group
)
638 except CommandFailedError
as ce
:
639 if ce
.exitstatus
!= errno
.ENOENT
:
642 raise RuntimeError("expected the 'fs subvolume create' command to fail")
644 def test_nonexistent_subvolume_group_rm(self
):
645 group
= "non_existent_group"
647 # try, remove subvolume group
649 self
._fs
_cmd
("subvolumegroup", "rm", self
.volname
, group
)
650 except CommandFailedError
as ce
:
651 if ce
.exitstatus
!= errno
.ENOENT
:
654 raise RuntimeError("expected the 'fs subvolumegroup rm' command to fail")
656 def test_subvolume_group_create_with_auto_cleanup_on_fail(self
):
657 group
= self
._generate
_random
_group
_name
()
658 data_pool
= "invalid_pool"
659 # create group with invalid data pool layout
660 with self
.assertRaises(CommandFailedError
):
661 self
._fs
_cmd
("subvolumegroup", "create", self
.volname
, group
, "--pool_layout", data_pool
)
663 # check whether group path is cleaned up
665 self
._fs
_cmd
("subvolumegroup", "getpath", self
.volname
, group
)
666 except CommandFailedError
as ce
:
667 if ce
.exitstatus
!= errno
.ENOENT
:
670 raise RuntimeError("expected the 'fs subvolumegroup getpath' command to fail")
672 def test_subvolume_group_create_with_desired_data_pool_layout(self
):
673 group1
, group2
= self
._generate
_random
_group
_name
(2)
676 self
._fs
_cmd
("subvolumegroup", "create", self
.volname
, group1
)
677 group1_path
= self
._get
_subvolume
_group
_path
(self
.volname
, group1
)
679 default_pool
= self
.mount_a
.getfattr(group1_path
, "ceph.dir.layout.pool")
680 new_pool
= "new_pool"
681 self
.assertNotEqual(default_pool
, new_pool
)
684 newid
= self
.fs
.add_data_pool(new_pool
)
686 # create group specifying the new data pool as its pool layout
687 self
._fs
_cmd
("subvolumegroup", "create", self
.volname
, group2
,
688 "--pool_layout", new_pool
)
689 group2_path
= self
._get
_subvolume
_group
_path
(self
.volname
, group2
)
691 desired_pool
= self
.mount_a
.getfattr(group2_path
, "ceph.dir.layout.pool")
693 self
.assertEqual(desired_pool
, new_pool
)
694 except AssertionError:
695 self
.assertEqual(int(desired_pool
), newid
) # old kernel returns id
697 self
._fs
_cmd
("subvolumegroup", "rm", self
.volname
, group1
)
698 self
._fs
_cmd
("subvolumegroup", "rm", self
.volname
, group2
)
def test_subvolume_group_create_with_desired_mode(self):
    """Creating a group with --mode applies it; the default group and the
    volumes root keep the default mode."""
    group1, group2 = self._generate_random_group_name(2)
    expected_mode1 = "755"   # default mode
    expected_mode2 = "777"   # explicitly requested mode

    # group2 gets an explicit mode; group1 uses the default
    self._fs_cmd("subvolumegroup", "create", self.volname, group2, f"--mode={expected_mode2}")
    self._fs_cmd("subvolumegroup", "create", self.volname, group1)

    group1_path = self._get_subvolume_group_path(self.volname, group1)
    group2_path = self._get_subvolume_group_path(self.volname, group2)
    volumes_path = os.path.dirname(group1_path)

    # read the octal mode of each directory via stat on the mount
    def observed_mode(path):
        return self.mount_a.run_shell(['stat', '-c' '%a', path]).stdout.getvalue().strip()

    self.assertEqual(observed_mode(group1_path), expected_mode1)
    self.assertEqual(observed_mode(group2_path), expected_mode2)
    self.assertEqual(observed_mode(volumes_path), expected_mode1)

    self._fs_cmd("subvolumegroup", "rm", self.volname, group1)
    self._fs_cmd("subvolumegroup", "rm", self.volname, group2)
726 def test_subvolume_group_create_with_desired_uid_gid(self
):
728 That the subvolume group can be created with the desired uid and gid and its uid and gid matches the
734 # create subvolume group
735 subvolgroupname
= self
._generate
_random
_group
_name
()
736 self
._fs
_cmd
("subvolumegroup", "create", self
.volname
, subvolgroupname
, "--uid", str(uid
), "--gid", str(gid
))
738 # make sure it exists
739 subvolgrouppath
= self
._get
_subvolume
_group
_path
(self
.volname
, subvolgroupname
)
740 self
.assertNotEqual(subvolgrouppath
, None)
742 # verify the uid and gid
743 suid
= int(self
.mount_a
.run_shell(['stat', '-c' '%u', subvolgrouppath
]).stdout
.getvalue().strip())
744 sgid
= int(self
.mount_a
.run_shell(['stat', '-c' '%g', subvolgrouppath
]).stdout
.getvalue().strip())
745 self
.assertEqual(uid
, suid
)
746 self
.assertEqual(gid
, sgid
)
749 self
._fs
_cmd
("subvolumegroup", "rm", self
.volname
, subvolgroupname
)
751 def test_subvolume_group_create_with_invalid_data_pool_layout(self
):
752 group
= self
._generate
_random
_group
_name
()
753 data_pool
= "invalid_pool"
754 # create group with invalid data pool layout
756 self
._fs
_cmd
("subvolumegroup", "create", self
.volname
, group
, "--pool_layout", data_pool
)
757 except CommandFailedError
as ce
:
758 if ce
.exitstatus
!= errno
.EINVAL
:
761 raise RuntimeError("expected the 'fs subvolumegroup create' command to fail")
def test_subvolume_group_ls(self):
    # tests the 'fs subvolumegroup ls' command

    # create a few subvolume groups
    subvolumegroups = self._generate_random_group_name(3)
    for groupname in subvolumegroups:
        self._fs_cmd("subvolumegroup", "create", self.volname, groupname)

    # the listing must be non-empty and match what was created
    subvolumegroupls = json.loads(self._fs_cmd('subvolumegroup', 'ls', self.volname))
    if len(subvolumegroupls) == 0:
        raise RuntimeError("Expected the 'fs subvolumegroup ls' command to list the created subvolume groups")
    listed = [subvolumegroup['name'] for subvolumegroup in subvolumegroupls]
    if collections.Counter(listed) != collections.Counter(subvolumegroups):
        raise RuntimeError("Error creating or listing subvolume groups")
781 def test_subvolume_group_ls_filter(self
):
782 # tests the 'fs subvolumegroup ls' command filters '_deleting' directory
786 #create subvolumegroup
787 subvolumegroups
= self
._generate
_random
_group
_name
(3)
788 for groupname
in subvolumegroups
:
789 self
._fs
_cmd
("subvolumegroup", "create", self
.volname
, groupname
)
791 # create subvolume and remove. This creates '_deleting' directory.
792 subvolume
= self
._generate
_random
_subvolume
_name
()
793 self
._fs
_cmd
("subvolume", "create", self
.volname
, subvolume
)
794 self
._fs
_cmd
("subvolume", "rm", self
.volname
, subvolume
)
796 subvolumegroupls
= json
.loads(self
._fs
_cmd
('subvolumegroup', 'ls', self
.volname
))
797 subvolgroupnames
= [subvolumegroup
['name'] for subvolumegroup
in subvolumegroupls
]
798 if "_deleting" in subvolgroupnames
:
799 self
.fail("Listing subvolume groups listed '_deleting' directory")
def test_subvolume_group_ls_for_nonexistent_volume(self):
    # tests the 'fs subvolumegroup ls' command when no group exists
    # prerequisite: the test volume exists and no subvolumegroup was created

    # listing a volume with no groups must yield an empty list
    subvolumegroupls = json.loads(self._fs_cmd('subvolumegroup', 'ls', self.volname))
    if subvolumegroupls:
        raise RuntimeError("Expected the 'fs subvolumegroup ls' command to output an empty list")
def test_subvolumegroup_pin_distributed(self):
    """Distributed ephemeral pinning on a group spreads its subvolume subtrees across ranks."""
    self.fs.set_max_mds(2)
    status = self.fs.wait_for_daemons()
    self.config_set('mds', 'mds_export_ephemeral_distributed', True)

    # restored: the group-name assignment was dropped from this block;
    # 'group' is used by every command below
    group = "pinme"
    self._fs_cmd("subvolumegroup", "create", self.volname, group)
    self._fs_cmd("subvolumegroup", "pin", self.volname, group, "distributed", "True")
    subvolumes = self._generate_random_subvolume_name(50)
    for subvolume in subvolumes:
        self._fs_cmd("subvolume", "create", self.volname, subvolume, "--group_name", group)
    # 2 ranks, expect 2*2 distributed subtrees
    self._wait_distributed_subtrees(2 * 2, status=status, rank="all")

    # remove subvolumes
    for subvolume in subvolumes:
        self._fs_cmd("subvolume", "rm", self.volname, subvolume, group)

    # verify trash dir is clean
    self._wait_for_trash_empty()
def test_subvolume_group_rm_force(self):
    """Removing a non-existent subvolume group with --force must succeed."""
    group = self._generate_random_group_name()
    # restored: the 'try:' line was dropped; the except clause below requires it
    try:
        self._fs_cmd("subvolumegroup", "rm", self.volname, group, "--force")
    except CommandFailedError:
        raise RuntimeError("expected the 'fs subvolumegroup rm --force' command to succeed")
839 class TestSubvolumes(TestVolumesHelper
):
840 """Tests for FS subvolume operations, except snapshot and snapshot clone."""
def test_async_subvolume_rm(self):
    """Bulk-remove many subvolumes and verify the async purge empties the trash."""
    subvolumes = self._generate_random_subvolume_name(100)

    # create subvolumes and fill each with a little data
    for subvolume in subvolumes:
        self._fs_cmd("subvolume", "create", self.volname, subvolume, "--mode=777")
        self._do_subvolume_io(subvolume, number_of_files=10)

    self.mount_a.umount_wait()

    # remove all of them
    for subvolume in subvolumes:
        self._fs_cmd("subvolume", "rm", self.volname, subvolume)

    self.mount_a.mount_wait()

    # verify trash dir is clean; purge of 100 subvolumes needs a longer timeout
    self._wait_for_trash_empty(timeout=300)
def test_default_uid_gid_subvolume(self):
    """A subvolume created without --uid/--gid defaults to root ownership (0:0)."""
    subvolume = self._generate_random_subvolume_name()
    # restored: the expected-ownership assignments were dropped from this block;
    # both names are asserted against below
    expected_uid = 0
    expected_gid = 0

    # create subvolume
    self._fs_cmd("subvolume", "create", self.volname, subvolume)
    subvol_path = self._get_subvolume_path(self.volname, subvolume)

    # check subvolume's uid and gid
    stat = self.mount_a.stat(subvol_path)
    self.assertEqual(stat['st_uid'], expected_uid)
    self.assertEqual(stat['st_gid'], expected_gid)

    # remove subvolume
    self._fs_cmd("subvolume", "rm", self.volname, subvolume)

    # verify trash dir is clean
    self._wait_for_trash_empty()
def test_nonexistent_subvolume_rm(self):
    """Removing a non-existent subvolume must fail with ENOENT."""
    subvolume = "non_existent_subvolume"

    # try, remove subvolume
    # restored: the 'try:'/'raise'/'else:' lines were dropped from this block
    try:
        self._fs_cmd("subvolume", "rm", self.volname, subvolume)
    except CommandFailedError as ce:
        if ce.exitstatus != errno.ENOENT:
            raise
    else:
        raise RuntimeError("expected the 'fs subvolume rm' command to fail")
def test_subvolume_create_and_rm(self):
    """Create a subvolume, verify it exists, remove it, verify it is gone."""
    # create subvolume
    subvolume = self._generate_random_subvolume_name()
    self._fs_cmd("subvolume", "create", self.volname, subvolume)

    # make sure it exists
    subvolpath = self._fs_cmd("subvolume", "getpath", self.volname, subvolume)
    self.assertNotEqual(subvolpath, None)

    # remove subvolume
    self._fs_cmd("subvolume", "rm", self.volname, subvolume)

    # getpath on the removed subvolume must now fail with ENOENT
    # restored: the 'try:'/'raise'/'else:' lines were dropped from this block
    try:
        self._fs_cmd("subvolume", "getpath", self.volname, subvolume)
    except CommandFailedError as ce:
        if ce.exitstatus != errno.ENOENT:
            raise
    else:
        raise RuntimeError("expected the 'fs subvolume getpath' command to fail. Subvolume not removed.")

    # verify trash dir is clean
    self._wait_for_trash_empty()
def test_subvolume_create_and_rm_in_group(self):
    """Create and remove a subvolume inside a non-default group."""
    subvolume = self._generate_random_subvolume_name()
    group = self._generate_random_group_name()

    # create the group first
    self._fs_cmd("subvolumegroup", "create", self.volname, group)

    # create subvolume in group
    self._fs_cmd("subvolume", "create", self.volname, subvolume, "--group_name", group)

    # remove it again
    self._fs_cmd("subvolume", "rm", self.volname, subvolume, group)

    # verify trash dir is clean
    self._wait_for_trash_empty()

    # remove the now-empty group
    self._fs_cmd("subvolumegroup", "rm", self.volname, group)
def test_subvolume_create_idempotence(self):
    """Creating a subvolume twice with the same name must be idempotent."""
    subvolume = self._generate_random_subvolume_name()
    self._fs_cmd("subvolume", "create", self.volname, subvolume)

    # try creating w/ same subvolume name -- should be idempotent
    self._fs_cmd("subvolume", "create", self.volname, subvolume)

    # clean up
    self._fs_cmd("subvolume", "rm", self.volname, subvolume)

    # verify trash dir is clean
    self._wait_for_trash_empty()
def test_subvolume_create_idempotence_resize(self):
    """Re-creating an existing subvolume with a size argument must apply the quota."""
    subvolume = self._generate_random_subvolume_name()
    self._fs_cmd("subvolume", "create", self.volname, subvolume)

    # try creating w/ same subvolume name with size -- should set quota
    self._fs_cmd("subvolume", "create", self.volname, subvolume, "1000000000")

    # get subvolume metadata and confirm the quota took effect
    subvol_info = json.loads(self._get_subvolume_info(self.volname, subvolume))
    self.assertEqual(subvol_info["bytes_quota"], 1000000000)

    # clean up
    self._fs_cmd("subvolume", "rm", self.volname, subvolume)

    # verify trash dir is clean
    self._wait_for_trash_empty()
def test_subvolume_create_idempotence_mode(self):
    """Re-creating an existing subvolume with --mode must update its mode."""
    # restored: the mode assignments were dropped from this block;
    # both names are asserted against below
    default_mode = "755"

    # create subvolume
    subvolume = self._generate_random_subvolume_name()
    self._fs_cmd("subvolume", "create", self.volname, subvolume)

    subvol_path = self._get_subvolume_path(self.volname, subvolume)

    actual_mode_1 = self.mount_a.run_shell(['stat', '-c' '%a', subvol_path]).stdout.getvalue().strip()
    self.assertEqual(actual_mode_1, default_mode)

    # try creating w/ same subvolume name with --mode 777
    new_mode = "777"
    self._fs_cmd("subvolume", "create", self.volname, subvolume, "--mode", new_mode)

    actual_mode_2 = self.mount_a.run_shell(['stat', '-c' '%a', subvol_path]).stdout.getvalue().strip()
    self.assertEqual(actual_mode_2, new_mode)

    # clean up
    self._fs_cmd("subvolume", "rm", self.volname, subvolume)

    # verify trash dir is clean
    self._wait_for_trash_empty()
def test_subvolume_create_idempotence_without_passing_mode(self):
    """Re-creating an existing subvolume without --mode resets it to the default mode."""
    # restored: the desired-mode assignment was dropped from this block;
    # the first create below passes it as --mode
    desired_mode = "777"
    subvolume = self._generate_random_subvolume_name()
    self._fs_cmd("subvolume", "create", self.volname, subvolume, "--mode", desired_mode)

    subvol_path = self._get_subvolume_path(self.volname, subvolume)

    actual_mode_1 = self.mount_a.run_shell(['stat', '-c' '%a', subvol_path]).stdout.getvalue().strip()
    self.assertEqual(actual_mode_1, desired_mode)

    # default mode
    default_mode = "755"

    # try creating w/ same subvolume name without passing --mode argument
    self._fs_cmd("subvolume", "create", self.volname, subvolume)

    actual_mode_2 = self.mount_a.run_shell(['stat', '-c' '%a', subvol_path]).stdout.getvalue().strip()
    self.assertEqual(actual_mode_2, default_mode)

    # clean up
    self._fs_cmd("subvolume", "rm", self.volname, subvolume)

    # verify trash dir is clean
    self._wait_for_trash_empty()
def test_subvolume_create_isolated_namespace(self):
    """
    Create subvolume in separate rados namespace
    """
    # create a namespace-isolated subvolume
    subvolume = self._generate_random_subvolume_name()
    self._fs_cmd("subvolume", "create", self.volname, subvolume, "--namespace-isolated")

    # its metadata must report the dedicated pool namespace
    subvol_info = json.loads(self._get_subvolume_info(self.volname, subvolume))
    self.assertNotEqual(len(subvol_info), 0)
    self.assertEqual(subvol_info["pool_namespace"], "fsvolumens_" + subvolume)

    # clean up
    self._fs_cmd("subvolume", "rm", self.volname, subvolume)

    # verify trash dir is clean
    self._wait_for_trash_empty()
def test_subvolume_create_with_auto_cleanup_on_fail(self):
    """A failed subvolume create (invalid pool layout) must auto-clean its path."""
    subvolume = self._generate_random_subvolume_name()
    data_pool = "invalid_pool"
    # create subvolume with invalid data pool layout fails
    with self.assertRaises(CommandFailedError):
        self._fs_cmd("subvolume", "create", self.volname, subvolume, "--pool_layout", data_pool)

    # check whether subvol path is cleaned up
    # restored: the 'try:'/'else:' lines were dropped from this block
    try:
        self._fs_cmd("subvolume", "getpath", self.volname, subvolume)
    except CommandFailedError as ce:
        self.assertEqual(ce.exitstatus, errno.ENOENT, "invalid error code on getpath of non-existent subvolume")
    else:
        self.fail("expected the 'fs subvolume getpath' command to fail")

    # verify trash dir is clean
    self._wait_for_trash_empty()
def test_subvolume_create_with_desired_data_pool_layout_in_group(self):
    """A subvolume created with --pool_layout must land on the requested data pool."""
    subvol1, subvol2 = self._generate_random_subvolume_name(2)
    group = self._generate_random_group_name()

    # create group. this also helps set default pool layout for subvolumes
    # created within the group.
    self._fs_cmd("subvolumegroup", "create", self.volname, group)

    # create subvolume in group.
    self._fs_cmd("subvolume", "create", self.volname, subvol1, "--group_name", group)
    subvol1_path = self._get_subvolume_path(self.volname, subvol1, group_name=group)

    default_pool = self.mount_a.getfattr(subvol1_path, "ceph.dir.layout.pool")
    new_pool = "new_pool"
    self.assertNotEqual(default_pool, new_pool)

    # add the new data pool to the filesystem
    newid = self.fs.add_data_pool(new_pool)

    # create subvolume specifying the new data pool as its pool layout
    self._fs_cmd("subvolume", "create", self.volname, subvol2, "--group_name", group,
                 "--pool_layout", new_pool)
    subvol2_path = self._get_subvolume_path(self.volname, subvol2, group_name=group)

    desired_pool = self.mount_a.getfattr(subvol2_path, "ceph.dir.layout.pool")
    # restored: the 'try:' line was dropped; the AssertionError handler below requires it
    try:
        self.assertEqual(desired_pool, new_pool)
    except AssertionError:
        self.assertEqual(int(desired_pool), newid) # old kernel returns id

    # clean up
    self._fs_cmd("subvolume", "rm", self.volname, subvol2, group)
    self._fs_cmd("subvolume", "rm", self.volname, subvol1, group)
    self._fs_cmd("subvolumegroup", "rm", self.volname, group)

    # verify trash dir is clean
    self._wait_for_trash_empty()
def test_subvolume_create_with_desired_mode(self):
    """--mode applies only to the subvolume; group and /volumes keep the default mode."""
    subvol1 = self._generate_random_subvolume_name()

    # default mode
    default_mode = "755"
    # desired mode
    desired_mode = "777"

    self._fs_cmd("subvolume", "create", self.volname, subvol1, "--mode", "777")

    subvol1_path = self._get_subvolume_path(self.volname, subvol1)

    # check subvolumegroup's mode
    subvol_par_path = os.path.dirname(subvol1_path)
    group_path = os.path.dirname(subvol_par_path)
    actual_mode1 = self.mount_a.run_shell(['stat', '-c' '%a', group_path]).stdout.getvalue().strip()
    self.assertEqual(actual_mode1, default_mode)
    # check /volumes mode
    volumes_path = os.path.dirname(group_path)
    actual_mode2 = self.mount_a.run_shell(['stat', '-c' '%a', volumes_path]).stdout.getvalue().strip()
    self.assertEqual(actual_mode2, default_mode)
    # check subvolume's mode
    actual_mode3 = self.mount_a.run_shell(['stat', '-c' '%a', subvol1_path]).stdout.getvalue().strip()
    self.assertEqual(actual_mode3, desired_mode)

    self._fs_cmd("subvolume", "rm", self.volname, subvol1)

    # verify trash dir is clean
    self._wait_for_trash_empty()
def test_subvolume_create_with_desired_mode_in_group(self):
    """Subvolumes in a group honor --mode (with or without leading zero) or default to 755."""
    subvol1, subvol2, subvol3 = self._generate_random_subvolume_name(3)
    group = self._generate_random_group_name()

    # expected modes: default for subvol1, explicit 777 for the others
    expected_mode1 = "755"
    expected_mode2 = "777"

    # create group
    self._fs_cmd("subvolumegroup", "create", self.volname, group)

    # create subvolume in group
    self._fs_cmd("subvolume", "create", self.volname, subvol1, "--group_name", group)
    self._fs_cmd("subvolume", "create", self.volname, subvol2, "--group_name", group, "--mode", "777")
    # check whether mode 0777 also works
    self._fs_cmd("subvolume", "create", self.volname, subvol3, "--group_name", group, "--mode", "0777")

    subvol1_path = self._get_subvolume_path(self.volname, subvol1, group_name=group)
    subvol2_path = self._get_subvolume_path(self.volname, subvol2, group_name=group)
    subvol3_path = self._get_subvolume_path(self.volname, subvol3, group_name=group)

    # check subvolume's mode
    actual_mode1 = self.mount_a.run_shell(['stat', '-c' '%a', subvol1_path]).stdout.getvalue().strip()
    actual_mode2 = self.mount_a.run_shell(['stat', '-c' '%a', subvol2_path]).stdout.getvalue().strip()
    actual_mode3 = self.mount_a.run_shell(['stat', '-c' '%a', subvol3_path]).stdout.getvalue().strip()
    self.assertEqual(actual_mode1, expected_mode1)
    self.assertEqual(actual_mode2, expected_mode2)
    self.assertEqual(actual_mode3, expected_mode2)

    # clean up
    self._fs_cmd("subvolume", "rm", self.volname, subvol1, group)
    self._fs_cmd("subvolume", "rm", self.volname, subvol2, group)
    self._fs_cmd("subvolume", "rm", self.volname, subvol3, group)
    self._fs_cmd("subvolumegroup", "rm", self.volname, group)

    # verify trash dir is clean
    self._wait_for_trash_empty()
def test_subvolume_create_with_desired_uid_gid(self):
    """
    That the subvolume can be created with the desired uid and gid and its uid and gid matches the
    expected values.
    """
    # restored: the uid/gid assignments were dropped from this block;
    # both are passed to create and asserted below
    uid = 1000
    gid = 1000

    # create subvolume
    subvolname = self._generate_random_subvolume_name()
    self._fs_cmd("subvolume", "create", self.volname, subvolname, "--uid", str(uid), "--gid", str(gid))

    # make sure it exists
    subvolpath = self._get_subvolume_path(self.volname, subvolname)
    self.assertNotEqual(subvolpath, None)

    # verify the uid and gid
    suid = int(self.mount_a.run_shell(['stat', '-c' '%u', subvolpath]).stdout.getvalue().strip())
    sgid = int(self.mount_a.run_shell(['stat', '-c' '%g', subvolpath]).stdout.getvalue().strip())
    self.assertEqual(uid, suid)
    self.assertEqual(gid, sgid)

    # remove subvolume
    self._fs_cmd("subvolume", "rm", self.volname, subvolname)

    # verify trash dir is clean
    self._wait_for_trash_empty()
def test_subvolume_create_with_invalid_data_pool_layout(self):
    """Creating a subvolume with an unknown pool layout must fail with EINVAL."""
    subvolume = self._generate_random_subvolume_name()
    data_pool = "invalid_pool"
    # create subvolume with invalid data pool layout
    # restored: the 'try:'/'else:' lines were dropped from this block
    try:
        self._fs_cmd("subvolume", "create", self.volname, subvolume, "--pool_layout", data_pool)
    except CommandFailedError as ce:
        self.assertEqual(ce.exitstatus, errno.EINVAL, "invalid error code on create of subvolume with invalid pool layout")
    else:
        self.fail("expected the 'fs subvolume create' command to fail")

    # verify trash dir is clean
    self._wait_for_trash_empty()
def test_subvolume_create_with_invalid_size(self):
    """Creating a subvolume with a negative size must fail with EINVAL."""
    # create subvolume with an invalid size -1
    subvolume = self._generate_random_subvolume_name()
    # restored: the 'try:'/'else:' lines were dropped from this block
    try:
        self._fs_cmd("subvolume", "create", self.volname, subvolume, "--size", "-1")
    except CommandFailedError as ce:
        self.assertEqual(ce.exitstatus, errno.EINVAL, "invalid error code on create of subvolume with invalid size")
    else:
        self.fail("expected the 'fs subvolume create' command to fail")

    # verify trash dir is clean
    self._wait_for_trash_empty()
def test_subvolume_expand(self):
    """
    That a subvolume can be expanded in size and its quota matches the expected size.
    """
    # create subvolume
    subvolname = self._generate_random_subvolume_name()
    osize = self.DEFAULT_FILE_SIZE*1024*1024
    self._fs_cmd("subvolume", "create", self.volname, subvolname, "--size", str(osize))

    # make sure it exists
    subvolpath = self._get_subvolume_path(self.volname, subvolname)
    self.assertNotEqual(subvolpath, None)

    # expand the subvolume
    # restored: the new-size assignment was dropped from this block;
    # double the original size, as the resize and assert below use it
    nsize = osize*2
    self._fs_cmd("subvolume", "resize", self.volname, subvolname, str(nsize))

    # verify the quota grew to the new size
    size = int(self.mount_a.getfattr(subvolpath, "ceph.quota.max_bytes"))
    self.assertEqual(size, nsize)

    # remove subvolume
    self._fs_cmd("subvolume", "rm", self.volname, subvolname)

    # verify trash dir is clean
    self._wait_for_trash_empty()
def test_subvolume_info(self):
    """Exercise 'fs subvolume info' before and after a quota is set."""
    # metadata keys every subvolume must report
    subvol_md = ["atime", "bytes_pcent", "bytes_quota", "bytes_used", "created_at", "ctime",
                 "data_pool", "gid", "mode", "mon_addrs", "mtime", "path", "pool_namespace",
                 "type", "uid", "features", "state"]

    # create subvolume
    subvolume = self._generate_random_subvolume_name()
    self._fs_cmd("subvolume", "create", self.volname, subvolume)

    # get subvolume metadata
    subvol_info = json.loads(self._get_subvolume_info(self.volname, subvolume))
    for md in subvol_md:
        self.assertIn(md, subvol_info, "'{0}' key not present in metadata of subvolume".format(md))

    # without a quota, percent/quota are placeholders
    self.assertEqual(subvol_info["bytes_pcent"], "undefined", "bytes_pcent should be set to undefined if quota is not set")
    self.assertEqual(subvol_info["bytes_quota"], "infinite", "bytes_quota should be set to infinite if quota is not set")
    self.assertEqual(subvol_info["pool_namespace"], "", "expected pool namespace to be empty")
    self.assertEqual(subvol_info["state"], "complete", "expected state to be complete")

    self.assertEqual(len(subvol_info["features"]), 3,
                     msg="expected 3 features, found '{0}' ({1})".format(len(subvol_info["features"]), subvol_info["features"]))
    for feature in ['snapshot-clone', 'snapshot-autoprotect', 'snapshot-retention']:
        self.assertIn(feature, subvol_info["features"], msg="expected feature '{0}' in subvolume".format(feature))

    # set a quota via resize
    nsize = self.DEFAULT_FILE_SIZE*1024*1024
    self._fs_cmd("subvolume", "resize", self.volname, subvolume, str(nsize))

    # get subvolume metadata after quota set
    subvol_info = json.loads(self._get_subvolume_info(self.volname, subvolume))
    for md in subvol_md:
        self.assertIn(md, subvol_info, "'{0}' key not present in metadata of subvolume".format(md))

    self.assertNotEqual(subvol_info["bytes_pcent"], "undefined", "bytes_pcent should not be set to undefined if quota is not set")
    self.assertEqual(subvol_info["bytes_quota"], nsize, "bytes_quota should be set to '{0}'".format(nsize))
    self.assertEqual(subvol_info["type"], "subvolume", "type should be set to subvolume")
    self.assertEqual(subvol_info["state"], "complete", "expected state to be complete")

    self.assertEqual(len(subvol_info["features"]), 3,
                     msg="expected 3 features, found '{0}' ({1})".format(len(subvol_info["features"]), subvol_info["features"]))
    for feature in ['snapshot-clone', 'snapshot-autoprotect', 'snapshot-retention']:
        self.assertIn(feature, subvol_info["features"], msg="expected feature '{0}' in subvolume".format(feature))

    # clean up
    self._fs_cmd("subvolume", "rm", self.volname, subvolume)

    # verify trash dir is clean
    self._wait_for_trash_empty()
def test_subvolume_ls(self):
    """'fs subvolume ls' lists exactly the subvolumes that were created."""
    # create a few subvolumes
    subvolumes = self._generate_random_subvolume_name(3)
    for subvolume in subvolumes:
        self._fs_cmd("subvolume", "create", self.volname, subvolume)

    # list them back
    subvolumels = json.loads(self._fs_cmd('subvolume', 'ls', self.volname))
    if len(subvolumels) == 0:
        self.fail("Expected the 'fs subvolume ls' command to list the created subvolumes.")
    subvolnames = [subvolume['name'] for subvolume in subvolumels]
    # same names, order-insensitive
    if collections.Counter(subvolnames) != collections.Counter(subvolumes):
        self.fail("Error creating or listing subvolumes")

    # clean up
    for subvolume in subvolumes:
        self._fs_cmd("subvolume", "rm", self.volname, subvolume)

    # verify trash dir is clean
    self._wait_for_trash_empty()
def test_subvolume_ls_for_notexistent_default_group(self):
    """'fs subvolume ls' is empty when the default group '_nogroup' was never created.

    Prerequisite: the volume exists and no group-less subvolume was ever created,
    so '_nogroup' does not exist.
    """
    subvolumels = json.loads(self._fs_cmd('subvolume', 'ls', self.volname))
    if len(subvolumels) > 0:
        raise RuntimeError("Expected the 'fs subvolume ls' command to output an empty list.")
def test_subvolume_marked(self):
    """
    ensure a subvolume is marked with the ceph.dir.subvolume xattr
    """
    subvolume = self._generate_random_subvolume_name()

    # create subvolume
    self._fs_cmd("subvolume", "create", self.volname, subvolume)

    # lookup subvolume
    subvolpath = self._get_subvolume_path(self.volname, subvolume)

    # subdirectory of a subvolume cannot be moved outside the subvolume once marked with
    # the xattr ceph.dir.subvolume, hence test by attempting to rename subvol path (incarnation)
    # outside the subvolume
    dstpath = os.path.join(self.mount_a.mountpoint, 'volumes', '_nogroup', 'new_subvol_location')
    srcpath = os.path.join(self.mount_a.mountpoint, subvolpath)
    # restored: the script's import/'try:'/'else:' lines were dropped from this block
    rename_script = dedent("""
        import os
        import errno
        try:
            os.rename("{src}", "{dst}")
        except OSError as e:
            if e.errno != errno.EXDEV:
                raise RuntimeError("invalid error code on renaming subvolume incarnation out of subvolume directory")
        else:
            raise RuntimeError("expected renaming subvolume incarnation out of subvolume directory to fail")
        """)
    self.mount_a.run_python(rename_script.format(src=srcpath, dst=dstpath), sudo=True)

    # remove subvolume
    self._fs_cmd("subvolume", "rm", self.volname, subvolume)

    # verify trash dir is clean
    self._wait_for_trash_empty()
def test_subvolume_pin_export(self):
    """Export-pinning a subvolume moves its subtree to the requested MDS rank."""
    self.fs.set_max_mds(2)
    status = self.fs.wait_for_daemons()

    subvolume = self._generate_random_subvolume_name()
    self._fs_cmd("subvolume", "create", self.volname, subvolume)
    self._fs_cmd("subvolume", "pin", self.volname, subvolume, "export", "1")
    path = self._fs_cmd("subvolume", "getpath", self.volname, subvolume)
    path = os.path.dirname(path) # get subvolume path

    self._get_subtrees(status=status, rank=1)
    self._wait_subtrees([(path, 1)], status=status)

    # clean up
    self._fs_cmd("subvolume", "rm", self.volname, subvolume)

    # verify trash dir is clean
    self._wait_for_trash_empty()
1386 ### authorize operations
def test_authorize_deauthorize_legacy_subvolume(self):
    """Authorize/deauthorize a guest auth ID on a legacy (hand-made) subvolume."""
    subvolume = self._generate_random_subvolume_name()
    group = self._generate_random_group_name()
    # restored: the auth-id assignment was dropped from this block;
    # 'authid' is used by every authorize/deauthorize call below
    authid = "alice"

    guest_mount = self.mount_b
    guest_mount.umount_wait()

    # emulate a old-fashioned subvolume in a custom group
    createpath = os.path.join(".", "volumes", group, subvolume)
    self.mount_a.run_shell(['mkdir', '-p', createpath], sudo=True)

    # add required xattrs to subvolume
    default_pool = self.mount_a.getfattr(".", "ceph.dir.layout.pool")
    self.mount_a.setfattr(createpath, 'ceph.dir.layout.pool', default_pool, sudo=True)

    mount_path = os.path.join("/", "volumes", group, subvolume)

    # authorize guest authID read-write access to subvolume
    key = self._fs_cmd("subvolume", "authorize", self.volname, subvolume, authid,
                       "--group_name", group, "--tenant_id", "tenant_id")

    # guest authID should exist
    existing_ids = [a['entity'] for a in self.auth_list()]
    self.assertIn("client.{0}".format(authid), existing_ids)

    # configure credentials for guest client
    self._configure_guest_auth(guest_mount, authid, key)

    # mount the subvolume, and write to it
    guest_mount.mount_wait(cephfs_mntpt=mount_path)
    guest_mount.write_n_mb("data.bin", 1)

    # authorize guest authID read access to subvolume
    key = self._fs_cmd("subvolume", "authorize", self.volname, subvolume, authid,
                       "--group_name", group, "--tenant_id", "tenant_id", "--access_level", "r")

    # guest client sees the change in access level to read only after a
    # remount of the subvolume.
    guest_mount.umount_wait()
    guest_mount.mount_wait(cephfs_mntpt=mount_path)

    # read existing content of the subvolume
    self.assertListEqual(guest_mount.ls(guest_mount.mountpoint), ["data.bin"])
    # cannot write into read-only subvolume
    with self.assertRaises(CommandFailedError):
        guest_mount.write_n_mb("rogue.bin", 1)

    # cleanup
    guest_mount.umount_wait()
    self._fs_cmd("subvolume", "deauthorize", self.volname, subvolume, authid,
                 "--group_name", group)
    # guest authID should no longer exist
    existing_ids = [a['entity'] for a in self.auth_list()]
    self.assertNotIn("client.{0}".format(authid), existing_ids)
    self._fs_cmd("subvolume", "rm", self.volname, subvolume, "--group_name", group)
    self._fs_cmd("subvolumegroup", "rm", self.volname, group)
def test_authorize_deauthorize_subvolume(self):
    """Authorize/deauthorize a guest auth ID on a manager-created subvolume."""
    subvolume = self._generate_random_subvolume_name()
    group = self._generate_random_group_name()
    # restored: the auth-id assignment was dropped from this block;
    # 'authid' is used by every authorize/deauthorize call below
    authid = "alice"

    guest_mount = self.mount_b
    guest_mount.umount_wait()

    # create group
    self._fs_cmd("subvolumegroup", "create", self.volname, group, "--mode=777")

    # create subvolume in group
    self._fs_cmd("subvolume", "create", self.volname, subvolume, "--group_name", group)
    mount_path = self._fs_cmd("subvolume", "getpath", self.volname, subvolume,
                              "--group_name", group).rstrip()

    # authorize guest authID read-write access to subvolume
    key = self._fs_cmd("subvolume", "authorize", self.volname, subvolume, authid,
                       "--group_name", group, "--tenant_id", "tenant_id")

    # guest authID should exist
    existing_ids = [a['entity'] for a in self.auth_list()]
    self.assertIn("client.{0}".format(authid), existing_ids)

    # configure credentials for guest client
    self._configure_guest_auth(guest_mount, authid, key)

    # mount the subvolume, and write to it
    guest_mount.mount_wait(cephfs_mntpt=mount_path)
    guest_mount.write_n_mb("data.bin", 1)

    # authorize guest authID read access to subvolume
    key = self._fs_cmd("subvolume", "authorize", self.volname, subvolume, authid,
                       "--group_name", group, "--tenant_id", "tenant_id", "--access_level", "r")

    # guest client sees the change in access level to read only after a
    # remount of the subvolume.
    guest_mount.umount_wait()
    guest_mount.mount_wait(cephfs_mntpt=mount_path)

    # read existing content of the subvolume
    self.assertListEqual(guest_mount.ls(guest_mount.mountpoint), ["data.bin"])
    # cannot write into read-only subvolume
    with self.assertRaises(CommandFailedError):
        guest_mount.write_n_mb("rogue.bin", 1)

    # cleanup
    guest_mount.umount_wait()
    self._fs_cmd("subvolume", "deauthorize", self.volname, subvolume, authid,
                 "--group_name", group)
    # guest authID should no longer exist
    existing_ids = [a['entity'] for a in self.auth_list()]
    self.assertNotIn("client.{0}".format(authid), existing_ids)
    self._fs_cmd("subvolume", "rm", self.volname, subvolume, "--group_name", group)
    self._fs_cmd("subvolumegroup", "rm", self.volname, group)
def test_multitenant_subvolumes(self):
    """
    That subvolume access can be restricted to a tenant.

    That metadata used to enforce tenant isolation of
    subvolumes is stored as a two-way mapping between auth
    IDs and subvolumes that they're authorized to access.
    """
    subvolume = self._generate_random_subvolume_name()
    group = self._generate_random_group_name()

    guest_mount = self.mount_b

    # Guest clients belonging to different tenants, but using the same
    # auth ID.
    # restored: the auth_id assignment and the dict structure around the
    # visible "tenant_id" entries were dropped from this block
    auth_id = "alice"
    guestclient_1 = {
        "auth_id": auth_id,
        "tenant_id": "tenant1",
    }
    guestclient_2 = {
        "auth_id": auth_id,
        "tenant_id": "tenant2",
    }

    # create group
    self._fs_cmd("subvolumegroup", "create", self.volname, group)

    # create subvolume in group
    self._fs_cmd("subvolume", "create", self.volname, subvolume, "--group_name", group)

    # Check that subvolume metadata file is created on subvolume creation.
    subvol_metadata_filename = "_{0}:{1}.meta".format(group, subvolume)
    self.assertIn(subvol_metadata_filename, guest_mount.ls("volumes"))

    # Authorize 'guestclient_1', using auth ID 'alice' and belonging to
    # 'tenant1', with 'rw' access to the volume.
    self._fs_cmd("subvolume", "authorize", self.volname, subvolume, guestclient_1["auth_id"],
                 "--group_name", group, "--tenant_id", guestclient_1["tenant_id"])

    # Check that auth metadata file for auth ID 'alice', is
    # created on authorizing 'alice' access to the subvolume.
    auth_metadata_filename = "${0}.meta".format(guestclient_1["auth_id"])
    self.assertIn(auth_metadata_filename, guest_mount.ls("volumes"))

    # Verify that the auth metadata file stores the tenant ID that the
    # auth ID belongs to, the auth ID's authorized access levels
    # for different subvolumes, versioning details, etc.
    # restored: the surrounding keys of this dict were dropped from this block;
    # only "compat_version", "tenant_id" and the per-subvolume entry were visible
    expected_auth_metadata = {
        "version": 5,
        "compat_version": 6,
        "dirty": False,
        "tenant_id": "tenant1",
        "subvolumes": {
            "{0}/{1}".format(group,subvolume): {
                "dirty": False,
                "access_level": "rw"
            }
        }
    }

    auth_metadata = self._auth_metadata_get(guest_mount.read_file("volumes/{0}".format(auth_metadata_filename)))
    self.assertGreaterEqual(auth_metadata["version"], expected_auth_metadata["version"])
    del expected_auth_metadata["version"]
    del auth_metadata["version"]
    self.assertEqual(expected_auth_metadata, auth_metadata)

    # Verify that the subvolume metadata file stores info about auth IDs
    # and their access levels to the subvolume, versioning details, etc.
    expected_subvol_metadata = {
        "version": 1,
        "compat_version": 1,
        "auths": {
            "alice": {
                "dirty": False,
                "access_level": "rw"
            }
        }
    }
    subvol_metadata = self._auth_metadata_get(guest_mount.read_file("volumes/{0}".format(subvol_metadata_filename)))

    self.assertGreaterEqual(subvol_metadata["version"], expected_subvol_metadata["version"])
    del expected_subvol_metadata["version"]
    del subvol_metadata["version"]
    self.assertEqual(expected_subvol_metadata, subvol_metadata)

    # Cannot authorize 'guestclient_2' to access the volume.
    # It uses auth ID 'alice', which has already been used by a
    # 'guestclient_1' belonging to an another tenant for accessing
    # the volume.
    # restored: the 'try:'/'else:' lines were dropped from this block
    try:
        self._fs_cmd("subvolume", "authorize", self.volname, subvolume, guestclient_2["auth_id"],
                     "--group_name", group, "--tenant_id", guestclient_2["tenant_id"])
    except CommandFailedError as ce:
        self.assertEqual(ce.exitstatus, errno.EPERM,
                         "Invalid error code returned on authorize of subvolume with same auth_id but different tenant_id")
    else:
        self.fail("expected the 'fs subvolume authorize' command to fail")

    # Check that auth metadata file is cleaned up on removing
    # auth ID's only access to a volume.
    self._fs_cmd("subvolume", "deauthorize", self.volname, subvolume, auth_id,
                 "--group_name", group)
    self.assertNotIn(auth_metadata_filename, guest_mount.ls("volumes"))

    # Check that subvolume metadata file is cleaned up on subvolume deletion.
    self._fs_cmd("subvolume", "rm", self.volname, subvolume, "--group_name", group)
    self.assertNotIn(subvol_metadata_filename, guest_mount.ls("volumes"))

    # clean up
    guest_mount.umount_wait()
    self._fs_cmd("subvolumegroup", "rm", self.volname, group)
def test_subvolume_authorized_list(self):
    """'fs subvolume authorized_list' reports every authorized ID with its access level."""
    subvolume = self._generate_random_subvolume_name()
    group = self._generate_random_group_name()
    # restored: the auth-id assignments were dropped from this block; the
    # expected list below pins them to 'alice', 'guest1' and 'guest2'
    authid1 = "alice"
    authid2 = "guest1"
    authid3 = "guest2"

    # create group
    self._fs_cmd("subvolumegroup", "create", self.volname, group)

    # create subvolume in group
    self._fs_cmd("subvolume", "create", self.volname, subvolume, "--group_name", group)

    # authorize alice authID read-write access to subvolume
    self._fs_cmd("subvolume", "authorize", self.volname, subvolume, authid1,
                 "--group_name", group)
    # authorize guest1 authID read-write access to subvolume
    self._fs_cmd("subvolume", "authorize", self.volname, subvolume, authid2,
                 "--group_name", group)
    # authorize guest2 authID read access to subvolume
    self._fs_cmd("subvolume", "authorize", self.volname, subvolume, authid3,
                 "--group_name", group, "--access_level", "r")

    # list authorized-ids of the subvolume
    expected_auth_list = [{'alice': 'rw'}, {'guest1': 'rw'}, {'guest2': 'r'}]
    auth_list = json.loads(self._fs_cmd('subvolume', 'authorized_list', self.volname, subvolume, "--group_name", group))
    self.assertCountEqual(expected_auth_list, auth_list)

    # cleanup
    self._fs_cmd("subvolume", "deauthorize", self.volname, subvolume, authid1,
                 "--group_name", group)
    self._fs_cmd("subvolume", "deauthorize", self.volname, subvolume, authid2,
                 "--group_name", group)
    self._fs_cmd("subvolume", "deauthorize", self.volname, subvolume, authid3,
                 "--group_name", group)
    self._fs_cmd("subvolume", "rm", self.volname, subvolume, "--group_name", group)
    self._fs_cmd("subvolumegroup", "rm", self.volname, group)
1655 def test_authorize_auth_id_not_created_by_mgr_volumes(self
):
1657 If the auth_id already exists and is not created by mgr plugin,
1658 it's not allowed to authorize the auth-id by default.
1661 subvolume
= self
._generate
_random
_subvolume
_name
()
1662 group
= self
._generate
_random
_group
_name
()
1665 self
.fs
.mon_manager
.raw_cluster_cmd(
1666 "auth", "get-or-create", "client.guest1",
1675 "tenant_id": "tenant1",
1679 self
._fs
_cmd
("subvolumegroup", "create", self
.volname
, group
)
1681 # create subvolume in group
1682 self
._fs
_cmd
("subvolume", "create", self
.volname
, subvolume
, "--group_name", group
)
1685 self
._fs
_cmd
("subvolume", "authorize", self
.volname
, subvolume
, guestclient_1
["auth_id"],
1686 "--group_name", group
, "--tenant_id", guestclient_1
["tenant_id"])
1687 except CommandFailedError
as ce
:
1688 self
.assertEqual(ce
.exitstatus
, errno
.EPERM
,
1689 "Invalid error code returned on authorize of subvolume for auth_id created out of band")
1691 self
.fail("expected the 'fs subvolume authorize' command to fail")
1694 self
.fs
.mon_manager
.raw_cluster_cmd("auth", "rm", "client.guest1")
1695 self
._fs
_cmd
("subvolume", "rm", self
.volname
, subvolume
, "--group_name", group
)
1696 self
._fs
_cmd
("subvolumegroup", "rm", self
.volname
, group
)
1698 def test_authorize_allow_existing_id_option(self
):
1700 If the auth_id already exists and is not created by mgr volumes,
1701 it's not allowed to authorize the auth-id by default but is
1702 allowed with option allow_existing_id.
1705 subvolume
= self
._generate
_random
_subvolume
_name
()
1706 group
= self
._generate
_random
_group
_name
()
1709 self
.fs
.mon_manager
.raw_cluster_cmd(
1710 "auth", "get-or-create", "client.guest1",
1719 "tenant_id": "tenant1",
1723 self
._fs
_cmd
("subvolumegroup", "create", self
.volname
, group
)
1725 # create subvolume in group
1726 self
._fs
_cmd
("subvolume", "create", self
.volname
, subvolume
, "--group_name", group
)
1728 # Cannot authorize 'guestclient_1' to access the volume by default,
1729 # which already exists and not created by mgr volumes but is allowed
1730 # with option 'allow_existing_id'.
1731 self
._fs
_cmd
("subvolume", "authorize", self
.volname
, subvolume
, guestclient_1
["auth_id"],
1732 "--group_name", group
, "--tenant_id", guestclient_1
["tenant_id"], "--allow-existing-id")
1735 self
._fs
_cmd
("subvolume", "deauthorize", self
.volname
, subvolume
, auth_id
,
1736 "--group_name", group
)
1737 self
.fs
.mon_manager
.raw_cluster_cmd("auth", "rm", "client.guest1")
1738 self
._fs
_cmd
("subvolume", "rm", self
.volname
, subvolume
, "--group_name", group
)
1739 self
._fs
_cmd
("subvolumegroup", "rm", self
.volname
, group
)
1741 def test_deauthorize_auth_id_after_out_of_band_update(self
):
1743 If the auth_id authorized by mgr/volumes plugin is updated
1744 out of band, the auth_id should not be deleted after a
1745 deauthorize. It should only remove caps associated with it.
1748 subvolume
= self
._generate
_random
_subvolume
_name
()
1749 group
= self
._generate
_random
_group
_name
()
1754 "tenant_id": "tenant1",
1758 self
._fs
_cmd
("subvolumegroup", "create", self
.volname
, group
)
1760 # create subvolume in group
1761 self
._fs
_cmd
("subvolume", "create", self
.volname
, subvolume
, "--group_name", group
)
1763 # Authorize 'guestclient_1' to access the subvolume.
1764 self
._fs
_cmd
("subvolume", "authorize", self
.volname
, subvolume
, guestclient_1
["auth_id"],
1765 "--group_name", group
, "--tenant_id", guestclient_1
["tenant_id"])
1767 subvol_path
= self
._fs
_cmd
("subvolume", "getpath", self
.volname
, subvolume
,
1768 "--group_name", group
).rstrip()
1770 # Update caps for guestclient_1 out of band
1771 out
= self
.fs
.mon_manager
.raw_cluster_cmd(
1772 "auth", "caps", "client.guest1",
1773 "mds", "allow rw path=/volumes/{0}, allow rw path={1}".format(group
, subvol_path
),
1774 "osd", "allow rw pool=cephfs_data",
1779 # Deauthorize guestclient_1
1780 self
._fs
_cmd
("subvolume", "deauthorize", self
.volname
, subvolume
, auth_id
, "--group_name", group
)
1782 # Validate the caps of guestclient_1 after deauthorize. It should not have deleted
1783 # guestclient_1. The mgr and mds caps should be present which was updated out of band.
1784 out
= json
.loads(self
.fs
.mon_manager
.raw_cluster_cmd("auth", "get", "client.guest1", "--format=json-pretty"))
1786 self
.assertEqual("client.guest1", out
[0]["entity"])
1787 self
.assertEqual("allow rw path=/volumes/{0}".format(group
), out
[0]["caps"]["mds"])
1788 self
.assertEqual("allow *", out
[0]["caps"]["mgr"])
1789 self
.assertNotIn("osd", out
[0]["caps"])
1792 out
= self
.fs
.mon_manager
.raw_cluster_cmd("auth", "rm", "client.guest1")
1793 self
._fs
_cmd
("subvolume", "rm", self
.volname
, subvolume
, "--group_name", group
)
1794 self
._fs
_cmd
("subvolumegroup", "rm", self
.volname
, group
)
1796 def test_recover_auth_metadata_during_authorize(self
):
1798 That auth metadata manager can recover from partial auth updates using
1799 metadata files, which store auth info and its update status info. This
1800 test validates the recovery during authorize.
1803 guest_mount
= self
.mount_b
1805 subvolume
= self
._generate
_random
_subvolume
_name
()
1806 group
= self
._generate
_random
_group
_name
()
1811 "tenant_id": "tenant1",
1815 self
._fs
_cmd
("subvolumegroup", "create", self
.volname
, group
)
1817 # create subvolume in group
1818 self
._fs
_cmd
("subvolume", "create", self
.volname
, subvolume
, "--group_name", group
)
1820 # Authorize 'guestclient_1' to access the subvolume.
1821 self
._fs
_cmd
("subvolume", "authorize", self
.volname
, subvolume
, guestclient_1
["auth_id"],
1822 "--group_name", group
, "--tenant_id", guestclient_1
["tenant_id"])
1824 # Check that auth metadata file for auth ID 'guest1', is
1825 # created on authorizing 'guest1' access to the subvolume.
1826 auth_metadata_filename
= "${0}.meta".format(guestclient_1
["auth_id"])
1827 self
.assertIn(auth_metadata_filename
, guest_mount
.ls("volumes"))
1828 expected_auth_metadata_content
= self
._auth
_metadata
_get
(self
.mount_a
.read_file("volumes/{0}".format(auth_metadata_filename
)))
1830 # Induce partial auth update state by modifying the auth metadata file,
1831 # and then run authorize again.
1832 guest_mount
.run_shell(['sed', '-i', 's/false/true/g', 'volumes/{0}'.format(auth_metadata_filename
)], sudo
=True)
1834 # Authorize 'guestclient_1' to access the subvolume.
1835 self
._fs
_cmd
("subvolume", "authorize", self
.volname
, subvolume
, guestclient_1
["auth_id"],
1836 "--group_name", group
, "--tenant_id", guestclient_1
["tenant_id"])
1838 auth_metadata_content
= self
._auth
_metadata
_get
(self
.mount_a
.read_file("volumes/{0}".format(auth_metadata_filename
)))
1839 self
.assertEqual(auth_metadata_content
, expected_auth_metadata_content
)
1842 self
._fs
_cmd
("subvolume", "deauthorize", self
.volname
, subvolume
, auth_id
, "--group_name", group
)
1843 guest_mount
.umount_wait()
1844 self
.fs
.mon_manager
.raw_cluster_cmd("auth", "rm", "client.guest1")
1845 self
._fs
_cmd
("subvolume", "rm", self
.volname
, subvolume
, "--group_name", group
)
1846 self
._fs
_cmd
("subvolumegroup", "rm", self
.volname
, group
)
1848 def test_recover_auth_metadata_during_deauthorize(self
):
1850 That auth metadata manager can recover from partial auth updates using
1851 metadata files, which store auth info and its update status info. This
1852 test validates the recovery during deauthorize.
1855 guest_mount
= self
.mount_b
1857 subvolume1
, subvolume2
= self
._generate
_random
_subvolume
_name
(2)
1858 group
= self
._generate
_random
_group
_name
()
1861 "auth_id": "guest1",
1862 "tenant_id": "tenant1",
1866 self
._fs
_cmd
("subvolumegroup", "create", self
.volname
, group
)
1868 # create subvolumes in group
1869 self
._fs
_cmd
("subvolume", "create", self
.volname
, subvolume1
, "--group_name", group
)
1870 self
._fs
_cmd
("subvolume", "create", self
.volname
, subvolume2
, "--group_name", group
)
1872 # Authorize 'guestclient_1' to access the subvolume1.
1873 self
._fs
_cmd
("subvolume", "authorize", self
.volname
, subvolume1
, guestclient_1
["auth_id"],
1874 "--group_name", group
, "--tenant_id", guestclient_1
["tenant_id"])
1876 # Check that auth metadata file for auth ID 'guest1', is
1877 # created on authorizing 'guest1' access to the subvolume1.
1878 auth_metadata_filename
= "${0}.meta".format(guestclient_1
["auth_id"])
1879 self
.assertIn(auth_metadata_filename
, guest_mount
.ls("volumes"))
1880 expected_auth_metadata_content
= self
._auth
_metadata
_get
(self
.mount_a
.read_file("volumes/{0}".format(auth_metadata_filename
)))
1882 # Authorize 'guestclient_1' to access the subvolume2.
1883 self
._fs
_cmd
("subvolume", "authorize", self
.volname
, subvolume2
, guestclient_1
["auth_id"],
1884 "--group_name", group
, "--tenant_id", guestclient_1
["tenant_id"])
1886 # Induce partial auth update state by modifying the auth metadata file,
1887 # and then run de-authorize.
1888 guest_mount
.run_shell(['sed', '-i', 's/false/true/g', 'volumes/{0}'.format(auth_metadata_filename
)], sudo
=True)
1890 # Deauthorize 'guestclient_1' to access the subvolume2.
1891 self
._fs
_cmd
("subvolume", "deauthorize", self
.volname
, subvolume2
, guestclient_1
["auth_id"],
1892 "--group_name", group
)
1894 auth_metadata_content
= self
._auth
_metadata
_get
(self
.mount_a
.read_file("volumes/{0}".format(auth_metadata_filename
)))
1895 self
.assertEqual(auth_metadata_content
, expected_auth_metadata_content
)
1898 self
._fs
_cmd
("subvolume", "deauthorize", self
.volname
, subvolume1
, "guest1", "--group_name", group
)
1899 guest_mount
.umount_wait()
1900 self
.fs
.mon_manager
.raw_cluster_cmd("auth", "rm", "client.guest1")
1901 self
._fs
_cmd
("subvolume", "rm", self
.volname
, subvolume1
, "--group_name", group
)
1902 self
._fs
_cmd
("subvolume", "rm", self
.volname
, subvolume2
, "--group_name", group
)
1903 self
._fs
_cmd
("subvolumegroup", "rm", self
.volname
, group
)
1905 def test_update_old_style_auth_metadata_to_new_during_authorize(self
):
1907 CephVolumeClient stores the subvolume data in auth metadata file with
1908 'volumes' key as there was no subvolume namespace. It doesn't makes sense
1909 with mgr/volumes. This test validates the transparent update of 'volumes'
1910 key to 'subvolumes' key in auth metadata file during authorize.
1913 guest_mount
= self
.mount_b
1915 subvolume1
, subvolume2
= self
._generate
_random
_subvolume
_name
(2)
1916 group
= self
._generate
_random
_group
_name
()
1921 "tenant_id": "tenant1",
1925 self
._fs
_cmd
("subvolumegroup", "create", self
.volname
, group
)
1927 # create subvolumes in group
1928 self
._fs
_cmd
("subvolume", "create", self
.volname
, subvolume1
, "--group_name", group
)
1929 self
._fs
_cmd
("subvolume", "create", self
.volname
, subvolume2
, "--group_name", group
)
1931 # Authorize 'guestclient_1' to access the subvolume1.
1932 self
._fs
_cmd
("subvolume", "authorize", self
.volname
, subvolume1
, guestclient_1
["auth_id"],
1933 "--group_name", group
, "--tenant_id", guestclient_1
["tenant_id"])
1935 # Check that auth metadata file for auth ID 'guest1', is
1936 # created on authorizing 'guest1' access to the subvolume1.
1937 auth_metadata_filename
= "${0}.meta".format(guestclient_1
["auth_id"])
1938 self
.assertIn(auth_metadata_filename
, guest_mount
.ls("volumes"))
1940 # Replace 'subvolumes' to 'volumes', old style auth-metadata file
1941 guest_mount
.run_shell(['sed', '-i', 's/subvolumes/volumes/g', 'volumes/{0}'.format(auth_metadata_filename
)], sudo
=True)
1943 # Authorize 'guestclient_1' to access the subvolume2. This should transparently update 'volumes' to 'subvolumes'
1944 self
._fs
_cmd
("subvolume", "authorize", self
.volname
, subvolume2
, guestclient_1
["auth_id"],
1945 "--group_name", group
, "--tenant_id", guestclient_1
["tenant_id"])
1947 expected_auth_metadata
= {
1949 "compat_version": 6,
1951 "tenant_id": "tenant1",
1953 "{0}/{1}".format(group
,subvolume1
): {
1955 "access_level": "rw"
1957 "{0}/{1}".format(group
,subvolume2
): {
1959 "access_level": "rw"
1964 auth_metadata
= self
._auth
_metadata
_get
(guest_mount
.read_file("volumes/{0}".format(auth_metadata_filename
)))
1966 self
.assertGreaterEqual(auth_metadata
["version"], expected_auth_metadata
["version"])
1967 del expected_auth_metadata
["version"]
1968 del auth_metadata
["version"]
1969 self
.assertEqual(expected_auth_metadata
, auth_metadata
)
1972 self
._fs
_cmd
("subvolume", "deauthorize", self
.volname
, subvolume1
, auth_id
, "--group_name", group
)
1973 self
._fs
_cmd
("subvolume", "deauthorize", self
.volname
, subvolume2
, auth_id
, "--group_name", group
)
1974 guest_mount
.umount_wait()
1975 self
.fs
.mon_manager
.raw_cluster_cmd("auth", "rm", "client.guest1")
1976 self
._fs
_cmd
("subvolume", "rm", self
.volname
, subvolume1
, "--group_name", group
)
1977 self
._fs
_cmd
("subvolume", "rm", self
.volname
, subvolume2
, "--group_name", group
)
1978 self
._fs
_cmd
("subvolumegroup", "rm", self
.volname
, group
)
1980 def test_update_old_style_auth_metadata_to_new_during_deauthorize(self
):
1982 CephVolumeClient stores the subvolume data in auth metadata file with
1983 'volumes' key as there was no subvolume namespace. It doesn't makes sense
1984 with mgr/volumes. This test validates the transparent update of 'volumes'
1985 key to 'subvolumes' key in auth metadata file during deauthorize.
1988 guest_mount
= self
.mount_b
1990 subvolume1
, subvolume2
= self
._generate
_random
_subvolume
_name
(2)
1991 group
= self
._generate
_random
_group
_name
()
1996 "tenant_id": "tenant1",
2000 self
._fs
_cmd
("subvolumegroup", "create", self
.volname
, group
)
2002 # create subvolumes in group
2003 self
._fs
_cmd
("subvolume", "create", self
.volname
, subvolume1
, "--group_name", group
)
2004 self
._fs
_cmd
("subvolume", "create", self
.volname
, subvolume2
, "--group_name", group
)
2006 # Authorize 'guestclient_1' to access the subvolume1.
2007 self
._fs
_cmd
("subvolume", "authorize", self
.volname
, subvolume1
, guestclient_1
["auth_id"],
2008 "--group_name", group
, "--tenant_id", guestclient_1
["tenant_id"])
2010 # Authorize 'guestclient_1' to access the subvolume2.
2011 self
._fs
_cmd
("subvolume", "authorize", self
.volname
, subvolume2
, guestclient_1
["auth_id"],
2012 "--group_name", group
, "--tenant_id", guestclient_1
["tenant_id"])
2014 # Check that auth metadata file for auth ID 'guest1', is created.
2015 auth_metadata_filename
= "${0}.meta".format(guestclient_1
["auth_id"])
2016 self
.assertIn(auth_metadata_filename
, guest_mount
.ls("volumes"))
2018 # Replace 'subvolumes' to 'volumes', old style auth-metadata file
2019 guest_mount
.run_shell(['sed', '-i', 's/subvolumes/volumes/g', 'volumes/{0}'.format(auth_metadata_filename
)], sudo
=True)
2021 # Deauthorize 'guestclient_1' to access the subvolume2. This should update 'volumes' to subvolumes'
2022 self
._fs
_cmd
("subvolume", "deauthorize", self
.volname
, subvolume2
, auth_id
, "--group_name", group
)
2024 expected_auth_metadata
= {
2026 "compat_version": 6,
2028 "tenant_id": "tenant1",
2030 "{0}/{1}".format(group
,subvolume1
): {
2032 "access_level": "rw"
2037 auth_metadata
= self
._auth
_metadata
_get
(guest_mount
.read_file("volumes/{0}".format(auth_metadata_filename
)))
2039 self
.assertGreaterEqual(auth_metadata
["version"], expected_auth_metadata
["version"])
2040 del expected_auth_metadata
["version"]
2041 del auth_metadata
["version"]
2042 self
.assertEqual(expected_auth_metadata
, auth_metadata
)
2045 self
._fs
_cmd
("subvolume", "deauthorize", self
.volname
, subvolume1
, auth_id
, "--group_name", group
)
2046 guest_mount
.umount_wait()
2047 self
.fs
.mon_manager
.raw_cluster_cmd("auth", "rm", "client.guest1")
2048 self
._fs
_cmd
("subvolume", "rm", self
.volname
, subvolume1
, "--group_name", group
)
2049 self
._fs
_cmd
("subvolume", "rm", self
.volname
, subvolume2
, "--group_name", group
)
2050 self
._fs
_cmd
("subvolumegroup", "rm", self
.volname
, group
)
2052 def test_subvolume_evict_client(self
):
2054 That a subvolume client can be evicted based on the auth ID
2057 subvolumes
= self
._generate
_random
_subvolume
_name
(2)
2058 group
= self
._generate
_random
_group
_name
()
2061 self
._fs
_cmd
("subvolumegroup", "create", self
.volname
, group
)
2063 # mounts[0] and mounts[1] would be used as guests to mount the volumes/shares.
2064 for i
in range(0, 2):
2065 self
.mounts
[i
].umount_wait()
2066 guest_mounts
= (self
.mounts
[0], self
.mounts
[1])
2070 "tenant_id": "tenant1",
2073 # Create two subvolumes. Authorize 'guest' auth ID to mount the two
2074 # subvolumes. Mount the two subvolumes. Write data to the volumes.
2077 self
._fs
_cmd
("subvolume", "create", self
.volname
, subvolumes
[i
], "--group_name", group
, "--mode=777")
2079 # authorize guest authID read-write access to subvolume
2080 key
= self
._fs
_cmd
("subvolume", "authorize", self
.volname
, subvolumes
[i
], guestclient_1
["auth_id"],
2081 "--group_name", group
, "--tenant_id", guestclient_1
["tenant_id"])
2083 mount_path
= self
._fs
_cmd
("subvolume", "getpath", self
.volname
, subvolumes
[i
],
2084 "--group_name", group
).rstrip()
2085 # configure credentials for guest client
2086 self
._configure
_guest
_auth
(guest_mounts
[i
], auth_id
, key
)
2088 # mount the subvolume, and write to it
2089 guest_mounts
[i
].mount_wait(cephfs_mntpt
=mount_path
)
2090 guest_mounts
[i
].write_n_mb("data.bin", 1)
2092 # Evict client, guest_mounts[0], using auth ID 'guest' and has mounted
2094 self
._fs
_cmd
("subvolume", "evict", self
.volname
, subvolumes
[0], auth_id
, "--group_name", group
)
2096 # Evicted guest client, guest_mounts[0], should not be able to do
2097 # anymore metadata ops. It should start failing all operations
2098 # when it sees that its own address is in the blocklist.
2100 guest_mounts
[0].write_n_mb("rogue.bin", 1)
2101 except CommandFailedError
:
2104 raise RuntimeError("post-eviction write should have failed!")
2106 # The blocklisted guest client should now be unmountable
2107 guest_mounts
[0].umount_wait()
2109 # Guest client, guest_mounts[1], using the same auth ID 'guest', but
2110 # has mounted the other volume, should be able to use its volume
2112 guest_mounts
[1].write_n_mb("data.bin.1", 1)
2115 guest_mounts
[1].umount_wait()
2117 self
._fs
_cmd
("subvolume", "deauthorize", self
.volname
, subvolumes
[i
], auth_id
, "--group_name", group
)
2118 self
._fs
_cmd
("subvolume", "rm", self
.volname
, subvolumes
[i
], "--group_name", group
)
2119 self
._fs
_cmd
("subvolumegroup", "rm", self
.volname
, group
)
2121 def test_subvolume_pin_random(self
):
2122 self
.fs
.set_max_mds(2)
2123 self
.fs
.wait_for_daemons()
2124 self
.config_set('mds', 'mds_export_ephemeral_random', True)
2126 subvolume
= self
._generate
_random
_subvolume
_name
()
2127 self
._fs
_cmd
("subvolume", "create", self
.volname
, subvolume
)
2128 self
._fs
_cmd
("subvolume", "pin", self
.volname
, subvolume
, "random", ".01")
2132 self
._fs
_cmd
("subvolume", "rm", self
.volname
, subvolume
)
2134 # verify trash dir is clean
2135 self
._wait
_for
_trash
_empty
()
2137 def test_subvolume_resize_fail_invalid_size(self
):
2139 That a subvolume cannot be resized to an invalid size and the quota did not change
2142 osize
= self
.DEFAULT_FILE_SIZE
*1024*1024
2144 subvolname
= self
._generate
_random
_subvolume
_name
()
2145 self
._fs
_cmd
("subvolume", "create", self
.volname
, subvolname
, "--size", str(osize
))
2147 # make sure it exists
2148 subvolpath
= self
._get
_subvolume
_path
(self
.volname
, subvolname
)
2149 self
.assertNotEqual(subvolpath
, None)
2151 # try to resize the subvolume with an invalid size -10
2154 self
._fs
_cmd
("subvolume", "resize", self
.volname
, subvolname
, str(nsize
))
2155 except CommandFailedError
as ce
:
2156 self
.assertEqual(ce
.exitstatus
, errno
.EINVAL
, "invalid error code on resize of subvolume with invalid size")
2158 self
.fail("expected the 'fs subvolume resize' command to fail")
2160 # verify the quota did not change
2161 size
= int(self
.mount_a
.getfattr(subvolpath
, "ceph.quota.max_bytes"))
2162 self
.assertEqual(size
, osize
)
2165 self
._fs
_cmd
("subvolume", "rm", self
.volname
, subvolname
)
2167 # verify trash dir is clean
2168 self
._wait
_for
_trash
_empty
()
2170 def test_subvolume_resize_fail_zero_size(self
):
2172 That a subvolume cannot be resized to a zero size and the quota did not change
2175 osize
= self
.DEFAULT_FILE_SIZE
*1024*1024
2177 subvolname
= self
._generate
_random
_subvolume
_name
()
2178 self
._fs
_cmd
("subvolume", "create", self
.volname
, subvolname
, "--size", str(osize
))
2180 # make sure it exists
2181 subvolpath
= self
._get
_subvolume
_path
(self
.volname
, subvolname
)
2182 self
.assertNotEqual(subvolpath
, None)
2184 # try to resize the subvolume with size 0
2187 self
._fs
_cmd
("subvolume", "resize", self
.volname
, subvolname
, str(nsize
))
2188 except CommandFailedError
as ce
:
2189 self
.assertEqual(ce
.exitstatus
, errno
.EINVAL
, "invalid error code on resize of subvolume with invalid size")
2191 self
.fail("expected the 'fs subvolume resize' command to fail")
2193 # verify the quota did not change
2194 size
= int(self
.mount_a
.getfattr(subvolpath
, "ceph.quota.max_bytes"))
2195 self
.assertEqual(size
, osize
)
2198 self
._fs
_cmd
("subvolume", "rm", self
.volname
, subvolname
)
2200 # verify trash dir is clean
2201 self
._wait
_for
_trash
_empty
()
2203 def test_subvolume_resize_quota_lt_used_size(self
):
2205 That a subvolume can be resized to a size smaller than the current used size
2206 and the resulting quota matches the expected size.
2209 osize
= self
.DEFAULT_FILE_SIZE
*1024*1024*20
2211 subvolname
= self
._generate
_random
_subvolume
_name
()
2212 self
._fs
_cmd
("subvolume", "create", self
.volname
, subvolname
, "--size", str(osize
), "--mode=777")
2214 # make sure it exists
2215 subvolpath
= self
._get
_subvolume
_path
(self
.volname
, subvolname
)
2216 self
.assertNotEqual(subvolpath
, None)
2218 # create one file of 10MB
2219 file_size
=self
.DEFAULT_FILE_SIZE
*10
2221 log
.debug("filling subvolume {0} with {1} file of size {2}MB".format(subvolname
,
2224 filename
= "{0}.{1}".format(TestVolumes
.TEST_FILE_NAME_PREFIX
, self
.DEFAULT_NUMBER_OF_FILES
+1)
2225 self
.mount_a
.write_n_mb(os
.path
.join(subvolpath
, filename
), file_size
)
2227 usedsize
= int(self
.mount_a
.getfattr(subvolpath
, "ceph.dir.rbytes"))
2228 susedsize
= int(self
.mount_a
.run_shell(['stat', '-c' '%s', subvolpath
]).stdout
.getvalue().strip())
2229 if isinstance(self
.mount_a
, FuseMount
):
2230 # kclient dir does not have size==rbytes
2231 self
.assertEqual(usedsize
, susedsize
)
2233 # shrink the subvolume
2234 nsize
= usedsize
// 2
2236 self
._fs
_cmd
("subvolume", "resize", self
.volname
, subvolname
, str(nsize
))
2237 except CommandFailedError
:
2238 self
.fail("expected the 'fs subvolume resize' command to succeed")
2241 size
= int(self
.mount_a
.getfattr(subvolpath
, "ceph.quota.max_bytes"))
2242 self
.assertEqual(size
, nsize
)
2245 self
._fs
_cmd
("subvolume", "rm", self
.volname
, subvolname
)
2247 # verify trash dir is clean
2248 self
._wait
_for
_trash
_empty
()
2250 def test_subvolume_resize_fail_quota_lt_used_size_no_shrink(self
):
2252 That a subvolume cannot be resized to a size smaller than the current used size
2253 when --no_shrink is given and the quota did not change.
2256 osize
= self
.DEFAULT_FILE_SIZE
*1024*1024*20
2258 subvolname
= self
._generate
_random
_subvolume
_name
()
2259 self
._fs
_cmd
("subvolume", "create", self
.volname
, subvolname
, "--size", str(osize
), "--mode=777")
2261 # make sure it exists
2262 subvolpath
= self
._get
_subvolume
_path
(self
.volname
, subvolname
)
2263 self
.assertNotEqual(subvolpath
, None)
2265 # create one file of 10MB
2266 file_size
=self
.DEFAULT_FILE_SIZE
*10
2268 log
.debug("filling subvolume {0} with {1} file of size {2}MB".format(subvolname
,
2271 filename
= "{0}.{1}".format(TestVolumes
.TEST_FILE_NAME_PREFIX
, self
.DEFAULT_NUMBER_OF_FILES
+2)
2272 self
.mount_a
.write_n_mb(os
.path
.join(subvolpath
, filename
), file_size
)
2274 usedsize
= int(self
.mount_a
.getfattr(subvolpath
, "ceph.dir.rbytes"))
2275 susedsize
= int(self
.mount_a
.run_shell(['stat', '-c' '%s', subvolpath
]).stdout
.getvalue().strip())
2276 if isinstance(self
.mount_a
, FuseMount
):
2277 # kclient dir does not have size==rbytes
2278 self
.assertEqual(usedsize
, susedsize
)
2280 # shrink the subvolume
2281 nsize
= usedsize
// 2
2283 self
._fs
_cmd
("subvolume", "resize", self
.volname
, subvolname
, str(nsize
), "--no_shrink")
2284 except CommandFailedError
as ce
:
2285 self
.assertEqual(ce
.exitstatus
, errno
.EINVAL
, "invalid error code on resize of subvolume with invalid size")
2287 self
.fail("expected the 'fs subvolume resize' command to fail")
2289 # verify the quota did not change
2290 size
= int(self
.mount_a
.getfattr(subvolpath
, "ceph.quota.max_bytes"))
2291 self
.assertEqual(size
, osize
)
2294 self
._fs
_cmd
("subvolume", "rm", self
.volname
, subvolname
)
2296 # verify trash dir is clean
2297 self
._wait
_for
_trash
_empty
()
2299 def test_subvolume_resize_expand_on_full_subvolume(self
):
2301 That the subvolume can be expanded from a full subvolume and future writes succeed.
2304 osize
= self
.DEFAULT_FILE_SIZE
*1024*1024*10
2305 # create subvolume of quota 10MB and make sure it exists
2306 subvolname
= self
._generate
_random
_subvolume
_name
()
2307 self
._fs
_cmd
("subvolume", "create", self
.volname
, subvolname
, "--size", str(osize
), "--mode=777")
2308 subvolpath
= self
._get
_subvolume
_path
(self
.volname
, subvolname
)
2309 self
.assertNotEqual(subvolpath
, None)
2311 # create one file of size 10MB and write
2312 file_size
=self
.DEFAULT_FILE_SIZE
*10
2314 log
.debug("filling subvolume {0} with {1} file of size {2}MB".format(subvolname
,
2317 filename
= "{0}.{1}".format(TestVolumes
.TEST_FILE_NAME_PREFIX
, self
.DEFAULT_NUMBER_OF_FILES
+3)
2318 self
.mount_a
.write_n_mb(os
.path
.join(subvolpath
, filename
), file_size
)
2320 # create a file of size 5MB and try write more
2321 file_size
=file_size
// 2
2323 log
.debug("filling subvolume {0} with {1} file of size {2}MB".format(subvolname
,
2326 filename
= "{0}.{1}".format(TestVolumes
.TEST_FILE_NAME_PREFIX
, self
.DEFAULT_NUMBER_OF_FILES
+4)
2328 self
.mount_a
.write_n_mb(os
.path
.join(subvolpath
, filename
), file_size
)
2329 except CommandFailedError
:
2330 # Not able to write. So expand the subvolume more and try writing the 5MB file again
2332 self
._fs
_cmd
("subvolume", "resize", self
.volname
, subvolname
, str(nsize
))
2334 self
.mount_a
.write_n_mb(os
.path
.join(subvolpath
, filename
), file_size
)
2335 except CommandFailedError
:
2336 self
.fail("expected filling subvolume {0} with {1} file of size {2}MB"
2337 "to succeed".format(subvolname
, number_of_files
, file_size
))
2339 self
.fail("expected filling subvolume {0} with {1} file of size {2}MB"
2340 "to fail".format(subvolname
, number_of_files
, file_size
))
2343 self
._fs
_cmd
("subvolume", "rm", self
.volname
, subvolname
)
2345 # verify trash dir is clean
2346 self
._wait
_for
_trash
_empty
()
2348 def test_subvolume_resize_infinite_size(self
):
2350 That a subvolume can be resized to an infinite size by unsetting its quota.
2354 subvolname
= self
._generate
_random
_subvolume
_name
()
2355 self
._fs
_cmd
("subvolume", "create", self
.volname
, subvolname
, "--size",
2356 str(self
.DEFAULT_FILE_SIZE
*1024*1024))
2358 # make sure it exists
2359 subvolpath
= self
._get
_subvolume
_path
(self
.volname
, subvolname
)
2360 self
.assertNotEqual(subvolpath
, None)
2363 self
._fs
_cmd
("subvolume", "resize", self
.volname
, subvolname
, "inf")
2365 # verify that the quota is None
2366 size
= self
.mount_a
.getfattr(subvolpath
, "ceph.quota.max_bytes")
2367 self
.assertEqual(size
, None)
2370 self
._fs
_cmd
("subvolume", "rm", self
.volname
, subvolname
)
2372 # verify trash dir is clean
2373 self
._wait
_for
_trash
_empty
()
2375 def test_subvolume_resize_infinite_size_future_writes(self
):
2377 That a subvolume can be resized to an infinite size and the future writes succeed.
2381 subvolname
= self
._generate
_random
_subvolume
_name
()
2382 self
._fs
_cmd
("subvolume", "create", self
.volname
, subvolname
, "--size",
2383 str(self
.DEFAULT_FILE_SIZE
*1024*1024*5), "--mode=777")
2385 # make sure it exists
2386 subvolpath
= self
._get
_subvolume
_path
(self
.volname
, subvolname
)
2387 self
.assertNotEqual(subvolpath
, None)
2390 self
._fs
_cmd
("subvolume", "resize", self
.volname
, subvolname
, "inf")
2392 # verify that the quota is None
2393 size
= self
.mount_a
.getfattr(subvolpath
, "ceph.quota.max_bytes")
2394 self
.assertEqual(size
, None)
2396 # create one file of 10MB and try to write
2397 file_size
=self
.DEFAULT_FILE_SIZE
*10
2399 log
.debug("filling subvolume {0} with {1} file of size {2}MB".format(subvolname
,
2402 filename
= "{0}.{1}".format(TestVolumes
.TEST_FILE_NAME_PREFIX
, self
.DEFAULT_NUMBER_OF_FILES
+5)
2405 self
.mount_a
.write_n_mb(os
.path
.join(subvolpath
, filename
), file_size
)
2406 except CommandFailedError
:
2407 self
.fail("expected filling subvolume {0} with {1} file of size {2}MB "
2408 "to succeed".format(subvolname
, number_of_files
, file_size
))
2411 self
._fs
_cmd
("subvolume", "rm", self
.volname
, subvolname
)
2413 # verify trash dir is clean
2414 self
._wait
_for
_trash
_empty
()
2416 def test_subvolume_rm_force(self
):
2417 # test removing non-existing subvolume with --force
2418 subvolume
= self
._generate
_random
_subvolume
_name
()
2420 self
._fs
_cmd
("subvolume", "rm", self
.volname
, subvolume
, "--force")
2421 except CommandFailedError
:
2422 self
.fail("expected the 'fs subvolume rm --force' command to succeed")
2424 def test_subvolume_shrink(self
):
2426 That a subvolume can be shrinked in size and its quota matches the expected size.
2430 subvolname
= self
._generate
_random
_subvolume
_name
()
2431 osize
= self
.DEFAULT_FILE_SIZE
*1024*1024
2432 self
._fs
_cmd
("subvolume", "create", self
.volname
, subvolname
, "--size", str(osize
))
2434 # make sure it exists
2435 subvolpath
= self
._get
_subvolume
_path
(self
.volname
, subvolname
)
2436 self
.assertNotEqual(subvolpath
, None)
2438 # shrink the subvolume
2440 self
._fs
_cmd
("subvolume", "resize", self
.volname
, subvolname
, str(nsize
))
2443 size
= int(self
.mount_a
.getfattr(subvolpath
, "ceph.quota.max_bytes"))
2444 self
.assertEqual(size
, nsize
)
2447 self
._fs
_cmd
("subvolume", "rm", self
.volname
, subvolname
)
2449 # verify trash dir is clean
2450 self
._wait
_for
_trash
_empty
()
2452 def test_subvolume_retain_snapshot_rm_idempotency(self
):
2454 ensure subvolume deletion of a subvolume which is already deleted with retain snapshots option passes.
2455 After subvolume deletion with retain snapshots, the subvolume exists until the trash directory (resides inside subvolume)
2456 is cleaned up. The subvolume deletion issued while the trash directory is not empty, should pass and should
2457 not error out with EAGAIN.
2459 subvolume
= self
._generate
_random
_subvolume
_name
()
2460 snapshot
= self
._generate
_random
_snapshot
_name
()
2463 self
._fs
_cmd
("subvolume", "create", self
.volname
, subvolume
, "--mode=777")
2466 self
._do
_subvolume
_io
(subvolume
, number_of_files
=256)
2468 # snapshot subvolume
2469 self
._fs
_cmd
("subvolume", "snapshot", "create", self
.volname
, subvolume
, snapshot
)
2471 # remove with snapshot retention
2472 self
._fs
_cmd
("subvolume", "rm", self
.volname
, subvolume
, "--retain-snapshots")
2474 # remove snapshots (removes retained volume)
2475 self
._fs
_cmd
("subvolume", "snapshot", "rm", self
.volname
, subvolume
, snapshot
)
2477 # remove subvolume (check idempotency)
2479 self
._fs
_cmd
("subvolume", "rm", self
.volname
, subvolume
)
2480 except CommandFailedError
as ce
:
2481 if ce
.exitstatus
!= errno
.ENOENT
:
2482 self
.fail(f
"expected subvolume rm to pass with error: {os.strerror(ce.exitstatus)}")
2484 # verify trash dir is clean
2485 self
._wait
_for
_trash
_empty
()
2488 def test_subvolume_user_metadata_set(self
):
2489 subvolname
= self
._generate
_random
_subvolume
_name
()
2490 group
= self
._generate
_random
_group
_name
()
2493 self
._fs
_cmd
("subvolumegroup", "create", self
.volname
, group
)
2495 # create subvolume in group.
2496 self
._fs
_cmd
("subvolume", "create", self
.volname
, subvolname
, "--group_name", group
)
2498 # set metadata for subvolume.
2502 self
._fs
_cmd
("subvolume", "metadata", "set", self
.volname
, subvolname
, key
, value
, "--group_name", group
)
2503 except CommandFailedError
:
2504 self
.fail("expected the 'fs subvolume metadata set' command to succeed")
2506 self
._fs
_cmd
("subvolume", "rm", self
.volname
, subvolname
, group
)
2507 self
._fs
_cmd
("subvolumegroup", "rm", self
.volname
, group
)
2509 # verify trash dir is clean.
2510 self
._wait
_for
_trash
_empty
()
2512 def test_subvolume_user_metadata_set_idempotence(self
):
2513 subvolname
= self
._generate
_random
_subvolume
_name
()
2514 group
= self
._generate
_random
_group
_name
()
2517 self
._fs
_cmd
("subvolumegroup", "create", self
.volname
, group
)
2519 # create subvolume in group.
2520 self
._fs
_cmd
("subvolume", "create", self
.volname
, subvolname
, "--group_name", group
)
2522 # set metadata for subvolume.
2526 self
._fs
_cmd
("subvolume", "metadata", "set", self
.volname
, subvolname
, key
, value
, "--group_name", group
)
2527 except CommandFailedError
:
2528 self
.fail("expected the 'fs subvolume metadata set' command to succeed")
2530 # set same metadata again for subvolume.
2532 self
._fs
_cmd
("subvolume", "metadata", "set", self
.volname
, subvolname
, key
, value
, "--group_name", group
)
2533 except CommandFailedError
:
2534 self
.fail("expected the 'fs subvolume metadata set' command to succeed because it is idempotent operation")
2536 self
._fs
_cmd
("subvolume", "rm", self
.volname
, subvolname
, group
)
2537 self
._fs
_cmd
("subvolumegroup", "rm", self
.volname
, group
)
2539 # verify trash dir is clean.
2540 self
._wait
_for
_trash
_empty
()
2542 def test_subvolume_user_metadata_get(self
):
2543 subvolname
= self
._generate
_random
_subvolume
_name
()
2544 group
= self
._generate
_random
_group
_name
()
2547 self
._fs
_cmd
("subvolumegroup", "create", self
.volname
, group
)
2549 # create subvolume in group.
2550 self
._fs
_cmd
("subvolume", "create", self
.volname
, subvolname
, "--group_name", group
)
2552 # set metadata for subvolume.
2555 self
._fs
_cmd
("subvolume", "metadata", "set", self
.volname
, subvolname
, key
, value
, "--group_name", group
)
2557 # get value for specified key.
2559 ret
= self
._fs
_cmd
("subvolume", "metadata", "get", self
.volname
, subvolname
, key
, "--group_name", group
)
2560 except CommandFailedError
:
2561 self
.fail("expected the 'fs subvolume metadata get' command to succeed")
2563 # remove '\n' from returned value.
2564 ret
= ret
.strip('\n')
2566 # match received value with expected value.
2567 self
.assertEqual(value
, ret
)
2569 self
._fs
_cmd
("subvolume", "rm", self
.volname
, subvolname
, group
)
2570 self
._fs
_cmd
("subvolumegroup", "rm", self
.volname
, group
)
2572 # verify trash dir is clean.
2573 self
._wait
_for
_trash
_empty
()
2575 def test_subvolume_user_metadata_get_for_nonexisting_key(self
):
2576 subvolname
= self
._generate
_random
_subvolume
_name
()
2577 group
= self
._generate
_random
_group
_name
()
2580 self
._fs
_cmd
("subvolumegroup", "create", self
.volname
, group
)
2582 # create subvolume in group.
2583 self
._fs
_cmd
("subvolume", "create", self
.volname
, subvolname
, "--group_name", group
)
2585 # set metadata for subvolume.
2588 self
._fs
_cmd
("subvolume", "metadata", "set", self
.volname
, subvolname
, key
, value
, "--group_name", group
)
2590 # try to get value for nonexisting key
2591 # Expecting ENOENT exit status because key does not exist
2593 self
._fs
_cmd
("subvolume", "metadata", "get", self
.volname
, subvolname
, "key_nonexist", "--group_name", group
)
2594 except CommandFailedError
as e
:
2595 self
.assertEqual(e
.exitstatus
, errno
.ENOENT
)
2597 self
.fail("Expected ENOENT because 'key_nonexist' does not exist")
2599 self
._fs
_cmd
("subvolume", "rm", self
.volname
, subvolname
, group
)
2600 self
._fs
_cmd
("subvolumegroup", "rm", self
.volname
, group
)
2602 # verify trash dir is clean.
2603 self
._wait
_for
_trash
_empty
()
2605 def test_subvolume_user_metadata_get_for_nonexisting_section(self
):
2606 subvolname
= self
._generate
_random
_subvolume
_name
()
2607 group
= self
._generate
_random
_group
_name
()
2610 self
._fs
_cmd
("subvolumegroup", "create", self
.volname
, group
)
2612 # create subvolume in group.
2613 self
._fs
_cmd
("subvolume", "create", self
.volname
, subvolname
, "--group_name", group
)
2615 # try to get value for nonexisting key (as section does not exist)
2616 # Expecting ENOENT exit status because key does not exist
2618 self
._fs
_cmd
("subvolume", "metadata", "get", self
.volname
, subvolname
, "key", "--group_name", group
)
2619 except CommandFailedError
as e
:
2620 self
.assertEqual(e
.exitstatus
, errno
.ENOENT
)
2622 self
.fail("Expected ENOENT because section does not exist")
2624 self
._fs
_cmd
("subvolume", "rm", self
.volname
, subvolname
, group
)
2625 self
._fs
_cmd
("subvolumegroup", "rm", self
.volname
, group
)
2627 # verify trash dir is clean.
2628 self
._wait
_for
_trash
_empty
()
2630 def test_subvolume_user_metadata_update(self
):
2631 subvolname
= self
._generate
_random
_subvolume
_name
()
2632 group
= self
._generate
_random
_group
_name
()
2635 self
._fs
_cmd
("subvolumegroup", "create", self
.volname
, group
)
2637 # create subvolume in group.
2638 self
._fs
_cmd
("subvolume", "create", self
.volname
, subvolname
, "--group_name", group
)
2640 # set metadata for subvolume.
2643 self
._fs
_cmd
("subvolume", "metadata", "set", self
.volname
, subvolname
, key
, value
, "--group_name", group
)
2645 # update metadata against key.
2646 new_value
= "new_value"
2647 self
._fs
_cmd
("subvolume", "metadata", "set", self
.volname
, subvolname
, key
, new_value
, "--group_name", group
)
2649 # get metadata for specified key of subvolume.
2651 ret
= self
._fs
_cmd
("subvolume", "metadata", "get", self
.volname
, subvolname
, key
, "--group_name", group
)
2652 except CommandFailedError
:
2653 self
.fail("expected the 'fs subvolume metadata get' command to succeed")
2655 # remove '\n' from returned value.
2656 ret
= ret
.strip('\n')
2658 # match received value with expected value.
2659 self
.assertEqual(new_value
, ret
)
2661 self
._fs
_cmd
("subvolume", "rm", self
.volname
, subvolname
, group
)
2662 self
._fs
_cmd
("subvolumegroup", "rm", self
.volname
, group
)
2664 # verify trash dir is clean.
2665 self
._wait
_for
_trash
_empty
()
2667 def test_subvolume_user_metadata_list(self
):
2668 subvolname
= self
._generate
_random
_subvolume
_name
()
2669 group
= self
._generate
_random
_group
_name
()
2672 self
._fs
_cmd
("subvolumegroup", "create", self
.volname
, group
)
2674 # create subvolume in group.
2675 self
._fs
_cmd
("subvolume", "create", self
.volname
, subvolname
, "--group_name", group
)
2677 # set metadata for subvolume.
2678 input_metadata_dict
= {f
'key_{i}' : f
'value_{i}' for i
in range(3)}
2680 for k
, v
in input_metadata_dict
.items():
2681 self
._fs
_cmd
("subvolume", "metadata", "set", self
.volname
, subvolname
, k
, v
, "--group_name", group
)
2685 ret
= self
._fs
_cmd
("subvolume", "metadata", "ls", self
.volname
, subvolname
, "--group_name", group
)
2686 except CommandFailedError
:
2687 self
.fail("expected the 'fs subvolume metadata ls' command to succeed")
2689 ret_dict
= json
.loads(ret
)
2691 # compare output with expected output
2692 self
.assertDictEqual(input_metadata_dict
, ret_dict
)
2694 self
._fs
_cmd
("subvolume", "rm", self
.volname
, subvolname
, group
)
2695 self
._fs
_cmd
("subvolumegroup", "rm", self
.volname
, group
)
2697 # verify trash dir is clean.
2698 self
._wait
_for
_trash
_empty
()
2700 def test_subvolume_user_metadata_list_if_no_metadata_set(self
):
2701 subvolname
= self
._generate
_random
_subvolume
_name
()
2702 group
= self
._generate
_random
_group
_name
()
2705 self
._fs
_cmd
("subvolumegroup", "create", self
.volname
, group
)
2707 # create subvolume in group.
2708 self
._fs
_cmd
("subvolume", "create", self
.volname
, subvolname
, "--group_name", group
)
2712 ret
= self
._fs
_cmd
("subvolume", "metadata", "ls", self
.volname
, subvolname
, "--group_name", group
)
2713 except CommandFailedError
:
2714 self
.fail("expected the 'fs subvolume metadata ls' command to succeed")
2716 # remove '\n' from returned value.
2717 ret
= ret
.strip('\n')
2719 # compare output with expected output
2720 # expecting empty json/dictionary
2721 self
.assertEqual(ret
, "{}")
2723 self
._fs
_cmd
("subvolume", "rm", self
.volname
, subvolname
, group
)
2724 self
._fs
_cmd
("subvolumegroup", "rm", self
.volname
, group
)
2726 # verify trash dir is clean.
2727 self
._wait
_for
_trash
_empty
()
2729 def test_subvolume_user_metadata_remove(self
):
2730 subvolname
= self
._generate
_random
_subvolume
_name
()
2731 group
= self
._generate
_random
_group
_name
()
2734 self
._fs
_cmd
("subvolumegroup", "create", self
.volname
, group
)
2736 # create subvolume in group.
2737 self
._fs
_cmd
("subvolume", "create", self
.volname
, subvolname
, "--group_name", group
)
2739 # set metadata for subvolume.
2742 self
._fs
_cmd
("subvolume", "metadata", "set", self
.volname
, subvolname
, key
, value
, "--group_name", group
)
2744 # remove metadata against specified key.
2746 self
._fs
_cmd
("subvolume", "metadata", "rm", self
.volname
, subvolname
, key
, "--group_name", group
)
2747 except CommandFailedError
:
2748 self
.fail("expected the 'fs subvolume metadata rm' command to succeed")
2750 # confirm key is removed by again fetching metadata
2752 self
._fs
_cmd
("subvolume", "metadata", "get", self
.volname
, subvolname
, key
, "--group_name", group
)
2753 except CommandFailedError
as e
:
2754 self
.assertEqual(e
.exitstatus
, errno
.ENOENT
)
2756 self
.fail("Expected ENOENT because key does not exist")
2758 self
._fs
_cmd
("subvolume", "rm", self
.volname
, subvolname
, group
)
2759 self
._fs
_cmd
("subvolumegroup", "rm", self
.volname
, group
)
2761 # verify trash dir is clean.
2762 self
._wait
_for
_trash
_empty
()
2764 def test_subvolume_user_metadata_remove_for_nonexisting_key(self
):
2765 subvolname
= self
._generate
_random
_subvolume
_name
()
2766 group
= self
._generate
_random
_group
_name
()
2769 self
._fs
_cmd
("subvolumegroup", "create", self
.volname
, group
)
2771 # create subvolume in group.
2772 self
._fs
_cmd
("subvolume", "create", self
.volname
, subvolname
, "--group_name", group
)
2774 # set metadata for subvolume.
2777 self
._fs
_cmd
("subvolume", "metadata", "set", self
.volname
, subvolname
, key
, value
, "--group_name", group
)
2779 # try to remove value for nonexisting key
2780 # Expecting ENOENT exit status because key does not exist
2782 self
._fs
_cmd
("subvolume", "metadata", "rm", self
.volname
, subvolname
, "key_nonexist", "--group_name", group
)
2783 except CommandFailedError
as e
:
2784 self
.assertEqual(e
.exitstatus
, errno
.ENOENT
)
2786 self
.fail("Expected ENOENT because 'key_nonexist' does not exist")
2788 self
._fs
_cmd
("subvolume", "rm", self
.volname
, subvolname
, group
)
2789 self
._fs
_cmd
("subvolumegroup", "rm", self
.volname
, group
)
2791 # verify trash dir is clean.
2792 self
._wait
_for
_trash
_empty
()
2794 def test_subvolume_user_metadata_remove_for_nonexisting_section(self
):
2795 subvolname
= self
._generate
_random
_subvolume
_name
()
2796 group
= self
._generate
_random
_group
_name
()
2799 self
._fs
_cmd
("subvolumegroup", "create", self
.volname
, group
)
2801 # create subvolume in group.
2802 self
._fs
_cmd
("subvolume", "create", self
.volname
, subvolname
, "--group_name", group
)
2804 # try to remove value for nonexisting key (as section does not exist)
2805 # Expecting ENOENT exit status because key does not exist
2807 self
._fs
_cmd
("subvolume", "metadata", "rm", self
.volname
, subvolname
, "key", "--group_name", group
)
2808 except CommandFailedError
as e
:
2809 self
.assertEqual(e
.exitstatus
, errno
.ENOENT
)
2811 self
.fail("Expected ENOENT because section does not exist")
2813 self
._fs
_cmd
("subvolume", "rm", self
.volname
, subvolname
, group
)
2814 self
._fs
_cmd
("subvolumegroup", "rm", self
.volname
, group
)
2816 # verify trash dir is clean.
2817 self
._wait
_for
_trash
_empty
()
2819 def test_subvolume_user_metadata_remove_force(self
):
2820 subvolname
= self
._generate
_random
_subvolume
_name
()
2821 group
= self
._generate
_random
_group
_name
()
2824 self
._fs
_cmd
("subvolumegroup", "create", self
.volname
, group
)
2826 # create subvolume in group.
2827 self
._fs
_cmd
("subvolume", "create", self
.volname
, subvolname
, "--group_name", group
)
2829 # set metadata for subvolume.
2832 self
._fs
_cmd
("subvolume", "metadata", "set", self
.volname
, subvolname
, key
, value
, "--group_name", group
)
2834 # remove metadata against specified key with --force option.
2836 self
._fs
_cmd
("subvolume", "metadata", "rm", self
.volname
, subvolname
, key
, "--group_name", group
, "--force")
2837 except CommandFailedError
:
2838 self
.fail("expected the 'fs subvolume metadata rm' command to succeed")
2840 # confirm key is removed by again fetching metadata
2842 self
._fs
_cmd
("subvolume", "metadata", "get", self
.volname
, subvolname
, key
, "--group_name", group
)
2843 except CommandFailedError
as e
:
2844 self
.assertEqual(e
.exitstatus
, errno
.ENOENT
)
2846 self
.fail("Expected ENOENT because key does not exist")
2848 self
._fs
_cmd
("subvolume", "rm", self
.volname
, subvolname
, group
)
2849 self
._fs
_cmd
("subvolumegroup", "rm", self
.volname
, group
)
2851 # verify trash dir is clean.
2852 self
._wait
_for
_trash
_empty
()
2854 def test_subvolume_user_metadata_remove_force_for_nonexisting_key(self
):
2855 subvolname
= self
._generate
_random
_subvolume
_name
()
2856 group
= self
._generate
_random
_group
_name
()
2859 self
._fs
_cmd
("subvolumegroup", "create", self
.volname
, group
)
2861 # create subvolume in group.
2862 self
._fs
_cmd
("subvolume", "create", self
.volname
, subvolname
, "--group_name", group
)
2864 # set metadata for subvolume.
2867 self
._fs
_cmd
("subvolume", "metadata", "set", self
.volname
, subvolname
, key
, value
, "--group_name", group
)
2869 # remove metadata against specified key.
2871 self
._fs
_cmd
("subvolume", "metadata", "rm", self
.volname
, subvolname
, key
, "--group_name", group
)
2872 except CommandFailedError
:
2873 self
.fail("expected the 'fs subvolume metadata rm' command to succeed")
2875 # confirm key is removed by again fetching metadata
2877 self
._fs
_cmd
("subvolume", "metadata", "get", self
.volname
, subvolname
, key
, "--group_name", group
)
2878 except CommandFailedError
as e
:
2879 self
.assertEqual(e
.exitstatus
, errno
.ENOENT
)
2881 self
.fail("Expected ENOENT because key does not exist")
2883 # again remove metadata against already removed key with --force option.
2885 self
._fs
_cmd
("subvolume", "metadata", "rm", self
.volname
, subvolname
, key
, "--group_name", group
, "--force")
2886 except CommandFailedError
:
2887 self
.fail("expected the 'fs subvolume metadata rm' (with --force) command to succeed")
2889 self
._fs
_cmd
("subvolume", "rm", self
.volname
, subvolname
, group
)
2890 self
._fs
_cmd
("subvolumegroup", "rm", self
.volname
, group
)
2892 # verify trash dir is clean.
2893 self
._wait
_for
_trash
_empty
()
2895 def test_subvolume_user_metadata_set_and_get_for_legacy_subvolume(self
):
2896 subvolname
= self
._generate
_random
_subvolume
_name
()
2897 group
= self
._generate
_random
_group
_name
()
2899 # emulate a old-fashioned subvolume in a custom group
2900 createpath
= os
.path
.join(".", "volumes", group
, subvolname
)
2901 self
.mount_a
.run_shell(['mkdir', '-p', createpath
], sudo
=True)
2903 # set metadata for subvolume.
2907 self
._fs
_cmd
("subvolume", "metadata", "set", self
.volname
, subvolname
, key
, value
, "--group_name", group
)
2908 except CommandFailedError
:
2909 self
.fail("expected the 'fs subvolume metadata set' command to succeed")
2911 # get value for specified key.
2913 ret
= self
._fs
_cmd
("subvolume", "metadata", "get", self
.volname
, subvolname
, key
, "--group_name", group
)
2914 except CommandFailedError
:
2915 self
.fail("expected the 'fs subvolume metadata get' command to succeed")
2917 # remove '\n' from returned value.
2918 ret
= ret
.strip('\n')
2920 # match received value with expected value.
2921 self
.assertEqual(value
, ret
)
2923 self
._fs
_cmd
("subvolume", "rm", self
.volname
, subvolname
, group
)
2924 self
._fs
_cmd
("subvolumegroup", "rm", self
.volname
, group
)
2926 # verify trash dir is clean.
2927 self
._wait
_for
_trash
_empty
()
2929 def test_subvolume_user_metadata_list_and_remove_for_legacy_subvolume(self
):
2930 subvolname
= self
._generate
_random
_subvolume
_name
()
2931 group
= self
._generate
_random
_group
_name
()
2933 # emulate a old-fashioned subvolume in a custom group
2934 createpath
= os
.path
.join(".", "volumes", group
, subvolname
)
2935 self
.mount_a
.run_shell(['mkdir', '-p', createpath
], sudo
=True)
2937 # set metadata for subvolume.
2938 input_metadata_dict
= {f
'key_{i}' : f
'value_{i}' for i
in range(3)}
2940 for k
, v
in input_metadata_dict
.items():
2941 self
._fs
_cmd
("subvolume", "metadata", "set", self
.volname
, subvolname
, k
, v
, "--group_name", group
)
2945 ret
= self
._fs
_cmd
("subvolume", "metadata", "ls", self
.volname
, subvolname
, "--group_name", group
)
2946 except CommandFailedError
:
2947 self
.fail("expected the 'fs subvolume metadata ls' command to succeed")
2949 ret_dict
= json
.loads(ret
)
2951 # compare output with expected output
2952 self
.assertDictEqual(input_metadata_dict
, ret_dict
)
2954 # remove metadata against specified key.
2956 self
._fs
_cmd
("subvolume", "metadata", "rm", self
.volname
, subvolname
, "key_1", "--group_name", group
)
2957 except CommandFailedError
:
2958 self
.fail("expected the 'fs subvolume metadata rm' command to succeed")
2960 # confirm key is removed by again fetching metadata
2962 self
._fs
_cmd
("subvolume", "metadata", "get", self
.volname
, subvolname
, "key_1", "--group_name", group
)
2963 except CommandFailedError
as e
:
2964 self
.assertEqual(e
.exitstatus
, errno
.ENOENT
)
2966 self
.fail("Expected ENOENT because key_1 does not exist")
2968 self
._fs
_cmd
("subvolume", "rm", self
.volname
, subvolname
, group
)
2969 self
._fs
_cmd
("subvolumegroup", "rm", self
.volname
, group
)
2971 # verify trash dir is clean.
2972 self
._wait
_for
_trash
_empty
()
class TestSubvolumeGroupSnapshots(TestVolumesHelper):
    """Tests for FS subvolume group snapshot operations."""
    @unittest.skip("skipping subvolumegroup snapshot tests")
    def test_nonexistent_subvolume_group_snapshot_rm(self):
        subvolume = self._generate_random_subvolume_name()
        group = self._generate_random_group_name()
        snapshot = self._generate_random_snapshot_name()

        # create group
        self._fs_cmd("subvolumegroup", "create", self.volname, group)

        # create subvolume in group
        self._fs_cmd("subvolume", "create", self.volname, subvolume, "--group_name", group)

        # snapshot group
        self._fs_cmd("subvolumegroup", "snapshot", "create", self.volname, group, snapshot)

        # remove snapshot
        self._fs_cmd("subvolumegroup", "snapshot", "rm", self.volname, group, snapshot)

        # remove the same snapshot again; only ENOENT is acceptable
        try:
            self._fs_cmd("subvolumegroup", "snapshot", "rm", self.volname, group, snapshot)
        except CommandFailedError as ce:
            if ce.exitstatus != errno.ENOENT:
                raise
        else:
            raise RuntimeError("expected the 'fs subvolumegroup snapshot rm' command to fail")

        # remove subvolume
        self._fs_cmd("subvolume", "rm", self.volname, subvolume, group)

        # verify trash dir is clean
        self._wait_for_trash_empty()

        # remove group
        self._fs_cmd("subvolumegroup", "rm", self.volname, group)

    @unittest.skip("skipping subvolumegroup snapshot tests")
    def test_subvolume_group_snapshot_create_and_rm(self):
        subvolume = self._generate_random_subvolume_name()
        group = self._generate_random_group_name()
        snapshot = self._generate_random_snapshot_name()

        # create group
        self._fs_cmd("subvolumegroup", "create", self.volname, group)

        # create subvolume in group
        self._fs_cmd("subvolume", "create", self.volname, subvolume, "--group_name", group)

        # snapshot group
        self._fs_cmd("subvolumegroup", "snapshot", "create", self.volname, group, snapshot)

        # remove snapshot
        self._fs_cmd("subvolumegroup", "snapshot", "rm", self.volname, group, snapshot)

        # remove subvolume
        self._fs_cmd("subvolume", "rm", self.volname, subvolume, group)

        # verify trash dir is clean
        self._wait_for_trash_empty()

        # remove group
        self._fs_cmd("subvolumegroup", "rm", self.volname, group)

    @unittest.skip("skipping subvolumegroup snapshot tests")
    def test_subvolume_group_snapshot_idempotence(self):
        subvolume = self._generate_random_subvolume_name()
        group = self._generate_random_group_name()
        snapshot = self._generate_random_snapshot_name()

        # create group
        self._fs_cmd("subvolumegroup", "create", self.volname, group)

        # create subvolume in group
        self._fs_cmd("subvolume", "create", self.volname, subvolume, "--group_name", group)

        # snapshot group
        self._fs_cmd("subvolumegroup", "snapshot", "create", self.volname, group, snapshot)

        # try creating snapshot w/ same snapshot name -- shoule be idempotent
        self._fs_cmd("subvolumegroup", "snapshot", "create", self.volname, group, snapshot)

        # remove snapshot
        self._fs_cmd("subvolumegroup", "snapshot", "rm", self.volname, group, snapshot)

        # remove subvolume
        self._fs_cmd("subvolume", "rm", self.volname, subvolume, group)

        # verify trash dir is clean
        self._wait_for_trash_empty()

        # remove group
        self._fs_cmd("subvolumegroup", "rm", self.volname, group)

    @unittest.skip("skipping subvolumegroup snapshot tests")
    def test_subvolume_group_snapshot_ls(self):
        # tests the 'fs subvolumegroup snapshot ls' command

        # create group
        group = self._generate_random_group_name()
        self._fs_cmd("subvolumegroup", "create", self.volname, group)

        # create subvolumegroup snapshots
        snapshots = self._generate_random_snapshot_name(3)
        for snapshot in snapshots:
            self._fs_cmd("subvolumegroup", "snapshot", "create", self.volname, group, snapshot)

        subvolgrpsnapshotls = json.loads(self._fs_cmd('subvolumegroup', 'snapshot', 'ls', self.volname, group))
        if len(subvolgrpsnapshotls) == 0:
            raise RuntimeError("Expected the 'fs subvolumegroup snapshot ls' command to list the created subvolume group snapshots")
        else:
            snapshotnames = [snapshot['name'] for snapshot in subvolgrpsnapshotls]
            if collections.Counter(snapshotnames) != collections.Counter(snapshots):
                raise RuntimeError("Error creating or listing subvolume group snapshots")

    @unittest.skip("skipping subvolumegroup snapshot tests")
    def test_subvolume_group_snapshot_rm_force(self):
        # test removing non-existing subvolume group snapshot with --force
        group = self._generate_random_group_name()
        snapshot = self._generate_random_snapshot_name()

        # remove snapshot
        try:
            self._fs_cmd("subvolumegroup", "snapshot", "rm", self.volname, group, snapshot, "--force")
        except CommandFailedError:
            raise RuntimeError("expected the 'fs subvolumegroup snapshot rm --force' command to succeed")

    def test_subvolume_group_snapshot_unsupported_status(self):
        group = self._generate_random_group_name()
        snapshot = self._generate_random_snapshot_name()

        # create group
        self._fs_cmd("subvolumegroup", "create", self.volname, group)

        # snapshot group -- expected to be unsupported (ENOSYS)
        try:
            self._fs_cmd("subvolumegroup", "snapshot", "create", self.volname, group, snapshot)
        except CommandFailedError as ce:
            self.assertEqual(ce.exitstatus, errno.ENOSYS, "invalid error code on subvolumegroup snapshot create")
        else:
            self.fail("expected subvolumegroup snapshot create command to fail")

        # remove group
        self._fs_cmd("subvolumegroup", "rm", self.volname, group)
3122 class TestSubvolumeSnapshots(TestVolumesHelper
):
3123 """Tests for FS subvolume snapshot operations."""
3124 def test_nonexistent_subvolume_snapshot_rm(self
):
3125 subvolume
= self
._generate
_random
_subvolume
_name
()
3126 snapshot
= self
._generate
_random
_snapshot
_name
()
3129 self
._fs
_cmd
("subvolume", "create", self
.volname
, subvolume
)
3131 # snapshot subvolume
3132 self
._fs
_cmd
("subvolume", "snapshot", "create", self
.volname
, subvolume
, snapshot
)
3135 self
._fs
_cmd
("subvolume", "snapshot", "rm", self
.volname
, subvolume
, snapshot
)
3137 # remove snapshot again
3139 self
._fs
_cmd
("subvolume", "snapshot", "rm", self
.volname
, subvolume
, snapshot
)
3140 except CommandFailedError
as ce
:
3141 if ce
.exitstatus
!= errno
.ENOENT
:
3144 raise RuntimeError("expected the 'fs subvolume snapshot rm' command to fail")
3147 self
._fs
_cmd
("subvolume", "rm", self
.volname
, subvolume
)
3149 # verify trash dir is clean
3150 self
._wait
_for
_trash
_empty
()
3152 def test_subvolume_snapshot_create_and_rm(self
):
3153 subvolume
= self
._generate
_random
_subvolume
_name
()
3154 snapshot
= self
._generate
_random
_snapshot
_name
()
3157 self
._fs
_cmd
("subvolume", "create", self
.volname
, subvolume
)
3159 # snapshot subvolume
3160 self
._fs
_cmd
("subvolume", "snapshot", "create", self
.volname
, subvolume
, snapshot
)
3163 self
._fs
_cmd
("subvolume", "snapshot", "rm", self
.volname
, subvolume
, snapshot
)
3166 self
._fs
_cmd
("subvolume", "rm", self
.volname
, subvolume
)
3168 # verify trash dir is clean
3169 self
._wait
_for
_trash
_empty
()
3171 def test_subvolume_snapshot_create_idempotence(self
):
3172 subvolume
= self
._generate
_random
_subvolume
_name
()
3173 snapshot
= self
._generate
_random
_snapshot
_name
()
3176 self
._fs
_cmd
("subvolume", "create", self
.volname
, subvolume
)
3178 # snapshot subvolume
3179 self
._fs
_cmd
("subvolume", "snapshot", "create", self
.volname
, subvolume
, snapshot
)
3181 # try creating w/ same subvolume snapshot name -- should be idempotent
3182 self
._fs
_cmd
("subvolume", "snapshot", "create", self
.volname
, subvolume
, snapshot
)
3185 self
._fs
_cmd
("subvolume", "snapshot", "rm", self
.volname
, subvolume
, snapshot
)
3188 self
._fs
_cmd
("subvolume", "rm", self
.volname
, subvolume
)
3190 # verify trash dir is clean
3191 self
._wait
_for
_trash
_empty
()
3193 def test_subvolume_snapshot_info(self
):
3196 tests the 'fs subvolume snapshot info' command
3199 snap_md
= ["created_at", "data_pool", "has_pending_clones", "size"]
3201 subvolume
= self
._generate
_random
_subvolume
_name
()
3202 snapshot
, snap_missing
= self
._generate
_random
_snapshot
_name
(2)
3205 self
._fs
_cmd
("subvolume", "create", self
.volname
, subvolume
, "--mode=777")
3208 self
._do
_subvolume
_io
(subvolume
, number_of_files
=1)
3210 # snapshot subvolume
3211 self
._fs
_cmd
("subvolume", "snapshot", "create", self
.volname
, subvolume
, snapshot
)
3213 snap_info
= json
.loads(self
._get
_subvolume
_snapshot
_info
(self
.volname
, subvolume
, snapshot
))
3215 self
.assertIn(md
, snap_info
, "'{0}' key not present in metadata of snapshot".format(md
))
3216 self
.assertEqual(snap_info
["has_pending_clones"], "no")
3218 # snapshot info for non-existent snapshot
3220 self
._get
_subvolume
_snapshot
_info
(self
.volname
, subvolume
, snap_missing
)
3221 except CommandFailedError
as ce
:
3222 self
.assertEqual(ce
.exitstatus
, errno
.ENOENT
, "invalid error code on snapshot info of non-existent snapshot")
3224 self
.fail("expected snapshot info of non-existent snapshot to fail")
3227 self
._fs
_cmd
("subvolume", "snapshot", "rm", self
.volname
, subvolume
, snapshot
)
3230 self
._fs
_cmd
("subvolume", "rm", self
.volname
, subvolume
)
3232 # verify trash dir is clean
3233 self
._wait
_for
_trash
_empty
()
3235 def test_subvolume_snapshot_in_group(self
):
3236 subvolume
= self
._generate
_random
_subvolume
_name
()
3237 group
= self
._generate
_random
_group
_name
()
3238 snapshot
= self
._generate
_random
_snapshot
_name
()
3241 self
._fs
_cmd
("subvolumegroup", "create", self
.volname
, group
)
3243 # create subvolume in group
3244 self
._fs
_cmd
("subvolume", "create", self
.volname
, subvolume
, "--group_name", group
)
3246 # snapshot subvolume in group
3247 self
._fs
_cmd
("subvolume", "snapshot", "create", self
.volname
, subvolume
, snapshot
, group
)
3250 self
._fs
_cmd
("subvolume", "snapshot", "rm", self
.volname
, subvolume
, snapshot
, group
)
3253 self
._fs
_cmd
("subvolume", "rm", self
.volname
, subvolume
, group
)
3255 # verify trash dir is clean
3256 self
._wait
_for
_trash
_empty
()
3259 self
._fs
_cmd
("subvolumegroup", "rm", self
.volname
, group
)
3261 def test_subvolume_snapshot_ls(self
):
3262 # tests the 'fs subvolume snapshot ls' command
3267 subvolume
= self
._generate
_random
_subvolume
_name
()
3268 self
._fs
_cmd
("subvolume", "create", self
.volname
, subvolume
)
3270 # create subvolume snapshots
3271 snapshots
= self
._generate
_random
_snapshot
_name
(3)
3272 for snapshot
in snapshots
:
3273 self
._fs
_cmd
("subvolume", "snapshot", "create", self
.volname
, subvolume
, snapshot
)
3275 subvolsnapshotls
= json
.loads(self
._fs
_cmd
('subvolume', 'snapshot', 'ls', self
.volname
, subvolume
))
3276 if len(subvolsnapshotls
) == 0:
3277 self
.fail("Expected the 'fs subvolume snapshot ls' command to list the created subvolume snapshots")
3279 snapshotnames
= [snapshot
['name'] for snapshot
in subvolsnapshotls
]
3280 if collections
.Counter(snapshotnames
) != collections
.Counter(snapshots
):
3281 self
.fail("Error creating or listing subvolume snapshots")
3284 for snapshot
in snapshots
:
3285 self
._fs
_cmd
("subvolume", "snapshot", "rm", self
.volname
, subvolume
, snapshot
)
3288 self
._fs
_cmd
("subvolume", "rm", self
.volname
, subvolume
)
3290 # verify trash dir is clean
3291 self
._wait
_for
_trash
_empty
()
3293 def test_subvolume_inherited_snapshot_ls(self
):
3294 # tests the scenario where 'fs subvolume snapshot ls' command
3295 # should not list inherited snapshots created as part of snapshot
3296 # at ancestral level
3299 subvolume
= self
._generate
_random
_subvolume
_name
()
3300 group
= self
._generate
_random
_group
_name
()
3304 self
._fs
_cmd
("subvolumegroup", "create", self
.volname
, group
)
3306 # create subvolume in group
3307 self
._fs
_cmd
("subvolume", "create", self
.volname
, subvolume
, "--group_name", group
)
3309 # create subvolume snapshots
3310 snapshots
= self
._generate
_random
_snapshot
_name
(snap_count
)
3311 for snapshot
in snapshots
:
3312 self
._fs
_cmd
("subvolume", "snapshot", "create", self
.volname
, subvolume
, snapshot
, group
)
3314 # Create snapshot at ancestral level
3315 ancestral_snappath1
= os
.path
.join(".", "volumes", group
, ".snap", "ancestral_snap_1")
3316 ancestral_snappath2
= os
.path
.join(".", "volumes", group
, ".snap", "ancestral_snap_2")
3317 self
.mount_a
.run_shell(['mkdir', '-p', ancestral_snappath1
, ancestral_snappath2
], sudo
=True)
3319 subvolsnapshotls
= json
.loads(self
._fs
_cmd
('subvolume', 'snapshot', 'ls', self
.volname
, subvolume
, group
))
3320 self
.assertEqual(len(subvolsnapshotls
), snap_count
)
3322 # remove ancestral snapshots
3323 self
.mount_a
.run_shell(['rmdir', ancestral_snappath1
, ancestral_snappath2
], sudo
=True)
3326 for snapshot
in snapshots
:
3327 self
._fs
_cmd
("subvolume", "snapshot", "rm", self
.volname
, subvolume
, snapshot
, group
)
3330 self
._fs
_cmd
("subvolume", "rm", self
.volname
, subvolume
, group
)
3332 # verify trash dir is clean
3333 self
._wait
_for
_trash
_empty
()
3336 self
._fs
_cmd
("subvolumegroup", "rm", self
.volname
, group
)
3338 def test_subvolume_inherited_snapshot_info(self
):
3340 tests the scenario where 'fs subvolume snapshot info' command
3341 should fail for inherited snapshots created as part of snapshot
3345 subvolume
= self
._generate
_random
_subvolume
_name
()
3346 group
= self
._generate
_random
_group
_name
()
3349 self
._fs
_cmd
("subvolumegroup", "create", self
.volname
, group
)
3351 # create subvolume in group
3352 self
._fs
_cmd
("subvolume", "create", self
.volname
, subvolume
, "--group_name", group
)
3354 # Create snapshot at ancestral level
3355 ancestral_snap_name
= "ancestral_snap_1"
3356 ancestral_snappath1
= os
.path
.join(".", "volumes", group
, ".snap", ancestral_snap_name
)
3357 self
.mount_a
.run_shell(['mkdir', '-p', ancestral_snappath1
], sudo
=True)
3359 # Validate existence of inherited snapshot
3360 group_path
= os
.path
.join(".", "volumes", group
)
3361 inode_number_group_dir
= int(self
.mount_a
.run_shell(['stat', '-c' '%i', group_path
]).stdout
.getvalue().strip())
3362 inherited_snap
= "_{0}_{1}".format(ancestral_snap_name
, inode_number_group_dir
)
3363 inherited_snappath
= os
.path
.join(".", "volumes", group
, subvolume
,".snap", inherited_snap
)
3364 self
.mount_a
.run_shell(['ls', inherited_snappath
])
3366 # snapshot info on inherited snapshot
3368 self
._get
_subvolume
_snapshot
_info
(self
.volname
, subvolume
, inherited_snap
, group
)
3369 except CommandFailedError
as ce
:
3370 self
.assertEqual(ce
.exitstatus
, errno
.EINVAL
, "invalid error code on snapshot info of inherited snapshot")
3372 self
.fail("expected snapshot info of inherited snapshot to fail")
3374 # remove ancestral snapshots
3375 self
.mount_a
.run_shell(['rmdir', ancestral_snappath1
], sudo
=True)
3378 self
._fs
_cmd
("subvolume", "rm", self
.volname
, subvolume
, "--group_name", group
)
3380 # verify trash dir is clean
3381 self
._wait
_for
_trash
_empty
()
3384 self
._fs
_cmd
("subvolumegroup", "rm", self
.volname
, group
)
3386 def test_subvolume_inherited_snapshot_rm(self
):
3388 tests the scenario where 'fs subvolume snapshot rm' command
3389 should fail for inherited snapshots created as part of snapshot
3393 subvolume
= self
._generate
_random
_subvolume
_name
()
3394 group
= self
._generate
_random
_group
_name
()
3397 self
._fs
_cmd
("subvolumegroup", "create", self
.volname
, group
)
3399 # create subvolume in group
3400 self
._fs
_cmd
("subvolume", "create", self
.volname
, subvolume
, "--group_name", group
)
3402 # Create snapshot at ancestral level
3403 ancestral_snap_name
= "ancestral_snap_1"
3404 ancestral_snappath1
= os
.path
.join(".", "volumes", group
, ".snap", ancestral_snap_name
)
3405 self
.mount_a
.run_shell(['mkdir', '-p', ancestral_snappath1
], sudo
=True)
3407 # Validate existence of inherited snap
3408 group_path
= os
.path
.join(".", "volumes", group
)
3409 inode_number_group_dir
= int(self
.mount_a
.run_shell(['stat', '-c' '%i', group_path
]).stdout
.getvalue().strip())
3410 inherited_snap
= "_{0}_{1}".format(ancestral_snap_name
, inode_number_group_dir
)
3411 inherited_snappath
= os
.path
.join(".", "volumes", group
, subvolume
,".snap", inherited_snap
)
3412 self
.mount_a
.run_shell(['ls', inherited_snappath
])
3414 # inherited snapshot should not be deletable
3416 self
._fs
_cmd
("subvolume", "snapshot", "rm", self
.volname
, subvolume
, inherited_snap
, "--group_name", group
)
3417 except CommandFailedError
as ce
:
3418 self
.assertEqual(ce
.exitstatus
, errno
.EINVAL
, msg
="invalid error code when removing inherited snapshot")
3420 self
.fail("expected removing inheirted snapshot to fail")
3422 # remove ancestral snapshots
3423 self
.mount_a
.run_shell(['rmdir', ancestral_snappath1
], sudo
=True)
3426 self
._fs
_cmd
("subvolume", "rm", self
.volname
, subvolume
, group
)
3428 # verify trash dir is clean
3429 self
._wait
_for
_trash
_empty
()
3432 self
._fs
_cmd
("subvolumegroup", "rm", self
.volname
, group
)
3434 def test_subvolume_subvolumegroup_snapshot_name_conflict(self
):
3436 tests the scenario where creation of subvolume snapshot name
3437 with same name as it's subvolumegroup snapshot name. This should
3441 subvolume
= self
._generate
_random
_subvolume
_name
()
3442 group
= self
._generate
_random
_group
_name
()
3443 group_snapshot
= self
._generate
_random
_snapshot
_name
()
3446 self
._fs
_cmd
("subvolumegroup", "create", self
.volname
, group
)
3448 # create subvolume in group
3449 self
._fs
_cmd
("subvolume", "create", self
.volname
, subvolume
, "--group_name", group
)
3451 # Create subvolumegroup snapshot
3452 group_snapshot_path
= os
.path
.join(".", "volumes", group
, ".snap", group_snapshot
)
3453 self
.mount_a
.run_shell(['mkdir', '-p', group_snapshot_path
], sudo
=True)
3455 # Validate existence of subvolumegroup snapshot
3456 self
.mount_a
.run_shell(['ls', group_snapshot_path
])
3458 # Creation of subvolume snapshot with it's subvolumegroup snapshot name should fail
3460 self
._fs
_cmd
("subvolume", "snapshot", "create", self
.volname
, subvolume
, group_snapshot
, "--group_name", group
)
3461 except CommandFailedError
as ce
:
3462 self
.assertEqual(ce
.exitstatus
, errno
.EINVAL
, msg
="invalid error code when creating subvolume snapshot with same name as subvolume group snapshot")
3464 self
.fail("expected subvolume snapshot creation with same name as subvolumegroup snapshot to fail")
3466 # remove subvolumegroup snapshot
3467 self
.mount_a
.run_shell(['rmdir', group_snapshot_path
], sudo
=True)
3470 self
._fs
_cmd
("subvolume", "rm", self
.volname
, subvolume
, group
)
3472 # verify trash dir is clean
3473 self
._wait
_for
_trash
_empty
()
3476 self
._fs
_cmd
("subvolumegroup", "rm", self
.volname
, group
)
3478 def test_subvolume_retain_snapshot_invalid_recreate(self
):
3480 ensure retained subvolume recreate does not leave any incarnations in the subvolume and trash
3482 subvolume
= self
._generate
_random
_subvolume
_name
()
3483 snapshot
= self
._generate
_random
_snapshot
_name
()
3486 self
._fs
_cmd
("subvolume", "create", self
.volname
, subvolume
)
3488 # snapshot subvolume
3489 self
._fs
_cmd
("subvolume", "snapshot", "create", self
.volname
, subvolume
, snapshot
)
3491 # remove with snapshot retention
3492 self
._fs
_cmd
("subvolume", "rm", self
.volname
, subvolume
, "--retain-snapshots")
3494 # recreate subvolume with an invalid pool
3495 data_pool
= "invalid_pool"
3497 self
._fs
_cmd
("subvolume", "create", self
.volname
, subvolume
, "--pool_layout", data_pool
)
3498 except CommandFailedError
as ce
:
3499 self
.assertEqual(ce
.exitstatus
, errno
.EINVAL
, "invalid error code on recreate of subvolume with invalid poolname")
3501 self
.fail("expected recreate of subvolume with invalid poolname to fail")
3504 subvol_info
= json
.loads(self
._fs
_cmd
("subvolume", "info", self
.volname
, subvolume
))
3505 self
.assertEqual(subvol_info
["state"], "snapshot-retained",
3506 msg
="expected state to be 'snapshot-retained', found '{0}".format(subvol_info
["state"]))
3510 self
._fs
_cmd
("subvolume", "getpath", self
.volname
, subvolume
)
3511 except CommandFailedError
as ce
:
3512 self
.assertEqual(ce
.exitstatus
, errno
.ENOENT
, "invalid error code on getpath of subvolume with retained snapshots")
3514 self
.fail("expected getpath of subvolume with retained snapshots to fail")
3516 # remove snapshot (should remove volume)
3517 self
._fs
_cmd
("subvolume", "snapshot", "rm", self
.volname
, subvolume
, snapshot
)
3519 # verify trash dir is clean
3520 self
._wait
_for
_trash
_empty
()
3522 def test_subvolume_retain_snapshot_recreate_subvolume(self
):
3524 ensure a retained subvolume can be recreated and further snapshotted
3526 snap_md
= ["created_at", "data_pool", "has_pending_clones", "size"]
3528 subvolume
= self
._generate
_random
_subvolume
_name
()
3529 snapshot1
, snapshot2
= self
._generate
_random
_snapshot
_name
(2)
3532 self
._fs
_cmd
("subvolume", "create", self
.volname
, subvolume
)
3534 # snapshot subvolume
3535 self
._fs
_cmd
("subvolume", "snapshot", "create", self
.volname
, subvolume
, snapshot1
)
3537 # remove with snapshot retention
3538 self
._fs
_cmd
("subvolume", "rm", self
.volname
, subvolume
, "--retain-snapshots")
3541 subvol_info
= json
.loads(self
._fs
_cmd
("subvolume", "info", self
.volname
, subvolume
))
3542 self
.assertEqual(subvol_info
["state"], "snapshot-retained",
3543 msg
="expected state to be 'snapshot-retained', found '{0}".format(subvol_info
["state"]))
3545 # recreate retained subvolume
3546 self
._fs
_cmd
("subvolume", "create", self
.volname
, subvolume
)
3549 subvol_info
= json
.loads(self
._fs
_cmd
("subvolume", "info", self
.volname
, subvolume
))
3550 self
.assertEqual(subvol_info
["state"], "complete",
3551 msg
="expected state to be 'snapshot-retained', found '{0}".format(subvol_info
["state"]))
3553 # snapshot info (older snapshot)
3554 snap_info
= json
.loads(self
._get
_subvolume
_snapshot
_info
(self
.volname
, subvolume
, snapshot1
))
3556 self
.assertIn(md
, snap_info
, "'{0}' key not present in metadata of snapshot".format(md
))
3557 self
.assertEqual(snap_info
["has_pending_clones"], "no")
3559 # snap-create (new snapshot)
3560 self
._fs
_cmd
("subvolume", "snapshot", "create", self
.volname
, subvolume
, snapshot2
)
3562 # remove with retain snapshots
3563 self
._fs
_cmd
("subvolume", "rm", self
.volname
, subvolume
, "--retain-snapshots")
3566 subvolsnapshotls
= json
.loads(self
._fs
_cmd
('subvolume', 'snapshot', 'ls', self
.volname
, subvolume
))
3567 self
.assertEqual(len(subvolsnapshotls
), 2, "Expected the 'fs subvolume snapshot ls' command to list the"
3568 " created subvolume snapshots")
3569 snapshotnames
= [snapshot
['name'] for snapshot
in subvolsnapshotls
]
3570 for snap
in [snapshot1
, snapshot2
]:
3571 self
.assertIn(snap
, snapshotnames
, "Missing snapshot '{0}' in snapshot list".format(snap
))
3573 # remove snapshots (should remove volume)
3574 self
._fs
_cmd
("subvolume", "snapshot", "rm", self
.volname
, subvolume
, snapshot1
)
3575 self
._fs
_cmd
("subvolume", "snapshot", "rm", self
.volname
, subvolume
, snapshot2
)
3577 # verify list subvolumes returns an empty list
3578 subvolumels
= json
.loads(self
._fs
_cmd
('subvolume', 'ls', self
.volname
))
3579 self
.assertEqual(len(subvolumels
), 0)
3581 # verify trash dir is clean
3582 self
._wait
_for
_trash
_empty
()
3584 def test_subvolume_retain_snapshot_with_snapshots(self
):
3586 ensure retain snapshots based delete of a subvolume with snapshots retains the subvolume
3587 also test allowed and dis-allowed operations on a retained subvolume
3589 snap_md
= ["created_at", "data_pool", "has_pending_clones", "size"]
3591 subvolume
= self
._generate
_random
_subvolume
_name
()
3592 snapshot
= self
._generate
_random
_snapshot
_name
()
3595 self
._fs
_cmd
("subvolume", "create", self
.volname
, subvolume
)
3597 # snapshot subvolume
3598 self
._fs
_cmd
("subvolume", "snapshot", "create", self
.volname
, subvolume
, snapshot
)
3600 # remove subvolume -- should fail with ENOTEMPTY since it has snapshots
3602 self
._fs
_cmd
("subvolume", "rm", self
.volname
, subvolume
)
3603 except CommandFailedError
as ce
:
3604 self
.assertEqual(ce
.exitstatus
, errno
.ENOTEMPTY
, "invalid error code on rm of retained subvolume with snapshots")
3606 self
.fail("expected rm of subvolume with retained snapshots to fail")
3608 # remove with snapshot retention
3609 self
._fs
_cmd
("subvolume", "rm", self
.volname
, subvolume
, "--retain-snapshots")
3612 subvol_info
= json
.loads(self
._fs
_cmd
("subvolume", "info", self
.volname
, subvolume
))
3613 self
.assertEqual(subvol_info
["state"], "snapshot-retained",
3614 msg
="expected state to be 'snapshot-retained', found '{0}".format(subvol_info
["state"]))
3616 ## test allowed ops in retained state
3618 subvolumes
= json
.loads(self
._fs
_cmd
('subvolume', 'ls', self
.volname
))
3619 self
.assertEqual(len(subvolumes
), 1, "subvolume ls count mismatch, expected '1', found {0}".format(len(subvolumes
)))
3620 self
.assertEqual(subvolumes
[0]['name'], subvolume
,
3621 "subvolume name mismatch in ls output, expected '{0}', found '{1}'".format(subvolume
, subvolumes
[0]['name']))
3624 snap_info
= json
.loads(self
._get
_subvolume
_snapshot
_info
(self
.volname
, subvolume
, snapshot
))
3626 self
.assertIn(md
, snap_info
, "'{0}' key not present in metadata of snapshot".format(md
))
3627 self
.assertEqual(snap_info
["has_pending_clones"], "no")
3629 # rm --force (allowed but should fail)
3631 self
._fs
_cmd
("subvolume", "rm", self
.volname
, subvolume
, "--force")
3632 except CommandFailedError
as ce
:
3633 self
.assertEqual(ce
.exitstatus
, errno
.ENOTEMPTY
, "invalid error code on rm of subvolume with retained snapshots")
3635 self
.fail("expected rm of subvolume with retained snapshots to fail")
3637 # rm (allowed but should fail)
3639 self
._fs
_cmd
("subvolume", "rm", self
.volname
, subvolume
)
3640 except CommandFailedError
as ce
:
3641 self
.assertEqual(ce
.exitstatus
, errno
.ENOTEMPTY
, "invalid error code on rm of subvolume with retained snapshots")
3643 self
.fail("expected rm of subvolume with retained snapshots to fail")
3645 ## test disallowed ops
3648 self
._fs
_cmd
("subvolume", "getpath", self
.volname
, subvolume
)
3649 except CommandFailedError
as ce
:
3650 self
.assertEqual(ce
.exitstatus
, errno
.ENOENT
, "invalid error code on getpath of subvolume with retained snapshots")
3652 self
.fail("expected getpath of subvolume with retained snapshots to fail")
3655 nsize
= self
.DEFAULT_FILE_SIZE
*1024*1024
3657 self
._fs
_cmd
("subvolume", "resize", self
.volname
, subvolume
, str(nsize
))
3658 except CommandFailedError
as ce
:
3659 self
.assertEqual(ce
.exitstatus
, errno
.ENOENT
, "invalid error code on resize of subvolume with retained snapshots")
3661 self
.fail("expected resize of subvolume with retained snapshots to fail")
3665 self
._fs
_cmd
("subvolume", "snapshot", "create", self
.volname
, subvolume
, "fail")
3666 except CommandFailedError
as ce
:
3667 self
.assertEqual(ce
.exitstatus
, errno
.ENOENT
, "invalid error code on snapshot create of subvolume with retained snapshots")
3669 self
.fail("expected snapshot create of subvolume with retained snapshots to fail")
3671 # remove snapshot (should remove volume)
3672 self
._fs
_cmd
("subvolume", "snapshot", "rm", self
.volname
, subvolume
, snapshot
)
3674 # verify list subvolumes returns an empty list
3675 subvolumels
= json
.loads(self
._fs
_cmd
('subvolume', 'ls', self
.volname
))
3676 self
.assertEqual(len(subvolumels
), 0)
3678 # verify trash dir is clean
3679 self
._wait
_for
_trash
_empty
()
3681 def test_subvolume_retain_snapshot_without_snapshots(self
):
3683 ensure retain snapshots based delete of a subvolume with no snapshots, deletes the subbvolume
3685 subvolume
= self
._generate
_random
_subvolume
_name
()
3688 self
._fs
_cmd
("subvolume", "create", self
.volname
, subvolume
)
3690 # remove with snapshot retention (should remove volume, no snapshots to retain)
3691 self
._fs
_cmd
("subvolume", "rm", self
.volname
, subvolume
, "--retain-snapshots")
3693 # verify list subvolumes returns an empty list
3694 subvolumels
= json
.loads(self
._fs
_cmd
('subvolume', 'ls', self
.volname
))
3695 self
.assertEqual(len(subvolumels
), 0)
3697 # verify trash dir is clean
3698 self
._wait
_for
_trash
_empty
()
3700 def test_subvolume_retain_snapshot_trash_busy_recreate(self
):
3702 ensure retained subvolume recreate fails if its trash is not yet purged
3704 subvolume
= self
._generate
_random
_subvolume
_name
()
3705 snapshot
= self
._generate
_random
_snapshot
_name
()
3708 self
._fs
_cmd
("subvolume", "create", self
.volname
, subvolume
)
3710 # snapshot subvolume
3711 self
._fs
_cmd
("subvolume", "snapshot", "create", self
.volname
, subvolume
, snapshot
)
3713 # remove with snapshot retention
3714 self
._fs
_cmd
("subvolume", "rm", self
.volname
, subvolume
, "--retain-snapshots")
3716 # fake a trash entry
3717 self
._update
_fake
_trash
(subvolume
)
3719 # recreate subvolume
3721 self
._fs
_cmd
("subvolume", "create", self
.volname
, subvolume
)
3722 except CommandFailedError
as ce
:
3723 self
.assertEqual(ce
.exitstatus
, errno
.EAGAIN
, "invalid error code on recreate of subvolume with purge pending")
3725 self
.fail("expected recreate of subvolume with purge pending to fail")
3727 # clear fake trash entry
3728 self
._update
_fake
_trash
(subvolume
, create
=False)
3730 # recreate subvolume
3731 self
._fs
_cmd
("subvolume", "create", self
.volname
, subvolume
)
3734 self
._fs
_cmd
("subvolume", "snapshot", "rm", self
.volname
, subvolume
, snapshot
)
3737 self
._fs
_cmd
("subvolume", "rm", self
.volname
, subvolume
)
3739 # verify trash dir is clean
3740 self
._wait
_for
_trash
_empty
()
3742 def test_subvolume_rm_with_snapshots(self
):
3743 subvolume
= self
._generate
_random
_subvolume
_name
()
3744 snapshot
= self
._generate
_random
_snapshot
_name
()
3747 self
._fs
_cmd
("subvolume", "create", self
.volname
, subvolume
)
3749 # snapshot subvolume
3750 self
._fs
_cmd
("subvolume", "snapshot", "create", self
.volname
, subvolume
, snapshot
)
3752 # remove subvolume -- should fail with ENOTEMPTY since it has snapshots
3754 self
._fs
_cmd
("subvolume", "rm", self
.volname
, subvolume
)
3755 except CommandFailedError
as ce
:
3756 if ce
.exitstatus
!= errno
.ENOTEMPTY
:
3757 raise RuntimeError("invalid error code returned when deleting subvolume with snapshots")
3759 raise RuntimeError("expected subvolume deletion to fail")
3762 self
._fs
_cmd
("subvolume", "snapshot", "rm", self
.volname
, subvolume
, snapshot
)
3765 self
._fs
_cmd
("subvolume", "rm", self
.volname
, subvolume
)
3767 # verify trash dir is clean
3768 self
._wait
_for
_trash
_empty
()
3770 def test_subvolume_snapshot_protect_unprotect_sanity(self
):
3772 Snapshot protect/unprotect commands are deprecated. This test exists to ensure that
3773 invoking the command does not cause errors, till they are removed from a subsequent release.
3775 subvolume
= self
._generate
_random
_subvolume
_name
()
3776 snapshot
= self
._generate
_random
_snapshot
_name
()
3777 clone
= self
._generate
_random
_clone
_name
()
3780 self
._fs
_cmd
("subvolume", "create", self
.volname
, subvolume
, "--mode=777")
3783 self
._do
_subvolume
_io
(subvolume
, number_of_files
=64)
3785 # snapshot subvolume
3786 self
._fs
_cmd
("subvolume", "snapshot", "create", self
.volname
, subvolume
, snapshot
)
3788 # now, protect snapshot
3789 self
._fs
_cmd
("subvolume", "snapshot", "protect", self
.volname
, subvolume
, snapshot
)
3792 self
._fs
_cmd
("subvolume", "snapshot", "clone", self
.volname
, subvolume
, snapshot
, clone
)
3794 # check clone status
3795 self
._wait
_for
_clone
_to
_complete
(clone
)
3797 # now, unprotect snapshot
3798 self
._fs
_cmd
("subvolume", "snapshot", "unprotect", self
.volname
, subvolume
, snapshot
)
3801 self
._verify
_clone
(subvolume
, snapshot
, clone
)
3804 self
._fs
_cmd
("subvolume", "snapshot", "rm", self
.volname
, subvolume
, snapshot
)
3807 self
._fs
_cmd
("subvolume", "rm", self
.volname
, subvolume
)
3808 self
._fs
_cmd
("subvolume", "rm", self
.volname
, clone
)
3810 # verify trash dir is clean
3811 self
._wait
_for
_trash
_empty
()
3813 def test_subvolume_snapshot_rm_force(self
):
3814 # test removing non existing subvolume snapshot with --force
3815 subvolume
= self
._generate
_random
_subvolume
_name
()
3816 snapshot
= self
._generate
_random
_snapshot
_name
()
3820 self
._fs
_cmd
("subvolume", "snapshot", "rm", self
.volname
, subvolume
, snapshot
, "--force")
3821 except CommandFailedError
:
3822 raise RuntimeError("expected the 'fs subvolume snapshot rm --force' command to succeed")
3824 def test_subvolume_snapshot_metadata_set(self
):
3826 Set custom metadata for subvolume snapshot.
3828 subvolname
= self
._generate
_random
_subvolume
_name
()
3829 group
= self
._generate
_random
_group
_name
()
3830 snapshot
= self
._generate
_random
_snapshot
_name
()
3833 self
._fs
_cmd
("subvolumegroup", "create", self
.volname
, group
)
3835 # create subvolume in group.
3836 self
._fs
_cmd
("subvolume", "create", self
.volname
, subvolname
, group
)
3838 # snapshot subvolume
3839 self
._fs
_cmd
("subvolume", "snapshot", "create", self
.volname
, subvolname
, snapshot
, group
)
3841 # set metadata for snapshot.
3845 self
._fs
_cmd
("subvolume", "snapshot", "metadata", "set", self
.volname
, subvolname
, snapshot
, key
, value
, group
)
3846 except CommandFailedError
:
3847 self
.fail("expected the 'fs subvolume snapshot metadata set' command to succeed")
3849 self
._fs
_cmd
("subvolume", "snapshot", "rm", self
.volname
, subvolname
, snapshot
, group
)
3850 self
._fs
_cmd
("subvolume", "rm", self
.volname
, subvolname
, group
)
3851 self
._fs
_cmd
("subvolumegroup", "rm", self
.volname
, group
)
3853 # verify trash dir is clean.
3854 self
._wait
_for
_trash
_empty
()
3856 def test_subvolume_snapshot_metadata_set_idempotence(self
):
3858 Set custom metadata for subvolume snapshot (Idempotency).
3860 subvolname
= self
._generate
_random
_subvolume
_name
()
3861 group
= self
._generate
_random
_group
_name
()
3862 snapshot
= self
._generate
_random
_snapshot
_name
()
3865 self
._fs
_cmd
("subvolumegroup", "create", self
.volname
, group
)
3867 # create subvolume in group.
3868 self
._fs
_cmd
("subvolume", "create", self
.volname
, subvolname
, group
)
3870 # snapshot subvolume
3871 self
._fs
_cmd
("subvolume", "snapshot", "create", self
.volname
, subvolname
, snapshot
, group
)
3873 # set metadata for snapshot.
3877 self
._fs
_cmd
("subvolume", "snapshot", "metadata", "set", self
.volname
, subvolname
, snapshot
, key
, value
, group
)
3878 except CommandFailedError
:
3879 self
.fail("expected the 'fs subvolume snapshot metadata set' command to succeed")
3881 # set same metadata again for subvolume.
3883 self
._fs
_cmd
("subvolume", "snapshot", "metadata", "set", self
.volname
, subvolname
, snapshot
, key
, value
, group
)
3884 except CommandFailedError
:
3885 self
.fail("expected the 'fs subvolume snapshot metadata set' command to succeed because it is idempotent operation")
3887 # get value for specified key.
3889 ret
= self
._fs
_cmd
("subvolume", "snapshot", "metadata", "get", self
.volname
, subvolname
, snapshot
, key
, group
)
3890 except CommandFailedError
:
3891 self
.fail("expected the 'fs subvolume snapshot metadata get' command to succeed")
3893 # remove '\n' from returned value.
3894 ret
= ret
.strip('\n')
3896 # match received value with expected value.
3897 self
.assertEqual(value
, ret
)
3899 self
._fs
_cmd
("subvolume", "snapshot", "rm", self
.volname
, subvolname
, snapshot
, group
)
3900 self
._fs
_cmd
("subvolume", "rm", self
.volname
, subvolname
, group
)
3901 self
._fs
_cmd
("subvolumegroup", "rm", self
.volname
, group
)
3903 # verify trash dir is clean.
3904 self
._wait
_for
_trash
_empty
()
3906 def test_subvolume_snapshot_metadata_get(self
):
3908 Get custom metadata for a specified key in subvolume snapshot metadata.
3910 subvolname
= self
._generate
_random
_subvolume
_name
()
3911 group
= self
._generate
_random
_group
_name
()
3912 snapshot
= self
._generate
_random
_snapshot
_name
()
3915 self
._fs
_cmd
("subvolumegroup", "create", self
.volname
, group
)
3917 # create subvolume in group.
3918 self
._fs
_cmd
("subvolume", "create", self
.volname
, subvolname
, group
)
3920 # snapshot subvolume
3921 self
._fs
_cmd
("subvolume", "snapshot", "create", self
.volname
, subvolname
, snapshot
, group
)
3923 # set metadata for snapshot.
3926 self
._fs
_cmd
("subvolume", "snapshot", "metadata", "set", self
.volname
, subvolname
, snapshot
, key
, value
, group
)
3928 # get value for specified key.
3930 ret
= self
._fs
_cmd
("subvolume", "snapshot", "metadata", "get", self
.volname
, subvolname
, snapshot
, key
, group
)
3931 except CommandFailedError
:
3932 self
.fail("expected the 'fs subvolume snapshot metadata get' command to succeed")
3934 # remove '\n' from returned value.
3935 ret
= ret
.strip('\n')
3937 # match received value with expected value.
3938 self
.assertEqual(value
, ret
)
3940 self
._fs
_cmd
("subvolume", "snapshot", "rm", self
.volname
, subvolname
, snapshot
, group
)
3941 self
._fs
_cmd
("subvolume", "rm", self
.volname
, subvolname
, group
)
3942 self
._fs
_cmd
("subvolumegroup", "rm", self
.volname
, group
)
3944 # verify trash dir is clean.
3945 self
._wait
_for
_trash
_empty
()
3947 def test_subvolume_snapshot_metadata_get_for_nonexisting_key(self
):
3949 Get custom metadata for subvolume snapshot if specified key not exist in metadata.
3951 subvolname
= self
._generate
_random
_subvolume
_name
()
3952 group
= self
._generate
_random
_group
_name
()
3953 snapshot
= self
._generate
_random
_snapshot
_name
()
3956 self
._fs
_cmd
("subvolumegroup", "create", self
.volname
, group
)
3958 # create subvolume in group.
3959 self
._fs
_cmd
("subvolume", "create", self
.volname
, subvolname
, group
)
3961 # snapshot subvolume
3962 self
._fs
_cmd
("subvolume", "snapshot", "create", self
.volname
, subvolname
, snapshot
, group
)
3964 # set metadata for snapshot.
3967 self
._fs
_cmd
("subvolume", "snapshot", "metadata", "set", self
.volname
, subvolname
, snapshot
, key
, value
, group
)
3969 # try to get value for nonexisting key
3970 # Expecting ENOENT exit status because key does not exist
3972 self
._fs
_cmd
("subvolume", "snapshot", "metadata", "get", self
.volname
, subvolname
, snapshot
, "key_nonexist", group
)
3973 except CommandFailedError
as e
:
3974 self
.assertEqual(e
.exitstatus
, errno
.ENOENT
)
3976 self
.fail("Expected ENOENT because 'key_nonexist' does not exist")
3978 self
._fs
_cmd
("subvolume", "snapshot", "rm", self
.volname
, subvolname
, snapshot
, group
)
3979 self
._fs
_cmd
("subvolume", "rm", self
.volname
, subvolname
, group
)
3980 self
._fs
_cmd
("subvolumegroup", "rm", self
.volname
, group
)
3982 # verify trash dir is clean.
3983 self
._wait
_for
_trash
_empty
()
3985 def test_subvolume_snapshot_metadata_get_for_nonexisting_section(self
):
3987 Get custom metadata for subvolume snapshot if metadata is not added for subvolume snapshot.
3989 subvolname
= self
._generate
_random
_subvolume
_name
()
3990 group
= self
._generate
_random
_group
_name
()
3991 snapshot
= self
._generate
_random
_snapshot
_name
()
3994 self
._fs
_cmd
("subvolumegroup", "create", self
.volname
, group
)
3996 # create subvolume in group.
3997 self
._fs
_cmd
("subvolume", "create", self
.volname
, subvolname
, group
)
3999 # snapshot subvolume
4000 self
._fs
_cmd
("subvolume", "snapshot", "create", self
.volname
, subvolname
, snapshot
, group
)
4002 # try to get value for nonexisting key (as section does not exist)
4003 # Expecting ENOENT exit status because key does not exist
4005 self
._fs
_cmd
("subvolume", "snapshot", "metadata", "get", self
.volname
, subvolname
, snapshot
, "key", group
)
4006 except CommandFailedError
as e
:
4007 self
.assertEqual(e
.exitstatus
, errno
.ENOENT
)
4009 self
.fail("Expected ENOENT because section does not exist")
4011 self
._fs
_cmd
("subvolume", "snapshot", "rm", self
.volname
, subvolname
, snapshot
, group
)
4012 self
._fs
_cmd
("subvolume", "rm", self
.volname
, subvolname
, group
)
4013 self
._fs
_cmd
("subvolumegroup", "rm", self
.volname
, group
)
4015 # verify trash dir is clean.
4016 self
._wait
_for
_trash
_empty
()
4018 def test_subvolume_snapshot_metadata_update(self
):
4020 Update custom metadata for a specified key in subvolume snapshot metadata.
4022 subvolname
= self
._generate
_random
_subvolume
_name
()
4023 group
= self
._generate
_random
_group
_name
()
4024 snapshot
= self
._generate
_random
_snapshot
_name
()
4027 self
._fs
_cmd
("subvolumegroup", "create", self
.volname
, group
)
4029 # create subvolume in group.
4030 self
._fs
_cmd
("subvolume", "create", self
.volname
, subvolname
, group
)
4032 # snapshot subvolume
4033 self
._fs
_cmd
("subvolume", "snapshot", "create", self
.volname
, subvolname
, snapshot
, group
)
4035 # set metadata for snapshot.
4038 self
._fs
_cmd
("subvolume", "snapshot", "metadata", "set", self
.volname
, subvolname
, snapshot
, key
, value
, group
)
4040 # update metadata against key.
4041 new_value
= "new_value"
4042 self
._fs
_cmd
("subvolume", "snapshot", "metadata", "set", self
.volname
, subvolname
, snapshot
, key
, new_value
, group
)
4044 # get metadata for specified key of snapshot.
4046 ret
= self
._fs
_cmd
("subvolume", "snapshot", "metadata", "get", self
.volname
, subvolname
, snapshot
, key
, group
)
4047 except CommandFailedError
:
4048 self
.fail("expected the 'fs subvolume snapshot metadata get' command to succeed")
4050 # remove '\n' from returned value.
4051 ret
= ret
.strip('\n')
4053 # match received value with expected value.
4054 self
.assertEqual(new_value
, ret
)
4056 self
._fs
_cmd
("subvolume", "snapshot", "rm", self
.volname
, subvolname
, snapshot
, group
)
4057 self
._fs
_cmd
("subvolume", "rm", self
.volname
, subvolname
, group
)
4058 self
._fs
_cmd
("subvolumegroup", "rm", self
.volname
, group
)
4060 # verify trash dir is clean.
4061 self
._wait
_for
_trash
_empty
()
4063 def test_subvolume_snapshot_metadata_list(self
):
4065 List custom metadata for subvolume snapshot.
4067 subvolname
= self
._generate
_random
_subvolume
_name
()
4068 group
= self
._generate
_random
_group
_name
()
4069 snapshot
= self
._generate
_random
_snapshot
_name
()
4072 self
._fs
_cmd
("subvolumegroup", "create", self
.volname
, group
)
4074 # create subvolume in group.
4075 self
._fs
_cmd
("subvolume", "create", self
.volname
, subvolname
, group
)
4077 # snapshot subvolume
4078 self
._fs
_cmd
("subvolume", "snapshot", "create", self
.volname
, subvolname
, snapshot
, group
)
4080 # set metadata for subvolume.
4081 input_metadata_dict
= {f
'key_{i}' : f
'value_{i}' for i
in range(3)}
4083 for k
, v
in input_metadata_dict
.items():
4084 self
._fs
_cmd
("subvolume", "snapshot", "metadata", "set", self
.volname
, subvolname
, snapshot
, k
, v
, group
)
4088 ret_dict
= json
.loads(self
._fs
_cmd
("subvolume", "snapshot", "metadata", "ls", self
.volname
, subvolname
, snapshot
, group
))
4089 except CommandFailedError
:
4090 self
.fail("expected the 'fs subvolume snapshot metadata ls' command to succeed")
4092 # compare output with expected output
4093 self
.assertDictEqual(input_metadata_dict
, ret_dict
)
4095 self
._fs
_cmd
("subvolume", "snapshot", "rm", self
.volname
, subvolname
, snapshot
, group
)
4096 self
._fs
_cmd
("subvolume", "rm", self
.volname
, subvolname
, group
)
4097 self
._fs
_cmd
("subvolumegroup", "rm", self
.volname
, group
)
4099 # verify trash dir is clean.
4100 self
._wait
_for
_trash
_empty
()
4102 def test_subvolume_snapshot_metadata_list_if_no_metadata_set(self
):
4104 List custom metadata for subvolume snapshot if metadata is not added for subvolume snapshot.
4106 subvolname
= self
._generate
_random
_subvolume
_name
()
4107 group
= self
._generate
_random
_group
_name
()
4108 snapshot
= self
._generate
_random
_snapshot
_name
()
4111 self
._fs
_cmd
("subvolumegroup", "create", self
.volname
, group
)
4113 # create subvolume in group.
4114 self
._fs
_cmd
("subvolume", "create", self
.volname
, subvolname
, group
)
4116 # snapshot subvolume
4117 self
._fs
_cmd
("subvolume", "snapshot", "create", self
.volname
, subvolname
, snapshot
, group
)
4121 ret_dict
= json
.loads(self
._fs
_cmd
("subvolume", "snapshot", "metadata", "ls", self
.volname
, subvolname
, snapshot
, group
))
4122 except CommandFailedError
:
4123 self
.fail("expected the 'fs subvolume snapshot metadata ls' command to succeed")
4125 # compare output with expected output
4127 self
.assertDictEqual(ret_dict
, empty_dict
)
4129 self
._fs
_cmd
("subvolume", "snapshot", "rm", self
.volname
, subvolname
, snapshot
, group
)
4130 self
._fs
_cmd
("subvolume", "rm", self
.volname
, subvolname
, group
)
4131 self
._fs
_cmd
("subvolumegroup", "rm", self
.volname
, group
)
4133 # verify trash dir is clean.
4134 self
._wait
_for
_trash
_empty
()
4136 def test_subvolume_snapshot_metadata_remove(self
):
4138 Remove custom metadata for a specified key in subvolume snapshot metadata.
4140 subvolname
= self
._generate
_random
_subvolume
_name
()
4141 group
= self
._generate
_random
_group
_name
()
4142 snapshot
= self
._generate
_random
_snapshot
_name
()
4145 self
._fs
_cmd
("subvolumegroup", "create", self
.volname
, group
)
4147 # create subvolume in group.
4148 self
._fs
_cmd
("subvolume", "create", self
.volname
, subvolname
, group
)
4150 # snapshot subvolume
4151 self
._fs
_cmd
("subvolume", "snapshot", "create", self
.volname
, subvolname
, snapshot
, group
)
4153 # set metadata for snapshot.
4156 self
._fs
_cmd
("subvolume", "snapshot", "metadata", "set", self
.volname
, subvolname
, snapshot
, key
, value
, group
)
4158 # remove metadata against specified key.
4160 self
._fs
_cmd
("subvolume", "snapshot", "metadata", "rm", self
.volname
, subvolname
, snapshot
, key
, group
)
4161 except CommandFailedError
:
4162 self
.fail("expected the 'fs subvolume snapshot metadata rm' command to succeed")
4164 # confirm key is removed by again fetching metadata
4166 self
._fs
_cmd
("subvolume", "snapshot", "metadata", "get", self
.volname
, subvolname
, key
, snapshot
, group
)
4167 except CommandFailedError
as e
:
4168 self
.assertEqual(e
.exitstatus
, errno
.ENOENT
)
4170 self
.fail("Expected ENOENT because key does not exist")
4172 self
._fs
_cmd
("subvolume", "snapshot", "rm", self
.volname
, subvolname
, snapshot
, group
)
4173 self
._fs
_cmd
("subvolume", "rm", self
.volname
, subvolname
, group
)
4174 self
._fs
_cmd
("subvolumegroup", "rm", self
.volname
, group
)
4176 # verify trash dir is clean.
4177 self
._wait
_for
_trash
_empty
()
4179 def test_subvolume_snapshot_metadata_remove_for_nonexisting_key(self
):
4181 Remove custom metadata for subvolume snapshot if specified key not exist in metadata.
4183 subvolname
= self
._generate
_random
_subvolume
_name
()
4184 group
= self
._generate
_random
_group
_name
()
4185 snapshot
= self
._generate
_random
_snapshot
_name
()
4188 self
._fs
_cmd
("subvolumegroup", "create", self
.volname
, group
)
4190 # create subvolume in group.
4191 self
._fs
_cmd
("subvolume", "create", self
.volname
, subvolname
, group
)
4193 # snapshot subvolume
4194 self
._fs
_cmd
("subvolume", "snapshot", "create", self
.volname
, subvolname
, snapshot
, group
)
4196 # set metadata for snapshot.
4199 self
._fs
_cmd
("subvolume", "snapshot", "metadata", "set", self
.volname
, subvolname
, snapshot
, key
, value
, group
)
4201 # try to remove value for nonexisting key
4202 # Expecting ENOENT exit status because key does not exist
4204 self
._fs
_cmd
("subvolume", "snapshot", "metadata", "rm", self
.volname
, subvolname
, snapshot
, "key_nonexist", group
)
4205 except CommandFailedError
as e
:
4206 self
.assertEqual(e
.exitstatus
, errno
.ENOENT
)
4208 self
.fail("Expected ENOENT because 'key_nonexist' does not exist")
4210 self
._fs
_cmd
("subvolume", "snapshot", "rm", self
.volname
, subvolname
, snapshot
, group
)
4211 self
._fs
_cmd
("subvolume", "rm", self
.volname
, subvolname
, group
)
4212 self
._fs
_cmd
("subvolumegroup", "rm", self
.volname
, group
)
4214 # verify trash dir is clean.
4215 self
._wait
_for
_trash
_empty
()
4217 def test_subvolume_snapshot_metadata_remove_for_nonexisting_section(self
):
4219 Remove custom metadata for subvolume snapshot if metadata is not added for subvolume snapshot.
4221 subvolname
= self
._generate
_random
_subvolume
_name
()
4222 group
= self
._generate
_random
_group
_name
()
4223 snapshot
= self
._generate
_random
_snapshot
_name
()
4226 self
._fs
_cmd
("subvolumegroup", "create", self
.volname
, group
)
4228 # create subvolume in group.
4229 self
._fs
_cmd
("subvolume", "create", self
.volname
, subvolname
, group
)
4231 # snapshot subvolume
4232 self
._fs
_cmd
("subvolume", "snapshot", "create", self
.volname
, subvolname
, snapshot
, group
)
4234 # try to remove value for nonexisting key (as section does not exist)
4235 # Expecting ENOENT exit status because key does not exist
4237 self
._fs
_cmd
("subvolume", "snapshot", "metadata", "rm", self
.volname
, subvolname
, snapshot
, "key", group
)
4238 except CommandFailedError
as e
:
4239 self
.assertEqual(e
.exitstatus
, errno
.ENOENT
)
4241 self
.fail("Expected ENOENT because section does not exist")
4243 self
._fs
_cmd
("subvolume", "snapshot", "rm", self
.volname
, subvolname
, snapshot
, group
)
4244 self
._fs
_cmd
("subvolume", "rm", self
.volname
, subvolname
, group
)
4245 self
._fs
_cmd
("subvolumegroup", "rm", self
.volname
, group
)
4247 # verify trash dir is clean.
4248 self
._wait
_for
_trash
_empty
()
4250 def test_subvolume_snapshot_metadata_remove_force(self
):
4252 Forcefully remove custom metadata for a specified key in subvolume snapshot metadata.
4254 subvolname
= self
._generate
_random
_subvolume
_name
()
4255 group
= self
._generate
_random
_group
_name
()
4256 snapshot
= self
._generate
_random
_snapshot
_name
()
4259 self
._fs
_cmd
("subvolumegroup", "create", self
.volname
, group
)
4261 # create subvolume in group.
4262 self
._fs
_cmd
("subvolume", "create", self
.volname
, subvolname
, group
)
4264 # snapshot subvolume
4265 self
._fs
_cmd
("subvolume", "snapshot", "create", self
.volname
, subvolname
, snapshot
, group
)
4267 # set metadata for snapshot.
4270 self
._fs
_cmd
("subvolume", "snapshot", "metadata", "set", self
.volname
, subvolname
, snapshot
, key
, value
, group
)
4272 # remove metadata against specified key with --force option.
4274 self
._fs
_cmd
("subvolume", "snapshot", "metadata", "rm", self
.volname
, subvolname
, snapshot
, key
, group
, "--force")
4275 except CommandFailedError
:
4276 self
.fail("expected the 'fs subvolume snapshot metadata rm' command to succeed")
4278 # confirm key is removed by again fetching metadata
4280 self
._fs
_cmd
("subvolume", "snapshot", "metadata", "get", self
.volname
, subvolname
, snapshot
, key
, group
)
4281 except CommandFailedError
as e
:
4282 self
.assertEqual(e
.exitstatus
, errno
.ENOENT
)
4284 self
.fail("Expected ENOENT because key does not exist")
4286 self
._fs
_cmd
("subvolume", "snapshot", "rm", self
.volname
, subvolname
, snapshot
, group
)
4287 self
._fs
_cmd
("subvolume", "rm", self
.volname
, subvolname
, group
)
4288 self
._fs
_cmd
("subvolumegroup", "rm", self
.volname
, group
)
4290 # verify trash dir is clean.
4291 self
._wait
_for
_trash
_empty
()
4293 def test_subvolume_snapshot_metadata_remove_force_for_nonexisting_key(self
):
4295 Forcefully remove custom metadata for subvolume snapshot if specified key not exist in metadata.
4297 subvolname
= self
._generate
_random
_subvolume
_name
()
4298 group
= self
._generate
_random
_group
_name
()
4299 snapshot
= self
._generate
_random
_snapshot
_name
()
4302 self
._fs
_cmd
("subvolumegroup", "create", self
.volname
, group
)
4304 # create subvolume in group.
4305 self
._fs
_cmd
("subvolume", "create", self
.volname
, subvolname
, group
)
4307 # snapshot subvolume
4308 self
._fs
_cmd
("subvolume", "snapshot", "create", self
.volname
, subvolname
, snapshot
, group
)
4310 # set metadata for snapshot.
4313 self
._fs
_cmd
("subvolume", "snapshot", "metadata", "set", self
.volname
, subvolname
, snapshot
, key
, value
, group
)
4315 # remove metadata against specified key.
4317 self
._fs
_cmd
("subvolume", "snapshot", "metadata", "rm", self
.volname
, subvolname
, snapshot
, key
, group
)
4318 except CommandFailedError
:
4319 self
.fail("expected the 'fs subvolume snapshot metadata rm' command to succeed")
4321 # confirm key is removed by again fetching metadata
4323 self
._fs
_cmd
("subvolume", "snapshot", "metadata", "get", self
.volname
, subvolname
, snapshot
, key
, group
)
4324 except CommandFailedError
as e
:
4325 self
.assertEqual(e
.exitstatus
, errno
.ENOENT
)
4327 self
.fail("Expected ENOENT because key does not exist")
4329 # again remove metadata against already removed key with --force option.
4331 self
._fs
_cmd
("subvolume", "snapshot", "metadata", "rm", self
.volname
, subvolname
, snapshot
, key
, group
, "--force")
4332 except CommandFailedError
:
4333 self
.fail("expected the 'fs subvolume snapshot metadata rm' (with --force) command to succeed")
4335 self
._fs
_cmd
("subvolume", "snapshot", "rm", self
.volname
, subvolname
, snapshot
, group
)
4336 self
._fs
_cmd
("subvolume", "rm", self
.volname
, subvolname
, group
)
4337 self
._fs
_cmd
("subvolumegroup", "rm", self
.volname
, group
)
4339 # verify trash dir is clean.
4340 self
._wait
_for
_trash
_empty
()
4342 def test_subvolume_snapshot_metadata_after_snapshot_remove(self
):
4344 Verify metadata removal of subvolume snapshot after snapshot removal.
4346 subvolname
= self
._generate
_random
_subvolume
_name
()
4347 group
= self
._generate
_random
_group
_name
()
4348 snapshot
= self
._generate
_random
_snapshot
_name
()
4351 self
._fs
_cmd
("subvolumegroup", "create", self
.volname
, group
)
4353 # create subvolume in group.
4354 self
._fs
_cmd
("subvolume", "create", self
.volname
, subvolname
, group
)
4356 # snapshot subvolume
4357 self
._fs
_cmd
("subvolume", "snapshot", "create", self
.volname
, subvolname
, snapshot
, group
)
4359 # set metadata for snapshot.
4362 self
._fs
_cmd
("subvolume", "snapshot", "metadata", "set", self
.volname
, subvolname
, snapshot
, key
, value
, group
)
4364 # get value for specified key.
4365 ret
= self
._fs
_cmd
("subvolume", "snapshot", "metadata", "get", self
.volname
, subvolname
, snapshot
, key
, group
)
4367 # remove '\n' from returned value.
4368 ret
= ret
.strip('\n')
4370 # match received value with expected value.
4371 self
.assertEqual(value
, ret
)
4373 # remove subvolume snapshot.
4374 self
._fs
_cmd
("subvolume", "snapshot", "rm", self
.volname
, subvolname
, snapshot
, group
)
4376 # try to get metadata after removing snapshot.
4377 # Expecting error ENOENT with error message of snapshot does not exist
4378 cmd_ret
= self
.mgr_cluster
.mon_manager
.run_cluster_cmd(
4379 args
=["fs", "subvolume", "snapshot", "metadata", "get", self
.volname
, subvolname
, snapshot
, key
, group
],
4380 check_status
=False, stdout
=StringIO(), stderr
=StringIO())
4381 self
.assertEqual(cmd_ret
.returncode
, errno
.ENOENT
, "Expecting ENOENT error")
4382 self
.assertIn(f
"snapshot '{snapshot}' does not exist", cmd_ret
.stderr
.getvalue(),
4383 f
"Expecting message: snapshot '{snapshot}' does not exist ")
4385 # confirm metadata is removed by searching section name in .meta file
4386 meta_path
= os
.path
.join(".", "volumes", group
, subvolname
, ".meta")
4387 section_name
= "SNAP_METADATA_" + snapshot
4390 self
.mount_a
.run_shell(f
"sudo grep {section_name} {meta_path}", omit_sudo
=False)
4391 except CommandFailedError
as e
:
4392 self
.assertNotEqual(e
.exitstatus
, 0)
4394 self
.fail("Expected non-zero exist status because section should not exist")
4396 self
._fs
_cmd
("subvolume", "rm", self
.volname
, subvolname
, group
)
4397 self
._fs
_cmd
("subvolumegroup", "rm", self
.volname
, group
)
4399 # verify trash dir is clean.
4400 self
._wait
_for
_trash
_empty
()
4402 class TestSubvolumeSnapshotClones(TestVolumesHelper
):
4403 """ Tests for FS subvolume snapshot clone operations."""
4404 def test_clone_subvolume_info(self
):
4405 # tests the 'fs subvolume info' command for a clone
4406 subvol_md
= ["atime", "bytes_pcent", "bytes_quota", "bytes_used", "created_at", "ctime",
4407 "data_pool", "gid", "mode", "mon_addrs", "mtime", "path", "pool_namespace",
4410 subvolume
= self
._generate
_random
_subvolume
_name
()
4411 snapshot
= self
._generate
_random
_snapshot
_name
()
4412 clone
= self
._generate
_random
_clone
_name
()
4415 self
._fs
_cmd
("subvolume", "create", self
.volname
, subvolume
, "--mode=777")
4418 self
._do
_subvolume
_io
(subvolume
, number_of_files
=1)
4420 # snapshot subvolume
4421 self
._fs
_cmd
("subvolume", "snapshot", "create", self
.volname
, subvolume
, snapshot
)
4424 self
._fs
_cmd
("subvolume", "snapshot", "clone", self
.volname
, subvolume
, snapshot
, clone
)
4426 # check clone status
4427 self
._wait
_for
_clone
_to
_complete
(clone
)
4430 self
._fs
_cmd
("subvolume", "snapshot", "rm", self
.volname
, subvolume
, snapshot
)
4432 subvol_info
= json
.loads(self
._get
_subvolume
_info
(self
.volname
, clone
))
4433 if len(subvol_info
) == 0:
4434 raise RuntimeError("Expected the 'fs subvolume info' command to list metadata of subvolume")
4435 for md
in subvol_md
:
4436 if md
not in subvol_info
.keys():
4437 raise RuntimeError("%s not present in the metadata of subvolume" % md
)
4438 if subvol_info
["type"] != "clone":
4439 raise RuntimeError("type should be set to clone")
4442 self
._fs
_cmd
("subvolume", "rm", self
.volname
, subvolume
)
4443 self
._fs
_cmd
("subvolume", "rm", self
.volname
, clone
)
4445 # verify trash dir is clean
4446 self
._wait
_for
_trash
_empty
()
4448 def test_non_clone_status(self
):
4449 subvolume
= self
._generate
_random
_subvolume
_name
()
4452 self
._fs
_cmd
("subvolume", "create", self
.volname
, subvolume
)
4455 self
._fs
_cmd
("clone", "status", self
.volname
, subvolume
)
4456 except CommandFailedError
as ce
:
4457 if ce
.exitstatus
!= errno
.ENOTSUP
:
4458 raise RuntimeError("invalid error code when fetching status of a non cloned subvolume")
4460 raise RuntimeError("expected fetching of clone status of a subvolume to fail")
4463 self
._fs
_cmd
("subvolume", "rm", self
.volname
, subvolume
)
4465 # verify trash dir is clean
4466 self
._wait
_for
_trash
_empty
()
4468 def test_subvolume_clone_inherit_snapshot_namespace_and_size(self
):
4469 subvolume
= self
._generate
_random
_subvolume
_name
()
4470 snapshot
= self
._generate
_random
_snapshot
_name
()
4471 clone
= self
._generate
_random
_clone
_name
()
4472 osize
= self
.DEFAULT_FILE_SIZE
*1024*1024*12
4474 # create subvolume, in an isolated namespace with a specified size
4475 self
._fs
_cmd
("subvolume", "create", self
.volname
, subvolume
, "--namespace-isolated", "--size", str(osize
), "--mode=777")
4478 self
._do
_subvolume
_io
(subvolume
, number_of_files
=8)
4480 # snapshot subvolume
4481 self
._fs
_cmd
("subvolume", "snapshot", "create", self
.volname
, subvolume
, snapshot
)
4483 # create a pool different from current subvolume pool
4484 subvol_path
= self
._get
_subvolume
_path
(self
.volname
, subvolume
)
4485 default_pool
= self
.mount_a
.getfattr(subvol_path
, "ceph.dir.layout.pool")
4486 new_pool
= "new_pool"
4487 self
.assertNotEqual(default_pool
, new_pool
)
4488 self
.fs
.add_data_pool(new_pool
)
4490 # update source subvolume pool
4491 self
._do
_subvolume
_pool
_and
_namespace
_update
(subvolume
, pool
=new_pool
, pool_namespace
="")
4493 # schedule a clone, with NO --pool specification
4494 self
._fs
_cmd
("subvolume", "snapshot", "clone", self
.volname
, subvolume
, snapshot
, clone
)
4496 # check clone status
4497 self
._wait
_for
_clone
_to
_complete
(clone
)
4500 self
._verify
_clone
(subvolume
, snapshot
, clone
)
4503 self
._fs
_cmd
("subvolume", "snapshot", "rm", self
.volname
, subvolume
, snapshot
)
4506 self
._fs
_cmd
("subvolume", "rm", self
.volname
, subvolume
)
4507 self
._fs
_cmd
("subvolume", "rm", self
.volname
, clone
)
4509 # verify trash dir is clean
4510 self
._wait
_for
_trash
_empty
()
4512 def test_subvolume_clone_inherit_quota_attrs(self
):
4513 subvolume
= self
._generate
_random
_subvolume
_name
()
4514 snapshot
= self
._generate
_random
_snapshot
_name
()
4515 clone
= self
._generate
_random
_clone
_name
()
4516 osize
= self
.DEFAULT_FILE_SIZE
*1024*1024*12
4518 # create subvolume with a specified size
4519 self
._fs
_cmd
("subvolume", "create", self
.volname
, subvolume
, "--mode=777", "--size", str(osize
))
4522 self
._do
_subvolume
_io
(subvolume
, number_of_files
=8)
4524 # get subvolume path
4525 subvolpath
= self
._get
_subvolume
_path
(self
.volname
, subvolume
)
4527 # set quota on number of files
4528 self
.mount_a
.setfattr(subvolpath
, 'ceph.quota.max_files', "20", sudo
=True)
4530 # snapshot subvolume
4531 self
._fs
_cmd
("subvolume", "snapshot", "create", self
.volname
, subvolume
, snapshot
)
4534 self
._fs
_cmd
("subvolume", "snapshot", "clone", self
.volname
, subvolume
, snapshot
, clone
)
4536 # check clone status
4537 self
._wait
_for
_clone
_to
_complete
(clone
)
4540 self
._verify
_clone
(subvolume
, snapshot
, clone
)
4542 # get subvolume path
4543 clonepath
= self
._get
_subvolume
_path
(self
.volname
, clone
)
4545 # verify quota max_files is inherited from source snapshot
4546 subvol_quota
= self
.mount_a
.getfattr(subvolpath
, "ceph.quota.max_files")
4547 clone_quota
= self
.mount_a
.getfattr(clonepath
, "ceph.quota.max_files")
4548 self
.assertEqual(subvol_quota
, clone_quota
)
4551 self
._fs
_cmd
("subvolume", "snapshot", "rm", self
.volname
, subvolume
, snapshot
)
4554 self
._fs
_cmd
("subvolume", "rm", self
.volname
, subvolume
)
4555 self
._fs
_cmd
("subvolume", "rm", self
.volname
, clone
)
4557 # verify trash dir is clean
4558 self
._wait
_for
_trash
_empty
()
4560 def test_subvolume_clone_in_progress_getpath(self
):
4561 subvolume
= self
._generate
_random
_subvolume
_name
()
4562 snapshot
= self
._generate
_random
_snapshot
_name
()
4563 clone
= self
._generate
_random
_clone
_name
()
4566 self
._fs
_cmd
("subvolume", "create", self
.volname
, subvolume
, "--mode=777")
4569 self
._do
_subvolume
_io
(subvolume
, number_of_files
=64)
4571 # snapshot subvolume
4572 self
._fs
_cmd
("subvolume", "snapshot", "create", self
.volname
, subvolume
, snapshot
)
4574 # Insert delay at the beginning of snapshot clone
4575 self
.config_set('mgr', 'mgr/volumes/snapshot_clone_delay', 2)
4578 self
._fs
_cmd
("subvolume", "snapshot", "clone", self
.volname
, subvolume
, snapshot
, clone
)
4580 # clone should not be accessible right now
4582 self
._get
_subvolume
_path
(self
.volname
, clone
)
4583 except CommandFailedError
as ce
:
4584 if ce
.exitstatus
!= errno
.EAGAIN
:
4585 raise RuntimeError("invalid error code when fetching path of an pending clone")
4587 raise RuntimeError("expected fetching path of an pending clone to fail")
4589 # check clone status
4590 self
._wait
_for
_clone
_to
_complete
(clone
)
4592 # clone should be accessible now
4593 subvolpath
= self
._get
_subvolume
_path
(self
.volname
, clone
)
4594 self
.assertNotEqual(subvolpath
, None)
4597 self
._verify
_clone
(subvolume
, snapshot
, clone
)
4600 self
._fs
_cmd
("subvolume", "snapshot", "rm", self
.volname
, subvolume
, snapshot
)
4603 self
._fs
_cmd
("subvolume", "rm", self
.volname
, subvolume
)
4604 self
._fs
_cmd
("subvolume", "rm", self
.volname
, clone
)
4606 # verify trash dir is clean
4607 self
._wait
_for
_trash
_empty
()
4609 def test_subvolume_clone_in_progress_snapshot_rm(self
):
4610 subvolume
= self
._generate
_random
_subvolume
_name
()
4611 snapshot
= self
._generate
_random
_snapshot
_name
()
4612 clone
= self
._generate
_random
_clone
_name
()
4615 self
._fs
_cmd
("subvolume", "create", self
.volname
, subvolume
, "--mode=777")
4618 self
._do
_subvolume
_io
(subvolume
, number_of_files
=64)
4620 # snapshot subvolume
4621 self
._fs
_cmd
("subvolume", "snapshot", "create", self
.volname
, subvolume
, snapshot
)
4623 # Insert delay at the beginning of snapshot clone
4624 self
.config_set('mgr', 'mgr/volumes/snapshot_clone_delay', 2)
4627 self
._fs
_cmd
("subvolume", "snapshot", "clone", self
.volname
, subvolume
, snapshot
, clone
)
4629 # snapshot should not be deletable now
4631 self
._fs
_cmd
("subvolume", "snapshot", "rm", self
.volname
, subvolume
, snapshot
)
4632 except CommandFailedError
as ce
:
4633 self
.assertEqual(ce
.exitstatus
, errno
.EAGAIN
, msg
="invalid error code when removing source snapshot of a clone")
4635 self
.fail("expected removing source snapshot of a clone to fail")
4637 # check clone status
4638 self
._wait
_for
_clone
_to
_complete
(clone
)
4640 # clone should be accessible now
4641 subvolpath
= self
._get
_subvolume
_path
(self
.volname
, clone
)
4642 self
.assertNotEqual(subvolpath
, None)
4645 self
._verify
_clone
(subvolume
, snapshot
, clone
)
4648 self
._fs
_cmd
("subvolume", "snapshot", "rm", self
.volname
, subvolume
, snapshot
)
4651 self
._fs
_cmd
("subvolume", "rm", self
.volname
, subvolume
)
4652 self
._fs
_cmd
("subvolume", "rm", self
.volname
, clone
)
4654 # verify trash dir is clean
4655 self
._wait
_for
_trash
_empty
()
4657 def test_subvolume_clone_in_progress_source(self
):
4658 subvolume
= self
._generate
_random
_subvolume
_name
()
4659 snapshot
= self
._generate
_random
_snapshot
_name
()
4660 clone
= self
._generate
_random
_clone
_name
()
4663 self
._fs
_cmd
("subvolume", "create", self
.volname
, subvolume
, "--mode=777")
4666 self
._do
_subvolume
_io
(subvolume
, number_of_files
=64)
4668 # snapshot subvolume
4669 self
._fs
_cmd
("subvolume", "snapshot", "create", self
.volname
, subvolume
, snapshot
)
4671 # Insert delay at the beginning of snapshot clone
4672 self
.config_set('mgr', 'mgr/volumes/snapshot_clone_delay', 2)
4675 self
._fs
_cmd
("subvolume", "snapshot", "clone", self
.volname
, subvolume
, snapshot
, clone
)
4677 # verify clone source
4678 result
= json
.loads(self
._fs
_cmd
("clone", "status", self
.volname
, clone
))
4679 source
= result
['status']['source']
4680 self
.assertEqual(source
['volume'], self
.volname
)
4681 self
.assertEqual(source
['subvolume'], subvolume
)
4682 self
.assertEqual(source
.get('group', None), None)
4683 self
.assertEqual(source
['snapshot'], snapshot
)
4685 # check clone status
4686 self
._wait
_for
_clone
_to
_complete
(clone
)
4688 # clone should be accessible now
4689 subvolpath
= self
._get
_subvolume
_path
(self
.volname
, clone
)
4690 self
.assertNotEqual(subvolpath
, None)
4693 self
._verify
_clone
(subvolume
, snapshot
, clone
)
4696 self
._fs
_cmd
("subvolume", "snapshot", "rm", self
.volname
, subvolume
, snapshot
)
4699 self
._fs
_cmd
("subvolume", "rm", self
.volname
, subvolume
)
4700 self
._fs
_cmd
("subvolume", "rm", self
.volname
, clone
)
4702 # verify trash dir is clean
4703 self
._wait
_for
_trash
_empty
()
4705 def test_subvolume_clone_retain_snapshot_with_snapshots(self
):
4707 retain snapshots of a cloned subvolume and check disallowed operations
4709 subvolume
= self
._generate
_random
_subvolume
_name
()
4710 snapshot1
, snapshot2
= self
._generate
_random
_snapshot
_name
(2)
4711 clone
= self
._generate
_random
_clone
_name
()
4714 self
._fs
_cmd
("subvolume", "create", self
.volname
, subvolume
, "--mode=777")
4716 # store path for clone verification
4717 subvol1_path
= self
._get
_subvolume
_path
(self
.volname
, subvolume
)
4720 self
._do
_subvolume
_io
(subvolume
, number_of_files
=16)
4722 # snapshot subvolume
4723 self
._fs
_cmd
("subvolume", "snapshot", "create", self
.volname
, subvolume
, snapshot1
)
4725 # remove with snapshot retention
4726 self
._fs
_cmd
("subvolume", "rm", self
.volname
, subvolume
, "--retain-snapshots")
4728 # clone retained subvolume snapshot
4729 self
._fs
_cmd
("subvolume", "snapshot", "clone", self
.volname
, subvolume
, snapshot1
, clone
)
4731 # check clone status
4732 self
._wait
_for
_clone
_to
_complete
(clone
)
4735 self
._verify
_clone
(subvolume
, snapshot1
, clone
, subvol_path
=subvol1_path
)
4737 # create a snapshot on the clone
4738 self
._fs
_cmd
("subvolume", "snapshot", "create", self
.volname
, clone
, snapshot2
)
4741 self
._fs
_cmd
("subvolume", "rm", self
.volname
, clone
, "--retain-snapshots")
4744 clonesnapshotls
= json
.loads(self
._fs
_cmd
('subvolume', 'snapshot', 'ls', self
.volname
, clone
))
4745 self
.assertEqual(len(clonesnapshotls
), 1, "Expected the 'fs subvolume snapshot ls' command to list the"
4746 " created subvolume snapshots")
4747 snapshotnames
= [snapshot
['name'] for snapshot
in clonesnapshotls
]
4748 for snap
in [snapshot2
]:
4749 self
.assertIn(snap
, snapshotnames
, "Missing snapshot '{0}' in snapshot list".format(snap
))
4751 ## check disallowed operations on retained clone
4754 self
._fs
_cmd
("clone", "status", self
.volname
, clone
)
4755 except CommandFailedError
as ce
:
4756 self
.assertEqual(ce
.exitstatus
, errno
.ENOENT
, "invalid error code on clone status of clone with retained snapshots")
4758 self
.fail("expected clone status of clone with retained snapshots to fail")
4762 self
._fs
_cmd
("clone", "cancel", self
.volname
, clone
)
4763 except CommandFailedError
as ce
:
4764 self
.assertEqual(ce
.exitstatus
, errno
.ENOENT
, "invalid error code on clone cancel of clone with retained snapshots")
4766 self
.fail("expected clone cancel of clone with retained snapshots to fail")
4768 # remove snapshots (removes subvolumes as all are in retained state)
4769 self
._fs
_cmd
("subvolume", "snapshot", "rm", self
.volname
, subvolume
, snapshot1
)
4770 self
._fs
_cmd
("subvolume", "snapshot", "rm", self
.volname
, clone
, snapshot2
)
4772 # verify list subvolumes returns an empty list
4773 subvolumels
= json
.loads(self
._fs
_cmd
('subvolume', 'ls', self
.volname
))
4774 self
.assertEqual(len(subvolumels
), 0)
4776 # verify trash dir is clean
4777 self
._wait
_for
_trash
_empty
()
    def test_subvolume_retain_snapshot_clone(self):
        """
        clone a snapshot from a snapshot retained subvolume
        """
        subvolume = self._generate_random_subvolume_name()
        snapshot = self._generate_random_snapshot_name()
        clone = self._generate_random_clone_name()

        # create subvolume
        self._fs_cmd("subvolume", "create", self.volname, subvolume, "--mode=777")

        # store path for clone verification
        subvol_path = self._get_subvolume_path(self.volname, subvolume)

        # do some IO
        self._do_subvolume_io(subvolume, number_of_files=16)

        # snapshot subvolume
        self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolume, snapshot)

        # remove with snapshot retention
        self._fs_cmd("subvolume", "rm", self.volname, subvolume, "--retain-snapshots")

        # clone retained subvolume snapshot
        self._fs_cmd("subvolume", "snapshot", "clone", self.volname, subvolume, snapshot, clone)

        # check clone status
        self._wait_for_clone_to_complete(clone)

        # verify clone against the path captured before removal
        self._verify_clone(subvolume, snapshot, clone, subvol_path=subvol_path)

        # remove snapshots (removes retained volume)
        self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume, snapshot)

        # remove subvolume
        self._fs_cmd("subvolume", "rm", self.volname, clone)

        # verify list subvolumes returns an empty list
        subvolumels = json.loads(self._fs_cmd('subvolume', 'ls', self.volname))
        self.assertEqual(len(subvolumels), 0)

        # verify trash dir is clean
        self._wait_for_trash_empty()
    def test_subvolume_retain_snapshot_clone_from_newer_snapshot(self):
        """
        clone a subvolume from recreated subvolume's latest snapshot
        """
        subvolume = self._generate_random_subvolume_name()
        snapshot1, snapshot2 = self._generate_random_snapshot_name(2)
        clone = self._generate_random_clone_name(1)

        # create subvolume
        self._fs_cmd("subvolume", "create", self.volname, subvolume, "--mode=777")

        # do some IO
        self._do_subvolume_io(subvolume, number_of_files=16)

        # snapshot subvolume
        self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolume, snapshot1)

        # remove with snapshot retention
        self._fs_cmd("subvolume", "rm", self.volname, subvolume, "--retain-snapshots")

        # recreate subvolume
        self._fs_cmd("subvolume", "create", self.volname, subvolume, "--mode=777")

        # get and store path for clone verification
        subvol2_path = self._get_subvolume_path(self.volname, subvolume)

        # do some IO
        self._do_subvolume_io(subvolume, number_of_files=16)

        # snapshot newer subvolume
        self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolume, snapshot2)

        # remove with snapshot retention
        self._fs_cmd("subvolume", "rm", self.volname, subvolume, "--retain-snapshots")

        # clone retained subvolume's newer snapshot
        self._fs_cmd("subvolume", "snapshot", "clone", self.volname, subvolume, snapshot2, clone)

        # check clone status
        self._wait_for_clone_to_complete(clone)

        # verify clone against the recreated subvolume's path
        self._verify_clone(subvolume, snapshot2, clone, subvol_path=subvol2_path)

        # remove snapshots (removes retained volume)
        self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume, snapshot1)
        self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume, snapshot2)

        # remove subvolume
        self._fs_cmd("subvolume", "rm", self.volname, clone)

        # verify list subvolumes returns an empty list
        subvolumels = json.loads(self._fs_cmd('subvolume', 'ls', self.volname))
        self.assertEqual(len(subvolumels), 0)

        # verify trash dir is clean
        self._wait_for_trash_empty()
    def test_subvolume_retain_snapshot_recreate(self):
        """
        recreate a subvolume from one of its retained snapshots
        """
        subvolume = self._generate_random_subvolume_name()
        snapshot = self._generate_random_snapshot_name()

        # create subvolume
        self._fs_cmd("subvolume", "create", self.volname, subvolume, "--mode=777")

        # store path for clone verification
        subvol_path = self._get_subvolume_path(self.volname, subvolume)

        # do some IO
        self._do_subvolume_io(subvolume, number_of_files=16)

        # snapshot subvolume
        self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolume, snapshot)

        # remove with snapshot retention
        self._fs_cmd("subvolume", "rm", self.volname, subvolume, "--retain-snapshots")

        # recreate retained subvolume using its own snapshot to clone
        self._fs_cmd("subvolume", "snapshot", "clone", self.volname, subvolume, snapshot, subvolume)

        # check clone status
        self._wait_for_clone_to_complete(subvolume)

        # verify the recreated subvolume (clone source and target are the same name)
        self._verify_clone(subvolume, snapshot, subvolume, subvol_path=subvol_path)

        # remove snapshot
        self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume, snapshot)

        # remove subvolume
        self._fs_cmd("subvolume", "rm", self.volname, subvolume)

        # verify list subvolumes returns an empty list
        subvolumels = json.loads(self._fs_cmd('subvolume', 'ls', self.volname))
        self.assertEqual(len(subvolumels), 0)

        # verify trash dir is clean
        self._wait_for_trash_empty()
4926 def test_subvolume_retain_snapshot_trash_busy_recreate_clone(self
):
4928 ensure retained clone recreate fails if its trash is not yet purged
4930 subvolume
= self
._generate
_random
_subvolume
_name
()
4931 snapshot
= self
._generate
_random
_snapshot
_name
()
4932 clone
= self
._generate
_random
_clone
_name
()
4935 self
._fs
_cmd
("subvolume", "create", self
.volname
, subvolume
)
4937 # snapshot subvolume
4938 self
._fs
_cmd
("subvolume", "snapshot", "create", self
.volname
, subvolume
, snapshot
)
4940 # clone subvolume snapshot
4941 self
._fs
_cmd
("subvolume", "snapshot", "clone", self
.volname
, subvolume
, snapshot
, clone
)
4943 # check clone status
4944 self
._wait
_for
_clone
_to
_complete
(clone
)
4947 self
._fs
_cmd
("subvolume", "snapshot", "create", self
.volname
, clone
, snapshot
)
4949 # remove clone with snapshot retention
4950 self
._fs
_cmd
("subvolume", "rm", self
.volname
, clone
, "--retain-snapshots")
4952 # fake a trash entry
4953 self
._update
_fake
_trash
(clone
)
4955 # clone subvolume snapshot (recreate)
4957 self
._fs
_cmd
("subvolume", "snapshot", "clone", self
.volname
, subvolume
, snapshot
, clone
)
4958 except CommandFailedError
as ce
:
4959 self
.assertEqual(ce
.exitstatus
, errno
.EAGAIN
, "invalid error code on recreate of clone with purge pending")
4961 self
.fail("expected recreate of clone with purge pending to fail")
4963 # clear fake trash entry
4964 self
._update
_fake
_trash
(clone
, create
=False)
4966 # recreate subvolume
4967 self
._fs
_cmd
("subvolume", "snapshot", "clone", self
.volname
, subvolume
, snapshot
, clone
)
4969 # check clone status
4970 self
._wait
_for
_clone
_to
_complete
(clone
)
4973 self
._fs
_cmd
("subvolume", "snapshot", "rm", self
.volname
, subvolume
, snapshot
)
4974 self
._fs
_cmd
("subvolume", "snapshot", "rm", self
.volname
, clone
, snapshot
)
4977 self
._fs
_cmd
("subvolume", "rm", self
.volname
, subvolume
)
4978 self
._fs
_cmd
("subvolume", "rm", self
.volname
, clone
)
4980 # verify trash dir is clean
4981 self
._wait
_for
_trash
_empty
()
    def test_subvolume_snapshot_attr_clone(self):
        """Clone a snapshot of a subvolume populated with mixed-attribute IO and verify the clone."""
        subvolume = self._generate_random_subvolume_name()
        snapshot = self._generate_random_snapshot_name()
        clone = self._generate_random_clone_name()

        # create subvolume
        self._fs_cmd("subvolume", "create", self.volname, subvolume, "--mode=777")

        # do some IO
        self._do_subvolume_io_mixed(subvolume)

        # snapshot subvolume
        self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolume, snapshot)

        # schedule a clone
        self._fs_cmd("subvolume", "snapshot", "clone", self.volname, subvolume, snapshot, clone)

        # check clone status
        self._wait_for_clone_to_complete(clone)

        # verify clone
        self._verify_clone(subvolume, snapshot, clone)

        # remove snapshot
        self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume, snapshot)

        # remove subvolumes
        self._fs_cmd("subvolume", "rm", self.volname, subvolume)
        self._fs_cmd("subvolume", "rm", self.volname, clone)

        # verify trash dir is clean
        self._wait_for_trash_empty()
5016 def test_clone_failure_status_pending_in_progress_complete(self
):
5018 ensure failure status is not shown when clone is not in failed/cancelled state
5020 subvolume
= self
._generate
_random
_subvolume
_name
()
5021 snapshot
= self
._generate
_random
_snapshot
_name
()
5022 clone1
= self
._generate
_random
_clone
_name
()
5025 self
._fs
_cmd
("subvolume", "create", self
.volname
, subvolume
, "--mode=777")
5028 self
._do
_subvolume
_io
(subvolume
, number_of_files
=200)
5030 # snapshot subvolume
5031 self
._fs
_cmd
("subvolume", "snapshot", "create", self
.volname
, subvolume
, snapshot
)
5033 # Insert delay at the beginning of snapshot clone
5034 self
.config_set('mgr', 'mgr/volumes/snapshot_clone_delay', 5)
5037 self
._fs
_cmd
("subvolume", "snapshot", "clone", self
.volname
, subvolume
, snapshot
, clone1
)
5039 # pending clone shouldn't show failure status
5040 clone1_result
= self
._get
_clone
_status
(clone1
)
5042 clone1_result
["status"]["failure"]["errno"]
5043 except KeyError as e
:
5044 self
.assertEqual(str(e
), "'failure'")
5046 self
.fail("clone status shouldn't show failure for pending clone")
5048 # check clone1 to be in-progress
5049 self
._wait
_for
_clone
_to
_be
_in
_progress
(clone1
)
5051 # in-progress clone1 shouldn't show failure status
5052 clone1_result
= self
._get
_clone
_status
(clone1
)
5054 clone1_result
["status"]["failure"]["errno"]
5055 except KeyError as e
:
5056 self
.assertEqual(str(e
), "'failure'")
5058 self
.fail("clone status shouldn't show failure for in-progress clone")
5060 # wait for clone1 to complete
5061 self
._wait
_for
_clone
_to
_complete
(clone1
)
5063 # complete clone1 shouldn't show failure status
5064 clone1_result
= self
._get
_clone
_status
(clone1
)
5066 clone1_result
["status"]["failure"]["errno"]
5067 except KeyError as e
:
5068 self
.assertEqual(str(e
), "'failure'")
5070 self
.fail("clone status shouldn't show failure for complete clone")
5073 self
._fs
_cmd
("subvolume", "snapshot", "rm", self
.volname
, subvolume
, snapshot
)
5076 self
._fs
_cmd
("subvolume", "rm", self
.volname
, subvolume
)
5077 self
._fs
_cmd
("subvolume", "rm", self
.volname
, clone1
)
5079 # verify trash dir is clean
5080 self
._wait
_for
_trash
_empty
()
    def test_clone_failure_status_failed(self):
        """
        ensure failure status is shown when clone is in failed state and validate the reason
        """
        subvolume = self._generate_random_subvolume_name()
        snapshot = self._generate_random_snapshot_name()
        clone1 = self._generate_random_clone_name()

        # create subvolume
        self._fs_cmd("subvolume", "create", self.volname, subvolume, "--mode=777")

        # do some IO
        self._do_subvolume_io(subvolume, number_of_files=200)

        # snapshot subvolume
        self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolume, snapshot)

        # Insert delay at the beginning of snapshot clone
        self.config_set('mgr', 'mgr/volumes/snapshot_clone_delay', 5)

        # schedule a clone1
        self._fs_cmd("subvolume", "snapshot", "clone", self.volname, subvolume, snapshot, clone1)

        # remove snapshot from backend to force the clone failure.
        snappath = os.path.join(".", "volumes", "_nogroup", subvolume, ".snap", snapshot)
        self.mount_a.run_shell(['rmdir', snappath], sudo=True)

        # wait for clone1 to fail.
        self._wait_for_clone_to_fail(clone1)

        # check clone1 status: failed state with ENOENT (errno 2) for the missing snapshot
        clone1_result = self._get_clone_status(clone1)
        self.assertEqual(clone1_result["status"]["state"], "failed")
        self.assertEqual(clone1_result["status"]["failure"]["errno"], "2")
        self.assertEqual(clone1_result["status"]["failure"]["error_msg"], "snapshot '{0}' does not exist".format(snapshot))

        # clone removal should succeed after failure, remove clone1
        self._fs_cmd("subvolume", "rm", self.volname, clone1, "--force")

        # remove subvolume
        self._fs_cmd("subvolume", "rm", self.volname, subvolume)

        # verify trash dir is clean
        self._wait_for_trash_empty()
    def test_clone_failure_status_pending_cancelled(self):
        """
        ensure failure status is shown when clone is cancelled during pending state and validate the reason
        """
        subvolume = self._generate_random_subvolume_name()
        snapshot = self._generate_random_snapshot_name()
        clone1 = self._generate_random_clone_name()

        # create subvolume
        self._fs_cmd("subvolume", "create", self.volname, subvolume, "--mode=777")

        # do some IO
        self._do_subvolume_io(subvolume, number_of_files=200)

        # snapshot subvolume
        self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolume, snapshot)

        # Insert delay at the beginning of snapshot clone
        self.config_set('mgr', 'mgr/volumes/snapshot_clone_delay', 5)

        # schedule a clone1
        self._fs_cmd("subvolume", "snapshot", "clone", self.volname, subvolume, snapshot, clone1)

        # cancel pending clone1
        self._fs_cmd("clone", "cancel", self.volname, clone1)

        # check clone1 status: canceled with EINTR (errno 4)
        clone1_result = self._get_clone_status(clone1)
        self.assertEqual(clone1_result["status"]["state"], "canceled")
        self.assertEqual(clone1_result["status"]["failure"]["errno"], "4")
        self.assertEqual(clone1_result["status"]["failure"]["error_msg"], "user interrupted clone operation")

        # clone removal should succeed with force after cancelled, remove clone1
        self._fs_cmd("subvolume", "rm", self.volname, clone1, "--force")

        # remove snapshot
        self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume, snapshot)

        # remove subvolume
        self._fs_cmd("subvolume", "rm", self.volname, subvolume)

        # verify trash dir is clean
        self._wait_for_trash_empty()
    def test_clone_failure_status_in_progress_cancelled(self):
        """
        ensure failure status is shown when clone is cancelled during in-progress state and validate the reason
        """
        subvolume = self._generate_random_subvolume_name()
        snapshot = self._generate_random_snapshot_name()
        clone1 = self._generate_random_clone_name()

        # create subvolume
        self._fs_cmd("subvolume", "create", self.volname, subvolume, "--mode=777")

        # do some IO
        self._do_subvolume_io(subvolume, number_of_files=200)

        # snapshot subvolume
        self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolume, snapshot)

        # Insert delay at the beginning of snapshot clone
        self.config_set('mgr', 'mgr/volumes/snapshot_clone_delay', 5)

        # schedule a clone1
        self._fs_cmd("subvolume", "snapshot", "clone", self.volname, subvolume, snapshot, clone1)

        # wait for clone1 to be in-progress
        self._wait_for_clone_to_be_in_progress(clone1)

        # cancel in-progess clone1
        self._fs_cmd("clone", "cancel", self.volname, clone1)

        # check clone1 status: canceled with EINTR (errno 4)
        clone1_result = self._get_clone_status(clone1)
        self.assertEqual(clone1_result["status"]["state"], "canceled")
        self.assertEqual(clone1_result["status"]["failure"]["errno"], "4")
        self.assertEqual(clone1_result["status"]["failure"]["error_msg"], "user interrupted clone operation")

        # clone removal should succeed with force after cancelled, remove clone1
        self._fs_cmd("subvolume", "rm", self.volname, clone1, "--force")

        # remove snapshot
        self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume, snapshot)

        # remove subvolume
        self._fs_cmd("subvolume", "rm", self.volname, subvolume)

        # verify trash dir is clean
        self._wait_for_trash_empty()
    def test_subvolume_snapshot_clone(self):
        """Basic happy path: snapshot a subvolume, clone the snapshot, verify, clean up."""
        subvolume = self._generate_random_subvolume_name()
        snapshot = self._generate_random_snapshot_name()
        clone = self._generate_random_clone_name()

        # create subvolume
        self._fs_cmd("subvolume", "create", self.volname, subvolume, "--mode=777")

        # do some IO
        self._do_subvolume_io(subvolume, number_of_files=64)

        # snapshot subvolume
        self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolume, snapshot)

        # schedule a clone
        self._fs_cmd("subvolume", "snapshot", "clone", self.volname, subvolume, snapshot, clone)

        # check clone status
        self._wait_for_clone_to_complete(clone)

        # verify clone
        self._verify_clone(subvolume, snapshot, clone)

        # remove snapshot
        self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume, snapshot)

        # remove subvolumes
        self._fs_cmd("subvolume", "rm", self.volname, subvolume)
        self._fs_cmd("subvolume", "rm", self.volname, clone)

        # verify trash dir is clean
        self._wait_for_trash_empty()
    def test_subvolume_snapshot_clone_quota_exceeded(self):
        """Clone a snapshot of a subvolume whose data exceeds its quota and verify the clone."""
        subvolume = self._generate_random_subvolume_name()
        snapshot = self._generate_random_snapshot_name()
        clone = self._generate_random_clone_name()

        # create subvolume with 20MB quota
        osize = self.DEFAULT_FILE_SIZE*1024*1024*20
        self._fs_cmd("subvolume", "create", self.volname, subvolume,"--mode=777", "--size", str(osize))

        # do IO, write 50 files of 1MB each to exceed quota. This mostly succeeds as quota enforcement takes time.
        self._do_subvolume_io(subvolume, number_of_files=50)

        # snapshot subvolume
        self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolume, snapshot)

        # schedule a clone
        self._fs_cmd("subvolume", "snapshot", "clone", self.volname, subvolume, snapshot, clone)

        # check clone status
        self._wait_for_clone_to_complete(clone)

        # verify clone
        self._verify_clone(subvolume, snapshot, clone)

        # remove snapshot
        self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume, snapshot)

        # remove subvolumes
        self._fs_cmd("subvolume", "rm", self.volname, subvolume)
        self._fs_cmd("subvolume", "rm", self.volname, clone)

        # verify trash dir is clean
        self._wait_for_trash_empty()
5285 def test_subvolume_snapshot_in_complete_clone_rm(self
):
5287 Validates the removal of clone when it is not in 'complete|cancelled|failed' state.
5288 The forceful removl of subvolume clone succeeds only if it's in any of the
5289 'complete|cancelled|failed' states. It fails with EAGAIN in any other states.
5292 subvolume
= self
._generate
_random
_subvolume
_name
()
5293 snapshot
= self
._generate
_random
_snapshot
_name
()
5294 clone
= self
._generate
_random
_clone
_name
()
5297 self
._fs
_cmd
("subvolume", "create", self
.volname
, subvolume
, "--mode=777")
5300 self
._do
_subvolume
_io
(subvolume
, number_of_files
=64)
5302 # snapshot subvolume
5303 self
._fs
_cmd
("subvolume", "snapshot", "create", self
.volname
, subvolume
, snapshot
)
5305 # Insert delay at the beginning of snapshot clone
5306 self
.config_set('mgr', 'mgr/volumes/snapshot_clone_delay', 2)
5309 self
._fs
_cmd
("subvolume", "snapshot", "clone", self
.volname
, subvolume
, snapshot
, clone
)
5311 # Use --force since clone is not complete. Returns EAGAIN as clone is not either complete or cancelled.
5313 self
._fs
_cmd
("subvolume", "rm", self
.volname
, clone
, "--force")
5314 except CommandFailedError
as ce
:
5315 if ce
.exitstatus
!= errno
.EAGAIN
:
5316 raise RuntimeError("invalid error code when trying to remove failed clone")
5318 raise RuntimeError("expected error when removing a failed clone")
5320 # cancel on-going clone
5321 self
._fs
_cmd
("clone", "cancel", self
.volname
, clone
)
5323 # verify canceled state
5324 self
._check
_clone
_canceled
(clone
)
5326 # clone removal should succeed after cancel
5327 self
._fs
_cmd
("subvolume", "rm", self
.volname
, clone
, "--force")
5330 self
._fs
_cmd
("subvolume", "snapshot", "rm", self
.volname
, subvolume
, snapshot
)
5333 self
._fs
_cmd
("subvolume", "rm", self
.volname
, subvolume
)
5335 # verify trash dir is clean
5336 self
._wait
_for
_trash
_empty
()
5338 def test_subvolume_snapshot_clone_retain_suid_guid(self
):
5339 subvolume
= self
._generate
_random
_subvolume
_name
()
5340 snapshot
= self
._generate
_random
_snapshot
_name
()
5341 clone
= self
._generate
_random
_clone
_name
()
5344 self
._fs
_cmd
("subvolume", "create", self
.volname
, subvolume
, "--mode=777")
5346 # Create a file with suid, guid bits set along with executable bit.
5347 args
= ["subvolume", "getpath", self
.volname
, subvolume
]
5349 subvolpath
= self
._fs
_cmd
(*args
)
5350 self
.assertNotEqual(subvolpath
, None)
5351 subvolpath
= subvolpath
[1:].rstrip() # remove "/" prefix and any trailing newline
5353 file_path
= subvolpath
5354 file_path
= os
.path
.join(subvolpath
, "test_suid_file")
5355 self
.mount_a
.run_shell(["touch", file_path
])
5356 self
.mount_a
.run_shell(["chmod", "u+sx,g+sx", file_path
])
5358 # snapshot subvolume
5359 self
._fs
_cmd
("subvolume", "snapshot", "create", self
.volname
, subvolume
, snapshot
)
5362 self
._fs
_cmd
("subvolume", "snapshot", "clone", self
.volname
, subvolume
, snapshot
, clone
)
5364 # check clone status
5365 self
._wait
_for
_clone
_to
_complete
(clone
)
5368 self
._verify
_clone
(subvolume
, snapshot
, clone
)
5371 self
._fs
_cmd
("subvolume", "snapshot", "rm", self
.volname
, subvolume
, snapshot
)
5374 self
._fs
_cmd
("subvolume", "rm", self
.volname
, subvolume
)
5375 self
._fs
_cmd
("subvolume", "rm", self
.volname
, clone
)
5377 # verify trash dir is clean
5378 self
._wait
_for
_trash
_empty
()
    def test_subvolume_snapshot_clone_and_reclone(self):
        """Clone a snapshot, then snapshot the clone and clone that snapshot again."""
        subvolume = self._generate_random_subvolume_name()
        snapshot = self._generate_random_snapshot_name()
        clone1, clone2 = self._generate_random_clone_name(2)

        # create subvolume
        self._fs_cmd("subvolume", "create", self.volname, subvolume, "--mode=777")

        # do some IO
        self._do_subvolume_io(subvolume, number_of_files=32)

        # snapshot subvolume
        self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolume, snapshot)

        # schedule a clone
        self._fs_cmd("subvolume", "snapshot", "clone", self.volname, subvolume, snapshot, clone1)

        # check clone status
        self._wait_for_clone_to_complete(clone1)

        # verify clone
        self._verify_clone(subvolume, snapshot, clone1)

        # remove snapshot
        self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume, snapshot)

        # now the clone is just like a normal subvolume -- snapshot the clone and fork
        # another clone. before that do some IO so it's can be differentiated.
        self._do_subvolume_io(clone1, create_dir="data", number_of_files=32)

        # snapshot clone -- use same snap name
        self._fs_cmd("subvolume", "snapshot", "create", self.volname, clone1, snapshot)

        # schedule a clone
        self._fs_cmd("subvolume", "snapshot", "clone", self.volname, clone1, snapshot, clone2)

        # check clone status
        self._wait_for_clone_to_complete(clone2)

        # verify clone
        self._verify_clone(clone1, snapshot, clone2)

        # remove snapshot
        self._fs_cmd("subvolume", "snapshot", "rm", self.volname, clone1, snapshot)

        # remove subvolumes
        self._fs_cmd("subvolume", "rm", self.volname, subvolume)
        self._fs_cmd("subvolume", "rm", self.volname, clone1)
        self._fs_cmd("subvolume", "rm", self.volname, clone2)

        # verify trash dir is clean
        self._wait_for_trash_empty()
    def test_subvolume_snapshot_clone_cancel_in_progress(self):
        """Cancel a clone while it is in-progress and verify the canceled state."""
        subvolume = self._generate_random_subvolume_name()
        snapshot = self._generate_random_snapshot_name()
        clone = self._generate_random_clone_name()

        # create subvolume
        self._fs_cmd("subvolume", "create", self.volname, subvolume, "--mode=777")

        # do some IO
        self._do_subvolume_io(subvolume, number_of_files=128)

        # snapshot subvolume
        self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolume, snapshot)

        # Insert delay at the beginning of snapshot clone
        self.config_set('mgr', 'mgr/volumes/snapshot_clone_delay', 2)

        # schedule a clone
        self._fs_cmd("subvolume", "snapshot", "clone", self.volname, subvolume, snapshot, clone)

        # cancel on-going clone
        self._fs_cmd("clone", "cancel", self.volname, clone)

        # verify canceled state
        self._check_clone_canceled(clone)

        # remove snapshot
        self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume, snapshot)

        # remove subvolumes (canceled clone needs --force)
        self._fs_cmd("subvolume", "rm", self.volname, subvolume)
        self._fs_cmd("subvolume", "rm", self.volname, clone, "--force")

        # verify trash dir is clean
        self._wait_for_trash_empty()
5469 def test_subvolume_snapshot_clone_cancel_pending(self
):
5471 this test is a bit more involved compared to canceling an in-progress clone.
5472 we'd need to ensure that a to-be canceled clone has still not been picked up
5473 by cloner threads. exploit the fact that clones are picked up in an FCFS
5474 fashion and there are four (4) cloner threads by default. When the number of
5475 cloner threads increase, this test _may_ start tripping -- so, the number of
5476 clone operations would need to be jacked up.
5478 # default number of clone threads
5480 # good enough for 4 threads
5482 # yeh, 1gig -- we need the clone to run for sometime
5485 subvolume
= self
._generate
_random
_subvolume
_name
()
5486 snapshot
= self
._generate
_random
_snapshot
_name
()
5487 clones
= self
._generate
_random
_clone
_name
(NR_CLONES
)
5490 self
._fs
_cmd
("subvolume", "create", self
.volname
, subvolume
, "--mode=777")
5493 self
._do
_subvolume
_io
(subvolume
, number_of_files
=4, file_size
=FILE_SIZE_MB
)
5495 # snapshot subvolume
5496 self
._fs
_cmd
("subvolume", "snapshot", "create", self
.volname
, subvolume
, snapshot
)
5499 for clone
in clones
:
5500 self
._fs
_cmd
("subvolume", "snapshot", "clone", self
.volname
, subvolume
, snapshot
, clone
)
5502 to_wait
= clones
[0:NR_THREADS
]
5503 to_cancel
= clones
[NR_THREADS
:]
5505 # cancel pending clones and verify
5506 for clone
in to_cancel
:
5507 status
= json
.loads(self
._fs
_cmd
("clone", "status", self
.volname
, clone
))
5508 self
.assertEqual(status
["status"]["state"], "pending")
5509 self
._fs
_cmd
("clone", "cancel", self
.volname
, clone
)
5510 self
._check
_clone
_canceled
(clone
)
5512 # let's cancel on-going clones. handle the case where some of the clones
5514 for clone
in list(to_wait
):
5516 self
._fs
_cmd
("clone", "cancel", self
.volname
, clone
)
5517 to_cancel
.append(clone
)
5518 to_wait
.remove(clone
)
5519 except CommandFailedError
as ce
:
5520 if ce
.exitstatus
!= errno
.EINVAL
:
5521 raise RuntimeError("invalid error code when cancelling on-going clone")
5524 self
._fs
_cmd
("subvolume", "snapshot", "rm", self
.volname
, subvolume
, snapshot
)
5527 self
._fs
_cmd
("subvolume", "rm", self
.volname
, subvolume
)
5528 for clone
in to_wait
:
5529 self
._fs
_cmd
("subvolume", "rm", self
.volname
, clone
)
5530 for clone
in to_cancel
:
5531 self
._fs
_cmd
("subvolume", "rm", self
.volname
, clone
, "--force")
5533 # verify trash dir is clean
5534 self
._wait
_for
_trash
_empty
()
    def test_subvolume_snapshot_clone_different_groups(self):
        """Clone a snapshot from a subvolume in one group into a different target group."""
        subvolume = self._generate_random_subvolume_name()
        snapshot = self._generate_random_snapshot_name()
        clone = self._generate_random_clone_name()
        s_group, c_group = self._generate_random_group_name(2)

        # create groups
        self._fs_cmd("subvolumegroup", "create", self.volname, s_group)
        self._fs_cmd("subvolumegroup", "create", self.volname, c_group)

        # create subvolume
        self._fs_cmd("subvolume", "create", self.volname, subvolume, s_group, "--mode=777")

        # do some IO
        self._do_subvolume_io(subvolume, subvolume_group=s_group, number_of_files=32)

        # snapshot subvolume
        self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolume, snapshot, s_group)

        # schedule a clone into the other group
        self._fs_cmd("subvolume", "snapshot", "clone", self.volname, subvolume, snapshot, clone,
                     '--group_name', s_group, '--target_group_name', c_group)

        # check clone status
        self._wait_for_clone_to_complete(clone, clone_group=c_group)

        # verify clone
        self._verify_clone(subvolume, snapshot, clone, source_group=s_group, clone_group=c_group)

        # remove snapshot
        self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume, snapshot, s_group)

        # remove subvolumes
        self._fs_cmd("subvolume", "rm", self.volname, subvolume, s_group)
        self._fs_cmd("subvolume", "rm", self.volname, clone, c_group)

        # remove groups
        self._fs_cmd("subvolumegroup", "rm", self.volname, s_group)
        self._fs_cmd("subvolumegroup", "rm", self.volname, c_group)

        # verify trash dir is clean
        self._wait_for_trash_empty()
5579 def test_subvolume_snapshot_clone_fail_with_remove(self
):
5580 subvolume
= self
._generate
_random
_subvolume
_name
()
5581 snapshot
= self
._generate
_random
_snapshot
_name
()
5582 clone1
, clone2
= self
._generate
_random
_clone
_name
(2)
5584 pool_capacity
= 32 * 1024 * 1024
5585 # number of files required to fill up 99% of the pool
5586 nr_files
= int((pool_capacity
* 0.99) / (TestVolumes
.DEFAULT_FILE_SIZE
* 1024 * 1024))
5589 self
._fs
_cmd
("subvolume", "create", self
.volname
, subvolume
, "--mode=777")
5592 self
._do
_subvolume
_io
(subvolume
, number_of_files
=nr_files
)
5594 # snapshot subvolume
5595 self
._fs
_cmd
("subvolume", "snapshot", "create", self
.volname
, subvolume
, snapshot
)
5598 new_pool
= "new_pool"
5599 self
.fs
.add_data_pool(new_pool
)
5601 self
.fs
.mon_manager
.raw_cluster_cmd("osd", "pool", "set-quota", new_pool
,
5602 "max_bytes", "{0}".format(pool_capacity
// 4))
5605 self
._fs
_cmd
("subvolume", "snapshot", "clone", self
.volname
, subvolume
, snapshot
, clone1
, "--pool_layout", new_pool
)
5607 # check clone status -- this should dramatically overshoot the pool quota
5608 self
._wait
_for
_clone
_to
_complete
(clone1
)
5611 self
._verify
_clone
(subvolume
, snapshot
, clone1
, clone_pool
=new_pool
)
5613 # wait a bit so that subsequent I/O will give pool full error
5617 self
._fs
_cmd
("subvolume", "snapshot", "clone", self
.volname
, subvolume
, snapshot
, clone2
, "--pool_layout", new_pool
)
5619 # check clone status
5620 self
._wait
_for
_clone
_to
_fail
(clone2
)
5623 self
._fs
_cmd
("subvolume", "snapshot", "rm", self
.volname
, subvolume
, snapshot
)
5626 self
._fs
_cmd
("subvolume", "rm", self
.volname
, subvolume
)
5627 self
._fs
_cmd
("subvolume", "rm", self
.volname
, clone1
)
5629 self
._fs
_cmd
("subvolume", "rm", self
.volname
, clone2
)
5630 except CommandFailedError
as ce
:
5631 if ce
.exitstatus
!= errno
.EAGAIN
:
5632 raise RuntimeError("invalid error code when trying to remove failed clone")
5634 raise RuntimeError("expected error when removing a failed clone")
5636 # ... and with force, failed clone can be removed
5637 self
._fs
_cmd
("subvolume", "rm", self
.volname
, clone2
, "--force")
5639 # verify trash dir is clean
5640 self
._wait
_for
_trash
_empty
()
5642 def test_subvolume_snapshot_clone_on_existing_subvolumes(self
):
5643 subvolume1
, subvolume2
= self
._generate
_random
_subvolume
_name
(2)
5644 snapshot
= self
._generate
_random
_snapshot
_name
()
5645 clone
= self
._generate
_random
_clone
_name
()
5648 self
._fs
_cmd
("subvolume", "create", self
.volname
, subvolume1
, "--mode=777")
5649 self
._fs
_cmd
("subvolume", "create", self
.volname
, subvolume2
, "--mode=777")
5652 self
._do
_subvolume
_io
(subvolume1
, number_of_files
=32)
5654 # snapshot subvolume
5655 self
._fs
_cmd
("subvolume", "snapshot", "create", self
.volname
, subvolume1
, snapshot
)
5657 # schedule a clone with target as subvolume2
5659 self
._fs
_cmd
("subvolume", "snapshot", "clone", self
.volname
, subvolume1
, snapshot
, subvolume2
)
5660 except CommandFailedError
as ce
:
5661 if ce
.exitstatus
!= errno
.EEXIST
:
5662 raise RuntimeError("invalid error code when cloning to existing subvolume")
5664 raise RuntimeError("expected cloning to fail if the target is an existing subvolume")
5666 self
._fs
_cmd
("subvolume", "snapshot", "clone", self
.volname
, subvolume1
, snapshot
, clone
)
5668 # schedule a clone with target as clone
5670 self
._fs
_cmd
("subvolume", "snapshot", "clone", self
.volname
, subvolume1
, snapshot
, clone
)
5671 except CommandFailedError
as ce
:
5672 if ce
.exitstatus
!= errno
.EEXIST
:
5673 raise RuntimeError("invalid error code when cloning to existing clone")
5675 raise RuntimeError("expected cloning to fail if the target is an existing clone")
5677 # check clone status
5678 self
._wait
_for
_clone
_to
_complete
(clone
)
5681 self
._verify
_clone
(subvolume1
, snapshot
, clone
)
5684 self
._fs
_cmd
("subvolume", "snapshot", "rm", self
.volname
, subvolume1
, snapshot
)
5687 self
._fs
_cmd
("subvolume", "rm", self
.volname
, subvolume1
)
5688 self
._fs
_cmd
("subvolume", "rm", self
.volname
, subvolume2
)
5689 self
._fs
_cmd
("subvolume", "rm", self
.volname
, clone
)
5691 # verify trash dir is clean
5692 self
._wait
_for
_trash
_empty
()
5694 def test_subvolume_snapshot_clone_pool_layout(self
):
5695 subvolume
= self
._generate
_random
_subvolume
_name
()
5696 snapshot
= self
._generate
_random
_snapshot
_name
()
5697 clone
= self
._generate
_random
_clone
_name
()
5700 new_pool
= "new_pool"
5701 newid
= self
.fs
.add_data_pool(new_pool
)
5704 self
._fs
_cmd
("subvolume", "create", self
.volname
, subvolume
, "--mode=777")
5707 self
._do
_subvolume
_io
(subvolume
, number_of_files
=32)
5709 # snapshot subvolume
5710 self
._fs
_cmd
("subvolume", "snapshot", "create", self
.volname
, subvolume
, snapshot
)
5713 self
._fs
_cmd
("subvolume", "snapshot", "clone", self
.volname
, subvolume
, snapshot
, clone
, "--pool_layout", new_pool
)
5715 # check clone status
5716 self
._wait
_for
_clone
_to
_complete
(clone
)
5719 self
._verify
_clone
(subvolume
, snapshot
, clone
, clone_pool
=new_pool
)
5722 self
._fs
_cmd
("subvolume", "snapshot", "rm", self
.volname
, subvolume
, snapshot
)
5724 subvol_path
= self
._get
_subvolume
_path
(self
.volname
, clone
)
5725 desired_pool
= self
.mount_a
.getfattr(subvol_path
, "ceph.dir.layout.pool")
5727 self
.assertEqual(desired_pool
, new_pool
)
5728 except AssertionError:
5729 self
.assertEqual(int(desired_pool
), newid
) # old kernel returns id
5732 self
._fs
_cmd
("subvolume", "rm", self
.volname
, subvolume
)
5733 self
._fs
_cmd
("subvolume", "rm", self
.volname
, clone
)
5735 # verify trash dir is clean
5736 self
._wait
_for
_trash
_empty
()
5738 def test_subvolume_snapshot_clone_under_group(self
):
5739 subvolume
= self
._generate
_random
_subvolume
_name
()
5740 snapshot
= self
._generate
_random
_snapshot
_name
()
5741 clone
= self
._generate
_random
_clone
_name
()
5742 group
= self
._generate
_random
_group
_name
()
5745 self
._fs
_cmd
("subvolume", "create", self
.volname
, subvolume
, "--mode=777")
5748 self
._do
_subvolume
_io
(subvolume
, number_of_files
=32)
5750 # snapshot subvolume
5751 self
._fs
_cmd
("subvolume", "snapshot", "create", self
.volname
, subvolume
, snapshot
)
5754 self
._fs
_cmd
("subvolumegroup", "create", self
.volname
, group
)
5757 self
._fs
_cmd
("subvolume", "snapshot", "clone", self
.volname
, subvolume
, snapshot
, clone
, '--target_group_name', group
)
5759 # check clone status
5760 self
._wait
_for
_clone
_to
_complete
(clone
, clone_group
=group
)
5763 self
._verify
_clone
(subvolume
, snapshot
, clone
, clone_group
=group
)
5766 self
._fs
_cmd
("subvolume", "snapshot", "rm", self
.volname
, subvolume
, snapshot
)
5769 self
._fs
_cmd
("subvolume", "rm", self
.volname
, subvolume
)
5770 self
._fs
_cmd
("subvolume", "rm", self
.volname
, clone
, group
)
5773 self
._fs
_cmd
("subvolumegroup", "rm", self
.volname
, group
)
5775 # verify trash dir is clean
5776 self
._wait
_for
_trash
_empty
()
5778 def test_subvolume_snapshot_clone_with_attrs(self
):
5779 subvolume
= self
._generate
_random
_subvolume
_name
()
5780 snapshot
= self
._generate
_random
_snapshot
_name
()
5781 clone
= self
._generate
_random
_clone
_name
()
5791 self
._fs
_cmd
("subvolume", "create", self
.volname
, subvolume
, "--mode", mode
, "--uid", uid
, "--gid", gid
)
5794 self
._do
_subvolume
_io
(subvolume
, number_of_files
=32)
5796 # snapshot subvolume
5797 self
._fs
_cmd
("subvolume", "snapshot", "create", self
.volname
, subvolume
, snapshot
)
5799 # change subvolume attrs (to ensure clone picks up snapshot attrs)
5800 self
._do
_subvolume
_attr
_update
(subvolume
, new_uid
, new_gid
, new_mode
)
5803 self
._fs
_cmd
("subvolume", "snapshot", "clone", self
.volname
, subvolume
, snapshot
, clone
)
5805 # check clone status
5806 self
._wait
_for
_clone
_to
_complete
(clone
)
5809 self
._verify
_clone
(subvolume
, snapshot
, clone
)
5812 self
._fs
_cmd
("subvolume", "snapshot", "rm", self
.volname
, subvolume
, snapshot
)
5815 self
._fs
_cmd
("subvolume", "rm", self
.volname
, subvolume
)
5816 self
._fs
_cmd
("subvolume", "rm", self
.volname
, clone
)
5818 # verify trash dir is clean
5819 self
._wait
_for
_trash
_empty
()
5821 def test_subvolume_snapshot_clone_with_upgrade(self
):
5823 yet another poor man's upgrade test -- rather than going through a full
5824 upgrade cycle, emulate old types subvolumes by going through the wormhole
5825 and verify clone operation.
5826 further ensure that a legacy volume is not updated to v2, but clone is.
5828 subvolume
= self
._generate
_random
_subvolume
_name
()
5829 snapshot
= self
._generate
_random
_snapshot
_name
()
5830 clone
= self
._generate
_random
_clone
_name
()
5832 # emulate a old-fashioned subvolume
5833 createpath
= os
.path
.join(".", "volumes", "_nogroup", subvolume
)
5834 self
.mount_a
.run_shell_payload(f
"mkdir -p -m 777 {createpath}", sudo
=True)
5836 # add required xattrs to subvolume
5837 default_pool
= self
.mount_a
.getfattr(".", "ceph.dir.layout.pool")
5838 self
.mount_a
.setfattr(createpath
, 'ceph.dir.layout.pool', default_pool
, sudo
=True)
5841 self
._do
_subvolume
_io
(subvolume
, number_of_files
=64)
5843 # snapshot subvolume
5844 self
._fs
_cmd
("subvolume", "snapshot", "create", self
.volname
, subvolume
, snapshot
)
5846 # ensure metadata file is in legacy location, with required version v1
5847 self
._assert
_meta
_location
_and
_version
(self
.volname
, subvolume
, version
=1, legacy
=True)
5849 # Insert delay at the beginning of snapshot clone
5850 self
.config_set('mgr', 'mgr/volumes/snapshot_clone_delay', 2)
5853 self
._fs
_cmd
("subvolume", "snapshot", "clone", self
.volname
, subvolume
, snapshot
, clone
)
5855 # snapshot should not be deletable now
5857 self
._fs
_cmd
("subvolume", "snapshot", "rm", self
.volname
, subvolume
, snapshot
)
5858 except CommandFailedError
as ce
:
5859 self
.assertEqual(ce
.exitstatus
, errno
.EAGAIN
, msg
="invalid error code when removing source snapshot of a clone")
5861 self
.fail("expected removing source snapshot of a clone to fail")
5863 # check clone status
5864 self
._wait
_for
_clone
_to
_complete
(clone
)
5867 self
._verify
_clone
(subvolume
, snapshot
, clone
, source_version
=1)
5870 self
._fs
_cmd
("subvolume", "snapshot", "rm", self
.volname
, subvolume
, snapshot
)
5872 # ensure metadata file is in v2 location, with required version v2
5873 self
._assert
_meta
_location
_and
_version
(self
.volname
, clone
)
5876 self
._fs
_cmd
("subvolume", "rm", self
.volname
, subvolume
)
5877 self
._fs
_cmd
("subvolume", "rm", self
.volname
, clone
)
5879 # verify trash dir is clean
5880 self
._wait
_for
_trash
_empty
()
5882 def test_subvolume_snapshot_reconf_max_concurrent_clones(self
):
5884 Validate 'max_concurrent_clones' config option
5887 # get the default number of cloner threads
5888 default_max_concurrent_clones
= int(self
.config_get('mgr', 'mgr/volumes/max_concurrent_clones'))
5889 self
.assertEqual(default_max_concurrent_clones
, 4)
5891 # Increase number of cloner threads
5892 self
.config_set('mgr', 'mgr/volumes/max_concurrent_clones', 6)
5893 max_concurrent_clones
= int(self
.config_get('mgr', 'mgr/volumes/max_concurrent_clones'))
5894 self
.assertEqual(max_concurrent_clones
, 6)
5896 # Decrease number of cloner threads
5897 self
.config_set('mgr', 'mgr/volumes/max_concurrent_clones', 2)
5898 max_concurrent_clones
= int(self
.config_get('mgr', 'mgr/volumes/max_concurrent_clones'))
5899 self
.assertEqual(max_concurrent_clones
, 2)
5901 def test_subvolume_snapshot_config_snapshot_clone_delay(self
):
5903 Validate 'snapshot_clone_delay' config option
5906 # get the default delay before starting the clone
5907 default_timeout
= int(self
.config_get('mgr', 'mgr/volumes/snapshot_clone_delay'))
5908 self
.assertEqual(default_timeout
, 0)
5910 # Insert delay of 2 seconds at the beginning of the snapshot clone
5911 self
.config_set('mgr', 'mgr/volumes/snapshot_clone_delay', 2)
5912 default_timeout
= int(self
.config_get('mgr', 'mgr/volumes/snapshot_clone_delay'))
5913 self
.assertEqual(default_timeout
, 2)
5915 # Decrease number of cloner threads
5916 self
.config_set('mgr', 'mgr/volumes/max_concurrent_clones', 2)
5917 max_concurrent_clones
= int(self
.config_get('mgr', 'mgr/volumes/max_concurrent_clones'))
5918 self
.assertEqual(max_concurrent_clones
, 2)
5920 def test_subvolume_under_group_snapshot_clone(self
):
5921 subvolume
= self
._generate
_random
_subvolume
_name
()
5922 group
= self
._generate
_random
_group
_name
()
5923 snapshot
= self
._generate
_random
_snapshot
_name
()
5924 clone
= self
._generate
_random
_clone
_name
()
5927 self
._fs
_cmd
("subvolumegroup", "create", self
.volname
, group
)
5930 self
._fs
_cmd
("subvolume", "create", self
.volname
, subvolume
, group
, "--mode=777")
5933 self
._do
_subvolume
_io
(subvolume
, subvolume_group
=group
, number_of_files
=32)
5935 # snapshot subvolume
5936 self
._fs
_cmd
("subvolume", "snapshot", "create", self
.volname
, subvolume
, snapshot
, group
)
5939 self
._fs
_cmd
("subvolume", "snapshot", "clone", self
.volname
, subvolume
, snapshot
, clone
, '--group_name', group
)
5941 # check clone status
5942 self
._wait
_for
_clone
_to
_complete
(clone
)
5945 self
._verify
_clone
(subvolume
, snapshot
, clone
, source_group
=group
)
5948 self
._fs
_cmd
("subvolume", "snapshot", "rm", self
.volname
, subvolume
, snapshot
, group
)
5951 self
._fs
_cmd
("subvolume", "rm", self
.volname
, subvolume
, group
)
5952 self
._fs
_cmd
("subvolume", "rm", self
.volname
, clone
)
5955 self
._fs
_cmd
("subvolumegroup", "rm", self
.volname
, group
)
5957 # verify trash dir is clean
5958 self
._wait
_for
_trash
_empty
()
5961 class TestMisc(TestVolumesHelper
):
5962 """Miscellaneous tests related to FS volume, subvolume group, and subvolume operations."""
5963 def test_connection_expiration(self
):
5964 # unmount any cephfs mounts
5965 for i
in range(0, self
.CLIENTS_REQUIRED
):
5966 self
.mounts
[i
].umount_wait()
5967 sessions
= self
._session
_list
()
5968 self
.assertLessEqual(len(sessions
), 1) # maybe mgr is already mounted
5970 # Get the mgr to definitely mount cephfs
5971 subvolume
= self
._generate
_random
_subvolume
_name
()
5972 self
._fs
_cmd
("subvolume", "create", self
.volname
, subvolume
)
5973 sessions
= self
._session
_list
()
5974 self
.assertEqual(len(sessions
), 1)
5976 # Now wait for the mgr to expire the connection:
5977 self
.wait_until_evicted(sessions
[0]['id'], timeout
=90)
5979 def test_mgr_eviction(self
):
5980 # unmount any cephfs mounts
5981 for i
in range(0, self
.CLIENTS_REQUIRED
):
5982 self
.mounts
[i
].umount_wait()
5983 sessions
= self
._session
_list
()
5984 self
.assertLessEqual(len(sessions
), 1) # maybe mgr is already mounted
5986 # Get the mgr to definitely mount cephfs
5987 subvolume
= self
._generate
_random
_subvolume
_name
()
5988 self
._fs
_cmd
("subvolume", "create", self
.volname
, subvolume
)
5989 sessions
= self
._session
_list
()
5990 self
.assertEqual(len(sessions
), 1)
5992 # Now fail the mgr, check the session was evicted
5993 mgr
= self
.mgr_cluster
.get_active_id()
5994 self
.mgr_cluster
.mgr_fail(mgr
)
5995 self
.wait_until_evicted(sessions
[0]['id'])
5997 def test_names_can_only_be_goodchars(self
):
5999 Test the creating vols, subvols subvolgroups fails when their names uses
6000 characters beyond [a-zA-Z0-9 -_.].
6002 volname
, badname
= 'testvol', 'abcd@#'
6004 with self
.assertRaises(CommandFailedError
):
6005 self
._fs
_cmd
('volume', 'create', badname
)
6006 self
._fs
_cmd
('volume', 'create', volname
)
6008 with self
.assertRaises(CommandFailedError
):
6009 self
._fs
_cmd
('subvolumegroup', 'create', volname
, badname
)
6011 with self
.assertRaises(CommandFailedError
):
6012 self
._fs
_cmd
('subvolume', 'create', volname
, badname
)
6013 self
._fs
_cmd
('volume', 'rm', volname
, '--yes-i-really-mean-it')
6015 def test_subvolume_ops_on_nonexistent_vol(self
):
6016 # tests the fs subvolume operations on non existing volume
6018 volname
= "non_existent_subvolume"
6020 # try subvolume operations
6021 for op
in ("create", "rm", "getpath", "info", "resize", "pin", "ls"):
6024 self
._fs
_cmd
("subvolume", "resize", volname
, "subvolname_1", "inf")
6026 self
._fs
_cmd
("subvolume", "pin", volname
, "subvolname_1", "export", "1")
6028 self
._fs
_cmd
("subvolume", "ls", volname
)
6030 self
._fs
_cmd
("subvolume", op
, volname
, "subvolume_1")
6031 except CommandFailedError
as ce
:
6032 self
.assertEqual(ce
.exitstatus
, errno
.ENOENT
)
6034 self
.fail("expected the 'fs subvolume {0}' command to fail".format(op
))
6036 # try subvolume snapshot operations and clone create
6037 for op
in ("create", "rm", "info", "protect", "unprotect", "ls", "clone"):
6040 self
._fs
_cmd
("subvolume", "snapshot", op
, volname
, "subvolume_1")
6042 self
._fs
_cmd
("subvolume", "snapshot", op
, volname
, "subvolume_1", "snapshot_1", "clone_1")
6044 self
._fs
_cmd
("subvolume", "snapshot", op
, volname
, "subvolume_1", "snapshot_1")
6045 except CommandFailedError
as ce
:
6046 self
.assertEqual(ce
.exitstatus
, errno
.ENOENT
)
6048 self
.fail("expected the 'fs subvolume snapshot {0}' command to fail".format(op
))
6052 self
._fs
_cmd
("clone", "status", volname
, "clone_1")
6053 except CommandFailedError
as ce
:
6054 self
.assertEqual(ce
.exitstatus
, errno
.ENOENT
)
6056 self
.fail("expected the 'fs clone status' command to fail")
6058 # try subvolumegroup operations
6059 for op
in ("create", "rm", "getpath", "pin", "ls"):
6062 self
._fs
_cmd
("subvolumegroup", "pin", volname
, "group_1", "export", "0")
6064 self
._fs
_cmd
("subvolumegroup", op
, volname
)
6066 self
._fs
_cmd
("subvolumegroup", op
, volname
, "group_1")
6067 except CommandFailedError
as ce
:
6068 self
.assertEqual(ce
.exitstatus
, errno
.ENOENT
)
6070 self
.fail("expected the 'fs subvolumegroup {0}' command to fail".format(op
))
6072 # try subvolumegroup snapshot operations
6073 for op
in ("create", "rm", "ls"):
6076 self
._fs
_cmd
("subvolumegroup", "snapshot", op
, volname
, "group_1")
6078 self
._fs
_cmd
("subvolumegroup", "snapshot", op
, volname
, "group_1", "snapshot_1")
6079 except CommandFailedError
as ce
:
6080 self
.assertEqual(ce
.exitstatus
, errno
.ENOENT
)
6082 self
.fail("expected the 'fs subvolumegroup snapshot {0}' command to fail".format(op
))
6084 def test_subvolume_upgrade_legacy_to_v1(self
):
6086 poor man's upgrade test -- rather than going through a full upgrade cycle,
6087 emulate subvolumes by going through the wormhole and verify if they are
6089 further ensure that a legacy volume is not updated to v2.
6091 subvolume1
, subvolume2
= self
._generate
_random
_subvolume
_name
(2)
6092 group
= self
._generate
_random
_group
_name
()
6094 # emulate a old-fashioned subvolume -- one in the default group and
6095 # the other in a custom group
6096 createpath1
= os
.path
.join(".", "volumes", "_nogroup", subvolume1
)
6097 self
.mount_a
.run_shell(['mkdir', '-p', createpath1
], sudo
=True)
6100 createpath2
= os
.path
.join(".", "volumes", group
, subvolume2
)
6101 self
.mount_a
.run_shell(['mkdir', '-p', createpath2
], sudo
=True)
6103 # this would auto-upgrade on access without anyone noticing
6104 subvolpath1
= self
._fs
_cmd
("subvolume", "getpath", self
.volname
, subvolume1
)
6105 self
.assertNotEqual(subvolpath1
, None)
6106 subvolpath1
= subvolpath1
.rstrip() # remove "/" prefix and any trailing newline
6108 subvolpath2
= self
._fs
_cmd
("subvolume", "getpath", self
.volname
, subvolume2
, group
)
6109 self
.assertNotEqual(subvolpath2
, None)
6110 subvolpath2
= subvolpath2
.rstrip() # remove "/" prefix and any trailing newline
6112 # and... the subvolume path returned should be what we created behind the scene
6113 self
.assertEqual(createpath1
[1:], subvolpath1
)
6114 self
.assertEqual(createpath2
[1:], subvolpath2
)
6116 # ensure metadata file is in legacy location, with required version v1
6117 self
._assert
_meta
_location
_and
_version
(self
.volname
, subvolume1
, version
=1, legacy
=True)
6118 self
._assert
_meta
_location
_and
_version
(self
.volname
, subvolume2
, subvol_group
=group
, version
=1, legacy
=True)
6121 self
._fs
_cmd
("subvolume", "rm", self
.volname
, subvolume1
)
6122 self
._fs
_cmd
("subvolume", "rm", self
.volname
, subvolume2
, group
)
6124 # verify trash dir is clean
6125 self
._wait
_for
_trash
_empty
()
6128 self
._fs
_cmd
("subvolumegroup", "rm", self
.volname
, group
)
6130 def test_subvolume_no_upgrade_v1_sanity(self
):
6132 poor man's upgrade test -- theme continues...
6134 This test is to ensure v1 subvolumes are retained as is, due to a snapshot being present, and runs through
6135 a series of operations on the v1 subvolume to ensure they work as expected.
6137 subvol_md
= ["atime", "bytes_pcent", "bytes_quota", "bytes_used", "created_at", "ctime",
6138 "data_pool", "gid", "mode", "mon_addrs", "mtime", "path", "pool_namespace",
6139 "type", "uid", "features", "state"]
6140 snap_md
= ["created_at", "data_pool", "has_pending_clones", "size"]
6142 subvolume
= self
._generate
_random
_subvolume
_name
()
6143 snapshot
= self
._generate
_random
_snapshot
_name
()
6144 clone1
, clone2
= self
._generate
_random
_clone
_name
(2)
6149 # emulate a v1 subvolume -- in the default group
6150 subvolume_path
= self
._create
_v
1_subvolume
(subvolume
)
6153 subvolpath
= self
._get
_subvolume
_path
(self
.volname
, subvolume
)
6154 self
.assertEqual(subvolpath
, subvolume_path
)
6157 subvolumes
= json
.loads(self
._fs
_cmd
('subvolume', 'ls', self
.volname
))
6158 self
.assertEqual(len(subvolumes
), 1, "subvolume ls count mismatch, expected '1', found {0}".format(len(subvolumes
)))
6159 self
.assertEqual(subvolumes
[0]['name'], subvolume
,
6160 "subvolume name mismatch in ls output, expected '{0}', found '{1}'".format(subvolume
, subvolumes
[0]['name']))
6163 subvol_info
= json
.loads(self
._get
_subvolume
_info
(self
.volname
, subvolume
))
6164 for md
in subvol_md
:
6165 self
.assertIn(md
, subvol_info
, "'{0}' key not present in metadata of subvolume".format(md
))
6167 self
.assertEqual(subvol_info
["state"], "complete",
6168 msg
="expected state to be 'complete', found '{0}".format(subvol_info
["state"]))
6169 self
.assertEqual(len(subvol_info
["features"]), 2,
6170 msg
="expected 1 feature, found '{0}' ({1})".format(len(subvol_info
["features"]), subvol_info
["features"]))
6171 for feature
in ['snapshot-clone', 'snapshot-autoprotect']:
6172 self
.assertIn(feature
, subvol_info
["features"], msg
="expected feature '{0}' in subvolume".format(feature
))
6175 nsize
= self
.DEFAULT_FILE_SIZE
*1024*1024*10
6176 self
._fs
_cmd
("subvolume", "resize", self
.volname
, subvolume
, str(nsize
))
6177 subvol_info
= json
.loads(self
._get
_subvolume
_info
(self
.volname
, subvolume
))
6178 for md
in subvol_md
:
6179 self
.assertIn(md
, subvol_info
, "'{0}' key not present in metadata of subvolume".format(md
))
6180 self
.assertEqual(subvol_info
["bytes_quota"], nsize
, "bytes_quota should be set to '{0}'".format(nsize
))
6182 # create (idempotent) (change some attrs, to ensure attrs are preserved from the snapshot on clone)
6183 self
._fs
_cmd
("subvolume", "create", self
.volname
, subvolume
, "--mode", mode
, "--uid", uid
, "--gid", gid
)
6186 self
._do
_subvolume
_io
(subvolume
, number_of_files
=8)
6189 self
._fs
_cmd
("subvolume", "snapshot", "create", self
.volname
, subvolume
, snapshot
)
6192 self
._fs
_cmd
("subvolume", "snapshot", "clone", self
.volname
, subvolume
, snapshot
, clone1
)
6194 # check clone status
6195 self
._wait
_for
_clone
_to
_complete
(clone1
)
6197 # ensure clone is v2
6198 self
._assert
_meta
_location
_and
_version
(self
.volname
, clone1
, version
=2)
6201 self
._verify
_clone
(subvolume
, snapshot
, clone1
, source_version
=1)
6203 # clone (older snapshot)
6204 self
._fs
_cmd
("subvolume", "snapshot", "clone", self
.volname
, subvolume
, 'fake', clone2
)
6206 # check clone status
6207 self
._wait
_for
_clone
_to
_complete
(clone2
)
6209 # ensure clone is v2
6210 self
._assert
_meta
_location
_and
_version
(self
.volname
, clone2
, version
=2)
6213 # TODO: rentries will mismatch till this is fixed https://tracker.ceph.com/issues/46747
6214 #self._verify_clone(subvolume, 'fake', clone2, source_version=1)
6217 snap_info
= json
.loads(self
._get
_subvolume
_snapshot
_info
(self
.volname
, subvolume
, snapshot
))
6219 self
.assertIn(md
, snap_info
, "'{0}' key not present in metadata of snapshot".format(md
))
6220 self
.assertEqual(snap_info
["has_pending_clones"], "no")
6223 subvol_snapshots
= json
.loads(self
._fs
_cmd
('subvolume', 'snapshot', 'ls', self
.volname
, subvolume
))
6224 self
.assertEqual(len(subvol_snapshots
), 2, "subvolume ls count mismatch, expected 2', found {0}".format(len(subvol_snapshots
)))
6225 snapshotnames
= [snapshot
['name'] for snapshot
in subvol_snapshots
]
6226 for name
in [snapshot
, 'fake']:
6227 self
.assertIn(name
, snapshotnames
, msg
="expected snapshot '{0}' in subvolume snapshot ls".format(name
))
6230 self
._fs
_cmd
("subvolume", "snapshot", "rm", self
.volname
, subvolume
, snapshot
)
6231 self
._fs
_cmd
("subvolume", "snapshot", "rm", self
.volname
, subvolume
, "fake")
6233 # ensure volume is still at version 1
6234 self
._assert
_meta
_location
_and
_version
(self
.volname
, subvolume
, version
=1)
6237 self
._fs
_cmd
("subvolume", "rm", self
.volname
, subvolume
)
6238 self
._fs
_cmd
("subvolume", "rm", self
.volname
, clone1
)
6239 self
._fs
_cmd
("subvolume", "rm", self
.volname
, clone2
)
6241 # verify trash dir is clean
6242 self
._wait
_for
_trash
_empty
()
6244 def test_subvolume_no_upgrade_v1_to_v2(self
):
6246 poor man's upgrade test -- theme continues...
6247 ensure v1 to v2 upgrades are not done automatically due to various states of v1
6249 subvolume1
, subvolume2
, subvolume3
= self
._generate
_random
_subvolume
_name
(3)
6250 group
= self
._generate
_random
_group
_name
()
6252 # emulate a v1 subvolume -- in the default group
6253 subvol1_path
= self
._create
_v
1_subvolume
(subvolume1
)
6255 # emulate a v1 subvolume -- in a custom group
6256 subvol2_path
= self
._create
_v
1_subvolume
(subvolume2
, subvol_group
=group
)
6258 # emulate a v1 subvolume -- in a clone pending state
6259 self
._create
_v
1_subvolume
(subvolume3
, subvol_type
='clone', has_snapshot
=False, state
='pending')
6261 # this would attempt auto-upgrade on access, but fail to do so as snapshots exist
6262 subvolpath1
= self
._get
_subvolume
_path
(self
.volname
, subvolume1
)
6263 self
.assertEqual(subvolpath1
, subvol1_path
)
6265 subvolpath2
= self
._get
_subvolume
_path
(self
.volname
, subvolume2
, group_name
=group
)
6266 self
.assertEqual(subvolpath2
, subvol2_path
)
6268 # this would attempt auto-upgrade on access, but fail to do so as volume is not complete
6269 # use clone status, as only certain operations are allowed in pending state
6270 status
= json
.loads(self
._fs
_cmd
("clone", "status", self
.volname
, subvolume3
))
6271 self
.assertEqual(status
["status"]["state"], "pending")
6274 self
._fs
_cmd
("subvolume", "snapshot", "rm", self
.volname
, subvolume1
, "fake")
6275 self
._fs
_cmd
("subvolume", "snapshot", "rm", self
.volname
, subvolume2
, "fake", group
)
6277 # ensure metadata file is in v1 location, with version retained as v1
6278 self
._assert
_meta
_location
_and
_version
(self
.volname
, subvolume1
, version
=1)
6279 self
._assert
_meta
_location
_and
_version
(self
.volname
, subvolume2
, subvol_group
=group
, version
=1)
6282 self
._fs
_cmd
("subvolume", "rm", self
.volname
, subvolume1
)
6283 self
._fs
_cmd
("subvolume", "rm", self
.volname
, subvolume2
, group
)
6285 self
._fs
_cmd
("subvolume", "rm", self
.volname
, subvolume3
)
6286 except CommandFailedError
as ce
:
6287 self
.assertEqual(ce
.exitstatus
, errno
.EAGAIN
, "invalid error code on rm of subvolume undergoing clone")
6289 self
.fail("expected rm of subvolume undergoing clone to fail")
6291 # ensure metadata file is in v1 location, with version retained as v1
6292 self
._assert
_meta
_location
_and
_version
(self
.volname
, subvolume3
, version
=1)
6293 self
._fs
_cmd
("subvolume", "rm", self
.volname
, subvolume3
, "--force")
6295 # verify list subvolumes returns an empty list
6296 subvolumels
= json
.loads(self
._fs
_cmd
('subvolume', 'ls', self
.volname
))
6297 self
.assertEqual(len(subvolumels
), 0)
6299 # verify trash dir is clean
6300 self
._wait
_for
_trash
_empty
()
6302 def test_subvolume_upgrade_v1_to_v2(self
):
6304 poor man's upgrade test -- theme continues...
6305 ensure v1 to v2 upgrades work
6307 subvolume1
, subvolume2
= self
._generate
_random
_subvolume
_name
(2)
6308 group
= self
._generate
_random
_group
_name
()
6310 # emulate a v1 subvolume -- in the default group
6311 subvol1_path
= self
._create
_v
1_subvolume
(subvolume1
, has_snapshot
=False)
6313 # emulate a v1 subvolume -- in a custom group
6314 subvol2_path
= self
._create
_v
1_subvolume
(subvolume2
, subvol_group
=group
, has_snapshot
=False)
6316 # this would attempt auto-upgrade on access
6317 subvolpath1
= self
._get
_subvolume
_path
(self
.volname
, subvolume1
)
6318 self
.assertEqual(subvolpath1
, subvol1_path
)
6320 subvolpath2
= self
._get
_subvolume
_path
(self
.volname
, subvolume2
, group_name
=group
)
6321 self
.assertEqual(subvolpath2
, subvol2_path
)
6323 # ensure metadata file is in v2 location, with version retained as v2
6324 self
._assert
_meta
_location
_and
_version
(self
.volname
, subvolume1
, version
=2)
6325 self
._assert
_meta
_location
_and
_version
(self
.volname
, subvolume2
, subvol_group
=group
, version
=2)
6328 self
._fs
_cmd
("subvolume", "rm", self
.volname
, subvolume1
)
6329 self
._fs
_cmd
("subvolume", "rm", self
.volname
, subvolume2
, group
)
6331 # verify trash dir is clean
6332 self
._wait
_for
_trash
_empty
()
6334 def test_malicious_metafile_on_legacy_to_v1_upgrade(self
):
6336 Validate handcrafted .meta file on legacy subvol root doesn't break the system
6337 on legacy subvol upgrade to v1
6338 poor man's upgrade test -- theme continues...
6340 subvol1
, subvol2
= self
._generate
_random
_subvolume
_name
(2)
6341 group
= self
._generate
_random
_group
_name
()
6343 # emulate a old-fashioned subvolume in the default group
6344 createpath1
= os
.path
.join(".", "volumes", "_nogroup", subvol1
)
6345 self
.mount_a
.run_shell(['mkdir', '-p', createpath1
], sudo
=True)
6347 # add required xattrs to subvolume
6348 default_pool
= self
.mount_a
.getfattr(".", "ceph.dir.layout.pool")
6349 self
.mount_a
.setfattr(createpath1
, 'ceph.dir.layout.pool', default_pool
, sudo
=True)
6351 # create v2 subvolume
6352 self
._fs
_cmd
("subvolume", "create", self
.volname
, subvol2
)
6354 # Create malicious .meta file in legacy subvolume root. Copy v2 subvolume
6355 # .meta into legacy subvol1's root
6356 subvol2_metapath
= os
.path
.join(".", "volumes", "_nogroup", subvol2
, ".meta")
6357 self
.mount_a
.run_shell(["cp", subvol2_metapath
, createpath1
], sudo
=True)
6359 # Upgrade legacy subvol1 to v1
6360 subvolpath1
= self
._fs
_cmd
("subvolume", "getpath", self
.volname
, subvol1
)
6361 self
.assertNotEqual(subvolpath1
, None)
6362 subvolpath1
= subvolpath1
.rstrip()
6364 # the subvolume path returned should not be of subvol2 from handcrafted
6366 self
.assertEqual(createpath1
[1:], subvolpath1
)
6368 # ensure metadata file is in legacy location, with required version v1
6369 self
._assert
_meta
_location
_and
_version
(self
.volname
, subvol1
, version
=1, legacy
=True)
6371 # Authorize alice authID read-write access to subvol1. Verify it authorizes subvol1 path and not subvol2
6372 # path whose '.meta' file is copied to subvol1 root
6374 self
._fs
_cmd
("subvolume", "authorize", self
.volname
, subvol1
, authid1
)
6376 # Validate that the mds path added is of subvol1 and not of subvol2
6377 out
= json
.loads(self
.fs
.mon_manager
.raw_cluster_cmd("auth", "get", "client.alice", "--format=json-pretty"))
6378 self
.assertEqual("client.alice", out
[0]["entity"])
6379 self
.assertEqual("allow rw path={0}".format(createpath1
[1:]), out
[0]["caps"]["mds"])
6382 self
._fs
_cmd
("subvolume", "rm", self
.volname
, subvol1
)
6383 self
._fs
_cmd
("subvolume", "rm", self
.volname
, subvol2
)
6385 # verify trash dir is clean
6386 self
._wait
_for
_trash
_empty
()
6388 def test_binary_metafile_on_legacy_to_v1_upgrade(self
):
6390 Validate binary .meta file on legacy subvol root doesn't break the system
6391 on legacy subvol upgrade to v1
6392 poor man's upgrade test -- theme continues...
6394 subvol
= self
._generate
_random
_subvolume
_name
()
6395 group
= self
._generate
_random
_group
_name
()
6397 # emulate a old-fashioned subvolume -- in a custom group
6398 createpath
= os
.path
.join(".", "volumes", group
, subvol
)
6399 self
.mount_a
.run_shell(['mkdir', '-p', createpath
], sudo
=True)
6401 # add required xattrs to subvolume
6402 default_pool
= self
.mount_a
.getfattr(".", "ceph.dir.layout.pool")
6403 self
.mount_a
.setfattr(createpath
, 'ceph.dir.layout.pool', default_pool
, sudo
=True)
6405 # Create unparseable binary .meta file on legacy subvol's root
6406 meta_contents
= os
.urandom(4096)
6407 meta_filepath
= os
.path
.join(self
.mount_a
.mountpoint
, createpath
, ".meta")
6408 self
.mount_a
.client_remote
.write_file(meta_filepath
, meta_contents
, sudo
=True)
6410 # Upgrade legacy subvol to v1
6411 subvolpath
= self
._fs
_cmd
("subvolume", "getpath", self
.volname
, subvol
, group
)
6412 self
.assertNotEqual(subvolpath
, None)
6413 subvolpath
= subvolpath
.rstrip()
6415 # The legacy subvolume path should be returned for subvol.
6416 # Should ignore unparseable binary .meta file in subvol's root
6417 self
.assertEqual(createpath
[1:], subvolpath
)
6419 # ensure metadata file is in legacy location, with required version v1
6420 self
._assert
_meta
_location
_and
_version
(self
.volname
, subvol
, subvol_group
=group
, version
=1, legacy
=True)
6423 self
._fs
_cmd
("subvolume", "rm", self
.volname
, subvol
, group
)
6425 # verify trash dir is clean
6426 self
._wait
_for
_trash
_empty
()
6429 self
._fs
_cmd
("subvolumegroup", "rm", self
.volname
, group
)
6431 def test_unparseable_metafile_on_legacy_to_v1_upgrade(self
):
6433 Validate unparseable text .meta file on legacy subvol root doesn't break the system
6434 on legacy subvol upgrade to v1
6435 poor man's upgrade test -- theme continues...
6437 subvol
= self
._generate
_random
_subvolume
_name
()
6438 group
= self
._generate
_random
_group
_name
()
6440 # emulate a old-fashioned subvolume -- in a custom group
6441 createpath
= os
.path
.join(".", "volumes", group
, subvol
)
6442 self
.mount_a
.run_shell(['mkdir', '-p', createpath
], sudo
=True)
6444 # add required xattrs to subvolume
6445 default_pool
= self
.mount_a
.getfattr(".", "ceph.dir.layout.pool")
6446 self
.mount_a
.setfattr(createpath
, 'ceph.dir.layout.pool', default_pool
, sudo
=True)
6448 # Create unparseable text .meta file on legacy subvol's root
6449 meta_contents
= "unparseable config\nfile ...\nunparseable config\nfile ...\n"
6450 meta_filepath
= os
.path
.join(self
.mount_a
.mountpoint
, createpath
, ".meta")
6451 self
.mount_a
.client_remote
.write_file(meta_filepath
, meta_contents
, sudo
=True)
6453 # Upgrade legacy subvol to v1
6454 subvolpath
= self
._fs
_cmd
("subvolume", "getpath", self
.volname
, subvol
, group
)
6455 self
.assertNotEqual(subvolpath
, None)
6456 subvolpath
= subvolpath
.rstrip()
6458 # The legacy subvolume path should be returned for subvol.
6459 # Should ignore unparseable binary .meta file in subvol's root
6460 self
.assertEqual(createpath
[1:], subvolpath
)
6462 # ensure metadata file is in legacy location, with required version v1
6463 self
._assert
_meta
_location
_and
_version
(self
.volname
, subvol
, subvol_group
=group
, version
=1, legacy
=True)
6466 self
._fs
_cmd
("subvolume", "rm", self
.volname
, subvol
, group
)
6468 # verify trash dir is clean
6469 self
._wait
_for
_trash
_empty
()
6472 self
._fs
_cmd
("subvolumegroup", "rm", self
.volname
, group
)