import os
import collections
import errno
import json
import logging
import random
import time
import uuid

from hashlib import md5
from textwrap import dedent

from tasks.cephfs.cephfs_test_case import CephFSTestCase
from tasks.cephfs.fuse_mount import FuseMount
from teuthology.exceptions import CommandFailedError

log = logging.getLogger(__name__)
class TestVolumesHelper(CephFSTestCase):
    """Helper class for testing FS volume, subvolume group and subvolume operations."""
    TEST_VOLUME_PREFIX = "volume"
    TEST_SUBVOLUME_PREFIX = "subvolume"
    TEST_GROUP_PREFIX = "group"
    TEST_SNAPSHOT_PREFIX = "snapshot"
    TEST_CLONE_PREFIX = "clone"
    TEST_FILE_NAME_PREFIX = "subvolume_file"

    # for filling subvolume with data
    DEFAULT_FILE_SIZE = 1  # MB
    DEFAULT_NUMBER_OF_FILES = 1024

    def _fs_cmd(self, *args):
        return self.mgr_cluster.mon_manager.raw_cluster_cmd("fs", *args)

    def _raw_cmd(self, *args):
        return self.mgr_cluster.mon_manager.raw_cluster_cmd(*args)
    def __check_clone_state(self, state, clone, clone_group=None, timo=120):
        check = 0
        args = ["clone", "status", self.volname, clone]
        if clone_group:
            args.append(clone_group)
        args = tuple(args)
        while check < timo:
            result = json.loads(self._fs_cmd(*args))
            if result["status"]["state"] == state:
                break
            check += 1
            time.sleep(1)
        self.assertTrue(check < timo)
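    # Note on __check_clone_state(): "fs clone status" prints JSON whose
    # top-level "status" object carries the clone's "state", roughly (the
    # field values here are illustrative, not from a live cluster):
    #
    #   {"status": {"state": "in-progress"}}
    #
    # The helper polls that state once per second until it matches the
    # expected value or the timo budget (in seconds) is spent.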
    def _wait_for_clone_to_complete(self, clone, clone_group=None, timo=120):
        self.__check_clone_state("complete", clone, clone_group, timo)

    def _wait_for_clone_to_fail(self, clone, clone_group=None, timo=120):
        self.__check_clone_state("failed", clone, clone_group, timo)

    def _check_clone_canceled(self, clone, clone_group=None):
        self.__check_clone_state("canceled", clone, clone_group, timo=1)
    def _get_subvolume_snapshot_path(self, subvolume, snapshot, source_group, subvol_path, source_version):
        if source_version == 2:
            # v2
            if subvol_path is not None:
                (base_path, uuid_str) = os.path.split(subvol_path)
            else:
                (base_path, uuid_str) = os.path.split(self._get_subvolume_path(self.volname, subvolume, group_name=source_group))
            return os.path.join(base_path, ".snap", snapshot, uuid_str)

        # v1
        base_path = self._get_subvolume_path(self.volname, subvolume, group_name=source_group)
        return os.path.join(base_path, ".snap", snapshot)
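    # Path layout assumed above: a v2 subvolume stores its data in a
    # uuid-named incarnation directory, so its snapshot contents live at
    # <group>/<subvolume>/.snap/<snapshot>/<uuid>, while a v1 subvolume is
    # snapshotted directly at <subvolume-path>/.snap/<snapshot>.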
    def _verify_clone_attrs(self, source_path, clone_path):
        path1 = source_path
        path2 = clone_path

        p = self.mount_a.run_shell(["find", path1])
        paths = p.stdout.getvalue().strip().split()

        # for each entry in source and clone (sink) verify certain inode attributes:
        # inode type, mode, ownership, [am]time.
        for source_path in paths:
            sink_entry = source_path[len(path1)+1:]
            sink_path = os.path.join(path2, sink_entry)

            # mode and inode type
            sval = int(self.mount_a.run_shell(['stat', '-c' '%f', source_path]).stdout.getvalue().strip(), 16)
            cval = int(self.mount_a.run_shell(['stat', '-c' '%f', sink_path]).stdout.getvalue().strip(), 16)
            self.assertEqual(sval, cval)

            # ownership
            sval = int(self.mount_a.run_shell(['stat', '-c' '%u', source_path]).stdout.getvalue().strip())
            cval = int(self.mount_a.run_shell(['stat', '-c' '%u', sink_path]).stdout.getvalue().strip())
            self.assertEqual(sval, cval)

            sval = int(self.mount_a.run_shell(['stat', '-c' '%g', source_path]).stdout.getvalue().strip())
            cval = int(self.mount_a.run_shell(['stat', '-c' '%g', sink_path]).stdout.getvalue().strip())
            self.assertEqual(sval, cval)

            # modification time
            # do not check access as kclient will generally not update this like ceph-fuse will.
            sval = int(self.mount_a.run_shell(['stat', '-c' '%Y', source_path]).stdout.getvalue().strip())
            cval = int(self.mount_a.run_shell(['stat', '-c' '%Y', sink_path]).stdout.getvalue().strip())
            self.assertEqual(sval, cval)
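    # stat(1) format specifiers used above: %f is the raw mode word in hex
    # (hence the int(..., 16) parse; it covers both file type and permission
    # bits), %u/%g are the numeric uid/gid, and %Y is mtime in seconds since
    # the epoch. atime is deliberately left unchecked, per the comment above.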
    def _verify_clone_root(self, source_path, clone_path, clone, clone_group, clone_pool):
        # verifies following clone root attrs quota, data_pool and pool_namespace
        # remaining attributes of clone root are validated in _verify_clone_attrs

        clone_info = json.loads(self._get_subvolume_info(self.volname, clone, clone_group))

        # verify quota is inherited from source snapshot
        src_quota = self.mount_a.getfattr(source_path, "ceph.quota.max_bytes")
        # FIXME: kclient fails to get this quota value: https://tracker.ceph.com/issues/48075
        if isinstance(self.mount_a, FuseMount):
            self.assertEqual(clone_info["bytes_quota"], "infinite" if src_quota is None else int(src_quota))

        if clone_pool:
            # verify pool is set as per request
            self.assertEqual(clone_info["data_pool"], clone_pool)
        else:
            # verify pool and pool namespace are inherited from snapshot
            self.assertEqual(clone_info["data_pool"],
                             self.mount_a.getfattr(source_path, "ceph.dir.layout.pool"))
            self.assertEqual(clone_info["pool_namespace"],
                             self.mount_a.getfattr(source_path, "ceph.dir.layout.pool_namespace"))

    def _verify_clone(self, subvolume, snapshot, clone,
                      source_group=None, clone_group=None, clone_pool=None,
                      subvol_path=None, source_version=2, timo=120):
        # pass in subvol_path (subvolume path when snapshot was taken) when subvolume is removed
        # but snapshots are retained for clone verification
        path1 = self._get_subvolume_snapshot_path(subvolume, snapshot, source_group, subvol_path, source_version)
        path2 = self._get_subvolume_path(self.volname, clone, group_name=clone_group)

        check = 0
        # TODO: currently snapshot rentries are not stable if snapshot source entries
        # are removed, https://tracker.ceph.com/issues/46747
        while check < timo and subvol_path is None:
            val1 = int(self.mount_a.getfattr(path1, "ceph.dir.rentries"))
            val2 = int(self.mount_a.getfattr(path2, "ceph.dir.rentries"))
            if val1 == val2:
                break
            check += 1
            time.sleep(1)
        self.assertTrue(check < timo)

        self._verify_clone_root(path1, path2, clone, clone_group, clone_pool)
        self._verify_clone_attrs(path1, path2)
    def _generate_random_volume_name(self, count=1):
        n = self.volume_start
        volumes = [f"{TestVolumes.TEST_VOLUME_PREFIX}_{i:016}" for i in range(n, n+count)]
        self.volume_start += count
        return volumes[0] if count == 1 else volumes

    def _generate_random_subvolume_name(self, count=1):
        n = self.subvolume_start
        subvolumes = [f"{TestVolumes.TEST_SUBVOLUME_PREFIX}_{i:016}" for i in range(n, n+count)]
        self.subvolume_start += count
        return subvolumes[0] if count == 1 else subvolumes

    def _generate_random_group_name(self, count=1):
        n = self.group_start
        groups = [f"{TestVolumes.TEST_GROUP_PREFIX}_{i:016}" for i in range(n, n+count)]
        self.group_start += count
        return groups[0] if count == 1 else groups

    def _generate_random_snapshot_name(self, count=1):
        n = self.snapshot_start
        snaps = [f"{TestVolumes.TEST_SNAPSHOT_PREFIX}_{i:016}" for i in range(n, n+count)]
        self.snapshot_start += count
        return snaps[0] if count == 1 else snaps

    def _generate_random_clone_name(self, count=1):
        n = self.clone_start
        clones = [f"{TestVolumes.TEST_CLONE_PREFIX}_{i:016}" for i in range(n, n+count)]
        self.clone_start += count
        return clones[0] if count == 1 else clones
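    # Despite the "random" in their names, the generators above are
    # sequential counters: setUp() seeds each *_start index once and every
    # call hands out the next zero-padded value, so names are unique within
    # a test run.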
    def _enable_multi_fs(self):
        self._fs_cmd("flag", "set", "enable_multiple", "true", "--yes-i-really-mean-it")

    def _create_or_reuse_test_volume(self):
        result = json.loads(self._fs_cmd("volume", "ls"))
        if len(result) == 0:
            self.vol_created = True
            self.volname = self._generate_random_volume_name()
            self._fs_cmd("volume", "create", self.volname)
        else:
            self.volname = result[0]['name']

    def _get_subvolume_group_path(self, vol_name, group_name):
        args = ("subvolumegroup", "getpath", vol_name, group_name)
        path = self._fs_cmd(*args)
        # remove the leading '/', and trailing whitespaces
        return path[1:].rstrip()

    def _get_subvolume_path(self, vol_name, subvol_name, group_name=None):
        args = ["subvolume", "getpath", vol_name, subvol_name]
        if group_name:
            args.append(group_name)
        args = tuple(args)
        path = self._fs_cmd(*args)
        # remove the leading '/', and trailing whitespaces
        return path[1:].rstrip()

    def _get_subvolume_info(self, vol_name, subvol_name, group_name=None):
        args = ["subvolume", "info", vol_name, subvol_name]
        if group_name:
            args.append(group_name)
        args = tuple(args)
        subvol_md = self._fs_cmd(*args)
        return subvol_md

    def _get_subvolume_snapshot_info(self, vol_name, subvol_name, snapname, group_name=None):
        args = ["subvolume", "snapshot", "info", vol_name, subvol_name, snapname]
        if group_name:
            args.append(group_name)
        args = tuple(args)
        snap_md = self._fs_cmd(*args)
        return snap_md
    def _delete_test_volume(self):
        self._fs_cmd("volume", "rm", self.volname, "--yes-i-really-mean-it")

    def _do_subvolume_pool_and_namespace_update(self, subvolume, pool=None, pool_namespace=None, subvolume_group=None):
        subvolpath = self._get_subvolume_path(self.volname, subvolume, group_name=subvolume_group)

        if pool is not None:
            self.mount_a.setfattr(subvolpath, 'ceph.dir.layout.pool', pool, sudo=True)

        if pool_namespace is not None:
            self.mount_a.setfattr(subvolpath, 'ceph.dir.layout.pool_namespace', pool_namespace, sudo=True)

    def _do_subvolume_attr_update(self, subvolume, uid, gid, mode, subvolume_group=None):
        subvolpath = self._get_subvolume_path(self.volname, subvolume, group_name=subvolume_group)

        # mode
        self.mount_a.run_shell(['chmod', mode, subvolpath], sudo=True)

        # ownership
        self.mount_a.run_shell(['chown', uid, subvolpath], sudo=True)
        self.mount_a.run_shell(['chgrp', gid, subvolpath], sudo=True)
    def _do_subvolume_io(self, subvolume, subvolume_group=None, create_dir=None,
                         number_of_files=DEFAULT_NUMBER_OF_FILES, file_size=DEFAULT_FILE_SIZE):
        # get subvolume path for IO
        args = ["subvolume", "getpath", self.volname, subvolume]
        if subvolume_group:
            args.append(subvolume_group)
        args = tuple(args)
        subvolpath = self._fs_cmd(*args)
        self.assertNotEqual(subvolpath, None)
        subvolpath = subvolpath[1:].rstrip()  # remove "/" prefix and any trailing newline

        io_path = subvolpath
        if create_dir:
            io_path = os.path.join(subvolpath, create_dir)
            self.mount_a.run_shell_payload(f"mkdir -p {io_path}")

        log.debug("filling subvolume {0} with {1} files each {2}MB size under directory {3}".format(subvolume, number_of_files, file_size, io_path))
        for i in range(number_of_files):
            filename = "{0}.{1}".format(TestVolumes.TEST_FILE_NAME_PREFIX, i)
            self.mount_a.write_n_mb(os.path.join(io_path, filename), file_size)
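    # With the class defaults (1024 files of 1MB each), one _do_subvolume_io()
    # call writes roughly 1GB into the subvolume; tests that only need some
    # data pass a smaller number_of_files.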
    def _do_subvolume_io_mixed(self, subvolume, subvolume_group=None):
        subvolpath = self._get_subvolume_path(self.volname, subvolume, group_name=subvolume_group)

        reg_file = "regfile.0"
        dir_path = os.path.join(subvolpath, "dir.0")
        sym_path1 = os.path.join(subvolpath, "sym.0")
        # this symlink's ownership would be changed
        sym_path2 = os.path.join(dir_path, "sym.0")

        self.mount_a.run_shell(["mkdir", dir_path])
        self.mount_a.run_shell(["ln", "-s", "./{}".format(reg_file), sym_path1])
        self.mount_a.run_shell(["ln", "-s", "./{}".format(reg_file), sym_path2])
        # flip ownership to nobody. assumption: nobody's id is 65534
        self.mount_a.run_shell(["chown", "-h", "65534:65534", sym_path2], sudo=True, omit_sudo=False)

    def _wait_for_trash_empty(self, timeout=30):
        # XXX: construct the trash dir path (note that there is no mgr
        # [sub]volume interface for this).
        trashdir = os.path.join("./", "volumes", "_deleting")
        self.mount_a.wait_for_dir_empty(trashdir, timeout=timeout)
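    # Subvolume removal is asynchronous: "fs subvolume rm" moves the
    # subvolume under <volume-root>/volumes/_deleting and the mgr purges it
    # in the background, which is why tests poll this trash directory rather
    # than expecting an immediately empty tree.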
    def _assert_meta_location_and_version(self, vol_name, subvol_name, subvol_group=None, version=2, legacy=False):
        if legacy:
            subvol_path = self._get_subvolume_path(vol_name, subvol_name, group_name=subvol_group)
            m = md5()
            m.update(("/"+subvol_path).encode('utf-8'))
            meta_filename = "{0}.meta".format(m.digest().hex())
            metapath = os.path.join(".", "volumes", "_legacy", meta_filename)
        else:
            group = subvol_group if subvol_group is not None else '_nogroup'
            metapath = os.path.join(".", "volumes", group, subvol_name, ".meta")

        out = self.mount_a.run_shell(['cat', metapath], sudo=True)
        lines = out.stdout.getvalue().strip().split('\n')
        sv_version = -1
        for line in lines:
            if line == "version = " + str(version):
                sv_version = version
                break
        self.assertEqual(sv_version, version, "version expected was '{0}' but got '{1}' from meta file at '{2}'".format(
            version, sv_version, metapath))
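    # Worked example of the legacy naming checked above: for a legacy
    # subvolume at volumes/<group>/<subvol>, the metadata file is named
    # md5(b"/volumes/<group>/<subvol>").digest().hex() + ".meta" and lives
    # under volumes/_legacy/, whereas v1/v2 subvolumes keep a ".meta" file
    # inside their own directory.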
    def _create_v1_subvolume(self, subvol_name, subvol_group=None, has_snapshot=True, subvol_type='subvolume', state='complete'):
        group = subvol_group if subvol_group is not None else '_nogroup'
        basepath = os.path.join("volumes", group, subvol_name)
        uuid_str = str(uuid.uuid4())
        createpath = os.path.join(basepath, uuid_str)
        self.mount_a.run_shell(['mkdir', '-p', createpath], sudo=True)

        # create a v1 snapshot, to prevent auto upgrades
        if has_snapshot:
            snappath = os.path.join(createpath, ".snap", "fake")
            self.mount_a.run_shell(['mkdir', '-p', snappath], sudo=True)

        # add required xattrs to subvolume
        default_pool = self.mount_a.getfattr(".", "ceph.dir.layout.pool")
        self.mount_a.setfattr(createpath, 'ceph.dir.layout.pool', default_pool, sudo=True)

        # create a v1 .meta file
        meta_contents = "[GLOBAL]\nversion = 1\ntype = {0}\npath = {1}\nstate = {2}\n".format(subvol_type, "/" + createpath, state)
        if state == 'pending':
            # add a fake clone source
            meta_contents = meta_contents + '[source]\nvolume = fake\nsubvolume = fake\nsnapshot = fake\n'
        meta_filepath1 = os.path.join(self.mount_a.mountpoint, basepath, ".meta")
        self.mount_a.client_remote.write_file(meta_filepath1, meta_contents, sudo=True)
        return createpath
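    # For reference, the fabricated .meta file above looks like:
    #
    #   [GLOBAL]
    #   version = 1
    #   type = subvolume
    #   path = /volumes/_nogroup/<subvol_name>/<uuid>
    #   state = complete
    #
    # plus a fake [source] section when state is 'pending', which makes the
    # subvolume look like an in-flight clone.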
    def _update_fake_trash(self, subvol_name, subvol_group=None, trash_name='fake', create=True):
        group = subvol_group if subvol_group is not None else '_nogroup'
        trashpath = os.path.join("volumes", group, subvol_name, '.trash', trash_name)
        if create:
            self.mount_a.run_shell(['mkdir', '-p', trashpath], sudo=True)
        else:
            self.mount_a.run_shell(['rmdir', trashpath], sudo=True)

    def _configure_guest_auth(self, guest_mount, authid, key):
        """
        Set up auth credentials for a guest client.
        """
        # Create keyring file for the guest client.
        keyring_txt = dedent("""
        [client.{authid}]
            key = {key}

        """.format(authid=authid, key=key))

        guest_mount.client_id = authid
        guest_mount.client_remote.write_file(guest_mount.get_keyring_path(),
                                             keyring_txt, sudo=True)
        # Add a guest client section to the ceph config file.
        self.config_set("client.{0}".format(authid), "debug client", 20)
        self.config_set("client.{0}".format(authid), "debug objecter", 20)
        self.set_conf("client.{0}".format(authid),
                      "keyring", guest_mount.get_keyring_path())

    def _auth_metadata_get(self, filedata):
        """
        Return a deserialized JSON object, or None
        """
        try:
            data = json.loads(filedata)
        except json.decoder.JSONDecodeError:
            data = None
        return data
    def setUp(self):
        super(TestVolumesHelper, self).setUp()

        self.vol_created = False
        self._enable_multi_fs()
        self._create_or_reuse_test_volume()
        self.config_set('mon', 'mon_allow_pool_delete', True)
        self.volume_start = random.randint(1, (1<<20))
        self.subvolume_start = random.randint(1, (1<<20))
        self.group_start = random.randint(1, (1<<20))
        self.snapshot_start = random.randint(1, (1<<20))
        self.clone_start = random.randint(1, (1<<20))

    def tearDown(self):
        if self.vol_created:
            self._delete_test_volume()
        super(TestVolumesHelper, self).tearDown()
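    # The *_start indices are seeded randomly rather than starting at zero,
    # presumably so tests that reuse an existing volume are unlikely to
    # collide with entity names left over from an earlier run; see the
    # _generate_random_* helpers above.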
class TestVolumes(TestVolumesHelper):
    """Tests for FS volume operations."""
    def test_volume_create(self):
        """
        That the volume can be created and then cleans up
        """
        volname = self._generate_random_volume_name()
        self._fs_cmd("volume", "create", volname)
        volumels = json.loads(self._fs_cmd("volume", "ls"))

        if not (volname in ([volume['name'] for volume in volumels])):
            raise RuntimeError("Error creating volume '{0}'".format(volname))

        # clean up
        self._fs_cmd("volume", "rm", volname, "--yes-i-really-mean-it")

    def test_volume_ls(self):
        """
        That the existing and the newly created volumes can be listed and
        finally cleaned up.
        """
        vls = json.loads(self._fs_cmd("volume", "ls"))
        volumes = [volume['name'] for volume in vls]

        #create new volumes and add it to the existing list of volumes
        volumenames = self._generate_random_volume_name(2)
        for volumename in volumenames:
            self._fs_cmd("volume", "create", volumename)
        volumes.extend(volumenames)

        # list volumes
        volumels = json.loads(self._fs_cmd('volume', 'ls'))
        if len(volumels) == 0:
            raise RuntimeError("Expected the 'fs volume ls' command to list the created volumes.")
        volnames = [volume['name'] for volume in volumels]
        if collections.Counter(volnames) != collections.Counter(volumes):
            raise RuntimeError("Error creating or listing volumes")

        # clean up
        for volume in volumenames:
            self._fs_cmd("volume", "rm", volume, "--yes-i-really-mean-it")

    def test_volume_rm(self):
        """
        That the volume can only be removed when --yes-i-really-mean-it is used
        and verify that the deleted volume is not listed anymore.
        """
        for m in self.mounts:
            m.umount_wait()
        try:
            self._fs_cmd("volume", "rm", self.volname)
        except CommandFailedError as ce:
            if ce.exitstatus != errno.EPERM:
                raise RuntimeError("expected the 'fs volume rm' command to fail with EPERM, "
                                   "but it failed with {0}".format(ce.exitstatus))

            self._fs_cmd("volume", "rm", self.volname, "--yes-i-really-mean-it")

            #check if it's gone
            volumes = json.loads(self._fs_cmd("volume", "ls", "--format=json-pretty"))
            if (self.volname in [volume['name'] for volume in volumes]):
                raise RuntimeError("Expected the 'fs volume rm' command to succeed. "
                                   "The volume {0} not removed.".format(self.volname))
        else:
            raise RuntimeError("expected the 'fs volume rm' command to fail.")
    def test_volume_rm_arbitrary_pool_removal(self):
        """
        That the arbitrary pool added to the volume out of band is removed
        successfully on volume removal.
        """
        for m in self.mounts:
            m.umount_wait()
        new_pool = "new_pool"
        # add arbitrary data pool
        self.fs.add_data_pool(new_pool)
        vol_status = json.loads(self._fs_cmd("status", self.volname, "--format=json-pretty"))
        self._fs_cmd("volume", "rm", self.volname, "--yes-i-really-mean-it")

        #check if fs is gone
        volumes = json.loads(self._fs_cmd("volume", "ls", "--format=json-pretty"))
        volnames = [volume['name'] for volume in volumes]
        self.assertNotIn(self.volname, volnames)

        #check if osd pools are gone
        pools = json.loads(self._raw_cmd("osd", "pool", "ls", "--format=json-pretty"))
        for pool in vol_status["pools"]:
            self.assertNotIn(pool["name"], pools)

    def test_volume_rm_when_mon_delete_pool_false(self):
        """
        That the volume can only be removed when mon_allow_pool_delete is set
        to true and verify that the pools are removed after volume deletion.
        """
        for m in self.mounts:
            m.umount_wait()
        self.config_set('mon', 'mon_allow_pool_delete', False)
        try:
            self._fs_cmd("volume", "rm", self.volname, "--yes-i-really-mean-it")
        except CommandFailedError as ce:
            self.assertEqual(ce.exitstatus, errno.EPERM,
                             "expected the 'fs volume rm' command to fail with EPERM, "
                             "but it failed with {0}".format(ce.exitstatus))
        vol_status = json.loads(self._fs_cmd("status", self.volname, "--format=json-pretty"))
        self.config_set('mon', 'mon_allow_pool_delete', True)
        self._fs_cmd("volume", "rm", self.volname, "--yes-i-really-mean-it")

        #check if fs is gone
        volumes = json.loads(self._fs_cmd("volume", "ls", "--format=json-pretty"))
        volnames = [volume['name'] for volume in volumes]
        self.assertNotIn(self.volname, volnames,
                         "volume {0} exists after removal".format(self.volname))
        #check if pools are gone
        pools = json.loads(self._raw_cmd("osd", "pool", "ls", "--format=json-pretty"))
        for pool in vol_status["pools"]:
            self.assertNotIn(pool["name"], pools,
                             "pool {0} exists after volume removal".format(pool["name"]))
    def test_volume_rename(self):
        """
        That volume, its file system and pools, can be renamed.
        """
        for m in self.mounts:
            m.umount_wait()
        oldvolname = self.volname
        newvolname = self._generate_random_volume_name()
        new_data_pool, new_metadata_pool = f"cephfs.{newvolname}.data", f"cephfs.{newvolname}.meta"
        self._fs_cmd("volume", "rename", oldvolname, newvolname,
                     "--yes-i-really-mean-it")
        volumels = json.loads(self._fs_cmd('volume', 'ls'))
        volnames = [volume['name'] for volume in volumels]
        # volume name changed
        self.assertIn(newvolname, volnames)
        self.assertNotIn(oldvolname, volnames)
        # pool names changed
        self.fs.get_pool_names(refresh=True)
        self.assertEqual(new_metadata_pool, self.fs.get_metadata_pool_name())
        self.assertEqual(new_data_pool, self.fs.get_data_pool_name())

    def test_volume_rename_idempotency(self):
        """
        That volume rename is idempotent.
        """
        for m in self.mounts:
            m.umount_wait()
        oldvolname = self.volname
        newvolname = self._generate_random_volume_name()
        new_data_pool, new_metadata_pool = f"cephfs.{newvolname}.data", f"cephfs.{newvolname}.meta"
        self._fs_cmd("volume", "rename", oldvolname, newvolname,
                     "--yes-i-really-mean-it")
        self._fs_cmd("volume", "rename", oldvolname, newvolname,
                     "--yes-i-really-mean-it")
        volumels = json.loads(self._fs_cmd('volume', 'ls'))
        volnames = [volume['name'] for volume in volumels]
        self.assertIn(newvolname, volnames)
        self.assertNotIn(oldvolname, volnames)
        self.fs.get_pool_names(refresh=True)
        self.assertEqual(new_metadata_pool, self.fs.get_metadata_pool_name())
        self.assertEqual(new_data_pool, self.fs.get_data_pool_name())

    def test_volume_rename_fails_without_confirmation_flag(self):
        """
        That renaming volume fails without --yes-i-really-mean-it flag.
        """
        newvolname = self._generate_random_volume_name()
        try:
            self._fs_cmd("volume", "rename", self.volname, newvolname)
        except CommandFailedError as ce:
            self.assertEqual(ce.exitstatus, errno.EPERM,
                             "invalid error code on renaming a FS volume without the "
                             "'--yes-i-really-mean-it' flag")
        else:
            self.fail("expected renaming of FS volume to fail without the "
                      "'--yes-i-really-mean-it' flag")
    def test_volume_rename_for_more_than_one_data_pool(self):
        """
        That renaming a volume with more than one data pool does not change
        the name of the data pools.
        """
        for m in self.mounts:
            m.umount_wait()
        self.fs.add_data_pool('another-data-pool')
        oldvolname = self.volname
        newvolname = self._generate_random_volume_name()
        self.fs.get_pool_names(refresh=True)
        orig_data_pool_names = list(self.fs.data_pools.values())
        new_metadata_pool = f"cephfs.{newvolname}.meta"
        self._fs_cmd("volume", "rename", self.volname, newvolname,
                     "--yes-i-really-mean-it")
        volumels = json.loads(self._fs_cmd('volume', 'ls'))
        volnames = [volume['name'] for volume in volumels]
        # volume name changed
        self.assertIn(newvolname, volnames)
        self.assertNotIn(oldvolname, volnames)
        self.fs.get_pool_names(refresh=True)
        # metadata pool name changed
        self.assertEqual(new_metadata_pool, self.fs.get_metadata_pool_name())
        # data pool names unchanged
        self.assertCountEqual(orig_data_pool_names, list(self.fs.data_pools.values()))
class TestSubvolumeGroups(TestVolumesHelper):
    """Tests for FS subvolume group operations."""
    def test_default_uid_gid_subvolume_group(self):
        group = self._generate_random_group_name()
        expected_uid = 0
        expected_gid = 0

        # create group
        self._fs_cmd("subvolumegroup", "create", self.volname, group)
        group_path = self._get_subvolume_group_path(self.volname, group)

        # check group's uid and gid
        stat = self.mount_a.stat(group_path)
        self.assertEqual(stat['st_uid'], expected_uid)
        self.assertEqual(stat['st_gid'], expected_gid)

        # remove group
        self._fs_cmd("subvolumegroup", "rm", self.volname, group)

    def test_nonexistent_subvolume_group_create(self):
        subvolume = self._generate_random_subvolume_name()
        group = "non_existent_group"

        # try, creating subvolume in a nonexistent group
        try:
            self._fs_cmd("subvolume", "create", self.volname, subvolume, "--group_name", group)
        except CommandFailedError as ce:
            if ce.exitstatus != errno.ENOENT:
                raise
        else:
            raise RuntimeError("expected the 'fs subvolume create' command to fail")

    def test_nonexistent_subvolume_group_rm(self):
        group = "non_existent_group"

        # try, remove subvolume group
        try:
            self._fs_cmd("subvolumegroup", "rm", self.volname, group)
        except CommandFailedError as ce:
            if ce.exitstatus != errno.ENOENT:
                raise
        else:
            raise RuntimeError("expected the 'fs subvolumegroup rm' command to fail")

    def test_subvolume_group_create_with_auto_cleanup_on_fail(self):
        group = self._generate_random_group_name()
        data_pool = "invalid_pool"
        # create group with invalid data pool layout
        with self.assertRaises(CommandFailedError):
            self._fs_cmd("subvolumegroup", "create", self.volname, group, "--pool_layout", data_pool)

        # check whether group path is cleaned up
        try:
            self._fs_cmd("subvolumegroup", "getpath", self.volname, group)
        except CommandFailedError as ce:
            if ce.exitstatus != errno.ENOENT:
                raise
        else:
            raise RuntimeError("expected the 'fs subvolumegroup getpath' command to fail")
    def test_subvolume_group_create_with_desired_data_pool_layout(self):
        group1, group2 = self._generate_random_group_name(2)

        # create group
        self._fs_cmd("subvolumegroup", "create", self.volname, group1)
        group1_path = self._get_subvolume_group_path(self.volname, group1)

        default_pool = self.mount_a.getfattr(group1_path, "ceph.dir.layout.pool")
        new_pool = "new_pool"
        self.assertNotEqual(default_pool, new_pool)

        # add data pool
        newid = self.fs.add_data_pool(new_pool)

        # create group specifying the new data pool as its pool layout
        self._fs_cmd("subvolumegroup", "create", self.volname, group2,
                     "--pool_layout", new_pool)
        group2_path = self._get_subvolume_group_path(self.volname, group2)

        desired_pool = self.mount_a.getfattr(group2_path, "ceph.dir.layout.pool")
        try:
            self.assertEqual(desired_pool, new_pool)
        except AssertionError:
            self.assertEqual(int(desired_pool), newid) # old kernel returns id

        self._fs_cmd("subvolumegroup", "rm", self.volname, group1)
        self._fs_cmd("subvolumegroup", "rm", self.volname, group2)
    def test_subvolume_group_create_with_desired_mode(self):
        group1, group2 = self._generate_random_group_name(2)
        # default mode
        expected_mode1 = "755"
        # desired mode
        expected_mode2 = "777"

        # create group
        self._fs_cmd("subvolumegroup", "create", self.volname, group2, f"--mode={expected_mode2}")
        self._fs_cmd("subvolumegroup", "create", self.volname, group1)

        group1_path = self._get_subvolume_group_path(self.volname, group1)
        group2_path = self._get_subvolume_group_path(self.volname, group2)
        volumes_path = os.path.dirname(group1_path)

        # check group's mode
        actual_mode1 = self.mount_a.run_shell(['stat', '-c' '%a', group1_path]).stdout.getvalue().strip()
        actual_mode2 = self.mount_a.run_shell(['stat', '-c' '%a', group2_path]).stdout.getvalue().strip()
        actual_mode3 = self.mount_a.run_shell(['stat', '-c' '%a', volumes_path]).stdout.getvalue().strip()
        self.assertEqual(actual_mode1, expected_mode1)
        self.assertEqual(actual_mode2, expected_mode2)
        self.assertEqual(actual_mode3, expected_mode1)

        self._fs_cmd("subvolumegroup", "rm", self.volname, group1)
        self._fs_cmd("subvolumegroup", "rm", self.volname, group2)
    def test_subvolume_group_create_with_desired_uid_gid(self):
        """
        That the subvolume group can be created with the desired uid and gid and its uid and gid matches the
        expected values.
        """
        uid = 1000
        gid = 1000

        # create subvolume group
        subvolgroupname = self._generate_random_group_name()
        self._fs_cmd("subvolumegroup", "create", self.volname, subvolgroupname, "--uid", str(uid), "--gid", str(gid))

        # make sure it exists
        subvolgrouppath = self._get_subvolume_group_path(self.volname, subvolgroupname)
        self.assertNotEqual(subvolgrouppath, None)

        # verify the uid and gid
        suid = int(self.mount_a.run_shell(['stat', '-c' '%u', subvolgrouppath]).stdout.getvalue().strip())
        sgid = int(self.mount_a.run_shell(['stat', '-c' '%g', subvolgrouppath]).stdout.getvalue().strip())
        self.assertEqual(uid, suid)
        self.assertEqual(gid, sgid)

        # remove group
        self._fs_cmd("subvolumegroup", "rm", self.volname, subvolgroupname)

    def test_subvolume_group_create_with_invalid_data_pool_layout(self):
        group = self._generate_random_group_name()
        data_pool = "invalid_pool"
        # create group with invalid data pool layout
        try:
            self._fs_cmd("subvolumegroup", "create", self.volname, group, "--pool_layout", data_pool)
        except CommandFailedError as ce:
            if ce.exitstatus != errno.EINVAL:
                raise
        else:
            raise RuntimeError("expected the 'fs subvolumegroup create' command to fail")
    def test_subvolume_group_ls(self):
        # tests the 'fs subvolumegroup ls' command

        #create subvolumegroups
        subvolumegroups = self._generate_random_group_name(3)
        for groupname in subvolumegroups:
            self._fs_cmd("subvolumegroup", "create", self.volname, groupname)

        subvolumegroupls = json.loads(self._fs_cmd('subvolumegroup', 'ls', self.volname))
        if len(subvolumegroupls) == 0:
            raise RuntimeError("Expected the 'fs subvolumegroup ls' command to list the created subvolume groups")
        subvolgroupnames = [subvolumegroup['name'] for subvolumegroup in subvolumegroupls]
        if collections.Counter(subvolgroupnames) != collections.Counter(subvolumegroups):
            raise RuntimeError("Error creating or listing subvolume groups")

    def test_subvolume_group_ls_filter(self):
        # tests the 'fs subvolumegroup ls' command filters '_deleting' directory

        #create subvolumegroup
        subvolumegroups = self._generate_random_group_name(3)
        for groupname in subvolumegroups:
            self._fs_cmd("subvolumegroup", "create", self.volname, groupname)

        # create subvolume and remove. This creates '_deleting' directory.
        subvolume = self._generate_random_subvolume_name()
        self._fs_cmd("subvolume", "create", self.volname, subvolume)
        self._fs_cmd("subvolume", "rm", self.volname, subvolume)

        subvolumegroupls = json.loads(self._fs_cmd('subvolumegroup', 'ls', self.volname))
        subvolgroupnames = [subvolumegroup['name'] for subvolumegroup in subvolumegroupls]
        if "_deleting" in subvolgroupnames:
            self.fail("Listing subvolume groups listed '_deleting' directory")

    def test_subvolume_group_ls_for_nonexistent_volume(self):
        # tests the 'fs subvolumegroup ls' command when /volume doesn't exist
        # prerequisite: we expect that the test volume is created and a subvolumegroup is NOT created

        # list subvolume groups
        subvolumegroupls = json.loads(self._fs_cmd('subvolumegroup', 'ls', self.volname))
        if len(subvolumegroupls) > 0:
            raise RuntimeError("Expected the 'fs subvolumegroup ls' command to output an empty list")
    def test_subvolumegroup_pin_distributed(self):
        self.fs.set_max_mds(2)
        status = self.fs.wait_for_daemons()
        self.config_set('mds', 'mds_export_ephemeral_distributed', True)

        group = "pinme"
        self._fs_cmd("subvolumegroup", "create", self.volname, group)
        self._fs_cmd("subvolumegroup", "pin", self.volname, group, "distributed", "True")
        subvolumes = self._generate_random_subvolume_name(50)
        for subvolume in subvolumes:
            self._fs_cmd("subvolume", "create", self.volname, subvolume, "--group_name", group)
        self._wait_distributed_subtrees(2 * 2, status=status, rank="all")

        # remove subvolumes
        for subvolume in subvolumes:
            self._fs_cmd("subvolume", "rm", self.volname, subvolume, group)

        # verify trash dir is clean
        self._wait_for_trash_empty()

    def test_subvolume_group_rm_force(self):
        # test removing non-existing subvolume group with --force
        group = self._generate_random_group_name()
        # remove subvolume group
        try:
            self._fs_cmd("subvolumegroup", "rm", self.volname, group, "--force")
        except CommandFailedError:
            raise RuntimeError("expected the 'fs subvolumegroup rm --force' command to succeed")
class TestSubvolumes(TestVolumesHelper):
    """Tests for FS subvolume operations, except snapshot and snapshot clone."""
    def test_async_subvolume_rm(self):
        subvolumes = self._generate_random_subvolume_name(100)

        # create subvolumes
        for subvolume in subvolumes:
            self._fs_cmd("subvolume", "create", self.volname, subvolume, "--mode=777")
            self._do_subvolume_io(subvolume, number_of_files=10)

        self.mount_a.umount_wait()

        # remove subvolumes
        for subvolume in subvolumes:
            self._fs_cmd("subvolume", "rm", self.volname, subvolume)

        self.mount_a.mount_wait()

        # verify trash dir is clean
        self._wait_for_trash_empty(timeout=300)
    def test_default_uid_gid_subvolume(self):
        subvolume = self._generate_random_subvolume_name()
        expected_uid = 0
        expected_gid = 0

        # create subvolume
        self._fs_cmd("subvolume", "create", self.volname, subvolume)
        subvol_path = self._get_subvolume_path(self.volname, subvolume)

        # check subvolume's uid and gid
        stat = self.mount_a.stat(subvol_path)
        self.assertEqual(stat['st_uid'], expected_uid)
        self.assertEqual(stat['st_gid'], expected_gid)

        # remove subvolume
        self._fs_cmd("subvolume", "rm", self.volname, subvolume)

        # verify trash dir is clean
        self._wait_for_trash_empty()

    def test_nonexistent_subvolume_rm(self):
        # remove non-existing subvolume
        subvolume = "non_existent_subvolume"

        # try, remove subvolume
        try:
            self._fs_cmd("subvolume", "rm", self.volname, subvolume)
        except CommandFailedError as ce:
            if ce.exitstatus != errno.ENOENT:
                raise
        else:
            raise RuntimeError("expected the 'fs subvolume rm' command to fail")

    def test_subvolume_create_and_rm(self):
        # create subvolume
        subvolume = self._generate_random_subvolume_name()
        self._fs_cmd("subvolume", "create", self.volname, subvolume)

        # make sure it exists
        subvolpath = self._fs_cmd("subvolume", "getpath", self.volname, subvolume)
        self.assertNotEqual(subvolpath, None)

        # remove subvolume
        self._fs_cmd("subvolume", "rm", self.volname, subvolume)

        # make sure its gone
        try:
            self._fs_cmd("subvolume", "getpath", self.volname, subvolume)
        except CommandFailedError as ce:
            if ce.exitstatus != errno.ENOENT:
                raise
        else:
            raise RuntimeError("expected the 'fs subvolume getpath' command to fail. Subvolume not removed.")

        # verify trash dir is clean
        self._wait_for_trash_empty()
    def test_subvolume_create_and_rm_in_group(self):
        subvolume = self._generate_random_subvolume_name()
        group = self._generate_random_group_name()

        # create group
        self._fs_cmd("subvolumegroup", "create", self.volname, group)

        # create subvolume in group
        self._fs_cmd("subvolume", "create", self.volname, subvolume, "--group_name", group)

        # remove subvolume
        self._fs_cmd("subvolume", "rm", self.volname, subvolume, group)

        # verify trash dir is clean
        self._wait_for_trash_empty()

        # remove group
        self._fs_cmd("subvolumegroup", "rm", self.volname, group)

    def test_subvolume_create_idempotence(self):
        # create subvolume
        subvolume = self._generate_random_subvolume_name()
        self._fs_cmd("subvolume", "create", self.volname, subvolume)

        # try creating w/ same subvolume name -- should be idempotent
        self._fs_cmd("subvolume", "create", self.volname, subvolume)

        # remove subvolume
        self._fs_cmd("subvolume", "rm", self.volname, subvolume)

        # verify trash dir is clean
        self._wait_for_trash_empty()

    def test_subvolume_create_idempotence_resize(self):
        # create subvolume
        subvolume = self._generate_random_subvolume_name()
        self._fs_cmd("subvolume", "create", self.volname, subvolume)

        # try creating w/ same subvolume name with size -- should set quota
        self._fs_cmd("subvolume", "create", self.volname, subvolume, "1000000000")

        # get subvolume metadata
        subvol_info = json.loads(self._get_subvolume_info(self.volname, subvolume))
        self.assertEqual(subvol_info["bytes_quota"], 1000000000)

        # remove subvolume
        self._fs_cmd("subvolume", "rm", self.volname, subvolume)

        # verify trash dir is clean
        self._wait_for_trash_empty()
    def test_subvolume_create_idempotence_mode(self):
        # default mode
        default_mode = "755"

        # create subvolume
        subvolume = self._generate_random_subvolume_name()
        self._fs_cmd("subvolume", "create", self.volname, subvolume)

        subvol_path = self._get_subvolume_path(self.volname, subvolume)

        actual_mode_1 = self.mount_a.run_shell(['stat', '-c' '%a', subvol_path]).stdout.getvalue().strip()
        self.assertEqual(actual_mode_1, default_mode)

        # try creating w/ same subvolume name with --mode 777
        new_mode = "777"
        self._fs_cmd("subvolume", "create", self.volname, subvolume, "--mode", new_mode)

        actual_mode_2 = self.mount_a.run_shell(['stat', '-c' '%a', subvol_path]).stdout.getvalue().strip()
        self.assertEqual(actual_mode_2, new_mode)

        # remove subvolume
        self._fs_cmd("subvolume", "rm", self.volname, subvolume)

        # verify trash dir is clean
        self._wait_for_trash_empty()

    def test_subvolume_create_idempotence_without_passing_mode(self):
        # create subvolume
        desired_mode = "777"
        subvolume = self._generate_random_subvolume_name()
        self._fs_cmd("subvolume", "create", self.volname, subvolume, "--mode", desired_mode)

        subvol_path = self._get_subvolume_path(self.volname, subvolume)

        actual_mode_1 = self.mount_a.run_shell(['stat', '-c' '%a', subvol_path]).stdout.getvalue().strip()
        self.assertEqual(actual_mode_1, desired_mode)

        # default mode
        default_mode = "755"

        # try creating w/ same subvolume name without passing --mode argument
        self._fs_cmd("subvolume", "create", self.volname, subvolume)

        actual_mode_2 = self.mount_a.run_shell(['stat', '-c' '%a', subvol_path]).stdout.getvalue().strip()
        self.assertEqual(actual_mode_2, default_mode)

        # remove subvolume
        self._fs_cmd("subvolume", "rm", self.volname, subvolume)

        # verify trash dir is clean
        self._wait_for_trash_empty()
    def test_subvolume_create_isolated_namespace(self):
        """
        Create subvolume in separate rados namespace
        """

        # create subvolume
        subvolume = self._generate_random_subvolume_name()
        self._fs_cmd("subvolume", "create", self.volname, subvolume, "--namespace-isolated")

        # get subvolume metadata
        subvol_info = json.loads(self._get_subvolume_info(self.volname, subvolume))
        self.assertNotEqual(len(subvol_info), 0)
        self.assertEqual(subvol_info["pool_namespace"], "fsvolumens_" + subvolume)

        # remove subvolume
        self._fs_cmd("subvolume", "rm", self.volname, subvolume)

        # verify trash dir is clean
        self._wait_for_trash_empty()

    def test_subvolume_create_with_auto_cleanup_on_fail(self):
        subvolume = self._generate_random_subvolume_name()
        data_pool = "invalid_pool"
        # create subvolume with invalid data pool layout fails
        with self.assertRaises(CommandFailedError):
            self._fs_cmd("subvolume", "create", self.volname, subvolume, "--pool_layout", data_pool)

        # check whether subvol path is cleaned up
        try:
            self._fs_cmd("subvolume", "getpath", self.volname, subvolume)
        except CommandFailedError as ce:
            self.assertEqual(ce.exitstatus, errno.ENOENT, "invalid error code on getpath of non-existent subvolume")
        else:
            self.fail("expected the 'fs subvolume getpath' command to fail")

        # verify trash dir is clean
        self._wait_for_trash_empty()
    def test_subvolume_create_with_desired_data_pool_layout_in_group(self):
        subvol1, subvol2 = self._generate_random_subvolume_name(2)
        group = self._generate_random_group_name()

        # create group. this also helps set default pool layout for subvolumes
        # created within the group.
        self._fs_cmd("subvolumegroup", "create", self.volname, group)

        # create subvolume in group.
        self._fs_cmd("subvolume", "create", self.volname, subvol1, "--group_name", group)
        subvol1_path = self._get_subvolume_path(self.volname, subvol1, group_name=group)

        default_pool = self.mount_a.getfattr(subvol1_path, "ceph.dir.layout.pool")
        new_pool = "new_pool"
        self.assertNotEqual(default_pool, new_pool)

        # add data pool
        newid = self.fs.add_data_pool(new_pool)

        # create subvolume specifying the new data pool as its pool layout
        self._fs_cmd("subvolume", "create", self.volname, subvol2, "--group_name", group,
                     "--pool_layout", new_pool)
        subvol2_path = self._get_subvolume_path(self.volname, subvol2, group_name=group)

        desired_pool = self.mount_a.getfattr(subvol2_path, "ceph.dir.layout.pool")
        try:
            self.assertEqual(desired_pool, new_pool)
        except AssertionError:
            self.assertEqual(int(desired_pool), newid) # old kernel returns id

        self._fs_cmd("subvolume", "rm", self.volname, subvol2, group)
        self._fs_cmd("subvolume", "rm", self.volname, subvol1, group)
        self._fs_cmd("subvolumegroup", "rm", self.volname, group)

        # verify trash dir is clean
        self._wait_for_trash_empty()
    def test_subvolume_create_with_desired_mode(self):
        subvol1 = self._generate_random_subvolume_name()

        # default mode
        default_mode = "755"
        # desired mode
        desired_mode = "777"

        self._fs_cmd("subvolume", "create", self.volname, subvol1, "--mode", "777")

        subvol1_path = self._get_subvolume_path(self.volname, subvol1)

        # check subvolumegroup's mode
        subvol_par_path = os.path.dirname(subvol1_path)
        group_path = os.path.dirname(subvol_par_path)
        actual_mode1 = self.mount_a.run_shell(['stat', '-c' '%a', group_path]).stdout.getvalue().strip()
        self.assertEqual(actual_mode1, default_mode)
        # check /volumes mode
        volumes_path = os.path.dirname(group_path)
        actual_mode2 = self.mount_a.run_shell(['stat', '-c' '%a', volumes_path]).stdout.getvalue().strip()
        self.assertEqual(actual_mode2, default_mode)
        # check subvolume's mode
        actual_mode3 = self.mount_a.run_shell(['stat', '-c' '%a', subvol1_path]).stdout.getvalue().strip()
        self.assertEqual(actual_mode3, desired_mode)

        self._fs_cmd("subvolume", "rm", self.volname, subvol1)

        # verify trash dir is clean
        self._wait_for_trash_empty()

    def test_subvolume_create_with_desired_mode_in_group(self):
        subvol1, subvol2, subvol3 = self._generate_random_subvolume_name(3)

        group = self._generate_random_group_name()
        # default mode
        expected_mode1 = "755"
        # desired mode
        expected_mode2 = "777"

        # create group
        self._fs_cmd("subvolumegroup", "create", self.volname, group)

        # create subvolume in group
        self._fs_cmd("subvolume", "create", self.volname, subvol1, "--group_name", group)
        self._fs_cmd("subvolume", "create", self.volname, subvol2, "--group_name", group, "--mode", "777")
        # check whether mode 0777 also works
        self._fs_cmd("subvolume", "create", self.volname, subvol3, "--group_name", group, "--mode", "0777")

        subvol1_path = self._get_subvolume_path(self.volname, subvol1, group_name=group)
        subvol2_path = self._get_subvolume_path(self.volname, subvol2, group_name=group)
        subvol3_path = self._get_subvolume_path(self.volname, subvol3, group_name=group)

        # check subvolume's mode
        actual_mode1 = self.mount_a.run_shell(['stat', '-c' '%a', subvol1_path]).stdout.getvalue().strip()
        actual_mode2 = self.mount_a.run_shell(['stat', '-c' '%a', subvol2_path]).stdout.getvalue().strip()
        actual_mode3 = self.mount_a.run_shell(['stat', '-c' '%a', subvol3_path]).stdout.getvalue().strip()
        self.assertEqual(actual_mode1, expected_mode1)
        self.assertEqual(actual_mode2, expected_mode2)
        self.assertEqual(actual_mode3, expected_mode2)

        self._fs_cmd("subvolume", "rm", self.volname, subvol1, group)
        self._fs_cmd("subvolume", "rm", self.volname, subvol2, group)
        self._fs_cmd("subvolume", "rm", self.volname, subvol3, group)
        self._fs_cmd("subvolumegroup", "rm", self.volname, group)

        # verify trash dir is clean
        self._wait_for_trash_empty()
    def test_subvolume_create_with_desired_uid_gid(self):
        """
        That the subvolume can be created with the desired uid and gid and its uid and gid matches the
        expected values.
        """
        uid = 1000
        gid = 1000

        # create subvolume
        subvolname = self._generate_random_subvolume_name()
        self._fs_cmd("subvolume", "create", self.volname, subvolname, "--uid", str(uid), "--gid", str(gid))

        # make sure it exists
        subvolpath = self._get_subvolume_path(self.volname, subvolname)
        self.assertNotEqual(subvolpath, None)

        # verify the uid and gid
        suid = int(self.mount_a.run_shell(['stat', '-c' '%u', subvolpath]).stdout.getvalue().strip())
        sgid = int(self.mount_a.run_shell(['stat', '-c' '%g', subvolpath]).stdout.getvalue().strip())
        self.assertEqual(uid, suid)
        self.assertEqual(gid, sgid)

        # remove subvolume
        self._fs_cmd("subvolume", "rm", self.volname, subvolname)

        # verify trash dir is clean
        self._wait_for_trash_empty()

    def test_subvolume_create_with_invalid_data_pool_layout(self):
        subvolume = self._generate_random_subvolume_name()
        data_pool = "invalid_pool"
        # create subvolume with invalid data pool layout
        try:
            self._fs_cmd("subvolume", "create", self.volname, subvolume, "--pool_layout", data_pool)
        except CommandFailedError as ce:
            self.assertEqual(ce.exitstatus, errno.EINVAL, "invalid error code on create of subvolume with invalid pool layout")
        else:
            self.fail("expected the 'fs subvolume create' command to fail")

        # verify trash dir is clean
        self._wait_for_trash_empty()

    def test_subvolume_create_with_invalid_size(self):
        # create subvolume with an invalid size -1
        subvolume = self._generate_random_subvolume_name()
        try:
            self._fs_cmd("subvolume", "create", self.volname, subvolume, "--size", "-1")
        except CommandFailedError as ce:
            self.assertEqual(ce.exitstatus, errno.EINVAL, "invalid error code on create of subvolume with invalid size")
        else:
            self.fail("expected the 'fs subvolume create' command to fail")

        # verify trash dir is clean
        self._wait_for_trash_empty()
    def test_subvolume_expand(self):
        """
        That a subvolume can be expanded in size and its quota matches the expected size.
        """

        # create subvolume
        subvolname = self._generate_random_subvolume_name()
        osize = self.DEFAULT_FILE_SIZE*1024*1024
        self._fs_cmd("subvolume", "create", self.volname, subvolname, "--size", str(osize))

        # make sure it exists
        subvolpath = self._get_subvolume_path(self.volname, subvolname)
        self.assertNotEqual(subvolpath, None)

        # expand the subvolume
        nsize = osize*2
        self._fs_cmd("subvolume", "resize", self.volname, subvolname, str(nsize))

        # verify the quota
        size = int(self.mount_a.getfattr(subvolpath, "ceph.quota.max_bytes"))
        self.assertEqual(size, nsize)

        # remove subvolume
        self._fs_cmd("subvolume", "rm", self.volname, subvolname)

        # verify trash dir is clean
        self._wait_for_trash_empty()
    def test_subvolume_info(self):
        # tests the 'fs subvolume info' command

        subvol_md = ["atime", "bytes_pcent", "bytes_quota", "bytes_used", "created_at", "ctime",
                     "data_pool", "gid", "mode", "mon_addrs", "mtime", "path", "pool_namespace",
                     "type", "uid", "features", "state"]

        # create subvolume
        subvolume = self._generate_random_subvolume_name()
        self._fs_cmd("subvolume", "create", self.volname, subvolume)

        # get subvolume metadata
        subvol_info = json.loads(self._get_subvolume_info(self.volname, subvolume))
        for md in subvol_md:
            self.assertIn(md, subvol_info, "'{0}' key not present in metadata of subvolume".format(md))

        self.assertEqual(subvol_info["bytes_pcent"], "undefined", "bytes_pcent should be set to undefined if quota is not set")
        self.assertEqual(subvol_info["bytes_quota"], "infinite", "bytes_quota should be set to infinite if quota is not set")
        self.assertEqual(subvol_info["pool_namespace"], "", "expected pool namespace to be empty")
        self.assertEqual(subvol_info["state"], "complete", "expected state to be complete")

        self.assertEqual(len(subvol_info["features"]), 3,
                         msg="expected 3 features, found '{0}' ({1})".format(len(subvol_info["features"]), subvol_info["features"]))
        for feature in ['snapshot-clone', 'snapshot-autoprotect', 'snapshot-retention']:
            self.assertIn(feature, subvol_info["features"], msg="expected feature '{0}' in subvolume".format(feature))

        nsize = self.DEFAULT_FILE_SIZE*1024*1024
        self._fs_cmd("subvolume", "resize", self.volname, subvolume, str(nsize))

        # get subvolume metadata after quota set
        subvol_info = json.loads(self._get_subvolume_info(self.volname, subvolume))
        for md in subvol_md:
            self.assertIn(md, subvol_info, "'{0}' key not present in metadata of subvolume".format(md))

        self.assertNotEqual(subvol_info["bytes_pcent"], "undefined", "bytes_pcent should not be set to undefined if quota is set")
        self.assertEqual(subvol_info["bytes_quota"], nsize, "bytes_quota should be set to '{0}'".format(nsize))
        self.assertEqual(subvol_info["type"], "subvolume", "type should be set to subvolume")
        self.assertEqual(subvol_info["state"], "complete", "expected state to be complete")

        self.assertEqual(len(subvol_info["features"]), 3,
                         msg="expected 3 features, found '{0}' ({1})".format(len(subvol_info["features"]), subvol_info["features"]))
        for feature in ['snapshot-clone', 'snapshot-autoprotect', 'snapshot-retention']:
            self.assertIn(feature, subvol_info["features"], msg="expected feature '{0}' in subvolume".format(feature))

        # remove subvolume
        self._fs_cmd("subvolume", "rm", self.volname, subvolume)

        # verify trash dir is clean
        self._wait_for_trash_empty()
    def test_subvolume_ls(self):
        # tests the 'fs subvolume ls' command

        # create subvolumes
        subvolumes = self._generate_random_subvolume_name(3)
        for subvolume in subvolumes:
            self._fs_cmd("subvolume", "create", self.volname, subvolume)

        # list subvolumes
        subvolumels = json.loads(self._fs_cmd('subvolume', 'ls', self.volname))
        if len(subvolumels) == 0:
            self.fail("Expected the 'fs subvolume ls' command to list the created subvolumes.")
        subvolnames = [subvolume['name'] for subvolume in subvolumels]
        if collections.Counter(subvolnames) != collections.Counter(subvolumes):
            self.fail("Error creating or listing subvolumes")

        # remove subvolumes
        for subvolume in subvolumes:
            self._fs_cmd("subvolume", "rm", self.volname, subvolume)

        # verify trash dir is clean
        self._wait_for_trash_empty()

    def test_subvolume_ls_for_notexistent_default_group(self):
        # tests the 'fs subvolume ls' command when the default group '_nogroup' doesn't exist
        # prerequisite: we expect that the volume is created and the default group _nogroup is
        # NOT created (i.e. a subvolume without group is not created)

        subvolumels = json.loads(self._fs_cmd('subvolume', 'ls', self.volname))
        if len(subvolumels) > 0:
            raise RuntimeError("Expected the 'fs subvolume ls' command to output an empty list.")
    def test_subvolume_marked(self):
        """
        ensure a subvolume is marked with the ceph.dir.subvolume xattr
        """
        subvolume = self._generate_random_subvolume_name()

        # create subvolume
        self._fs_cmd("subvolume", "create", self.volname, subvolume)

        # getpath
        subvolpath = self._get_subvolume_path(self.volname, subvolume)

        # subdirectory of a subvolume cannot be moved outside the subvolume once marked with
        # the xattr ceph.dir.subvolume, hence test by attempting to rename subvol path (incarnation)
        # outside the subvolume
        dstpath = os.path.join(self.mount_a.mountpoint, 'volumes', '_nogroup', 'new_subvol_location')
        srcpath = os.path.join(self.mount_a.mountpoint, subvolpath)
        rename_script = dedent("""
            import os
            import errno
            try:
                os.rename("{src}", "{dst}")
            except OSError as e:
                if e.errno != errno.EXDEV:
                    raise RuntimeError("invalid error code on renaming subvolume incarnation out of subvolume directory")
            else:
                raise RuntimeError("expected renaming subvolume incarnation out of subvolume directory to fail")
            """)
        self.mount_a.run_python(rename_script.format(src=srcpath, dst=dstpath), sudo=True)

        # remove subvolume
        self._fs_cmd("subvolume", "rm", self.volname, subvolume)

        # verify trash dir is clean
        self._wait_for_trash_empty()
    def test_subvolume_pin_export(self):
        self.fs.set_max_mds(2)
        status = self.fs.wait_for_daemons()

        subvolume = self._generate_random_subvolume_name()
        self._fs_cmd("subvolume", "create", self.volname, subvolume)
        self._fs_cmd("subvolume", "pin", self.volname, subvolume, "export", "1")
        path = self._fs_cmd("subvolume", "getpath", self.volname, subvolume)
        path = os.path.dirname(path) # get subvolume path

        self._get_subtrees(status=status, rank=1)
        self._wait_subtrees([(path, 1)], status=status)

        # remove subvolume
        self._fs_cmd("subvolume", "rm", self.volname, subvolume)

        # verify trash dir is clean
        self._wait_for_trash_empty()
    ### authorize operations

    def test_authorize_deauthorize_legacy_subvolume(self):
        subvolume = self._generate_random_subvolume_name()
        group = self._generate_random_group_name()
        authid = "alice"

        guest_mount = self.mount_b
        guest_mount.umount_wait()

        # emulate a old-fashioned subvolume in a custom group
        createpath = os.path.join(".", "volumes", group, subvolume)
        self.mount_a.run_shell(['mkdir', '-p', createpath], sudo=True)

        # add required xattrs to subvolume
        default_pool = self.mount_a.getfattr(".", "ceph.dir.layout.pool")
        self.mount_a.setfattr(createpath, 'ceph.dir.layout.pool', default_pool, sudo=True)

        mount_path = os.path.join("/", "volumes", group, subvolume)

        # authorize guest authID read-write access to subvolume
        key = self._fs_cmd("subvolume", "authorize", self.volname, subvolume, authid,
                           "--group_name", group, "--tenant_id", "tenant_id")

        # guest authID should exist
        existing_ids = [a['entity'] for a in self.auth_list()]
        self.assertIn("client.{0}".format(authid), existing_ids)

        # configure credentials for guest client
        self._configure_guest_auth(guest_mount, authid, key)

        # mount the subvolume, and write to it
        guest_mount.mount_wait(cephfs_mntpt=mount_path)
        guest_mount.write_n_mb("data.bin", 1)

        # authorize guest authID read access to subvolume
        key = self._fs_cmd("subvolume", "authorize", self.volname, subvolume, authid,
                           "--group_name", group, "--tenant_id", "tenant_id", "--access_level", "r")

        # guest client sees the change in access level to read only after a
        # remount of the subvolume.
        guest_mount.umount_wait()
        guest_mount.mount_wait(cephfs_mntpt=mount_path)

        # read existing content of the subvolume
        self.assertListEqual(guest_mount.ls(guest_mount.mountpoint), ["data.bin"])
        # cannot write into read-only subvolume
        with self.assertRaises(CommandFailedError):
            guest_mount.write_n_mb("rogue.bin", 1)

        # cleanup
        guest_mount.umount_wait()
        self._fs_cmd("subvolume", "deauthorize", self.volname, subvolume, authid,
                     "--group_name", group)
        # guest authID should no longer exist
        existing_ids = [a['entity'] for a in self.auth_list()]
        self.assertNotIn("client.{0}".format(authid), existing_ids)
        self._fs_cmd("subvolume", "rm", self.volname, subvolume, "--group_name", group)
        self._fs_cmd("subvolumegroup", "rm", self.volname, group)
    def test_authorize_deauthorize_subvolume(self):
        subvolume = self._generate_random_subvolume_name()
        group = self._generate_random_group_name()
        authid = "alice"

        guest_mount = self.mount_b
        guest_mount.umount_wait()

        # create group
        self._fs_cmd("subvolumegroup", "create", self.volname, group, "--mode=777")

        # create subvolume in group
        self._fs_cmd("subvolume", "create", self.volname, subvolume, "--group_name", group)
        mount_path = self._fs_cmd("subvolume", "getpath", self.volname, subvolume,
                                  "--group_name", group).rstrip()

        # authorize guest authID read-write access to subvolume
        key = self._fs_cmd("subvolume", "authorize", self.volname, subvolume, authid,
                           "--group_name", group, "--tenant_id", "tenant_id")

        # guest authID should exist
        existing_ids = [a['entity'] for a in self.auth_list()]
        self.assertIn("client.{0}".format(authid), existing_ids)

        # configure credentials for guest client
        self._configure_guest_auth(guest_mount, authid, key)

        # mount the subvolume, and write to it
        guest_mount.mount_wait(cephfs_mntpt=mount_path)
        guest_mount.write_n_mb("data.bin", 1)

        # authorize guest authID read access to subvolume
        key = self._fs_cmd("subvolume", "authorize", self.volname, subvolume, authid,
                           "--group_name", group, "--tenant_id", "tenant_id", "--access_level", "r")

        # guest client sees the change in access level to read only after a
        # remount of the subvolume.
        guest_mount.umount_wait()
        guest_mount.mount_wait(cephfs_mntpt=mount_path)

        # read existing content of the subvolume
        self.assertListEqual(guest_mount.ls(guest_mount.mountpoint), ["data.bin"])
        # cannot write into read-only subvolume
        with self.assertRaises(CommandFailedError):
            guest_mount.write_n_mb("rogue.bin", 1)

        # cleanup
        guest_mount.umount_wait()
        self._fs_cmd("subvolume", "deauthorize", self.volname, subvolume, authid,
                     "--group_name", group)
        # guest authID should no longer exist
        existing_ids = [a['entity'] for a in self.auth_list()]
        self.assertNotIn("client.{0}".format(authid), existing_ids)
        self._fs_cmd("subvolume", "rm", self.volname, subvolume, "--group_name", group)
        self._fs_cmd("subvolumegroup", "rm", self.volname, group)

    def test_multitenant_subvolumes(self):
        """
        That subvolume access can be restricted to a tenant.

        That metadata used to enforce tenant isolation of
        subvolumes is stored as a two-way mapping between auth
        IDs and subvolumes that they're authorized to access.
        """
        subvolume = self._generate_random_subvolume_name()
        group = self._generate_random_group_name()

        guest_mount = self.mount_b

        # Guest clients belonging to different tenants, but using the same
        # auth ID.
        auth_id = "alice"
        guestclient_1 = {
            "auth_id": auth_id,
            "tenant_id": "tenant1",
        }
        guestclient_2 = {
            "auth_id": auth_id,
            "tenant_id": "tenant2",
        }

        # create group
        self._fs_cmd("subvolumegroup", "create", self.volname, group)

        # create subvolume in group
        self._fs_cmd("subvolume", "create", self.volname, subvolume, "--group_name", group)

        # Check that subvolume metadata file is created on subvolume creation.
        subvol_metadata_filename = "_{0}:{1}.meta".format(group, subvolume)
        self.assertIn(subvol_metadata_filename, guest_mount.ls("volumes"))

        # Authorize 'guestclient_1', using auth ID 'alice' and belonging to
        # 'tenant1', with 'rw' access to the volume.
        self._fs_cmd("subvolume", "authorize", self.volname, subvolume, guestclient_1["auth_id"],
                     "--group_name", group, "--tenant_id", guestclient_1["tenant_id"])

        # Check that auth metadata file for auth ID 'alice', is
        # created on authorizing 'alice' access to the subvolume.
        auth_metadata_filename = "${0}.meta".format(guestclient_1["auth_id"])
        self.assertIn(auth_metadata_filename, guest_mount.ls("volumes"))

        # Verify that the auth metadata file stores the tenant ID that the
        # auth ID belongs to, the auth ID's authorized access levels
        # for different subvolumes, versioning details, etc.
        expected_auth_metadata = {
            "version": 5,
            "compat_version": 6,
            "dirty": False,
            "tenant_id": "tenant1",
            "subvolumes": {
                "{0}/{1}".format(group, subvolume): {
                    "dirty": False,
                    "access_level": "rw"
                }
            }
        }

        auth_metadata = self._auth_metadata_get(guest_mount.read_file("volumes/{0}".format(auth_metadata_filename)))
        self.assertGreaterEqual(auth_metadata["version"], expected_auth_metadata["version"])
        del expected_auth_metadata["version"]
        del auth_metadata["version"]
        self.assertEqual(expected_auth_metadata, auth_metadata)

        # Verify that the subvolume metadata file stores info about auth IDs
        # and their access levels to the subvolume, versioning details, etc.
        expected_subvol_metadata = {
            "version": 1,
            "compat_version": 1,
            "auths": {
                "alice": {
                    "dirty": False,
                    "access_level": "rw"
                }
            }
        }
        subvol_metadata = self._auth_metadata_get(guest_mount.read_file("volumes/{0}".format(subvol_metadata_filename)))

        self.assertGreaterEqual(subvol_metadata["version"], expected_subvol_metadata["version"])
        del expected_subvol_metadata["version"]
        del subvol_metadata["version"]
        self.assertEqual(expected_subvol_metadata, subvol_metadata)

        # Cannot authorize 'guestclient_2' to access the volume.
        # It uses auth ID 'alice', which has already been used by
        # 'guestclient_1', belonging to another tenant, for accessing
        # the volume.
        try:
            self._fs_cmd("subvolume", "authorize", self.volname, subvolume, guestclient_2["auth_id"],
                         "--group_name", group, "--tenant_id", guestclient_2["tenant_id"])
        except CommandFailedError as ce:
            self.assertEqual(ce.exitstatus, errno.EPERM,
                             "Invalid error code returned on authorize of subvolume with same auth_id but different tenant_id")
        else:
            self.fail("expected the 'fs subvolume authorize' command to fail")

        # Check that auth metadata file is cleaned up on removing
        # auth ID's only access to a volume.
        self._fs_cmd("subvolume", "deauthorize", self.volname, subvolume, auth_id,
                     "--group_name", group)
        self.assertNotIn(auth_metadata_filename, guest_mount.ls("volumes"))

        # Check that subvolume metadata file is cleaned up on subvolume deletion.
        self._fs_cmd("subvolume", "rm", self.volname, subvolume, "--group_name", group)
        self.assertNotIn(subvol_metadata_filename, guest_mount.ls("volumes"))

        # clean up
        guest_mount.umount_wait()
        self._fs_cmd("subvolumegroup", "rm", self.volname, group)
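
    # The isolation model exercised above hinges on two metadata files kept at
    # the volume root: "$<auth_id>.meta" records the auth ID's tenant_id and
    # the subvolumes it may access, while "_<group>:<subvolume>.meta" records,
    # per subvolume, the auth IDs allowed in and their access levels. Refusing
    # to reuse 'alice' for tenant2 only needs the tenant_id stored in the
    # former file.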

    def test_subvolume_authorized_list(self):
        subvolume = self._generate_random_subvolume_name()
        group = self._generate_random_group_name()
        authid1 = "alice"
        authid2 = "guest1"
        authid3 = "guest2"

        # create group
        self._fs_cmd("subvolumegroup", "create", self.volname, group)

        # create subvolume in group
        self._fs_cmd("subvolume", "create", self.volname, subvolume, "--group_name", group)

        # authorize alice authID read-write access to subvolume
        self._fs_cmd("subvolume", "authorize", self.volname, subvolume, authid1,
                     "--group_name", group)
        # authorize guest1 authID read-write access to subvolume
        self._fs_cmd("subvolume", "authorize", self.volname, subvolume, authid2,
                     "--group_name", group)
        # authorize guest2 authID read access to subvolume
        self._fs_cmd("subvolume", "authorize", self.volname, subvolume, authid3,
                     "--group_name", group, "--access_level", "r")

        # list authorized-ids of the subvolume
        expected_auth_list = [{'alice': 'rw'}, {'guest1': 'rw'}, {'guest2': 'r'}]
        auth_list = json.loads(self._fs_cmd('subvolume', 'authorized_list', self.volname, subvolume, "--group_name", group))
        self.assertCountEqual(expected_auth_list, auth_list)

        # cleanup
        self._fs_cmd("subvolume", "deauthorize", self.volname, subvolume, authid1,
                     "--group_name", group)
        self._fs_cmd("subvolume", "deauthorize", self.volname, subvolume, authid2,
                     "--group_name", group)
        self._fs_cmd("subvolume", "deauthorize", self.volname, subvolume, authid3,
                     "--group_name", group)
        self._fs_cmd("subvolume", "rm", self.volname, subvolume, "--group_name", group)
        self._fs_cmd("subvolumegroup", "rm", self.volname, group)
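
    # For reference, the `authorized_list` payload asserted above is a JSON
    # array of single-key objects, e.g.:
    #   [{"alice": "rw"}, {"guest1": "rw"}, {"guest2": "r"}]
    # assertCountEqual is used because entry order is not part of the
    # interface.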

    def test_authorize_auth_id_not_created_by_mgr_volumes(self):
        """
        If the auth_id already exists and was not created by the mgr plugin,
        authorizing that auth-id is not allowed by default.
        """
        subvolume = self._generate_random_subvolume_name()
        group = self._generate_random_group_name()

        # Create auth_id out of band
        self.fs.mon_manager.raw_cluster_cmd(
            "auth", "get-or-create", "client.guest1",
            "mds", "allow *",
            "osd", "allow rw",
            "mon", "allow *"
        )

        auth_id = "guest1"
        guestclient_1 = {
            "auth_id": auth_id,
            "tenant_id": "tenant1",
        }

        # create group
        self._fs_cmd("subvolumegroup", "create", self.volname, group)

        # create subvolume in group
        self._fs_cmd("subvolume", "create", self.volname, subvolume, "--group_name", group)

        try:
            self._fs_cmd("subvolume", "authorize", self.volname, subvolume, guestclient_1["auth_id"],
                         "--group_name", group, "--tenant_id", guestclient_1["tenant_id"])
        except CommandFailedError as ce:
            self.assertEqual(ce.exitstatus, errno.EPERM,
                             "Invalid error code returned on authorize of subvolume for auth_id created out of band")
        else:
            self.fail("expected the 'fs subvolume authorize' command to fail")

        # clean up
        self.fs.mon_manager.raw_cluster_cmd("auth", "rm", "client.guest1")
        self._fs_cmd("subvolume", "rm", self.volname, subvolume, "--group_name", group)
        self._fs_cmd("subvolumegroup", "rm", self.volname, group)

    def test_authorize_allow_existing_id_option(self):
        """
        If the auth_id already exists and was not created by mgr volumes,
        authorizing that auth-id is not allowed by default, but is
        allowed with the option allow_existing_id.
        """
        subvolume = self._generate_random_subvolume_name()
        group = self._generate_random_group_name()

        # Create auth_id out of band
        self.fs.mon_manager.raw_cluster_cmd(
            "auth", "get-or-create", "client.guest1",
            "mds", "allow *",
            "osd", "allow rw",
            "mon", "allow *"
        )

        auth_id = "guest1"
        guestclient_1 = {
            "auth_id": auth_id,
            "tenant_id": "tenant1",
        }

        # create group
        self._fs_cmd("subvolumegroup", "create", self.volname, group)

        # create subvolume in group
        self._fs_cmd("subvolume", "create", self.volname, subvolume, "--group_name", group)

        # 'guestclient_1' already exists and was not created by mgr volumes,
        # so authorize would refuse it by default; with 'allow_existing_id'
        # it is accepted.
        self._fs_cmd("subvolume", "authorize", self.volname, subvolume, guestclient_1["auth_id"],
                     "--group_name", group, "--tenant_id", guestclient_1["tenant_id"], "--allow-existing-id")

        # clean up
        self._fs_cmd("subvolume", "deauthorize", self.volname, subvolume, auth_id,
                     "--group_name", group)
        self.fs.mon_manager.raw_cluster_cmd("auth", "rm", "client.guest1")
        self._fs_cmd("subvolume", "rm", self.volname, subvolume, "--group_name", group)
        self._fs_cmd("subvolumegroup", "rm", self.volname, group)
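
    # Note the flag spelling: `--allow-existing-id` is what lets mgr/volumes
    # adopt an auth ID it did not create. Without it, the same authorize call
    # is expected to fail with EPERM, as the previous test verifies.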

    def test_deauthorize_auth_id_after_out_of_band_update(self):
        """
        If the auth_id authorized by the mgr/volumes plugin is updated
        out of band, the auth_id should not be deleted after a
        deauthorize. Deauthorize should only remove the caps it granted.
        """
        subvolume = self._generate_random_subvolume_name()
        group = self._generate_random_group_name()

        auth_id = "guest1"
        guestclient_1 = {
            "auth_id": auth_id,
            "tenant_id": "tenant1",
        }

        # create group
        self._fs_cmd("subvolumegroup", "create", self.volname, group)

        # create subvolume in group
        self._fs_cmd("subvolume", "create", self.volname, subvolume, "--group_name", group)

        # Authorize 'guestclient_1' to access the subvolume.
        self._fs_cmd("subvolume", "authorize", self.volname, subvolume, guestclient_1["auth_id"],
                     "--group_name", group, "--tenant_id", guestclient_1["tenant_id"])

        subvol_path = self._fs_cmd("subvolume", "getpath", self.volname, subvolume,
                                   "--group_name", group).rstrip()

        # Update caps for guestclient_1 out of band
        out = self.fs.mon_manager.raw_cluster_cmd(
            "auth", "caps", "client.guest1",
            "mds", "allow rw path=/volumes/{0}, allow rw path={1}".format(group, subvol_path),
            "osd", "allow rw pool=cephfs_data",
            "mon", "allow r",
            "mgr", "allow *"
        )

        # Deauthorize guestclient_1
        self._fs_cmd("subvolume", "deauthorize", self.volname, subvolume, auth_id, "--group_name", group)

        # Validate the caps of guestclient_1 after deauthorize. It should not have deleted
        # guestclient_1. The mgr and mds caps, which were updated out of band, should be present.
        out = json.loads(self.fs.mon_manager.raw_cluster_cmd("auth", "get", "client.guest1", "--format=json-pretty"))

        self.assertEqual("client.guest1", out[0]["entity"])
        self.assertEqual("allow rw path=/volumes/{0}".format(group), out[0]["caps"]["mds"])
        self.assertEqual("allow *", out[0]["caps"]["mgr"])
        self.assertNotIn("osd", out[0]["caps"])

        # clean up
        out = self.fs.mon_manager.raw_cluster_cmd("auth", "rm", "client.guest1")
        self._fs_cmd("subvolume", "rm", self.volname, subvolume, "--group_name", group)
        self._fs_cmd("subvolumegroup", "rm", self.volname, group)
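
    # The behaviour pinned down above: deauthorize strips only what
    # mgr/volumes granted (the osd cap and the subvolume-path mds cap), while
    # caps added out of band (the group-path mds cap, the mgr cap) and the
    # auth entity itself survive.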

    def test_recover_auth_metadata_during_authorize(self):
        """
        That the auth metadata manager can recover from partial auth updates using
        metadata files, which store auth info and its update status info. This
        test validates the recovery during authorize.
        """
        guest_mount = self.mount_b

        subvolume = self._generate_random_subvolume_name()
        group = self._generate_random_group_name()

        auth_id = "guest1"
        guestclient_1 = {
            "auth_id": auth_id,
            "tenant_id": "tenant1",
        }

        # create group
        self._fs_cmd("subvolumegroup", "create", self.volname, group)

        # create subvolume in group
        self._fs_cmd("subvolume", "create", self.volname, subvolume, "--group_name", group)

        # Authorize 'guestclient_1' to access the subvolume.
        self._fs_cmd("subvolume", "authorize", self.volname, subvolume, guestclient_1["auth_id"],
                     "--group_name", group, "--tenant_id", guestclient_1["tenant_id"])

        # Check that auth metadata file for auth ID 'guest1', is
        # created on authorizing 'guest1' access to the subvolume.
        auth_metadata_filename = "${0}.meta".format(guestclient_1["auth_id"])
        self.assertIn(auth_metadata_filename, guest_mount.ls("volumes"))
        expected_auth_metadata_content = self._auth_metadata_get(self.mount_a.read_file("volumes/{0}".format(auth_metadata_filename)))

        # Induce partial auth update state by modifying the auth metadata file,
        # and then run authorize again.
        guest_mount.run_shell(['sed', '-i', 's/false/true/g', 'volumes/{0}'.format(auth_metadata_filename)], sudo=True)

        # Authorize 'guestclient_1' to access the subvolume.
        self._fs_cmd("subvolume", "authorize", self.volname, subvolume, guestclient_1["auth_id"],
                     "--group_name", group, "--tenant_id", guestclient_1["tenant_id"])

        auth_metadata_content = self._auth_metadata_get(self.mount_a.read_file("volumes/{0}".format(auth_metadata_filename)))
        self.assertEqual(auth_metadata_content, expected_auth_metadata_content)

        # clean up
        self._fs_cmd("subvolume", "deauthorize", self.volname, subvolume, auth_id, "--group_name", group)
        guest_mount.umount_wait()
        self.fs.mon_manager.raw_cluster_cmd("auth", "rm", "client.guest1")
        self._fs_cmd("subvolume", "rm", self.volname, subvolume, "--group_name", group)
        self._fs_cmd("subvolumegroup", "rm", self.volname, group)

    def test_recover_auth_metadata_during_deauthorize(self):
        """
        That the auth metadata manager can recover from partial auth updates using
        metadata files, which store auth info and its update status info. This
        test validates the recovery during deauthorize.
        """
        guest_mount = self.mount_b

        subvolume1, subvolume2 = self._generate_random_subvolume_name(2)
        group = self._generate_random_group_name()

        guestclient_1 = {
            "auth_id": "guest1",
            "tenant_id": "tenant1",
        }

        # create group
        self._fs_cmd("subvolumegroup", "create", self.volname, group)

        # create subvolumes in group
        self._fs_cmd("subvolume", "create", self.volname, subvolume1, "--group_name", group)
        self._fs_cmd("subvolume", "create", self.volname, subvolume2, "--group_name", group)

        # Authorize 'guestclient_1' to access the subvolume1.
        self._fs_cmd("subvolume", "authorize", self.volname, subvolume1, guestclient_1["auth_id"],
                     "--group_name", group, "--tenant_id", guestclient_1["tenant_id"])

        # Check that auth metadata file for auth ID 'guest1', is
        # created on authorizing 'guest1' access to the subvolume1.
        auth_metadata_filename = "${0}.meta".format(guestclient_1["auth_id"])
        self.assertIn(auth_metadata_filename, guest_mount.ls("volumes"))
        expected_auth_metadata_content = self._auth_metadata_get(self.mount_a.read_file("volumes/{0}".format(auth_metadata_filename)))

        # Authorize 'guestclient_1' to access the subvolume2.
        self._fs_cmd("subvolume", "authorize", self.volname, subvolume2, guestclient_1["auth_id"],
                     "--group_name", group, "--tenant_id", guestclient_1["tenant_id"])

        # Induce partial auth update state by modifying the auth metadata file,
        # and then run de-authorize.
        guest_mount.run_shell(['sed', '-i', 's/false/true/g', 'volumes/{0}'.format(auth_metadata_filename)], sudo=True)

        # Deauthorize 'guestclient_1' from the subvolume2.
        self._fs_cmd("subvolume", "deauthorize", self.volname, subvolume2, guestclient_1["auth_id"],
                     "--group_name", group)

        auth_metadata_content = self._auth_metadata_get(self.mount_a.read_file("volumes/{0}".format(auth_metadata_filename)))
        self.assertEqual(auth_metadata_content, expected_auth_metadata_content)

        # clean up
        self._fs_cmd("subvolume", "deauthorize", self.volname, subvolume1, "guest1", "--group_name", group)
        guest_mount.umount_wait()
        self.fs.mon_manager.raw_cluster_cmd("auth", "rm", "client.guest1")
        self._fs_cmd("subvolume", "rm", self.volname, subvolume1, "--group_name", group)
        self._fs_cmd("subvolume", "rm", self.volname, subvolume2, "--group_name", group)
        self._fs_cmd("subvolumegroup", "rm", self.volname, group)
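
    # Both recovery tests lean on the same trick: entries in the auth metadata
    # file carry boolean "dirty" update-status markers, and the
    # `sed 's/false/true/g'` rewrite flags every entry as a half-finished
    # update. The next authorize/deauthorize is then expected to restore the
    # file to the clean content captured before tampering.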

    def test_update_old_style_auth_metadata_to_new_during_authorize(self):
        """
        CephVolumeClient stores the subvolume data in the auth metadata file
        under a 'volumes' key, as there was no subvolume namespace. That does
        not make sense with mgr/volumes. This test validates the transparent
        update of the 'volumes' key to 'subvolumes' in the auth metadata file
        during authorize.
        """
        guest_mount = self.mount_b

        subvolume1, subvolume2 = self._generate_random_subvolume_name(2)
        group = self._generate_random_group_name()

        auth_id = "guest1"
        guestclient_1 = {
            "auth_id": auth_id,
            "tenant_id": "tenant1",
        }

        # create group
        self._fs_cmd("subvolumegroup", "create", self.volname, group)

        # create subvolumes in group
        self._fs_cmd("subvolume", "create", self.volname, subvolume1, "--group_name", group)
        self._fs_cmd("subvolume", "create", self.volname, subvolume2, "--group_name", group)

        # Authorize 'guestclient_1' to access the subvolume1.
        self._fs_cmd("subvolume", "authorize", self.volname, subvolume1, guestclient_1["auth_id"],
                     "--group_name", group, "--tenant_id", guestclient_1["tenant_id"])

        # Check that auth metadata file for auth ID 'guest1', is
        # created on authorizing 'guest1' access to the subvolume1.
        auth_metadata_filename = "${0}.meta".format(guestclient_1["auth_id"])
        self.assertIn(auth_metadata_filename, guest_mount.ls("volumes"))

        # Replace 'subvolumes' with 'volumes', emulating an old-style auth-metadata file
        guest_mount.run_shell(['sed', '-i', 's/subvolumes/volumes/g', 'volumes/{0}'.format(auth_metadata_filename)], sudo=True)

        # Authorize 'guestclient_1' to access the subvolume2. This should transparently update 'volumes' to 'subvolumes'
        self._fs_cmd("subvolume", "authorize", self.volname, subvolume2, guestclient_1["auth_id"],
                     "--group_name", group, "--tenant_id", guestclient_1["tenant_id"])

        expected_auth_metadata = {
            "version": 5,
            "compat_version": 6,
            "dirty": False,
            "tenant_id": "tenant1",
            "subvolumes": {
                "{0}/{1}".format(group, subvolume1): {
                    "dirty": False,
                    "access_level": "rw"
                },
                "{0}/{1}".format(group, subvolume2): {
                    "dirty": False,
                    "access_level": "rw"
                }
            }
        }

        auth_metadata = self._auth_metadata_get(guest_mount.read_file("volumes/{0}".format(auth_metadata_filename)))

        self.assertGreaterEqual(auth_metadata["version"], expected_auth_metadata["version"])
        del expected_auth_metadata["version"]
        del auth_metadata["version"]
        self.assertEqual(expected_auth_metadata, auth_metadata)

        # clean up
        self._fs_cmd("subvolume", "deauthorize", self.volname, subvolume1, auth_id, "--group_name", group)
        self._fs_cmd("subvolume", "deauthorize", self.volname, subvolume2, auth_id, "--group_name", group)
        guest_mount.umount_wait()
        self.fs.mon_manager.raw_cluster_cmd("auth", "rm", "client.guest1")
        self._fs_cmd("subvolume", "rm", self.volname, subvolume1, "--group_name", group)
        self._fs_cmd("subvolume", "rm", self.volname, subvolume2, "--group_name", group)
        self._fs_cmd("subvolumegroup", "rm", self.volname, group)

    def test_update_old_style_auth_metadata_to_new_during_deauthorize(self):
        """
        CephVolumeClient stores the subvolume data in the auth metadata file
        under a 'volumes' key, as there was no subvolume namespace. That does
        not make sense with mgr/volumes. This test validates the transparent
        update of the 'volumes' key to 'subvolumes' in the auth metadata file
        during deauthorize.
        """
        guest_mount = self.mount_b

        subvolume1, subvolume2 = self._generate_random_subvolume_name(2)
        group = self._generate_random_group_name()

        auth_id = "guest1"
        guestclient_1 = {
            "auth_id": auth_id,
            "tenant_id": "tenant1",
        }

        # create group
        self._fs_cmd("subvolumegroup", "create", self.volname, group)

        # create subvolumes in group
        self._fs_cmd("subvolume", "create", self.volname, subvolume1, "--group_name", group)
        self._fs_cmd("subvolume", "create", self.volname, subvolume2, "--group_name", group)

        # Authorize 'guestclient_1' to access the subvolume1.
        self._fs_cmd("subvolume", "authorize", self.volname, subvolume1, guestclient_1["auth_id"],
                     "--group_name", group, "--tenant_id", guestclient_1["tenant_id"])

        # Authorize 'guestclient_1' to access the subvolume2.
        self._fs_cmd("subvolume", "authorize", self.volname, subvolume2, guestclient_1["auth_id"],
                     "--group_name", group, "--tenant_id", guestclient_1["tenant_id"])

        # Check that auth metadata file for auth ID 'guest1', is created.
        auth_metadata_filename = "${0}.meta".format(guestclient_1["auth_id"])
        self.assertIn(auth_metadata_filename, guest_mount.ls("volumes"))

        # Replace 'subvolumes' with 'volumes', emulating an old-style auth-metadata file
        guest_mount.run_shell(['sed', '-i', 's/subvolumes/volumes/g', 'volumes/{0}'.format(auth_metadata_filename)], sudo=True)

        # Deauthorize 'guestclient_1' from the subvolume2. This should transparently update 'volumes' to 'subvolumes'
        self._fs_cmd("subvolume", "deauthorize", self.volname, subvolume2, auth_id, "--group_name", group)

        expected_auth_metadata = {
            "version": 5,
            "compat_version": 6,
            "dirty": False,
            "tenant_id": "tenant1",
            "subvolumes": {
                "{0}/{1}".format(group, subvolume1): {
                    "dirty": False,
                    "access_level": "rw"
                }
            }
        }

        auth_metadata = self._auth_metadata_get(guest_mount.read_file("volumes/{0}".format(auth_metadata_filename)))

        self.assertGreaterEqual(auth_metadata["version"], expected_auth_metadata["version"])
        del expected_auth_metadata["version"]
        del auth_metadata["version"]
        self.assertEqual(expected_auth_metadata, auth_metadata)

        # clean up
        self._fs_cmd("subvolume", "deauthorize", self.volname, subvolume1, auth_id, "--group_name", group)
        guest_mount.umount_wait()
        self.fs.mon_manager.raw_cluster_cmd("auth", "rm", "client.guest1")
        self._fs_cmd("subvolume", "rm", self.volname, subvolume1, "--group_name", group)
        self._fs_cmd("subvolume", "rm", self.volname, subvolume2, "--group_name", group)
        self._fs_cmd("subvolumegroup", "rm", self.volname, group)

    def test_subvolume_evict_client(self):
        """
        That a subvolume client can be evicted based on the auth ID
        """
        subvolumes = self._generate_random_subvolume_name(2)
        group = self._generate_random_group_name()

        # create group
        self._fs_cmd("subvolumegroup", "create", self.volname, group)

        # mounts[0] and mounts[1] would be used as guests to mount the volumes/shares.
        for i in range(0, 2):
            self.mounts[i].umount_wait()
        guest_mounts = (self.mounts[0], self.mounts[1])
        auth_id = "guest"
        guestclient_1 = {
            "auth_id": auth_id,
            "tenant_id": "tenant1",
        }

        # Create two subvolumes. Authorize 'guest' auth ID to mount the two
        # subvolumes. Mount the two subvolumes. Write data to the volumes.
        for i in range(2):
            # create subvolume
            self._fs_cmd("subvolume", "create", self.volname, subvolumes[i], "--group_name", group, "--mode=777")

            # authorize guest authID read-write access to subvolume
            key = self._fs_cmd("subvolume", "authorize", self.volname, subvolumes[i], guestclient_1["auth_id"],
                               "--group_name", group, "--tenant_id", guestclient_1["tenant_id"])

            mount_path = self._fs_cmd("subvolume", "getpath", self.volname, subvolumes[i],
                                      "--group_name", group).rstrip()
            # configure credentials for guest client
            self._configure_guest_auth(guest_mounts[i], auth_id, key)

            # mount the subvolume, and write to it
            guest_mounts[i].mount_wait(cephfs_mntpt=mount_path)
            guest_mounts[i].write_n_mb("data.bin", 1)

        # Evict client, guest_mounts[0], which uses auth ID 'guest' and has
        # mounted the first subvolume.
        self._fs_cmd("subvolume", "evict", self.volname, subvolumes[0], auth_id, "--group_name", group)

        # Evicted guest client, guest_mounts[0], should not be able to do
        # any more metadata ops. It should start failing all operations
        # when it sees that its own address is in the blocklist.
        try:
            guest_mounts[0].write_n_mb("rogue.bin", 1)
        except CommandFailedError:
            pass
        else:
            raise RuntimeError("post-eviction write should have failed!")

        # The blocklisted guest client should now be unmountable
        guest_mounts[0].umount_wait()

        # Guest client, guest_mounts[1], using the same auth ID 'guest', but
        # having mounted the other subvolume, should be able to use it
        # unaffected.
        guest_mounts[1].write_n_mb("data.bin.1", 1)

        # cleanup
        guest_mounts[1].umount_wait()
        for i in range(2):
            self._fs_cmd("subvolume", "deauthorize", self.volname, subvolumes[i], auth_id, "--group_name", group)
            self._fs_cmd("subvolume", "rm", self.volname, subvolumes[i], "--group_name", group)
        self._fs_cmd("subvolumegroup", "rm", self.volname, group)
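
    # Eviction is scoped to (auth ID, subvolume): the evicted client's address
    # lands in the blocklist, so guest_mounts[0] fails even a simple write,
    # while guest_mounts[1], holding the very same auth ID on the other
    # subvolume, keeps working.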

    def test_subvolume_pin_random(self):
        self.fs.set_max_mds(2)
        self.fs.wait_for_daemons()
        self.config_set('mds', 'mds_export_ephemeral_random', True)

        subvolume = self._generate_random_subvolume_name()
        self._fs_cmd("subvolume", "create", self.volname, subvolume)
        self._fs_cmd("subvolume", "pin", self.volname, subvolume, "random", ".01")
        # no verification

        # remove subvolume
        self._fs_cmd("subvolume", "rm", self.volname, subvolume)

        # verify trash dir is clean
        self._wait_for_trash_empty()
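
    # Background for the pin above: with mds_export_ephemeral_random enabled,
    # "pin ... random .01" sets a probability (here 1%) with which descendant
    # directories are ephemerally pinned to a random MDS rank. There is no
    # cheap deterministic outcome to assert, hence the lack of a verification
    # step.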

    def test_subvolume_resize_fail_invalid_size(self):
        """
        That a subvolume cannot be resized to an invalid size and the quota did not change
        """
        osize = self.DEFAULT_FILE_SIZE*1024*1024
        # create subvolume
        subvolname = self._generate_random_subvolume_name()
        self._fs_cmd("subvolume", "create", self.volname, subvolname, "--size", str(osize))

        # make sure it exists
        subvolpath = self._get_subvolume_path(self.volname, subvolname)
        self.assertNotEqual(subvolpath, None)

        # try to resize the subvolume with an invalid size -10
        nsize = -10
        try:
            self._fs_cmd("subvolume", "resize", self.volname, subvolname, str(nsize))
        except CommandFailedError as ce:
            self.assertEqual(ce.exitstatus, errno.EINVAL, "invalid error code on resize of subvolume with invalid size")
        else:
            self.fail("expected the 'fs subvolume resize' command to fail")

        # verify the quota did not change
        size = int(self.mount_a.getfattr(subvolpath, "ceph.quota.max_bytes"))
        self.assertEqual(size, osize)

        # remove subvolume
        self._fs_cmd("subvolume", "rm", self.volname, subvolname)

        # verify trash dir is clean
        self._wait_for_trash_empty()

    def test_subvolume_resize_fail_zero_size(self):
        """
        That a subvolume cannot be resized to a zero size and the quota did not change
        """
        osize = self.DEFAULT_FILE_SIZE*1024*1024
        # create subvolume
        subvolname = self._generate_random_subvolume_name()
        self._fs_cmd("subvolume", "create", self.volname, subvolname, "--size", str(osize))

        # make sure it exists
        subvolpath = self._get_subvolume_path(self.volname, subvolname)
        self.assertNotEqual(subvolpath, None)

        # try to resize the subvolume with size 0
        nsize = 0
        try:
            self._fs_cmd("subvolume", "resize", self.volname, subvolname, str(nsize))
        except CommandFailedError as ce:
            self.assertEqual(ce.exitstatus, errno.EINVAL, "invalid error code on resize of subvolume with invalid size")
        else:
            self.fail("expected the 'fs subvolume resize' command to fail")

        # verify the quota did not change
        size = int(self.mount_a.getfattr(subvolpath, "ceph.quota.max_bytes"))
        self.assertEqual(size, osize)

        # remove subvolume
        self._fs_cmd("subvolume", "rm", self.volname, subvolname)

        # verify trash dir is clean
        self._wait_for_trash_empty()

    def test_subvolume_resize_quota_lt_used_size(self):
        """
        That a subvolume can be resized to a size smaller than the current used size
        and the resulting quota matches the expected size.
        """
        osize = self.DEFAULT_FILE_SIZE*1024*1024*20
        # create subvolume
        subvolname = self._generate_random_subvolume_name()
        self._fs_cmd("subvolume", "create", self.volname, subvolname, "--size", str(osize), "--mode=777")

        # make sure it exists
        subvolpath = self._get_subvolume_path(self.volname, subvolname)
        self.assertNotEqual(subvolpath, None)

        # create one file of 10MB
        file_size = self.DEFAULT_FILE_SIZE*10
        number_of_files = 1

        log.debug("filling subvolume {0} with {1} file of size {2}MB".format(subvolname,
                                                                             number_of_files,
                                                                             file_size))
        filename = "{0}.{1}".format(TestVolumes.TEST_FILE_NAME_PREFIX, self.DEFAULT_NUMBER_OF_FILES+1)
        self.mount_a.write_n_mb(os.path.join(subvolpath, filename), file_size)

        usedsize = int(self.mount_a.getfattr(subvolpath, "ceph.dir.rbytes"))
        susedsize = int(self.mount_a.run_shell(['stat', '-c' '%s', subvolpath]).stdout.getvalue().strip())
        if isinstance(self.mount_a, FuseMount):
            # kclient dir does not have size==rbytes
            self.assertEqual(usedsize, susedsize)

        # shrink the subvolume
        nsize = usedsize // 2
        try:
            self._fs_cmd("subvolume", "resize", self.volname, subvolname, str(nsize))
        except CommandFailedError:
            self.fail("expected the 'fs subvolume resize' command to succeed")

        # verify the quota
        size = int(self.mount_a.getfattr(subvolpath, "ceph.quota.max_bytes"))
        self.assertEqual(size, nsize)

        # remove subvolume
        self._fs_cmd("subvolume", "rm", self.volname, subvolname)

        # verify trash dir is clean
        self._wait_for_trash_empty()
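
    # The resize tests observe quota and usage through CephFS virtual xattrs:
    #   ceph.quota.max_bytes - the byte quota that `fs subvolume resize` sets
    #   ceph.dir.rbytes      - recursive byte usage of the directory tree
    # i.e. the quota check above is equivalent to running
    #   getfattr -n ceph.quota.max_bytes <subvolpath>
    # on the client mount.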

    def test_subvolume_resize_fail_quota_lt_used_size_no_shrink(self):
        """
        That a subvolume cannot be resized to a size smaller than the current used size
        when --no_shrink is given and the quota did not change.
        """
        osize = self.DEFAULT_FILE_SIZE*1024*1024*20
        # create subvolume
        subvolname = self._generate_random_subvolume_name()
        self._fs_cmd("subvolume", "create", self.volname, subvolname, "--size", str(osize), "--mode=777")

        # make sure it exists
        subvolpath = self._get_subvolume_path(self.volname, subvolname)
        self.assertNotEqual(subvolpath, None)

        # create one file of 10MB
        file_size = self.DEFAULT_FILE_SIZE*10
        number_of_files = 1

        log.debug("filling subvolume {0} with {1} file of size {2}MB".format(subvolname,
                                                                             number_of_files,
                                                                             file_size))
        filename = "{0}.{1}".format(TestVolumes.TEST_FILE_NAME_PREFIX, self.DEFAULT_NUMBER_OF_FILES+2)
        self.mount_a.write_n_mb(os.path.join(subvolpath, filename), file_size)

        usedsize = int(self.mount_a.getfattr(subvolpath, "ceph.dir.rbytes"))
        susedsize = int(self.mount_a.run_shell(['stat', '-c' '%s', subvolpath]).stdout.getvalue().strip())
        if isinstance(self.mount_a, FuseMount):
            # kclient dir does not have size==rbytes
            self.assertEqual(usedsize, susedsize)

        # shrink the subvolume
        nsize = usedsize // 2
        try:
            self._fs_cmd("subvolume", "resize", self.volname, subvolname, str(nsize), "--no_shrink")
        except CommandFailedError as ce:
            self.assertEqual(ce.exitstatus, errno.EINVAL, "invalid error code on resize of subvolume with invalid size")
        else:
            self.fail("expected the 'fs subvolume resize' command to fail")

        # verify the quota did not change
        size = int(self.mount_a.getfattr(subvolpath, "ceph.quota.max_bytes"))
        self.assertEqual(size, osize)

        # remove subvolume
        self._fs_cmd("subvolume", "rm", self.volname, subvolname)

        # verify trash dir is clean
        self._wait_for_trash_empty()

    def test_subvolume_resize_expand_on_full_subvolume(self):
        """
        That the subvolume can be expanded from a full subvolume and future writes succeed.
        """
        osize = self.DEFAULT_FILE_SIZE*1024*1024*10
        # create subvolume of quota 10MB and make sure it exists
        subvolname = self._generate_random_subvolume_name()
        self._fs_cmd("subvolume", "create", self.volname, subvolname, "--size", str(osize), "--mode=777")
        subvolpath = self._get_subvolume_path(self.volname, subvolname)
        self.assertNotEqual(subvolpath, None)

        # create one file of size 10MB and write
        file_size = self.DEFAULT_FILE_SIZE*10
        number_of_files = 1

        log.debug("filling subvolume {0} with {1} file of size {2}MB".format(subvolname,
                                                                             number_of_files,
                                                                             file_size))
        filename = "{0}.{1}".format(TestVolumes.TEST_FILE_NAME_PREFIX, self.DEFAULT_NUMBER_OF_FILES+3)
        self.mount_a.write_n_mb(os.path.join(subvolpath, filename), file_size)

        # create a file of size 5MB and try to write more
        file_size = file_size // 2
        number_of_files = 1

        log.debug("filling subvolume {0} with {1} file of size {2}MB".format(subvolname,
                                                                             number_of_files,
                                                                             file_size))
        filename = "{0}.{1}".format(TestVolumes.TEST_FILE_NAME_PREFIX, self.DEFAULT_NUMBER_OF_FILES+4)
        try:
            self.mount_a.write_n_mb(os.path.join(subvolpath, filename), file_size)
        except CommandFailedError:
            # Not able to write. So expand the subvolume more and try writing the 5MB file again
            nsize = osize*2
            self._fs_cmd("subvolume", "resize", self.volname, subvolname, str(nsize))
            try:
                self.mount_a.write_n_mb(os.path.join(subvolpath, filename), file_size)
            except CommandFailedError:
                self.fail("expected filling subvolume {0} with {1} file of size {2}MB "
                          "to succeed".format(subvolname, number_of_files, file_size))
        else:
            self.fail("expected filling subvolume {0} with {1} file of size {2}MB "
                      "to fail".format(subvolname, number_of_files, file_size))

        # remove subvolume
        self._fs_cmd("subvolume", "rm", self.volname, subvolname)

        # verify trash dir is clean
        self._wait_for_trash_empty()

    def test_subvolume_resize_infinite_size(self):
        """
        That a subvolume can be resized to an infinite size by unsetting its quota.
        """
        # create subvolume
        subvolname = self._generate_random_subvolume_name()
        self._fs_cmd("subvolume", "create", self.volname, subvolname, "--size",
                     str(self.DEFAULT_FILE_SIZE*1024*1024))

        # make sure it exists
        subvolpath = self._get_subvolume_path(self.volname, subvolname)
        self.assertNotEqual(subvolpath, None)

        # resize to infinite size
        self._fs_cmd("subvolume", "resize", self.volname, subvolname, "inf")

        # verify that the quota is None
        size = self.mount_a.getfattr(subvolpath, "ceph.quota.max_bytes")
        self.assertEqual(size, None)

        # remove subvolume
        self._fs_cmd("subvolume", "rm", self.volname, subvolname)

        # verify trash dir is clean
        self._wait_for_trash_empty()
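
    # Resizing to "inf" unsets the quota rather than storing a huge number:
    # afterwards the ceph.quota.max_bytes vxattr is no longer set, which is
    # why getfattr is expected to return None above.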

    def test_subvolume_resize_infinite_size_future_writes(self):
        """
        That a subvolume can be resized to an infinite size and future writes succeed.
        """
        # create subvolume
        subvolname = self._generate_random_subvolume_name()
        self._fs_cmd("subvolume", "create", self.volname, subvolname, "--size",
                     str(self.DEFAULT_FILE_SIZE*1024*1024*5), "--mode=777")

        # make sure it exists
        subvolpath = self._get_subvolume_path(self.volname, subvolname)
        self.assertNotEqual(subvolpath, None)

        # resize to infinite size
        self._fs_cmd("subvolume", "resize", self.volname, subvolname, "inf")

        # verify that the quota is None
        size = self.mount_a.getfattr(subvolpath, "ceph.quota.max_bytes")
        self.assertEqual(size, None)

        # create one file of 10MB and try to write
        file_size = self.DEFAULT_FILE_SIZE*10
        number_of_files = 1

        log.debug("filling subvolume {0} with {1} file of size {2}MB".format(subvolname,
                                                                             number_of_files,
                                                                             file_size))
        filename = "{0}.{1}".format(TestVolumes.TEST_FILE_NAME_PREFIX, self.DEFAULT_NUMBER_OF_FILES+5)

        try:
            self.mount_a.write_n_mb(os.path.join(subvolpath, filename), file_size)
        except CommandFailedError:
            self.fail("expected filling subvolume {0} with {1} file of size {2}MB "
                      "to succeed".format(subvolname, number_of_files, file_size))

        # remove subvolume
        self._fs_cmd("subvolume", "rm", self.volname, subvolname)

        # verify trash dir is clean
        self._wait_for_trash_empty()

    def test_subvolume_rm_force(self):
        # test removing non-existing subvolume with --force
        subvolume = self._generate_random_subvolume_name()
        try:
            self._fs_cmd("subvolume", "rm", self.volname, subvolume, "--force")
        except CommandFailedError:
            self.fail("expected the 'fs subvolume rm --force' command to succeed")

    def test_subvolume_shrink(self):
        """
        That a subvolume can be shrunk in size and its quota matches the expected size.
        """
        # create subvolume
        subvolname = self._generate_random_subvolume_name()
        osize = self.DEFAULT_FILE_SIZE*1024*1024
        self._fs_cmd("subvolume", "create", self.volname, subvolname, "--size", str(osize))

        # make sure it exists
        subvolpath = self._get_subvolume_path(self.volname, subvolname)
        self.assertNotEqual(subvolpath, None)

        # shrink the subvolume
        nsize = osize // 2
        self._fs_cmd("subvolume", "resize", self.volname, subvolname, str(nsize))

        # verify the quota
        size = int(self.mount_a.getfattr(subvolpath, "ceph.quota.max_bytes"))
        self.assertEqual(size, nsize)

        # remove subvolume
        self._fs_cmd("subvolume", "rm", self.volname, subvolname)

        # verify trash dir is clean
        self._wait_for_trash_empty()


class TestSubvolumeGroupSnapshots(TestVolumesHelper):
    """Tests for FS subvolume group snapshot operations."""
    @unittest.skip("skipping subvolumegroup snapshot tests")
    def test_nonexistent_subvolume_group_snapshot_rm(self):
        subvolume = self._generate_random_subvolume_name()
        group = self._generate_random_group_name()
        snapshot = self._generate_random_snapshot_name()

        # create group
        self._fs_cmd("subvolumegroup", "create", self.volname, group)

        # create subvolume in group
        self._fs_cmd("subvolume", "create", self.volname, subvolume, "--group_name", group)

        # snapshot group
        self._fs_cmd("subvolumegroup", "snapshot", "create", self.volname, group, snapshot)

        # remove snapshot
        self._fs_cmd("subvolumegroup", "snapshot", "rm", self.volname, group, snapshot)

        # remove snapshot again; should fail with ENOENT
        try:
            self._fs_cmd("subvolumegroup", "snapshot", "rm", self.volname, group, snapshot)
        except CommandFailedError as ce:
            if ce.exitstatus != errno.ENOENT:
                raise
        else:
            raise RuntimeError("expected the 'fs subvolumegroup snapshot rm' command to fail")

        # remove subvolume
        self._fs_cmd("subvolume", "rm", self.volname, subvolume, group)

        # verify trash dir is clean
        self._wait_for_trash_empty()

        # remove group
        self._fs_cmd("subvolumegroup", "rm", self.volname, group)

    @unittest.skip("skipping subvolumegroup snapshot tests")
    def test_subvolume_group_snapshot_create_and_rm(self):
        subvolume = self._generate_random_subvolume_name()
        group = self._generate_random_group_name()
        snapshot = self._generate_random_snapshot_name()

        # create group
        self._fs_cmd("subvolumegroup", "create", self.volname, group)

        # create subvolume in group
        self._fs_cmd("subvolume", "create", self.volname, subvolume, "--group_name", group)

        # snapshot group
        self._fs_cmd("subvolumegroup", "snapshot", "create", self.volname, group, snapshot)

        # remove snapshot
        self._fs_cmd("subvolumegroup", "snapshot", "rm", self.volname, group, snapshot)

        # remove subvolume
        self._fs_cmd("subvolume", "rm", self.volname, subvolume, group)

        # verify trash dir is clean
        self._wait_for_trash_empty()

        # remove group
        self._fs_cmd("subvolumegroup", "rm", self.volname, group)

    @unittest.skip("skipping subvolumegroup snapshot tests")
    def test_subvolume_group_snapshot_idempotence(self):
        subvolume = self._generate_random_subvolume_name()
        group = self._generate_random_group_name()
        snapshot = self._generate_random_snapshot_name()

        # create group
        self._fs_cmd("subvolumegroup", "create", self.volname, group)

        # create subvolume in group
        self._fs_cmd("subvolume", "create", self.volname, subvolume, "--group_name", group)

        # snapshot group
        self._fs_cmd("subvolumegroup", "snapshot", "create", self.volname, group, snapshot)

        # try creating snapshot w/ same snapshot name -- should be idempotent
        self._fs_cmd("subvolumegroup", "snapshot", "create", self.volname, group, snapshot)

        # remove snapshot
        self._fs_cmd("subvolumegroup", "snapshot", "rm", self.volname, group, snapshot)

        # remove subvolume
        self._fs_cmd("subvolume", "rm", self.volname, subvolume, group)

        # verify trash dir is clean
        self._wait_for_trash_empty()

        # remove group
        self._fs_cmd("subvolumegroup", "rm", self.volname, group)

    @unittest.skip("skipping subvolumegroup snapshot tests")
    def test_subvolume_group_snapshot_ls(self):
        # tests the 'fs subvolumegroup snapshot ls' command

        snapshots = []

        # create group
        group = self._generate_random_group_name()
        self._fs_cmd("subvolumegroup", "create", self.volname, group)

        # create subvolumegroup snapshots
        snapshots = self._generate_random_snapshot_name(3)
        for snapshot in snapshots:
            self._fs_cmd("subvolumegroup", "snapshot", "create", self.volname, group, snapshot)

        subvolgrpsnapshotls = json.loads(self._fs_cmd('subvolumegroup', 'snapshot', 'ls', self.volname, group))
        if len(subvolgrpsnapshotls) == 0:
            raise RuntimeError("Expected the 'fs subvolumegroup snapshot ls' command to list the created subvolume group snapshots")
        else:
            snapshotnames = [snapshot['name'] for snapshot in subvolgrpsnapshotls]
            if collections.Counter(snapshotnames) != collections.Counter(snapshots):
                raise RuntimeError("Error creating or listing subvolume group snapshots")

    @unittest.skip("skipping subvolumegroup snapshot tests")
    def test_subvolume_group_snapshot_rm_force(self):
        # test removing non-existing subvolume group snapshot with --force
        group = self._generate_random_group_name()
        snapshot = self._generate_random_snapshot_name()

        # remove snapshot
        try:
            self._fs_cmd("subvolumegroup", "snapshot", "rm", self.volname, group, snapshot, "--force")
        except CommandFailedError:
            raise RuntimeError("expected the 'fs subvolumegroup snapshot rm --force' command to succeed")

    def test_subvolume_group_snapshot_unsupported_status(self):
        group = self._generate_random_group_name()
        snapshot = self._generate_random_snapshot_name()

        # create group
        self._fs_cmd("subvolumegroup", "create", self.volname, group)

        # snapshot group
        try:
            self._fs_cmd("subvolumegroup", "snapshot", "create", self.volname, group, snapshot)
        except CommandFailedError as ce:
            self.assertEqual(ce.exitstatus, errno.ENOSYS, "invalid error code on subvolumegroup snapshot create")
        else:
            self.fail("expected subvolumegroup snapshot create command to fail")

        # remove group
        self._fs_cmd("subvolumegroup", "rm", self.volname, group)


class TestSubvolumeSnapshots(TestVolumesHelper):
    """Tests for FS subvolume snapshot operations."""
    def test_nonexistent_subvolume_snapshot_rm(self):
        subvolume = self._generate_random_subvolume_name()
        snapshot = self._generate_random_snapshot_name()

        # create subvolume
        self._fs_cmd("subvolume", "create", self.volname, subvolume)

        # snapshot subvolume
        self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolume, snapshot)

        # remove snapshot
        self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume, snapshot)

        # remove snapshot again; should fail with ENOENT
        try:
            self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume, snapshot)
        except CommandFailedError as ce:
            if ce.exitstatus != errno.ENOENT:
                raise
        else:
            raise RuntimeError("expected the 'fs subvolume snapshot rm' command to fail")

        # remove subvolume
        self._fs_cmd("subvolume", "rm", self.volname, subvolume)

        # verify trash dir is clean
        self._wait_for_trash_empty()

    def test_subvolume_snapshot_create_and_rm(self):
        subvolume = self._generate_random_subvolume_name()
        snapshot = self._generate_random_snapshot_name()

        # create subvolume
        self._fs_cmd("subvolume", "create", self.volname, subvolume)

        # snapshot subvolume
        self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolume, snapshot)

        # remove snapshot
        self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume, snapshot)

        # remove subvolume
        self._fs_cmd("subvolume", "rm", self.volname, subvolume)

        # verify trash dir is clean
        self._wait_for_trash_empty()

    def test_subvolume_snapshot_create_idempotence(self):
        subvolume = self._generate_random_subvolume_name()
        snapshot = self._generate_random_snapshot_name()

        # create subvolume
        self._fs_cmd("subvolume", "create", self.volname, subvolume)

        # snapshot subvolume
        self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolume, snapshot)

        # try creating w/ same subvolume snapshot name -- should be idempotent
        self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolume, snapshot)

        # remove snapshot
        self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume, snapshot)

        # remove subvolume
        self._fs_cmd("subvolume", "rm", self.volname, subvolume)

        # verify trash dir is clean
        self._wait_for_trash_empty()

    def test_subvolume_snapshot_info(self):
        """
        tests the 'fs subvolume snapshot info' command
        """

        snap_md = ["created_at", "data_pool", "has_pending_clones", "size"]

        subvolume = self._generate_random_subvolume_name()
        snapshot, snap_missing = self._generate_random_snapshot_name(2)

        # create subvolume
        self._fs_cmd("subvolume", "create", self.volname, subvolume, "--mode=777")

        # do some IO
        self._do_subvolume_io(subvolume, number_of_files=1)

        # snapshot subvolume
        self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolume, snapshot)

        snap_info = json.loads(self._get_subvolume_snapshot_info(self.volname, subvolume, snapshot))
        for md in snap_md:
            self.assertIn(md, snap_info, "'{0}' key not present in metadata of snapshot".format(md))
        self.assertEqual(snap_info["has_pending_clones"], "no")

        # snapshot info for non-existent snapshot
        try:
            self._get_subvolume_snapshot_info(self.volname, subvolume, snap_missing)
        except CommandFailedError as ce:
            self.assertEqual(ce.exitstatus, errno.ENOENT, "invalid error code on snapshot info of non-existent snapshot")
        else:
            self.fail("expected snapshot info of non-existent snapshot to fail")

        # remove snapshot
        self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume, snapshot)

        # remove subvolume
        self._fs_cmd("subvolume", "rm", self.volname, subvolume)

        # verify trash dir is clean
        self._wait_for_trash_empty()

    def test_subvolume_snapshot_in_group(self):
        subvolume = self._generate_random_subvolume_name()
        group = self._generate_random_group_name()
        snapshot = self._generate_random_snapshot_name()

        # create group
        self._fs_cmd("subvolumegroup", "create", self.volname, group)

        # create subvolume in group
        self._fs_cmd("subvolume", "create", self.volname, subvolume, "--group_name", group)

        # snapshot subvolume in group
        self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolume, snapshot, group)

        # remove snapshot
        self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume, snapshot, group)

        # remove subvolume
        self._fs_cmd("subvolume", "rm", self.volname, subvolume, group)

        # verify trash dir is clean
        self._wait_for_trash_empty()

        # remove group
        self._fs_cmd("subvolumegroup", "rm", self.volname, group)

    def test_subvolume_snapshot_ls(self):
        # tests the 'fs subvolume snapshot ls' command

        snapshots = []

        # create subvolume
        subvolume = self._generate_random_subvolume_name()
        self._fs_cmd("subvolume", "create", self.volname, subvolume)

        # create subvolume snapshots
        snapshots = self._generate_random_snapshot_name(3)
        for snapshot in snapshots:
            self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolume, snapshot)

        subvolsnapshotls = json.loads(self._fs_cmd('subvolume', 'snapshot', 'ls', self.volname, subvolume))
        if len(subvolsnapshotls) == 0:
            self.fail("Expected the 'fs subvolume snapshot ls' command to list the created subvolume snapshots")
        else:
            snapshotnames = [snapshot['name'] for snapshot in subvolsnapshotls]
            if collections.Counter(snapshotnames) != collections.Counter(snapshots):
                self.fail("Error creating or listing subvolume snapshots")

        # remove snapshots
        for snapshot in snapshots:
            self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume, snapshot)

        # remove subvolume
        self._fs_cmd("subvolume", "rm", self.volname, subvolume)

        # verify trash dir is clean
        self._wait_for_trash_empty()

    def test_subvolume_inherited_snapshot_ls(self):
        # tests the scenario where the 'fs subvolume snapshot ls' command
        # should not list inherited snapshots created as part of a snapshot
        # at ancestral level

        snapshots = []
        subvolume = self._generate_random_subvolume_name()
        group = self._generate_random_group_name()
        snap_count = 3

        # create group
        self._fs_cmd("subvolumegroup", "create", self.volname, group)

        # create subvolume in group
        self._fs_cmd("subvolume", "create", self.volname, subvolume, "--group_name", group)

        # create subvolume snapshots
        snapshots = self._generate_random_snapshot_name(snap_count)
        for snapshot in snapshots:
            self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolume, snapshot, group)

        # Create snapshot at ancestral level
        ancestral_snappath1 = os.path.join(".", "volumes", group, ".snap", "ancestral_snap_1")
        ancestral_snappath2 = os.path.join(".", "volumes", group, ".snap", "ancestral_snap_2")
        self.mount_a.run_shell(['mkdir', '-p', ancestral_snappath1, ancestral_snappath2], sudo=True)

        subvolsnapshotls = json.loads(self._fs_cmd('subvolume', 'snapshot', 'ls', self.volname, subvolume, group))
        self.assertEqual(len(subvolsnapshotls), snap_count)

        # remove ancestral snapshots
        self.mount_a.run_shell(['rmdir', ancestral_snappath1, ancestral_snappath2], sudo=True)

        # remove subvolume snapshots
        for snapshot in snapshots:
            self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume, snapshot, group)

        # remove subvolume
        self._fs_cmd("subvolume", "rm", self.volname, subvolume, group)

        # verify trash dir is clean
        self._wait_for_trash_empty()

        # remove group
        self._fs_cmd("subvolumegroup", "rm", self.volname, group)

    def test_subvolume_inherited_snapshot_info(self):
        """
        tests the scenario where the 'fs subvolume snapshot info' command
        should fail for inherited snapshots created as part of a snapshot
        at ancestral level
        """
        subvolume = self._generate_random_subvolume_name()
        group = self._generate_random_group_name()

        # create group
        self._fs_cmd("subvolumegroup", "create", self.volname, group)

        # create subvolume in group
        self._fs_cmd("subvolume", "create", self.volname, subvolume, "--group_name", group)

        # Create snapshot at ancestral level
        ancestral_snap_name = "ancestral_snap_1"
        ancestral_snappath1 = os.path.join(".", "volumes", group, ".snap", ancestral_snap_name)
        self.mount_a.run_shell(['mkdir', '-p', ancestral_snappath1], sudo=True)

        # Validate existence of inherited snapshot
        group_path = os.path.join(".", "volumes", group)
        inode_number_group_dir = int(self.mount_a.run_shell(['stat', '-c' '%i', group_path]).stdout.getvalue().strip())
        inherited_snap = "_{0}_{1}".format(ancestral_snap_name, inode_number_group_dir)
        inherited_snappath = os.path.join(".", "volumes", group, subvolume, ".snap", inherited_snap)
        self.mount_a.run_shell(['ls', inherited_snappath])

        # snapshot info on inherited snapshot
        try:
            self._get_subvolume_snapshot_info(self.volname, subvolume, inherited_snap, group)
        except CommandFailedError as ce:
            self.assertEqual(ce.exitstatus, errno.EINVAL, "invalid error code on snapshot info of inherited snapshot")
        else:
            self.fail("expected snapshot info of inherited snapshot to fail")

        # remove ancestral snapshots
        self.mount_a.run_shell(['rmdir', ancestral_snappath1], sudo=True)

        # remove subvolume
        self._fs_cmd("subvolume", "rm", self.volname, subvolume, "--group_name", group)

        # verify trash dir is clean
        self._wait_for_trash_empty()

        # remove group
        self._fs_cmd("subvolumegroup", "rm", self.volname, group)
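
    # Naming convention relied on by the two inherited-snapshot tests: a
    # group-level snapshot surfaces inside a member subvolume's .snap
    # directory as "_<ancestral snapshot name>_<inode number of the group
    # directory>". Such snapshots are visible but not owned by the subvolume,
    # which is why snapshot info and rm reject them with EINVAL.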

    def test_subvolume_inherited_snapshot_rm(self):
        """
        tests the scenario where the 'fs subvolume snapshot rm' command
        should fail for inherited snapshots created as part of a snapshot
        at ancestral level
        """
        subvolume = self._generate_random_subvolume_name()
        group = self._generate_random_group_name()

        # create group
        self._fs_cmd("subvolumegroup", "create", self.volname, group)

        # create subvolume in group
        self._fs_cmd("subvolume", "create", self.volname, subvolume, "--group_name", group)

        # Create snapshot at ancestral level
        ancestral_snap_name = "ancestral_snap_1"
        ancestral_snappath1 = os.path.join(".", "volumes", group, ".snap", ancestral_snap_name)
        self.mount_a.run_shell(['mkdir', '-p', ancestral_snappath1], sudo=True)

        # Validate existence of inherited snap
        group_path = os.path.join(".", "volumes", group)
        inode_number_group_dir = int(self.mount_a.run_shell(['stat', '-c' '%i', group_path]).stdout.getvalue().strip())
        inherited_snap = "_{0}_{1}".format(ancestral_snap_name, inode_number_group_dir)
        inherited_snappath = os.path.join(".", "volumes", group, subvolume, ".snap", inherited_snap)
        self.mount_a.run_shell(['ls', inherited_snappath])

        # inherited snapshot should not be deletable
        try:
            self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume, inherited_snap, "--group_name", group)
        except CommandFailedError as ce:
            self.assertEqual(ce.exitstatus, errno.EINVAL, msg="invalid error code when removing inherited snapshot")
        else:
            self.fail("expected removing inherited snapshot to fail")

        # remove ancestral snapshots
        self.mount_a.run_shell(['rmdir', ancestral_snappath1], sudo=True)

        # remove subvolume
        self._fs_cmd("subvolume", "rm", self.volname, subvolume, group)

        # verify trash dir is clean
        self._wait_for_trash_empty()

        # remove group
        self._fs_cmd("subvolumegroup", "rm", self.volname, group)
    def test_subvolume_subvolumegroup_snapshot_name_conflict(self):
        """
        tests the scenario where creation of subvolume snapshot name
        with same name as its subvolumegroup snapshot name should
        fail
        """
        subvolume = self._generate_random_subvolume_name()
        group = self._generate_random_group_name()
        group_snapshot = self._generate_random_snapshot_name()

        # create group
        self._fs_cmd("subvolumegroup", "create", self.volname, group)

        # create subvolume in group
        self._fs_cmd("subvolume", "create", self.volname, subvolume, "--group_name", group)

        # Create subvolumegroup snapshot
        group_snapshot_path = os.path.join(".", "volumes", group, ".snap", group_snapshot)
        self.mount_a.run_shell(['mkdir', '-p', group_snapshot_path], sudo=True)

        # Validate existence of subvolumegroup snapshot
        self.mount_a.run_shell(['ls', group_snapshot_path])

        # Creation of subvolume snapshot with its subvolumegroup snapshot name should fail
        try:
            self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolume, group_snapshot, "--group_name", group)
        except CommandFailedError as ce:
            self.assertEqual(ce.exitstatus, errno.EINVAL, msg="invalid error code when creating subvolume snapshot with same name as subvolume group snapshot")
        else:
            self.fail("expected subvolume snapshot creation with same name as subvolumegroup snapshot to fail")

        # remove subvolumegroup snapshot
        self.mount_a.run_shell(['rmdir', group_snapshot_path], sudo=True)

        # remove subvolume
        self._fs_cmd("subvolume", "rm", self.volname, subvolume, group)

        # verify trash dir is clean
        self._wait_for_trash_empty()

        # remove group
        self._fs_cmd("subvolumegroup", "rm", self.volname, group)

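    # The group snapshots above are taken through CephFS's snapshot-directory
    # mechanism rather than the volumes CLI: creating a directory under
    # "<dir>/.snap" snapshots <dir>, and removing it deletes the snapshot.
    # A minimal shell sketch, with paths relative to the client mount root
    # as in the tests:
    #
    #   mkdir ./volumes/<group>/.snap/<snap_name>    # take group snapshot
    #   rmdir ./volumes/<group>/.snap/<snap_name>    # delete group snapshot
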
    def test_subvolume_retain_snapshot_invalid_recreate(self):
        """
        ensure retained subvolume recreate does not leave any incarnations in the subvolume and trash
        """
        subvolume = self._generate_random_subvolume_name()
        snapshot = self._generate_random_snapshot_name()

        # create subvolume
        self._fs_cmd("subvolume", "create", self.volname, subvolume)

        # snapshot subvolume
        self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolume, snapshot)

        # remove with snapshot retention
        self._fs_cmd("subvolume", "rm", self.volname, subvolume, "--retain-snapshots")

        # recreate subvolume with an invalid pool
        data_pool = "invalid_pool"
        try:
            self._fs_cmd("subvolume", "create", self.volname, subvolume, "--pool_layout", data_pool)
        except CommandFailedError as ce:
            self.assertEqual(ce.exitstatus, errno.EINVAL, "invalid error code on recreate of subvolume with invalid poolname")
        else:
            self.fail("expected recreate of subvolume with invalid poolname to fail")

        # fetch info
        subvol_info = json.loads(self._fs_cmd("subvolume", "info", self.volname, subvolume))
        self.assertEqual(subvol_info["state"], "snapshot-retained",
                         msg="expected state to be 'snapshot-retained', found '{0}'".format(subvol_info["state"]))

        # getpath of a retained subvolume should fail
        try:
            self._fs_cmd("subvolume", "getpath", self.volname, subvolume)
        except CommandFailedError as ce:
            self.assertEqual(ce.exitstatus, errno.ENOENT, "invalid error code on getpath of subvolume with retained snapshots")
        else:
            self.fail("expected getpath of subvolume with retained snapshots to fail")

        # remove snapshot (should remove volume)
        self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume, snapshot)

        # verify trash dir is clean
        self._wait_for_trash_empty()

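    # For reference, the snapshot-retention flow these tests drive maps to
    # the following CLI sequence (a sketch; assumes the usual "ceph fs ..."
    # entry point that _fs_cmd wraps):
    #
    #   ceph fs subvolume snapshot create <vol> <subvol> <snap>
    #   ceph fs subvolume rm <vol> <subvol> --retain-snapshots
    #   ceph fs subvolume info <vol> <subvol>   # -> "state": "snapshot-retained"
    #
    # Removing the last retained snapshot then removes the subvolume itself.
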
    def test_subvolume_retain_snapshot_recreate_subvolume(self):
        """
        ensure a retained subvolume can be recreated and further snapshotted
        """
        snap_md = ["created_at", "data_pool", "has_pending_clones", "size"]

        subvolume = self._generate_random_subvolume_name()
        snapshot1, snapshot2 = self._generate_random_snapshot_name(2)

        # create subvolume
        self._fs_cmd("subvolume", "create", self.volname, subvolume)

        # snapshot subvolume
        self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolume, snapshot1)

        # remove with snapshot retention
        self._fs_cmd("subvolume", "rm", self.volname, subvolume, "--retain-snapshots")

        # fetch info
        subvol_info = json.loads(self._fs_cmd("subvolume", "info", self.volname, subvolume))
        self.assertEqual(subvol_info["state"], "snapshot-retained",
                         msg="expected state to be 'snapshot-retained', found '{0}'".format(subvol_info["state"]))

        # recreate retained subvolume
        self._fs_cmd("subvolume", "create", self.volname, subvolume)

        # fetch info
        subvol_info = json.loads(self._fs_cmd("subvolume", "info", self.volname, subvolume))
        self.assertEqual(subvol_info["state"], "complete",
                         msg="expected state to be 'complete', found '{0}'".format(subvol_info["state"]))

        # snapshot info (older snapshot)
        snap_info = json.loads(self._get_subvolume_snapshot_info(self.volname, subvolume, snapshot1))
        for md in snap_md:
            self.assertIn(md, snap_info, "'{0}' key not present in metadata of snapshot".format(md))
        self.assertEqual(snap_info["has_pending_clones"], "no")

        # snap-create (new snapshot)
        self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolume, snapshot2)

        # remove with retain snapshots
        self._fs_cmd("subvolume", "rm", self.volname, subvolume, "--retain-snapshots")

        # list snapshots
        subvolsnapshotls = json.loads(self._fs_cmd('subvolume', 'snapshot', 'ls', self.volname, subvolume))
        self.assertEqual(len(subvolsnapshotls), 2, "Expected the 'fs subvolume snapshot ls' command to list the"
                         " created subvolume snapshots")
        snapshotnames = [snapshot['name'] for snapshot in subvolsnapshotls]
        for snap in [snapshot1, snapshot2]:
            self.assertIn(snap, snapshotnames, "Missing snapshot '{0}' in snapshot list".format(snap))

        # remove snapshots (should remove volume)
        self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume, snapshot1)
        self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume, snapshot2)

        # verify list subvolumes returns an empty list
        subvolumels = json.loads(self._fs_cmd('subvolume', 'ls', self.volname))
        self.assertEqual(len(subvolumels), 0)

        # verify trash dir is clean
        self._wait_for_trash_empty()

    def test_subvolume_retain_snapshot_with_snapshots(self):
        """
        ensure retain snapshots based delete of a subvolume with snapshots retains the subvolume
        also test allowed and dis-allowed operations on a retained subvolume
        """
        snap_md = ["created_at", "data_pool", "has_pending_clones", "size"]

        subvolume = self._generate_random_subvolume_name()
        snapshot = self._generate_random_snapshot_name()

        # create subvolume
        self._fs_cmd("subvolume", "create", self.volname, subvolume)

        # snapshot subvolume
        self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolume, snapshot)

        # remove subvolume -- should fail with ENOTEMPTY since it has snapshots
        try:
            self._fs_cmd("subvolume", "rm", self.volname, subvolume)
        except CommandFailedError as ce:
            self.assertEqual(ce.exitstatus, errno.ENOTEMPTY, "invalid error code on rm of retained subvolume with snapshots")
        else:
            self.fail("expected rm of subvolume with retained snapshots to fail")

        # remove with snapshot retention
        self._fs_cmd("subvolume", "rm", self.volname, subvolume, "--retain-snapshots")

        # fetch info
        subvol_info = json.loads(self._fs_cmd("subvolume", "info", self.volname, subvolume))
        self.assertEqual(subvol_info["state"], "snapshot-retained",
                         msg="expected state to be 'snapshot-retained', found '{0}'".format(subvol_info["state"]))

        ## test allowed ops in retained state

        # ls
        subvolumes = json.loads(self._fs_cmd('subvolume', 'ls', self.volname))
        self.assertEqual(len(subvolumes), 1, "subvolume ls count mismatch, expected '1', found {0}".format(len(subvolumes)))
        self.assertEqual(subvolumes[0]['name'], subvolume,
                         "subvolume name mismatch in ls output, expected '{0}', found '{1}'".format(subvolume, subvolumes[0]['name']))

        # snapshot info
        snap_info = json.loads(self._get_subvolume_snapshot_info(self.volname, subvolume, snapshot))
        for md in snap_md:
            self.assertIn(md, snap_info, "'{0}' key not present in metadata of snapshot".format(md))
        self.assertEqual(snap_info["has_pending_clones"], "no")

        # rm --force (allowed but should fail)
        try:
            self._fs_cmd("subvolume", "rm", self.volname, subvolume, "--force")
        except CommandFailedError as ce:
            self.assertEqual(ce.exitstatus, errno.ENOTEMPTY, "invalid error code on rm of subvolume with retained snapshots")
        else:
            self.fail("expected rm of subvolume with retained snapshots to fail")

        # rm (allowed but should fail)
        try:
            self._fs_cmd("subvolume", "rm", self.volname, subvolume)
        except CommandFailedError as ce:
            self.assertEqual(ce.exitstatus, errno.ENOTEMPTY, "invalid error code on rm of subvolume with retained snapshots")
        else:
            self.fail("expected rm of subvolume with retained snapshots to fail")

        ## test disallowed ops

        # getpath
        try:
            self._fs_cmd("subvolume", "getpath", self.volname, subvolume)
        except CommandFailedError as ce:
            self.assertEqual(ce.exitstatus, errno.ENOENT, "invalid error code on getpath of subvolume with retained snapshots")
        else:
            self.fail("expected getpath of subvolume with retained snapshots to fail")

        # resize
        nsize = self.DEFAULT_FILE_SIZE*1024*1024
        try:
            self._fs_cmd("subvolume", "resize", self.volname, subvolume, str(nsize))
        except CommandFailedError as ce:
            self.assertEqual(ce.exitstatus, errno.ENOENT, "invalid error code on resize of subvolume with retained snapshots")
        else:
            self.fail("expected resize of subvolume with retained snapshots to fail")

        # snap-create
        try:
            self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolume, "fail")
        except CommandFailedError as ce:
            self.assertEqual(ce.exitstatus, errno.ENOENT, "invalid error code on snapshot create of subvolume with retained snapshots")
        else:
            self.fail("expected snapshot create of subvolume with retained snapshots to fail")

        # remove snapshot (should remove volume)
        self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume, snapshot)

        # verify list subvolumes returns an empty list
        subvolumels = json.loads(self._fs_cmd('subvolume', 'ls', self.volname))
        self.assertEqual(len(subvolumels), 0)

        # verify trash dir is clean
        self._wait_for_trash_empty()

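    # Summary of the behaviour exercised above: while a subvolume sits in the
    # "snapshot-retained" state, "ls", "info", "snapshot info" and
    # "snapshot rm" keep working (and rm, with or without --force, fails with
    # ENOTEMPTY), whereas path-dependent operations -- "getpath", "resize" and
    # new "snapshot create" -- fail with ENOENT because the backing directory
    # incarnation is gone.
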
    def test_subvolume_retain_snapshot_without_snapshots(self):
        """
        ensure retain snapshots based delete of a subvolume with no snapshots, deletes the subvolume
        """
        subvolume = self._generate_random_subvolume_name()

        # create subvolume
        self._fs_cmd("subvolume", "create", self.volname, subvolume)

        # remove with snapshot retention (should remove volume, no snapshots to retain)
        self._fs_cmd("subvolume", "rm", self.volname, subvolume, "--retain-snapshots")

        # verify list subvolumes returns an empty list
        subvolumels = json.loads(self._fs_cmd('subvolume', 'ls', self.volname))
        self.assertEqual(len(subvolumels), 0)

        # verify trash dir is clean
        self._wait_for_trash_empty()

    def test_subvolume_retain_snapshot_trash_busy_recreate(self):
        """
        ensure retained subvolume recreate fails if its trash is not yet purged
        """
        subvolume = self._generate_random_subvolume_name()
        snapshot = self._generate_random_snapshot_name()

        # create subvolume
        self._fs_cmd("subvolume", "create", self.volname, subvolume)

        # snapshot subvolume
        self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolume, snapshot)

        # remove with snapshot retention
        self._fs_cmd("subvolume", "rm", self.volname, subvolume, "--retain-snapshots")

        # fake a trash entry
        self._update_fake_trash(subvolume)

        # recreate subvolume
        try:
            self._fs_cmd("subvolume", "create", self.volname, subvolume)
        except CommandFailedError as ce:
            self.assertEqual(ce.exitstatus, errno.EAGAIN, "invalid error code on recreate of subvolume with purge pending")
        else:
            self.fail("expected recreate of subvolume with purge pending to fail")

        # clear fake trash entry
        self._update_fake_trash(subvolume, create=False)

        # recreate subvolume
        self._fs_cmd("subvolume", "create", self.volname, subvolume)

        # remove snapshot
        self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume, snapshot)

        # remove subvolume
        self._fs_cmd("subvolume", "rm", self.volname, subvolume)

        # verify trash dir is clean
        self._wait_for_trash_empty()

    def test_subvolume_rm_with_snapshots(self):
        subvolume = self._generate_random_subvolume_name()
        snapshot = self._generate_random_snapshot_name()

        # create subvolume
        self._fs_cmd("subvolume", "create", self.volname, subvolume)

        # snapshot subvolume
        self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolume, snapshot)

        # remove subvolume -- should fail with ENOTEMPTY since it has snapshots
        try:
            self._fs_cmd("subvolume", "rm", self.volname, subvolume)
        except CommandFailedError as ce:
            if ce.exitstatus != errno.ENOTEMPTY:
                raise RuntimeError("invalid error code returned when deleting subvolume with snapshots")
        else:
            raise RuntimeError("expected subvolume deletion to fail")

        # remove snapshot
        self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume, snapshot)

        # remove subvolume
        self._fs_cmd("subvolume", "rm", self.volname, subvolume)

        # verify trash dir is clean
        self._wait_for_trash_empty()

    def test_subvolume_snapshot_protect_unprotect_sanity(self):
        """
        Snapshot protect/unprotect commands are deprecated. This test exists to ensure that
        invoking the command does not cause errors, till they are removed from a subsequent release.
        """
        subvolume = self._generate_random_subvolume_name()
        snapshot = self._generate_random_snapshot_name()
        clone = self._generate_random_clone_name()

        # create subvolume
        self._fs_cmd("subvolume", "create", self.volname, subvolume, "--mode=777")

        # do some IO
        self._do_subvolume_io(subvolume, number_of_files=64)

        # snapshot subvolume
        self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolume, snapshot)

        # now, protect snapshot
        self._fs_cmd("subvolume", "snapshot", "protect", self.volname, subvolume, snapshot)

        # schedule a clone
        self._fs_cmd("subvolume", "snapshot", "clone", self.volname, subvolume, snapshot, clone)

        # check clone status
        self._wait_for_clone_to_complete(clone)

        # now, unprotect snapshot
        self._fs_cmd("subvolume", "snapshot", "unprotect", self.volname, subvolume, snapshot)

        # verify clone
        self._verify_clone(subvolume, snapshot, clone)

        # remove snapshot
        self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume, snapshot)

        # remove subvolumes
        self._fs_cmd("subvolume", "rm", self.volname, subvolume)
        self._fs_cmd("subvolume", "rm", self.volname, clone)

        # verify trash dir is clean
        self._wait_for_trash_empty()

    def test_subvolume_snapshot_rm_force(self):
        # test removing non existing subvolume snapshot with --force
        subvolume = self._generate_random_subvolume_name()
        snapshot = self._generate_random_snapshot_name()

        # remove snapshot
        try:
            self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume, snapshot, "--force")
        except CommandFailedError:
            raise RuntimeError("expected the 'fs subvolume snapshot rm --force' command to succeed")


class TestSubvolumeSnapshotClones(TestVolumesHelper):
    """Tests for FS subvolume snapshot clone operations."""
    def test_clone_subvolume_info(self):
        # tests the 'fs subvolume info' command for a clone
        subvol_md = ["atime", "bytes_pcent", "bytes_quota", "bytes_used", "created_at", "ctime",
                     "data_pool", "gid", "mode", "mon_addrs", "mtime", "path", "pool_namespace",
                     "type", "uid"]

        subvolume = self._generate_random_subvolume_name()
        snapshot = self._generate_random_snapshot_name()
        clone = self._generate_random_clone_name()

        # create subvolume
        self._fs_cmd("subvolume", "create", self.volname, subvolume, "--mode=777")

        # do some IO
        self._do_subvolume_io(subvolume, number_of_files=1)

        # snapshot subvolume
        self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolume, snapshot)

        # schedule a clone
        self._fs_cmd("subvolume", "snapshot", "clone", self.volname, subvolume, snapshot, clone)

        # check clone status
        self._wait_for_clone_to_complete(clone)

        # remove snapshot
        self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume, snapshot)

        subvol_info = json.loads(self._get_subvolume_info(self.volname, clone))
        if len(subvol_info) == 0:
            raise RuntimeError("Expected the 'fs subvolume info' command to list metadata of subvolume")
        for md in subvol_md:
            if md not in subvol_info.keys():
                raise RuntimeError("%s not present in the metadata of subvolume" % md)
        if subvol_info["type"] != "clone":
            raise RuntimeError("type should be set to clone")

        # remove subvolumes
        self._fs_cmd("subvolume", "rm", self.volname, subvolume)
        self._fs_cmd("subvolume", "rm", self.volname, clone)

        # verify trash dir is clean
        self._wait_for_trash_empty()

    def test_non_clone_status(self):
        subvolume = self._generate_random_subvolume_name()

        # create subvolume
        self._fs_cmd("subvolume", "create", self.volname, subvolume)

        # clone status should fail for a subvolume that is not a clone
        try:
            self._fs_cmd("clone", "status", self.volname, subvolume)
        except CommandFailedError as ce:
            if ce.exitstatus != errno.ENOTSUP:
                raise RuntimeError("invalid error code when fetching status of a non cloned subvolume")
        else:
            raise RuntimeError("expected fetching of clone status of a subvolume to fail")

        # remove subvolume
        self._fs_cmd("subvolume", "rm", self.volname, subvolume)

        # verify trash dir is clean
        self._wait_for_trash_empty()

    def test_subvolume_clone_inherit_snapshot_namespace_and_size(self):
        subvolume = self._generate_random_subvolume_name()
        snapshot = self._generate_random_snapshot_name()
        clone = self._generate_random_clone_name()
        osize = self.DEFAULT_FILE_SIZE*1024*1024*12

        # create subvolume, in an isolated namespace with a specified size
        self._fs_cmd("subvolume", "create", self.volname, subvolume, "--namespace-isolated", "--size", str(osize), "--mode=777")

        # do some IO
        self._do_subvolume_io(subvolume, number_of_files=8)

        # snapshot subvolume
        self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolume, snapshot)

        # create a pool different from current subvolume pool
        subvol_path = self._get_subvolume_path(self.volname, subvolume)
        default_pool = self.mount_a.getfattr(subvol_path, "ceph.dir.layout.pool")
        new_pool = "new_pool"
        self.assertNotEqual(default_pool, new_pool)
        self.fs.add_data_pool(new_pool)

        # update source subvolume pool
        self._do_subvolume_pool_and_namespace_update(subvolume, pool=new_pool, pool_namespace="")

        # schedule a clone, with NO --pool specification
        self._fs_cmd("subvolume", "snapshot", "clone", self.volname, subvolume, snapshot, clone)

        # check clone status
        self._wait_for_clone_to_complete(clone)

        # verify clone
        self._verify_clone(subvolume, snapshot, clone)

        # remove snapshot
        self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume, snapshot)

        # remove subvolumes
        self._fs_cmd("subvolume", "rm", self.volname, subvolume)
        self._fs_cmd("subvolume", "rm", self.volname, clone)

        # verify trash dir is clean
        self._wait_for_trash_empty()

    def test_subvolume_clone_inherit_quota_attrs(self):
        subvolume = self._generate_random_subvolume_name()
        snapshot = self._generate_random_snapshot_name()
        clone = self._generate_random_clone_name()
        osize = self.DEFAULT_FILE_SIZE*1024*1024*12

        # create subvolume with a specified size
        self._fs_cmd("subvolume", "create", self.volname, subvolume, "--mode=777", "--size", str(osize))

        # do some IO
        self._do_subvolume_io(subvolume, number_of_files=8)

        # get subvolume path
        subvolpath = self._get_subvolume_path(self.volname, subvolume)

        # set quota on number of files
        self.mount_a.setfattr(subvolpath, 'ceph.quota.max_files', "20", sudo=True)

        # snapshot subvolume
        self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolume, snapshot)

        # schedule a clone
        self._fs_cmd("subvolume", "snapshot", "clone", self.volname, subvolume, snapshot, clone)

        # check clone status
        self._wait_for_clone_to_complete(clone)

        # verify clone
        self._verify_clone(subvolume, snapshot, clone)

        # get clone path
        clonepath = self._get_subvolume_path(self.volname, clone)

        # verify quota max_files is inherited from source snapshot
        subvol_quota = self.mount_a.getfattr(subvolpath, "ceph.quota.max_files")
        clone_quota = self.mount_a.getfattr(clonepath, "ceph.quota.max_files")
        self.assertEqual(subvol_quota, clone_quota)

        # remove snapshot
        self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume, snapshot)

        # remove subvolumes
        self._fs_cmd("subvolume", "rm", self.volname, subvolume)
        self._fs_cmd("subvolume", "rm", self.volname, clone)

        # verify trash dir is clean
        self._wait_for_trash_empty()

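    # The quota attribute checked above is a plain CephFS extended attribute,
    # so the same inheritance can be inspected from any client mount. A
    # sketch, assuming standard attr(1) tooling is available on the mount:
    #
    #   setfattr -n ceph.quota.max_files -v 20 <subvolume_path>
    #   getfattr -n ceph.quota.max_files <clone_path>   # expected: 20
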
    def test_subvolume_clone_in_progress_getpath(self):
        subvolume = self._generate_random_subvolume_name()
        snapshot = self._generate_random_snapshot_name()
        clone = self._generate_random_clone_name()

        # create subvolume
        self._fs_cmd("subvolume", "create", self.volname, subvolume, "--mode=777")

        # do some IO
        self._do_subvolume_io(subvolume, number_of_files=64)

        # snapshot subvolume
        self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolume, snapshot)

        # Insert delay at the beginning of snapshot clone
        self.config_set('mgr', 'mgr/volumes/snapshot_clone_delay', 2)

        # schedule a clone
        self._fs_cmd("subvolume", "snapshot", "clone", self.volname, subvolume, snapshot, clone)

        # clone should not be accessible right now
        try:
            self._get_subvolume_path(self.volname, clone)
        except CommandFailedError as ce:
            if ce.exitstatus != errno.EAGAIN:
                raise RuntimeError("invalid error code when fetching path of a pending clone")
        else:
            raise RuntimeError("expected fetching path of a pending clone to fail")

        # check clone status
        self._wait_for_clone_to_complete(clone)

        # clone should be accessible now
        subvolpath = self._get_subvolume_path(self.volname, clone)
        self.assertNotEqual(subvolpath, None)

        # verify clone
        self._verify_clone(subvolume, snapshot, clone)

        # remove snapshot
        self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume, snapshot)

        # remove subvolumes
        self._fs_cmd("subvolume", "rm", self.volname, subvolume)
        self._fs_cmd("subvolume", "rm", self.volname, clone)

        # verify trash dir is clean
        self._wait_for_trash_empty()

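    # "snapshot_clone_delay" used above makes the cloner threads sleep before
    # starting the copy, which is what keeps these clones observably pending
    # or in-progress long enough to test against. The equivalent manual CLI,
    # assuming the standard mgr config interface that config_set wraps:
    #
    #   ceph config set mgr mgr/volumes/snapshot_clone_delay 2
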
    def test_subvolume_clone_in_progress_snapshot_rm(self):
        subvolume = self._generate_random_subvolume_name()
        snapshot = self._generate_random_snapshot_name()
        clone = self._generate_random_clone_name()

        # create subvolume
        self._fs_cmd("subvolume", "create", self.volname, subvolume, "--mode=777")

        # do some IO
        self._do_subvolume_io(subvolume, number_of_files=64)

        # snapshot subvolume
        self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolume, snapshot)

        # Insert delay at the beginning of snapshot clone
        self.config_set('mgr', 'mgr/volumes/snapshot_clone_delay', 2)

        # schedule a clone
        self._fs_cmd("subvolume", "snapshot", "clone", self.volname, subvolume, snapshot, clone)

        # snapshot should not be deletable now
        try:
            self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume, snapshot)
        except CommandFailedError as ce:
            self.assertEqual(ce.exitstatus, errno.EAGAIN, msg="invalid error code when removing source snapshot of a clone")
        else:
            self.fail("expected removing source snapshot of a clone to fail")

        # check clone status
        self._wait_for_clone_to_complete(clone)

        # clone should be accessible now
        subvolpath = self._get_subvolume_path(self.volname, clone)
        self.assertNotEqual(subvolpath, None)

        # verify clone
        self._verify_clone(subvolume, snapshot, clone)

        # remove snapshot
        self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume, snapshot)

        # remove subvolumes
        self._fs_cmd("subvolume", "rm", self.volname, subvolume)
        self._fs_cmd("subvolume", "rm", self.volname, clone)

        # verify trash dir is clean
        self._wait_for_trash_empty()

    def test_subvolume_clone_in_progress_source(self):
        subvolume = self._generate_random_subvolume_name()
        snapshot = self._generate_random_snapshot_name()
        clone = self._generate_random_clone_name()

        # create subvolume
        self._fs_cmd("subvolume", "create", self.volname, subvolume, "--mode=777")

        # do some IO
        self._do_subvolume_io(subvolume, number_of_files=64)

        # snapshot subvolume
        self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolume, snapshot)

        # Insert delay at the beginning of snapshot clone
        self.config_set('mgr', 'mgr/volumes/snapshot_clone_delay', 2)

        # schedule a clone
        self._fs_cmd("subvolume", "snapshot", "clone", self.volname, subvolume, snapshot, clone)

        # verify clone source
        result = json.loads(self._fs_cmd("clone", "status", self.volname, clone))
        source = result['status']['source']
        self.assertEqual(source['volume'], self.volname)
        self.assertEqual(source['subvolume'], subvolume)
        self.assertEqual(source.get('group', None), None)
        self.assertEqual(source['snapshot'], snapshot)

        # check clone status
        self._wait_for_clone_to_complete(clone)

        # clone should be accessible now
        subvolpath = self._get_subvolume_path(self.volname, clone)
        self.assertNotEqual(subvolpath, None)

        # verify clone
        self._verify_clone(subvolume, snapshot, clone)

        # remove snapshot
        self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume, snapshot)

        # remove subvolumes
        self._fs_cmd("subvolume", "rm", self.volname, subvolume)
        self._fs_cmd("subvolume", "rm", self.volname, clone)

        # verify trash dir is clean
        self._wait_for_trash_empty()

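    # Shape of the "clone status" JSON parsed above (a sketch built only from
    # the fields these tests assert; a real response may carry more keys):
    #
    #   {"status": {"state": "in-progress",
    #               "source": {"volume": "<vol>",
    #                          "subvolume": "<subvol>",
    #                          "snapshot": "<snap>"}}}
    #
    # "group" appears under "source" only when the source subvolume lives in
    # a non-default subvolume group, hence the source.get('group', None) check.
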
    def test_subvolume_clone_retain_snapshot_with_snapshots(self):
        """
        retain snapshots of a cloned subvolume and check disallowed operations
        """
        subvolume = self._generate_random_subvolume_name()
        snapshot1, snapshot2 = self._generate_random_snapshot_name(2)
        clone = self._generate_random_clone_name()

        # create subvolume
        self._fs_cmd("subvolume", "create", self.volname, subvolume, "--mode=777")

        # store path for clone verification
        subvol1_path = self._get_subvolume_path(self.volname, subvolume)

        # do some IO
        self._do_subvolume_io(subvolume, number_of_files=16)

        # snapshot subvolume
        self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolume, snapshot1)

        # remove with snapshot retention
        self._fs_cmd("subvolume", "rm", self.volname, subvolume, "--retain-snapshots")

        # clone retained subvolume snapshot
        self._fs_cmd("subvolume", "snapshot", "clone", self.volname, subvolume, snapshot1, clone)

        # check clone status
        self._wait_for_clone_to_complete(clone)

        # verify clone
        self._verify_clone(subvolume, snapshot1, clone, subvol_path=subvol1_path)

        # create a snapshot on the clone
        self._fs_cmd("subvolume", "snapshot", "create", self.volname, clone, snapshot2)

        # remove clone with snapshot retention
        self._fs_cmd("subvolume", "rm", self.volname, clone, "--retain-snapshots")

        # list snapshots
        clonesnapshotls = json.loads(self._fs_cmd('subvolume', 'snapshot', 'ls', self.volname, clone))
        self.assertEqual(len(clonesnapshotls), 1, "Expected the 'fs subvolume snapshot ls' command to list the"
                         " created subvolume snapshots")
        snapshotnames = [snapshot['name'] for snapshot in clonesnapshotls]
        for snap in [snapshot2]:
            self.assertIn(snap, snapshotnames, "Missing snapshot '{0}' in snapshot list".format(snap))

        ## check disallowed operations on retained clone

        # clone status
        try:
            self._fs_cmd("clone", "status", self.volname, clone)
        except CommandFailedError as ce:
            self.assertEqual(ce.exitstatus, errno.ENOENT, "invalid error code on clone status of clone with retained snapshots")
        else:
            self.fail("expected clone status of clone with retained snapshots to fail")

        # clone cancel
        try:
            self._fs_cmd("clone", "cancel", self.volname, clone)
        except CommandFailedError as ce:
            self.assertEqual(ce.exitstatus, errno.ENOENT, "invalid error code on clone cancel of clone with retained snapshots")
        else:
            self.fail("expected clone cancel of clone with retained snapshots to fail")

        # remove snapshots (removes subvolumes as all are in retained state)
        self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume, snapshot1)
        self._fs_cmd("subvolume", "snapshot", "rm", self.volname, clone, snapshot2)

        # verify list subvolumes returns an empty list
        subvolumels = json.loads(self._fs_cmd('subvolume', 'ls', self.volname))
        self.assertEqual(len(subvolumels), 0)

        # verify trash dir is clean
        self._wait_for_trash_empty()

    def test_subvolume_retain_snapshot_clone(self):
        """
        clone a snapshot from a snapshot retained subvolume
        """
        subvolume = self._generate_random_subvolume_name()
        snapshot = self._generate_random_snapshot_name()
        clone = self._generate_random_clone_name()

        # create subvolume
        self._fs_cmd("subvolume", "create", self.volname, subvolume, "--mode=777")

        # store path for clone verification
        subvol_path = self._get_subvolume_path(self.volname, subvolume)

        # do some IO
        self._do_subvolume_io(subvolume, number_of_files=16)

        # snapshot subvolume
        self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolume, snapshot)

        # remove with snapshot retention
        self._fs_cmd("subvolume", "rm", self.volname, subvolume, "--retain-snapshots")

        # clone retained subvolume snapshot
        self._fs_cmd("subvolume", "snapshot", "clone", self.volname, subvolume, snapshot, clone)

        # check clone status
        self._wait_for_clone_to_complete(clone)

        # verify clone
        self._verify_clone(subvolume, snapshot, clone, subvol_path=subvol_path)

        # remove snapshots (removes retained volume)
        self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume, snapshot)

        # remove subvolume
        self._fs_cmd("subvolume", "rm", self.volname, clone)

        # verify list subvolumes returns an empty list
        subvolumels = json.loads(self._fs_cmd('subvolume', 'ls', self.volname))
        self.assertEqual(len(subvolumels), 0)

        # verify trash dir is clean
        self._wait_for_trash_empty()

    def test_subvolume_retain_snapshot_clone_from_newer_snapshot(self):
        """
        clone a subvolume from recreated subvolume's latest snapshot
        """
        subvolume = self._generate_random_subvolume_name()
        snapshot1, snapshot2 = self._generate_random_snapshot_name(2)
        clone = self._generate_random_clone_name(1)

        # create subvolume
        self._fs_cmd("subvolume", "create", self.volname, subvolume, "--mode=777")

        # do some IO
        self._do_subvolume_io(subvolume, number_of_files=16)

        # snapshot subvolume
        self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolume, snapshot1)

        # remove with snapshot retention
        self._fs_cmd("subvolume", "rm", self.volname, subvolume, "--retain-snapshots")

        # recreate subvolume
        self._fs_cmd("subvolume", "create", self.volname, subvolume, "--mode=777")

        # get and store path for clone verification
        subvol2_path = self._get_subvolume_path(self.volname, subvolume)

        # do some IO
        self._do_subvolume_io(subvolume, number_of_files=16)

        # snapshot newer subvolume
        self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolume, snapshot2)

        # remove with snapshot retention
        self._fs_cmd("subvolume", "rm", self.volname, subvolume, "--retain-snapshots")

        # clone retained subvolume's newer snapshot
        self._fs_cmd("subvolume", "snapshot", "clone", self.volname, subvolume, snapshot2, clone)

        # check clone status
        self._wait_for_clone_to_complete(clone)

        # verify clone
        self._verify_clone(subvolume, snapshot2, clone, subvol_path=subvol2_path)

        # remove snapshots (removes retained volume)
        self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume, snapshot1)
        self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume, snapshot2)

        # remove subvolume
        self._fs_cmd("subvolume", "rm", self.volname, clone)

        # verify list subvolumes returns an empty list
        subvolumels = json.loads(self._fs_cmd('subvolume', 'ls', self.volname))
        self.assertEqual(len(subvolumels), 0)

        # verify trash dir is clean
        self._wait_for_trash_empty()

    def test_subvolume_retain_snapshot_recreate(self):
        """
        recreate a subvolume from one of its retained snapshots
        """
        subvolume = self._generate_random_subvolume_name()
        snapshot = self._generate_random_snapshot_name()

        # create subvolume
        self._fs_cmd("subvolume", "create", self.volname, subvolume, "--mode=777")

        # store path for clone verification
        subvol_path = self._get_subvolume_path(self.volname, subvolume)

        # do some IO
        self._do_subvolume_io(subvolume, number_of_files=16)

        # snapshot subvolume
        self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolume, snapshot)

        # remove with snapshot retention
        self._fs_cmd("subvolume", "rm", self.volname, subvolume, "--retain-snapshots")

        # recreate retained subvolume using its own snapshot to clone
        self._fs_cmd("subvolume", "snapshot", "clone", self.volname, subvolume, snapshot, subvolume)

        # check clone status
        self._wait_for_clone_to_complete(subvolume)

        # verify clone
        self._verify_clone(subvolume, snapshot, subvolume, subvol_path=subvol_path)

        # remove snapshot
        self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume, snapshot)

        # remove subvolume
        self._fs_cmd("subvolume", "rm", self.volname, subvolume)

        # verify list subvolumes returns an empty list
        subvolumels = json.loads(self._fs_cmd('subvolume', 'ls', self.volname))
        self.assertEqual(len(subvolumels), 0)

        # verify trash dir is clean
        self._wait_for_trash_empty()

    def test_subvolume_retain_snapshot_trash_busy_recreate_clone(self):
        """
        ensure retained clone recreate fails if its trash is not yet purged
        """
        subvolume = self._generate_random_subvolume_name()
        snapshot = self._generate_random_snapshot_name()
        clone = self._generate_random_clone_name()

        # create subvolume
        self._fs_cmd("subvolume", "create", self.volname, subvolume)

        # snapshot subvolume
        self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolume, snapshot)

        # clone subvolume snapshot
        self._fs_cmd("subvolume", "snapshot", "clone", self.volname, subvolume, snapshot, clone)

        # check clone status
        self._wait_for_clone_to_complete(clone)

        # snapshot clone
        self._fs_cmd("subvolume", "snapshot", "create", self.volname, clone, snapshot)

        # remove clone with snapshot retention
        self._fs_cmd("subvolume", "rm", self.volname, clone, "--retain-snapshots")

        # fake a trash entry
        self._update_fake_trash(clone)

        # clone subvolume snapshot (recreate)
        try:
            self._fs_cmd("subvolume", "snapshot", "clone", self.volname, subvolume, snapshot, clone)
        except CommandFailedError as ce:
            self.assertEqual(ce.exitstatus, errno.EAGAIN, "invalid error code on recreate of clone with purge pending")
        else:
            self.fail("expected recreate of clone with purge pending to fail")

        # clear fake trash entry
        self._update_fake_trash(clone, create=False)

        # recreate subvolume
        self._fs_cmd("subvolume", "snapshot", "clone", self.volname, subvolume, snapshot, clone)

        # check clone status
        self._wait_for_clone_to_complete(clone)

        # remove snapshots
        self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume, snapshot)
        self._fs_cmd("subvolume", "snapshot", "rm", self.volname, clone, snapshot)

        # remove subvolumes
        self._fs_cmd("subvolume", "rm", self.volname, subvolume)
        self._fs_cmd("subvolume", "rm", self.volname, clone)

        # verify trash dir is clean
        self._wait_for_trash_empty()

    def test_subvolume_snapshot_attr_clone(self):
        subvolume = self._generate_random_subvolume_name()
        snapshot = self._generate_random_snapshot_name()
        clone = self._generate_random_clone_name()

        # create subvolume
        self._fs_cmd("subvolume", "create", self.volname, subvolume, "--mode=777")

        # do some IO
        self._do_subvolume_io_mixed(subvolume)

        # snapshot subvolume
        self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolume, snapshot)

        # schedule a clone
        self._fs_cmd("subvolume", "snapshot", "clone", self.volname, subvolume, snapshot, clone)

        # check clone status
        self._wait_for_clone_to_complete(clone)

        # verify clone
        self._verify_clone(subvolume, snapshot, clone)

        # remove snapshot
        self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume, snapshot)

        # remove subvolumes
        self._fs_cmd("subvolume", "rm", self.volname, subvolume)
        self._fs_cmd("subvolume", "rm", self.volname, clone)

        # verify trash dir is clean
        self._wait_for_trash_empty()

    def test_subvolume_snapshot_clone(self):
        subvolume = self._generate_random_subvolume_name()
        snapshot = self._generate_random_snapshot_name()
        clone = self._generate_random_clone_name()

        # create subvolume
        self._fs_cmd("subvolume", "create", self.volname, subvolume, "--mode=777")

        # do some IO
        self._do_subvolume_io(subvolume, number_of_files=64)

        # snapshot subvolume
        self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolume, snapshot)

        # schedule a clone
        self._fs_cmd("subvolume", "snapshot", "clone", self.volname, subvolume, snapshot, clone)

        # check clone status
        self._wait_for_clone_to_complete(clone)

        # verify clone
        self._verify_clone(subvolume, snapshot, clone)

        # remove snapshot
        self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume, snapshot)

        # remove subvolumes
        self._fs_cmd("subvolume", "rm", self.volname, subvolume)
        self._fs_cmd("subvolume", "rm", self.volname, clone)

        # verify trash dir is clean
        self._wait_for_trash_empty()

    def test_subvolume_snapshot_clone_quota_exceeded(self):
        subvolume = self._generate_random_subvolume_name()
        snapshot = self._generate_random_snapshot_name()
        clone = self._generate_random_clone_name()

        # create subvolume with 20MB quota
        osize = self.DEFAULT_FILE_SIZE*1024*1024*20
        self._fs_cmd("subvolume", "create", self.volname, subvolume, "--mode=777", "--size", str(osize))

        # do IO, write 50 files of 1MB each to exceed quota. This mostly succeeds as quota enforcement takes time.
        self._do_subvolume_io(subvolume, number_of_files=50)

        # snapshot subvolume
        self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolume, snapshot)

        # schedule a clone
        self._fs_cmd("subvolume", "snapshot", "clone", self.volname, subvolume, snapshot, clone)

        # check clone status
        self._wait_for_clone_to_complete(clone)

        # verify clone
        self._verify_clone(subvolume, snapshot, clone)

        # remove snapshot
        self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume, snapshot)

        # remove subvolumes
        self._fs_cmd("subvolume", "rm", self.volname, subvolume)
        self._fs_cmd("subvolume", "rm", self.volname, clone)

        # verify trash dir is clean
        self._wait_for_trash_empty()

    def test_subvolume_snapshot_in_complete_clone_rm(self):
        """
        Validates the removal of a clone when it is not in 'complete|cancelled|failed' state.
        The forceful removal of a subvolume clone succeeds only if it's in any of the
        'complete|cancelled|failed' states. It fails with EAGAIN in any other state.
        """
        subvolume = self._generate_random_subvolume_name()
        snapshot = self._generate_random_snapshot_name()
        clone = self._generate_random_clone_name()

        # create subvolume
        self._fs_cmd("subvolume", "create", self.volname, subvolume, "--mode=777")

        # do some IO
        self._do_subvolume_io(subvolume, number_of_files=64)

        # snapshot subvolume
        self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolume, snapshot)

        # Insert delay at the beginning of snapshot clone
        self.config_set('mgr', 'mgr/volumes/snapshot_clone_delay', 2)

        # schedule a clone
        self._fs_cmd("subvolume", "snapshot", "clone", self.volname, subvolume, snapshot, clone)

        # Use --force since clone is not complete. Returns EAGAIN as clone is not either complete or cancelled.
        try:
            self._fs_cmd("subvolume", "rm", self.volname, clone, "--force")
        except CommandFailedError as ce:
            if ce.exitstatus != errno.EAGAIN:
                raise RuntimeError("invalid error code when trying to remove failed clone")
        else:
            raise RuntimeError("expected error when removing a failed clone")

        # cancel on-going clone
        self._fs_cmd("clone", "cancel", self.volname, clone)

        # verify canceled state
        self._check_clone_canceled(clone)

        # clone removal should succeed after cancel
        self._fs_cmd("subvolume", "rm", self.volname, clone, "--force")

        # remove snapshot
        self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume, snapshot)

        # remove subvolume
        self._fs_cmd("subvolume", "rm", self.volname, subvolume)

        # verify trash dir is clean
        self._wait_for_trash_empty()

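    # Clone lifecycle exercised by the tests above, as reflected in
    # "clone status": pending -> in-progress -> complete | failed, with
    # "clone cancel" moving a pending or in-progress clone to canceled.
    # "subvolume rm --force" is accepted only once the clone has settled in
    # one of the terminal states (complete, cancelled or failed); otherwise
    # it returns EAGAIN.
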
    def test_subvolume_snapshot_clone_retain_suid_guid(self):
        subvolume = self._generate_random_subvolume_name()
        snapshot = self._generate_random_snapshot_name()
        clone = self._generate_random_clone_name()

        # create subvolume
        self._fs_cmd("subvolume", "create", self.volname, subvolume, "--mode=777")

        # Create a file with suid, guid bits set along with executable bit.
        args = ["subvolume", "getpath", self.volname, subvolume]
        subvolpath = self._fs_cmd(*args)
        self.assertNotEqual(subvolpath, None)
        subvolpath = subvolpath[1:].rstrip()  # remove "/" prefix and any trailing newline

        file_path = os.path.join(subvolpath, "test_suid_file")
        self.mount_a.run_shell(["touch", file_path])
        self.mount_a.run_shell(["chmod", "u+sx,g+sx", file_path])

        # snapshot subvolume
        self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolume, snapshot)

        # schedule a clone
        self._fs_cmd("subvolume", "snapshot", "clone", self.volname, subvolume, snapshot, clone)

        # check clone status
        self._wait_for_clone_to_complete(clone)

        # verify clone
        self._verify_clone(subvolume, snapshot, clone)

        # remove snapshot
        self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume, snapshot)

        # remove subvolumes
        self._fs_cmd("subvolume", "rm", self.volname, subvolume)
        self._fs_cmd("subvolume", "rm", self.volname, clone)

        # verify trash dir is clean
        self._wait_for_trash_empty()

    def test_subvolume_snapshot_clone_and_reclone(self):
        subvolume = self._generate_random_subvolume_name()
        snapshot = self._generate_random_snapshot_name()
        clone1, clone2 = self._generate_random_clone_name(2)

        # create subvolume
        self._fs_cmd("subvolume", "create", self.volname, subvolume, "--mode=777")

        # do some IO
        self._do_subvolume_io(subvolume, number_of_files=32)

        # snapshot subvolume
        self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolume, snapshot)

        # schedule a clone
        self._fs_cmd("subvolume", "snapshot", "clone", self.volname, subvolume, snapshot, clone1)

        # check clone status
        self._wait_for_clone_to_complete(clone1)

        # verify clone
        self._verify_clone(subvolume, snapshot, clone1)

        # remove snapshot
        self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume, snapshot)

        # now the clone is just like a normal subvolume -- snapshot the clone and fork
        # another clone. before that do some IO so it can be differentiated.
        self._do_subvolume_io(clone1, create_dir="data", number_of_files=32)

        # snapshot clone -- use same snap name
        self._fs_cmd("subvolume", "snapshot", "create", self.volname, clone1, snapshot)

        # schedule a clone
        self._fs_cmd("subvolume", "snapshot", "clone", self.volname, clone1, snapshot, clone2)

        # check clone status
        self._wait_for_clone_to_complete(clone2)

        # verify clone
        self._verify_clone(clone1, snapshot, clone2)

        # remove snapshot
        self._fs_cmd("subvolume", "snapshot", "rm", self.volname, clone1, snapshot)

        # remove subvolumes
        self._fs_cmd("subvolume", "rm", self.volname, subvolume)
        self._fs_cmd("subvolume", "rm", self.volname, clone1)
        self._fs_cmd("subvolume", "rm", self.volname, clone2)

        # verify trash dir is clean
        self._wait_for_trash_empty()

    def test_subvolume_snapshot_clone_cancel_in_progress(self):
        subvolume = self._generate_random_subvolume_name()
        snapshot = self._generate_random_snapshot_name()
        clone = self._generate_random_clone_name()

        # create subvolume
        self._fs_cmd("subvolume", "create", self.volname, subvolume, "--mode=777")

        # do some IO
        self._do_subvolume_io(subvolume, number_of_files=128)

        # snapshot subvolume
        self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolume, snapshot)

        # Insert delay at the beginning of snapshot clone
        self.config_set('mgr', 'mgr/volumes/snapshot_clone_delay', 2)

        # schedule a clone
        self._fs_cmd("subvolume", "snapshot", "clone", self.volname, subvolume, snapshot, clone)

        # cancel on-going clone
        self._fs_cmd("clone", "cancel", self.volname, clone)

        # verify canceled state
        self._check_clone_canceled(clone)

        # remove snapshot
        self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume, snapshot)

        # remove subvolumes
        self._fs_cmd("subvolume", "rm", self.volname, subvolume)
        self._fs_cmd("subvolume", "rm", self.volname, clone, "--force")

        # verify trash dir is clean
        self._wait_for_trash_empty()

    def test_subvolume_snapshot_clone_cancel_pending(self):
        """
        this test is a bit more involved compared to canceling an in-progress clone.
        we'd need to ensure that a to-be canceled clone has still not been picked up
        by cloner threads. exploit the fact that clones are picked up in an FCFS
        fashion and there are four (4) cloner threads by default. When the number of
        cloner threads increase, this test _may_ start tripping -- so, the number of
        clone operations would need to be jacked up.
        """
        # default number of clone threads
        NR_THREADS = 4
        # good enough for 4 threads
        NR_CLONES = 5
        # yeh, 1gig -- we need the clone to run for sometime
        FILE_SIZE_MB = 1024

        subvolume = self._generate_random_subvolume_name()
        snapshot = self._generate_random_snapshot_name()
        clones = self._generate_random_clone_name(NR_CLONES)

        # create subvolume
        self._fs_cmd("subvolume", "create", self.volname, subvolume, "--mode=777")

        # do some IO
        self._do_subvolume_io(subvolume, number_of_files=4, file_size=FILE_SIZE_MB)

        # snapshot subvolume
        self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolume, snapshot)

        # schedule clones
        for clone in clones:
            self._fs_cmd("subvolume", "snapshot", "clone", self.volname, subvolume, snapshot, clone)

        to_wait = clones[0:NR_THREADS]
        to_cancel = clones[NR_THREADS:]

        # cancel pending clones and verify
        for clone in to_cancel:
            status = json.loads(self._fs_cmd("clone", "status", self.volname, clone))
            self.assertEqual(status["status"]["state"], "pending")
            self._fs_cmd("clone", "cancel", self.volname, clone)
            self._check_clone_canceled(clone)

        # let's cancel on-going clones. handle the case where some of the clones
        # have just completed
        for clone in list(to_wait):
            try:
                self._fs_cmd("clone", "cancel", self.volname, clone)
                to_cancel.append(clone)
                to_wait.remove(clone)
            except CommandFailedError as ce:
                if ce.exitstatus != errno.EINVAL:
                    raise RuntimeError("invalid error code when cancelling on-going clone")

        # remove snapshot
        self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume, snapshot)

        # remove subvolumes
        self._fs_cmd("subvolume", "rm", self.volname, subvolume)
        for clone in to_wait:
            self._fs_cmd("subvolume", "rm", self.volname, clone)
        for clone in to_cancel:
            self._fs_cmd("subvolume", "rm", self.volname, clone, "--force")

        # verify trash dir is clean
        self._wait_for_trash_empty()

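    # Worked example of the FCFS assumption above: with NR_THREADS = 4 and
    # NR_CLONES = 5, the four cloner threads pick up clones[0:4] more or less
    # immediately, so clones[4:] are the only ones guaranteed to still report
    # {"status": {"state": "pending"}} and thus be cancelable while pending;
    # the first four must be canceled as (possibly already) in-progress.
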
    def test_subvolume_snapshot_clone_different_groups(self):
        subvolume = self._generate_random_subvolume_name()
        snapshot = self._generate_random_snapshot_name()
        clone = self._generate_random_clone_name()
        s_group, c_group = self._generate_random_group_name(2)

        # create groups
        self._fs_cmd("subvolumegroup", "create", self.volname, s_group)
        self._fs_cmd("subvolumegroup", "create", self.volname, c_group)

        # create subvolume
        self._fs_cmd("subvolume", "create", self.volname, subvolume, s_group, "--mode=777")

        # do some IO
        self._do_subvolume_io(subvolume, subvolume_group=s_group, number_of_files=32)

        # snapshot subvolume
        self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolume, snapshot, s_group)

        # schedule a clone
        self._fs_cmd("subvolume", "snapshot", "clone", self.volname, subvolume, snapshot, clone,
                     '--group_name', s_group, '--target_group_name', c_group)

        # check clone status
        self._wait_for_clone_to_complete(clone, clone_group=c_group)

        # verify clone
        self._verify_clone(subvolume, snapshot, clone, source_group=s_group, clone_group=c_group)

        # remove snapshot
        self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume, snapshot, s_group)

        # remove subvolumes
        self._fs_cmd("subvolume", "rm", self.volname, subvolume, s_group)
        self._fs_cmd("subvolume", "rm", self.volname, clone, c_group)

        # remove groups
        self._fs_cmd("subvolumegroup", "rm", self.volname, s_group)
        self._fs_cmd("subvolumegroup", "rm", self.volname, c_group)

        # verify trash dir is clean
        self._wait_for_trash_empty()

    def test_subvolume_snapshot_clone_fail_with_remove(self):
        subvolume = self._generate_random_subvolume_name()
        snapshot = self._generate_random_snapshot_name()
        clone1, clone2 = self._generate_random_clone_name(2)

        pool_capacity = 32 * 1024 * 1024
        # number of files required to fill up 99% of the pool
        nr_files = int((pool_capacity * 0.99) / (TestVolumes.DEFAULT_FILE_SIZE * 1024 * 1024))

        # create subvolume
        self._fs_cmd("subvolume", "create", self.volname, subvolume, "--mode=777")

        # do some IO
        self._do_subvolume_io(subvolume, number_of_files=nr_files)

        # snapshot subvolume
        self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolume, snapshot)

        # add data pool
        new_pool = "new_pool"
        self.fs.add_data_pool(new_pool)

        # set a quota on the new pool, sized well below the source data
        self.fs.mon_manager.raw_cluster_cmd("osd", "pool", "set-quota", new_pool,
                                            "max_bytes", "{0}".format(pool_capacity // 4))

        # schedule a clone
        self._fs_cmd("subvolume", "snapshot", "clone", self.volname, subvolume, snapshot, clone1, "--pool_layout", new_pool)

        # check clone status -- this should dramatically overshoot the pool quota
        self._wait_for_clone_to_complete(clone1)

        # verify clone
        self._verify_clone(subvolume, snapshot, clone1, clone_pool=new_pool)

        # wait a bit so that subsequent I/O will give pool full error
        time.sleep(120)

        # schedule a clone
        self._fs_cmd("subvolume", "snapshot", "clone", self.volname, subvolume, snapshot, clone2, "--pool_layout", new_pool)

        # check clone status
        self._wait_for_clone_to_fail(clone2)

        # remove snapshot
        self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume, snapshot)

        # remove subvolumes
        self._fs_cmd("subvolume", "rm", self.volname, subvolume)
        self._fs_cmd("subvolume", "rm", self.volname, clone1)
        try:
            self._fs_cmd("subvolume", "rm", self.volname, clone2)
        except CommandFailedError as ce:
            if ce.exitstatus != errno.EAGAIN:
                raise RuntimeError("invalid error code when trying to remove failed clone")
        else:
            raise RuntimeError("expected error when removing a failed clone")

        # ... and with force, failed clone can be removed
        self._fs_cmd("subvolume", "rm", self.volname, clone2, "--force")

        # verify trash dir is clean
        self._wait_for_trash_empty()

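    # Worked numbers for the failure scenario above: pool_capacity is 32 MiB
    # and the quota on new_pool is pool_capacity // 4 = 8 MiB, while the
    # source holds nr_files = int(32 * 0.99) = 31 files of 1 MB each (with
    # the helper's 1 MB DEFAULT_FILE_SIZE). The first clone races past the
    # lazily-enforced quota; once enforcement kicks in, the second clone's
    # writes fail and the clone lands in the "failed" state.
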
    def test_subvolume_snapshot_clone_on_existing_subvolumes(self):
        subvolume1, subvolume2 = self._generate_random_subvolume_name(2)
        snapshot = self._generate_random_snapshot_name()
        clone = self._generate_random_clone_name()

        # create subvolumes
        self._fs_cmd("subvolume", "create", self.volname, subvolume1, "--mode=777")
        self._fs_cmd("subvolume", "create", self.volname, subvolume2, "--mode=777")

        # do some IO
        self._do_subvolume_io(subvolume1, number_of_files=32)

        # snapshot subvolume
        self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolume1, snapshot)

        # schedule a clone with target as subvolume2
        try:
            self._fs_cmd("subvolume", "snapshot", "clone", self.volname, subvolume1, snapshot, subvolume2)
        except CommandFailedError as ce:
            if ce.exitstatus != errno.EEXIST:
                raise RuntimeError("invalid error code when cloning to existing subvolume")
        else:
            raise RuntimeError("expected cloning to fail if the target is an existing subvolume")

        # schedule a clone
        self._fs_cmd("subvolume", "snapshot", "clone", self.volname, subvolume1, snapshot, clone)

        # schedule a clone with target as clone
        try:
            self._fs_cmd("subvolume", "snapshot", "clone", self.volname, subvolume1, snapshot, clone)
        except CommandFailedError as ce:
            if ce.exitstatus != errno.EEXIST:
                raise RuntimeError("invalid error code when cloning to existing clone")
        else:
            raise RuntimeError("expected cloning to fail if the target is an existing clone")

        # check clone status
        self._wait_for_clone_to_complete(clone)

        # verify clone
        self._verify_clone(subvolume1, snapshot, clone)

        # remove snapshot
        self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume1, snapshot)

        # remove subvolumes
        self._fs_cmd("subvolume", "rm", self.volname, subvolume1)
        self._fs_cmd("subvolume", "rm", self.volname, subvolume2)
        self._fs_cmd("subvolume", "rm", self.volname, clone)

        # verify trash dir is clean
        self._wait_for_trash_empty()

    def test_subvolume_snapshot_clone_pool_layout(self):
        subvolume = self._generate_random_subvolume_name()
        snapshot = self._generate_random_snapshot_name()
        clone = self._generate_random_clone_name()

        # add data pool
        new_pool = "new_pool"
        newid = self.fs.add_data_pool(new_pool)

        # create subvolume
        self._fs_cmd("subvolume", "create", self.volname, subvolume, "--mode=777")

        # do some IO
        self._do_subvolume_io(subvolume, number_of_files=32)

        # snapshot subvolume
        self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolume, snapshot)

        # schedule a clone
        self._fs_cmd("subvolume", "snapshot", "clone", self.volname, subvolume, snapshot, clone, "--pool_layout", new_pool)

        # check clone status
        self._wait_for_clone_to_complete(clone)

        # remove snapshot
        self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume, snapshot)

        subvol_path = self._get_subvolume_path(self.volname, clone)
        desired_pool = self.mount_a.getfattr(subvol_path, "ceph.dir.layout.pool")
        try:
            self.assertEqual(desired_pool, new_pool)
        except AssertionError:
            self.assertEqual(int(desired_pool), newid)  # old kernel returns id

        # remove subvolumes
        self._fs_cmd("subvolume", "rm", self.volname, subvolume)
        self._fs_cmd("subvolume", "rm", self.volname, clone)

        # verify trash dir is clean
        self._wait_for_trash_empty()

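    # The layout check above can be reproduced from any client mount; kernel
    # clients that predate pool-name support report the pool id instead of
    # its name, which is why the test accepts either form. A sketch:
    #
    #   getfattr -n ceph.dir.layout.pool <clone_path>
    #   # newer clients: ceph.dir.layout.pool="new_pool"
    #   # older kernels: ceph.dir.layout.pool="<pool id>", e.g. "3"
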
    def test_subvolume_snapshot_clone_under_group(self):
        subvolume = self._generate_random_subvolume_name()
        snapshot = self._generate_random_snapshot_name()
        clone = self._generate_random_clone_name()
        group = self._generate_random_group_name()

        # create subvolume
        self._fs_cmd("subvolume", "create", self.volname, subvolume, "--mode=777")

        # do some IO
        self._do_subvolume_io(subvolume, number_of_files=32)

        # snapshot subvolume
        self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolume, snapshot)

        # create group
        self._fs_cmd("subvolumegroup", "create", self.volname, group)

        # schedule a clone
        self._fs_cmd("subvolume", "snapshot", "clone", self.volname, subvolume, snapshot, clone, '--target_group_name', group)

        # check clone status
        self._wait_for_clone_to_complete(clone, clone_group=group)

        # verify clone
        self._verify_clone(subvolume, snapshot, clone, clone_group=group)

        # remove snapshot
        self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume, snapshot)

        # remove subvolumes
        self._fs_cmd("subvolume", "rm", self.volname, subvolume)
        self._fs_cmd("subvolume", "rm", self.volname, clone, group)

        # remove group
        self._fs_cmd("subvolumegroup", "rm", self.volname, group)

        # verify trash dir is clean
        self._wait_for_trash_empty()


    def test_subvolume_snapshot_clone_with_attrs(self):
        subvolume = self._generate_random_subvolume_name()
        snapshot = self._generate_random_snapshot_name()
        clone = self._generate_random_clone_name()

        # attribute values used for create, and changed values applied after the
        # snapshot (representative values; the create/update pairing is what matters)
        mode = "777"
        uid = "1000"
        gid = "1000"
        new_uid = "1001"
        new_gid = "1001"
        new_mode = "700"

        # create subvolume
        self._fs_cmd("subvolume", "create", self.volname, subvolume, "--mode", mode, "--uid", uid, "--gid", gid)

        # do some IO
        self._do_subvolume_io(subvolume, number_of_files=32)

        # snapshot subvolume
        self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolume, snapshot)

        # change subvolume attrs (to ensure clone picks up snapshot attrs)
        self._do_subvolume_attr_update(subvolume, new_uid, new_gid, new_mode)

        # schedule a clone
        self._fs_cmd("subvolume", "snapshot", "clone", self.volname, subvolume, snapshot, clone)

        # check clone status
        self._wait_for_clone_to_complete(clone)

        # verify clone
        self._verify_clone(subvolume, snapshot, clone)

        # remove snapshot
        self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume, snapshot)

        # remove subvolumes
        self._fs_cmd("subvolume", "rm", self.volname, subvolume)
        self._fs_cmd("subvolume", "rm", self.volname, clone)

        # verify trash dir is clean
        self._wait_for_trash_empty()
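
    # _do_subvolume_attr_update() lives in the helper base class; a minimal sketch
    # of the idea (hypothetical, not the upstream implementation) is to resolve the
    # subvolume path and change ownership/permissions through the client mount:
    def _sketch_attr_update(self, subvolume, uid, gid, mode):
        subvolpath = self._get_subvolume_path(self.volname, subvolume)
        self.mount_a.run_shell(['chown', '{0}:{1}'.format(uid, gid), subvolpath], sudo=True)
        self.mount_a.run_shell(['chmod', str(mode), subvolpath], sudo=True)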

    def test_subvolume_snapshot_clone_with_upgrade(self):
        """
        yet another poor man's upgrade test -- rather than going through a full
        upgrade cycle, emulate old-style subvolumes by going through the wormhole
        and verify the clone operation.
        further ensure that a legacy volume is not updated to v2, but the clone is.
        """
        subvolume = self._generate_random_subvolume_name()
        snapshot = self._generate_random_snapshot_name()
        clone = self._generate_random_clone_name()

        # emulate an old-fashioned subvolume
        createpath = os.path.join(".", "volumes", "_nogroup", subvolume)
        self.mount_a.run_shell_payload(f"mkdir -p -m 777 {createpath}", sudo=True)

        # add required xattrs to subvolume
        default_pool = self.mount_a.getfattr(".", "ceph.dir.layout.pool")
        self.mount_a.setfattr(createpath, 'ceph.dir.layout.pool', default_pool, sudo=True)

        # do some IO
        self._do_subvolume_io(subvolume, number_of_files=64)

        # snapshot subvolume
        self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolume, snapshot)

        # ensure metadata file is in legacy location, with required version v1
        self._assert_meta_location_and_version(self.volname, subvolume, version=1, legacy=True)

        # Insert delay at the beginning of snapshot clone
        self.config_set('mgr', 'mgr/volumes/snapshot_clone_delay', 2)

        # schedule a clone
        self._fs_cmd("subvolume", "snapshot", "clone", self.volname, subvolume, snapshot, clone)

        # snapshot should not be deletable now
        try:
            self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume, snapshot)
        except CommandFailedError as ce:
            self.assertEqual(ce.exitstatus, errno.EAGAIN,
                             msg="invalid error code when removing source snapshot of a clone")
        else:
            self.fail("expected removing source snapshot of a clone to fail")

        # check clone status
        self._wait_for_clone_to_complete(clone)

        # verify clone
        self._verify_clone(subvolume, snapshot, clone, source_version=1)

        # remove snapshot
        self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume, snapshot)

        # ensure metadata file is in v2 location, with required version v2
        self._assert_meta_location_and_version(self.volname, clone)

        # remove subvolumes
        self._fs_cmd("subvolume", "rm", self.volname, subvolume)
        self._fs_cmd("subvolume", "rm", self.volname, clone)

        # verify trash dir is clean
        self._wait_for_trash_empty()
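
    # While a clone is pending or in-progress, removing its source snapshot fails
    # with EAGAIN, as asserted above. A minimal state probe, mirroring the private
    # clone-state helper used by _wait_for_clone_to_complete (a sketch):
    def _clone_state(self, clone, clone_group=None):
        args = ["clone", "status", self.volname, clone]
        if clone_group is not None:
            args.append(clone_group)
        # 'clone status' returns JSON of the form:
        #   {"status": {"state": "pending" | "in-progress" | "complete"
        #                        | "failed" | "canceled", ...}}
        return json.loads(self._fs_cmd(*args))["status"]["state"]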

    def test_subvolume_snapshot_reconf_max_concurrent_clones(self):
        """
        Validate 'max_concurrent_clones' config option
        """

        # get the default number of cloner threads
        default_max_concurrent_clones = int(self.config_get('mgr', 'mgr/volumes/max_concurrent_clones'))
        self.assertEqual(default_max_concurrent_clones, 4)

        # Increase number of cloner threads
        self.config_set('mgr', 'mgr/volumes/max_concurrent_clones', 6)
        max_concurrent_clones = int(self.config_get('mgr', 'mgr/volumes/max_concurrent_clones'))
        self.assertEqual(max_concurrent_clones, 6)

        # Decrease number of cloner threads
        self.config_set('mgr', 'mgr/volumes/max_concurrent_clones', 2)
        max_concurrent_clones = int(self.config_get('mgr', 'mgr/volumes/max_concurrent_clones'))
        self.assertEqual(max_concurrent_clones, 2)

    def test_subvolume_snapshot_config_snapshot_clone_delay(self):
        """
        Validate 'snapshot_clone_delay' config option
        """

        # get the default delay before starting the clone
        default_timeout = int(self.config_get('mgr', 'mgr/volumes/snapshot_clone_delay'))
        self.assertEqual(default_timeout, 0)

        # Insert delay of 2 seconds at the beginning of the snapshot clone
        self.config_set('mgr', 'mgr/volumes/snapshot_clone_delay', 2)
        default_timeout = int(self.config_get('mgr', 'mgr/volumes/snapshot_clone_delay'))
        self.assertEqual(default_timeout, 2)

        # Decrease number of cloner threads
        self.config_set('mgr', 'mgr/volumes/max_concurrent_clones', 2)
        max_concurrent_clones = int(self.config_get('mgr', 'mgr/volumes/max_concurrent_clones'))
        self.assertEqual(max_concurrent_clones, 2)
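
    # config_get/config_set wrap the ceph config CLI; the shell equivalents for the
    # two mgr/volumes options exercised above are (values illustrative):
    #
    #   ceph config get mgr mgr/volumes/max_concurrent_clones
    #   ceph config set mgr mgr/volumes/max_concurrent_clones 6
    #   ceph config set mgr mgr/volumes/snapshot_clone_delay 2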

    def test_subvolume_under_group_snapshot_clone(self):
        subvolume = self._generate_random_subvolume_name()
        group = self._generate_random_group_name()
        snapshot = self._generate_random_snapshot_name()
        clone = self._generate_random_clone_name()

        # create group
        self._fs_cmd("subvolumegroup", "create", self.volname, group)

        # create subvolume
        self._fs_cmd("subvolume", "create", self.volname, subvolume, group, "--mode=777")

        # do some IO
        self._do_subvolume_io(subvolume, subvolume_group=group, number_of_files=32)

        # snapshot subvolume
        self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolume, snapshot, group)

        # schedule a clone
        self._fs_cmd("subvolume", "snapshot", "clone", self.volname, subvolume, snapshot, clone,
                     '--group_name', group)

        # check clone status
        self._wait_for_clone_to_complete(clone)

        # verify clone
        self._verify_clone(subvolume, snapshot, clone, source_group=group)

        # remove snapshot
        self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume, snapshot, group)

        # remove subvolumes
        self._fs_cmd("subvolume", "rm", self.volname, subvolume, group)
        self._fs_cmd("subvolume", "rm", self.volname, clone)

        # remove group
        self._fs_cmd("subvolumegroup", "rm", self.volname, group)

        # verify trash dir is clean
        self._wait_for_trash_empty()
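
    # Note the flag difference between the two group-clone tests: '--group_name'
    # names the group of the *source* subvolume/snapshot (as here), while
    # '--target_group_name' (used earlier) selects the group the *clone* is created
    # under -- which is why this clone is waited on, verified, and removed without
    # a group argument: it lands in the default group.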

class TestMisc(TestVolumesHelper):
    """Miscellaneous tests related to FS volume, subvolume group, and subvolume operations."""
    def test_connection_expiration(self):
        # unmount any cephfs mounts
        for i in range(0, self.CLIENTS_REQUIRED):
            self.mounts[i].umount_wait()
        sessions = self._session_list()
        self.assertLessEqual(len(sessions), 1)  # maybe mgr is already mounted

        # Get the mgr to definitely mount cephfs
        subvolume = self._generate_random_subvolume_name()
        self._fs_cmd("subvolume", "create", self.volname, subvolume)
        sessions = self._session_list()
        self.assertEqual(len(sessions), 1)

        # Now wait for the mgr to expire the connection:
        self.wait_until_evicted(sessions[0]['id'], timeout=90)

    def test_mgr_eviction(self):
        # unmount any cephfs mounts
        for i in range(0, self.CLIENTS_REQUIRED):
            self.mounts[i].umount_wait()
        sessions = self._session_list()
        self.assertLessEqual(len(sessions), 1)  # maybe mgr is already mounted

        # Get the mgr to definitely mount cephfs
        subvolume = self._generate_random_subvolume_name()
        self._fs_cmd("subvolume", "create", self.volname, subvolume)
        sessions = self._session_list()
        self.assertEqual(len(sessions), 1)

        # Now fail the mgr, check the session was evicted
        mgr = self.mgr_cluster.get_active_id()
        self.mgr_cluster.mgr_fail(mgr)
        self.wait_until_evicted(sessions[0]['id'])

    def test_names_can_only_be_goodchars(self):
        """
        Test that creating volumes, subvolumes and subvolume groups fails when
        their names use characters beyond [a-zA-Z0-9 -_.].
        """
        volname, badname = 'testvol', 'abcd@#'

        with self.assertRaises(CommandFailedError):
            self._fs_cmd('volume', 'create', badname)
        self._fs_cmd('volume', 'create', volname)

        with self.assertRaises(CommandFailedError):
            self._fs_cmd('subvolumegroup', 'create', volname, badname)

        with self.assertRaises(CommandFailedError):
            self._fs_cmd('subvolume', 'create', volname, badname)
        self._fs_cmd('volume', 'rm', volname, '--yes-i-really-mean-it')
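
    # The allowed set in the docstring maps to a simple pattern check; a sketch of
    # the rule (the real validation lives in the mgr volumes module, not here).
    # Note '-' is kept literal by placing it last in the character class:
    def _name_is_goodchars(self, name):
        import re
        return re.fullmatch(r'[a-zA-Z0-9 _.-]+', name) is not None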

    def test_subvolume_ops_on_nonexistent_vol(self):
        # tests fs subvolume operations on a non-existent volume

        volname = "non_existent_subvolume"

        # try subvolume operations
        for op in ("create", "rm", "getpath", "info", "resize", "pin", "ls"):
            try:
                if op == "resize":
                    self._fs_cmd("subvolume", "resize", volname, "subvolname_1", "inf")
                elif op == "pin":
                    self._fs_cmd("subvolume", "pin", volname, "subvolname_1", "export", "1")
                elif op == "ls":
                    self._fs_cmd("subvolume", "ls", volname)
                else:
                    self._fs_cmd("subvolume", op, volname, "subvolume_1")
            except CommandFailedError as ce:
                self.assertEqual(ce.exitstatus, errno.ENOENT)
            else:
                self.fail("expected the 'fs subvolume {0}' command to fail".format(op))

        # try subvolume snapshot operations and clone create
        for op in ("create", "rm", "info", "protect", "unprotect", "ls", "clone"):
            try:
                if op == "ls":
                    self._fs_cmd("subvolume", "snapshot", op, volname, "subvolume_1")
                elif op == "clone":
                    self._fs_cmd("subvolume", "snapshot", op, volname, "subvolume_1", "snapshot_1", "clone_1")
                else:
                    self._fs_cmd("subvolume", "snapshot", op, volname, "subvolume_1", "snapshot_1")
            except CommandFailedError as ce:
                self.assertEqual(ce.exitstatus, errno.ENOENT)
            else:
                self.fail("expected the 'fs subvolume snapshot {0}' command to fail".format(op))

        # try clone status
        try:
            self._fs_cmd("clone", "status", volname, "clone_1")
        except CommandFailedError as ce:
            self.assertEqual(ce.exitstatus, errno.ENOENT)
        else:
            self.fail("expected the 'fs clone status' command to fail")

        # try subvolumegroup operations
        for op in ("create", "rm", "getpath", "pin", "ls"):
            try:
                if op == "pin":
                    self._fs_cmd("subvolumegroup", "pin", volname, "group_1", "export", "0")
                elif op == "ls":
                    self._fs_cmd("subvolumegroup", op, volname)
                else:
                    self._fs_cmd("subvolumegroup", op, volname, "group_1")
            except CommandFailedError as ce:
                self.assertEqual(ce.exitstatus, errno.ENOENT)
            else:
                self.fail("expected the 'fs subvolumegroup {0}' command to fail".format(op))

        # try subvolumegroup snapshot operations
        for op in ("create", "rm", "ls"):
            try:
                if op == "ls":
                    self._fs_cmd("subvolumegroup", "snapshot", op, volname, "group_1")
                else:
                    self._fs_cmd("subvolumegroup", "snapshot", op, volname, "group_1", "snapshot_1")
            except CommandFailedError as ce:
                self.assertEqual(ce.exitstatus, errno.ENOENT)
            else:
                self.fail("expected the 'fs subvolumegroup snapshot {0}' command to fail".format(op))
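
    # The try/except/else pattern above repeats for every command family; a
    # hypothetical helper (not part of the upstream suite) could fold it into one
    # call:
    def _assert_cmd_fails_with_enoent(self, *args):
        try:
            self._fs_cmd(*args)
        except CommandFailedError as ce:
            self.assertEqual(ce.exitstatus, errno.ENOENT)
        else:
            self.fail("expected 'fs {0}' to fail with ENOENT".format(" ".join(args)))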

    def test_subvolume_upgrade_legacy_to_v1(self):
        """
        poor man's upgrade test -- rather than going through a full upgrade cycle,
        emulate legacy subvolumes by going through the wormhole and verify if they
        are accessible.
        further ensure that a legacy volume is not updated to v2.
        """
        subvolume1, subvolume2 = self._generate_random_subvolume_name(2)
        group = self._generate_random_group_name()

        # emulate an old-fashioned subvolume -- one in the default group and
        # the other in a custom group
        createpath1 = os.path.join(".", "volumes", "_nogroup", subvolume1)
        self.mount_a.run_shell(['mkdir', '-p', createpath1], sudo=True)

        # the 'mkdir -p' below also creates the custom group directory
        createpath2 = os.path.join(".", "volumes", group, subvolume2)
        self.mount_a.run_shell(['mkdir', '-p', createpath2], sudo=True)

        # this would auto-upgrade on access without anyone noticing
        subvolpath1 = self._fs_cmd("subvolume", "getpath", self.volname, subvolume1)
        self.assertNotEqual(subvolpath1, None)
        subvolpath1 = subvolpath1.rstrip()  # strip any trailing newline

        subvolpath2 = self._fs_cmd("subvolume", "getpath", self.volname, subvolume2, group)
        self.assertNotEqual(subvolpath2, None)
        subvolpath2 = subvolpath2.rstrip()  # strip any trailing newline

        # and... the subvolume path returned should be what we created behind the
        # scenes (createpath[1:] drops the leading "." so both sides start with "/")
        self.assertEqual(createpath1[1:], subvolpath1)
        self.assertEqual(createpath2[1:], subvolpath2)

        # ensure metadata file is in legacy location, with required version v1
        self._assert_meta_location_and_version(self.volname, subvolume1, version=1, legacy=True)
        self._assert_meta_location_and_version(self.volname, subvolume2, subvol_group=group, version=1, legacy=True)

        # remove subvolumes
        self._fs_cmd("subvolume", "rm", self.volname, subvolume1)
        self._fs_cmd("subvolume", "rm", self.volname, subvolume2, group)

        # verify trash dir is clean
        self._wait_for_trash_empty()

        # remove group
        self._fs_cmd("subvolumegroup", "rm", self.volname, group)

    def test_subvolume_no_upgrade_v1_sanity(self):
        """
        poor man's upgrade test -- theme continues...

        This test is to ensure v1 subvolumes are retained as is, due to a snapshot
        being present, and runs through a series of operations on the v1 subvolume
        to ensure they work as expected.
        """
        subvol_md = ["atime", "bytes_pcent", "bytes_quota", "bytes_used", "created_at", "ctime",
                     "data_pool", "gid", "mode", "mon_addrs", "mtime", "path", "pool_namespace",
                     "type", "uid", "features", "state"]
        snap_md = ["created_at", "data_pool", "has_pending_clones", "size"]

        subvolume = self._generate_random_subvolume_name()
        snapshot = self._generate_random_snapshot_name()
        clone1, clone2 = self._generate_random_clone_name(2)

        # attribute values for the idempotent create below (representative values)
        mode = "777"
        uid = "1000"
        gid = "1000"

        # emulate a v1 subvolume -- in the default group
        subvolume_path = self._create_v1_subvolume(subvolume)

        # getpath
        subvolpath = self._get_subvolume_path(self.volname, subvolume)
        self.assertEqual(subvolpath, subvolume_path)

        # ls
        subvolumes = json.loads(self._fs_cmd('subvolume', 'ls', self.volname))
        self.assertEqual(len(subvolumes), 1, "subvolume ls count mismatch, expected '1', found {0}".format(len(subvolumes)))
        self.assertEqual(subvolumes[0]['name'], subvolume,
                         "subvolume name mismatch in ls output, expected '{0}', found '{1}'".format(subvolume, subvolumes[0]['name']))

        # info
        subvol_info = json.loads(self._get_subvolume_info(self.volname, subvolume))
        for md in subvol_md:
            self.assertIn(md, subvol_info, "'{0}' key not present in metadata of subvolume".format(md))

        self.assertEqual(subvol_info["state"], "complete",
                         msg="expected state to be 'complete', found '{0}'".format(subvol_info["state"]))
        self.assertEqual(len(subvol_info["features"]), 2,
                         msg="expected 2 features, found '{0}' ({1})".format(len(subvol_info["features"]), subvol_info["features"]))
        for feature in ['snapshot-clone', 'snapshot-autoprotect']:
            self.assertIn(feature, subvol_info["features"], msg="expected feature '{0}' in subvolume".format(feature))

        # resize
        nsize = self.DEFAULT_FILE_SIZE*1024*1024*10
        self._fs_cmd("subvolume", "resize", self.volname, subvolume, str(nsize))
        subvol_info = json.loads(self._get_subvolume_info(self.volname, subvolume))
        for md in subvol_md:
            self.assertIn(md, subvol_info, "'{0}' key not present in metadata of subvolume".format(md))
        self.assertEqual(subvol_info["bytes_quota"], nsize, "bytes_quota should be set to '{0}'".format(nsize))

        # create (idempotent) (change some attrs, to ensure attrs are preserved from the snapshot on clone)
        self._fs_cmd("subvolume", "create", self.volname, subvolume, "--mode", mode, "--uid", uid, "--gid", gid)

        # do some IO
        self._do_subvolume_io(subvolume, number_of_files=8)

        # snapshot
        self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolume, snapshot)

        # clone
        self._fs_cmd("subvolume", "snapshot", "clone", self.volname, subvolume, snapshot, clone1)

        # check clone status
        self._wait_for_clone_to_complete(clone1)

        # ensure clone is v2
        self._assert_meta_location_and_version(self.volname, clone1, version=2)

        # verify clone
        self._verify_clone(subvolume, snapshot, clone1, source_version=1)

        # clone (older snapshot)
        self._fs_cmd("subvolume", "snapshot", "clone", self.volname, subvolume, 'fake', clone2)

        # check clone status
        self._wait_for_clone_to_complete(clone2)

        # ensure clone is v2
        self._assert_meta_location_and_version(self.volname, clone2, version=2)

        # verify clone
        # TODO: rentries will mismatch till this is fixed https://tracker.ceph.com/issues/46747
        #self._verify_clone(subvolume, 'fake', clone2, source_version=1)

        # snapshot info
        snap_info = json.loads(self._get_subvolume_snapshot_info(self.volname, subvolume, snapshot))
        for md in snap_md:
            self.assertIn(md, snap_info, "'{0}' key not present in metadata of snapshot".format(md))
        self.assertEqual(snap_info["has_pending_clones"], "no")

        # snapshot ls
        subvol_snapshots = json.loads(self._fs_cmd('subvolume', 'snapshot', 'ls', self.volname, subvolume))
        self.assertEqual(len(subvol_snapshots), 2, "subvolume snapshot ls count mismatch, expected '2', found {0}".format(len(subvol_snapshots)))
        snapshotnames = [snapshot['name'] for snapshot in subvol_snapshots]
        for name in [snapshot, 'fake']:
            self.assertIn(name, snapshotnames, msg="expected snapshot '{0}' in subvolume snapshot ls".format(name))

        # remove snapshots
        self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume, snapshot)
        self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume, "fake")

        # ensure volume is still at version 1
        self._assert_meta_location_and_version(self.volname, subvolume, version=1)

        # remove subvolumes
        self._fs_cmd("subvolume", "rm", self.volname, subvolume)
        self._fs_cmd("subvolume", "rm", self.volname, clone1)
        self._fs_cmd("subvolume", "rm", self.volname, clone2)

        # verify trash dir is clean
        self._wait_for_trash_empty()
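
    # Feature expectations by version, as asserted above: a v1 subvolume advertises
    # exactly 'snapshot-clone' and 'snapshot-autoprotect', while clones come out as
    # v2 regardless of source version. (Upstream Ceph later adds a
    # 'snapshot-retention' feature to v2 subvolumes, which is why the feature count
    # here is pinned rather than open-ended.)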

    def test_subvolume_no_upgrade_v1_to_v2(self):
        """
        poor man's upgrade test -- theme continues...
        ensure v1 to v2 upgrades are not done automatically, given the various
        states a v1 subvolume can be in
        """
        subvolume1, subvolume2, subvolume3 = self._generate_random_subvolume_name(3)
        group = self._generate_random_group_name()

        # emulate a v1 subvolume -- in the default group
        subvol1_path = self._create_v1_subvolume(subvolume1)

        # emulate a v1 subvolume -- in a custom group
        subvol2_path = self._create_v1_subvolume(subvolume2, subvol_group=group)

        # emulate a v1 subvolume -- in a clone pending state
        self._create_v1_subvolume(subvolume3, subvol_type='clone', has_snapshot=False, state='pending')

        # this would attempt auto-upgrade on access, but fail to do so as snapshots exist
        subvolpath1 = self._get_subvolume_path(self.volname, subvolume1)
        self.assertEqual(subvolpath1, subvol1_path)

        subvolpath2 = self._get_subvolume_path(self.volname, subvolume2, group_name=group)
        self.assertEqual(subvolpath2, subvol2_path)

        # this would attempt auto-upgrade on access, but fail to do so as the subvolume is not complete
        # use clone status, as only certain operations are allowed in pending state
        status = json.loads(self._fs_cmd("clone", "status", self.volname, subvolume3))
        self.assertEqual(status["status"]["state"], "pending")

        # remove snapshots
        self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume1, "fake")
        self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume2, "fake", group)

        # ensure metadata file is in v1 location, with version retained as v1
        self._assert_meta_location_and_version(self.volname, subvolume1, version=1)
        self._assert_meta_location_and_version(self.volname, subvolume2, subvol_group=group, version=1)

        # remove subvolumes
        self._fs_cmd("subvolume", "rm", self.volname, subvolume1)
        self._fs_cmd("subvolume", "rm", self.volname, subvolume2, group)
        try:
            self._fs_cmd("subvolume", "rm", self.volname, subvolume3)
        except CommandFailedError as ce:
            self.assertEqual(ce.exitstatus, errno.EAGAIN, "invalid error code on rm of subvolume undergoing clone")
        else:
            self.fail("expected rm of subvolume undergoing clone to fail")

        # ensure metadata file is in v1 location, with version retained as v1
        self._assert_meta_location_and_version(self.volname, subvolume3, version=1)
        self._fs_cmd("subvolume", "rm", self.volname, subvolume3, "--force")

        # verify list subvolumes returns an empty list
        subvolumels = json.loads(self._fs_cmd('subvolume', 'ls', self.volname))
        self.assertEqual(len(subvolumels), 0)

        # verify trash dir is clean
        self._wait_for_trash_empty()

    def test_subvolume_upgrade_v1_to_v2(self):
        """
        poor man's upgrade test -- theme continues...
        ensure v1 to v2 upgrades work
        """
        subvolume1, subvolume2 = self._generate_random_subvolume_name(2)
        group = self._generate_random_group_name()

        # emulate a v1 subvolume -- in the default group
        subvol1_path = self._create_v1_subvolume(subvolume1, has_snapshot=False)

        # emulate a v1 subvolume -- in a custom group
        subvol2_path = self._create_v1_subvolume(subvolume2, subvol_group=group, has_snapshot=False)

        # this would attempt auto-upgrade on access
        subvolpath1 = self._get_subvolume_path(self.volname, subvolume1)
        self.assertEqual(subvolpath1, subvol1_path)

        subvolpath2 = self._get_subvolume_path(self.volname, subvolume2, group_name=group)
        self.assertEqual(subvolpath2, subvol2_path)

        # ensure metadata file is in v2 location, with version retained as v2
        self._assert_meta_location_and_version(self.volname, subvolume1, version=2)
        self._assert_meta_location_and_version(self.volname, subvolume2, subvol_group=group, version=2)

        # remove subvolumes
        self._fs_cmd("subvolume", "rm", self.volname, subvolume1)
        self._fs_cmd("subvolume", "rm", self.volname, subvolume2, group)

        # verify trash dir is clean
        self._wait_for_trash_empty()
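
    # Where _assert_meta_location_and_version looks, per the conventions these
    # tests rely on (a sketch, not a spec): v1/v2 subvolumes keep their metadata at
    # ./volumes/<group>/<subvol>/.meta, while legacy subvolumes track it under
    # ./volumes/_legacy/<hash>.meta, with <hash> derived from an md5 digest of the
    # subvolume path.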