# NOTE(review): git-blame table header residue ("Commit | Line | Data |") removed;
# the content below is the annotated source of the volumes test module.
81eedcae TL |
1 | import os |
2 | import json | |
92f5a8d4 | 3 | import time |
81eedcae TL |
4 | import errno |
5 | import random | |
6 | import logging | |
eafe8130 | 7 | import collections |
adb31ebb TL |
8 | import uuid |
9 | import unittest | |
10 | from hashlib import md5 | |
11 | from textwrap import dedent | |
33c7a0ef | 12 | from io import StringIO |
81eedcae TL |
13 | |
14 | from tasks.cephfs.cephfs_test_case import CephFSTestCase | |
f67539c2 | 15 | from tasks.cephfs.fuse_mount import FuseMount |
81eedcae TL |
16 | from teuthology.exceptions import CommandFailedError |
17 | ||
18 | log = logging.getLogger(__name__) | |
19 | ||
class TestVolumesHelper(CephFSTestCase):
    """Helper class for testing FS volume, subvolume group and subvolume operations."""
    # prefix for the files written by _do_subvolume_io
    TEST_FILE_NAME_PREFIX="subvolume_file"

    # for filling subvolume with data
    CLIENTS_REQUIRED = 2
    MDSS_REQUIRED = 2

    # io defaults
    DEFAULT_FILE_SIZE = 1 # MB
    DEFAULT_NUMBER_OF_FILES = 1024

    def _fs_cmd(self, *args):
        """Run "ceph fs <args...>" and return its stdout."""
        return self.get_ceph_cmd_stdout("fs", *args)

    def _raw_cmd(self, *args):
        """Run a bare "ceph <args...>" command (no "fs" prefix) and return its stdout."""
        # NOTE(review): passes the args tuple as a single positional, unlike
        # _fs_cmd which splats it — relies on get_ceph_cmd_stdout accepting
        # either form; confirm against CephTestCase.
        return self.get_ceph_cmd_stdout(args)

    def __check_clone_state(self, state, clone, clone_group=None, timo=120):
        """Poll "fs clone status" once per second until the clone reaches
        `state`; fail the test if it has not after `timo` polls."""
        check = 0
        args = ["clone", "status", self.volname, clone]
        if clone_group:
            args.append(clone_group)
        args = tuple(args)
        while check < timo:
            result = json.loads(self._fs_cmd(*args))
            if result["status"]["state"] == state:
                break
            check += 1
            time.sleep(1)
        # check == timo means the loop ran out without hitting the state
        self.assertTrue(check < timo)

    def _get_clone_status(self, clone, clone_group=None):
        """Return the parsed JSON output of "fs clone status" for `clone`."""
        args = ["clone", "status", self.volname, clone]
        if clone_group:
            args.append(clone_group)
        args = tuple(args)
        result = json.loads(self._fs_cmd(*args))
        return result

    def _wait_for_clone_to_complete(self, clone, clone_group=None, timo=120):
        """Block until the clone reports state "complete" (or fail the test)."""
        self.__check_clone_state("complete", clone, clone_group, timo)

    def _wait_for_clone_to_fail(self, clone, clone_group=None, timo=120):
        """Block until the clone reports state "failed" (or fail the test)."""
        self.__check_clone_state("failed", clone, clone_group, timo)

    def _wait_for_clone_to_be_in_progress(self, clone, clone_group=None, timo=120):
        """Block until the clone reports state "in-progress" (or fail the test)."""
        self.__check_clone_state("in-progress", clone, clone_group, timo)

    def _check_clone_canceled(self, clone, clone_group=None):
        """Assert the clone is already in state "canceled" (single poll, timo=1)."""
        self.__check_clone_state("canceled", clone, clone_group, timo=1)

    def _get_subvolume_snapshot_path(self, subvolume, snapshot, source_group, subvol_path, source_version):
        """Construct the on-disk path of `snapshot` for a v1 or v2 subvolume.

        `subvol_path` is the subvolume path captured when the snapshot was
        taken; pass it when the subvolume itself has since been removed.
        """
        if source_version == 2:
            # v2: snapshot dir lives beside the uuid dir -> <base>/.snap/<snap>/<uuid>
            if subvol_path is not None:
                (base_path, uuid_str) = os.path.split(subvol_path)
            else:
                (base_path, uuid_str) = os.path.split(self._get_subvolume_path(self.volname, subvolume, group_name=source_group))
            return os.path.join(base_path, ".snap", snapshot, uuid_str)

        # v1: snapshot dir is directly under the subvolume path
        base_path = self._get_subvolume_path(self.volname, subvolume, group_name=source_group)
        return os.path.join(base_path, ".snap", snapshot)

    def _verify_clone_attrs(self, source_path, clone_path):
        """Walk `source_path` and assert each entry's inode attributes match
        the corresponding entry under `clone_path`."""
        path1 = source_path
        path2 = clone_path

        p = self.mount_a.run_shell(["find", path1])
        paths = p.stdout.getvalue().strip().split()

        # for each entry in source and clone (sink) verify certain inode attributes:
        # inode type, mode, ownership, [am]time.
        for source_path in paths:
            sink_entry = source_path[len(path1)+1:]
            sink_path = os.path.join(path2, sink_entry)

            # mode+type
            sval = int(self.mount_a.run_shell(['stat', '-c' '%f', source_path]).stdout.getvalue().strip(), 16)
            cval = int(self.mount_a.run_shell(['stat', '-c' '%f', sink_path]).stdout.getvalue().strip(), 16)
            self.assertEqual(sval, cval)

            # ownership
            sval = int(self.mount_a.run_shell(['stat', '-c' '%u', source_path]).stdout.getvalue().strip())
            cval = int(self.mount_a.run_shell(['stat', '-c' '%u', sink_path]).stdout.getvalue().strip())
            self.assertEqual(sval, cval)

            sval = int(self.mount_a.run_shell(['stat', '-c' '%g', source_path]).stdout.getvalue().strip())
            cval = int(self.mount_a.run_shell(['stat', '-c' '%g', sink_path]).stdout.getvalue().strip())
            self.assertEqual(sval, cval)

            # inode timestamps
            # do not check access as kclient will generally not update this like ceph-fuse will.
            sval = int(self.mount_a.run_shell(['stat', '-c' '%Y', source_path]).stdout.getvalue().strip())
            cval = int(self.mount_a.run_shell(['stat', '-c' '%Y', sink_path]).stdout.getvalue().strip())
            self.assertEqual(sval, cval)

    def _verify_clone_root(self, source_path, clone_path, clone, clone_group, clone_pool):
        # verifies following clone root attrs quota, data_pool and pool_namespace
        # remaining attributes of clone root are validated in _verify_clone_attrs

        clone_info = json.loads(self._get_subvolume_info(self.volname, clone, clone_group))

        # verify quota is inherited from source snapshot
        src_quota = self.mount_a.getfattr(source_path, "ceph.quota.max_bytes")
        # FIXME: kclient fails to get this quota value: https://tracker.ceph.com/issues/48075
        if isinstance(self.mount_a, FuseMount):
            self.assertEqual(clone_info["bytes_quota"], "infinite" if src_quota is None else int(src_quota))

        if clone_pool:
            # verify pool is set as per request
            self.assertEqual(clone_info["data_pool"], clone_pool)
        else:
            # verify pool and pool namespace are inherited from snapshot
            self.assertEqual(clone_info["data_pool"],
                             self.mount_a.getfattr(source_path, "ceph.dir.layout.pool"))
            self.assertEqual(clone_info["pool_namespace"],
                             self.mount_a.getfattr(source_path, "ceph.dir.layout.pool_namespace"))

    def _verify_clone(self, subvolume, snapshot, clone,
                      source_group=None, clone_group=None, clone_pool=None,
                      subvol_path=None, source_version=2, timo=120):
        """Verify `clone` matches the source snapshot: rentries (when the
        source still exists), root attrs, and per-entry inode attrs."""
        # pass in subvol_path (subvolume path when snapshot was taken) when subvolume is removed
        # but snapshots are retained for clone verification
        path1 = self._get_subvolume_snapshot_path(subvolume, snapshot, source_group, subvol_path, source_version)
        path2 = self._get_subvolume_path(self.volname, clone, group_name=clone_group)

        check = 0
        # TODO: currently snapshot rentries are not stable if snapshot source entries
        # are removed, https://tracker.ceph.com/issues/46747
        while check < timo and subvol_path is None:
            val1 = int(self.mount_a.getfattr(path1, "ceph.dir.rentries"))
            val2 = int(self.mount_a.getfattr(path2, "ceph.dir.rentries"))
            if val1 == val2:
                break
            check += 1
            time.sleep(1)
        self.assertTrue(check < timo)

        self._verify_clone_root(path1, path2, clone, clone_group, clone_pool)
        self._verify_clone_attrs(path1, path2)

    def _gen_name(self, name, n):
        """Generate `n` randomized names with prefix `name`; returns a single
        string when n == 1, otherwise a list."""
        names = [f'{name}{random.randrange(0, 9999)}{i}' for i in range(n)]
        return names[0] if n == 1 else names

    def _gen_vol_name(self, n=1):
        """Generate randomized volume name(s)."""
        return self._gen_name('vol', n)

    def _gen_subvol_name(self, n=1):
        """Generate randomized subvolume name(s)."""
        return self._gen_name('subvol', n)

    def _gen_subvol_grp_name(self, n=1):
        """Generate randomized subvolume group name(s)."""
        return self._gen_name('subvol_grp', n)

    def _gen_subvol_snap_name(self, n=1):
        """Generate randomized subvolume snapshot name(s)."""
        return self._gen_name('subvol_snap', n)

    def _gen_subvol_clone_name(self, n=1):
        """Generate randomized subvolume clone name(s)."""
        return self._gen_name('subvol_clone', n)

    def _enable_multi_fs(self):
        """Allow multiple filesystems on the cluster (required for volume tests)."""
        self._fs_cmd("flag", "set", "enable_multiple", "true", "--yes-i-really-mean-it")

    def _create_or_reuse_test_volume(self):
        """Reuse the first existing volume, or create a fresh one and remember
        (via self.vol_created) that tearDown must delete it."""
        result = json.loads(self._fs_cmd("volume", "ls"))
        if len(result) == 0:
            self.vol_created = True
            self.volname = self._gen_vol_name()
            self._fs_cmd("volume", "create", self.volname)
        else:
            self.volname = result[0]['name']

    def _get_volume_info(self, vol_name, human_readable=False):
        """Return raw "fs volume info" output; `human_readable` is passed
        through as an extra CLI token (e.g. "--human_readable") when truthy."""
        if human_readable:
            args = ["volume", "info", vol_name, human_readable]
        else:
            args = ["volume", "info", vol_name]
        args = tuple(args)
        vol_md = self._fs_cmd(*args)
        return vol_md

    def _get_subvolume_group_path(self, vol_name, group_name):
        """Return the group path from "subvolumegroup getpath", relative to
        the mount root."""
        args = ("subvolumegroup", "getpath", vol_name, group_name)
        path = self._fs_cmd(*args)
        # remove the leading '/', and trailing whitespaces
        return path[1:].rstrip()

    def _get_subvolume_group_info(self, vol_name, group_name):
        """Return raw "subvolumegroup info" output."""
        args = ["subvolumegroup", "info", vol_name, group_name]
        args = tuple(args)
        group_md = self._fs_cmd(*args)
        return group_md

    def _get_subvolume_path(self, vol_name, subvol_name, group_name=None):
        """Return the subvolume path from "subvolume getpath", relative to
        the mount root."""
        args = ["subvolume", "getpath", vol_name, subvol_name]
        if group_name:
            args.append(group_name)
        args = tuple(args)
        path = self._fs_cmd(*args)
        # remove the leading '/', and trailing whitespaces
        return path[1:].rstrip()

    def _get_subvolume_info(self, vol_name, subvol_name, group_name=None):
        """Return raw "subvolume info" output."""
        args = ["subvolume", "info", vol_name, subvol_name]
        if group_name:
            args.append(group_name)
        args = tuple(args)
        subvol_md = self._fs_cmd(*args)
        return subvol_md

    def _get_subvolume_snapshot_info(self, vol_name, subvol_name, snapname, group_name=None):
        """Return raw "subvolume snapshot info" output."""
        args = ["subvolume", "snapshot", "info", vol_name, subvol_name, snapname]
        if group_name:
            args.append(group_name)
        args = tuple(args)
        snap_md = self._fs_cmd(*args)
        return snap_md

    def _delete_test_volume(self):
        """Remove the volume created by _create_or_reuse_test_volume."""
        self._fs_cmd("volume", "rm", self.volname, "--yes-i-really-mean-it")

    def _do_subvolume_pool_and_namespace_update(self, subvolume, pool=None, pool_namespace=None, subvolume_group=None):
        """Change a subvolume's data pool and/or pool namespace out of band
        via layout xattrs on its root directory."""
        subvolpath = self._get_subvolume_path(self.volname, subvolume, group_name=subvolume_group)

        if pool is not None:
            self.mount_a.setfattr(subvolpath, 'ceph.dir.layout.pool', pool, sudo=True)

        if pool_namespace is not None:
            self.mount_a.setfattr(subvolpath, 'ceph.dir.layout.pool_namespace', pool_namespace, sudo=True)

    def _do_subvolume_attr_update(self, subvolume, uid, gid, mode, subvolume_group=None):
        """Change mode and ownership of a subvolume's root directory out of band."""
        subvolpath = self._get_subvolume_path(self.volname, subvolume, group_name=subvolume_group)

        # mode
        self.mount_a.run_shell(['sudo', 'chmod', mode, subvolpath], omit_sudo=False)

        # ownership
        self.mount_a.run_shell(['sudo', 'chown', uid, subvolpath], omit_sudo=False)
        self.mount_a.run_shell(['sudo', 'chgrp', gid, subvolpath], omit_sudo=False)

    def _do_subvolume_io(self, subvolume, subvolume_group=None, create_dir=None,
                         number_of_files=DEFAULT_NUMBER_OF_FILES, file_size=DEFAULT_FILE_SIZE):
        """Fill a subvolume (optionally under a fresh subdirectory) with
        `number_of_files` files of `file_size` MB each."""
        # get subvolume path for IO
        args = ["subvolume", "getpath", self.volname, subvolume]
        if subvolume_group:
            args.append(subvolume_group)
        args = tuple(args)
        subvolpath = self._fs_cmd(*args)
        self.assertNotEqual(subvolpath, None)
        subvolpath = subvolpath[1:].rstrip() # remove "/" prefix and any trailing newline

        io_path = subvolpath
        if create_dir:
            io_path = os.path.join(subvolpath, create_dir)
            self.mount_a.run_shell_payload(f"mkdir -p {io_path}")

        log.debug("filling subvolume {0} with {1} files each {2}MB size under directory {3}".format(subvolume, number_of_files, file_size, io_path))
        for i in range(number_of_files):
            # NOTE(review): forward-references subclass TestVolumes for a constant
            # defined on this class — self.TEST_FILE_NAME_PREFIX would be safer.
            filename = "{0}.{1}".format(TestVolumes.TEST_FILE_NAME_PREFIX, i)
            self.mount_a.write_n_mb(os.path.join(io_path, filename), file_size)

    def _do_subvolume_io_mixed(self, subvolume, subvolume_group=None):
        """Populate a subvolume with a mix of inode types (dir + symlinks),
        including one symlink owned by a different (nobody) uid/gid."""
        subvolpath = self._get_subvolume_path(self.volname, subvolume, group_name=subvolume_group)

        reg_file = "regfile.0"
        dir_path = os.path.join(subvolpath, "dir.0")
        sym_path1 = os.path.join(subvolpath, "sym.0")
        # this symlink's ownership would be changed
        sym_path2 = os.path.join(dir_path, "sym.0")

        self.mount_a.run_shell(["mkdir", dir_path])
        self.mount_a.run_shell(["ln", "-s", "./{}".format(reg_file), sym_path1])
        self.mount_a.run_shell(["ln", "-s", "./{}".format(reg_file), sym_path2])
        # flip ownership to nobody. assumption: nobody's id is 65534
        self.mount_a.run_shell(["sudo", "chown", "-h", "65534:65534", sym_path2], omit_sudo=False)

    def _wait_for_trash_empty(self, timeout=60):
        """Wait for the volume-level trash ("volumes/_deleting") to drain."""
        # XXX: construct the trash dir path (note that there is no mgr
        # [sub]volume interface for this).
        trashdir = os.path.join("./", "volumes", "_deleting")
        self.mount_a.wait_for_dir_empty(trashdir, timeout=timeout)

    def _wait_for_subvol_trash_empty(self, subvol, group="_nogroup", timeout=30):
        """Wait for a subvolume's own ".trash" directory to drain."""
        trashdir = os.path.join("./", "volumes", group, subvol, ".trash")
        try:
            self.mount_a.wait_for_dir_empty(trashdir, timeout=timeout)
        except CommandFailedError as ce:
            # NOTE(review): this condition looks inverted — non-ENOENT failures
            # are swallowed while ENOENT (trash dir absent, i.e. nothing to
            # wait for) is re-raised; confirm intent against callers.
            if ce.exitstatus != errno.ENOENT:
                pass
            else:
                raise

    def _assert_meta_location_and_version(self, vol_name, subvol_name, subvol_group=None, version=2, legacy=False):
        """Assert a subvolume's .meta file exists at the expected location
        (legacy hashed path or group path) and declares `version`."""
        if legacy:
            # legacy meta file name is the md5 of the absolute subvolume path
            subvol_path = self._get_subvolume_path(vol_name, subvol_name, group_name=subvol_group)
            m = md5()
            m.update(("/"+subvol_path).encode('utf-8'))
            meta_filename = "{0}.meta".format(m.digest().hex())
            metapath = os.path.join(".", "volumes", "_legacy", meta_filename)
        else:
            group = subvol_group if subvol_group is not None else '_nogroup'
            metapath = os.path.join(".", "volumes", group, subvol_name, ".meta")

        out = self.mount_a.run_shell(['sudo', 'cat', metapath], omit_sudo=False)
        lines = out.stdout.getvalue().strip().split('\n')
        sv_version = -1
        for line in lines:
            if line == "version = " + str(version):
                sv_version = version
                break
        self.assertEqual(sv_version, version, "version expected was '{0}' but got '{1}' from meta file at '{2}'".format(
                         version, sv_version, metapath))

    def _create_v1_subvolume(self, subvol_name, subvol_group=None, has_snapshot=True, subvol_type='subvolume', state='complete'):
        """Hand-craft an on-disk v1 subvolume (dir layout, xattrs, .meta file)
        and return its created path."""
        group = subvol_group if subvol_group is not None else '_nogroup'
        basepath = os.path.join("volumes", group, subvol_name)
        uuid_str = str(uuid.uuid4())
        createpath = os.path.join(basepath, uuid_str)
        self.mount_a.run_shell(['sudo', 'mkdir', '-p', createpath], omit_sudo=False)

        # create a v1 snapshot, to prevent auto upgrades
        if has_snapshot:
            snappath = os.path.join(createpath, ".snap", "fake")
            self.mount_a.run_shell(['sudo', 'mkdir', '-p', snappath], omit_sudo=False)

        # add required xattrs to subvolume
        default_pool = self.mount_a.getfattr(".", "ceph.dir.layout.pool")
        self.mount_a.setfattr(createpath, 'ceph.dir.layout.pool', default_pool, sudo=True)

        # create a v1 .meta file
        meta_contents = "[GLOBAL]\nversion = 1\ntype = {0}\npath = {1}\nstate = {2}\n".format(subvol_type, "/" + createpath, state)
        if state == 'pending':
            # add a fake clone source
            meta_contents = meta_contents + '[source]\nvolume = fake\nsubvolume = fake\nsnapshot = fake\n'
        meta_filepath1 = os.path.join(self.mount_a.mountpoint, basepath, ".meta")
        self.mount_a.client_remote.write_file(meta_filepath1, meta_contents, sudo=True)
        return createpath

    def _update_fake_trash(self, subvol_name, subvol_group=None, trash_name='fake', create=True):
        """Create (or remove, when create=False) a fake entry in a subvolume's
        ".trash" directory."""
        group = subvol_group if subvol_group is not None else '_nogroup'
        trashpath = os.path.join("volumes", group, subvol_name, '.trash', trash_name)
        if create:
            self.mount_a.run_shell(['sudo', 'mkdir', '-p', trashpath], omit_sudo=False)
        else:
            self.mount_a.run_shell(['sudo', 'rmdir', trashpath], omit_sudo=False)

    def _configure_guest_auth(self, guest_mount, authid, key):
        """
        Set up auth credentials for a guest client.
        """
        # Create keyring file for the guest client.
        keyring_txt = dedent("""
        [client.{authid}]
        key = {key}

        """.format(authid=authid,key=key))

        guest_mount.client_id = authid
        guest_keyring_path = guest_mount.client_remote.mktemp(
            data=keyring_txt)
        # Add a guest client section to the ceph config file.
        self.config_set("client.{0}".format(authid), "debug client", 20)
        self.config_set("client.{0}".format(authid), "debug objecter", 20)
        # NOTE(review): the conf points at guest_mount.get_keyring_path() while
        # the keyring written above lives at the mktemp path returned below —
        # verify callers install the temp file at the configured location.
        self.set_conf("client.{0}".format(authid),
                      "keyring", guest_mount.get_keyring_path())

        return guest_keyring_path

    def _auth_metadata_get(self, filedata):
        """
        Return a deserialized JSON object, or None
        """
        try:
            data = json.loads(filedata)
        except json.decoder.JSONDecodeError:
            data = None
        return data

    def setUp(self):
        """Ensure a volume exists (created or reused) before each test."""
        super(TestVolumesHelper, self).setUp()
        self.volname = None
        self.vol_created = False
        self._enable_multi_fs()
        self._create_or_reuse_test_volume()
        self.config_set('mon', 'mon_allow_pool_delete', True)

    def tearDown(self):
        """Delete the volume only if this test created it."""
        if self.vol_created:
            self._delete_test_volume()
        super(TestVolumesHelper, self).tearDown()
92f5a8d4 | 412 | |
92f5a8d4 | 413 | |
class TestVolumes(TestVolumesHelper):
    """Tests for FS volume operations."""
    def test_volume_create(self):
        """
        That the volume can be created and then cleans up
        """
        volname = self._gen_vol_name()
        self._fs_cmd("volume", "create", volname)
        volumels = json.loads(self._fs_cmd("volume", "ls"))

        if not (volname in ([volume['name'] for volume in volumels])):
            raise RuntimeError("Error creating volume '{0}'".format(volname))

        # check that the pools were created with the correct config
        # (data pools must carry the "bulk" flag, the metadata pool must not)
        pool_details = json.loads(self._raw_cmd("osd", "pool", "ls", "detail", "--format=json"))
        pool_flags = {}
        for pool in pool_details:
            pool_flags[pool["pool_id"]] = pool["flags_names"].split(",")

        volume_details = json.loads(self._fs_cmd("get", volname, "--format=json"))
        for data_pool_id in volume_details['mdsmap']['data_pools']:
            self.assertIn("bulk", pool_flags[data_pool_id])
        meta_pool_id = volume_details['mdsmap']['metadata_pool']
        self.assertNotIn("bulk", pool_flags[meta_pool_id])

        # clean up
        self._fs_cmd("volume", "rm", volname, "--yes-i-really-mean-it")

    def test_volume_ls(self):
        """
        That the existing and the newly created volumes can be listed and
        finally cleans up.
        """
        vls = json.loads(self._fs_cmd("volume", "ls"))
        volumes = [volume['name'] for volume in vls]

        #create new volumes and add it to the existing list of volumes
        volumenames = self._gen_vol_name(2)
        for volumename in volumenames:
            self._fs_cmd("volume", "create", volumename)
        volumes.extend(volumenames)

        # list volumes
        try:
            volumels = json.loads(self._fs_cmd('volume', 'ls'))
            if len(volumels) == 0:
                raise RuntimeError("Expected the 'fs volume ls' command to list the created volumes.")
            else:
                volnames = [volume['name'] for volume in volumels]
                # order-insensitive comparison of expected vs listed names
                if collections.Counter(volnames) != collections.Counter(volumes):
                    raise RuntimeError("Error creating or listing volumes")
        finally:
            # clean up
            for volume in volumenames:
                self._fs_cmd("volume", "rm", volume, "--yes-i-really-mean-it")

    def test_volume_rm(self):
        """
        That the volume can only be removed when --yes-i-really-mean-it is used
        and verify that the deleted volume is not listed anymore.
        """
        for m in self.mounts:
            m.umount_wait()
        try:
            # without the confirmation flag this must fail with EPERM
            self._fs_cmd("volume", "rm", self.volname)
        except CommandFailedError as ce:
            if ce.exitstatus != errno.EPERM:
                raise RuntimeError("expected the 'fs volume rm' command to fail with EPERM, "
                                   "but it failed with {0}".format(ce.exitstatus))
            else:
                # with the flag the removal must succeed
                self._fs_cmd("volume", "rm", self.volname, "--yes-i-really-mean-it")

                #check if it's gone
                volumes = json.loads(self._fs_cmd("volume", "ls", "--format=json-pretty"))
                if (self.volname in [volume['name'] for volume in volumes]):
                    raise RuntimeError("Expected the 'fs volume rm' command to succeed. "
                                       "The volume {0} not removed.".format(self.volname))
        else:
            # try/else: reaching here means the unconfirmed rm did not raise
            raise RuntimeError("expected the 'fs volume rm' command to fail.")

    def test_volume_rm_arbitrary_pool_removal(self):
        """
        That the arbitrary pool added to the volume out of band is removed
        successfully on volume removal.
        """
        for m in self.mounts:
            m.umount_wait()
        new_pool = "new_pool"
        # add arbitrary data pool
        self.fs.add_data_pool(new_pool)
        vol_status = json.loads(self._fs_cmd("status", self.volname, "--format=json-pretty"))
        self._fs_cmd("volume", "rm", self.volname, "--yes-i-really-mean-it")

        #check if fs is gone
        volumes = json.loads(self._fs_cmd("volume", "ls", "--format=json-pretty"))
        volnames = [volume['name'] for volume in volumes]
        self.assertNotIn(self.volname, volnames)

        #check if osd pools are gone
        pools = json.loads(self._raw_cmd("osd", "pool", "ls", "--format=json-pretty"))
        for pool in vol_status["pools"]:
            self.assertNotIn(pool["name"], pools)

    def test_volume_rm_when_mon_delete_pool_false(self):
        """
        That the volume can only be removed when mon_allowd_pool_delete is set
        to true and verify that the pools are removed after volume deletion.
        """
        for m in self.mounts:
            m.umount_wait()
        self.config_set('mon', 'mon_allow_pool_delete', False)
        try:
            self._fs_cmd("volume", "rm", self.volname, "--yes-i-really-mean-it")
        except CommandFailedError as ce:
            self.assertEqual(ce.exitstatus, errno.EPERM,
                             "expected the 'fs volume rm' command to fail with EPERM, "
                             "but it failed with {0}".format(ce.exitstatus))
        # capture pool list before the (now permitted) removal
        vol_status = json.loads(self._fs_cmd("status", self.volname, "--format=json-pretty"))
        self.config_set('mon', 'mon_allow_pool_delete', True)
        self._fs_cmd("volume", "rm", self.volname, "--yes-i-really-mean-it")

        #check if fs is gone
        volumes = json.loads(self._fs_cmd("volume", "ls", "--format=json-pretty"))
        volnames = [volume['name'] for volume in volumes]
        self.assertNotIn(self.volname, volnames,
                         "volume {0} exists after removal".format(self.volname))
        #check if pools are gone
        pools = json.loads(self._raw_cmd("osd", "pool", "ls", "--format=json-pretty"))
        for pool in vol_status["pools"]:
            self.assertNotIn(pool["name"], pools,
                             "pool {0} exists after volume removal".format(pool["name"]))

    def test_volume_info(self):
        """
        Tests the 'fs volume info' command
        """
        vol_fields = ["pools", "used_size", "pending_subvolume_deletions", "mon_addrs"]
        group = self._gen_subvol_grp_name()
        # create subvolumegroup
        self._fs_cmd("subvolumegroup", "create", self.volname, group)
        # get volume metadata
        vol_info = json.loads(self._get_volume_info(self.volname))
        for md in vol_fields:
            self.assertIn(md, vol_info,
                          f"'{md}' key not present in metadata of volume")
        self.assertEqual(vol_info["used_size"], 0,
                         "Size should be zero when volumes directory is empty")

    def test_volume_info_pending_subvol_deletions(self):
        """
        Tests the pending_subvolume_deletions in 'fs volume info' command
        """
        subvolname = self._gen_subvol_name()
        # create subvolume
        self._fs_cmd("subvolume", "create", self.volname, subvolname, "--mode=777")
        # create 3K zero byte files
        self._do_subvolume_io(subvolname, number_of_files=3000, file_size=0)
        # Delete the subvolume
        self._fs_cmd("subvolume", "rm", self.volname, subvolname)
        # get volume metadata
        vol_info = json.loads(self._get_volume_info(self.volname))
        # NOTE(review): assertion checks "not equal 0" but the message claims
        # "should be 1" — message and condition disagree; confirm intent.
        self.assertNotEqual(vol_info['pending_subvolume_deletions'], 0,
                            "pending_subvolume_deletions should be 1")
        # verify trash dir is clean
        self._wait_for_trash_empty()

    def test_volume_info_without_subvolumegroup(self):
        """
        Tests the 'fs volume info' command without subvolume group
        """
        vol_fields = ["pools", "mon_addrs"]
        # get volume metadata
        vol_info = json.loads(self._get_volume_info(self.volname))
        for md in vol_fields:
            self.assertIn(md, vol_info,
                          f"'{md}' key not present in metadata of volume")
        self.assertNotIn("used_size", vol_info,
                         "'used_size' should not be present in absence of subvolumegroup")
        self.assertNotIn("pending_subvolume_deletions", vol_info,
                         "'pending_subvolume_deletions' should not be present in absence"
                         " of subvolumegroup")

    def test_volume_info_with_human_readable_flag(self):
        """
        Tests the 'fs volume info --human_readable' command
        """
        vol_fields = ["pools", "used_size", "pending_subvolume_deletions", "mon_addrs"]
        group = self._gen_subvol_grp_name()
        # create subvolumegroup
        self._fs_cmd("subvolumegroup", "create", self.volname, group)
        # get volume metadata
        vol_info = json.loads(self._get_volume_info(self.volname, "--human_readable"))
        for md in vol_fields:
            self.assertIn(md, vol_info,
                          f"'{md}' key not present in metadata of volume")
        # human-readable sizes end in a unit suffix character
        units = [' ', 'k', 'M', 'G', 'T', 'P', 'E']
        assert vol_info["used_size"][-1] in units, "unit suffix in used_size is absent"
        assert vol_info["pools"]["data"][0]["avail"][-1] in units, "unit suffix in avail data is absent"
        assert vol_info["pools"]["data"][0]["used"][-1] in units, "unit suffix in used data is absent"
        assert vol_info["pools"]["metadata"][0]["avail"][-1] in units, "unit suffix in avail metadata is absent"
        assert vol_info["pools"]["metadata"][0]["used"][-1] in units, "unit suffix in used metadata is absent"
        self.assertEqual(int(vol_info["used_size"]), 0,
                         "Size should be zero when volumes directory is empty")

    def test_volume_info_with_human_readable_flag_without_subvolumegroup(self):
        """
        Tests the 'fs volume info --human_readable' command without subvolume group
        """
        vol_fields = ["pools", "mon_addrs"]
        # get volume metadata
        vol_info = json.loads(self._get_volume_info(self.volname, "--human_readable"))
        for md in vol_fields:
            self.assertIn(md, vol_info,
                          f"'{md}' key not present in metadata of volume")
        # human-readable sizes end in a unit suffix character
        units = [' ', 'k', 'M', 'G', 'T', 'P', 'E']
        assert vol_info["pools"]["data"][0]["avail"][-1] in units, "unit suffix in avail data is absent"
        assert vol_info["pools"]["data"][0]["used"][-1] in units, "unit suffix in used data is absent"
        assert vol_info["pools"]["metadata"][0]["avail"][-1] in units, "unit suffix in avail metadata is absent"
        assert vol_info["pools"]["metadata"][0]["used"][-1] in units, "unit suffix in used metadata is absent"
        self.assertNotIn("used_size", vol_info,
                         "'used_size' should not be present in absence of subvolumegroup")
        self.assertNotIn("pending_subvolume_deletions", vol_info,
                         "'pending_subvolume_deletions' should not be present in absence"
                         " of subvolumegroup")
638 | ||
639 | ||
class TestRenameCmd(TestVolumesHelper):
    """Tests for the 'fs volume rename' command."""

    def test_volume_rename(self):
        """
        That volume, its file system and pools, can be renamed.
        """
        for m in self.mounts:
            m.umount_wait()
        oldvolname = self.volname
        newvolname = self._gen_vol_name()
        # renaming also renames the backing pools to the new naming scheme
        new_data_pool, new_metadata_pool = f"cephfs.{newvolname}.data", f"cephfs.{newvolname}.meta"
        self._fs_cmd("volume", "rename", oldvolname, newvolname,
                     "--yes-i-really-mean-it")
        volumels = json.loads(self._fs_cmd('volume', 'ls'))
        volnames = [volume['name'] for volume in volumels]
        # volume name changed
        self.assertIn(newvolname, volnames)
        self.assertNotIn(oldvolname, volnames)
        # pool names changed
        self.fs.get_pool_names(refresh=True)
        self.assertEqual(new_metadata_pool, self.fs.get_metadata_pool_name())
        self.assertEqual(new_data_pool, self.fs.get_data_pool_name())

    def test_volume_rename_idempotency(self):
        """
        That volume rename is idempotent.
        """
        for m in self.mounts:
            m.umount_wait()
        oldvolname = self.volname
        newvolname = self._gen_vol_name()
        new_data_pool, new_metadata_pool = f"cephfs.{newvolname}.data", f"cephfs.{newvolname}.meta"
        # second rename of an already-renamed volume must succeed unchanged
        self._fs_cmd("volume", "rename", oldvolname, newvolname,
                     "--yes-i-really-mean-it")
        self._fs_cmd("volume", "rename", oldvolname, newvolname,
                     "--yes-i-really-mean-it")
        volumels = json.loads(self._fs_cmd('volume', 'ls'))
        volnames = [volume['name'] for volume in volumels]
        self.assertIn(newvolname, volnames)
        self.assertNotIn(oldvolname, volnames)
        self.fs.get_pool_names(refresh=True)
        self.assertEqual(new_metadata_pool, self.fs.get_metadata_pool_name())
        self.assertEqual(new_data_pool, self.fs.get_data_pool_name())
683 | ||
684 | def test_volume_rename_fails_without_confirmation_flag(self): | |
685 | """ | |
686 | That renaming volume fails without --yes-i-really-mean-it flag. | |
687 | """ | |
f38dd50b TL |
688 | newvolname = self._gen_vol_name() |
689 | ||
1d09f67e TL |
690 | try: |
691 | self._fs_cmd("volume", "rename", self.volname, newvolname) | |
692 | except CommandFailedError as ce: | |
693 | self.assertEqual(ce.exitstatus, errno.EPERM, | |
694 | "invalid error code on renaming a FS volume without the " | |
695 | "'--yes-i-really-mean-it' flag") | |
696 | else: | |
697 | self.fail("expected renaming of FS volume to fail without the " | |
698 | "'--yes-i-really-mean-it' flag") | |
699 | ||
700 | def test_volume_rename_for_more_than_one_data_pool(self): | |
701 | """ | |
702 | That renaming a volume with more than one data pool does not change | |
703 | the name of the data pools. | |
704 | """ | |
705 | for m in self.mounts: | |
706 | m.umount_wait() | |
707 | self.fs.add_data_pool('another-data-pool') | |
708 | oldvolname = self.volname | |
f38dd50b | 709 | newvolname = self._gen_vol_name() |
1d09f67e TL |
710 | self.fs.get_pool_names(refresh=True) |
711 | orig_data_pool_names = list(self.fs.data_pools.values()) | |
712 | new_metadata_pool = f"cephfs.{newvolname}.meta" | |
713 | self._fs_cmd("volume", "rename", self.volname, newvolname, | |
714 | "--yes-i-really-mean-it") | |
715 | volumels = json.loads(self._fs_cmd('volume', 'ls')) | |
716 | volnames = [volume['name'] for volume in volumels] | |
717 | # volume name changed | |
718 | self.assertIn(newvolname, volnames) | |
719 | self.assertNotIn(oldvolname, volnames) | |
720 | self.fs.get_pool_names(refresh=True) | |
721 | # metadata pool name changed | |
722 | self.assertEqual(new_metadata_pool, self.fs.get_metadata_pool_name()) | |
723 | # data pool names unchanged | |
724 | self.assertCountEqual(orig_data_pool_names, list(self.fs.data_pools.values())) | |
725 | ||
2a845540 TL |
726 | def test_volume_info(self): |
727 | """ | |
728 | Tests the 'fs volume info' command | |
729 | """ | |
730 | vol_fields = ["pools", "used_size", "pending_subvolume_deletions", "mon_addrs"] | |
f38dd50b | 731 | group = self._gen_subvol_grp_name() |
2a845540 TL |
732 | # create subvolumegroup |
733 | self._fs_cmd("subvolumegroup", "create", self.volname, group) | |
734 | # get volume metadata | |
735 | vol_info = json.loads(self._get_volume_info(self.volname)) | |
736 | for md in vol_fields: | |
737 | self.assertIn(md, vol_info, | |
738 | f"'{md}' key not present in metadata of volume") | |
739 | self.assertEqual(vol_info["used_size"], 0, | |
740 | "Size should be zero when volumes directory is empty") | |
741 | ||
aee94f69 TL |
742 | def test_volume_info_pending_subvol_deletions(self): |
743 | """ | |
744 | Tests the pending_subvolume_deletions in 'fs volume info' command | |
745 | """ | |
f38dd50b | 746 | subvolname = self._gen_subvol_name() |
aee94f69 TL |
747 | # create subvolume |
748 | self._fs_cmd("subvolume", "create", self.volname, subvolname, "--mode=777") | |
749 | # create 3K zero byte files | |
750 | self._do_subvolume_io(subvolname, number_of_files=3000, file_size=0) | |
751 | # Delete the subvolume | |
752 | self._fs_cmd("subvolume", "rm", self.volname, subvolname) | |
753 | # get volume metadata | |
754 | vol_info = json.loads(self._get_volume_info(self.volname)) | |
755 | self.assertNotEqual(vol_info['pending_subvolume_deletions'], 0, | |
756 | "pending_subvolume_deletions should be 1") | |
757 | # verify trash dir is clean | |
758 | self._wait_for_trash_empty() | |
759 | ||
2a845540 TL |
760 | def test_volume_info_without_subvolumegroup(self): |
761 | """ | |
762 | Tests the 'fs volume info' command without subvolume group | |
763 | """ | |
764 | vol_fields = ["pools", "mon_addrs"] | |
765 | # get volume metadata | |
766 | vol_info = json.loads(self._get_volume_info(self.volname)) | |
767 | for md in vol_fields: | |
768 | self.assertIn(md, vol_info, | |
769 | f"'{md}' key not present in metadata of volume") | |
770 | self.assertNotIn("used_size", vol_info, | |
771 | "'used_size' should not be present in absence of subvolumegroup") | |
772 | self.assertNotIn("pending_subvolume_deletions", vol_info, | |
773 | "'pending_subvolume_deletions' should not be present in absence" | |
774 | " of subvolumegroup") | |
775 | ||
39ae355f TL |
776 | def test_volume_info_with_human_readable_flag(self): |
777 | """ | |
778 | Tests the 'fs volume info --human_readable' command | |
779 | """ | |
780 | vol_fields = ["pools", "used_size", "pending_subvolume_deletions", "mon_addrs"] | |
f38dd50b | 781 | group = self._gen_subvol_grp_name() |
39ae355f TL |
782 | # create subvolumegroup |
783 | self._fs_cmd("subvolumegroup", "create", self.volname, group) | |
784 | # get volume metadata | |
785 | vol_info = json.loads(self._get_volume_info(self.volname, "--human_readable")) | |
786 | for md in vol_fields: | |
787 | self.assertIn(md, vol_info, | |
788 | f"'{md}' key not present in metadata of volume") | |
789 | units = [' ', 'k', 'M', 'G', 'T', 'P', 'E'] | |
790 | assert vol_info["used_size"][-1] in units, "unit suffix in used_size is absent" | |
791 | assert vol_info["pools"]["data"][0]["avail"][-1] in units, "unit suffix in avail data is absent" | |
792 | assert vol_info["pools"]["data"][0]["used"][-1] in units, "unit suffix in used data is absent" | |
793 | assert vol_info["pools"]["metadata"][0]["avail"][-1] in units, "unit suffix in avail metadata is absent" | |
794 | assert vol_info["pools"]["metadata"][0]["used"][-1] in units, "unit suffix in used metadata is absent" | |
795 | self.assertEqual(int(vol_info["used_size"]), 0, | |
796 | "Size should be zero when volumes directory is empty") | |
797 | ||
798 | def test_volume_info_with_human_readable_flag_without_subvolumegroup(self): | |
799 | """ | |
800 | Tests the 'fs volume info --human_readable' command without subvolume group | |
801 | """ | |
802 | vol_fields = ["pools", "mon_addrs"] | |
803 | # get volume metadata | |
804 | vol_info = json.loads(self._get_volume_info(self.volname, "--human_readable")) | |
805 | for md in vol_fields: | |
806 | self.assertIn(md, vol_info, | |
807 | f"'{md}' key not present in metadata of volume") | |
808 | units = [' ', 'k', 'M', 'G', 'T', 'P', 'E'] | |
809 | assert vol_info["pools"]["data"][0]["avail"][-1] in units, "unit suffix in avail data is absent" | |
810 | assert vol_info["pools"]["data"][0]["used"][-1] in units, "unit suffix in used data is absent" | |
811 | assert vol_info["pools"]["metadata"][0]["avail"][-1] in units, "unit suffix in avail metadata is absent" | |
812 | assert vol_info["pools"]["metadata"][0]["used"][-1] in units, "unit suffix in used metadata is absent" | |
813 | self.assertNotIn("used_size", vol_info, | |
814 | "'used_size' should not be present in absence of subvolumegroup") | |
815 | self.assertNotIn("pending_subvolume_deletions", vol_info, | |
816 | "'pending_subvolume_deletions' should not be present in absence" | |
817 | " of subvolumegroup") | |
818 | ||
81eedcae | 819 | |
f67539c2 TL |
820 | class TestSubvolumeGroups(TestVolumesHelper): |
821 | """Tests for FS subvolume group operations.""" | |
822 | def test_default_uid_gid_subvolume_group(self): | |
f38dd50b | 823 | group = self._gen_subvol_grp_name() |
f67539c2 TL |
824 | expected_uid = 0 |
825 | expected_gid = 0 | |
81eedcae | 826 | |
f67539c2 TL |
827 | # create group |
828 | self._fs_cmd("subvolumegroup", "create", self.volname, group) | |
829 | group_path = self._get_subvolume_group_path(self.volname, group) | |
81eedcae | 830 | |
f67539c2 TL |
831 | # check group's uid and gid |
832 | stat = self.mount_a.stat(group_path) | |
833 | self.assertEqual(stat['st_uid'], expected_uid) | |
834 | self.assertEqual(stat['st_gid'], expected_gid) | |
835 | ||
836 | # remove group | |
837 | self._fs_cmd("subvolumegroup", "rm", self.volname, group) | |
838 | ||
    def test_nonexistent_subvolume_group_create(self):
        # Creating a subvolume inside a group that does not exist must fail
        # with ENOENT; the group is not auto-created.
        subvolume = self._gen_subvol_name()
        group = "non_existent_group"

        # try, creating subvolume in a nonexistent group
        try:
            self._fs_cmd("subvolume", "create", self.volname, subvolume, "--group_name", group)
        except CommandFailedError as ce:
            if ce.exitstatus != errno.ENOENT:
                # any other error code is a genuine failure -- propagate
                raise
        else:
            raise RuntimeError("expected the 'fs subvolume create' command to fail")
81eedcae | 851 | |
f67539c2 TL |
    def test_nonexistent_subvolume_group_rm(self):
        # Removing a group that was never created must fail with ENOENT.
        group = "non_existent_group"

        # try, remove subvolume group
        try:
            self._fs_cmd("subvolumegroup", "rm", self.volname, group)
        except CommandFailedError as ce:
            if ce.exitstatus != errno.ENOENT:
                # unexpected error code -- propagate
                raise
        else:
            raise RuntimeError("expected the 'fs subvolumegroup rm' command to fail")
92f5a8d4 | 863 | |
f67539c2 | 864 | def test_subvolume_group_create_with_auto_cleanup_on_fail(self): |
f38dd50b | 865 | group = self._gen_subvol_grp_name() |
f67539c2 TL |
866 | data_pool = "invalid_pool" |
867 | # create group with invalid data pool layout | |
868 | with self.assertRaises(CommandFailedError): | |
869 | self._fs_cmd("subvolumegroup", "create", self.volname, group, "--pool_layout", data_pool) | |
92f5a8d4 | 870 | |
f67539c2 TL |
871 | # check whether group path is cleaned up |
872 | try: | |
873 | self._fs_cmd("subvolumegroup", "getpath", self.volname, group) | |
874 | except CommandFailedError as ce: | |
875 | if ce.exitstatus != errno.ENOENT: | |
876 | raise | |
877 | else: | |
878 | raise RuntimeError("expected the 'fs subvolumegroup getpath' command to fail") | |
92f5a8d4 | 879 | |
f67539c2 | 880 | def test_subvolume_group_create_with_desired_data_pool_layout(self): |
f38dd50b | 881 | group1, group2 = self._gen_subvol_grp_name(2) |
92f5a8d4 | 882 | |
f67539c2 TL |
883 | # create group |
884 | self._fs_cmd("subvolumegroup", "create", self.volname, group1) | |
885 | group1_path = self._get_subvolume_group_path(self.volname, group1) | |
92f5a8d4 | 886 | |
f67539c2 TL |
887 | default_pool = self.mount_a.getfattr(group1_path, "ceph.dir.layout.pool") |
888 | new_pool = "new_pool" | |
889 | self.assertNotEqual(default_pool, new_pool) | |
adb31ebb | 890 | |
f67539c2 TL |
891 | # add data pool |
892 | newid = self.fs.add_data_pool(new_pool) | |
adb31ebb | 893 | |
f67539c2 TL |
894 | # create group specifying the new data pool as its pool layout |
895 | self._fs_cmd("subvolumegroup", "create", self.volname, group2, | |
896 | "--pool_layout", new_pool) | |
897 | group2_path = self._get_subvolume_group_path(self.volname, group2) | |
92f5a8d4 | 898 | |
f67539c2 TL |
899 | desired_pool = self.mount_a.getfattr(group2_path, "ceph.dir.layout.pool") |
900 | try: | |
901 | self.assertEqual(desired_pool, new_pool) | |
902 | except AssertionError: | |
903 | self.assertEqual(int(desired_pool), newid) # old kernel returns id | |
92f5a8d4 | 904 | |
f67539c2 TL |
905 | self._fs_cmd("subvolumegroup", "rm", self.volname, group1) |
906 | self._fs_cmd("subvolumegroup", "rm", self.volname, group2) | |
92f5a8d4 | 907 | |
f67539c2 | 908 | def test_subvolume_group_create_with_desired_mode(self): |
f38dd50b | 909 | group1, group2 = self._gen_subvol_grp_name(2) |
f67539c2 TL |
910 | # default mode |
911 | expected_mode1 = "755" | |
912 | # desired mode | |
913 | expected_mode2 = "777" | |
92f5a8d4 | 914 | |
f67539c2 | 915 | # create group |
522d829b | 916 | self._fs_cmd("subvolumegroup", "create", self.volname, group2, f"--mode={expected_mode2}") |
a4b75251 | 917 | self._fs_cmd("subvolumegroup", "create", self.volname, group1) |
92f5a8d4 | 918 | |
f67539c2 TL |
919 | group1_path = self._get_subvolume_group_path(self.volname, group1) |
920 | group2_path = self._get_subvolume_group_path(self.volname, group2) | |
a4b75251 | 921 | volumes_path = os.path.dirname(group1_path) |
adb31ebb | 922 | |
f67539c2 TL |
923 | # check group's mode |
924 | actual_mode1 = self.mount_a.run_shell(['stat', '-c' '%a', group1_path]).stdout.getvalue().strip() | |
925 | actual_mode2 = self.mount_a.run_shell(['stat', '-c' '%a', group2_path]).stdout.getvalue().strip() | |
a4b75251 | 926 | actual_mode3 = self.mount_a.run_shell(['stat', '-c' '%a', volumes_path]).stdout.getvalue().strip() |
f67539c2 TL |
927 | self.assertEqual(actual_mode1, expected_mode1) |
928 | self.assertEqual(actual_mode2, expected_mode2) | |
a4b75251 | 929 | self.assertEqual(actual_mode3, expected_mode1) |
adb31ebb | 930 | |
f67539c2 TL |
931 | self._fs_cmd("subvolumegroup", "rm", self.volname, group1) |
932 | self._fs_cmd("subvolumegroup", "rm", self.volname, group2) | |
933 | ||
    def test_subvolume_group_create_with_desired_uid_gid(self):
        """
        That the subvolume group can be created with the desired uid and gid and its uid and gid matches the
        expected values.
        """
        uid = 1000
        gid = 1000

        # create subvolume group
        subvolgroupname = self._gen_subvol_grp_name()
        self._fs_cmd("subvolumegroup", "create", self.volname, subvolgroupname, "--uid", str(uid), "--gid", str(gid))

        # make sure it exists
        subvolgrouppath = self._get_subvolume_group_path(self.volname, subvolgroupname)
        self.assertNotEqual(subvolgrouppath, None)

        # verify the uid and gid via stat on a client mount
        # NOTE: '-c' '%u' is adjacent-string concatenation, a single '-c%u' arg
        suid = int(self.mount_a.run_shell(['stat', '-c' '%u', subvolgrouppath]).stdout.getvalue().strip())
        sgid = int(self.mount_a.run_shell(['stat', '-c' '%g', subvolgrouppath]).stdout.getvalue().strip())
        self.assertEqual(uid, suid)
        self.assertEqual(gid, sgid)

        # remove group
        self._fs_cmd("subvolumegroup", "rm", self.volname, subvolgroupname)
958 | ||
    def test_subvolume_group_create_with_invalid_data_pool_layout(self):
        # A pool layout naming a nonexistent pool must be rejected with EINVAL.
        group = self._gen_subvol_grp_name()
        data_pool = "invalid_pool"
        # create group with invalid data pool layout
        try:
            self._fs_cmd("subvolumegroup", "create", self.volname, group, "--pool_layout", data_pool)
        except CommandFailedError as ce:
            if ce.exitstatus != errno.EINVAL:
                # unexpected error code -- propagate
                raise
        else:
            raise RuntimeError("expected the 'fs subvolumegroup create' command to fail")
92f5a8d4 | 970 | |
2a845540 TL |
971 | def test_subvolume_group_create_with_size(self): |
972 | # create group with size -- should set quota | |
f38dd50b | 973 | group = self._gen_subvol_grp_name() |
2a845540 TL |
974 | self._fs_cmd("subvolumegroup", "create", self.volname, group, "1000000000") |
975 | ||
976 | # get group metadata | |
977 | group_info = json.loads(self._get_subvolume_group_info(self.volname, group)) | |
978 | self.assertEqual(group_info["bytes_quota"], 1000000000) | |
979 | ||
980 | # remove group | |
981 | self._fs_cmd("subvolumegroup", "rm", self.volname, group) | |
982 | ||
    def test_subvolume_group_info(self):
        # tests the 'fs subvolumegroup info' command

        # keys that must always be present in the group metadata
        group_md = ["atime", "bytes_pcent", "bytes_quota", "bytes_used", "created_at", "ctime",
                    "data_pool", "gid", "mode", "mon_addrs", "mtime", "uid"]

        # create group
        group = self._gen_subvol_grp_name()
        self._fs_cmd("subvolumegroup", "create", self.volname, group)

        # get group metadata
        group_info = json.loads(self._get_subvolume_group_info(self.volname, group))
        for md in group_md:
            self.assertIn(md, group_info, "'{0}' key not present in metadata of group".format(md))

        # with no quota set, the usage fields are placeholders
        self.assertEqual(group_info["bytes_pcent"], "undefined", "bytes_pcent should be set to undefined if quota is not set")
        self.assertEqual(group_info["bytes_quota"], "infinite", "bytes_quota should be set to infinite if quota is not set")
        self.assertEqual(group_info["uid"], 0)
        self.assertEqual(group_info["gid"], 0)

        # set a quota via resize and re-check the metadata
        nsize = self.DEFAULT_FILE_SIZE*1024*1024
        self._fs_cmd("subvolumegroup", "resize", self.volname, group, str(nsize))

        # get group metadata after quota set
        group_info = json.loads(self._get_subvolume_group_info(self.volname, group))
        for md in group_md:
            self.assertIn(md, group_info, "'{0}' key not present in metadata of subvolume".format(md))

        self.assertNotEqual(group_info["bytes_pcent"], "undefined", "bytes_pcent should not be set to undefined if quota is set")
        self.assertEqual(group_info["bytes_quota"], nsize, "bytes_quota should be set to '{0}'".format(nsize))

        # remove group
        self._fs_cmd("subvolumegroup", "rm", self.volname, group)
1016 | ||
1017 | def test_subvolume_group_create_idempotence(self): | |
1018 | # create group | |
f38dd50b | 1019 | group = self._gen_subvol_grp_name() |
2a845540 TL |
1020 | self._fs_cmd("subvolumegroup", "create", self.volname, group) |
1021 | ||
1022 | # try creating w/ same subvolume group name -- should be idempotent | |
1023 | self._fs_cmd("subvolumegroup", "create", self.volname, group) | |
1024 | ||
1025 | # remove group | |
1026 | self._fs_cmd("subvolumegroup", "rm", self.volname, group) | |
1027 | ||
    def test_subvolume_group_create_idempotence_mode(self):
        # Re-creating an existing group with --mode must apply the new mode.
        # create group
        group = self._gen_subvol_grp_name()
        self._fs_cmd("subvolumegroup", "create", self.volname, group)

        # try creating w/ same subvolume group name with mode -- should set mode
        self._fs_cmd("subvolumegroup", "create", self.volname, group, "--mode=766")

        group_path = self._get_subvolume_group_path(self.volname, group)

        # check subvolumegroup's mode
        # NOTE: '-c' '%a' is adjacent-string concatenation, a single '-c%a' arg
        mode = self.mount_a.run_shell(['stat', '-c' '%a', group_path]).stdout.getvalue().strip()
        self.assertEqual(mode, "766")

        # remove group
        self._fs_cmd("subvolumegroup", "rm", self.volname, group)
1044 | ||
1045 | def test_subvolume_group_create_idempotence_uid_gid(self): | |
1046 | desired_uid = 1000 | |
1047 | desired_gid = 1000 | |
1048 | ||
1049 | # create group | |
f38dd50b | 1050 | group = self._gen_subvol_grp_name() |
2a845540 TL |
1051 | self._fs_cmd("subvolumegroup", "create", self.volname, group) |
1052 | ||
1053 | # try creating w/ same subvolume group name with uid/gid -- should set uid/gid | |
1054 | self._fs_cmd("subvolumegroup", "create", self.volname, group, "--uid", str(desired_uid), "--gid", str(desired_gid)) | |
1055 | ||
1056 | group_path = self._get_subvolume_group_path(self.volname, group) | |
1057 | ||
1058 | # verify the uid and gid | |
1059 | actual_uid = int(self.mount_a.run_shell(['stat', '-c' '%u', group_path]).stdout.getvalue().strip()) | |
1060 | actual_gid = int(self.mount_a.run_shell(['stat', '-c' '%g', group_path]).stdout.getvalue().strip()) | |
1061 | self.assertEqual(desired_uid, actual_uid) | |
1062 | self.assertEqual(desired_gid, actual_gid) | |
1063 | ||
1064 | # remove group | |
1065 | self._fs_cmd("subvolumegroup", "rm", self.volname, group) | |
1066 | ||
    def test_subvolume_group_create_idempotence_data_pool(self):
        # Re-creating an existing group with --pool_layout must switch its
        # data pool layout.
        # create group
        group = self._gen_subvol_grp_name()
        self._fs_cmd("subvolumegroup", "create", self.volname, group)

        group_path = self._get_subvolume_group_path(self.volname, group)

        default_pool = self.mount_a.getfattr(group_path, "ceph.dir.layout.pool")
        new_pool = "new_pool"
        # sanity: the new pool name must differ from the default layout pool
        self.assertNotEqual(default_pool, new_pool)

        # add data pool
        newid = self.fs.add_data_pool(new_pool)

        # try creating w/ same subvolume group name with new data pool -- should set pool
        self._fs_cmd("subvolumegroup", "create", self.volname, group, "--pool_layout", new_pool)
        desired_pool = self.mount_a.getfattr(group_path, "ceph.dir.layout.pool")
        try:
            self.assertEqual(desired_pool, new_pool)
        except AssertionError:
            # older kernels report the pool id rather than its name
            self.assertEqual(int(desired_pool), newid)  # old kernel returns id

        # remove group
        self._fs_cmd("subvolumegroup", "rm", self.volname, group)
1091 | ||
1092 | def test_subvolume_group_create_idempotence_resize(self): | |
1093 | # create group | |
f38dd50b | 1094 | group = self._gen_subvol_grp_name() |
2a845540 TL |
1095 | self._fs_cmd("subvolumegroup", "create", self.volname, group) |
1096 | ||
1097 | # try creating w/ same subvolume name with size -- should set quota | |
1098 | self._fs_cmd("subvolumegroup", "create", self.volname, group, "1000000000") | |
1099 | ||
1100 | # get group metadata | |
1101 | group_info = json.loads(self._get_subvolume_group_info(self.volname, group)) | |
1102 | self.assertEqual(group_info["bytes_quota"], 1000000000) | |
1103 | ||
1104 | # remove group | |
1105 | self._fs_cmd("subvolumegroup", "rm", self.volname, group) | |
1106 | ||
1107 | def test_subvolume_group_quota_mds_path_restriction_to_group_path(self): | |
1108 | """ | |
1109 | Tests subvolumegroup quota enforcement with mds path restriction set to group. | |
1110 | For quota to be enforced, read permission needs to be provided to the parent | |
1111 | of the directory on which quota is set. Please see the tracker comment [1] | |
1112 | [1] https://tracker.ceph.com/issues/55090#note-8 | |
1113 | """ | |
1114 | osize = self.DEFAULT_FILE_SIZE*1024*1024*100 | |
1115 | # create group with 100MB quota | |
f38dd50b | 1116 | group = self._gen_subvol_grp_name() |
2a845540 TL |
1117 | self._fs_cmd("subvolumegroup", "create", self.volname, group, |
1118 | "--size", str(osize), "--mode=777") | |
1119 | ||
1120 | # make sure it exists | |
1121 | grouppath = self._get_subvolume_group_path(self.volname, group) | |
1122 | self.assertNotEqual(grouppath, None) | |
1123 | ||
1124 | # create subvolume under the group | |
f38dd50b | 1125 | subvolname = self._gen_subvol_name() |
2a845540 TL |
1126 | self._fs_cmd("subvolume", "create", self.volname, subvolname, |
1127 | "--group_name", group, "--mode=777") | |
1128 | ||
1129 | # make sure it exists | |
1130 | subvolpath = self._get_subvolume_path(self.volname, subvolname, group_name=group) | |
1131 | self.assertNotEqual(subvolpath, None) | |
1132 | ||
1133 | # Create auth_id | |
1134 | authid = "client.guest1" | |
f38dd50b | 1135 | user = json.loads(self.get_ceph_cmd_stdout( |
2a845540 TL |
1136 | "auth", "get-or-create", authid, |
1137 | "mds", "allow rw path=/volumes", | |
1138 | "mgr", "allow rw", | |
1139 | "osd", "allow rw tag cephfs *=*", | |
1140 | "mon", "allow r", | |
1141 | "--format=json-pretty" | |
1142 | )) | |
1143 | ||
1144 | # Prepare guest_mount with new authid | |
1145 | guest_mount = self.mount_b | |
1146 | guest_mount.umount_wait() | |
1147 | ||
1148 | # configure credentials for guest client | |
f38dd50b TL |
1149 | guest_keyring_path = self._configure_guest_auth( |
1150 | guest_mount, "guest1", user[0]["key"]) | |
2a845540 TL |
1151 | # mount the subvolume |
1152 | mount_path = os.path.join("/", subvolpath) | |
f38dd50b TL |
1153 | guest_mount.mount_wait(cephfs_mntpt=mount_path, |
1154 | client_keyring_path=guest_keyring_path) | |
2a845540 TL |
1155 | |
1156 | # create 99 files of 1MB | |
1157 | guest_mount.run_shell_payload("mkdir -p dir1") | |
1158 | for i in range(99): | |
1159 | filename = "{0}.{1}".format(TestVolumes.TEST_FILE_NAME_PREFIX, i) | |
1160 | guest_mount.write_n_mb(os.path.join("dir1", filename), self.DEFAULT_FILE_SIZE) | |
1161 | try: | |
1162 | # write two files of 1MB file to exceed the quota | |
1163 | guest_mount.run_shell_payload("mkdir -p dir2") | |
1164 | for i in range(2): | |
1165 | filename = "{0}.{1}".format(TestVolumes.TEST_FILE_NAME_PREFIX, i) | |
1166 | guest_mount.write_n_mb(os.path.join("dir2", filename), self.DEFAULT_FILE_SIZE) | |
1167 | # For quota to be enforced | |
1168 | time.sleep(60) | |
1169 | # create 400 files of 1MB to exceed quota | |
1170 | for i in range(400): | |
1171 | filename = "{0}.{1}".format(TestVolumes.TEST_FILE_NAME_PREFIX, i) | |
1172 | guest_mount.write_n_mb(os.path.join("dir2", filename), self.DEFAULT_FILE_SIZE) | |
1173 | # Sometimes quota enforcement takes time. | |
1174 | if i == 200: | |
1175 | time.sleep(60) | |
1176 | except CommandFailedError: | |
1177 | pass | |
1178 | else: | |
1179 | self.fail(f"expected filling subvolume {subvolname} with 400 files of size 1MB to fail") | |
1180 | ||
1181 | # clean up | |
1182 | guest_mount.umount_wait() | |
1183 | ||
1184 | # Delete the subvolume | |
1185 | self._fs_cmd("subvolume", "rm", self.volname, subvolname, "--group_name", group) | |
1186 | ||
1187 | # remove group | |
1188 | self._fs_cmd("subvolumegroup", "rm", self.volname, group) | |
1189 | ||
1190 | # verify trash dir is clean | |
1191 | self._wait_for_trash_empty() | |
1192 | ||
1193 | def test_subvolume_group_quota_mds_path_restriction_to_subvolume_path(self): | |
1194 | """ | |
1195 | Tests subvolumegroup quota enforcement with mds path restriction set to subvolume path | |
1196 | The quota should not be enforced because of the fourth limitation mentioned at | |
1197 | https://docs.ceph.com/en/latest/cephfs/quota/#limitations | |
1198 | """ | |
1199 | osize = self.DEFAULT_FILE_SIZE*1024*1024*100 | |
1200 | # create group with 100MB quota | |
f38dd50b | 1201 | group = self._gen_subvol_grp_name() |
2a845540 TL |
1202 | self._fs_cmd("subvolumegroup", "create", self.volname, group, |
1203 | "--size", str(osize), "--mode=777") | |
1204 | ||
1205 | # make sure it exists | |
1206 | grouppath = self._get_subvolume_group_path(self.volname, group) | |
1207 | self.assertNotEqual(grouppath, None) | |
1208 | ||
1209 | # create subvolume under the group | |
f38dd50b | 1210 | subvolname = self._gen_subvol_name() |
2a845540 TL |
1211 | self._fs_cmd("subvolume", "create", self.volname, subvolname, |
1212 | "--group_name", group, "--mode=777") | |
1213 | ||
1214 | # make sure it exists | |
1215 | subvolpath = self._get_subvolume_path(self.volname, subvolname, group_name=group) | |
1216 | self.assertNotEqual(subvolpath, None) | |
1217 | ||
1218 | mount_path = os.path.join("/", subvolpath) | |
1219 | ||
1220 | # Create auth_id | |
1221 | authid = "client.guest1" | |
f38dd50b | 1222 | user = json.loads(self.get_ceph_cmd_stdout( |
2a845540 TL |
1223 | "auth", "get-or-create", authid, |
1224 | "mds", f"allow rw path={mount_path}", | |
1225 | "mgr", "allow rw", | |
1226 | "osd", "allow rw tag cephfs *=*", | |
1227 | "mon", "allow r", | |
1228 | "--format=json-pretty" | |
1229 | )) | |
1230 | ||
1231 | # Prepare guest_mount with new authid | |
1232 | guest_mount = self.mount_b | |
1233 | guest_mount.umount_wait() | |
1234 | ||
1235 | # configure credentials for guest client | |
f38dd50b TL |
1236 | guest_keyring_path = self._configure_guest_auth( |
1237 | guest_mount, "guest1", user[0]["key"]) | |
2a845540 | 1238 | # mount the subvolume |
f38dd50b TL |
1239 | guest_mount.mount_wait(cephfs_mntpt=mount_path, |
1240 | client_keyring_path=guest_keyring_path) | |
2a845540 TL |
1241 | |
1242 | # create 99 files of 1MB to exceed quota | |
1243 | guest_mount.run_shell_payload("mkdir -p dir1") | |
1244 | for i in range(99): | |
1245 | filename = "{0}.{1}".format(TestVolumes.TEST_FILE_NAME_PREFIX, i) | |
1246 | guest_mount.write_n_mb(os.path.join("dir1", filename), self.DEFAULT_FILE_SIZE) | |
1247 | try: | |
1248 | # write two files of 1MB file to exceed the quota | |
1249 | guest_mount.run_shell_payload("mkdir -p dir2") | |
1250 | for i in range(2): | |
1251 | filename = "{0}.{1}".format(TestVolumes.TEST_FILE_NAME_PREFIX, i) | |
1252 | guest_mount.write_n_mb(os.path.join("dir2", filename), self.DEFAULT_FILE_SIZE) | |
1253 | # For quota to be enforced | |
1254 | time.sleep(60) | |
1255 | # create 400 files of 1MB to exceed quota | |
1256 | for i in range(400): | |
1257 | filename = "{0}.{1}".format(TestVolumes.TEST_FILE_NAME_PREFIX, i) | |
1258 | guest_mount.write_n_mb(os.path.join("dir2", filename), self.DEFAULT_FILE_SIZE) | |
1259 | # Sometimes quota enforcement takes time. | |
1260 | if i == 200: | |
1261 | time.sleep(60) | |
1262 | except CommandFailedError: | |
1263 | self.fail(f"Quota should not be enforced, expected filling subvolume {subvolname} with 400 files of size 1MB to succeed") | |
1264 | ||
1265 | # clean up | |
1266 | guest_mount.umount_wait() | |
1267 | ||
1268 | # Delete the subvolume | |
1269 | self._fs_cmd("subvolume", "rm", self.volname, subvolname, "--group_name", group) | |
1270 | ||
1271 | # remove group | |
1272 | self._fs_cmd("subvolumegroup", "rm", self.volname, group) | |
1273 | ||
1274 | # verify trash dir is clean | |
1275 | self._wait_for_trash_empty() | |
1276 | ||
    def test_subvolume_group_quota_exceeded_subvolume_removal(self):
        """
        Tests subvolume removal if it's group quota is exceeded
        """
        osize = self.DEFAULT_FILE_SIZE*1024*1024*100
        # create group with 100MB quota
        group = self._gen_subvol_grp_name()
        self._fs_cmd("subvolumegroup", "create", self.volname, group,
                     "--size", str(osize), "--mode=777")

        # make sure it exists
        grouppath = self._get_subvolume_group_path(self.volname, group)
        self.assertNotEqual(grouppath, None)

        # create subvolume under the group
        subvolname = self._gen_subvol_name()
        self._fs_cmd("subvolume", "create", self.volname, subvolname,
                     "--group_name", group, "--mode=777")

        # make sure it exists
        subvolpath = self._get_subvolume_path(self.volname, subvolname, group_name=group)
        self.assertNotEqual(subvolpath, None)

        # create 99 files of 1MB to exceed quota
        self._do_subvolume_io(subvolname, subvolume_group=group, number_of_files=99)

        try:
            # write two files of 1MB file to exceed the quota
            self._do_subvolume_io(subvolname, subvolume_group=group, create_dir='dir1', number_of_files=2)
            # For quota to be enforced
            time.sleep(20)
            # create 400 files of 1MB to exceed quota
            self._do_subvolume_io(subvolname, subvolume_group=group, create_dir='dir1', number_of_files=400)
        except CommandFailedError:
            # Delete subvolume when group quota is exceeded -- removal must
            # succeed even while writes are being rejected
            self._fs_cmd("subvolume", "rm", self.volname, subvolname, "--group_name", group)
        else:
            self.fail(f"expected filling subvolume {subvolname} with 400 files of size 1MB to fail")

        # remove group
        self._fs_cmd("subvolumegroup", "rm", self.volname, group)

        # verify trash dir is clean
        self._wait_for_trash_empty()
1321 | ||
    def test_subvolume_group_quota_exceeded_subvolume_removal_retained_snaps(self):
        """
        Tests retained snapshot subvolume removal if it's group quota is exceeded

        After exceeding the group quota, the subvolume is removed with
        --retain-snapshots; deleting the last retained snapshot then removes
        the subvolume itself.
        """
        group = self._gen_subvol_grp_name()
        subvolname = self._gen_subvol_name()
        snapshot1, snapshot2 = self._gen_subvol_snap_name(2)

        osize = self.DEFAULT_FILE_SIZE*1024*1024*100
        # create group with 100MB quota
        self._fs_cmd("subvolumegroup", "create", self.volname, group,
                     "--size", str(osize), "--mode=777")

        # make sure it exists
        grouppath = self._get_subvolume_group_path(self.volname, group)
        self.assertNotEqual(grouppath, None)

        # create subvolume under the group
        self._fs_cmd("subvolume", "create", self.volname, subvolname,
                     "--group_name", group, "--mode=777")

        # make sure it exists
        subvolpath = self._get_subvolume_path(self.volname, subvolname, group_name=group)
        self.assertNotEqual(subvolpath, None)

        # create 99 files of 1MB to exceed quota
        self._do_subvolume_io(subvolname, subvolume_group=group, number_of_files=99)

        # snapshot subvolume
        self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolname, snapshot1, "--group_name", group)
        self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolname, snapshot2, "--group_name", group)

        try:
            # write two files of 1MB file to exceed the quota
            self._do_subvolume_io(subvolname, subvolume_group=group, create_dir='dir1', number_of_files=2)
            # For quota to be enforced
            # NOTE(review): empirical settling time for asynchronous quota enforcement.
            time.sleep(20)
            # create 400 files of 1MB to exceed quota
            self._do_subvolume_io(subvolname, subvolume_group=group, number_of_files=400)
        except CommandFailedError:
            # remove with snapshot retention
            self._fs_cmd("subvolume", "rm", self.volname, subvolname, "--group_name", group, "--retain-snapshots")
            # remove snapshot1
            self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolname, snapshot1, "--group_name", group)
            # remove snapshot2 (should remove volume)
            self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolname, snapshot2, "--group_name", group)
            # verify subvolume trash is clean
            self._wait_for_subvol_trash_empty(subvolname, group=group)
        else:
            self.fail(f"expected filling subvolume {subvolname} with 400 files of size 1MB to fail")

        # remove group
        self._fs_cmd("subvolumegroup", "rm", self.volname, group)

        # verify trash dir is clean
        self._wait_for_trash_empty()
1378 | ||
1379 | def test_subvolume_group_quota_subvolume_removal(self): | |
1380 | """ | |
1381 | Tests subvolume removal if it's group quota is set. | |
1382 | """ | |
1383 | # create group with size -- should set quota | |
f38dd50b | 1384 | group = self._gen_subvol_grp_name() |
2a845540 TL |
1385 | self._fs_cmd("subvolumegroup", "create", self.volname, group, "1000000000") |
1386 | ||
1387 | # create subvolume under the group | |
f38dd50b | 1388 | subvolname = self._gen_subvol_name() |
2a845540 TL |
1389 | self._fs_cmd("subvolume", "create", self.volname, subvolname, "--group_name", group) |
1390 | ||
1391 | # remove subvolume | |
1392 | try: | |
1393 | self._fs_cmd("subvolume", "rm", self.volname, subvolname, "--group_name", group) | |
1394 | except CommandFailedError: | |
1395 | self.fail("expected the 'fs subvolume rm' command to succeed if group quota is set") | |
1396 | ||
1397 | # remove subvolumegroup | |
1398 | self._fs_cmd("subvolumegroup", "rm", self.volname, group) | |
1399 | ||
1400 | # verify trash dir is clean | |
1401 | self._wait_for_trash_empty() | |
1402 | ||
    def test_subvolume_group_quota_legacy_subvolume_removal(self):
        """
        Tests legacy subvolume removal if it's group quota is set.

        A pre-mgr/volumes ("legacy") subvolume is emulated by creating its
        directory by hand under /volumes/<group>/<subvolume>.
        """
        subvolume = self._gen_subvol_name()
        group = self._gen_subvol_grp_name()

        # emulate a old-fashioned subvolume -- in a custom group
        createpath1 = os.path.join(".", "volumes", group, subvolume)
        self.mount_a.run_shell(['sudo', 'mkdir', '-p', createpath1], omit_sudo=False)

        # this would auto-upgrade on access without anyone noticing
        subvolpath1 = self._fs_cmd("subvolume", "getpath", self.volname, subvolume, "--group-name", group)
        self.assertNotEqual(subvolpath1, None)
        subvolpath1 = subvolpath1.rstrip() # remove "/" prefix and any trailing newline

        # and... the subvolume path returned should be what we created behind the scene
        # (createpath1[1:] strips the leading "." of the relative path)
        self.assertEqual(createpath1[1:], subvolpath1)

        # Set subvolumegroup quota on idempotent subvolumegroup creation
        self._fs_cmd("subvolumegroup", "create", self.volname, group, "1000000000")

        # remove subvolume
        try:
            self._fs_cmd("subvolume", "rm", self.volname, subvolume, "--group_name", group)
        except CommandFailedError:
            self.fail("expected the 'fs subvolume rm' command to succeed if group quota is set")

        # remove subvolumegroup
        self._fs_cmd("subvolumegroup", "rm", self.volname, group)

        # verify trash dir is clean
        self._wait_for_trash_empty()
1436 | ||
    def test_subvolume_group_quota_v1_subvolume_removal(self):
        """
        Tests v1 subvolume removal if it's group quota is set.

        Uses the helper to lay down a v1-format subvolume, then checks that
        removal works after a quota is attached to its group.
        """
        subvolume = self._gen_subvol_name()
        group = self._gen_subvol_grp_name()

        # emulate a v1 subvolume -- in a custom group
        self._create_v1_subvolume(subvolume, subvol_group=group, has_snapshot=False)

        # Set subvolumegroup quota on idempotent subvolumegroup creation
        self._fs_cmd("subvolumegroup", "create", self.volname, group, "1000000000")

        # remove subvolume
        try:
            self._fs_cmd("subvolume", "rm", self.volname, subvolume, "--group_name", group)
        except CommandFailedError:
            self.fail("expected the 'fs subvolume rm' command to succeed if group quota is set")

        # remove subvolumegroup
        self._fs_cmd("subvolumegroup", "rm", self.volname, group)

        # verify trash dir is clean
        self._wait_for_trash_empty()
1461 | ||
    def test_subvolume_group_resize_fail_invalid_size(self):
        """
        That a subvolume group cannot be resized to an invalid size and the quota did not change
        """

        osize = self.DEFAULT_FILE_SIZE*1024*1024
        # create group with 1MB quota
        group = self._gen_subvol_grp_name()
        self._fs_cmd("subvolumegroup", "create", self.volname, group, "--size", str(osize))

        # make sure it exists
        grouppath = self._get_subvolume_group_path(self.volname, group)
        self.assertNotEqual(grouppath, None)

        # try to resize the subvolume with an invalid size -10
        nsize = -10
        try:
            self._fs_cmd("subvolumegroup", "resize", self.volname, group, str(nsize))
        except CommandFailedError as ce:
            # a negative size must be rejected with EINVAL
            self.assertEqual(ce.exitstatus, errno.EINVAL,
                             "invalid error code on resize of subvolume group with invalid size")
        else:
            self.fail("expected the 'fs subvolumegroup resize' command to fail")

        # verify the quota did not change
        size = int(self.mount_a.getfattr(grouppath, "ceph.quota.max_bytes"))
        self.assertEqual(size, osize)

        # remove group
        self._fs_cmd("subvolumegroup", "rm", self.volname, group)
1492 | ||
1493 | def test_subvolume_group_resize_fail_zero_size(self): | |
1494 | """ | |
1495 | That a subvolume group cannot be resized to a zero size and the quota did not change | |
1496 | """ | |
1497 | ||
1498 | osize = self.DEFAULT_FILE_SIZE*1024*1024 | |
1499 | # create group with 1MB quota | |
f38dd50b | 1500 | group = self._gen_subvol_grp_name() |
2a845540 TL |
1501 | self._fs_cmd("subvolumegroup", "create", self.volname, group, "--size", str(osize)) |
1502 | ||
1503 | # make sure it exists | |
1504 | grouppath = self._get_subvolume_group_path(self.volname, group) | |
1505 | self.assertNotEqual(grouppath, None) | |
1506 | ||
1507 | # try to resize the subvolume group with size 0 | |
1508 | nsize = 0 | |
1509 | try: | |
1510 | self._fs_cmd("subvolumegroup", "resize", self.volname, group, str(nsize)) | |
1511 | except CommandFailedError as ce: | |
1512 | self.assertEqual(ce.exitstatus, errno.EINVAL, | |
1513 | "invalid error code on resize of subvolume group with invalid size") | |
1514 | else: | |
1515 | self.fail("expected the 'fs subvolumegroup resize' command to fail") | |
1516 | ||
1517 | # verify the quota did not change | |
1518 | size = int(self.mount_a.getfattr(grouppath, "ceph.quota.max_bytes")) | |
1519 | self.assertEqual(size, osize) | |
1520 | ||
1521 | # remove group | |
1522 | self._fs_cmd("subvolumegroup", "rm", self.volname, group) | |
1523 | ||
1524 | def test_subvolume_group_resize_quota_lt_used_size(self): | |
1525 | """ | |
1526 | That a subvolume group can be resized to a size smaller than the current used size | |
1527 | and the resulting quota matches the expected size. | |
1528 | """ | |
1529 | ||
1530 | osize = self.DEFAULT_FILE_SIZE*1024*1024*20 | |
1531 | # create group with 20MB quota | |
f38dd50b | 1532 | group = self._gen_subvol_grp_name() |
2a845540 TL |
1533 | self._fs_cmd("subvolumegroup", "create", self.volname, group, |
1534 | "--size", str(osize), "--mode=777") | |
1535 | ||
1536 | # make sure it exists | |
1537 | grouppath = self._get_subvolume_group_path(self.volname, group) | |
1538 | self.assertNotEqual(grouppath, None) | |
1539 | ||
1540 | # create subvolume under the group | |
f38dd50b | 1541 | subvolname = self._gen_subvol_name() |
2a845540 TL |
1542 | self._fs_cmd("subvolume", "create", self.volname, subvolname, |
1543 | "--group_name", group, "--mode=777") | |
1544 | ||
1545 | # make sure it exists | |
1546 | subvolpath = self._get_subvolume_path(self.volname, subvolname, group_name=group) | |
1547 | self.assertNotEqual(subvolpath, None) | |
1548 | ||
1549 | # create one file of 10MB | |
1550 | file_size=self.DEFAULT_FILE_SIZE*10 | |
1551 | number_of_files=1 | |
1552 | log.debug("filling subvolume {0} with {1} file of size {2}MB".format(subvolname, | |
1553 | number_of_files, | |
1554 | file_size)) | |
1555 | filename = "{0}.{1}".format(TestVolumes.TEST_FILE_NAME_PREFIX, self.DEFAULT_NUMBER_OF_FILES+1) | |
1556 | self.mount_a.write_n_mb(os.path.join(subvolpath, filename), file_size) | |
1557 | ||
1558 | usedsize = int(self.mount_a.getfattr(subvolpath, "ceph.dir.rbytes")) | |
1559 | ||
1560 | # shrink the subvolume group | |
1561 | nsize = usedsize // 2 | |
1562 | try: | |
1563 | self._fs_cmd("subvolumegroup", "resize", self.volname, group, str(nsize)) | |
1564 | except CommandFailedError: | |
1565 | self.fail("expected the 'fs subvolumegroup resize' command to succeed") | |
1566 | ||
1567 | # verify the quota | |
1568 | size = int(self.mount_a.getfattr(grouppath, "ceph.quota.max_bytes")) | |
1569 | self.assertEqual(size, nsize) | |
1570 | ||
1571 | # remove subvolume and group | |
1572 | self._fs_cmd("subvolume", "rm", self.volname, subvolname, "--group_name", group) | |
1573 | self._fs_cmd("subvolumegroup", "rm", self.volname, group) | |
1574 | ||
1575 | # verify trash dir is clean | |
1576 | self._wait_for_trash_empty() | |
1577 | ||
1578 | def test_subvolume_group_resize_fail_quota_lt_used_size_no_shrink(self): | |
1579 | """ | |
1580 | That a subvolume group cannot be resized to a size smaller than the current used size | |
1581 | when --no_shrink is given and the quota did not change. | |
1582 | """ | |
1583 | ||
1584 | osize = self.DEFAULT_FILE_SIZE*1024*1024*20 | |
1585 | # create group with 20MB quota | |
f38dd50b | 1586 | group = self._gen_subvol_grp_name() |
2a845540 TL |
1587 | self._fs_cmd("subvolumegroup", "create", self.volname, group, |
1588 | "--size", str(osize), "--mode=777") | |
1589 | ||
1590 | # make sure it exists | |
1591 | grouppath = self._get_subvolume_group_path(self.volname, group) | |
1592 | self.assertNotEqual(grouppath, None) | |
1593 | ||
1594 | # create subvolume under the group | |
f38dd50b | 1595 | subvolname = self._gen_subvol_name() |
2a845540 TL |
1596 | self._fs_cmd("subvolume", "create", self.volname, subvolname, |
1597 | "--group_name", group, "--mode=777") | |
1598 | ||
1599 | # make sure it exists | |
1600 | subvolpath = self._get_subvolume_path(self.volname, subvolname, group_name=group) | |
1601 | self.assertNotEqual(subvolpath, None) | |
1602 | ||
1603 | # create one file of 10MB | |
1604 | file_size=self.DEFAULT_FILE_SIZE*10 | |
1605 | number_of_files=1 | |
1606 | log.debug("filling subvolume {0} with {1} file of size {2}MB".format(subvolname, | |
1607 | number_of_files, | |
1608 | file_size)) | |
1609 | filename = "{0}.{1}".format(TestVolumes.TEST_FILE_NAME_PREFIX, self.DEFAULT_NUMBER_OF_FILES+2) | |
1610 | self.mount_a.write_n_mb(os.path.join(subvolpath, filename), file_size) | |
1611 | ||
1612 | usedsize = int(self.mount_a.getfattr(grouppath, "ceph.dir.rbytes")) | |
1613 | ||
1614 | # shrink the subvolume group | |
1615 | nsize = usedsize // 2 | |
1616 | try: | |
1617 | self._fs_cmd("subvolumegroup", "resize", self.volname, group, str(nsize), "--no_shrink") | |
1618 | except CommandFailedError as ce: | |
1619 | self.assertEqual(ce.exitstatus, errno.EINVAL, "invalid error code on resize of subvolumegroup with quota less than used") | |
1620 | else: | |
1621 | self.fail("expected the 'fs subvolumegroup resize' command to fail") | |
1622 | ||
1623 | # verify the quota did not change | |
1624 | size = int(self.mount_a.getfattr(grouppath, "ceph.quota.max_bytes")) | |
1625 | self.assertEqual(size, osize) | |
1626 | ||
1627 | # remove subvolume and group | |
1628 | self._fs_cmd("subvolume", "rm", self.volname, subvolname, "--group_name", group) | |
1629 | self._fs_cmd("subvolumegroup", "rm", self.volname, group) | |
1630 | ||
1631 | # verify trash dir is clean | |
1632 | self._wait_for_trash_empty() | |
1633 | ||
    def test_subvolume_group_resize_expand_on_full_subvolume(self):
        """
        That the subvolume group can be expanded after it is full and future write succeed
        """

        osize = self.DEFAULT_FILE_SIZE*1024*1024*100
        # create group with 100MB quota
        group = self._gen_subvol_grp_name()
        self._fs_cmd("subvolumegroup", "create", self.volname, group,
                     "--size", str(osize), "--mode=777")

        # make sure it exists
        grouppath = self._get_subvolume_group_path(self.volname, group)
        self.assertNotEqual(grouppath, None)

        # create subvolume under the group
        subvolname = self._gen_subvol_name()
        self._fs_cmd("subvolume", "create", self.volname, subvolname,
                     "--group_name", group, "--mode=777")

        # make sure it exists
        subvolpath = self._get_subvolume_path(self.volname, subvolname, group_name=group)
        self.assertNotEqual(subvolpath, None)

        # create 99 files of 1MB
        self._do_subvolume_io(subvolname, subvolume_group=group, number_of_files=99)

        try:
            # write two files of 1MB file to exceed the quota
            self._do_subvolume_io(subvolname, subvolume_group=group, create_dir='dir1', number_of_files=2)
            # For quota to be enforced
            # NOTE(review): empirical settling time for asynchronous quota enforcement.
            time.sleep(20)
            # create 500 files of 1MB
            self._do_subvolume_io(subvolname, subvolume_group=group, create_dir='dir1', number_of_files=500)
        except CommandFailedError:
            # Not able to write. So expand the subvolumegroup more and try writing the files again
            nsize = osize*7
            self._fs_cmd("subvolumegroup", "resize", self.volname, group, str(nsize))
            try:
                self._do_subvolume_io(subvolname, subvolume_group=group, create_dir='dir1', number_of_files=500)
            except CommandFailedError:
                self.fail("expected filling subvolume {0} with 500 files of size 1MB "
                          "to succeed".format(subvolname))
        else:
            self.fail("expected filling subvolume {0} with 500 files of size 1MB "
                      "to fail".format(subvolname))

        # remove subvolume and group
        self._fs_cmd("subvolume", "rm", self.volname, subvolname, "--group_name", group)
        self._fs_cmd("subvolumegroup", "rm", self.volname, group)

        # verify trash dir is clean
        self._wait_for_trash_empty()
1687 | ||
1688 | def test_subvolume_group_resize_infinite_size(self): | |
1689 | """ | |
1690 | That a subvolume group can be resized to an infinite size by unsetting its quota. | |
1691 | """ | |
1692 | ||
1693 | osize = self.DEFAULT_FILE_SIZE*1024*1024 | |
1694 | # create group | |
f38dd50b | 1695 | group = self._gen_subvol_grp_name() |
2a845540 TL |
1696 | self._fs_cmd("subvolumegroup", "create", self.volname, group, |
1697 | "--size", str(osize)) | |
1698 | ||
1699 | # make sure it exists | |
1700 | grouppath = self._get_subvolume_group_path(self.volname, group) | |
1701 | self.assertNotEqual(grouppath, None) | |
1702 | ||
1703 | # resize inf | |
1704 | self._fs_cmd("subvolumegroup", "resize", self.volname, group, "inf") | |
1705 | ||
1706 | # verify that the quota is None | |
1707 | size = self.mount_a.getfattr(grouppath, "ceph.quota.max_bytes") | |
1708 | self.assertEqual(size, None) | |
1709 | ||
1710 | # remove subvolume group | |
1711 | self._fs_cmd("subvolumegroup", "rm", self.volname, group) | |
1712 | ||
    def test_subvolume_group_resize_infinite_size_future_writes(self):
        """
        That a subvolume group can be resized to an infinite size and the future writes succeed.
        """

        osize = self.DEFAULT_FILE_SIZE*1024*1024*5
        # create group with 5MB quota
        group = self._gen_subvol_grp_name()
        self._fs_cmd("subvolumegroup", "create", self.volname, group,
                     "--size", str(osize), "--mode=777")

        # make sure it exists
        grouppath = self._get_subvolume_group_path(self.volname, group)
        self.assertNotEqual(grouppath, None)

        # create subvolume under the group
        subvolname = self._gen_subvol_name()
        self._fs_cmd("subvolume", "create", self.volname, subvolname,
                     "--group_name", group, "--mode=777")

        # make sure it exists
        subvolpath = self._get_subvolume_path(self.volname, subvolname, group_name=group)
        self.assertNotEqual(subvolpath, None)

        # create 4 files of 1MB
        self._do_subvolume_io(subvolname, subvolume_group=group, number_of_files=4)

        try:
            # write two files of 1MB file to exceed the quota
            self._do_subvolume_io(subvolname, subvolume_group=group, create_dir='dir1', number_of_files=2)
            # For quota to be enforced
            # NOTE(review): empirical settling time for asynchronous quota enforcement.
            time.sleep(20)
            # create 500 files of 1MB
            self._do_subvolume_io(subvolname, subvolume_group=group, create_dir='dir1', number_of_files=500)
        except CommandFailedError:
            # Not able to write. So resize subvolumegroup to 'inf' and try writing the files again
            # resize inf
            self._fs_cmd("subvolumegroup", "resize", self.volname, group, "inf")
            try:
                self._do_subvolume_io(subvolname, subvolume_group=group, create_dir='dir1', number_of_files=500)
            except CommandFailedError:
                self.fail("expected filling subvolume {0} with 500 files of size 1MB "
                          "to succeed".format(subvolname))
        else:
            self.fail("expected filling subvolume {0} with 500 files of size 1MB "
                      "to fail".format(subvolname))


        # verify that the quota is None
        size = self.mount_a.getfattr(grouppath, "ceph.quota.max_bytes")
        self.assertEqual(size, None)

        # remove subvolume and group
        self._fs_cmd("subvolume", "rm", self.volname, subvolname, "--group_name", group)
        self._fs_cmd("subvolumegroup", "rm", self.volname, group)

        # verify trash dir is clean
        self._wait_for_trash_empty()
1771 | ||
f67539c2 TL |
1772 | def test_subvolume_group_ls(self): |
1773 | # tests the 'fs subvolumegroup ls' command | |
92f5a8d4 | 1774 | |
f67539c2 | 1775 | subvolumegroups = [] |
adb31ebb | 1776 | |
f67539c2 | 1777 | #create subvolumegroups |
f38dd50b | 1778 | subvolumegroups = self._gen_subvol_grp_name(3) |
f67539c2 TL |
1779 | for groupname in subvolumegroups: |
1780 | self._fs_cmd("subvolumegroup", "create", self.volname, groupname) | |
adb31ebb | 1781 | |
f67539c2 TL |
1782 | subvolumegroupls = json.loads(self._fs_cmd('subvolumegroup', 'ls', self.volname)) |
1783 | if len(subvolumegroupls) == 0: | |
1784 | raise RuntimeError("Expected the 'fs subvolumegroup ls' command to list the created subvolume groups") | |
1785 | else: | |
1786 | subvolgroupnames = [subvolumegroup['name'] for subvolumegroup in subvolumegroupls] | |
1787 | if collections.Counter(subvolgroupnames) != collections.Counter(subvolumegroups): | |
1788 | raise RuntimeError("Error creating or listing subvolume groups") | |
92f5a8d4 | 1789 | |
1d09f67e TL |
1790 | def test_subvolume_group_ls_filter(self): |
1791 | # tests the 'fs subvolumegroup ls' command filters '_deleting' directory | |
1792 | ||
1793 | subvolumegroups = [] | |
1794 | ||
1795 | #create subvolumegroup | |
f38dd50b | 1796 | subvolumegroups = self._gen_subvol_grp_name(3) |
1d09f67e TL |
1797 | for groupname in subvolumegroups: |
1798 | self._fs_cmd("subvolumegroup", "create", self.volname, groupname) | |
1799 | ||
1800 | # create subvolume and remove. This creates '_deleting' directory. | |
f38dd50b | 1801 | subvolume = self._gen_subvol_name() |
1d09f67e TL |
1802 | self._fs_cmd("subvolume", "create", self.volname, subvolume) |
1803 | self._fs_cmd("subvolume", "rm", self.volname, subvolume) | |
1804 | ||
2a845540 TL |
1805 | subvolumegroupls = json.loads(self._fs_cmd('subvolumegroup', 'ls', self.volname)) |
1806 | subvolgroupnames = [subvolumegroup['name'] for subvolumegroup in subvolumegroupls] | |
1807 | if "_deleting" in subvolgroupnames: | |
1808 | self.fail("Listing subvolume groups listed '_deleting' directory") | |
1809 | ||
    def test_subvolume_group_ls_filter_internal_directories(self):
        # tests the 'fs subvolumegroup ls' command filters internal directories
        # eg: '_deleting', '_nogroup', '_index', "_legacy"
        #
        # Each internal directory is materialized by a specific operation
        # below, then the listing is checked to contain only the real groups.

        subvolumegroups = self._gen_subvol_grp_name(3)
        subvolume = self._gen_subvol_name()
        snapshot = self._gen_subvol_snap_name()
        clone = self._gen_subvol_clone_name()

        #create subvolumegroups
        for groupname in subvolumegroups:
            self._fs_cmd("subvolumegroup", "create", self.volname, groupname)

        # create subvolume which will create '_nogroup' directory
        self._fs_cmd("subvolume", "create", self.volname, subvolume)

        # create snapshot
        self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolume, snapshot)

        # clone snapshot which will create '_index' directory
        self._fs_cmd("subvolume", "snapshot", "clone", self.volname, subvolume, snapshot, clone)

        # wait for clone to complete
        self._wait_for_clone_to_complete(clone)

        # remove snapshot
        self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume, snapshot)

        # remove subvolume which will create '_deleting' directory
        self._fs_cmd("subvolume", "rm", self.volname, subvolume)

        # list subvolumegroups
        ret = json.loads(self._fs_cmd('subvolumegroup', 'ls', self.volname))
        self.assertEqual(len(ret), len(subvolumegroups))

        ret_list = [subvolumegroup['name'] for subvolumegroup in ret]
        self.assertEqual(len(ret_list), len(subvolumegroups))

        # every listed name must be one of the groups we created
        self.assertEqual(all(elem in subvolumegroups for elem in ret_list), True)

        # cleanup
        self._fs_cmd("subvolume", "rm", self.volname, clone)
        for groupname in subvolumegroups:
            self._fs_cmd("subvolumegroup", "rm", self.volname, groupname)
1854 | ||
f67539c2 TL |
1855 | def test_subvolume_group_ls_for_nonexistent_volume(self): |
1856 | # tests the 'fs subvolumegroup ls' command when /volume doesn't exist | |
1857 | # prerequisite: we expect that the test volume is created and a subvolumegroup is NOT created | |
92f5a8d4 | 1858 | |
f67539c2 TL |
1859 | # list subvolume groups |
1860 | subvolumegroupls = json.loads(self._fs_cmd('subvolumegroup', 'ls', self.volname)) | |
1861 | if len(subvolumegroupls) > 0: | |
1862 | raise RuntimeError("Expected the 'fs subvolumegroup ls' command to output an empty list") | |
92f5a8d4 | 1863 | |
f67539c2 TL |
    def test_subvolumegroup_pin_distributed(self):
        """
        That pinning a subvolumegroup as 'distributed' spreads its subvolumes'
        subtrees across the active MDS ranks.
        """
        self.fs.set_max_mds(2)
        status = self.fs.wait_for_daemons()
        self.config_set('mds', 'mds_export_ephemeral_distributed', True)

        group = "pinme"
        self._fs_cmd("subvolumegroup", "create", self.volname, group)
        self._fs_cmd("subvolumegroup", "pin", self.volname, group, "distributed", "True")
        subvolumes = self._gen_subvol_name(50)
        for subvolume in subvolumes:
            self._fs_cmd("subvolume", "create", self.volname, subvolume, "--group_name", group)
        # expect 2 subtrees per rank across the 2 active MDSs
        self._wait_distributed_subtrees(2 * 2, status=status, rank="all")

        # remove subvolumes
        for subvolume in subvolumes:
            self._fs_cmd("subvolume", "rm", self.volname, subvolume, group)

        # verify trash dir is clean
        self._wait_for_trash_empty()
1883 | ||
f67539c2 TL |
1884 | def test_subvolume_group_rm_force(self): |
1885 | # test removing non-existing subvolume group with --force | |
f38dd50b | 1886 | group = self._gen_subvol_grp_name() |
f67539c2 TL |
1887 | try: |
1888 | self._fs_cmd("subvolumegroup", "rm", self.volname, group, "--force") | |
1889 | except CommandFailedError: | |
1890 | raise RuntimeError("expected the 'fs subvolumegroup rm --force' command to succeed") | |
92f5a8d4 | 1891 | |
2a845540 TL |
1892 | def test_subvolume_group_exists_with_subvolumegroup_and_no_subvolume(self): |
1893 | """Test the presence of any subvolumegroup when only subvolumegroup is present""" | |
1894 | ||
f38dd50b | 1895 | group = self._gen_subvol_grp_name() |
2a845540 TL |
1896 | # create subvolumegroup |
1897 | self._fs_cmd("subvolumegroup", "create", self.volname, group) | |
1898 | ret = self._fs_cmd("subvolumegroup", "exist", self.volname) | |
1899 | self.assertEqual(ret.strip('\n'), "subvolumegroup exists") | |
1900 | # delete subvolumegroup | |
1901 | self._fs_cmd("subvolumegroup", "rm", self.volname, group) | |
1902 | ret = self._fs_cmd("subvolumegroup", "exist", self.volname) | |
1903 | self.assertEqual(ret.strip('\n'), "no subvolumegroup exists") | |
1904 | ||
1905 | def test_subvolume_group_exists_with_no_subvolumegroup_and_subvolume(self): | |
1906 | """Test the presence of any subvolumegroup when no subvolumegroup is present""" | |
1907 | ||
1908 | ret = self._fs_cmd("subvolumegroup", "exist", self.volname) | |
1909 | self.assertEqual(ret.strip('\n'), "no subvolumegroup exists") | |
1910 | ||
    def test_subvolume_group_exists_with_subvolumegroup_and_subvolume(self):
        """Test the presence of any subvolume when subvolumegroup
        and subvolume both are present

        The 'exist' report must keep saying a group exists until the group
        itself (not merely its subvolumes) is removed.
        """
        group = self._gen_subvol_grp_name()
        subvolume = self._gen_subvol_name(2)
        # create subvolumegroup
        self._fs_cmd("subvolumegroup", "create", self.volname, group)
        # create subvolume in group
        self._fs_cmd("subvolume", "create", self.volname, subvolume[0], "--group_name", group)
        # create subvolume
        self._fs_cmd("subvolume", "create", self.volname, subvolume[1])
        ret = self._fs_cmd("subvolumegroup", "exist", self.volname)
        self.assertEqual(ret.strip('\n'), "subvolumegroup exists")
        # delete subvolume in group
        self._fs_cmd("subvolume", "rm", self.volname, subvolume[0], "--group_name", group)
        ret = self._fs_cmd("subvolumegroup", "exist", self.volname)
        self.assertEqual(ret.strip('\n'), "subvolumegroup exists")
        # delete subvolume
        self._fs_cmd("subvolume", "rm", self.volname, subvolume[1])
        ret = self._fs_cmd("subvolumegroup", "exist", self.volname)
        self.assertEqual(ret.strip('\n'), "subvolumegroup exists")
        # delete subvolumegroup
        self._fs_cmd("subvolumegroup", "rm", self.volname, group)
        ret = self._fs_cmd("subvolumegroup", "exist", self.volname)
        self.assertEqual(ret.strip('\n'), "no subvolumegroup exists")
1937 | ||
1938 | def test_subvolume_group_exists_without_subvolumegroup_and_with_subvolume(self): | |
1939 | """Test the presence of any subvolume when subvolume is present | |
1940 | but no subvolumegroup is present""" | |
1941 | ||
f38dd50b | 1942 | subvolume = self._gen_subvol_name() |
2a845540 TL |
1943 | # create subvolume |
1944 | self._fs_cmd("subvolume", "create", self.volname, subvolume) | |
1945 | ret = self._fs_cmd("subvolumegroup", "exist", self.volname) | |
1946 | self.assertEqual(ret.strip('\n'), "no subvolumegroup exists") | |
1947 | # delete subvolume | |
1948 | self._fs_cmd("subvolume", "rm", self.volname, subvolume) | |
1949 | ret = self._fs_cmd("subvolumegroup", "exist", self.volname) | |
1950 | self.assertEqual(ret.strip('\n'), "no subvolumegroup exists") | |
1951 | ||
f38dd50b TL |
    def test_subvolume_group_rm_when_its_not_empty(self):
        """Removing a subvolumegroup that still contains a subvolume must fail
        with ENOTEMPTY; removal succeeds once the group is emptied."""
        group = self._gen_subvol_grp_name()
        subvolume = self._gen_subvol_name()

        # create subvolumegroup
        self._fs_cmd("subvolumegroup", "create", self.volname, group)
        # create subvolume in group
        self._fs_cmd("subvolume", "create", self.volname, subvolume, "--group_name", group)
        # try, remove subvolume group
        try:
            self._fs_cmd("subvolumegroup", "rm", self.volname, group)
        except CommandFailedError as ce:
            self.assertEqual(ce.exitstatus, errno.ENOTEMPTY, "invalid error code on deleting "
                             "subvolumegroup when it is not empty")
        else:
            self.fail("expected the 'fs subvolumegroup rm' command to fail")

        # delete subvolume
        self._fs_cmd("subvolume", "rm", self.volname, subvolume, "--group_name", group)

        # delete subvolumegroup
        self._fs_cmd("subvolumegroup", "rm", self.volname, group)

        # verify trash dir is clean
        self._wait_for_trash_empty()
1977 | ||
92f5a8d4 | 1978 | |
f67539c2 TL |
1979 | class TestSubvolumes(TestVolumesHelper): |
1980 | """Tests for FS subvolume operations, except snapshot and snapshot clone.""" | |
    def test_async_subvolume_rm(self):
        """Remove a batch of populated subvolumes and verify that asynchronous
        trash purge eventually empties the trash directory."""
        subvolumes = self._gen_subvol_name(100)

        # create subvolumes
        for subvolume in subvolumes:
            self._fs_cmd("subvolume", "create", self.volname, subvolume, "--mode=777")
            # populate each subvolume so the purge has real work to do
            self._do_subvolume_io(subvolume, number_of_files=10)

        self.mount_a.umount_wait()

        # remove subvolumes
        for subvolume in subvolumes:
            self._fs_cmd("subvolume", "rm", self.volname, subvolume)

        self.mount_a.mount_wait()

        # verify trash dir is clean; 100 subvolumes need a longer purge timeout
        self._wait_for_trash_empty(timeout=300)
1999 | ||
2000 | def test_default_uid_gid_subvolume(self): | |
f38dd50b | 2001 | subvolume = self._gen_subvol_name() |
f67539c2 TL |
2002 | expected_uid = 0 |
2003 | expected_gid = 0 | |
2004 | ||
2005 | # create subvolume | |
2006 | self._fs_cmd("subvolume", "create", self.volname, subvolume) | |
2007 | subvol_path = self._get_subvolume_path(self.volname, subvolume) | |
2008 | ||
2009 | # check subvolume's uid and gid | |
2010 | stat = self.mount_a.stat(subvol_path) | |
2011 | self.assertEqual(stat['st_uid'], expected_uid) | |
2012 | self.assertEqual(stat['st_gid'], expected_gid) | |
92f5a8d4 | 2013 | |
adb31ebb | 2014 | # remove subvolume |
f67539c2 | 2015 | self._fs_cmd("subvolume", "rm", self.volname, subvolume) |
adb31ebb TL |
2016 | |
2017 | # verify trash dir is clean | |
2018 | self._wait_for_trash_empty() | |
2019 | ||
f67539c2 TL |
2020 | def test_nonexistent_subvolume_rm(self): |
2021 | # remove non-existing subvolume | |
2022 | subvolume = "non_existent_subvolume" | |
92f5a8d4 | 2023 | |
f67539c2 TL |
2024 | # try, remove subvolume |
2025 | try: | |
2026 | self._fs_cmd("subvolume", "rm", self.volname, subvolume) | |
2027 | except CommandFailedError as ce: | |
2028 | if ce.exitstatus != errno.ENOENT: | |
2029 | raise | |
2030 | else: | |
2031 | raise RuntimeError("expected the 'fs subvolume rm' command to fail") | |
92f5a8d4 | 2032 | |
f67539c2 | 2033 | def test_subvolume_create_and_rm(self): |
92f5a8d4 | 2034 | # create subvolume |
f38dd50b | 2035 | subvolume = self._gen_subvol_name() |
f67539c2 | 2036 | self._fs_cmd("subvolume", "create", self.volname, subvolume) |
92f5a8d4 TL |
2037 | |
2038 | # make sure it exists | |
f67539c2 | 2039 | subvolpath = self._fs_cmd("subvolume", "getpath", self.volname, subvolume) |
92f5a8d4 TL |
2040 | self.assertNotEqual(subvolpath, None) |
2041 | ||
f67539c2 TL |
2042 | # remove subvolume |
2043 | self._fs_cmd("subvolume", "rm", self.volname, subvolume) | |
2044 | # make sure its gone | |
92f5a8d4 | 2045 | try: |
f67539c2 | 2046 | self._fs_cmd("subvolume", "getpath", self.volname, subvolume) |
92f5a8d4 | 2047 | except CommandFailedError as ce: |
f67539c2 TL |
2048 | if ce.exitstatus != errno.ENOENT: |
2049 | raise | |
92f5a8d4 | 2050 | else: |
f67539c2 | 2051 | raise RuntimeError("expected the 'fs subvolume getpath' command to fail. Subvolume not removed.") |
adb31ebb TL |
2052 | |
2053 | # verify trash dir is clean | |
2054 | self._wait_for_trash_empty() | |
2055 | ||
f67539c2 | 2056 | def test_subvolume_create_and_rm_in_group(self): |
f38dd50b TL |
2057 | subvolume = self._gen_subvol_name() |
2058 | group = self._gen_subvol_grp_name() | |
92f5a8d4 | 2059 | |
f67539c2 TL |
2060 | # create group |
2061 | self._fs_cmd("subvolumegroup", "create", self.volname, group) | |
92f5a8d4 | 2062 | |
f67539c2 TL |
2063 | # create subvolume in group |
2064 | self._fs_cmd("subvolume", "create", self.volname, subvolume, "--group_name", group) | |
92f5a8d4 | 2065 | |
adb31ebb | 2066 | # remove subvolume |
f67539c2 | 2067 | self._fs_cmd("subvolume", "rm", self.volname, subvolume, group) |
adb31ebb TL |
2068 | |
2069 | # verify trash dir is clean | |
2070 | self._wait_for_trash_empty() | |
2071 | ||
f67539c2 TL |
2072 | # remove group |
2073 | self._fs_cmd("subvolumegroup", "rm", self.volname, group) | |
2074 | ||
81eedcae TL |
2075 | def test_subvolume_create_idempotence(self): |
2076 | # create subvolume | |
f38dd50b | 2077 | subvolume = self._gen_subvol_name() |
81eedcae TL |
2078 | self._fs_cmd("subvolume", "create", self.volname, subvolume) |
2079 | ||
2080 | # try creating w/ same subvolume name -- should be idempotent | |
2081 | self._fs_cmd("subvolume", "create", self.volname, subvolume) | |
2082 | ||
2083 | # remove subvolume | |
2084 | self._fs_cmd("subvolume", "rm", self.volname, subvolume) | |
2085 | ||
494da23a TL |
2086 | # verify trash dir is clean |
2087 | self._wait_for_trash_empty() | |
2088 | ||
e306af50 TL |
    def test_subvolume_create_idempotence_resize(self):
        """Re-creating an existing subvolume with a size argument must act as a
        resize (set the quota) rather than fail."""
        # create subvolume
        subvolume = self._gen_subvol_name()
        self._fs_cmd("subvolume", "create", self.volname, subvolume)

        # try creating w/ same subvolume name with size -- should set quota
        self._fs_cmd("subvolume", "create", self.volname, subvolume, "1000000000")

        # get subvolume metadata
        subvol_info = json.loads(self._get_subvolume_info(self.volname, subvolume))
        self.assertEqual(subvol_info["bytes_quota"], 1000000000)

        # remove subvolume
        self._fs_cmd("subvolume", "rm", self.volname, subvolume)

        # verify trash dir is clean
        self._wait_for_trash_empty()
2106 | ||
1d09f67e TL |
    def test_subvolume_create_idempotence_mode(self):
        """Re-creating an existing subvolume with --mode must update the mode
        of the existing directory."""
        # default mode
        default_mode = "755"

        # create subvolume
        subvolume = self._gen_subvol_name()
        self._fs_cmd("subvolume", "create", self.volname, subvolume)

        subvol_path = self._get_subvolume_path(self.volname, subvolume)

        # note: '-c' '%a' are adjacent string literals, i.e. one '-c%a' argument
        actual_mode_1 = self.mount_a.run_shell(['stat', '-c' '%a', subvol_path]).stdout.getvalue().strip()
        self.assertEqual(actual_mode_1, default_mode)

        # try creating w/ same subvolume name with --mode 777
        new_mode = "777"
        self._fs_cmd("subvolume", "create", self.volname, subvolume, "--mode", new_mode)

        actual_mode_2 = self.mount_a.run_shell(['stat', '-c' '%a', subvol_path]).stdout.getvalue().strip()
        self.assertEqual(actual_mode_2, new_mode)

        # remove subvolume
        self._fs_cmd("subvolume", "rm", self.volname, subvolume)

        # verify trash dir is clean
        self._wait_for_trash_empty()
2132 | ||
    def test_subvolume_create_idempotence_without_passing_mode(self):
        """Re-creating an existing subvolume without --mode must reset its mode
        to the default (755)."""
        # create subvolume
        desired_mode = "777"
        subvolume = self._gen_subvol_name()
        self._fs_cmd("subvolume", "create", self.volname, subvolume, "--mode", desired_mode)

        subvol_path = self._get_subvolume_path(self.volname, subvolume)

        actual_mode_1 = self.mount_a.run_shell(['stat', '-c' '%a', subvol_path]).stdout.getvalue().strip()
        self.assertEqual(actual_mode_1, desired_mode)

        # default mode
        default_mode = "755"

        # try creating w/ same subvolume name without passing --mode argument
        self._fs_cmd("subvolume", "create", self.volname, subvolume)

        actual_mode_2 = self.mount_a.run_shell(['stat', '-c' '%a', subvol_path]).stdout.getvalue().strip()
        self.assertEqual(actual_mode_2, default_mode)

        # remove subvolume
        self._fs_cmd("subvolume", "rm", self.volname, subvolume)

        # verify trash dir is clean
        self._wait_for_trash_empty()
2158 | ||
f67539c2 TL |
2159 | def test_subvolume_create_isolated_namespace(self): |
2160 | """ | |
2161 | Create subvolume in separate rados namespace | |
2162 | """ | |
f6b5b4d7 | 2163 | |
f67539c2 | 2164 | # create subvolume |
f38dd50b | 2165 | subvolume = self._gen_subvol_name() |
f67539c2 | 2166 | self._fs_cmd("subvolume", "create", self.volname, subvolume, "--namespace-isolated") |
f6b5b4d7 | 2167 | |
f67539c2 TL |
2168 | # get subvolume metadata |
2169 | subvol_info = json.loads(self._get_subvolume_info(self.volname, subvolume)) | |
2170 | self.assertNotEqual(len(subvol_info), 0) | |
2171 | self.assertEqual(subvol_info["pool_namespace"], "fsvolumens_" + subvolume) | |
f6b5b4d7 | 2172 | |
f67539c2 | 2173 | # remove subvolumes |
adb31ebb TL |
2174 | self._fs_cmd("subvolume", "rm", self.volname, subvolume) |
2175 | ||
2176 | # verify trash dir is clean | |
2177 | self._wait_for_trash_empty() | |
2178 | ||
f67539c2 | 2179 | def test_subvolume_create_with_auto_cleanup_on_fail(self): |
f38dd50b | 2180 | subvolume = self._gen_subvol_name() |
f67539c2 TL |
2181 | data_pool = "invalid_pool" |
2182 | # create subvolume with invalid data pool layout fails | |
2183 | with self.assertRaises(CommandFailedError): | |
2184 | self._fs_cmd("subvolume", "create", self.volname, subvolume, "--pool_layout", data_pool) | |
adb31ebb | 2185 | |
f67539c2 TL |
2186 | # check whether subvol path is cleaned up |
2187 | try: | |
2188 | self._fs_cmd("subvolume", "getpath", self.volname, subvolume) | |
2189 | except CommandFailedError as ce: | |
2190 | self.assertEqual(ce.exitstatus, errno.ENOENT, "invalid error code on getpath of non-existent subvolume") | |
2191 | else: | |
2192 | self.fail("expected the 'fs subvolume getpath' command to fail") | |
2193 | ||
2194 | # verify trash dir is clean | |
adb31ebb TL |
2195 | self._wait_for_trash_empty() |
2196 | ||
    def test_subvolume_create_with_desired_data_pool_layout_in_group(self):
        """A subvolume created with --pool_layout must land on the requested
        data pool rather than the group's default pool."""
        subvol1, subvol2 = self._gen_subvol_name(2)
        group = self._gen_subvol_grp_name()

        # create group. this also helps set default pool layout for subvolumes
        # created within the group.
        self._fs_cmd("subvolumegroup", "create", self.volname, group)

        # create subvolume in group.
        self._fs_cmd("subvolume", "create", self.volname, subvol1, "--group_name", group)
        subvol1_path = self._get_subvolume_path(self.volname, subvol1, group_name=group)

        default_pool = self.mount_a.getfattr(subvol1_path, "ceph.dir.layout.pool")
        new_pool = "new_pool"
        self.assertNotEqual(default_pool, new_pool)

        # add data pool
        newid = self.fs.add_data_pool(new_pool)

        # create subvolume specifying the new data pool as its pool layout
        self._fs_cmd("subvolume", "create", self.volname, subvol2, "--group_name", group,
                     "--pool_layout", new_pool)
        subvol2_path = self._get_subvolume_path(self.volname, subvol2, group_name=group)

        desired_pool = self.mount_a.getfattr(subvol2_path, "ceph.dir.layout.pool")
        try:
            self.assertEqual(desired_pool, new_pool)
        except AssertionError:
            # older kernels report the pool id, not the pool name
            self.assertEqual(int(desired_pool), newid)  # old kernel returns id

        self._fs_cmd("subvolume", "rm", self.volname, subvol2, group)
        self._fs_cmd("subvolume", "rm", self.volname, subvol1, group)
        self._fs_cmd("subvolumegroup", "rm", self.volname, group)

        # verify trash dir is clean
        self._wait_for_trash_empty()
2233 | ||
a4b75251 | 2234 | def test_subvolume_create_with_desired_mode(self): |
f38dd50b | 2235 | subvol1 = self._gen_subvol_name() |
a4b75251 TL |
2236 | |
2237 | # default mode | |
2238 | default_mode = "755" | |
2239 | # desired mode | |
2240 | desired_mode = "777" | |
2241 | ||
2242 | self._fs_cmd("subvolume", "create", self.volname, subvol1, "--mode", "777") | |
2243 | ||
2244 | subvol1_path = self._get_subvolume_path(self.volname, subvol1) | |
2245 | ||
2246 | # check subvolumegroup's mode | |
2247 | subvol_par_path = os.path.dirname(subvol1_path) | |
2248 | group_path = os.path.dirname(subvol_par_path) | |
2249 | actual_mode1 = self.mount_a.run_shell(['stat', '-c' '%a', group_path]).stdout.getvalue().strip() | |
2250 | self.assertEqual(actual_mode1, default_mode) | |
2251 | # check /volumes mode | |
2252 | volumes_path = os.path.dirname(group_path) | |
2253 | actual_mode2 = self.mount_a.run_shell(['stat', '-c' '%a', volumes_path]).stdout.getvalue().strip() | |
2254 | self.assertEqual(actual_mode2, default_mode) | |
2255 | # check subvolume's mode | |
2256 | actual_mode3 = self.mount_a.run_shell(['stat', '-c' '%a', subvol1_path]).stdout.getvalue().strip() | |
2257 | self.assertEqual(actual_mode3, desired_mode) | |
2258 | ||
2259 | self._fs_cmd("subvolume", "rm", self.volname, subvol1) | |
2260 | ||
2261 | # verify trash dir is clean | |
2262 | self._wait_for_trash_empty() | |
2263 | ||
    def test_subvolume_create_with_desired_mode_in_group(self):
        """Subvolumes in a group honor --mode (both '777' and '0777' forms),
        and default to 755 when --mode is not given."""
        subvol1, subvol2, subvol3 = self._gen_subvol_name(3)

        group = self._gen_subvol_grp_name()
        # default mode
        expected_mode1 = "755"
        # desired mode
        expected_mode2 = "777"

        # create group
        self._fs_cmd("subvolumegroup", "create", self.volname, group)

        # create subvolume in group
        self._fs_cmd("subvolume", "create", self.volname, subvol1, "--group_name", group)
        self._fs_cmd("subvolume", "create", self.volname, subvol2, "--group_name", group, "--mode", "777")
        # check whether mode 0777 also works
        self._fs_cmd("subvolume", "create", self.volname, subvol3, "--group_name", group, "--mode", "0777")

        subvol1_path = self._get_subvolume_path(self.volname, subvol1, group_name=group)
        subvol2_path = self._get_subvolume_path(self.volname, subvol2, group_name=group)
        subvol3_path = self._get_subvolume_path(self.volname, subvol3, group_name=group)

        # check subvolume's mode
        actual_mode1 = self.mount_a.run_shell(['stat', '-c' '%a', subvol1_path]).stdout.getvalue().strip()
        actual_mode2 = self.mount_a.run_shell(['stat', '-c' '%a', subvol2_path]).stdout.getvalue().strip()
        actual_mode3 = self.mount_a.run_shell(['stat', '-c' '%a', subvol3_path]).stdout.getvalue().strip()
        self.assertEqual(actual_mode1, expected_mode1)
        self.assertEqual(actual_mode2, expected_mode2)
        self.assertEqual(actual_mode3, expected_mode2)

        self._fs_cmd("subvolume", "rm", self.volname, subvol1, group)
        self._fs_cmd("subvolume", "rm", self.volname, subvol2, group)
        self._fs_cmd("subvolume", "rm", self.volname, subvol3, group)
        self._fs_cmd("subvolumegroup", "rm", self.volname, group)

        # verify trash dir is clean
        self._wait_for_trash_empty()
2301 | ||
    def test_subvolume_create_with_desired_uid_gid(self):
        """
        That the subvolume can be created with the desired uid and gid and its uid and gid matches the
        expected values.
        """
        uid = 1000
        gid = 1000

        # create subvolume
        subvolname = self._gen_subvol_name()
        self._fs_cmd("subvolume", "create", self.volname, subvolname, "--uid", str(uid), "--gid", str(gid))

        # make sure it exists
        subvolpath = self._get_subvolume_path(self.volname, subvolname)
        self.assertNotEqual(subvolpath, None)

        # verify the uid and gid ('-c' '%u' / '%g' are adjacent literals)
        suid = int(self.mount_a.run_shell(['stat', '-c' '%u', subvolpath]).stdout.getvalue().strip())
        sgid = int(self.mount_a.run_shell(['stat', '-c' '%g', subvolpath]).stdout.getvalue().strip())
        self.assertEqual(uid, suid)
        self.assertEqual(gid, sgid)

        # remove subvolume
        self._fs_cmd("subvolume", "rm", self.volname, subvolname)

        # verify trash dir is clean
        self._wait_for_trash_empty()
2329 | ||
eafe8130 | 2330 | def test_subvolume_create_with_invalid_data_pool_layout(self): |
f38dd50b | 2331 | subvolume = self._gen_subvol_name() |
eafe8130 TL |
2332 | data_pool = "invalid_pool" |
2333 | # create subvolume with invalid data pool layout | |
2334 | try: | |
2335 | self._fs_cmd("subvolume", "create", self.volname, subvolume, "--pool_layout", data_pool) | |
2336 | except CommandFailedError as ce: | |
adb31ebb | 2337 | self.assertEqual(ce.exitstatus, errno.EINVAL, "invalid error code on create of subvolume with invalid pool layout") |
eafe8130 | 2338 | else: |
adb31ebb TL |
2339 | self.fail("expected the 'fs subvolume create' command to fail") |
2340 | ||
2341 | # verify trash dir is clean | |
2342 | self._wait_for_trash_empty() | |
92f5a8d4 | 2343 | |
eafe8130 TL |
    def test_subvolume_create_with_invalid_size(self):
        """Creating a subvolume with a negative size must fail with EINVAL."""
        # create subvolume with an invalid size -1
        subvolume = self._gen_subvol_name()
        try:
            self._fs_cmd("subvolume", "create", self.volname, subvolume, "--size", "-1")
        except CommandFailedError as ce:
            self.assertEqual(ce.exitstatus, errno.EINVAL, "invalid error code on create of subvolume with invalid size")
        else:
            self.fail("expected the 'fs subvolume create' command to fail")

        # verify trash dir is clean
        self._wait_for_trash_empty()
eafe8130 | 2356 | |
2a845540 TL |
    def test_subvolume_create_and_ls_providing_group_as_nogroup(self):
        """
        That a 'subvolume create' and 'subvolume ls' should throw
        permission denied error if option --group=_nogroup is provided.
        """

        subvolname = self._gen_subvol_name()

        # try to create subvolume providing --group_name=_nogroup option
        try:
            self._fs_cmd("subvolume", "create", self.volname, subvolname, "--group_name", "_nogroup")
        except CommandFailedError as ce:
            # _nogroup is a reserved internal group name
            self.assertEqual(ce.exitstatus, errno.EPERM)
        else:
            self.fail("expected the 'fs subvolume create' command to fail")

        # create subvolume
        self._fs_cmd("subvolume", "create", self.volname, subvolname)

        # try to list subvolumes providing --group_name=_nogroup option
        try:
            self._fs_cmd("subvolume", "ls", self.volname, "--group_name", "_nogroup")
        except CommandFailedError as ce:
            self.assertEqual(ce.exitstatus, errno.EPERM)
        else:
            self.fail("expected the 'fs subvolume ls' command to fail")

        # list subvolumes (without the reserved group name this must succeed)
        self._fs_cmd("subvolume", "ls", self.volname)

        self._fs_cmd("subvolume", "rm", self.volname, subvolname)

        # verify trash dir is clean.
        self._wait_for_trash_empty()
2391 | ||
f67539c2 TL |
2392 | def test_subvolume_expand(self): |
2393 | """ | |
2394 | That a subvolume can be expanded in size and its quota matches the expected size. | |
2395 | """ | |
81eedcae | 2396 | |
f67539c2 | 2397 | # create subvolume |
f38dd50b | 2398 | subvolname = self._gen_subvol_name() |
f67539c2 TL |
2399 | osize = self.DEFAULT_FILE_SIZE*1024*1024 |
2400 | self._fs_cmd("subvolume", "create", self.volname, subvolname, "--size", str(osize)) | |
81eedcae | 2401 | |
f67539c2 TL |
2402 | # make sure it exists |
2403 | subvolpath = self._get_subvolume_path(self.volname, subvolname) | |
2404 | self.assertNotEqual(subvolpath, None) | |
81eedcae | 2405 | |
f67539c2 TL |
2406 | # expand the subvolume |
2407 | nsize = osize*2 | |
2408 | self._fs_cmd("subvolume", "resize", self.volname, subvolname, str(nsize)) | |
81eedcae | 2409 | |
f67539c2 TL |
2410 | # verify the quota |
2411 | size = int(self.mount_a.getfattr(subvolpath, "ceph.quota.max_bytes")) | |
2412 | self.assertEqual(size, nsize) | |
2413 | ||
2414 | # remove subvolume | |
2415 | self._fs_cmd("subvolume", "rm", self.volname, subvolname) | |
2416 | ||
2417 | # verify trash dir is clean | |
2418 | self._wait_for_trash_empty() | |
2419 | ||
2420 | def test_subvolume_info(self): | |
2421 | # tests the 'fs subvolume info' command | |
2422 | ||
2423 | subvol_md = ["atime", "bytes_pcent", "bytes_quota", "bytes_used", "created_at", "ctime", | |
2424 | "data_pool", "gid", "mode", "mon_addrs", "mtime", "path", "pool_namespace", | |
2425 | "type", "uid", "features", "state"] | |
494da23a TL |
2426 | |
2427 | # create subvolume | |
f38dd50b | 2428 | subvolume = self._gen_subvol_name() |
494da23a | 2429 | self._fs_cmd("subvolume", "create", self.volname, subvolume) |
494da23a | 2430 | |
f67539c2 TL |
2431 | # get subvolume metadata |
2432 | subvol_info = json.loads(self._get_subvolume_info(self.volname, subvolume)) | |
2433 | for md in subvol_md: | |
2434 | self.assertIn(md, subvol_info, "'{0}' key not present in metadata of subvolume".format(md)) | |
494da23a | 2435 | |
f67539c2 TL |
2436 | self.assertEqual(subvol_info["bytes_pcent"], "undefined", "bytes_pcent should be set to undefined if quota is not set") |
2437 | self.assertEqual(subvol_info["bytes_quota"], "infinite", "bytes_quota should be set to infinite if quota is not set") | |
2438 | self.assertEqual(subvol_info["pool_namespace"], "", "expected pool namespace to be empty") | |
2439 | self.assertEqual(subvol_info["state"], "complete", "expected state to be complete") | |
2440 | ||
2441 | self.assertEqual(len(subvol_info["features"]), 3, | |
2442 | msg="expected 3 features, found '{0}' ({1})".format(len(subvol_info["features"]), subvol_info["features"])) | |
2443 | for feature in ['snapshot-clone', 'snapshot-autoprotect', 'snapshot-retention']: | |
2444 | self.assertIn(feature, subvol_info["features"], msg="expected feature '{0}' in subvolume".format(feature)) | |
2445 | ||
2446 | nsize = self.DEFAULT_FILE_SIZE*1024*1024 | |
2447 | self._fs_cmd("subvolume", "resize", self.volname, subvolume, str(nsize)) | |
2448 | ||
2449 | # get subvolume metadata after quota set | |
2450 | subvol_info = json.loads(self._get_subvolume_info(self.volname, subvolume)) | |
2451 | for md in subvol_md: | |
2452 | self.assertIn(md, subvol_info, "'{0}' key not present in metadata of subvolume".format(md)) | |
2453 | ||
2454 | self.assertNotEqual(subvol_info["bytes_pcent"], "undefined", "bytes_pcent should not be set to undefined if quota is not set") | |
2455 | self.assertEqual(subvol_info["bytes_quota"], nsize, "bytes_quota should be set to '{0}'".format(nsize)) | |
2456 | self.assertEqual(subvol_info["type"], "subvolume", "type should be set to subvolume") | |
2457 | self.assertEqual(subvol_info["state"], "complete", "expected state to be complete") | |
2458 | ||
2459 | self.assertEqual(len(subvol_info["features"]), 3, | |
2460 | msg="expected 3 features, found '{0}' ({1})".format(len(subvol_info["features"]), subvol_info["features"])) | |
2461 | for feature in ['snapshot-clone', 'snapshot-autoprotect', 'snapshot-retention']: | |
2462 | self.assertIn(feature, subvol_info["features"], msg="expected feature '{0}' in subvolume".format(feature)) | |
2463 | ||
2464 | # remove subvolumes | |
494da23a TL |
2465 | self._fs_cmd("subvolume", "rm", self.volname, subvolume) |
2466 | ||
adb31ebb TL |
2467 | # verify trash dir is clean |
2468 | self._wait_for_trash_empty() | |
2469 | ||
eafe8130 TL |
2470 | def test_subvolume_ls(self): |
2471 | # tests the 'fs subvolume ls' command | |
2472 | ||
2473 | subvolumes = [] | |
2474 | ||
2475 | # create subvolumes | |
f38dd50b | 2476 | subvolumes = self._gen_subvol_name(3) |
92f5a8d4 TL |
2477 | for subvolume in subvolumes: |
2478 | self._fs_cmd("subvolume", "create", self.volname, subvolume) | |
eafe8130 TL |
2479 | |
2480 | # list subvolumes | |
2481 | subvolumels = json.loads(self._fs_cmd('subvolume', 'ls', self.volname)) | |
2482 | if len(subvolumels) == 0: | |
adb31ebb | 2483 | self.fail("Expected the 'fs subvolume ls' command to list the created subvolumes.") |
eafe8130 TL |
2484 | else: |
2485 | subvolnames = [subvolume['name'] for subvolume in subvolumels] | |
2486 | if collections.Counter(subvolnames) != collections.Counter(subvolumes): | |
adb31ebb TL |
2487 | self.fail("Error creating or listing subvolumes") |
2488 | ||
2489 | # remove subvolume | |
2490 | for subvolume in subvolumes: | |
2491 | self._fs_cmd("subvolume", "rm", self.volname, subvolume) | |
2492 | ||
2493 | # verify trash dir is clean | |
2494 | self._wait_for_trash_empty() | |
eafe8130 | 2495 | |
2a845540 TL |
2496 | def test_subvolume_ls_with_groupname_as_internal_directory(self): |
2497 | # tests the 'fs subvolume ls' command when the default groupname as internal directories | |
2498 | # Eg: '_nogroup', '_legacy', '_deleting', '_index'. | |
2499 | # Expecting 'fs subvolume ls' will be fail with errno EINVAL for '_legacy', '_deleting', '_index' | |
2500 | # Expecting 'fs subvolume ls' will be fail with errno EPERM for '_nogroup' | |
2501 | ||
2502 | # try to list subvolumes providing --group_name=_nogroup option | |
2503 | try: | |
2504 | self._fs_cmd("subvolume", "ls", self.volname, "--group_name", "_nogroup") | |
2505 | except CommandFailedError as ce: | |
2506 | self.assertEqual(ce.exitstatus, errno.EPERM) | |
2507 | else: | |
2508 | self.fail("expected the 'fs subvolume ls' command to fail with error 'EPERM' for _nogroup") | |
2509 | ||
2510 | # try to list subvolumes providing --group_name=_legacy option | |
2511 | try: | |
2512 | self._fs_cmd("subvolume", "ls", self.volname, "--group_name", "_legacy") | |
2513 | except CommandFailedError as ce: | |
2514 | self.assertEqual(ce.exitstatus, errno.EINVAL) | |
2515 | else: | |
2516 | self.fail("expected the 'fs subvolume ls' command to fail with error 'EINVAL' for _legacy") | |
2517 | ||
2518 | # try to list subvolumes providing --group_name=_deleting option | |
2519 | try: | |
2520 | self._fs_cmd("subvolume", "ls", self.volname, "--group_name", "_deleting") | |
2521 | except CommandFailedError as ce: | |
2522 | self.assertEqual(ce.exitstatus, errno.EINVAL) | |
2523 | else: | |
2524 | self.fail("expected the 'fs subvolume ls' command to fail with error 'EINVAL' for _deleting") | |
2525 | ||
2526 | # try to list subvolumes providing --group_name=_index option | |
2527 | try: | |
2528 | self._fs_cmd("subvolume", "ls", self.volname, "--group_name", "_index") | |
2529 | except CommandFailedError as ce: | |
2530 | self.assertEqual(ce.exitstatus, errno.EINVAL) | |
2531 | else: | |
2532 | self.fail("expected the 'fs subvolume ls' command to fail with error 'EINVAL' for _index") | |
2533 | ||
eafe8130 TL |
2534 | def test_subvolume_ls_for_notexistent_default_group(self): |
2535 | # tests the 'fs subvolume ls' command when the default group '_nogroup' doesn't exist | |
2536 | # prerequisite: we expect that the volume is created and the default group _nogroup is | |
2537 | # NOT created (i.e. a subvolume without group is not created) | |
2538 | ||
2539 | # list subvolumes | |
2540 | subvolumels = json.loads(self._fs_cmd('subvolume', 'ls', self.volname)) | |
2541 | if len(subvolumels) > 0: | |
2542 | raise RuntimeError("Expected the 'fs subvolume ls' command to output an empty list.") | |
2543 | ||
f67539c2 | 2544 | def test_subvolume_marked(self): |
92f5a8d4 | 2545 | """ |
f67539c2 | 2546 | ensure a subvolume is marked with the ceph.dir.subvolume xattr |
92f5a8d4 | 2547 | """ |
f38dd50b | 2548 | subvolume = self._gen_subvol_name() |
92f5a8d4 TL |
2549 | |
2550 | # create subvolume | |
f67539c2 | 2551 | self._fs_cmd("subvolume", "create", self.volname, subvolume) |
92f5a8d4 | 2552 | |
f67539c2 TL |
2553 | # getpath |
2554 | subvolpath = self._get_subvolume_path(self.volname, subvolume) | |
92f5a8d4 | 2555 | |
f67539c2 TL |
2556 | # subdirectory of a subvolume cannot be moved outside the subvolume once marked with |
2557 | # the xattr ceph.dir.subvolume, hence test by attempting to rename subvol path (incarnation) | |
2558 | # outside the subvolume | |
2559 | dstpath = os.path.join(self.mount_a.mountpoint, 'volumes', '_nogroup', 'new_subvol_location') | |
2560 | srcpath = os.path.join(self.mount_a.mountpoint, subvolpath) | |
2561 | rename_script = dedent(""" | |
2562 | import os | |
2563 | import errno | |
2564 | try: | |
2565 | os.rename("{src}", "{dst}") | |
2566 | except OSError as e: | |
2567 | if e.errno != errno.EXDEV: | |
2568 | raise RuntimeError("invalid error code on renaming subvolume incarnation out of subvolume directory") | |
2569 | else: | |
2570 | raise RuntimeError("expected renaming subvolume incarnation out of subvolume directory to fail") | |
2571 | """) | |
522d829b | 2572 | self.mount_a.run_python(rename_script.format(src=srcpath, dst=dstpath), sudo=True) |
f67539c2 TL |
2573 | |
2574 | # remove subvolume | |
2575 | self._fs_cmd("subvolume", "rm", self.volname, subvolume) | |
2576 | ||
2577 | # verify trash dir is clean | |
2578 | self._wait_for_trash_empty() | |
2579 | ||
    def test_subvolume_pin_export(self):
        """Pin a subvolume to MDS rank 1 via 'subvolume pin ... export' and
        verify the subtree migrates to that rank."""
        # need a second active MDS to pin to rank 1
        self.fs.set_max_mds(2)
        status = self.fs.wait_for_daemons()

        subvolume = self._gen_subvol_name()
        self._fs_cmd("subvolume", "create", self.volname, subvolume)
        self._fs_cmd("subvolume", "pin", self.volname, subvolume, "export", "1")
        path = self._fs_cmd("subvolume", "getpath", self.volname, subvolume)
        path = os.path.dirname(path) # get subvolume path

        self._get_subtrees(status=status, rank=1)
        self._wait_subtrees([(path, 1)], status=status)

        # remove subvolume
        self._fs_cmd("subvolume", "rm", self.volname, subvolume)

        # verify trash dir is clean
        self._wait_for_trash_empty()
2598 | ||
cd265ab1 TL |
2599 | ### authorize operations |
2600 | ||
    def test_authorize_deauthorize_legacy_subvolume(self):
        """
        That authorize/deauthorize work against a legacy (pre-mgr/volumes)
        subvolume: rw access is granted, downgraded to read-only, and the
        auth ID is removed again on deauthorize.
        """
        subvolume = self._gen_subvol_name()
        group = self._gen_subvol_grp_name()
        authid = "alice"

        guest_mount = self.mount_b
        guest_mount.umount_wait()

        # emulate an old-fashioned subvolume in a custom group: a bare
        # directory under /volumes created out of band, not via mgr/volumes
        createpath = os.path.join(".", "volumes", group, subvolume)
        self.mount_a.run_shell(['sudo', 'mkdir', '-p', createpath], omit_sudo=False)

        # add required xattrs to subvolume (legacy subvolumes carry an
        # explicit data-pool layout)
        default_pool = self.mount_a.getfattr(".", "ceph.dir.layout.pool")
        self.mount_a.setfattr(createpath, 'ceph.dir.layout.pool', default_pool, sudo=True)

        mount_path = os.path.join("/", "volumes", group, subvolume)

        # authorize guest authID read-write access to subvolume
        key = self._fs_cmd("subvolume", "authorize", self.volname, subvolume, authid,
                           "--group_name", group, "--tenant_id", "tenant_id")

        # guest authID should exist
        existing_ids = [a['entity'] for a in self.auth_list()]
        self.assertIn("client.{0}".format(authid), existing_ids)

        # configure credentials for guest client
        guest_keyring_path = self._configure_guest_auth(
            guest_mount, authid, key)
        # mount the subvolume, and write to it
        guest_mount.mount_wait(cephfs_mntpt=mount_path,
                               client_keyring_path=guest_keyring_path)
        guest_mount.write_n_mb("data.bin", 1)

        # authorize guest authID read access to subvolume (downgrade rw -> r)
        key = self._fs_cmd("subvolume", "authorize", self.volname, subvolume, authid,
                           "--group_name", group, "--tenant_id", "tenant_id", "--access_level", "r")

        # guest client sees the change in access level to read only after a
        # remount of the subvolume.
        guest_mount.umount_wait()
        guest_mount.mount_wait(cephfs_mntpt=mount_path)

        # read existing content of the subvolume
        self.assertListEqual(guest_mount.ls(guest_mount.mountpoint), ["data.bin"])
        # cannot write into read-only subvolume
        with self.assertRaises(CommandFailedError):
            guest_mount.write_n_mb("rogue.bin", 1)

        # cleanup
        guest_mount.umount_wait()
        self._fs_cmd("subvolume", "deauthorize", self.volname, subvolume, authid,
                     "--group_name", group)
        # guest authID should no longer exist
        existing_ids = [a['entity'] for a in self.auth_list()]
        self.assertNotIn("client.{0}".format(authid), existing_ids)
        self._fs_cmd("subvolume", "rm", self.volname, subvolume, "--group_name", group)
        self._fs_cmd("subvolumegroup", "rm", self.volname, group)
    def test_authorize_deauthorize_subvolume(self):
        """
        That authorize/deauthorize work against a mgr/volumes-created
        subvolume: rw access is granted, downgraded to read-only, and the
        auth ID is removed again on deauthorize.
        """
        subvolume = self._gen_subvol_name()
        group = self._gen_subvol_grp_name()
        authid = "alice"

        guest_mount = self.mount_b
        guest_mount.umount_wait()

        # create group
        self._fs_cmd("subvolumegroup", "create", self.volname, group, "--mode=777")

        # create subvolume in group
        self._fs_cmd("subvolume", "create", self.volname, subvolume, "--group_name", group)
        mount_path = self._fs_cmd("subvolume", "getpath", self.volname, subvolume,
                                  "--group_name", group).rstrip()

        # authorize guest authID read-write access to subvolume
        key = self._fs_cmd("subvolume", "authorize", self.volname, subvolume, authid,
                           "--group_name", group, "--tenant_id", "tenant_id")

        # guest authID should exist
        existing_ids = [a['entity'] for a in self.auth_list()]
        self.assertIn("client.{0}".format(authid), existing_ids)

        # configure credentials for guest client
        guest_keyring_path = self._configure_guest_auth(
            guest_mount, authid, key)
        # mount the subvolume, and write to it
        guest_mount.mount_wait(cephfs_mntpt=mount_path,
                               client_keyring_path=guest_keyring_path)
        guest_mount.write_n_mb("data.bin", 1)

        # authorize guest authID read access to subvolume (downgrade rw -> r)
        key = self._fs_cmd("subvolume", "authorize", self.volname, subvolume, authid,
                           "--group_name", group, "--tenant_id", "tenant_id", "--access_level", "r")

        # guest client sees the change in access level to read only after a
        # remount of the subvolume.
        guest_mount.umount_wait()
        guest_mount.mount_wait(cephfs_mntpt=mount_path)

        # read existing content of the subvolume
        self.assertListEqual(guest_mount.ls(guest_mount.mountpoint), ["data.bin"])
        # cannot write into read-only subvolume
        with self.assertRaises(CommandFailedError):
            guest_mount.write_n_mb("rogue.bin", 1)

        # cleanup
        guest_mount.umount_wait()
        self._fs_cmd("subvolume", "deauthorize", self.volname, subvolume, authid,
                     "--group_name", group)
        # guest authID should no longer exist
        existing_ids = [a['entity'] for a in self.auth_list()]
        self.assertNotIn("client.{0}".format(authid), existing_ids)
        self._fs_cmd("subvolume", "rm", self.volname, subvolume, "--group_name", group)
        self._fs_cmd("subvolumegroup", "rm", self.volname, group)
cd265ab1 TL |
    def test_multitenant_subvolumes(self):
        """
        That subvolume access can be restricted to a tenant.

        That metadata used to enforce tenant isolation of
        subvolumes is stored as a two-way mapping between auth
        IDs and subvolumes that they're authorized to access.
        """
        subvolume = self._gen_subvol_name()
        group = self._gen_subvol_grp_name()

        # NOTE(review): guest_mount is only used here to inspect the raw
        # /volumes tree; assumes mount_b is already mounted at the fs root.
        guest_mount = self.mount_b

        # Guest clients belonging to different tenants, but using the same
        # auth ID.
        auth_id = "alice"
        guestclient_1 = {
            "auth_id": auth_id,
            "tenant_id": "tenant1",
        }
        guestclient_2 = {
            "auth_id": auth_id,
            "tenant_id": "tenant2",
        }

        # create group
        self._fs_cmd("subvolumegroup", "create", self.volname, group)

        # create subvolume in group
        self._fs_cmd("subvolume", "create", self.volname, subvolume, "--group_name", group)

        # Check that subvolume metadata file is created on subvolume creation.
        subvol_metadata_filename = "_{0}:{1}.meta".format(group, subvolume)
        self.assertIn(subvol_metadata_filename, guest_mount.ls("volumes"))

        # Authorize 'guestclient_1', using auth ID 'alice' and belonging to
        # 'tenant1', with 'rw' access to the volume.
        self._fs_cmd("subvolume", "authorize", self.volname, subvolume, guestclient_1["auth_id"],
                     "--group_name", group, "--tenant_id", guestclient_1["tenant_id"])

        # Check that auth metadata file for auth ID 'alice', is
        # created on authorizing 'alice' access to the subvolume.
        auth_metadata_filename = "${0}.meta".format(guestclient_1["auth_id"])
        self.assertIn(auth_metadata_filename, guest_mount.ls("volumes"))

        # Verify that the auth metadata file stores the tenant ID that the
        # auth ID belongs to, the auth ID's authorized access levels
        # for different subvolumes, versioning details, etc.
        expected_auth_metadata = {
            "version": 5,
            "compat_version": 6,
            "dirty": False,
            "tenant_id": "tenant1",
            "subvolumes": {
                "{0}/{1}".format(group,subvolume): {
                    "dirty": False,
                    "access_level": "rw"
                }
            }
        }

        # "version" may be newer than expected, so it is compared with >=
        # and then dropped before the exact-equality check below.
        auth_metadata = self._auth_metadata_get(guest_mount.read_file("volumes/{0}".format(auth_metadata_filename)))
        self.assertGreaterEqual(auth_metadata["version"], expected_auth_metadata["version"])
        del expected_auth_metadata["version"]
        del auth_metadata["version"]
        self.assertEqual(expected_auth_metadata, auth_metadata)

        # Verify that the subvolume metadata file stores info about auth IDs
        # and their access levels to the subvolume, versioning details, etc.
        expected_subvol_metadata = {
            "version": 1,
            "compat_version": 1,
            "auths": {
                "alice": {
                    "dirty": False,
                    "access_level": "rw"
                }
            }
        }
        subvol_metadata = self._auth_metadata_get(guest_mount.read_file("volumes/{0}".format(subvol_metadata_filename)))

        self.assertGreaterEqual(subvol_metadata["version"], expected_subvol_metadata["version"])
        del expected_subvol_metadata["version"]
        del subvol_metadata["version"]
        self.assertEqual(expected_subvol_metadata, subvol_metadata)

        # Cannot authorize 'guestclient_2' to access the volume.
        # It uses auth ID 'alice', which has already been used by a
        # 'guestclient_1' belonging to an another tenant for accessing
        # the volume.

        try:
            self._fs_cmd("subvolume", "authorize", self.volname, subvolume, guestclient_2["auth_id"],
                         "--group_name", group, "--tenant_id", guestclient_2["tenant_id"])
        except CommandFailedError as ce:
            self.assertEqual(ce.exitstatus, errno.EPERM,
                             "Invalid error code returned on authorize of subvolume with same auth_id but different tenant_id")
        else:
            self.fail("expected the 'fs subvolume authorize' command to fail")

        # Check that auth metadata file is cleaned up on removing
        # auth ID's only access to a volume.

        self._fs_cmd("subvolume", "deauthorize", self.volname, subvolume, auth_id,
                     "--group_name", group)
        self.assertNotIn(auth_metadata_filename, guest_mount.ls("volumes"))

        # Check that subvolume metadata file is cleaned up on subvolume deletion.
        self._fs_cmd("subvolume", "rm", self.volname, subvolume, "--group_name", group)
        self.assertNotIn(subvol_metadata_filename, guest_mount.ls("volumes"))

        # clean up
        guest_mount.umount_wait()
        self._fs_cmd("subvolumegroup", "rm", self.volname, group)
2831 | ||
2832 | def test_subvolume_authorized_list(self): | |
f38dd50b TL |
2833 | subvolume = self._gen_subvol_name() |
2834 | group = self._gen_subvol_grp_name() | |
cd265ab1 TL |
2835 | authid1 = "alice" |
2836 | authid2 = "guest1" | |
2837 | authid3 = "guest2" | |
2838 | ||
2839 | # create group | |
2840 | self._fs_cmd("subvolumegroup", "create", self.volname, group) | |
2841 | ||
2842 | # create subvolume in group | |
2843 | self._fs_cmd("subvolume", "create", self.volname, subvolume, "--group_name", group) | |
2844 | ||
2845 | # authorize alice authID read-write access to subvolume | |
2846 | self._fs_cmd("subvolume", "authorize", self.volname, subvolume, authid1, | |
2847 | "--group_name", group) | |
2848 | # authorize guest1 authID read-write access to subvolume | |
2849 | self._fs_cmd("subvolume", "authorize", self.volname, subvolume, authid2, | |
2850 | "--group_name", group) | |
2851 | # authorize guest2 authID read access to subvolume | |
2852 | self._fs_cmd("subvolume", "authorize", self.volname, subvolume, authid3, | |
2853 | "--group_name", group, "--access_level", "r") | |
2854 | ||
2855 | # list authorized-ids of the subvolume | |
2856 | expected_auth_list = [{'alice': 'rw'}, {'guest1': 'rw'}, {'guest2': 'r'}] | |
2857 | auth_list = json.loads(self._fs_cmd('subvolume', 'authorized_list', self.volname, subvolume, "--group_name", group)) | |
2858 | self.assertCountEqual(expected_auth_list, auth_list) | |
2859 | ||
2860 | # cleanup | |
2861 | self._fs_cmd("subvolume", "deauthorize", self.volname, subvolume, authid1, | |
2862 | "--group_name", group) | |
2863 | self._fs_cmd("subvolume", "deauthorize", self.volname, subvolume, authid2, | |
2864 | "--group_name", group) | |
2865 | self._fs_cmd("subvolume", "deauthorize", self.volname, subvolume, authid3, | |
2866 | "--group_name", group) | |
2867 | self._fs_cmd("subvolume", "rm", self.volname, subvolume, "--group_name", group) | |
2868 | self._fs_cmd("subvolumegroup", "rm", self.volname, group) | |
2869 | ||
    def test_authorize_auth_id_not_created_by_mgr_volumes(self):
        """
        If the auth_id already exists and is not created by mgr plugin,
        it's not allowed to authorize the auth-id by default.
        """

        subvolume = self._gen_subvol_name()
        group = self._gen_subvol_grp_name()

        # Create auth_id out of band (i.e. not via the mgr/volumes plugin)
        self.run_ceph_cmd(
            "auth", "get-or-create", "client.guest1",
            "mds", "allow *",
            "osd", "allow rw",
            "mon", "allow *"
        )

        auth_id = "guest1"
        guestclient_1 = {
            "auth_id": auth_id,
            "tenant_id": "tenant1",
        }

        # create group
        self._fs_cmd("subvolumegroup", "create", self.volname, group)

        # create subvolume in group
        self._fs_cmd("subvolume", "create", self.volname, subvolume, "--group_name", group)

        # authorize must refuse the pre-existing, foreign auth_id with EPERM
        try:
            self._fs_cmd("subvolume", "authorize", self.volname, subvolume, guestclient_1["auth_id"],
                         "--group_name", group, "--tenant_id", guestclient_1["tenant_id"])
        except CommandFailedError as ce:
            self.assertEqual(ce.exitstatus, errno.EPERM,
                             "Invalid error code returned on authorize of subvolume for auth_id created out of band")
        else:
            self.fail("expected the 'fs subvolume authorize' command to fail")

        # clean up
        self.run_ceph_cmd("auth", "rm", "client.guest1")
        self._fs_cmd("subvolume", "rm", self.volname, subvolume, "--group_name", group)
        self._fs_cmd("subvolumegroup", "rm", self.volname, group)
2912 | ||
    def test_authorize_allow_existing_id_option(self):
        """
        If the auth_id already exists and is not created by mgr volumes,
        it's not allowed to authorize the auth-id by default but is
        allowed with option allow_existing_id.
        """

        subvolume = self._gen_subvol_name()
        group = self._gen_subvol_grp_name()

        # Create auth_id out of band (i.e. not via the mgr/volumes plugin)
        self.run_ceph_cmd(
            "auth", "get-or-create", "client.guest1",
            "mds", "allow *",
            "osd", "allow rw",
            "mon", "allow *"
        )

        auth_id = "guest1"
        guestclient_1 = {
            "auth_id": auth_id,
            "tenant_id": "tenant1",
        }

        # create group
        self._fs_cmd("subvolumegroup", "create", self.volname, group)

        # create subvolume in group
        self._fs_cmd("subvolume", "create", self.volname, subvolume, "--group_name", group)

        # Cannot authorize 'guestclient_1' to access the volume by default,
        # which already exists and not created by mgr volumes but is allowed
        # with option 'allow_existing_id'.
        self._fs_cmd("subvolume", "authorize", self.volname, subvolume, guestclient_1["auth_id"],
                     "--group_name", group, "--tenant_id", guestclient_1["tenant_id"], "--allow-existing-id")

        # clean up
        self._fs_cmd("subvolume", "deauthorize", self.volname, subvolume, auth_id,
                     "--group_name", group)
        self.run_ceph_cmd("auth", "rm", "client.guest1")
        self._fs_cmd("subvolume", "rm", self.volname, subvolume, "--group_name", group)
        self._fs_cmd("subvolumegroup", "rm", self.volname, group)
2955 | ||
2956 | def test_deauthorize_auth_id_after_out_of_band_update(self): | |
2957 | """ | |
2958 | If the auth_id authorized by mgr/volumes plugin is updated | |
2959 | out of band, the auth_id should not be deleted after a | |
2960 | deauthorize. It should only remove caps associated with it. | |
2961 | """ | |
2962 | ||
f38dd50b TL |
2963 | subvolume = self._gen_subvol_name() |
2964 | group = self._gen_subvol_grp_name() | |
cd265ab1 TL |
2965 | |
2966 | auth_id = "guest1" | |
2967 | guestclient_1 = { | |
2968 | "auth_id": auth_id, | |
2969 | "tenant_id": "tenant1", | |
2970 | } | |
2971 | ||
2972 | # create group | |
2973 | self._fs_cmd("subvolumegroup", "create", self.volname, group) | |
2974 | ||
2975 | # create subvolume in group | |
2976 | self._fs_cmd("subvolume", "create", self.volname, subvolume, "--group_name", group) | |
2977 | ||
2978 | # Authorize 'guestclient_1' to access the subvolume. | |
2979 | self._fs_cmd("subvolume", "authorize", self.volname, subvolume, guestclient_1["auth_id"], | |
2980 | "--group_name", group, "--tenant_id", guestclient_1["tenant_id"]) | |
2981 | ||
2982 | subvol_path = self._fs_cmd("subvolume", "getpath", self.volname, subvolume, | |
2983 | "--group_name", group).rstrip() | |
2984 | ||
2985 | # Update caps for guestclient_1 out of band | |
f38dd50b | 2986 | out = self.get_ceph_cmd_stdout( |
cd265ab1 TL |
2987 | "auth", "caps", "client.guest1", |
2988 | "mds", "allow rw path=/volumes/{0}, allow rw path={1}".format(group, subvol_path), | |
2989 | "osd", "allow rw pool=cephfs_data", | |
2990 | "mon", "allow r", | |
2991 | "mgr", "allow *" | |
2992 | ) | |
2993 | ||
2994 | # Deauthorize guestclient_1 | |
2995 | self._fs_cmd("subvolume", "deauthorize", self.volname, subvolume, auth_id, "--group_name", group) | |
2996 | ||
2997 | # Validate the caps of guestclient_1 after deauthorize. It should not have deleted | |
2998 | # guestclient_1. The mgr and mds caps should be present which was updated out of band. | |
f38dd50b | 2999 | out = json.loads(self.get_ceph_cmd_stdout("auth", "get", "client.guest1", "--format=json-pretty")) |
cd265ab1 TL |
3000 | |
3001 | self.assertEqual("client.guest1", out[0]["entity"]) | |
3002 | self.assertEqual("allow rw path=/volumes/{0}".format(group), out[0]["caps"]["mds"]) | |
3003 | self.assertEqual("allow *", out[0]["caps"]["mgr"]) | |
3004 | self.assertNotIn("osd", out[0]["caps"]) | |
3005 | ||
3006 | # clean up | |
f38dd50b | 3007 | out = self.get_ceph_cmd_stdout("auth", "rm", "client.guest1") |
cd265ab1 TL |
3008 | self._fs_cmd("subvolume", "rm", self.volname, subvolume, "--group_name", group) |
3009 | self._fs_cmd("subvolumegroup", "rm", self.volname, group) | |
3010 | ||
    def test_recover_auth_metadata_during_authorize(self):
        """
        That auth metadata manager can recover from partial auth updates using
        metadata files, which store auth info and its update status info. This
        test validates the recovery during authorize.
        """

        guest_mount = self.mount_b

        subvolume = self._gen_subvol_name()
        group = self._gen_subvol_grp_name()

        auth_id = "guest1"
        guestclient_1 = {
            "auth_id": auth_id,
            "tenant_id": "tenant1",
        }

        # create group
        self._fs_cmd("subvolumegroup", "create", self.volname, group)

        # create subvolume in group
        self._fs_cmd("subvolume", "create", self.volname, subvolume, "--group_name", group)

        # Authorize 'guestclient_1' to access the subvolume.
        self._fs_cmd("subvolume", "authorize", self.volname, subvolume, guestclient_1["auth_id"],
                     "--group_name", group, "--tenant_id", guestclient_1["tenant_id"])

        # Check that auth metadata file for auth ID 'guest1', is
        # created on authorizing 'guest1' access to the subvolume.
        auth_metadata_filename = "${0}.meta".format(guestclient_1["auth_id"])
        self.assertIn(auth_metadata_filename, guest_mount.ls("volumes"))
        # snapshot the clean metadata before corrupting it below
        expected_auth_metadata_content = self._auth_metadata_get(self.mount_a.read_file("volumes/{0}".format(auth_metadata_filename)))

        # Induce partial auth update state by modifying the auth metadata file
        # (flip 'dirty' flags false -> true), and then run authorize again.
        guest_mount.run_shell(['sudo', 'sed', '-i', 's/false/true/g', 'volumes/{0}'.format(auth_metadata_filename)], omit_sudo=False)

        # Authorize 'guestclient_1' to access the subvolume; this should
        # repair the partially-updated metadata.
        self._fs_cmd("subvolume", "authorize", self.volname, subvolume, guestclient_1["auth_id"],
                     "--group_name", group, "--tenant_id", guestclient_1["tenant_id"])

        auth_metadata_content = self._auth_metadata_get(self.mount_a.read_file("volumes/{0}".format(auth_metadata_filename)))
        self.assertEqual(auth_metadata_content, expected_auth_metadata_content)

        # clean up
        self._fs_cmd("subvolume", "deauthorize", self.volname, subvolume, auth_id, "--group_name", group)
        guest_mount.umount_wait()
        self.run_ceph_cmd("auth", "rm", "client.guest1")
        self._fs_cmd("subvolume", "rm", self.volname, subvolume, "--group_name", group)
        self._fs_cmd("subvolumegroup", "rm", self.volname, group)
3062 | ||
    def test_recover_auth_metadata_during_deauthorize(self):
        """
        That auth metadata manager can recover from partial auth updates using
        metadata files, which store auth info and its update status info. This
        test validates the recovery during deauthorize.
        """

        guest_mount = self.mount_b

        subvolume1, subvolume2 = self._gen_subvol_name(2)
        group = self._gen_subvol_grp_name()

        guestclient_1 = {
            "auth_id": "guest1",
            "tenant_id": "tenant1",
        }

        # create group
        self._fs_cmd("subvolumegroup", "create", self.volname, group)

        # create subvolumes in group
        self._fs_cmd("subvolume", "create", self.volname, subvolume1, "--group_name", group)
        self._fs_cmd("subvolume", "create", self.volname, subvolume2, "--group_name", group)

        # Authorize 'guestclient_1' to access the subvolume1.
        self._fs_cmd("subvolume", "authorize", self.volname, subvolume1, guestclient_1["auth_id"],
                     "--group_name", group, "--tenant_id", guestclient_1["tenant_id"])

        # Check that auth metadata file for auth ID 'guest1', is
        # created on authorizing 'guest1' access to the subvolume1.
        auth_metadata_filename = "${0}.meta".format(guestclient_1["auth_id"])
        self.assertIn(auth_metadata_filename, guest_mount.ls("volumes"))
        # snapshot the single-subvolume metadata; deauthorizing subvolume2
        # later should restore exactly this content
        expected_auth_metadata_content = self._auth_metadata_get(self.mount_a.read_file("volumes/{0}".format(auth_metadata_filename)))

        # Authorize 'guestclient_1' to access the subvolume2.
        self._fs_cmd("subvolume", "authorize", self.volname, subvolume2, guestclient_1["auth_id"],
                     "--group_name", group, "--tenant_id", guestclient_1["tenant_id"])

        # Induce partial auth update state by modifying the auth metadata file
        # (flip 'dirty' flags false -> true), and then run de-authorize.
        guest_mount.run_shell(['sudo', 'sed', '-i', 's/false/true/g', 'volumes/{0}'.format(auth_metadata_filename)], omit_sudo=False)

        # Deauthorize 'guestclient_1' to access the subvolume2.
        self._fs_cmd("subvolume", "deauthorize", self.volname, subvolume2, guestclient_1["auth_id"],
                     "--group_name", group)

        auth_metadata_content = self._auth_metadata_get(self.mount_a.read_file("volumes/{0}".format(auth_metadata_filename)))
        self.assertEqual(auth_metadata_content, expected_auth_metadata_content)

        # clean up
        self._fs_cmd("subvolume", "deauthorize", self.volname, subvolume1, "guest1", "--group_name", group)
        guest_mount.umount_wait()
        self.run_ceph_cmd("auth", "rm", "client.guest1")
        self._fs_cmd("subvolume", "rm", self.volname, subvolume1, "--group_name", group)
        self._fs_cmd("subvolume", "rm", self.volname, subvolume2, "--group_name", group)
        self._fs_cmd("subvolumegroup", "rm", self.volname, group)
3119 | ||
    def test_update_old_style_auth_metadata_to_new_during_authorize(self):
        """
        CephVolumeClient stores the subvolume data in auth metadata file with
        'volumes' key as there was no subvolume namespace. It doesn't makes sense
        with mgr/volumes. This test validates the transparent update of 'volumes'
        key to 'subvolumes' key in auth metadata file during authorize.
        """

        guest_mount = self.mount_b

        subvolume1, subvolume2 = self._gen_subvol_name(2)
        group = self._gen_subvol_grp_name()

        auth_id = "guest1"
        guestclient_1 = {
            "auth_id": auth_id,
            "tenant_id": "tenant1",
        }

        # create group
        self._fs_cmd("subvolumegroup", "create", self.volname, group)

        # create subvolumes in group
        self._fs_cmd("subvolume", "create", self.volname, subvolume1, "--group_name", group)
        self._fs_cmd("subvolume", "create", self.volname, subvolume2, "--group_name", group)

        # Authorize 'guestclient_1' to access the subvolume1.
        self._fs_cmd("subvolume", "authorize", self.volname, subvolume1, guestclient_1["auth_id"],
                     "--group_name", group, "--tenant_id", guestclient_1["tenant_id"])

        # Check that auth metadata file for auth ID 'guest1', is
        # created on authorizing 'guest1' access to the subvolume1.
        auth_metadata_filename = "${0}.meta".format(guestclient_1["auth_id"])
        self.assertIn(auth_metadata_filename, guest_mount.ls("volumes"))

        # Replace 'subvolumes' to 'volumes', emulating an old style
        # (CephVolumeClient-era) auth-metadata file
        guest_mount.run_shell(['sudo', 'sed', '-i', 's/subvolumes/volumes/g', 'volumes/{0}'.format(auth_metadata_filename)], omit_sudo=False)

        # Authorize 'guestclient_1' to access the subvolume2. This should transparently update 'volumes' to 'subvolumes'
        self._fs_cmd("subvolume", "authorize", self.volname, subvolume2, guestclient_1["auth_id"],
                     "--group_name", group, "--tenant_id", guestclient_1["tenant_id"])

        expected_auth_metadata = {
            "version": 5,
            "compat_version": 6,
            "dirty": False,
            "tenant_id": "tenant1",
            "subvolumes": {
                "{0}/{1}".format(group,subvolume1): {
                    "dirty": False,
                    "access_level": "rw"
                },
                "{0}/{1}".format(group,subvolume2): {
                    "dirty": False,
                    "access_level": "rw"
                }
            }
        }

        auth_metadata = self._auth_metadata_get(guest_mount.read_file("volumes/{0}".format(auth_metadata_filename)))

        # "version" may be newer than expected; compare with >= and drop it
        # before the exact-equality check
        self.assertGreaterEqual(auth_metadata["version"], expected_auth_metadata["version"])
        del expected_auth_metadata["version"]
        del auth_metadata["version"]
        self.assertEqual(expected_auth_metadata, auth_metadata)

        # clean up
        self._fs_cmd("subvolume", "deauthorize", self.volname, subvolume1, auth_id, "--group_name", group)
        self._fs_cmd("subvolume", "deauthorize", self.volname, subvolume2, auth_id, "--group_name", group)
        guest_mount.umount_wait()
        self.run_ceph_cmd("auth", "rm", "client.guest1")
        self._fs_cmd("subvolume", "rm", self.volname, subvolume1, "--group_name", group)
        self._fs_cmd("subvolume", "rm", self.volname, subvolume2, "--group_name", group)
        self._fs_cmd("subvolumegroup", "rm", self.volname, group)
3194 | ||
    def test_update_old_style_auth_metadata_to_new_during_deauthorize(self):
        """
        CephVolumeClient stores the subvolume data in auth metadata file with
        'volumes' key as there was no subvolume namespace. It doesn't makes sense
        with mgr/volumes. This test validates the transparent update of 'volumes'
        key to 'subvolumes' key in auth metadata file during deauthorize.
        """

        guest_mount = self.mount_b

        subvolume1, subvolume2 = self._gen_subvol_name(2)
        group = self._gen_subvol_grp_name()

        auth_id = "guest1"
        guestclient_1 = {
            "auth_id": auth_id,
            "tenant_id": "tenant1",
        }

        # create group
        self._fs_cmd("subvolumegroup", "create", self.volname, group)

        # create subvolumes in group
        self._fs_cmd("subvolume", "create", self.volname, subvolume1, "--group_name", group)
        self._fs_cmd("subvolume", "create", self.volname, subvolume2, "--group_name", group)

        # Authorize 'guestclient_1' to access the subvolume1.
        self._fs_cmd("subvolume", "authorize", self.volname, subvolume1, guestclient_1["auth_id"],
                     "--group_name", group, "--tenant_id", guestclient_1["tenant_id"])

        # Authorize 'guestclient_1' to access the subvolume2.
        self._fs_cmd("subvolume", "authorize", self.volname, subvolume2, guestclient_1["auth_id"],
                     "--group_name", group, "--tenant_id", guestclient_1["tenant_id"])

        # Check that auth metadata file for auth ID 'guest1', is created.
        auth_metadata_filename = "${0}.meta".format(guestclient_1["auth_id"])
        self.assertIn(auth_metadata_filename, guest_mount.ls("volumes"))

        # Replace 'subvolumes' to 'volumes', emulating an old style
        # (CephVolumeClient-era) auth-metadata file
        guest_mount.run_shell(['sudo', 'sed', '-i', 's/subvolumes/volumes/g', 'volumes/{0}'.format(auth_metadata_filename)], omit_sudo=False)

        # Deauthorize 'guestclient_1' to access the subvolume2. This should update 'volumes' to subvolumes'
        self._fs_cmd("subvolume", "deauthorize", self.volname, subvolume2, auth_id, "--group_name", group)

        expected_auth_metadata = {
            "version": 5,
            "compat_version": 6,
            "dirty": False,
            "tenant_id": "tenant1",
            "subvolumes": {
                "{0}/{1}".format(group,subvolume1): {
                    "dirty": False,
                    "access_level": "rw"
                }
            }
        }

        auth_metadata = self._auth_metadata_get(guest_mount.read_file("volumes/{0}".format(auth_metadata_filename)))

        # "version" may be newer than expected; compare with >= and drop it
        # before the exact-equality check
        self.assertGreaterEqual(auth_metadata["version"], expected_auth_metadata["version"])
        del expected_auth_metadata["version"]
        del auth_metadata["version"]
        self.assertEqual(expected_auth_metadata, auth_metadata)

        # clean up
        self._fs_cmd("subvolume", "deauthorize", self.volname, subvolume1, auth_id, "--group_name", group)
        guest_mount.umount_wait()
        self.run_ceph_cmd("auth", "rm", "client.guest1")
        self._fs_cmd("subvolume", "rm", self.volname, subvolume1, "--group_name", group)
        self._fs_cmd("subvolume", "rm", self.volname, subvolume2, "--group_name", group)
        self._fs_cmd("subvolumegroup", "rm", self.volname, group)
3266 | ||
cd265ab1 TL |
    def test_subvolume_evict_client(self):
        """
        That a subvolume client can be evicted based on the auth ID

        Two subvolumes in one group are mounted by two guest clients sharing
        the same auth ID.  Evicting that auth ID on ONE subvolume must kill
        only the mount of that subvolume; the other guest mount (same auth ID,
        other subvolume) must keep working.
        """

        subvolumes = self._gen_subvol_name(2)
        group = self._gen_subvol_grp_name()

        # create group
        self._fs_cmd("subvolumegroup", "create", self.volname, group)

        # mounts[0] and mounts[1] would be used as guests to mount the volumes/shares.
        for i in range(0, 2):
            self.mounts[i].umount_wait()
        guest_mounts = (self.mounts[0], self.mounts[1])
        auth_id = "guest"
        guestclient_1 = {
            "auth_id": auth_id,
            "tenant_id": "tenant1",
        }

        # Create two subvolumes. Authorize 'guest' auth ID to mount the two
        # subvolumes. Mount the two subvolumes. Write data to the volumes.
        for i in range(2):
            # Create subvolume.
            self._fs_cmd("subvolume", "create", self.volname, subvolumes[i], "--group_name", group, "--mode=777")

            # authorize guest authID read-write access to subvolume
            key = self._fs_cmd("subvolume", "authorize", self.volname, subvolumes[i], guestclient_1["auth_id"],
                               "--group_name", group, "--tenant_id", guestclient_1["tenant_id"])

            mount_path = self._fs_cmd("subvolume", "getpath", self.volname, subvolumes[i],
                                      "--group_name", group).rstrip()

            # configure credentials for guest client
            guest_keyring_path = self._configure_guest_auth(guest_mounts[i],
                                                            auth_id, key)
            # mount the subvolume, and write to it
            guest_mounts[i].mount_wait(
                cephfs_mntpt=mount_path,
                client_keyring_path=guest_keyring_path)
            guest_mounts[i].write_n_mb("data.bin", 1)

        # Evict client, guest_mounts[0], using auth ID 'guest' and has mounted
        # one volume.
        self._fs_cmd("subvolume", "evict", self.volname, subvolumes[0], auth_id, "--group_name", group)

        # Evicted guest client, guest_mounts[0], should not be able to do
        # anymore metadata ops. It should start failing all operations
        # when it sees that its own address is in the blocklist.
        try:
            guest_mounts[0].write_n_mb("rogue.bin", 1)
        except CommandFailedError:
            pass
        else:
            raise RuntimeError("post-eviction write should have failed!")

        # The blocklisted guest client should now be unmountable
        guest_mounts[0].umount_wait()

        # Guest client, guest_mounts[1], using the same auth ID 'guest', but
        # has mounted the other volume, should be able to use its volume
        # unaffected.
        guest_mounts[1].write_n_mb("data.bin.1", 1)

        # Cleanup.
        guest_mounts[1].umount_wait()
        for i in range(2):
            self._fs_cmd("subvolume", "deauthorize", self.volname, subvolumes[i], auth_id, "--group_name", group)
            self._fs_cmd("subvolume", "rm", self.volname, subvolumes[i], "--group_name", group)
        self._fs_cmd("subvolumegroup", "rm", self.volname, group)
3338 | ||
f67539c2 TL |
3339 | def test_subvolume_pin_random(self): |
3340 | self.fs.set_max_mds(2) | |
3341 | self.fs.wait_for_daemons() | |
3342 | self.config_set('mds', 'mds_export_ephemeral_random', True) | |
1911f103 | 3343 | |
f38dd50b | 3344 | subvolume = self._gen_subvol_name() |
1911f103 | 3345 | self._fs_cmd("subvolume", "create", self.volname, subvolume) |
f67539c2 TL |
3346 | self._fs_cmd("subvolume", "pin", self.volname, subvolume, "random", ".01") |
3347 | # no verification | |
1911f103 | 3348 | |
f67539c2 | 3349 | # remove subvolume |
1911f103 | 3350 | self._fs_cmd("subvolume", "rm", self.volname, subvolume) |
1911f103 TL |
3351 | |
3352 | # verify trash dir is clean | |
3353 | self._wait_for_trash_empty() | |
3354 | ||
f67539c2 TL |
3355 | def test_subvolume_resize_fail_invalid_size(self): |
3356 | """ | |
3357 | That a subvolume cannot be resized to an invalid size and the quota did not change | |
3358 | """ | |
1911f103 | 3359 | |
f67539c2 TL |
3360 | osize = self.DEFAULT_FILE_SIZE*1024*1024 |
3361 | # create subvolume | |
f38dd50b | 3362 | subvolname = self._gen_subvol_name() |
f67539c2 | 3363 | self._fs_cmd("subvolume", "create", self.volname, subvolname, "--size", str(osize)) |
81eedcae | 3364 | |
f67539c2 TL |
3365 | # make sure it exists |
3366 | subvolpath = self._get_subvolume_path(self.volname, subvolname) | |
3367 | self.assertNotEqual(subvolpath, None) | |
81eedcae | 3368 | |
f67539c2 TL |
3369 | # try to resize the subvolume with an invalid size -10 |
3370 | nsize = -10 | |
3371 | try: | |
3372 | self._fs_cmd("subvolume", "resize", self.volname, subvolname, str(nsize)) | |
3373 | except CommandFailedError as ce: | |
3374 | self.assertEqual(ce.exitstatus, errno.EINVAL, "invalid error code on resize of subvolume with invalid size") | |
3375 | else: | |
3376 | self.fail("expected the 'fs subvolume resize' command to fail") | |
81eedcae | 3377 | |
f67539c2 TL |
3378 | # verify the quota did not change |
3379 | size = int(self.mount_a.getfattr(subvolpath, "ceph.quota.max_bytes")) | |
3380 | self.assertEqual(size, osize) | |
81eedcae TL |
3381 | |
3382 | # remove subvolume | |
f67539c2 | 3383 | self._fs_cmd("subvolume", "rm", self.volname, subvolname) |
81eedcae | 3384 | |
494da23a TL |
3385 | # verify trash dir is clean |
3386 | self._wait_for_trash_empty() | |
3387 | ||
f67539c2 TL |
3388 | def test_subvolume_resize_fail_zero_size(self): |
3389 | """ | |
3390 | That a subvolume cannot be resized to a zero size and the quota did not change | |
3391 | """ | |
81eedcae | 3392 | |
f67539c2 TL |
3393 | osize = self.DEFAULT_FILE_SIZE*1024*1024 |
3394 | # create subvolume | |
f38dd50b | 3395 | subvolname = self._gen_subvol_name() |
f67539c2 | 3396 | self._fs_cmd("subvolume", "create", self.volname, subvolname, "--size", str(osize)) |
81eedcae | 3397 | |
f67539c2 TL |
3398 | # make sure it exists |
3399 | subvolpath = self._get_subvolume_path(self.volname, subvolname) | |
3400 | self.assertNotEqual(subvolpath, None) | |
81eedcae | 3401 | |
f67539c2 TL |
3402 | # try to resize the subvolume with size 0 |
3403 | nsize = 0 | |
3404 | try: | |
3405 | self._fs_cmd("subvolume", "resize", self.volname, subvolname, str(nsize)) | |
3406 | except CommandFailedError as ce: | |
3407 | self.assertEqual(ce.exitstatus, errno.EINVAL, "invalid error code on resize of subvolume with invalid size") | |
3408 | else: | |
3409 | self.fail("expected the 'fs subvolume resize' command to fail") | |
81eedcae | 3410 | |
f67539c2 TL |
3411 | # verify the quota did not change |
3412 | size = int(self.mount_a.getfattr(subvolpath, "ceph.quota.max_bytes")) | |
3413 | self.assertEqual(size, osize) | |
81eedcae | 3414 | |
f67539c2 TL |
3415 | # remove subvolume |
3416 | self._fs_cmd("subvolume", "rm", self.volname, subvolname) | |
81eedcae | 3417 | |
f67539c2 TL |
3418 | # verify trash dir is clean |
3419 | self._wait_for_trash_empty() | |
81eedcae | 3420 | |
f67539c2 TL |
3421 | def test_subvolume_resize_quota_lt_used_size(self): |
3422 | """ | |
3423 | That a subvolume can be resized to a size smaller than the current used size | |
3424 | and the resulting quota matches the expected size. | |
3425 | """ | |
81eedcae | 3426 | |
f67539c2 TL |
3427 | osize = self.DEFAULT_FILE_SIZE*1024*1024*20 |
3428 | # create subvolume | |
f38dd50b | 3429 | subvolname = self._gen_subvol_name() |
522d829b | 3430 | self._fs_cmd("subvolume", "create", self.volname, subvolname, "--size", str(osize), "--mode=777") |
81eedcae | 3431 | |
f67539c2 TL |
3432 | # make sure it exists |
3433 | subvolpath = self._get_subvolume_path(self.volname, subvolname) | |
3434 | self.assertNotEqual(subvolpath, None) | |
81eedcae | 3435 | |
f67539c2 TL |
3436 | # create one file of 10MB |
3437 | file_size=self.DEFAULT_FILE_SIZE*10 | |
3438 | number_of_files=1 | |
3439 | log.debug("filling subvolume {0} with {1} file of size {2}MB".format(subvolname, | |
3440 | number_of_files, | |
3441 | file_size)) | |
3442 | filename = "{0}.{1}".format(TestVolumes.TEST_FILE_NAME_PREFIX, self.DEFAULT_NUMBER_OF_FILES+1) | |
3443 | self.mount_a.write_n_mb(os.path.join(subvolpath, filename), file_size) | |
81eedcae | 3444 | |
f67539c2 TL |
3445 | usedsize = int(self.mount_a.getfattr(subvolpath, "ceph.dir.rbytes")) |
3446 | susedsize = int(self.mount_a.run_shell(['stat', '-c' '%s', subvolpath]).stdout.getvalue().strip()) | |
3447 | if isinstance(self.mount_a, FuseMount): | |
3448 | # kclient dir does not have size==rbytes | |
3449 | self.assertEqual(usedsize, susedsize) | |
81eedcae | 3450 | |
f67539c2 TL |
3451 | # shrink the subvolume |
3452 | nsize = usedsize // 2 | |
3453 | try: | |
3454 | self._fs_cmd("subvolume", "resize", self.volname, subvolname, str(nsize)) | |
3455 | except CommandFailedError: | |
3456 | self.fail("expected the 'fs subvolume resize' command to succeed") | |
81eedcae | 3457 | |
f67539c2 TL |
3458 | # verify the quota |
3459 | size = int(self.mount_a.getfattr(subvolpath, "ceph.quota.max_bytes")) | |
3460 | self.assertEqual(size, nsize) | |
81eedcae | 3461 | |
f67539c2 TL |
3462 | # remove subvolume |
3463 | self._fs_cmd("subvolume", "rm", self.volname, subvolname) | |
81eedcae | 3464 | |
adb31ebb TL |
3465 | # verify trash dir is clean |
3466 | self._wait_for_trash_empty() | |
3467 | ||
f67539c2 | 3468 | def test_subvolume_resize_fail_quota_lt_used_size_no_shrink(self): |
92f5a8d4 | 3469 | """ |
f67539c2 TL |
3470 | That a subvolume cannot be resized to a size smaller than the current used size |
3471 | when --no_shrink is given and the quota did not change. | |
92f5a8d4 | 3472 | """ |
92f5a8d4 | 3473 | |
f67539c2 TL |
3474 | osize = self.DEFAULT_FILE_SIZE*1024*1024*20 |
3475 | # create subvolume | |
f38dd50b | 3476 | subvolname = self._gen_subvol_name() |
522d829b | 3477 | self._fs_cmd("subvolume", "create", self.volname, subvolname, "--size", str(osize), "--mode=777") |
92f5a8d4 TL |
3478 | |
3479 | # make sure it exists | |
f67539c2 TL |
3480 | subvolpath = self._get_subvolume_path(self.volname, subvolname) |
3481 | self.assertNotEqual(subvolpath, None) | |
81eedcae | 3482 | |
f67539c2 TL |
3483 | # create one file of 10MB |
3484 | file_size=self.DEFAULT_FILE_SIZE*10 | |
3485 | number_of_files=1 | |
3486 | log.debug("filling subvolume {0} with {1} file of size {2}MB".format(subvolname, | |
3487 | number_of_files, | |
3488 | file_size)) | |
3489 | filename = "{0}.{1}".format(TestVolumes.TEST_FILE_NAME_PREFIX, self.DEFAULT_NUMBER_OF_FILES+2) | |
3490 | self.mount_a.write_n_mb(os.path.join(subvolpath, filename), file_size) | |
81eedcae | 3491 | |
f67539c2 TL |
3492 | usedsize = int(self.mount_a.getfattr(subvolpath, "ceph.dir.rbytes")) |
3493 | susedsize = int(self.mount_a.run_shell(['stat', '-c' '%s', subvolpath]).stdout.getvalue().strip()) | |
3494 | if isinstance(self.mount_a, FuseMount): | |
3495 | # kclient dir does not have size==rbytes | |
3496 | self.assertEqual(usedsize, susedsize) | |
81eedcae | 3497 | |
f67539c2 TL |
3498 | # shrink the subvolume |
3499 | nsize = usedsize // 2 | |
3500 | try: | |
3501 | self._fs_cmd("subvolume", "resize", self.volname, subvolname, str(nsize), "--no_shrink") | |
3502 | except CommandFailedError as ce: | |
3503 | self.assertEqual(ce.exitstatus, errno.EINVAL, "invalid error code on resize of subvolume with invalid size") | |
3504 | else: | |
3505 | self.fail("expected the 'fs subvolume resize' command to fail") | |
81eedcae | 3506 | |
f67539c2 TL |
3507 | # verify the quota did not change |
3508 | size = int(self.mount_a.getfattr(subvolpath, "ceph.quota.max_bytes")) | |
3509 | self.assertEqual(size, osize) | |
81eedcae | 3510 | |
f67539c2 TL |
3511 | # remove subvolume |
3512 | self._fs_cmd("subvolume", "rm", self.volname, subvolname) | |
81eedcae | 3513 | |
adb31ebb TL |
3514 | # verify trash dir is clean |
3515 | self._wait_for_trash_empty() | |
3516 | ||
f67539c2 | 3517 | def test_subvolume_resize_expand_on_full_subvolume(self): |
92f5a8d4 | 3518 | """ |
f67539c2 | 3519 | That the subvolume can be expanded from a full subvolume and future writes succeed. |
92f5a8d4 | 3520 | """ |
92f5a8d4 | 3521 | |
f67539c2 TL |
3522 | osize = self.DEFAULT_FILE_SIZE*1024*1024*10 |
3523 | # create subvolume of quota 10MB and make sure it exists | |
f38dd50b | 3524 | subvolname = self._gen_subvol_name() |
522d829b | 3525 | self._fs_cmd("subvolume", "create", self.volname, subvolname, "--size", str(osize), "--mode=777") |
92f5a8d4 TL |
3526 | subvolpath = self._get_subvolume_path(self.volname, subvolname) |
3527 | self.assertNotEqual(subvolpath, None) | |
3528 | ||
f67539c2 TL |
3529 | # create one file of size 10MB and write |
3530 | file_size=self.DEFAULT_FILE_SIZE*10 | |
3531 | number_of_files=1 | |
3532 | log.debug("filling subvolume {0} with {1} file of size {2}MB".format(subvolname, | |
3533 | number_of_files, | |
3534 | file_size)) | |
3535 | filename = "{0}.{1}".format(TestVolumes.TEST_FILE_NAME_PREFIX, self.DEFAULT_NUMBER_OF_FILES+3) | |
3536 | self.mount_a.write_n_mb(os.path.join(subvolpath, filename), file_size) | |
3537 | ||
3538 | # create a file of size 5MB and try write more | |
3539 | file_size=file_size // 2 | |
3540 | number_of_files=1 | |
3541 | log.debug("filling subvolume {0} with {1} file of size {2}MB".format(subvolname, | |
3542 | number_of_files, | |
3543 | file_size)) | |
3544 | filename = "{0}.{1}".format(TestVolumes.TEST_FILE_NAME_PREFIX, self.DEFAULT_NUMBER_OF_FILES+4) | |
3545 | try: | |
3546 | self.mount_a.write_n_mb(os.path.join(subvolpath, filename), file_size) | |
3547 | except CommandFailedError: | |
3548 | # Not able to write. So expand the subvolume more and try writing the 5MB file again | |
3549 | nsize = osize*2 | |
3550 | self._fs_cmd("subvolume", "resize", self.volname, subvolname, str(nsize)) | |
3551 | try: | |
3552 | self.mount_a.write_n_mb(os.path.join(subvolpath, filename), file_size) | |
3553 | except CommandFailedError: | |
3554 | self.fail("expected filling subvolume {0} with {1} file of size {2}MB" | |
3555 | "to succeed".format(subvolname, number_of_files, file_size)) | |
3556 | else: | |
3557 | self.fail("expected filling subvolume {0} with {1} file of size {2}MB" | |
3558 | "to fail".format(subvolname, number_of_files, file_size)) | |
92f5a8d4 TL |
3559 | |
3560 | # remove subvolume | |
3561 | self._fs_cmd("subvolume", "rm", self.volname, subvolname) | |
3562 | ||
adb31ebb TL |
3563 | # verify trash dir is clean |
3564 | self._wait_for_trash_empty() | |
3565 | ||
f67539c2 TL |
3566 | def test_subvolume_resize_infinite_size(self): |
3567 | """ | |
3568 | That a subvolume can be resized to an infinite size by unsetting its quota. | |
3569 | """ | |
81eedcae TL |
3570 | |
3571 | # create subvolume | |
f38dd50b | 3572 | subvolname = self._gen_subvol_name() |
f67539c2 TL |
3573 | self._fs_cmd("subvolume", "create", self.volname, subvolname, "--size", |
3574 | str(self.DEFAULT_FILE_SIZE*1024*1024)) | |
81eedcae | 3575 | |
f67539c2 TL |
3576 | # make sure it exists |
3577 | subvolpath = self._get_subvolume_path(self.volname, subvolname) | |
3578 | self.assertNotEqual(subvolpath, None) | |
81eedcae | 3579 | |
f67539c2 TL |
3580 | # resize inf |
3581 | self._fs_cmd("subvolume", "resize", self.volname, subvolname, "inf") | |
3582 | ||
3583 | # verify that the quota is None | |
3584 | size = self.mount_a.getfattr(subvolpath, "ceph.quota.max_bytes") | |
3585 | self.assertEqual(size, None) | |
81eedcae TL |
3586 | |
3587 | # remove subvolume | |
f67539c2 | 3588 | self._fs_cmd("subvolume", "rm", self.volname, subvolname) |
81eedcae | 3589 | |
494da23a TL |
3590 | # verify trash dir is clean |
3591 | self._wait_for_trash_empty() | |
3592 | ||
f67539c2 | 3593 | def test_subvolume_resize_infinite_size_future_writes(self): |
e306af50 | 3594 | """ |
f67539c2 | 3595 | That a subvolume can be resized to an infinite size and the future writes succeed. |
e306af50 TL |
3596 | """ |
3597 | ||
e306af50 | 3598 | # create subvolume |
f38dd50b | 3599 | subvolname = self._gen_subvol_name() |
f67539c2 | 3600 | self._fs_cmd("subvolume", "create", self.volname, subvolname, "--size", |
522d829b | 3601 | str(self.DEFAULT_FILE_SIZE*1024*1024*5), "--mode=777") |
e306af50 | 3602 | |
f67539c2 TL |
3603 | # make sure it exists |
3604 | subvolpath = self._get_subvolume_path(self.volname, subvolname) | |
3605 | self.assertNotEqual(subvolpath, None) | |
e306af50 | 3606 | |
f67539c2 TL |
3607 | # resize inf |
3608 | self._fs_cmd("subvolume", "resize", self.volname, subvolname, "inf") | |
e306af50 | 3609 | |
f67539c2 TL |
3610 | # verify that the quota is None |
3611 | size = self.mount_a.getfattr(subvolpath, "ceph.quota.max_bytes") | |
3612 | self.assertEqual(size, None) | |
e306af50 | 3613 | |
f67539c2 TL |
3614 | # create one file of 10MB and try to write |
3615 | file_size=self.DEFAULT_FILE_SIZE*10 | |
3616 | number_of_files=1 | |
3617 | log.debug("filling subvolume {0} with {1} file of size {2}MB".format(subvolname, | |
3618 | number_of_files, | |
3619 | file_size)) | |
3620 | filename = "{0}.{1}".format(TestVolumes.TEST_FILE_NAME_PREFIX, self.DEFAULT_NUMBER_OF_FILES+5) | |
adb31ebb | 3621 | |
f67539c2 TL |
3622 | try: |
3623 | self.mount_a.write_n_mb(os.path.join(subvolpath, filename), file_size) | |
3624 | except CommandFailedError: | |
3625 | self.fail("expected filling subvolume {0} with {1} file of size {2}MB " | |
3626 | "to succeed".format(subvolname, number_of_files, file_size)) | |
e306af50 TL |
3627 | |
3628 | # remove subvolume | |
f67539c2 | 3629 | self._fs_cmd("subvolume", "rm", self.volname, subvolname) |
e306af50 TL |
3630 | |
3631 | # verify trash dir is clean | |
3632 | self._wait_for_trash_empty() | |
3633 | ||
f67539c2 TL |
3634 | def test_subvolume_rm_force(self): |
3635 | # test removing non-existing subvolume with --force | |
f38dd50b | 3636 | subvolume = self._gen_subvol_name() |
f67539c2 TL |
3637 | try: |
3638 | self._fs_cmd("subvolume", "rm", self.volname, subvolume, "--force") | |
3639 | except CommandFailedError: | |
3640 | self.fail("expected the 'fs subvolume rm --force' command to succeed") | |
3641 | ||
2a845540 TL |
3642 | def test_subvolume_exists_with_subvolumegroup_and_subvolume(self): |
3643 | """Test the presence of any subvolume by specifying the name of subvolumegroup""" | |
3644 | ||
f38dd50b TL |
3645 | group = self._gen_subvol_grp_name() |
3646 | subvolume1 = self._gen_subvol_name() | |
2a845540 TL |
3647 | # create subvolumegroup |
3648 | self._fs_cmd("subvolumegroup", "create", self.volname, group) | |
3649 | # create subvolume in group | |
3650 | self._fs_cmd("subvolume", "create", self.volname, subvolume1, "--group_name", group) | |
3651 | ret = self._fs_cmd("subvolume", "exist", self.volname, "--group_name", group) | |
3652 | self.assertEqual(ret.strip('\n'), "subvolume exists") | |
3653 | # delete subvolume in group | |
3654 | self._fs_cmd("subvolume", "rm", self.volname, subvolume1, "--group_name", group) | |
3655 | ret = self._fs_cmd("subvolume", "exist", self.volname, "--group_name", group) | |
3656 | self.assertEqual(ret.strip('\n'), "no subvolume exists") | |
3657 | # delete subvolumegroup | |
3658 | self._fs_cmd("subvolumegroup", "rm", self.volname, group) | |
3659 | ||
3660 | def test_subvolume_exists_with_subvolumegroup_and_no_subvolume(self): | |
3661 | """Test the presence of any subvolume specifying the name | |
3662 | of subvolumegroup and no subvolumes""" | |
3663 | ||
f38dd50b | 3664 | group = self._gen_subvol_grp_name() |
2a845540 TL |
3665 | # create subvolumegroup |
3666 | self._fs_cmd("subvolumegroup", "create", self.volname, group) | |
3667 | ret = self._fs_cmd("subvolume", "exist", self.volname, "--group_name", group) | |
3668 | self.assertEqual(ret.strip('\n'), "no subvolume exists") | |
3669 | # delete subvolumegroup | |
3670 | self._fs_cmd("subvolumegroup", "rm", self.volname, group) | |
3671 | ||
3672 | def test_subvolume_exists_without_subvolumegroup_and_with_subvolume(self): | |
3673 | """Test the presence of any subvolume without specifying the name | |
3674 | of subvolumegroup""" | |
3675 | ||
f38dd50b | 3676 | subvolume1 = self._gen_subvol_name() |
2a845540 TL |
3677 | # create subvolume |
3678 | self._fs_cmd("subvolume", "create", self.volname, subvolume1) | |
3679 | ret = self._fs_cmd("subvolume", "exist", self.volname) | |
3680 | self.assertEqual(ret.strip('\n'), "subvolume exists") | |
3681 | # delete subvolume | |
3682 | self._fs_cmd("subvolume", "rm", self.volname, subvolume1) | |
3683 | ret = self._fs_cmd("subvolume", "exist", self.volname) | |
3684 | self.assertEqual(ret.strip('\n'), "no subvolume exists") | |
3685 | ||
3686 | def test_subvolume_exists_without_subvolumegroup_and_without_subvolume(self): | |
3687 | """Test the presence of any subvolume without any subvolumegroup | |
3688 | and without any subvolume""" | |
3689 | ||
3690 | ret = self._fs_cmd("subvolume", "exist", self.volname) | |
3691 | self.assertEqual(ret.strip('\n'), "no subvolume exists") | |
3692 | ||
f67539c2 TL |
3693 | def test_subvolume_shrink(self): |
3694 | """ | |
3695 | That a subvolume can be shrinked in size and its quota matches the expected size. | |
3696 | """ | |
81eedcae TL |
3697 | |
3698 | # create subvolume | |
f38dd50b | 3699 | subvolname = self._gen_subvol_name() |
f67539c2 TL |
3700 | osize = self.DEFAULT_FILE_SIZE*1024*1024 |
3701 | self._fs_cmd("subvolume", "create", self.volname, subvolname, "--size", str(osize)) | |
81eedcae | 3702 | |
f67539c2 TL |
3703 | # make sure it exists |
3704 | subvolpath = self._get_subvolume_path(self.volname, subvolname) | |
3705 | self.assertNotEqual(subvolpath, None) | |
81eedcae | 3706 | |
f67539c2 TL |
3707 | # shrink the subvolume |
3708 | nsize = osize // 2 | |
3709 | self._fs_cmd("subvolume", "resize", self.volname, subvolname, str(nsize)) | |
81eedcae | 3710 | |
f67539c2 TL |
3711 | # verify the quota |
3712 | size = int(self.mount_a.getfattr(subvolpath, "ceph.quota.max_bytes")) | |
3713 | self.assertEqual(size, nsize) | |
81eedcae TL |
3714 | |
3715 | # remove subvolume | |
f67539c2 | 3716 | self._fs_cmd("subvolume", "rm", self.volname, subvolname) |
81eedcae | 3717 | |
494da23a TL |
3718 | # verify trash dir is clean |
3719 | self._wait_for_trash_empty() | |
3720 | ||
33c7a0ef TL |
3721 | def test_subvolume_retain_snapshot_rm_idempotency(self): |
3722 | """ | |
3723 | ensure subvolume deletion of a subvolume which is already deleted with retain snapshots option passes. | |
3724 | After subvolume deletion with retain snapshots, the subvolume exists until the trash directory (resides inside subvolume) | |
3725 | is cleaned up. The subvolume deletion issued while the trash directory is not empty, should pass and should | |
3726 | not error out with EAGAIN. | |
3727 | """ | |
f38dd50b TL |
3728 | subvolume = self._gen_subvol_name() |
3729 | snapshot = self._gen_subvol_snap_name() | |
81eedcae | 3730 | |
33c7a0ef TL |
3731 | # create subvolume |
3732 | self._fs_cmd("subvolume", "create", self.volname, subvolume, "--mode=777") | |
81eedcae | 3733 | |
33c7a0ef TL |
3734 | # do some IO |
3735 | self._do_subvolume_io(subvolume, number_of_files=256) | |
f67539c2 | 3736 | |
33c7a0ef TL |
3737 | # snapshot subvolume |
3738 | self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolume, snapshot) | |
81eedcae | 3739 | |
33c7a0ef TL |
3740 | # remove with snapshot retention |
3741 | self._fs_cmd("subvolume", "rm", self.volname, subvolume, "--retain-snapshots") | |
81eedcae | 3742 | |
33c7a0ef TL |
3743 | # remove snapshots (removes retained volume) |
3744 | self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume, snapshot) | |
3745 | ||
3746 | # remove subvolume (check idempotency) | |
81eedcae | 3747 | try: |
33c7a0ef | 3748 | self._fs_cmd("subvolume", "rm", self.volname, subvolume) |
81eedcae TL |
3749 | except CommandFailedError as ce: |
3750 | if ce.exitstatus != errno.ENOENT: | |
33c7a0ef | 3751 | self.fail(f"expected subvolume rm to pass with error: {os.strerror(ce.exitstatus)}") |
81eedcae | 3752 | |
494da23a TL |
3753 | # verify trash dir is clean |
3754 | self._wait_for_trash_empty() | |
3755 | ||
f67539c2 | 3756 | |
33c7a0ef | 3757 | def test_subvolume_user_metadata_set(self): |
f38dd50b TL |
3758 | subvolname = self._gen_subvol_name() |
3759 | group = self._gen_subvol_grp_name() | |
92f5a8d4 | 3760 | |
33c7a0ef | 3761 | # create group. |
f67539c2 TL |
3762 | self._fs_cmd("subvolumegroup", "create", self.volname, group) |
3763 | ||
33c7a0ef TL |
3764 | # create subvolume in group. |
3765 | self._fs_cmd("subvolume", "create", self.volname, subvolname, "--group_name", group) | |
f67539c2 | 3766 | |
33c7a0ef TL |
3767 | # set metadata for subvolume. |
3768 | key = "key" | |
3769 | value = "value" | |
3770 | try: | |
3771 | self._fs_cmd("subvolume", "metadata", "set", self.volname, subvolname, key, value, "--group_name", group) | |
3772 | except CommandFailedError: | |
3773 | self.fail("expected the 'fs subvolume metadata set' command to succeed") | |
92f5a8d4 | 3774 | |
33c7a0ef TL |
3775 | self._fs_cmd("subvolume", "rm", self.volname, subvolname, group) |
3776 | self._fs_cmd("subvolumegroup", "rm", self.volname, group) | |
f67539c2 | 3777 | |
33c7a0ef | 3778 | # verify trash dir is clean. |
f67539c2 TL |
3779 | self._wait_for_trash_empty() |
3780 | ||
33c7a0ef | 3781 | def test_subvolume_user_metadata_set_idempotence(self): |
f38dd50b TL |
3782 | subvolname = self._gen_subvol_name() |
3783 | group = self._gen_subvol_grp_name() | |
81eedcae | 3784 | |
33c7a0ef | 3785 | # create group. |
81eedcae TL |
3786 | self._fs_cmd("subvolumegroup", "create", self.volname, group) |
3787 | ||
33c7a0ef TL |
3788 | # create subvolume in group. |
3789 | self._fs_cmd("subvolume", "create", self.volname, subvolname, "--group_name", group) | |
81eedcae | 3790 | |
33c7a0ef TL |
3791 | # set metadata for subvolume. |
3792 | key = "key" | |
3793 | value = "value" | |
3794 | try: | |
3795 | self._fs_cmd("subvolume", "metadata", "set", self.volname, subvolname, key, value, "--group_name", group) | |
3796 | except CommandFailedError: | |
3797 | self.fail("expected the 'fs subvolume metadata set' command to succeed") | |
81eedcae | 3798 | |
33c7a0ef TL |
3799 | # set same metadata again for subvolume. |
3800 | try: | |
3801 | self._fs_cmd("subvolume", "metadata", "set", self.volname, subvolname, key, value, "--group_name", group) | |
3802 | except CommandFailedError: | |
3803 | self.fail("expected the 'fs subvolume metadata set' command to succeed because it is idempotent operation") | |
494da23a | 3804 | |
33c7a0ef | 3805 | self._fs_cmd("subvolume", "rm", self.volname, subvolname, group) |
81eedcae TL |
3806 | self._fs_cmd("subvolumegroup", "rm", self.volname, group) |
3807 | ||
33c7a0ef TL |
3808 | # verify trash dir is clean. |
3809 | self._wait_for_trash_empty() | |
eafe8130 | 3810 | |
33c7a0ef | 3811 | def test_subvolume_user_metadata_get(self): |
f38dd50b TL |
3812 | subvolname = self._gen_subvol_name() |
3813 | group = self._gen_subvol_grp_name() | |
33c7a0ef TL |
3814 | |
3815 | # create group. | |
f67539c2 | 3816 | self._fs_cmd("subvolumegroup", "create", self.volname, group) |
eafe8130 | 3817 | |
33c7a0ef TL |
3818 | # create subvolume in group. |
3819 | self._fs_cmd("subvolume", "create", self.volname, subvolname, "--group_name", group) | |
eafe8130 | 3820 | |
33c7a0ef TL |
3821 | # set metadata for subvolume. |
3822 | key = "key" | |
3823 | value = "value" | |
3824 | self._fs_cmd("subvolume", "metadata", "set", self.volname, subvolname, key, value, "--group_name", group) | |
adb31ebb | 3825 | |
33c7a0ef | 3826 | # get value for specified key. |
f67539c2 | 3827 | try: |
33c7a0ef | 3828 | ret = self._fs_cmd("subvolume", "metadata", "get", self.volname, subvolname, key, "--group_name", group) |
f67539c2 | 3829 | except CommandFailedError: |
33c7a0ef | 3830 | self.fail("expected the 'fs subvolume metadata get' command to succeed") |
eafe8130 | 3831 | |
33c7a0ef TL |
3832 | # remove '\n' from returned value. |
3833 | ret = ret.strip('\n') | |
adb31ebb | 3834 | |
33c7a0ef TL |
3835 | # match received value with expected value. |
3836 | self.assertEqual(value, ret) | |
adb31ebb | 3837 | |
33c7a0ef | 3838 | self._fs_cmd("subvolume", "rm", self.volname, subvolname, group) |
adb31ebb TL |
3839 | self._fs_cmd("subvolumegroup", "rm", self.volname, group) |
3840 | ||
33c7a0ef TL |
3841 | # verify trash dir is clean. |
3842 | self._wait_for_trash_empty() | |
cd265ab1 | 3843 | |
33c7a0ef | 3844 | def test_subvolume_user_metadata_get_for_nonexisting_key(self): |
f38dd50b TL |
3845 | subvolname = self._gen_subvol_name() |
3846 | group = self._gen_subvol_grp_name() | |
cd265ab1 | 3847 | |
33c7a0ef TL |
3848 | # create group. |
3849 | self._fs_cmd("subvolumegroup", "create", self.volname, group) | |
cd265ab1 | 3850 | |
33c7a0ef TL |
3851 | # create subvolume in group. |
3852 | self._fs_cmd("subvolume", "create", self.volname, subvolname, "--group_name", group) | |
cd265ab1 | 3853 | |
33c7a0ef TL |
3854 | # set metadata for subvolume. |
3855 | key = "key" | |
3856 | value = "value" | |
3857 | self._fs_cmd("subvolume", "metadata", "set", self.volname, subvolname, key, value, "--group_name", group) | |
cd265ab1 | 3858 | |
33c7a0ef TL |
3859 | # try to get value for nonexisting key |
3860 | # Expecting ENOENT exit status because key does not exist | |
f67539c2 | 3861 | try: |
33c7a0ef TL |
3862 | self._fs_cmd("subvolume", "metadata", "get", self.volname, subvolname, "key_nonexist", "--group_name", group) |
3863 | except CommandFailedError as e: | |
3864 | self.assertEqual(e.exitstatus, errno.ENOENT) | |
f67539c2 | 3865 | else: |
33c7a0ef | 3866 | self.fail("Expected ENOENT because 'key_nonexist' does not exist") |
cd265ab1 | 3867 | |
33c7a0ef TL |
3868 | self._fs_cmd("subvolume", "rm", self.volname, subvolname, group) |
3869 | self._fs_cmd("subvolumegroup", "rm", self.volname, group) | |
cd265ab1 | 3870 | |
33c7a0ef | 3871 | # verify trash dir is clean. |
f67539c2 TL |
3872 | self._wait_for_trash_empty() |
3873 | ||
33c7a0ef | 3874 | def test_subvolume_user_metadata_get_for_nonexisting_section(self): |
f38dd50b TL |
3875 | subvolname = self._gen_subvol_name() |
3876 | group = self._gen_subvol_grp_name() | |
f67539c2 | 3877 | |
33c7a0ef TL |
3878 | # create group. |
3879 | self._fs_cmd("subvolumegroup", "create", self.volname, group) | |
f67539c2 | 3880 | |
33c7a0ef TL |
3881 | # create subvolume in group. |
3882 | self._fs_cmd("subvolume", "create", self.volname, subvolname, "--group_name", group) | |
cd265ab1 | 3883 | |
33c7a0ef TL |
3884 | # try to get value for nonexisting key (as section does not exist) |
3885 | # Expecting ENOENT exit status because key does not exist | |
3886 | try: | |
3887 | self._fs_cmd("subvolume", "metadata", "get", self.volname, subvolname, "key", "--group_name", group) | |
3888 | except CommandFailedError as e: | |
3889 | self.assertEqual(e.exitstatus, errno.ENOENT) | |
3890 | else: | |
3891 | self.fail("Expected ENOENT because section does not exist") | |
cd265ab1 | 3892 | |
33c7a0ef TL |
3893 | self._fs_cmd("subvolume", "rm", self.volname, subvolname, group) |
3894 | self._fs_cmd("subvolumegroup", "rm", self.volname, group) | |
cd265ab1 | 3895 | |
33c7a0ef | 3896 | # verify trash dir is clean. |
cd265ab1 TL |
3897 | self._wait_for_trash_empty() |
3898 | ||
33c7a0ef | 3899 | def test_subvolume_user_metadata_update(self): |
f38dd50b TL |
3900 | subvolname = self._gen_subvol_name() |
3901 | group = self._gen_subvol_grp_name() | |
cd265ab1 | 3902 | |
33c7a0ef TL |
3903 | # create group. |
3904 | self._fs_cmd("subvolumegroup", "create", self.volname, group) | |
cd265ab1 | 3905 | |
33c7a0ef TL |
3906 | # create subvolume in group. |
3907 | self._fs_cmd("subvolume", "create", self.volname, subvolname, "--group_name", group) | |
cd265ab1 | 3908 | |
33c7a0ef TL |
3909 | # set metadata for subvolume. |
3910 | key = "key" | |
3911 | value = "value" | |
3912 | self._fs_cmd("subvolume", "metadata", "set", self.volname, subvolname, key, value, "--group_name", group) | |
cd265ab1 | 3913 | |
33c7a0ef TL |
3914 | # update metadata against key. |
3915 | new_value = "new_value" | |
3916 | self._fs_cmd("subvolume", "metadata", "set", self.volname, subvolname, key, new_value, "--group_name", group) | |
cd265ab1 | 3917 | |
33c7a0ef TL |
3918 | # get metadata for specified key of subvolume. |
3919 | try: | |
3920 | ret = self._fs_cmd("subvolume", "metadata", "get", self.volname, subvolname, key, "--group_name", group) | |
3921 | except CommandFailedError: | |
3922 | self.fail("expected the 'fs subvolume metadata get' command to succeed") | |
cd265ab1 | 3923 | |
33c7a0ef TL |
3924 | # remove '\n' from returned value. |
3925 | ret = ret.strip('\n') | |
cd265ab1 | 3926 | |
33c7a0ef TL |
3927 | # match received value with expected value. |
3928 | self.assertEqual(new_value, ret) | |
cd265ab1 | 3929 | |
33c7a0ef TL |
3930 | self._fs_cmd("subvolume", "rm", self.volname, subvolname, group) |
3931 | self._fs_cmd("subvolumegroup", "rm", self.volname, group) | |
cd265ab1 | 3932 | |
33c7a0ef TL |
3933 | # verify trash dir is clean. |
3934 | self._wait_for_trash_empty() | |
f67539c2 | 3935 | |
33c7a0ef | 3936 | def test_subvolume_user_metadata_list(self): |
f38dd50b TL |
3937 | subvolname = self._gen_subvol_name() |
3938 | group = self._gen_subvol_grp_name() | |
cd265ab1 | 3939 | |
33c7a0ef TL |
3940 | # create group. |
3941 | self._fs_cmd("subvolumegroup", "create", self.volname, group) | |
cd265ab1 | 3942 | |
33c7a0ef TL |
3943 | # create subvolume in group. |
3944 | self._fs_cmd("subvolume", "create", self.volname, subvolname, "--group_name", group) | |
cd265ab1 | 3945 | |
33c7a0ef TL |
3946 | # set metadata for subvolume. |
3947 | input_metadata_dict = {f'key_{i}' : f'value_{i}' for i in range(3)} | |
cd265ab1 | 3948 | |
33c7a0ef TL |
3949 | for k, v in input_metadata_dict.items(): |
3950 | self._fs_cmd("subvolume", "metadata", "set", self.volname, subvolname, k, v, "--group_name", group) | |
cd265ab1 | 3951 | |
33c7a0ef | 3952 | # list metadata |
cd265ab1 | 3953 | try: |
33c7a0ef TL |
3954 | ret = self._fs_cmd("subvolume", "metadata", "ls", self.volname, subvolname, "--group_name", group) |
3955 | except CommandFailedError: | |
3956 | self.fail("expected the 'fs subvolume metadata ls' command to succeed") | |
cd265ab1 | 3957 | |
33c7a0ef | 3958 | ret_dict = json.loads(ret) |
cd265ab1 | 3959 | |
33c7a0ef TL |
3960 | # compare output with expected output |
3961 | self.assertDictEqual(input_metadata_dict, ret_dict) | |
cd265ab1 | 3962 | |
33c7a0ef TL |
3963 | self._fs_cmd("subvolume", "rm", self.volname, subvolname, group) |
3964 | self._fs_cmd("subvolumegroup", "rm", self.volname, group) | |
3965 | ||
3966 | # verify trash dir is clean. | |
cd265ab1 TL |
3967 | self._wait_for_trash_empty() |
3968 | ||
33c7a0ef | 3969 | def test_subvolume_user_metadata_list_if_no_metadata_set(self): |
f38dd50b TL |
3970 | subvolname = self._gen_subvol_name() |
3971 | group = self._gen_subvol_grp_name() | |
cd265ab1 | 3972 | |
33c7a0ef | 3973 | # create group. |
cd265ab1 TL |
3974 | self._fs_cmd("subvolumegroup", "create", self.volname, group) |
3975 | ||
33c7a0ef TL |
3976 | # create subvolume in group. |
3977 | self._fs_cmd("subvolume", "create", self.volname, subvolname, "--group_name", group) | |
cd265ab1 | 3978 | |
33c7a0ef TL |
3979 | # list metadata |
3980 | try: | |
3981 | ret = self._fs_cmd("subvolume", "metadata", "ls", self.volname, subvolname, "--group_name", group) | |
3982 | except CommandFailedError: | |
3983 | self.fail("expected the 'fs subvolume metadata ls' command to succeed") | |
cd265ab1 | 3984 | |
33c7a0ef TL |
3985 | # remove '\n' from returned value. |
3986 | ret = ret.strip('\n') | |
cd265ab1 | 3987 | |
33c7a0ef TL |
3988 | # compare output with expected output |
3989 | # expecting empty json/dictionary | |
3990 | self.assertEqual(ret, "{}") | |
cd265ab1 | 3991 | |
33c7a0ef TL |
3992 | self._fs_cmd("subvolume", "rm", self.volname, subvolname, group) |
3993 | self._fs_cmd("subvolumegroup", "rm", self.volname, group) | |
3994 | ||
3995 | # verify trash dir is clean. | |
cd265ab1 TL |
3996 | self._wait_for_trash_empty() |
3997 | ||
33c7a0ef | 3998 | def test_subvolume_user_metadata_remove(self): |
f38dd50b TL |
3999 | subvolname = self._gen_subvol_name() |
4000 | group = self._gen_subvol_grp_name() | |
cd265ab1 | 4001 | |
33c7a0ef TL |
4002 | # create group. |
4003 | self._fs_cmd("subvolumegroup", "create", self.volname, group) | |
81eedcae | 4004 | |
33c7a0ef TL |
4005 | # create subvolume in group. |
4006 | self._fs_cmd("subvolume", "create", self.volname, subvolname, "--group_name", group) | |
81eedcae | 4007 | |
33c7a0ef TL |
4008 | # set metadata for subvolume. |
4009 | key = "key" | |
4010 | value = "value" | |
4011 | self._fs_cmd("subvolume", "metadata", "set", self.volname, subvolname, key, value, "--group_name", group) | |
81eedcae | 4012 | |
33c7a0ef TL |
4013 | # remove metadata against specified key. |
4014 | try: | |
4015 | self._fs_cmd("subvolume", "metadata", "rm", self.volname, subvolname, key, "--group_name", group) | |
4016 | except CommandFailedError: | |
4017 | self.fail("expected the 'fs subvolume metadata rm' command to succeed") | |
f67539c2 | 4018 | |
33c7a0ef TL |
4019 | # confirm key is removed by again fetching metadata |
4020 | try: | |
4021 | self._fs_cmd("subvolume", "metadata", "get", self.volname, subvolname, key, "--group_name", group) | |
4022 | except CommandFailedError as e: | |
4023 | self.assertEqual(e.exitstatus, errno.ENOENT) | |
f67539c2 | 4024 | else: |
33c7a0ef | 4025 | self.fail("Expected ENOENT because key does not exist") |
81eedcae | 4026 | |
33c7a0ef TL |
4027 | self._fs_cmd("subvolume", "rm", self.volname, subvolname, group) |
4028 | self._fs_cmd("subvolumegroup", "rm", self.volname, group) | |
81eedcae | 4029 | |
33c7a0ef | 4030 | # verify trash dir is clean. |
494da23a TL |
4031 | self._wait_for_trash_empty() |
4032 | ||
33c7a0ef | 4033 | def test_subvolume_user_metadata_remove_for_nonexisting_key(self): |
f38dd50b TL |
4034 | subvolname = self._gen_subvol_name() |
4035 | group = self._gen_subvol_grp_name() | |
81eedcae | 4036 | |
33c7a0ef | 4037 | # create group. |
81eedcae TL |
4038 | self._fs_cmd("subvolumegroup", "create", self.volname, group) |
4039 | ||
33c7a0ef TL |
4040 | # create subvolume in group. |
4041 | self._fs_cmd("subvolume", "create", self.volname, subvolname, "--group_name", group) | |
81eedcae | 4042 | |
33c7a0ef TL |
4043 | # set metadata for subvolume. |
4044 | key = "key" | |
4045 | value = "value" | |
4046 | self._fs_cmd("subvolume", "metadata", "set", self.volname, subvolname, key, value, "--group_name", group) | |
f67539c2 | 4047 | |
33c7a0ef TL |
4048 | # try to remove value for nonexisting key |
4049 | # Expecting ENOENT exit status because key does not exist | |
4050 | try: | |
4051 | self._fs_cmd("subvolume", "metadata", "rm", self.volname, subvolname, "key_nonexist", "--group_name", group) | |
4052 | except CommandFailedError as e: | |
4053 | self.assertEqual(e.exitstatus, errno.ENOENT) | |
4054 | else: | |
4055 | self.fail("Expected ENOENT because 'key_nonexist' does not exist") | |
f67539c2 | 4056 | |
33c7a0ef TL |
4057 | self._fs_cmd("subvolume", "rm", self.volname, subvolname, group) |
4058 | self._fs_cmd("subvolumegroup", "rm", self.volname, group) | |
81eedcae | 4059 | |
33c7a0ef TL |
4060 | # verify trash dir is clean. |
4061 | self._wait_for_trash_empty() | |
81eedcae | 4062 | |
33c7a0ef | 4063 | def test_subvolume_user_metadata_remove_for_nonexisting_section(self): |
f38dd50b TL |
4064 | subvolname = self._gen_subvol_name() |
4065 | group = self._gen_subvol_grp_name() | |
81eedcae | 4066 | |
33c7a0ef TL |
4067 | # create group. |
4068 | self._fs_cmd("subvolumegroup", "create", self.volname, group) | |
81eedcae | 4069 | |
33c7a0ef TL |
4070 | # create subvolume in group. |
4071 | self._fs_cmd("subvolume", "create", self.volname, subvolname, "--group_name", group) | |
4072 | ||
4073 | # try to remove value for nonexisting key (as section does not exist) | |
4074 | # Expecting ENOENT exit status because key does not exist | |
4075 | try: | |
4076 | self._fs_cmd("subvolume", "metadata", "rm", self.volname, subvolname, "key", "--group_name", group) | |
4077 | except CommandFailedError as e: | |
4078 | self.assertEqual(e.exitstatus, errno.ENOENT) | |
4079 | else: | |
4080 | self.fail("Expected ENOENT because section does not exist") | |
4081 | ||
4082 | self._fs_cmd("subvolume", "rm", self.volname, subvolname, group) | |
4083 | self._fs_cmd("subvolumegroup", "rm", self.volname, group) | |
4084 | ||
4085 | # verify trash dir is clean. | |
494da23a TL |
4086 | self._wait_for_trash_empty() |
4087 | ||
33c7a0ef | 4088 | def test_subvolume_user_metadata_remove_force(self): |
f38dd50b TL |
4089 | subvolname = self._gen_subvol_name() |
4090 | group = self._gen_subvol_grp_name() | |
33c7a0ef TL |
4091 | |
4092 | # create group. | |
4093 | self._fs_cmd("subvolumegroup", "create", self.volname, group) | |
4094 | ||
4095 | # create subvolume in group. | |
4096 | self._fs_cmd("subvolume", "create", self.volname, subvolname, "--group_name", group) | |
4097 | ||
4098 | # set metadata for subvolume. | |
4099 | key = "key" | |
4100 | value = "value" | |
4101 | self._fs_cmd("subvolume", "metadata", "set", self.volname, subvolname, key, value, "--group_name", group) | |
4102 | ||
4103 | # remove metadata against specified key with --force option. | |
4104 | try: | |
4105 | self._fs_cmd("subvolume", "metadata", "rm", self.volname, subvolname, key, "--group_name", group, "--force") | |
4106 | except CommandFailedError: | |
4107 | self.fail("expected the 'fs subvolume metadata rm' command to succeed") | |
4108 | ||
4109 | # confirm key is removed by again fetching metadata | |
4110 | try: | |
4111 | self._fs_cmd("subvolume", "metadata", "get", self.volname, subvolname, key, "--group_name", group) | |
4112 | except CommandFailedError as e: | |
4113 | self.assertEqual(e.exitstatus, errno.ENOENT) | |
4114 | else: | |
4115 | self.fail("Expected ENOENT because key does not exist") | |
4116 | ||
4117 | self._fs_cmd("subvolume", "rm", self.volname, subvolname, group) | |
81eedcae TL |
4118 | self._fs_cmd("subvolumegroup", "rm", self.volname, group) |
4119 | ||
33c7a0ef TL |
4120 | # verify trash dir is clean. |
4121 | self._wait_for_trash_empty() | |
f67539c2 | 4122 | |
33c7a0ef | 4123 | def test_subvolume_user_metadata_remove_force_for_nonexisting_key(self): |
f38dd50b TL |
4124 | subvolname = self._gen_subvol_name() |
4125 | group = self._gen_subvol_grp_name() | |
81eedcae | 4126 | |
33c7a0ef | 4127 | # create group. |
81eedcae TL |
4128 | self._fs_cmd("subvolumegroup", "create", self.volname, group) |
4129 | ||
33c7a0ef TL |
4130 | # create subvolume in group. |
4131 | self._fs_cmd("subvolume", "create", self.volname, subvolname, "--group_name", group) | |
81eedcae | 4132 | |
33c7a0ef TL |
4133 | # set metadata for subvolume. |
4134 | key = "key" | |
4135 | value = "value" | |
4136 | self._fs_cmd("subvolume", "metadata", "set", self.volname, subvolname, key, value, "--group_name", group) | |
81eedcae | 4137 | |
33c7a0ef TL |
4138 | # remove metadata against specified key. |
4139 | try: | |
4140 | self._fs_cmd("subvolume", "metadata", "rm", self.volname, subvolname, key, "--group_name", group) | |
4141 | except CommandFailedError: | |
4142 | self.fail("expected the 'fs subvolume metadata rm' command to succeed") | |
81eedcae | 4143 | |
33c7a0ef | 4144 | # confirm key is removed by again fetching metadata |
81eedcae | 4145 | try: |
33c7a0ef TL |
4146 | self._fs_cmd("subvolume", "metadata", "get", self.volname, subvolname, key, "--group_name", group) |
4147 | except CommandFailedError as e: | |
4148 | self.assertEqual(e.exitstatus, errno.ENOENT) | |
92f5a8d4 | 4149 | else: |
33c7a0ef | 4150 | self.fail("Expected ENOENT because key does not exist") |
f67539c2 | 4151 | |
33c7a0ef TL |
4152 | # again remove metadata against already removed key with --force option. |
4153 | try: | |
4154 | self._fs_cmd("subvolume", "metadata", "rm", self.volname, subvolname, key, "--group_name", group, "--force") | |
4155 | except CommandFailedError: | |
4156 | self.fail("expected the 'fs subvolume metadata rm' (with --force) command to succeed") | |
81eedcae | 4157 | |
33c7a0ef TL |
4158 | self._fs_cmd("subvolume", "rm", self.volname, subvolname, group) |
4159 | self._fs_cmd("subvolumegroup", "rm", self.volname, group) | |
81eedcae | 4160 | |
33c7a0ef | 4161 | # verify trash dir is clean. |
494da23a TL |
4162 | self._wait_for_trash_empty() |
4163 | ||
33c7a0ef | 4164 | def test_subvolume_user_metadata_set_and_get_for_legacy_subvolume(self): |
f38dd50b TL |
4165 | subvolname = self._gen_subvol_name() |
4166 | group = self._gen_subvol_grp_name() | |
33c7a0ef TL |
4167 | |
4168 | # emulate a old-fashioned subvolume in a custom group | |
4169 | createpath = os.path.join(".", "volumes", group, subvolname) | |
1e59de90 | 4170 | self.mount_a.run_shell(['sudo', 'mkdir', '-p', createpath], omit_sudo=False) |
33c7a0ef TL |
4171 | |
4172 | # set metadata for subvolume. | |
4173 | key = "key" | |
4174 | value = "value" | |
4175 | try: | |
4176 | self._fs_cmd("subvolume", "metadata", "set", self.volname, subvolname, key, value, "--group_name", group) | |
4177 | except CommandFailedError: | |
4178 | self.fail("expected the 'fs subvolume metadata set' command to succeed") | |
4179 | ||
4180 | # get value for specified key. | |
4181 | try: | |
4182 | ret = self._fs_cmd("subvolume", "metadata", "get", self.volname, subvolname, key, "--group_name", group) | |
4183 | except CommandFailedError: | |
4184 | self.fail("expected the 'fs subvolume metadata get' command to succeed") | |
4185 | ||
4186 | # remove '\n' from returned value. | |
4187 | ret = ret.strip('\n') | |
4188 | ||
4189 | # match received value with expected value. | |
4190 | self.assertEqual(value, ret) | |
4191 | ||
4192 | self._fs_cmd("subvolume", "rm", self.volname, subvolname, group) | |
81eedcae | 4193 | self._fs_cmd("subvolumegroup", "rm", self.volname, group) |
494da23a | 4194 | |
33c7a0ef TL |
4195 | # verify trash dir is clean. |
4196 | self._wait_for_trash_empty() | |
4197 | ||
4198 | def test_subvolume_user_metadata_list_and_remove_for_legacy_subvolume(self): | |
f38dd50b TL |
4199 | subvolname = self._gen_subvol_name() |
4200 | group = self._gen_subvol_grp_name() | |
33c7a0ef TL |
4201 | |
4202 | # emulate a old-fashioned subvolume in a custom group | |
4203 | createpath = os.path.join(".", "volumes", group, subvolname) | |
1e59de90 | 4204 | self.mount_a.run_shell(['sudo', 'mkdir', '-p', createpath], omit_sudo=False) |
33c7a0ef TL |
4205 | |
4206 | # set metadata for subvolume. | |
4207 | input_metadata_dict = {f'key_{i}' : f'value_{i}' for i in range(3)} | |
4208 | ||
4209 | for k, v in input_metadata_dict.items(): | |
4210 | self._fs_cmd("subvolume", "metadata", "set", self.volname, subvolname, k, v, "--group_name", group) | |
4211 | ||
4212 | # list metadata | |
4213 | try: | |
4214 | ret = self._fs_cmd("subvolume", "metadata", "ls", self.volname, subvolname, "--group_name", group) | |
4215 | except CommandFailedError: | |
4216 | self.fail("expected the 'fs subvolume metadata ls' command to succeed") | |
4217 | ||
4218 | ret_dict = json.loads(ret) | |
4219 | ||
4220 | # compare output with expected output | |
4221 | self.assertDictEqual(input_metadata_dict, ret_dict) | |
4222 | ||
4223 | # remove metadata against specified key. | |
4224 | try: | |
4225 | self._fs_cmd("subvolume", "metadata", "rm", self.volname, subvolname, "key_1", "--group_name", group) | |
4226 | except CommandFailedError: | |
4227 | self.fail("expected the 'fs subvolume metadata rm' command to succeed") | |
4228 | ||
4229 | # confirm key is removed by again fetching metadata | |
4230 | try: | |
4231 | self._fs_cmd("subvolume", "metadata", "get", self.volname, subvolname, "key_1", "--group_name", group) | |
4232 | except CommandFailedError as e: | |
4233 | self.assertEqual(e.exitstatus, errno.ENOENT) | |
4234 | else: | |
4235 | self.fail("Expected ENOENT because key_1 does not exist") | |
4236 | ||
4237 | self._fs_cmd("subvolume", "rm", self.volname, subvolname, group) | |
4238 | self._fs_cmd("subvolumegroup", "rm", self.volname, group) | |
eafe8130 | 4239 | |
33c7a0ef TL |
4240 | # verify trash dir is clean. |
4241 | self._wait_for_trash_empty() | |
4242 | ||
class TestSubvolumeGroupSnapshots(TestVolumesHelper):
    """Tests for FS subvolume group snapshot operations."""

    @unittest.skip("skipping subvolumegroup snapshot tests")
    def test_nonexistent_subvolume_group_snapshot_rm(self):
        """Removing an already-removed group snapshot must fail with ENOENT."""
        subvolume = self._gen_subvol_name()
        group = self._gen_subvol_grp_name()
        snapshot = self._gen_subvol_snap_name()

        # group with one subvolume; snapshot taken and removed once
        self._fs_cmd("subvolumegroup", "create", self.volname, group)
        self._fs_cmd("subvolume", "create", self.volname, subvolume, "--group_name", group)
        self._fs_cmd("subvolumegroup", "snapshot", "create", self.volname, group, snapshot)
        self._fs_cmd("subvolumegroup", "snapshot", "rm", self.volname, group, snapshot)

        # a second removal must fail, and only ENOENT is acceptable
        try:
            self._fs_cmd("subvolumegroup", "snapshot", "rm", self.volname, group, snapshot)
        except CommandFailedError as ce:
            if ce.exitstatus != errno.ENOENT:
                raise
        else:
            raise RuntimeError("expected the 'fs subvolumegroup snapshot rm' command to fail")

        # clean up
        self._fs_cmd("subvolume", "rm", self.volname, subvolume, group)

        # verify trash dir is clean
        self._wait_for_trash_empty()

        self._fs_cmd("subvolumegroup", "rm", self.volname, group)

    @unittest.skip("skipping subvolumegroup snapshot tests")
    def test_subvolume_group_snapshot_create_and_rm(self):
        """A group snapshot can be created and removed cleanly."""
        subvolume = self._gen_subvol_name()
        group = self._gen_subvol_grp_name()
        snapshot = self._gen_subvol_snap_name()

        # group with one subvolume inside it
        self._fs_cmd("subvolumegroup", "create", self.volname, group)
        self._fs_cmd("subvolume", "create", self.volname, subvolume, "--group_name", group)

        # take and then drop a snapshot of the whole group
        self._fs_cmd("subvolumegroup", "snapshot", "create", self.volname, group, snapshot)
        self._fs_cmd("subvolumegroup", "snapshot", "rm", self.volname, group, snapshot)

        # clean up
        self._fs_cmd("subvolume", "rm", self.volname, subvolume, group)

        # verify trash dir is clean
        self._wait_for_trash_empty()

        self._fs_cmd("subvolumegroup", "rm", self.volname, group)

    @unittest.skip("skipping subvolumegroup snapshot tests")
    def test_subvolume_group_snapshot_idempotence(self):
        """Creating the same group snapshot twice must be idempotent."""
        subvolume = self._gen_subvol_name()
        group = self._gen_subvol_grp_name()
        snapshot = self._gen_subvol_snap_name()

        # group with one subvolume inside it
        self._fs_cmd("subvolumegroup", "create", self.volname, group)
        self._fs_cmd("subvolume", "create", self.volname, subvolume, "--group_name", group)

        # first snapshot, then a repeat with the same name -- the second
        # create must not error out
        self._fs_cmd("subvolumegroup", "snapshot", "create", self.volname, group, snapshot)
        self._fs_cmd("subvolumegroup", "snapshot", "create", self.volname, group, snapshot)

        # remove the snapshot and the subvolume
        self._fs_cmd("subvolumegroup", "snapshot", "rm", self.volname, group, snapshot)
        self._fs_cmd("subvolume", "rm", self.volname, subvolume, group)

        # verify trash dir is clean
        self._wait_for_trash_empty()

        self._fs_cmd("subvolumegroup", "rm", self.volname, group)

    @unittest.skip("skipping subvolumegroup snapshot tests")
    def test_subvolume_group_snapshot_ls(self):
        """'fs subvolumegroup snapshot ls' must list exactly the snapshots taken."""
        group = self._gen_subvol_grp_name()
        self._fs_cmd("subvolumegroup", "create", self.volname, group)

        # take several group snapshots
        snapshots = self._gen_subvol_snap_name(3)
        for snapshot in snapshots:
            self._fs_cmd("subvolumegroup", "snapshot", "create", self.volname, group, snapshot)

        listed = json.loads(self._fs_cmd('subvolumegroup', 'snapshot', 'ls', self.volname, group))
        if len(listed) == 0:
            raise RuntimeError("Expected the 'fs subvolumegroup snapshot ls' command to list the created subvolume group snapshots")
        else:
            # name multiset must match what was created
            names = [entry['name'] for entry in listed]
            if collections.Counter(names) != collections.Counter(snapshots):
                raise RuntimeError("Error creating or listing subvolume group snapshots")

    @unittest.skip("skipping subvolumegroup snapshot tests")
    def test_subvolume_group_snapshot_rm_force(self):
        """Removing a non-existent group snapshot with --force must succeed."""
        group = self._gen_subvol_grp_name()
        snapshot = self._gen_subvol_snap_name()

        # neither the group nor the snapshot exists; --force must swallow that
        try:
            self._fs_cmd("subvolumegroup", "snapshot", "rm", self.volname, group, snapshot, "--force")
        except CommandFailedError:
            raise RuntimeError("expected the 'fs subvolumegroup snapshot rm --force' command to succeed")

    def test_subvolume_group_snapshot_unsupported_status(self):
        """Group snapshots are currently unsupported; create must fail with ENOSYS."""
        group = self._gen_subvol_grp_name()
        snapshot = self._gen_subvol_snap_name()

        self._fs_cmd("subvolumegroup", "create", self.volname, group)

        # the create must be rejected with ENOSYS while the feature is disabled
        try:
            self._fs_cmd("subvolumegroup", "snapshot", "create", self.volname, group, snapshot)
        except CommandFailedError as ce:
            self.assertEqual(ce.exitstatus, errno.ENOSYS, "invalid error code on subvolumegroup snapshot create")
        else:
            self.fail("expected subvolumegroup snapshot create command to fail")

        self._fs_cmd("subvolumegroup", "rm", self.volname, group)
92f5a8d4 | 4389 | |
33c7a0ef TL |
4390 | |
4391 | class TestSubvolumeSnapshots(TestVolumesHelper): | |
4392 | """Tests for FS subvolume snapshot operations.""" | |
4393 | def test_nonexistent_subvolume_snapshot_rm(self): | |
f38dd50b TL |
4394 | subvolume = self._gen_subvol_name() |
4395 | snapshot = self._gen_subvol_snap_name() | |
33c7a0ef TL |
4396 | |
4397 | # create subvolume | |
4398 | self._fs_cmd("subvolume", "create", self.volname, subvolume) | |
4399 | ||
4400 | # snapshot subvolume | |
4401 | self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolume, snapshot) | |
4402 | ||
4403 | # remove snapshot | |
4404 | self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume, snapshot) | |
4405 | ||
4406 | # remove snapshot again | |
f67539c2 | 4407 | try: |
33c7a0ef | 4408 | self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume, snapshot) |
f67539c2 | 4409 | except CommandFailedError as ce: |
33c7a0ef TL |
4410 | if ce.exitstatus != errno.ENOENT: |
4411 | raise | |
f67539c2 | 4412 | else: |
33c7a0ef | 4413 | raise RuntimeError("expected the 'fs subvolume snapshot rm' command to fail") |
92f5a8d4 | 4414 | |
33c7a0ef TL |
4415 | # remove subvolume |
4416 | self._fs_cmd("subvolume", "rm", self.volname, subvolume) | |
92f5a8d4 | 4417 | |
f67539c2 TL |
4418 | # verify trash dir is clean |
4419 | self._wait_for_trash_empty() | |
92f5a8d4 | 4420 | |
33c7a0ef | 4421 | def test_subvolume_snapshot_create_and_rm(self): |
f38dd50b TL |
4422 | subvolume = self._gen_subvol_name() |
4423 | snapshot = self._gen_subvol_snap_name() | |
92f5a8d4 | 4424 | |
f67539c2 TL |
4425 | # create subvolume |
4426 | self._fs_cmd("subvolume", "create", self.volname, subvolume) | |
92f5a8d4 | 4427 | |
f67539c2 | 4428 | # snapshot subvolume |
33c7a0ef | 4429 | self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolume, snapshot) |
adb31ebb | 4430 | |
33c7a0ef TL |
4431 | # remove snapshot |
4432 | self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume, snapshot) | |
adb31ebb | 4433 | |
33c7a0ef TL |
4434 | # remove subvolume |
4435 | self._fs_cmd("subvolume", "rm", self.volname, subvolume) | |
adb31ebb | 4436 | |
33c7a0ef TL |
4437 | # verify trash dir is clean |
4438 | self._wait_for_trash_empty() | |
adb31ebb | 4439 | |
33c7a0ef | 4440 | def test_subvolume_snapshot_create_idempotence(self): |
f38dd50b TL |
4441 | subvolume = self._gen_subvol_name() |
4442 | snapshot = self._gen_subvol_snap_name() | |
adb31ebb | 4443 | |
33c7a0ef TL |
4444 | # create subvolume |
4445 | self._fs_cmd("subvolume", "create", self.volname, subvolume) | |
adb31ebb | 4446 | |
33c7a0ef TL |
4447 | # snapshot subvolume |
4448 | self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolume, snapshot) | |
adb31ebb | 4449 | |
33c7a0ef TL |
4450 | # try creating w/ same subvolume snapshot name -- should be idempotent |
4451 | self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolume, snapshot) | |
adb31ebb | 4452 | |
33c7a0ef TL |
4453 | # remove snapshot |
4454 | self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume, snapshot) | |
f67539c2 | 4455 | |
33c7a0ef TL |
4456 | # remove subvolume |
4457 | self._fs_cmd("subvolume", "rm", self.volname, subvolume) | |
adb31ebb TL |
4458 | |
4459 | # verify trash dir is clean | |
4460 | self._wait_for_trash_empty() | |
4461 | ||
33c7a0ef TL |
4462 | def test_subvolume_snapshot_info(self): |
4463 | ||
adb31ebb | 4464 | """ |
33c7a0ef | 4465 | tests the 'fs subvolume snapshot info' command |
adb31ebb | 4466 | """ |
33c7a0ef | 4467 | |
2a845540 | 4468 | snap_md = ["created_at", "data_pool", "has_pending_clones"] |
adb31ebb | 4469 | |
f38dd50b TL |
4470 | subvolume = self._gen_subvol_name() |
4471 | snapshot, snap_missing = self._gen_subvol_snap_name(2) | |
adb31ebb | 4472 | |
f67539c2 | 4473 | # create subvolume |
33c7a0ef TL |
4474 | self._fs_cmd("subvolume", "create", self.volname, subvolume, "--mode=777") |
4475 | ||
4476 | # do some IO | |
4477 | self._do_subvolume_io(subvolume, number_of_files=1) | |
adb31ebb | 4478 | |
f67539c2 TL |
4479 | # snapshot subvolume |
4480 | self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolume, snapshot) | |
adb31ebb | 4481 | |
f67539c2 TL |
4482 | snap_info = json.loads(self._get_subvolume_snapshot_info(self.volname, subvolume, snapshot)) |
4483 | for md in snap_md: | |
4484 | self.assertIn(md, snap_info, "'{0}' key not present in metadata of snapshot".format(md)) | |
4485 | self.assertEqual(snap_info["has_pending_clones"], "no") | |
adb31ebb | 4486 | |
33c7a0ef | 4487 | # snapshot info for non-existent snapshot |
adb31ebb | 4488 | try: |
33c7a0ef | 4489 | self._get_subvolume_snapshot_info(self.volname, subvolume, snap_missing) |
adb31ebb | 4490 | except CommandFailedError as ce: |
33c7a0ef | 4491 | self.assertEqual(ce.exitstatus, errno.ENOENT, "invalid error code on snapshot info of non-existent snapshot") |
adb31ebb | 4492 | else: |
33c7a0ef | 4493 | self.fail("expected snapshot info of non-existent snapshot to fail") |
adb31ebb | 4494 | |
33c7a0ef TL |
4495 | # remove snapshot |
4496 | self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume, snapshot) | |
f67539c2 | 4497 | |
33c7a0ef TL |
4498 | # remove subvolume |
4499 | self._fs_cmd("subvolume", "rm", self.volname, subvolume) | |
f67539c2 | 4500 | |
33c7a0ef TL |
4501 | # verify trash dir is clean |
4502 | self._wait_for_trash_empty() | |
f67539c2 | 4503 | |
33c7a0ef | 4504 | def test_subvolume_snapshot_in_group(self): |
f38dd50b TL |
4505 | subvolume = self._gen_subvol_name() |
4506 | group = self._gen_subvol_grp_name() | |
4507 | snapshot = self._gen_subvol_snap_name() | |
f67539c2 | 4508 | |
33c7a0ef TL |
4509 | # create group |
4510 | self._fs_cmd("subvolumegroup", "create", self.volname, group) | |
4511 | ||
4512 | # create subvolume in group | |
4513 | self._fs_cmd("subvolume", "create", self.volname, subvolume, "--group_name", group) | |
4514 | ||
4515 | # snapshot subvolume in group | |
4516 | self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolume, snapshot, group) | |
4517 | ||
4518 | # remove snapshot | |
4519 | self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume, snapshot, group) | |
4520 | ||
4521 | # remove subvolume | |
4522 | self._fs_cmd("subvolume", "rm", self.volname, subvolume, group) | |
4523 | ||
4524 | # verify trash dir is clean | |
4525 | self._wait_for_trash_empty() | |
4526 | ||
4527 | # remove group | |
4528 | self._fs_cmd("subvolumegroup", "rm", self.volname, group) | |
4529 | ||
4530 | def test_subvolume_snapshot_ls(self): | |
4531 | # tests the 'fs subvolume snapshot ls' command | |
4532 | ||
4533 | snapshots = [] | |
4534 | ||
4535 | # create subvolume | |
f38dd50b | 4536 | subvolume = self._gen_subvol_name() |
33c7a0ef TL |
4537 | self._fs_cmd("subvolume", "create", self.volname, subvolume) |
4538 | ||
4539 | # create subvolume snapshots | |
f38dd50b | 4540 | snapshots = self._gen_subvol_snap_name(3) |
33c7a0ef TL |
4541 | for snapshot in snapshots: |
4542 | self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolume, snapshot) | |
4543 | ||
4544 | subvolsnapshotls = json.loads(self._fs_cmd('subvolume', 'snapshot', 'ls', self.volname, subvolume)) | |
4545 | if len(subvolsnapshotls) == 0: | |
4546 | self.fail("Expected the 'fs subvolume snapshot ls' command to list the created subvolume snapshots") | |
4547 | else: | |
4548 | snapshotnames = [snapshot['name'] for snapshot in subvolsnapshotls] | |
4549 | if collections.Counter(snapshotnames) != collections.Counter(snapshots): | |
4550 | self.fail("Error creating or listing subvolume snapshots") | |
4551 | ||
4552 | # remove snapshot | |
4553 | for snapshot in snapshots: | |
4554 | self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume, snapshot) | |
4555 | ||
4556 | # remove subvolume | |
4557 | self._fs_cmd("subvolume", "rm", self.volname, subvolume) | |
4558 | ||
4559 | # verify trash dir is clean | |
4560 | self._wait_for_trash_empty() | |
4561 | ||
4562 | def test_subvolume_inherited_snapshot_ls(self): | |
4563 | # tests the scenario where 'fs subvolume snapshot ls' command | |
4564 | # should not list inherited snapshots created as part of snapshot | |
4565 | # at ancestral level | |
4566 | ||
4567 | snapshots = [] | |
f38dd50b TL |
4568 | subvolume = self._gen_subvol_name() |
4569 | group = self._gen_subvol_grp_name() | |
33c7a0ef TL |
4570 | snap_count = 3 |
4571 | ||
4572 | # create group | |
4573 | self._fs_cmd("subvolumegroup", "create", self.volname, group) | |
4574 | ||
4575 | # create subvolume in group | |
4576 | self._fs_cmd("subvolume", "create", self.volname, subvolume, "--group_name", group) | |
4577 | ||
4578 | # create subvolume snapshots | |
f38dd50b | 4579 | snapshots = self._gen_subvol_snap_name(snap_count) |
33c7a0ef TL |
4580 | for snapshot in snapshots: |
4581 | self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolume, snapshot, group) | |
4582 | ||
4583 | # Create snapshot at ancestral level | |
4584 | ancestral_snappath1 = os.path.join(".", "volumes", group, ".snap", "ancestral_snap_1") | |
4585 | ancestral_snappath2 = os.path.join(".", "volumes", group, ".snap", "ancestral_snap_2") | |
1e59de90 | 4586 | self.mount_a.run_shell(['sudo', 'mkdir', '-p', ancestral_snappath1, ancestral_snappath2], omit_sudo=False) |
33c7a0ef TL |
4587 | |
4588 | subvolsnapshotls = json.loads(self._fs_cmd('subvolume', 'snapshot', 'ls', self.volname, subvolume, group)) | |
4589 | self.assertEqual(len(subvolsnapshotls), snap_count) | |
4590 | ||
4591 | # remove ancestral snapshots | |
1e59de90 | 4592 | self.mount_a.run_shell(['sudo', 'rmdir', ancestral_snappath1, ancestral_snappath2], omit_sudo=False) |
33c7a0ef TL |
4593 | |
4594 | # remove snapshot | |
4595 | for snapshot in snapshots: | |
4596 | self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume, snapshot, group) | |
4597 | ||
4598 | # remove subvolume | |
4599 | self._fs_cmd("subvolume", "rm", self.volname, subvolume, group) | |
4600 | ||
4601 | # verify trash dir is clean | |
4602 | self._wait_for_trash_empty() | |
4603 | ||
4604 | # remove group | |
4605 | self._fs_cmd("subvolumegroup", "rm", self.volname, group) | |
4606 | ||
4607 | def test_subvolume_inherited_snapshot_info(self): | |
4608 | """ | |
4609 | tests the scenario where 'fs subvolume snapshot info' command | |
4610 | should fail for inherited snapshots created as part of snapshot | |
4611 | at ancestral level | |
4612 | """ | |
4613 | ||
f38dd50b TL |
4614 | subvolume = self._gen_subvol_name() |
4615 | group = self._gen_subvol_grp_name() | |
33c7a0ef TL |
4616 | |
4617 | # create group | |
4618 | self._fs_cmd("subvolumegroup", "create", self.volname, group) | |
4619 | ||
4620 | # create subvolume in group | |
4621 | self._fs_cmd("subvolume", "create", self.volname, subvolume, "--group_name", group) | |
4622 | ||
4623 | # Create snapshot at ancestral level | |
4624 | ancestral_snap_name = "ancestral_snap_1" | |
4625 | ancestral_snappath1 = os.path.join(".", "volumes", group, ".snap", ancestral_snap_name) | |
1e59de90 | 4626 | self.mount_a.run_shell(['sudo', 'mkdir', '-p', ancestral_snappath1], omit_sudo=False) |
33c7a0ef TL |
4627 | |
4628 | # Validate existence of inherited snapshot | |
4629 | group_path = os.path.join(".", "volumes", group) | |
4630 | inode_number_group_dir = int(self.mount_a.run_shell(['stat', '-c' '%i', group_path]).stdout.getvalue().strip()) | |
4631 | inherited_snap = "_{0}_{1}".format(ancestral_snap_name, inode_number_group_dir) | |
4632 | inherited_snappath = os.path.join(".", "volumes", group, subvolume,".snap", inherited_snap) | |
4633 | self.mount_a.run_shell(['ls', inherited_snappath]) | |
4634 | ||
4635 | # snapshot info on inherited snapshot | |
4636 | try: | |
4637 | self._get_subvolume_snapshot_info(self.volname, subvolume, inherited_snap, group) | |
4638 | except CommandFailedError as ce: | |
4639 | self.assertEqual(ce.exitstatus, errno.EINVAL, "invalid error code on snapshot info of inherited snapshot") | |
4640 | else: | |
4641 | self.fail("expected snapshot info of inherited snapshot to fail") | |
4642 | ||
4643 | # remove ancestral snapshots | |
1e59de90 | 4644 | self.mount_a.run_shell(['sudo', 'rmdir', ancestral_snappath1], omit_sudo=False) |
33c7a0ef TL |
4645 | |
4646 | # remove subvolume | |
4647 | self._fs_cmd("subvolume", "rm", self.volname, subvolume, "--group_name", group) | |
4648 | ||
4649 | # verify trash dir is clean | |
4650 | self._wait_for_trash_empty() | |
4651 | ||
4652 | # remove group | |
4653 | self._fs_cmd("subvolumegroup", "rm", self.volname, group) | |
4654 | ||
4655 | def test_subvolume_inherited_snapshot_rm(self): | |
4656 | """ | |
4657 | tests the scenario where 'fs subvolume snapshot rm' command | |
4658 | should fail for inherited snapshots created as part of snapshot | |
4659 | at ancestral level | |
4660 | """ | |
4661 | ||
f38dd50b TL |
4662 | subvolume = self._gen_subvol_name() |
4663 | group = self._gen_subvol_grp_name() | |
33c7a0ef TL |
4664 | |
4665 | # create group | |
4666 | self._fs_cmd("subvolumegroup", "create", self.volname, group) | |
4667 | ||
4668 | # create subvolume in group | |
4669 | self._fs_cmd("subvolume", "create", self.volname, subvolume, "--group_name", group) | |
4670 | ||
4671 | # Create snapshot at ancestral level | |
4672 | ancestral_snap_name = "ancestral_snap_1" | |
4673 | ancestral_snappath1 = os.path.join(".", "volumes", group, ".snap", ancestral_snap_name) | |
1e59de90 | 4674 | self.mount_a.run_shell(['sudo', 'mkdir', '-p', ancestral_snappath1], omit_sudo=False) |
33c7a0ef TL |
4675 | |
4676 | # Validate existence of inherited snap | |
4677 | group_path = os.path.join(".", "volumes", group) | |
4678 | inode_number_group_dir = int(self.mount_a.run_shell(['stat', '-c' '%i', group_path]).stdout.getvalue().strip()) | |
4679 | inherited_snap = "_{0}_{1}".format(ancestral_snap_name, inode_number_group_dir) | |
4680 | inherited_snappath = os.path.join(".", "volumes", group, subvolume,".snap", inherited_snap) | |
4681 | self.mount_a.run_shell(['ls', inherited_snappath]) | |
4682 | ||
4683 | # inherited snapshot should not be deletable | |
4684 | try: | |
4685 | self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume, inherited_snap, "--group_name", group) | |
4686 | except CommandFailedError as ce: | |
4687 | self.assertEqual(ce.exitstatus, errno.EINVAL, msg="invalid error code when removing inherited snapshot") | |
4688 | else: | |
4689 | self.fail("expected removing inheirted snapshot to fail") | |
4690 | ||
4691 | # remove ancestral snapshots | |
1e59de90 | 4692 | self.mount_a.run_shell(['sudo', 'rmdir', ancestral_snappath1], omit_sudo=False) |
33c7a0ef TL |
4693 | |
4694 | # remove subvolume | |
4695 | self._fs_cmd("subvolume", "rm", self.volname, subvolume, group) | |
4696 | ||
4697 | # verify trash dir is clean | |
4698 | self._wait_for_trash_empty() | |
4699 | ||
4700 | # remove group | |
4701 | self._fs_cmd("subvolumegroup", "rm", self.volname, group) | |
4702 | ||
4703 | def test_subvolume_subvolumegroup_snapshot_name_conflict(self): | |
4704 | """ | |
4705 | tests the scenario where creation of subvolume snapshot name | |
4706 | with same name as it's subvolumegroup snapshot name. This should | |
4707 | fail. | |
4708 | """ | |
4709 | ||
f38dd50b TL |
4710 | subvolume = self._gen_subvol_name() |
4711 | group = self._gen_subvol_grp_name() | |
4712 | group_snapshot = self._gen_subvol_snap_name() | |
33c7a0ef TL |
4713 | |
4714 | # create group | |
4715 | self._fs_cmd("subvolumegroup", "create", self.volname, group) | |
4716 | ||
4717 | # create subvolume in group | |
4718 | self._fs_cmd("subvolume", "create", self.volname, subvolume, "--group_name", group) | |
4719 | ||
4720 | # Create subvolumegroup snapshot | |
4721 | group_snapshot_path = os.path.join(".", "volumes", group, ".snap", group_snapshot) | |
1e59de90 | 4722 | self.mount_a.run_shell(['sudo', 'mkdir', '-p', group_snapshot_path], omit_sudo=False) |
33c7a0ef TL |
4723 | |
4724 | # Validate existence of subvolumegroup snapshot | |
4725 | self.mount_a.run_shell(['ls', group_snapshot_path]) | |
4726 | ||
4727 | # Creation of subvolume snapshot with it's subvolumegroup snapshot name should fail | |
4728 | try: | |
4729 | self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolume, group_snapshot, "--group_name", group) | |
4730 | except CommandFailedError as ce: | |
4731 | self.assertEqual(ce.exitstatus, errno.EINVAL, msg="invalid error code when creating subvolume snapshot with same name as subvolume group snapshot") | |
4732 | else: | |
4733 | self.fail("expected subvolume snapshot creation with same name as subvolumegroup snapshot to fail") | |
4734 | ||
4735 | # remove subvolumegroup snapshot | |
1e59de90 | 4736 | self.mount_a.run_shell(['sudo', 'rmdir', group_snapshot_path], omit_sudo=False) |
33c7a0ef TL |
4737 | |
4738 | # remove subvolume | |
4739 | self._fs_cmd("subvolume", "rm", self.volname, subvolume, group) | |
4740 | ||
4741 | # verify trash dir is clean | |
4742 | self._wait_for_trash_empty() | |
4743 | ||
4744 | # remove group | |
4745 | self._fs_cmd("subvolumegroup", "rm", self.volname, group) | |
4746 | ||
4747 | def test_subvolume_retain_snapshot_invalid_recreate(self): | |
4748 | """ | |
4749 | ensure retained subvolume recreate does not leave any incarnations in the subvolume and trash | |
4750 | """ | |
f38dd50b TL |
4751 | subvolume = self._gen_subvol_name() |
4752 | snapshot = self._gen_subvol_snap_name() | |
33c7a0ef TL |
4753 | |
4754 | # create subvolume | |
4755 | self._fs_cmd("subvolume", "create", self.volname, subvolume) | |
4756 | ||
4757 | # snapshot subvolume | |
4758 | self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolume, snapshot) | |
4759 | ||
4760 | # remove with snapshot retention | |
4761 | self._fs_cmd("subvolume", "rm", self.volname, subvolume, "--retain-snapshots") | |
4762 | ||
4763 | # recreate subvolume with an invalid pool | |
4764 | data_pool = "invalid_pool" | |
4765 | try: | |
4766 | self._fs_cmd("subvolume", "create", self.volname, subvolume, "--pool_layout", data_pool) | |
4767 | except CommandFailedError as ce: | |
4768 | self.assertEqual(ce.exitstatus, errno.EINVAL, "invalid error code on recreate of subvolume with invalid poolname") | |
4769 | else: | |
4770 | self.fail("expected recreate of subvolume with invalid poolname to fail") | |
4771 | ||
4772 | # fetch info | |
4773 | subvol_info = json.loads(self._fs_cmd("subvolume", "info", self.volname, subvolume)) | |
4774 | self.assertEqual(subvol_info["state"], "snapshot-retained", | |
4775 | msg="expected state to be 'snapshot-retained', found '{0}".format(subvol_info["state"])) | |
4776 | ||
4777 | # getpath | |
4778 | try: | |
4779 | self._fs_cmd("subvolume", "getpath", self.volname, subvolume) | |
4780 | except CommandFailedError as ce: | |
4781 | self.assertEqual(ce.exitstatus, errno.ENOENT, "invalid error code on getpath of subvolume with retained snapshots") | |
4782 | else: | |
4783 | self.fail("expected getpath of subvolume with retained snapshots to fail") | |
4784 | ||
4785 | # remove snapshot (should remove volume) | |
4786 | self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume, snapshot) | |
4787 | ||
4788 | # verify trash dir is clean | |
4789 | self._wait_for_trash_empty() | |
4790 | ||
4791 | def test_subvolume_retain_snapshot_recreate_subvolume(self): | |
4792 | """ | |
4793 | ensure a retained subvolume can be recreated and further snapshotted | |
4794 | """ | |
2a845540 | 4795 | snap_md = ["created_at", "data_pool", "has_pending_clones"] |
33c7a0ef | 4796 | |
f38dd50b TL |
4797 | subvolume = self._gen_subvol_name() |
4798 | snapshot1, snapshot2 = self._gen_subvol_snap_name(2) | |
33c7a0ef TL |
4799 | |
4800 | # create subvolume | |
4801 | self._fs_cmd("subvolume", "create", self.volname, subvolume) | |
4802 | ||
4803 | # snapshot subvolume | |
4804 | self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolume, snapshot1) | |
4805 | ||
4806 | # remove with snapshot retention | |
4807 | self._fs_cmd("subvolume", "rm", self.volname, subvolume, "--retain-snapshots") | |
4808 | ||
4809 | # fetch info | |
4810 | subvol_info = json.loads(self._fs_cmd("subvolume", "info", self.volname, subvolume)) | |
4811 | self.assertEqual(subvol_info["state"], "snapshot-retained", | |
4812 | msg="expected state to be 'snapshot-retained', found '{0}".format(subvol_info["state"])) | |
4813 | ||
4814 | # recreate retained subvolume | |
4815 | self._fs_cmd("subvolume", "create", self.volname, subvolume) | |
4816 | ||
4817 | # fetch info | |
4818 | subvol_info = json.loads(self._fs_cmd("subvolume", "info", self.volname, subvolume)) | |
4819 | self.assertEqual(subvol_info["state"], "complete", | |
4820 | msg="expected state to be 'snapshot-retained', found '{0}".format(subvol_info["state"])) | |
4821 | ||
4822 | # snapshot info (older snapshot) | |
4823 | snap_info = json.loads(self._get_subvolume_snapshot_info(self.volname, subvolume, snapshot1)) | |
4824 | for md in snap_md: | |
4825 | self.assertIn(md, snap_info, "'{0}' key not present in metadata of snapshot".format(md)) | |
4826 | self.assertEqual(snap_info["has_pending_clones"], "no") | |
4827 | ||
4828 | # snap-create (new snapshot) | |
4829 | self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolume, snapshot2) | |
4830 | ||
4831 | # remove with retain snapshots | |
4832 | self._fs_cmd("subvolume", "rm", self.volname, subvolume, "--retain-snapshots") | |
4833 | ||
4834 | # list snapshots | |
4835 | subvolsnapshotls = json.loads(self._fs_cmd('subvolume', 'snapshot', 'ls', self.volname, subvolume)) | |
4836 | self.assertEqual(len(subvolsnapshotls), 2, "Expected the 'fs subvolume snapshot ls' command to list the" | |
4837 | " created subvolume snapshots") | |
4838 | snapshotnames = [snapshot['name'] for snapshot in subvolsnapshotls] | |
4839 | for snap in [snapshot1, snapshot2]: | |
4840 | self.assertIn(snap, snapshotnames, "Missing snapshot '{0}' in snapshot list".format(snap)) | |
4841 | ||
4842 | # remove snapshots (should remove volume) | |
4843 | self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume, snapshot1) | |
4844 | self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume, snapshot2) | |
4845 | ||
4846 | # verify list subvolumes returns an empty list | |
4847 | subvolumels = json.loads(self._fs_cmd('subvolume', 'ls', self.volname)) | |
4848 | self.assertEqual(len(subvolumels), 0) | |
4849 | ||
4850 | # verify trash dir is clean | |
4851 | self._wait_for_trash_empty() | |
4852 | ||
4853 | def test_subvolume_retain_snapshot_with_snapshots(self): | |
4854 | """ | |
4855 | ensure retain snapshots based delete of a subvolume with snapshots retains the subvolume | |
4856 | also test allowed and dis-allowed operations on a retained subvolume | |
4857 | """ | |
2a845540 | 4858 | snap_md = ["created_at", "data_pool", "has_pending_clones"] |
33c7a0ef | 4859 | |
f38dd50b TL |
4860 | subvolume = self._gen_subvol_name() |
4861 | snapshot = self._gen_subvol_snap_name() | |
33c7a0ef TL |
4862 | |
4863 | # create subvolume | |
4864 | self._fs_cmd("subvolume", "create", self.volname, subvolume) | |
4865 | ||
4866 | # snapshot subvolume | |
4867 | self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolume, snapshot) | |
4868 | ||
4869 | # remove subvolume -- should fail with ENOTEMPTY since it has snapshots | |
4870 | try: | |
4871 | self._fs_cmd("subvolume", "rm", self.volname, subvolume) | |
4872 | except CommandFailedError as ce: | |
4873 | self.assertEqual(ce.exitstatus, errno.ENOTEMPTY, "invalid error code on rm of retained subvolume with snapshots") | |
4874 | else: | |
4875 | self.fail("expected rm of subvolume with retained snapshots to fail") | |
4876 | ||
4877 | # remove with snapshot retention | |
4878 | self._fs_cmd("subvolume", "rm", self.volname, subvolume, "--retain-snapshots") | |
4879 | ||
4880 | # fetch info | |
4881 | subvol_info = json.loads(self._fs_cmd("subvolume", "info", self.volname, subvolume)) | |
4882 | self.assertEqual(subvol_info["state"], "snapshot-retained", | |
4883 | msg="expected state to be 'snapshot-retained', found '{0}".format(subvol_info["state"])) | |
4884 | ||
4885 | ## test allowed ops in retained state | |
4886 | # ls | |
4887 | subvolumes = json.loads(self._fs_cmd('subvolume', 'ls', self.volname)) | |
4888 | self.assertEqual(len(subvolumes), 1, "subvolume ls count mismatch, expected '1', found {0}".format(len(subvolumes))) | |
4889 | self.assertEqual(subvolumes[0]['name'], subvolume, | |
4890 | "subvolume name mismatch in ls output, expected '{0}', found '{1}'".format(subvolume, subvolumes[0]['name'])) | |
4891 | ||
4892 | # snapshot info | |
4893 | snap_info = json.loads(self._get_subvolume_snapshot_info(self.volname, subvolume, snapshot)) | |
4894 | for md in snap_md: | |
4895 | self.assertIn(md, snap_info, "'{0}' key not present in metadata of snapshot".format(md)) | |
4896 | self.assertEqual(snap_info["has_pending_clones"], "no") | |
4897 | ||
4898 | # rm --force (allowed but should fail) | |
4899 | try: | |
4900 | self._fs_cmd("subvolume", "rm", self.volname, subvolume, "--force") | |
4901 | except CommandFailedError as ce: | |
4902 | self.assertEqual(ce.exitstatus, errno.ENOTEMPTY, "invalid error code on rm of subvolume with retained snapshots") | |
4903 | else: | |
4904 | self.fail("expected rm of subvolume with retained snapshots to fail") | |
4905 | ||
4906 | # rm (allowed but should fail) | |
4907 | try: | |
4908 | self._fs_cmd("subvolume", "rm", self.volname, subvolume) | |
4909 | except CommandFailedError as ce: | |
4910 | self.assertEqual(ce.exitstatus, errno.ENOTEMPTY, "invalid error code on rm of subvolume with retained snapshots") | |
4911 | else: | |
4912 | self.fail("expected rm of subvolume with retained snapshots to fail") | |
4913 | ||
4914 | ## test disallowed ops | |
4915 | # getpath | |
4916 | try: | |
4917 | self._fs_cmd("subvolume", "getpath", self.volname, subvolume) | |
4918 | except CommandFailedError as ce: | |
4919 | self.assertEqual(ce.exitstatus, errno.ENOENT, "invalid error code on getpath of subvolume with retained snapshots") | |
4920 | else: | |
4921 | self.fail("expected getpath of subvolume with retained snapshots to fail") | |
4922 | ||
4923 | # resize | |
4924 | nsize = self.DEFAULT_FILE_SIZE*1024*1024 | |
4925 | try: | |
4926 | self._fs_cmd("subvolume", "resize", self.volname, subvolume, str(nsize)) | |
4927 | except CommandFailedError as ce: | |
4928 | self.assertEqual(ce.exitstatus, errno.ENOENT, "invalid error code on resize of subvolume with retained snapshots") | |
4929 | else: | |
4930 | self.fail("expected resize of subvolume with retained snapshots to fail") | |
4931 | ||
4932 | # snap-create | |
4933 | try: | |
4934 | self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolume, "fail") | |
4935 | except CommandFailedError as ce: | |
4936 | self.assertEqual(ce.exitstatus, errno.ENOENT, "invalid error code on snapshot create of subvolume with retained snapshots") | |
4937 | else: | |
4938 | self.fail("expected snapshot create of subvolume with retained snapshots to fail") | |
4939 | ||
4940 | # remove snapshot (should remove volume) | |
4941 | self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume, snapshot) | |
4942 | ||
4943 | # verify list subvolumes returns an empty list | |
4944 | subvolumels = json.loads(self._fs_cmd('subvolume', 'ls', self.volname)) | |
4945 | self.assertEqual(len(subvolumels), 0) | |
4946 | ||
4947 | # verify trash dir is clean | |
4948 | self._wait_for_trash_empty() | |
4949 | ||
4950 | def test_subvolume_retain_snapshot_without_snapshots(self): | |
4951 | """ | |
4952 | ensure retain snapshots based delete of a subvolume with no snapshots, deletes the subbvolume | |
4953 | """ | |
f38dd50b | 4954 | subvolume = self._gen_subvol_name() |
33c7a0ef TL |
4955 | |
4956 | # create subvolume | |
4957 | self._fs_cmd("subvolume", "create", self.volname, subvolume) | |
4958 | ||
4959 | # remove with snapshot retention (should remove volume, no snapshots to retain) | |
4960 | self._fs_cmd("subvolume", "rm", self.volname, subvolume, "--retain-snapshots") | |
4961 | ||
4962 | # verify list subvolumes returns an empty list | |
4963 | subvolumels = json.loads(self._fs_cmd('subvolume', 'ls', self.volname)) | |
4964 | self.assertEqual(len(subvolumels), 0) | |
4965 | ||
4966 | # verify trash dir is clean | |
4967 | self._wait_for_trash_empty() | |
4968 | ||
4969 | def test_subvolume_retain_snapshot_trash_busy_recreate(self): | |
4970 | """ | |
4971 | ensure retained subvolume recreate fails if its trash is not yet purged | |
4972 | """ | |
f38dd50b TL |
4973 | subvolume = self._gen_subvol_name() |
4974 | snapshot = self._gen_subvol_snap_name() | |
33c7a0ef TL |
4975 | |
4976 | # create subvolume | |
4977 | self._fs_cmd("subvolume", "create", self.volname, subvolume) | |
4978 | ||
4979 | # snapshot subvolume | |
4980 | self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolume, snapshot) | |
4981 | ||
4982 | # remove with snapshot retention | |
4983 | self._fs_cmd("subvolume", "rm", self.volname, subvolume, "--retain-snapshots") | |
4984 | ||
4985 | # fake a trash entry | |
4986 | self._update_fake_trash(subvolume) | |
4987 | ||
4988 | # recreate subvolume | |
4989 | try: | |
4990 | self._fs_cmd("subvolume", "create", self.volname, subvolume) | |
4991 | except CommandFailedError as ce: | |
4992 | self.assertEqual(ce.exitstatus, errno.EAGAIN, "invalid error code on recreate of subvolume with purge pending") | |
4993 | else: | |
4994 | self.fail("expected recreate of subvolume with purge pending to fail") | |
4995 | ||
4996 | # clear fake trash entry | |
4997 | self._update_fake_trash(subvolume, create=False) | |
4998 | ||
4999 | # recreate subvolume | |
5000 | self._fs_cmd("subvolume", "create", self.volname, subvolume) | |
5001 | ||
5002 | # remove snapshot | |
5003 | self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume, snapshot) | |
5004 | ||
5005 | # remove subvolume | |
5006 | self._fs_cmd("subvolume", "rm", self.volname, subvolume) | |
5007 | ||
5008 | # verify trash dir is clean | |
5009 | self._wait_for_trash_empty() | |
5010 | ||
5011 | def test_subvolume_rm_with_snapshots(self): | |
f38dd50b TL |
5012 | subvolume = self._gen_subvol_name() |
5013 | snapshot = self._gen_subvol_snap_name() | |
33c7a0ef TL |
5014 | |
5015 | # create subvolume | |
5016 | self._fs_cmd("subvolume", "create", self.volname, subvolume) | |
5017 | ||
5018 | # snapshot subvolume | |
5019 | self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolume, snapshot) | |
5020 | ||
5021 | # remove subvolume -- should fail with ENOTEMPTY since it has snapshots | |
5022 | try: | |
5023 | self._fs_cmd("subvolume", "rm", self.volname, subvolume) | |
5024 | except CommandFailedError as ce: | |
5025 | if ce.exitstatus != errno.ENOTEMPTY: | |
5026 | raise RuntimeError("invalid error code returned when deleting subvolume with snapshots") | |
5027 | else: | |
5028 | raise RuntimeError("expected subvolume deletion to fail") | |
5029 | ||
5030 | # remove snapshot | |
5031 | self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume, snapshot) | |
5032 | ||
5033 | # remove subvolume | |
5034 | self._fs_cmd("subvolume", "rm", self.volname, subvolume) | |
5035 | ||
5036 | # verify trash dir is clean | |
5037 | self._wait_for_trash_empty() | |
5038 | ||
5039 | def test_subvolume_snapshot_protect_unprotect_sanity(self): | |
5040 | """ | |
5041 | Snapshot protect/unprotect commands are deprecated. This test exists to ensure that | |
5042 | invoking the command does not cause errors, till they are removed from a subsequent release. | |
5043 | """ | |
f38dd50b TL |
5044 | subvolume = self._gen_subvol_name() |
5045 | snapshot = self._gen_subvol_snap_name() | |
5046 | clone = self._gen_subvol_clone_name() | |
33c7a0ef TL |
5047 | |
5048 | # create subvolume | |
5049 | self._fs_cmd("subvolume", "create", self.volname, subvolume, "--mode=777") | |
5050 | ||
5051 | # do some IO | |
5052 | self._do_subvolume_io(subvolume, number_of_files=64) | |
5053 | ||
5054 | # snapshot subvolume | |
5055 | self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolume, snapshot) | |
5056 | ||
5057 | # now, protect snapshot | |
5058 | self._fs_cmd("subvolume", "snapshot", "protect", self.volname, subvolume, snapshot) | |
5059 | ||
5060 | # schedule a clone | |
5061 | self._fs_cmd("subvolume", "snapshot", "clone", self.volname, subvolume, snapshot, clone) | |
5062 | ||
5063 | # check clone status | |
5064 | self._wait_for_clone_to_complete(clone) | |
5065 | ||
5066 | # now, unprotect snapshot | |
5067 | self._fs_cmd("subvolume", "snapshot", "unprotect", self.volname, subvolume, snapshot) | |
5068 | ||
5069 | # verify clone | |
5070 | self._verify_clone(subvolume, snapshot, clone) | |
5071 | ||
5072 | # remove snapshot | |
5073 | self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume, snapshot) | |
5074 | ||
5075 | # remove subvolumes | |
5076 | self._fs_cmd("subvolume", "rm", self.volname, subvolume) | |
5077 | self._fs_cmd("subvolume", "rm", self.volname, clone) | |
5078 | ||
5079 | # verify trash dir is clean | |
5080 | self._wait_for_trash_empty() | |
5081 | ||
5082 | def test_subvolume_snapshot_rm_force(self): | |
5083 | # test removing non existing subvolume snapshot with --force | |
f38dd50b TL |
5084 | subvolume = self._gen_subvol_name() |
5085 | snapshot = self._gen_subvol_snap_name() | |
33c7a0ef TL |
5086 | |
5087 | # remove snapshot | |
5088 | try: | |
5089 | self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume, snapshot, "--force") | |
5090 | except CommandFailedError: | |
5091 | raise RuntimeError("expected the 'fs subvolume snapshot rm --force' command to succeed") | |
5092 | ||
5093 | def test_subvolume_snapshot_metadata_set(self): | |
5094 | """ | |
5095 | Set custom metadata for subvolume snapshot. | |
5096 | """ | |
f38dd50b TL |
5097 | subvolname = self._gen_subvol_name() |
5098 | group = self._gen_subvol_grp_name() | |
5099 | snapshot = self._gen_subvol_snap_name() | |
33c7a0ef TL |
5100 | |
5101 | # create group. | |
5102 | self._fs_cmd("subvolumegroup", "create", self.volname, group) | |
5103 | ||
5104 | # create subvolume in group. | |
5105 | self._fs_cmd("subvolume", "create", self.volname, subvolname, group) | |
5106 | ||
5107 | # snapshot subvolume | |
5108 | self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolname, snapshot, group) | |
5109 | ||
5110 | # set metadata for snapshot. | |
5111 | key = "key" | |
5112 | value = "value" | |
5113 | try: | |
5114 | self._fs_cmd("subvolume", "snapshot", "metadata", "set", self.volname, subvolname, snapshot, key, value, group) | |
5115 | except CommandFailedError: | |
5116 | self.fail("expected the 'fs subvolume snapshot metadata set' command to succeed") | |
5117 | ||
5118 | self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolname, snapshot, group) | |
5119 | self._fs_cmd("subvolume", "rm", self.volname, subvolname, group) | |
5120 | self._fs_cmd("subvolumegroup", "rm", self.volname, group) | |
5121 | ||
5122 | # verify trash dir is clean. | |
5123 | self._wait_for_trash_empty() | |
5124 | ||
5125 | def test_subvolume_snapshot_metadata_set_idempotence(self): | |
5126 | """ | |
5127 | Set custom metadata for subvolume snapshot (Idempotency). | |
5128 | """ | |
f38dd50b TL |
5129 | subvolname = self._gen_subvol_name() |
5130 | group = self._gen_subvol_grp_name() | |
5131 | snapshot = self._gen_subvol_snap_name() | |
33c7a0ef TL |
5132 | |
5133 | # create group. | |
5134 | self._fs_cmd("subvolumegroup", "create", self.volname, group) | |
5135 | ||
5136 | # create subvolume in group. | |
5137 | self._fs_cmd("subvolume", "create", self.volname, subvolname, group) | |
5138 | ||
5139 | # snapshot subvolume | |
5140 | self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolname, snapshot, group) | |
5141 | ||
5142 | # set metadata for snapshot. | |
5143 | key = "key" | |
5144 | value = "value" | |
5145 | try: | |
5146 | self._fs_cmd("subvolume", "snapshot", "metadata", "set", self.volname, subvolname, snapshot, key, value, group) | |
5147 | except CommandFailedError: | |
5148 | self.fail("expected the 'fs subvolume snapshot metadata set' command to succeed") | |
5149 | ||
5150 | # set same metadata again for subvolume. | |
5151 | try: | |
5152 | self._fs_cmd("subvolume", "snapshot", "metadata", "set", self.volname, subvolname, snapshot, key, value, group) | |
5153 | except CommandFailedError: | |
5154 | self.fail("expected the 'fs subvolume snapshot metadata set' command to succeed because it is idempotent operation") | |
5155 | ||
5156 | # get value for specified key. | |
5157 | try: | |
5158 | ret = self._fs_cmd("subvolume", "snapshot", "metadata", "get", self.volname, subvolname, snapshot, key, group) | |
5159 | except CommandFailedError: | |
5160 | self.fail("expected the 'fs subvolume snapshot metadata get' command to succeed") | |
5161 | ||
5162 | # remove '\n' from returned value. | |
5163 | ret = ret.strip('\n') | |
5164 | ||
5165 | # match received value with expected value. | |
5166 | self.assertEqual(value, ret) | |
5167 | ||
5168 | self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolname, snapshot, group) | |
5169 | self._fs_cmd("subvolume", "rm", self.volname, subvolname, group) | |
5170 | self._fs_cmd("subvolumegroup", "rm", self.volname, group) | |
5171 | ||
5172 | # verify trash dir is clean. | |
5173 | self._wait_for_trash_empty() | |
5174 | ||
5175 | def test_subvolume_snapshot_metadata_get(self): | |
5176 | """ | |
5177 | Get custom metadata for a specified key in subvolume snapshot metadata. | |
5178 | """ | |
f38dd50b TL |
5179 | subvolname = self._gen_subvol_name() |
5180 | group = self._gen_subvol_grp_name() | |
5181 | snapshot = self._gen_subvol_snap_name() | |
33c7a0ef TL |
5182 | |
5183 | # create group. | |
5184 | self._fs_cmd("subvolumegroup", "create", self.volname, group) | |
5185 | ||
5186 | # create subvolume in group. | |
5187 | self._fs_cmd("subvolume", "create", self.volname, subvolname, group) | |
5188 | ||
5189 | # snapshot subvolume | |
5190 | self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolname, snapshot, group) | |
5191 | ||
5192 | # set metadata for snapshot. | |
5193 | key = "key" | |
5194 | value = "value" | |
5195 | self._fs_cmd("subvolume", "snapshot", "metadata", "set", self.volname, subvolname, snapshot, key, value, group) | |
5196 | ||
5197 | # get value for specified key. | |
5198 | try: | |
5199 | ret = self._fs_cmd("subvolume", "snapshot", "metadata", "get", self.volname, subvolname, snapshot, key, group) | |
5200 | except CommandFailedError: | |
5201 | self.fail("expected the 'fs subvolume snapshot metadata get' command to succeed") | |
5202 | ||
5203 | # remove '\n' from returned value. | |
5204 | ret = ret.strip('\n') | |
5205 | ||
5206 | # match received value with expected value. | |
5207 | self.assertEqual(value, ret) | |
5208 | ||
5209 | self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolname, snapshot, group) | |
5210 | self._fs_cmd("subvolume", "rm", self.volname, subvolname, group) | |
5211 | self._fs_cmd("subvolumegroup", "rm", self.volname, group) | |
5212 | ||
5213 | # verify trash dir is clean. | |
5214 | self._wait_for_trash_empty() | |
5215 | ||
5216 | def test_subvolume_snapshot_metadata_get_for_nonexisting_key(self): | |
5217 | """ | |
5218 | Get custom metadata for subvolume snapshot if specified key not exist in metadata. | |
5219 | """ | |
f38dd50b TL |
5220 | subvolname = self._gen_subvol_name() |
5221 | group = self._gen_subvol_grp_name() | |
5222 | snapshot = self._gen_subvol_snap_name() | |
33c7a0ef TL |
5223 | |
5224 | # create group. | |
5225 | self._fs_cmd("subvolumegroup", "create", self.volname, group) | |
5226 | ||
5227 | # create subvolume in group. | |
5228 | self._fs_cmd("subvolume", "create", self.volname, subvolname, group) | |
5229 | ||
5230 | # snapshot subvolume | |
5231 | self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolname, snapshot, group) | |
5232 | ||
5233 | # set metadata for snapshot. | |
5234 | key = "key" | |
5235 | value = "value" | |
5236 | self._fs_cmd("subvolume", "snapshot", "metadata", "set", self.volname, subvolname, snapshot, key, value, group) | |
5237 | ||
5238 | # try to get value for nonexisting key | |
5239 | # Expecting ENOENT exit status because key does not exist | |
5240 | try: | |
5241 | self._fs_cmd("subvolume", "snapshot", "metadata", "get", self.volname, subvolname, snapshot, "key_nonexist", group) | |
5242 | except CommandFailedError as e: | |
5243 | self.assertEqual(e.exitstatus, errno.ENOENT) | |
5244 | else: | |
5245 | self.fail("Expected ENOENT because 'key_nonexist' does not exist") | |
5246 | ||
5247 | self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolname, snapshot, group) | |
5248 | self._fs_cmd("subvolume", "rm", self.volname, subvolname, group) | |
5249 | self._fs_cmd("subvolumegroup", "rm", self.volname, group) | |
5250 | ||
5251 | # verify trash dir is clean. | |
5252 | self._wait_for_trash_empty() | |
5253 | ||
5254 | def test_subvolume_snapshot_metadata_get_for_nonexisting_section(self): | |
5255 | """ | |
5256 | Get custom metadata for subvolume snapshot if metadata is not added for subvolume snapshot. | |
5257 | """ | |
f38dd50b TL |
5258 | subvolname = self._gen_subvol_name() |
5259 | group = self._gen_subvol_grp_name() | |
5260 | snapshot = self._gen_subvol_snap_name() | |
33c7a0ef TL |
5261 | |
5262 | # create group. | |
5263 | self._fs_cmd("subvolumegroup", "create", self.volname, group) | |
5264 | ||
5265 | # create subvolume in group. | |
5266 | self._fs_cmd("subvolume", "create", self.volname, subvolname, group) | |
5267 | ||
5268 | # snapshot subvolume | |
5269 | self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolname, snapshot, group) | |
5270 | ||
5271 | # try to get value for nonexisting key (as section does not exist) | |
5272 | # Expecting ENOENT exit status because key does not exist | |
5273 | try: | |
5274 | self._fs_cmd("subvolume", "snapshot", "metadata", "get", self.volname, subvolname, snapshot, "key", group) | |
5275 | except CommandFailedError as e: | |
5276 | self.assertEqual(e.exitstatus, errno.ENOENT) | |
5277 | else: | |
5278 | self.fail("Expected ENOENT because section does not exist") | |
5279 | ||
5280 | self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolname, snapshot, group) | |
5281 | self._fs_cmd("subvolume", "rm", self.volname, subvolname, group) | |
5282 | self._fs_cmd("subvolumegroup", "rm", self.volname, group) | |
5283 | ||
5284 | # verify trash dir is clean. | |
5285 | self._wait_for_trash_empty() | |
5286 | ||
5287 | def test_subvolume_snapshot_metadata_update(self): | |
5288 | """ | |
5289 | Update custom metadata for a specified key in subvolume snapshot metadata. | |
5290 | """ | |
f38dd50b TL |
5291 | subvolname = self._gen_subvol_name() |
5292 | group = self._gen_subvol_grp_name() | |
5293 | snapshot = self._gen_subvol_snap_name() | |
33c7a0ef TL |
5294 | |
5295 | # create group. | |
5296 | self._fs_cmd("subvolumegroup", "create", self.volname, group) | |
5297 | ||
5298 | # create subvolume in group. | |
5299 | self._fs_cmd("subvolume", "create", self.volname, subvolname, group) | |
5300 | ||
5301 | # snapshot subvolume | |
5302 | self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolname, snapshot, group) | |
5303 | ||
5304 | # set metadata for snapshot. | |
5305 | key = "key" | |
5306 | value = "value" | |
5307 | self._fs_cmd("subvolume", "snapshot", "metadata", "set", self.volname, subvolname, snapshot, key, value, group) | |
5308 | ||
5309 | # update metadata against key. | |
5310 | new_value = "new_value" | |
5311 | self._fs_cmd("subvolume", "snapshot", "metadata", "set", self.volname, subvolname, snapshot, key, new_value, group) | |
5312 | ||
5313 | # get metadata for specified key of snapshot. | |
5314 | try: | |
5315 | ret = self._fs_cmd("subvolume", "snapshot", "metadata", "get", self.volname, subvolname, snapshot, key, group) | |
5316 | except CommandFailedError: | |
5317 | self.fail("expected the 'fs subvolume snapshot metadata get' command to succeed") | |
5318 | ||
5319 | # remove '\n' from returned value. | |
5320 | ret = ret.strip('\n') | |
5321 | ||
5322 | # match received value with expected value. | |
5323 | self.assertEqual(new_value, ret) | |
5324 | ||
5325 | self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolname, snapshot, group) | |
5326 | self._fs_cmd("subvolume", "rm", self.volname, subvolname, group) | |
5327 | self._fs_cmd("subvolumegroup", "rm", self.volname, group) | |
5328 | ||
5329 | # verify trash dir is clean. | |
5330 | self._wait_for_trash_empty() | |
5331 | ||
5332 | def test_subvolume_snapshot_metadata_list(self): | |
5333 | """ | |
5334 | List custom metadata for subvolume snapshot. | |
5335 | """ | |
f38dd50b TL |
5336 | subvolname = self._gen_subvol_name() |
5337 | group = self._gen_subvol_grp_name() | |
5338 | snapshot = self._gen_subvol_snap_name() | |
33c7a0ef TL |
5339 | |
5340 | # create group. | |
5341 | self._fs_cmd("subvolumegroup", "create", self.volname, group) | |
5342 | ||
5343 | # create subvolume in group. | |
5344 | self._fs_cmd("subvolume", "create", self.volname, subvolname, group) | |
5345 | ||
5346 | # snapshot subvolume | |
5347 | self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolname, snapshot, group) | |
5348 | ||
5349 | # set metadata for subvolume. | |
5350 | input_metadata_dict = {f'key_{i}' : f'value_{i}' for i in range(3)} | |
5351 | ||
5352 | for k, v in input_metadata_dict.items(): | |
5353 | self._fs_cmd("subvolume", "snapshot", "metadata", "set", self.volname, subvolname, snapshot, k, v, group) | |
5354 | ||
5355 | # list metadata | |
5356 | try: | |
5357 | ret_dict = json.loads(self._fs_cmd("subvolume", "snapshot", "metadata", "ls", self.volname, subvolname, snapshot, group)) | |
5358 | except CommandFailedError: | |
5359 | self.fail("expected the 'fs subvolume snapshot metadata ls' command to succeed") | |
5360 | ||
5361 | # compare output with expected output | |
5362 | self.assertDictEqual(input_metadata_dict, ret_dict) | |
5363 | ||
5364 | self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolname, snapshot, group) | |
5365 | self._fs_cmd("subvolume", "rm", self.volname, subvolname, group) | |
5366 | self._fs_cmd("subvolumegroup", "rm", self.volname, group) | |
5367 | ||
5368 | # verify trash dir is clean. | |
5369 | self._wait_for_trash_empty() | |
5370 | ||
5371 | def test_subvolume_snapshot_metadata_list_if_no_metadata_set(self): | |
5372 | """ | |
5373 | List custom metadata for subvolume snapshot if metadata is not added for subvolume snapshot. | |
5374 | """ | |
f38dd50b TL |
5375 | subvolname = self._gen_subvol_name() |
5376 | group = self._gen_subvol_grp_name() | |
5377 | snapshot = self._gen_subvol_snap_name() | |
33c7a0ef TL |
5378 | |
5379 | # create group. | |
5380 | self._fs_cmd("subvolumegroup", "create", self.volname, group) | |
5381 | ||
5382 | # create subvolume in group. | |
5383 | self._fs_cmd("subvolume", "create", self.volname, subvolname, group) | |
5384 | ||
5385 | # snapshot subvolume | |
5386 | self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolname, snapshot, group) | |
5387 | ||
5388 | # list metadata | |
5389 | try: | |
5390 | ret_dict = json.loads(self._fs_cmd("subvolume", "snapshot", "metadata", "ls", self.volname, subvolname, snapshot, group)) | |
5391 | except CommandFailedError: | |
5392 | self.fail("expected the 'fs subvolume snapshot metadata ls' command to succeed") | |
5393 | ||
5394 | # compare output with expected output | |
5395 | empty_dict = {} | |
5396 | self.assertDictEqual(ret_dict, empty_dict) | |
5397 | ||
5398 | self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolname, snapshot, group) | |
5399 | self._fs_cmd("subvolume", "rm", self.volname, subvolname, group) | |
5400 | self._fs_cmd("subvolumegroup", "rm", self.volname, group) | |
5401 | ||
5402 | # verify trash dir is clean. | |
5403 | self._wait_for_trash_empty() | |
5404 | ||
5405 | def test_subvolume_snapshot_metadata_remove(self): | |
5406 | """ | |
5407 | Remove custom metadata for a specified key in subvolume snapshot metadata. | |
5408 | """ | |
f38dd50b TL |
5409 | subvolname = self._gen_subvol_name() |
5410 | group = self._gen_subvol_grp_name() | |
5411 | snapshot = self._gen_subvol_snap_name() | |
33c7a0ef TL |
5412 | |
5413 | # create group. | |
5414 | self._fs_cmd("subvolumegroup", "create", self.volname, group) | |
5415 | ||
5416 | # create subvolume in group. | |
5417 | self._fs_cmd("subvolume", "create", self.volname, subvolname, group) | |
5418 | ||
5419 | # snapshot subvolume | |
5420 | self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolname, snapshot, group) | |
5421 | ||
5422 | # set metadata for snapshot. | |
5423 | key = "key" | |
5424 | value = "value" | |
5425 | self._fs_cmd("subvolume", "snapshot", "metadata", "set", self.volname, subvolname, snapshot, key, value, group) | |
5426 | ||
5427 | # remove metadata against specified key. | |
5428 | try: | |
5429 | self._fs_cmd("subvolume", "snapshot", "metadata", "rm", self.volname, subvolname, snapshot, key, group) | |
5430 | except CommandFailedError: | |
5431 | self.fail("expected the 'fs subvolume snapshot metadata rm' command to succeed") | |
5432 | ||
5433 | # confirm key is removed by again fetching metadata | |
5434 | try: | |
5435 | self._fs_cmd("subvolume", "snapshot", "metadata", "get", self.volname, subvolname, key, snapshot, group) | |
5436 | except CommandFailedError as e: | |
5437 | self.assertEqual(e.exitstatus, errno.ENOENT) | |
5438 | else: | |
5439 | self.fail("Expected ENOENT because key does not exist") | |
5440 | ||
5441 | self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolname, snapshot, group) | |
5442 | self._fs_cmd("subvolume", "rm", self.volname, subvolname, group) | |
5443 | self._fs_cmd("subvolumegroup", "rm", self.volname, group) | |
5444 | ||
5445 | # verify trash dir is clean. | |
5446 | self._wait_for_trash_empty() | |
5447 | ||
5448 | def test_subvolume_snapshot_metadata_remove_for_nonexisting_key(self): | |
5449 | """ | |
5450 | Remove custom metadata for subvolume snapshot if specified key not exist in metadata. | |
5451 | """ | |
f38dd50b TL |
5452 | subvolname = self._gen_subvol_name() |
5453 | group = self._gen_subvol_grp_name() | |
5454 | snapshot = self._gen_subvol_snap_name() | |
33c7a0ef TL |
5455 | |
5456 | # create group. | |
5457 | self._fs_cmd("subvolumegroup", "create", self.volname, group) | |
5458 | ||
5459 | # create subvolume in group. | |
5460 | self._fs_cmd("subvolume", "create", self.volname, subvolname, group) | |
5461 | ||
5462 | # snapshot subvolume | |
5463 | self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolname, snapshot, group) | |
5464 | ||
5465 | # set metadata for snapshot. | |
5466 | key = "key" | |
5467 | value = "value" | |
5468 | self._fs_cmd("subvolume", "snapshot", "metadata", "set", self.volname, subvolname, snapshot, key, value, group) | |
5469 | ||
5470 | # try to remove value for nonexisting key | |
5471 | # Expecting ENOENT exit status because key does not exist | |
5472 | try: | |
5473 | self._fs_cmd("subvolume", "snapshot", "metadata", "rm", self.volname, subvolname, snapshot, "key_nonexist", group) | |
5474 | except CommandFailedError as e: | |
5475 | self.assertEqual(e.exitstatus, errno.ENOENT) | |
5476 | else: | |
5477 | self.fail("Expected ENOENT because 'key_nonexist' does not exist") | |
adb31ebb | 5478 | |
33c7a0ef TL |
5479 | self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolname, snapshot, group) |
5480 | self._fs_cmd("subvolume", "rm", self.volname, subvolname, group) | |
5481 | self._fs_cmd("subvolumegroup", "rm", self.volname, group) | |
adb31ebb | 5482 | |
33c7a0ef | 5483 | # verify trash dir is clean. |
adb31ebb TL |
5484 | self._wait_for_trash_empty() |
5485 | ||
33c7a0ef | 5486 | def test_subvolume_snapshot_metadata_remove_for_nonexisting_section(self): |
adb31ebb | 5487 | """ |
33c7a0ef | 5488 | Remove custom metadata for subvolume snapshot if metadata is not added for subvolume snapshot. |
adb31ebb | 5489 | """ |
f38dd50b TL |
5490 | subvolname = self._gen_subvol_name() |
5491 | group = self._gen_subvol_grp_name() | |
5492 | snapshot = self._gen_subvol_snap_name() | |
adb31ebb | 5493 | |
33c7a0ef TL |
5494 | # create group. |
5495 | self._fs_cmd("subvolumegroup", "create", self.volname, group) | |
adb31ebb | 5496 | |
33c7a0ef TL |
5497 | # create subvolume in group. |
5498 | self._fs_cmd("subvolume", "create", self.volname, subvolname, group) | |
adb31ebb | 5499 | |
33c7a0ef TL |
5500 | # snapshot subvolume |
5501 | self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolname, snapshot, group) | |
f67539c2 | 5502 | |
33c7a0ef TL |
5503 | # try to remove value for nonexisting key (as section does not exist) |
5504 | # Expecting ENOENT exit status because key does not exist | |
5505 | try: | |
5506 | self._fs_cmd("subvolume", "snapshot", "metadata", "rm", self.volname, subvolname, snapshot, "key", group) | |
5507 | except CommandFailedError as e: | |
5508 | self.assertEqual(e.exitstatus, errno.ENOENT) | |
5509 | else: | |
5510 | self.fail("Expected ENOENT because section does not exist") | |
5511 | ||
5512 | self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolname, snapshot, group) | |
5513 | self._fs_cmd("subvolume", "rm", self.volname, subvolname, group) | |
5514 | self._fs_cmd("subvolumegroup", "rm", self.volname, group) | |
5515 | ||
5516 | # verify trash dir is clean. | |
f67539c2 TL |
5517 | self._wait_for_trash_empty() |
5518 | ||
33c7a0ef | 5519 | def test_subvolume_snapshot_metadata_remove_force(self): |
f67539c2 | 5520 | """ |
33c7a0ef | 5521 | Forcefully remove custom metadata for a specified key in subvolume snapshot metadata. |
f67539c2 | 5522 | """ |
f38dd50b TL |
5523 | subvolname = self._gen_subvol_name() |
5524 | group = self._gen_subvol_grp_name() | |
5525 | snapshot = self._gen_subvol_snap_name() | |
f67539c2 | 5526 | |
33c7a0ef TL |
5527 | # create group. |
5528 | self._fs_cmd("subvolumegroup", "create", self.volname, group) | |
5529 | ||
5530 | # create subvolume in group. | |
5531 | self._fs_cmd("subvolume", "create", self.volname, subvolname, group) | |
f67539c2 TL |
5532 | |
5533 | # snapshot subvolume | |
33c7a0ef | 5534 | self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolname, snapshot, group) |
f67539c2 | 5535 | |
33c7a0ef TL |
5536 | # set metadata for snapshot. |
5537 | key = "key" | |
5538 | value = "value" | |
5539 | self._fs_cmd("subvolume", "snapshot", "metadata", "set", self.volname, subvolname, snapshot, key, value, group) | |
f67539c2 | 5540 | |
33c7a0ef TL |
5541 | # remove metadata against specified key with --force option. |
5542 | try: | |
5543 | self._fs_cmd("subvolume", "snapshot", "metadata", "rm", self.volname, subvolname, snapshot, key, group, "--force") | |
5544 | except CommandFailedError: | |
5545 | self.fail("expected the 'fs subvolume snapshot metadata rm' command to succeed") | |
f67539c2 | 5546 | |
33c7a0ef | 5547 | # confirm key is removed by again fetching metadata |
f67539c2 | 5548 | try: |
33c7a0ef TL |
5549 | self._fs_cmd("subvolume", "snapshot", "metadata", "get", self.volname, subvolname, snapshot, key, group) |
5550 | except CommandFailedError as e: | |
5551 | self.assertEqual(e.exitstatus, errno.ENOENT) | |
f67539c2 | 5552 | else: |
33c7a0ef | 5553 | self.fail("Expected ENOENT because key does not exist") |
adb31ebb | 5554 | |
33c7a0ef TL |
5555 | self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolname, snapshot, group) |
5556 | self._fs_cmd("subvolume", "rm", self.volname, subvolname, group) | |
5557 | self._fs_cmd("subvolumegroup", "rm", self.volname, group) | |
adb31ebb | 5558 | |
33c7a0ef | 5559 | # verify trash dir is clean. |
adb31ebb TL |
5560 | self._wait_for_trash_empty() |
5561 | ||
33c7a0ef TL |
5562 | def test_subvolume_snapshot_metadata_remove_force_for_nonexisting_key(self): |
5563 | """ | |
5564 | Forcefully remove custom metadata for subvolume snapshot if specified key not exist in metadata. | |
5565 | """ | |
f38dd50b TL |
5566 | subvolname = self._gen_subvol_name() |
5567 | group = self._gen_subvol_grp_name() | |
5568 | snapshot = self._gen_subvol_snap_name() | |
adb31ebb | 5569 | |
33c7a0ef TL |
5570 | # create group. |
5571 | self._fs_cmd("subvolumegroup", "create", self.volname, group) | |
5572 | ||
5573 | # create subvolume in group. | |
5574 | self._fs_cmd("subvolume", "create", self.volname, subvolname, group) | |
adb31ebb TL |
5575 | |
5576 | # snapshot subvolume | |
33c7a0ef | 5577 | self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolname, snapshot, group) |
adb31ebb | 5578 | |
33c7a0ef TL |
5579 | # set metadata for snapshot. |
5580 | key = "key" | |
5581 | value = "value" | |
5582 | self._fs_cmd("subvolume", "snapshot", "metadata", "set", self.volname, subvolname, snapshot, key, value, group) | |
5583 | ||
5584 | # remove metadata against specified key. | |
adb31ebb | 5585 | try: |
33c7a0ef TL |
5586 | self._fs_cmd("subvolume", "snapshot", "metadata", "rm", self.volname, subvolname, snapshot, key, group) |
5587 | except CommandFailedError: | |
5588 | self.fail("expected the 'fs subvolume snapshot metadata rm' command to succeed") | |
5589 | ||
5590 | # confirm key is removed by again fetching metadata | |
5591 | try: | |
5592 | self._fs_cmd("subvolume", "snapshot", "metadata", "get", self.volname, subvolname, snapshot, key, group) | |
5593 | except CommandFailedError as e: | |
5594 | self.assertEqual(e.exitstatus, errno.ENOENT) | |
adb31ebb | 5595 | else: |
33c7a0ef | 5596 | self.fail("Expected ENOENT because key does not exist") |
adb31ebb | 5597 | |
33c7a0ef TL |
5598 | # again remove metadata against already removed key with --force option. |
5599 | try: | |
5600 | self._fs_cmd("subvolume", "snapshot", "metadata", "rm", self.volname, subvolname, snapshot, key, group, "--force") | |
5601 | except CommandFailedError: | |
5602 | self.fail("expected the 'fs subvolume snapshot metadata rm' (with --force) command to succeed") | |
adb31ebb | 5603 | |
33c7a0ef TL |
5604 | self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolname, snapshot, group) |
5605 | self._fs_cmd("subvolume", "rm", self.volname, subvolname, group) | |
5606 | self._fs_cmd("subvolumegroup", "rm", self.volname, group) | |
adb31ebb | 5607 | |
33c7a0ef | 5608 | # verify trash dir is clean. |
adb31ebb TL |
5609 | self._wait_for_trash_empty() |
5610 | ||
    def test_subvolume_snapshot_metadata_after_snapshot_remove(self):
        """
        Verify metadata removal of subvolume snapshot after snapshot removal.

        Deleting a snapshot must also delete its custom metadata: a
        subsequent 'metadata get' must fail with ENOENT, and the snapshot's
        SNAP_METADATA_* section must no longer appear in the subvolume's
        .meta config file.
        """
        subvolname = self._gen_subvol_name()
        group = self._gen_subvol_grp_name()
        snapshot = self._gen_subvol_snap_name()

        # create group.
        self._fs_cmd("subvolumegroup", "create", self.volname, group)

        # create subvolume in group.
        self._fs_cmd("subvolume", "create", self.volname, subvolname, group)

        # snapshot subvolume
        self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolname, snapshot, group)

        # set metadata for snapshot.
        key = "key"
        value = "value"
        self._fs_cmd("subvolume", "snapshot", "metadata", "set", self.volname, subvolname, snapshot, key, value, group)

        # get value for specified key.
        ret = self._fs_cmd("subvolume", "snapshot", "metadata", "get", self.volname, subvolname, snapshot, key, group)

        # remove '\n' from returned value.
        ret = ret.strip('\n')

        # match received value with expected value.
        self.assertEqual(value, ret)

        # remove subvolume snapshot.
        self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolname, snapshot, group)

        # try to get metadata after removing snapshot.
        # Expecting error ENOENT with error message of snapshot does not exist
        # (run via run_ceph_cmd with check_status=False so the nonzero exit
        # status and stderr text can be inspected instead of raising).
        cmd_ret = self.run_ceph_cmd(
            args=["fs", "subvolume", "snapshot", "metadata", "get", self.volname, subvolname, snapshot, key, group], check_status=False, stdout=StringIO(),
            stderr=StringIO())
        self.assertEqual(cmd_ret.returncode, errno.ENOENT, "Expecting ENOENT error")
        self.assertIn(f"snapshot '{snapshot}' does not exist", cmd_ret.stderr.getvalue(),
                      f"Expecting message: snapshot '{snapshot}' does not exist ")

        # confirm metadata is removed by searching section name in .meta file.
        # grep exits nonzero when the pattern is absent, which is the
        # expected outcome here.
        meta_path = os.path.join(".", "volumes", group, subvolname, ".meta")
        section_name = "SNAP_METADATA_" + snapshot

        try:
            self.mount_a.run_shell(f"sudo grep {section_name} {meta_path}", omit_sudo=False)
        except CommandFailedError as e:
            self.assertNotEqual(e.exitstatus, 0)
        else:
            self.fail("Expected non-zero exist status because section should not exist")

        self._fs_cmd("subvolume", "rm", self.volname, subvolname, group)
        self._fs_cmd("subvolumegroup", "rm", self.volname, group)

        # verify trash dir is clean.
        self._wait_for_trash_empty()
f67539c2 | 5670 | |
2a845540 TL |
    def test_clean_stale_subvolume_snapshot_metadata(self):
        """
        Validate cleaning of stale subvolume snapshot metadata.

        Fabricates a stale SNAP_METADATA_* section by saving the subvolume's
        .meta config, deleting the snapshot (which removes the section), and
        restoring the saved copy. Any subsequent subvolume operation (here,
        'subvolume info') is then expected to purge the stale section.
        """
        subvolname = self._gen_subvol_name()
        group = self._gen_subvol_grp_name()
        snapshot = self._gen_subvol_snap_name()

        # create group.
        self._fs_cmd("subvolumegroup", "create", self.volname, group)

        # create subvolume in group.
        self._fs_cmd("subvolume", "create", self.volname, subvolname, group)

        # snapshot subvolume
        self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolname, snapshot, group)

        # set metadata for snapshot.
        key = "key"
        value = "value"
        try:
            self._fs_cmd("subvolume", "snapshot", "metadata", "set", self.volname, subvolname, snapshot, key, value, group)
        except CommandFailedError:
            self.fail("expected the 'fs subvolume snapshot metadata set' command to succeed")

        # save the subvolume config file.
        meta_path = os.path.join(".", "volumes", group, subvolname, ".meta")
        tmp_meta_path = os.path.join(".", "volumes", group, subvolname, ".meta.stale_snap_section")
        self.mount_a.run_shell(['sudo', 'cp', '-p', meta_path, tmp_meta_path], omit_sudo=False)

        # Delete snapshot, this would remove user snap metadata
        self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolname, snapshot, group)

        # Copy back saved subvolume config file. This would have stale snapshot metadata
        self.mount_a.run_shell(['sudo', 'cp', '-p', tmp_meta_path, meta_path], omit_sudo=False)

        # Verify that it has stale snapshot metadata
        # (grep must succeed: the restored config still carries the section)
        section_name = "SNAP_METADATA_" + snapshot
        try:
            self.mount_a.run_shell(f"sudo grep {section_name} {meta_path}", omit_sudo=False)
        except CommandFailedError:
            self.fail("Expected grep cmd to succeed because stale snapshot metadata exist")

        # Do any subvolume operation to clean the stale snapshot metadata
        _ = json.loads(self._get_subvolume_info(self.volname, subvolname, group))

        # Verify that the stale snapshot metadata is cleaned
        # (grep must now exit nonzero: the section should be gone)
        try:
            self.mount_a.run_shell(f"sudo grep {section_name} {meta_path}", omit_sudo=False)
        except CommandFailedError as e:
            self.assertNotEqual(e.exitstatus, 0)
        else:
            self.fail("Expected non-zero exist status because stale snapshot metadata should not exist")

        self._fs_cmd("subvolume", "rm", self.volname, subvolname, group)
        self._fs_cmd("subvolumegroup", "rm", self.volname, group)

        # verify trash dir is clean.
        self._wait_for_trash_empty()
        # Clean tmp config file
        self.mount_a.run_shell(['sudo', 'rm', '-f', tmp_meta_path], omit_sudo=False)
5732 | ||
5733 | ||
f67539c2 TL |
5734 | class TestSubvolumeSnapshotClones(TestVolumesHelper): |
5735 | """ Tests for FS subvolume snapshot clone operations.""" | |
5736 | def test_clone_subvolume_info(self): | |
5737 | # tests the 'fs subvolume info' command for a clone | |
5738 | subvol_md = ["atime", "bytes_pcent", "bytes_quota", "bytes_used", "created_at", "ctime", | |
5739 | "data_pool", "gid", "mode", "mon_addrs", "mtime", "path", "pool_namespace", | |
5740 | "type", "uid"] | |
adb31ebb | 5741 | |
f38dd50b TL |
5742 | subvolume = self._gen_subvol_name() |
5743 | snapshot = self._gen_subvol_snap_name() | |
5744 | clone = self._gen_subvol_clone_name() | |
adb31ebb TL |
5745 | |
5746 | # create subvolume | |
522d829b | 5747 | self._fs_cmd("subvolume", "create", self.volname, subvolume, "--mode=777") |
adb31ebb | 5748 | |
f67539c2 TL |
5749 | # do some IO |
5750 | self._do_subvolume_io(subvolume, number_of_files=1) | |
5751 | ||
adb31ebb TL |
5752 | # snapshot subvolume |
5753 | self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolume, snapshot) | |
5754 | ||
f67539c2 TL |
5755 | # schedule a clone |
5756 | self._fs_cmd("subvolume", "snapshot", "clone", self.volname, subvolume, snapshot, clone) | |
adb31ebb | 5757 | |
f67539c2 TL |
5758 | # check clone status |
5759 | self._wait_for_clone_to_complete(clone) | |
adb31ebb | 5760 | |
f67539c2 TL |
5761 | # remove snapshot |
5762 | self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume, snapshot) | |
adb31ebb | 5763 | |
f67539c2 TL |
5764 | subvol_info = json.loads(self._get_subvolume_info(self.volname, clone)) |
5765 | if len(subvol_info) == 0: | |
5766 | raise RuntimeError("Expected the 'fs subvolume info' command to list metadata of subvolume") | |
5767 | for md in subvol_md: | |
5768 | if md not in subvol_info.keys(): | |
5769 | raise RuntimeError("%s not present in the metadata of subvolume" % md) | |
5770 | if subvol_info["type"] != "clone": | |
5771 | raise RuntimeError("type should be set to clone") | |
adb31ebb | 5772 | |
f67539c2 TL |
5773 | # remove subvolumes |
5774 | self._fs_cmd("subvolume", "rm", self.volname, subvolume) | |
5775 | self._fs_cmd("subvolume", "rm", self.volname, clone) | |
adb31ebb | 5776 | |
f67539c2 TL |
5777 | # verify trash dir is clean |
5778 | self._wait_for_trash_empty() | |
adb31ebb | 5779 | |
2a845540 TL |
5780 | def test_subvolume_snapshot_info_without_snapshot_clone(self): |
5781 | """ | |
1e59de90 | 5782 | Verify subvolume snapshot info output without cloning snapshot. |
2a845540 TL |
5783 | If no clone is performed then path /volumes/_index/clone/{track_id} |
5784 | will not exist. | |
5785 | """ | |
f38dd50b TL |
5786 | subvolume = self._gen_subvol_name() |
5787 | snapshot = self._gen_subvol_snap_name() | |
2a845540 TL |
5788 | |
5789 | # create subvolume. | |
5790 | self._fs_cmd("subvolume", "create", self.volname, subvolume, "--mode=777") | |
5791 | ||
5792 | # snapshot subvolume | |
5793 | self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolume, snapshot) | |
5794 | ||
5795 | # list snapshot info | |
5796 | result = json.loads(self._fs_cmd("subvolume", "snapshot", "info", self.volname, subvolume, snapshot)) | |
5797 | ||
5798 | # verify snapshot info | |
5799 | self.assertEqual(result['has_pending_clones'], "no") | |
5800 | self.assertFalse('orphan_clones_count' in result) | |
5801 | self.assertFalse('pending_clones' in result) | |
5802 | ||
5803 | # remove snapshot, subvolume, clone | |
5804 | self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume, snapshot) | |
5805 | self._fs_cmd("subvolume", "rm", self.volname, subvolume) | |
5806 | ||
5807 | # verify trash dir is clean | |
5808 | self._wait_for_trash_empty() | |
5809 | ||
5810 | def test_subvolume_snapshot_info_if_no_clone_pending(self): | |
5811 | """ | |
5812 | Verify subvolume snapshot info output if no clone is in pending state. | |
5813 | """ | |
f38dd50b TL |
5814 | subvolume = self._gen_subvol_name() |
5815 | snapshot = self._gen_subvol_snap_name() | |
2a845540 TL |
5816 | clone_list = [f'clone_{i}' for i in range(3)] |
5817 | ||
f38dd50b TL |
5818 | # disable "capped" clones |
5819 | self.config_set('mgr', 'mgr/volumes/snapshot_clone_no_wait', False) | |
5820 | ||
2a845540 TL |
5821 | # create subvolume. |
5822 | self._fs_cmd("subvolume", "create", self.volname, subvolume, "--mode=777") | |
5823 | ||
5824 | # snapshot subvolume | |
5825 | self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolume, snapshot) | |
5826 | ||
5827 | # schedule a clones | |
5828 | for clone in clone_list: | |
5829 | self._fs_cmd("subvolume", "snapshot", "clone", self.volname, subvolume, snapshot, clone) | |
5830 | ||
5831 | # check clones status | |
5832 | for clone in clone_list: | |
5833 | self._wait_for_clone_to_complete(clone) | |
5834 | ||
5835 | # list snapshot info | |
5836 | result = json.loads(self._fs_cmd("subvolume", "snapshot", "info", self.volname, subvolume, snapshot)) | |
5837 | ||
5838 | # verify snapshot info | |
5839 | self.assertEqual(result['has_pending_clones'], "no") | |
5840 | self.assertFalse('orphan_clones_count' in result) | |
5841 | self.assertFalse('pending_clones' in result) | |
5842 | ||
5843 | # remove snapshot, subvolume, clone | |
5844 | self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume, snapshot) | |
5845 | self._fs_cmd("subvolume", "rm", self.volname, subvolume) | |
5846 | for clone in clone_list: | |
5847 | self._fs_cmd("subvolume", "rm", self.volname, clone) | |
5848 | ||
5849 | # verify trash dir is clean | |
5850 | self._wait_for_trash_empty() | |
5851 | ||
5852 | def test_subvolume_snapshot_info_if_clone_pending_for_no_group(self): | |
5853 | """ | |
5854 | Verify subvolume snapshot info output if clones are in pending state. | |
5855 | Clones are not specified for particular target_group. Hence target_group | |
5856 | should not be in the output as we don't show _nogroup (default group) | |
5857 | """ | |
f38dd50b TL |
5858 | subvolume = self._gen_subvol_name() |
5859 | snapshot = self._gen_subvol_snap_name() | |
2a845540 TL |
5860 | clone_list = [f'clone_{i}' for i in range(3)] |
5861 | ||
5862 | # create subvolume. | |
5863 | self._fs_cmd("subvolume", "create", self.volname, subvolume, "--mode=777") | |
5864 | ||
5865 | # snapshot subvolume | |
5866 | self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolume, snapshot) | |
5867 | ||
5868 | # insert delay at the beginning of snapshot clone | |
5869 | self.config_set('mgr', 'mgr/volumes/snapshot_clone_delay', 5) | |
5870 | ||
f38dd50b TL |
5871 | # disable "capped" clones |
5872 | self.config_set('mgr', 'mgr/volumes/snapshot_clone_no_wait', False) | |
5873 | ||
2a845540 TL |
5874 | # schedule a clones |
5875 | for clone in clone_list: | |
5876 | self._fs_cmd("subvolume", "snapshot", "clone", self.volname, subvolume, snapshot, clone) | |
5877 | ||
5878 | # list snapshot info | |
5879 | result = json.loads(self._fs_cmd("subvolume", "snapshot", "info", self.volname, subvolume, snapshot)) | |
5880 | ||
5881 | # verify snapshot info | |
5882 | expected_clone_list = [] | |
5883 | for clone in clone_list: | |
5884 | expected_clone_list.append({"name": clone}) | |
5885 | self.assertEqual(result['has_pending_clones'], "yes") | |
5886 | self.assertFalse('orphan_clones_count' in result) | |
5887 | self.assertListEqual(result['pending_clones'], expected_clone_list) | |
5888 | self.assertEqual(len(result['pending_clones']), 3) | |
5889 | ||
5890 | # check clones status | |
5891 | for clone in clone_list: | |
5892 | self._wait_for_clone_to_complete(clone) | |
5893 | ||
5894 | # remove snapshot, subvolume, clone | |
5895 | self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume, snapshot) | |
5896 | self._fs_cmd("subvolume", "rm", self.volname, subvolume) | |
5897 | for clone in clone_list: | |
5898 | self._fs_cmd("subvolume", "rm", self.volname, clone) | |
5899 | ||
5900 | # verify trash dir is clean | |
5901 | self._wait_for_trash_empty() | |
5902 | ||
5903 | def test_subvolume_snapshot_info_if_clone_pending_for_target_group(self): | |
5904 | """ | |
5905 | Verify subvolume snapshot info output if clones are in pending state. | |
5906 | Clones are not specified for target_group. | |
5907 | """ | |
f38dd50b TL |
5908 | subvolume = self._gen_subvol_name() |
5909 | snapshot = self._gen_subvol_snap_name() | |
5910 | clone = self._gen_subvol_clone_name() | |
5911 | group = self._gen_subvol_grp_name() | |
5912 | target_group = self._gen_subvol_grp_name() | |
2a845540 TL |
5913 | |
5914 | # create groups | |
5915 | self._fs_cmd("subvolumegroup", "create", self.volname, group) | |
5916 | self._fs_cmd("subvolumegroup", "create", self.volname, target_group) | |
5917 | ||
5918 | # create subvolume | |
5919 | self._fs_cmd("subvolume", "create", self.volname, subvolume, group, "--mode=777") | |
5920 | ||
5921 | # snapshot subvolume | |
5922 | self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolume, snapshot, group) | |
5923 | ||
5924 | # insert delay at the beginning of snapshot clone | |
5925 | self.config_set('mgr', 'mgr/volumes/snapshot_clone_delay', 5) | |
5926 | ||
5927 | # schedule a clone | |
5928 | self._fs_cmd("subvolume", "snapshot", "clone", self.volname, subvolume, snapshot, clone, | |
5929 | "--group_name", group, "--target_group_name", target_group) | |
5930 | ||
5931 | # list snapshot info | |
5932 | result = json.loads(self._fs_cmd("subvolume", "snapshot", "info", self.volname, subvolume, snapshot, "--group_name", group)) | |
5933 | ||
5934 | # verify snapshot info | |
5935 | expected_clone_list = [{"name": clone, "target_group": target_group}] | |
5936 | self.assertEqual(result['has_pending_clones'], "yes") | |
5937 | self.assertFalse('orphan_clones_count' in result) | |
5938 | self.assertListEqual(result['pending_clones'], expected_clone_list) | |
5939 | self.assertEqual(len(result['pending_clones']), 1) | |
5940 | ||
5941 | # check clone status | |
5942 | self._wait_for_clone_to_complete(clone, clone_group=target_group) | |
5943 | ||
5944 | # remove snapshot | |
5945 | self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume, snapshot, group) | |
5946 | ||
5947 | # remove subvolumes | |
5948 | self._fs_cmd("subvolume", "rm", self.volname, subvolume, group) | |
5949 | self._fs_cmd("subvolume", "rm", self.volname, clone, target_group) | |
5950 | ||
5951 | # remove groups | |
5952 | self._fs_cmd("subvolumegroup", "rm", self.volname, group) | |
5953 | self._fs_cmd("subvolumegroup", "rm", self.volname, target_group) | |
5954 | ||
5955 | # verify trash dir is clean | |
5956 | self._wait_for_trash_empty() | |
5957 | ||
    def test_subvolume_snapshot_info_if_orphan_clone(self):
        """
        Verify subvolume snapshot info output if orphan clones exists.
        Orphan clones should not list under pending clones.
        orphan_clones_count should display correct count of orphan clones'
        """
        subvolume = self._gen_subvol_name()
        snapshot = self._gen_subvol_snap_name()
        clone_list = [f'clone_{i}' for i in range(3)]

        # create subvolume.
        self._fs_cmd("subvolume", "create", self.volname, subvolume, "--mode=777")

        # snapshot subvolume
        self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolume, snapshot)

        # insert delay at the beginning of snapshot clone
        # (15s: long enough to delete a track file while clones are still pending)
        self.config_set('mgr', 'mgr/volumes/snapshot_clone_delay', 15)

        # disable "capped" clones
        self.config_set('mgr', 'mgr/volumes/snapshot_clone_no_wait', False)

        # schedule a clones
        for clone in clone_list:
            self._fs_cmd("subvolume", "snapshot", "clone", self.volname, subvolume, snapshot, clone)

        # remove track file for third clone to make it orphan
        # NOTE(review): this relies on the on-disk layout of the subvolume's
        # .meta file — a 'clone snaps' section followed by one 'id = name'
        # line per pending clone, in scheduling order. splitlines()[3] is the
        # third entry after the grep'd header; confirm if the format changes.
        meta_path = os.path.join(".", "volumes", "_nogroup", subvolume, ".meta")
        pending_clones_result = self.mount_a.run_shell(['sudo', 'grep', 'clone snaps', '-A3', meta_path], omit_sudo=False, stdout=StringIO(), stderr=StringIO())
        third_clone_track_id = pending_clones_result.stdout.getvalue().splitlines()[3].split(" = ")[0]
        third_clone_track_path = os.path.join(".", "volumes", "_index", "clone", third_clone_track_id)
        self.mount_a.run_shell(f"sudo rm -f {third_clone_track_path}", omit_sudo=False)

        # list snapshot info
        result = json.loads(self._fs_cmd("subvolume", "snapshot", "info", self.volname, subvolume, snapshot))

        # verify snapshot info: only the first two clones are still tracked;
        # the clone whose track file was removed is counted as an orphan
        expected_clone_list = []
        for i in range(len(clone_list)-1):
            expected_clone_list.append({"name": clone_list[i]})
        self.assertEqual(result['has_pending_clones'], "yes")
        self.assertEqual(result['orphan_clones_count'], 1)
        self.assertListEqual(result['pending_clones'], expected_clone_list)
        self.assertEqual(len(result['pending_clones']), 2)

        # check clones status (only the two non-orphaned clones can complete)
        for i in range(len(clone_list)-1):
            self._wait_for_clone_to_complete(clone_list[i])

        # list snapshot info after cloning completion
        res = json.loads(self._fs_cmd("subvolume", "snapshot", "info", self.volname, subvolume, snapshot))

        # verify snapshot info (has_pending_clones should be no)
        self.assertEqual(res['has_pending_clones'], "no")
f67539c2 | 6013 | def test_non_clone_status(self): |
f38dd50b | 6014 | subvolume = self._gen_subvol_name() |
adb31ebb | 6015 | |
f67539c2 TL |
6016 | # create subvolume |
6017 | self._fs_cmd("subvolume", "create", self.volname, subvolume) | |
adb31ebb | 6018 | |
adb31ebb | 6019 | try: |
f67539c2 | 6020 | self._fs_cmd("clone", "status", self.volname, subvolume) |
adb31ebb | 6021 | except CommandFailedError as ce: |
f67539c2 TL |
6022 | if ce.exitstatus != errno.ENOTSUP: |
6023 | raise RuntimeError("invalid error code when fetching status of a non cloned subvolume") | |
adb31ebb | 6024 | else: |
f67539c2 | 6025 | raise RuntimeError("expected fetching of clone status of a subvolume to fail") |
adb31ebb | 6026 | |
f67539c2 TL |
6027 | # remove subvolume |
6028 | self._fs_cmd("subvolume", "rm", self.volname, subvolume) | |
adb31ebb | 6029 | |
f67539c2 TL |
6030 | # verify trash dir is clean |
6031 | self._wait_for_trash_empty() | |
6032 | ||
6033 | def test_subvolume_clone_inherit_snapshot_namespace_and_size(self): | |
f38dd50b TL |
6034 | subvolume = self._gen_subvol_name() |
6035 | snapshot = self._gen_subvol_snap_name() | |
6036 | clone = self._gen_subvol_clone_name() | |
f67539c2 TL |
6037 | osize = self.DEFAULT_FILE_SIZE*1024*1024*12 |
6038 | ||
6039 | # create subvolume, in an isolated namespace with a specified size | |
522d829b | 6040 | self._fs_cmd("subvolume", "create", self.volname, subvolume, "--namespace-isolated", "--size", str(osize), "--mode=777") |
f67539c2 TL |
6041 | |
6042 | # do some IO | |
6043 | self._do_subvolume_io(subvolume, number_of_files=8) | |
6044 | ||
6045 | # snapshot subvolume | |
6046 | self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolume, snapshot) | |
6047 | ||
6048 | # create a pool different from current subvolume pool | |
6049 | subvol_path = self._get_subvolume_path(self.volname, subvolume) | |
6050 | default_pool = self.mount_a.getfattr(subvol_path, "ceph.dir.layout.pool") | |
6051 | new_pool = "new_pool" | |
6052 | self.assertNotEqual(default_pool, new_pool) | |
6053 | self.fs.add_data_pool(new_pool) | |
6054 | ||
6055 | # update source subvolume pool | |
6056 | self._do_subvolume_pool_and_namespace_update(subvolume, pool=new_pool, pool_namespace="") | |
6057 | ||
6058 | # schedule a clone, with NO --pool specification | |
6059 | self._fs_cmd("subvolume", "snapshot", "clone", self.volname, subvolume, snapshot, clone) | |
6060 | ||
6061 | # check clone status | |
6062 | self._wait_for_clone_to_complete(clone) | |
6063 | ||
6064 | # verify clone | |
6065 | self._verify_clone(subvolume, snapshot, clone) | |
6066 | ||
6067 | # remove snapshot | |
adb31ebb TL |
6068 | self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume, snapshot) |
6069 | ||
f67539c2 TL |
6070 | # remove subvolumes |
6071 | self._fs_cmd("subvolume", "rm", self.volname, subvolume) | |
6072 | self._fs_cmd("subvolume", "rm", self.volname, clone) | |
adb31ebb TL |
6073 | |
6074 | # verify trash dir is clean | |
6075 | self._wait_for_trash_empty() | |
6076 | ||
1d09f67e | 6077 | def test_subvolume_clone_inherit_quota_attrs(self): |
f38dd50b TL |
6078 | subvolume = self._gen_subvol_name() |
6079 | snapshot = self._gen_subvol_snap_name() | |
6080 | clone = self._gen_subvol_clone_name() | |
1d09f67e TL |
6081 | osize = self.DEFAULT_FILE_SIZE*1024*1024*12 |
6082 | ||
6083 | # create subvolume with a specified size | |
6084 | self._fs_cmd("subvolume", "create", self.volname, subvolume, "--mode=777", "--size", str(osize)) | |
6085 | ||
6086 | # do some IO | |
6087 | self._do_subvolume_io(subvolume, number_of_files=8) | |
6088 | ||
6089 | # get subvolume path | |
6090 | subvolpath = self._get_subvolume_path(self.volname, subvolume) | |
6091 | ||
6092 | # set quota on number of files | |
6093 | self.mount_a.setfattr(subvolpath, 'ceph.quota.max_files', "20", sudo=True) | |
6094 | ||
6095 | # snapshot subvolume | |
6096 | self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolume, snapshot) | |
6097 | ||
6098 | # schedule a clone | |
6099 | self._fs_cmd("subvolume", "snapshot", "clone", self.volname, subvolume, snapshot, clone) | |
6100 | ||
6101 | # check clone status | |
6102 | self._wait_for_clone_to_complete(clone) | |
6103 | ||
6104 | # verify clone | |
6105 | self._verify_clone(subvolume, snapshot, clone) | |
6106 | ||
6107 | # get subvolume path | |
6108 | clonepath = self._get_subvolume_path(self.volname, clone) | |
6109 | ||
6110 | # verify quota max_files is inherited from source snapshot | |
6111 | subvol_quota = self.mount_a.getfattr(subvolpath, "ceph.quota.max_files") | |
6112 | clone_quota = self.mount_a.getfattr(clonepath, "ceph.quota.max_files") | |
6113 | self.assertEqual(subvol_quota, clone_quota) | |
6114 | ||
6115 | # remove snapshot | |
6116 | self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume, snapshot) | |
6117 | ||
6118 | # remove subvolumes | |
6119 | self._fs_cmd("subvolume", "rm", self.volname, subvolume) | |
6120 | self._fs_cmd("subvolume", "rm", self.volname, clone) | |
6121 | ||
6122 | # verify trash dir is clean | |
6123 | self._wait_for_trash_empty() | |
6124 | ||
f67539c2 | 6125 | def test_subvolume_clone_in_progress_getpath(self): |
f38dd50b TL |
6126 | subvolume = self._gen_subvol_name() |
6127 | snapshot = self._gen_subvol_snap_name() | |
6128 | clone = self._gen_subvol_clone_name() | |
adb31ebb TL |
6129 | |
6130 | # create subvolume | |
522d829b | 6131 | self._fs_cmd("subvolume", "create", self.volname, subvolume, "--mode=777") |
adb31ebb | 6132 | |
f67539c2 TL |
6133 | # do some IO |
6134 | self._do_subvolume_io(subvolume, number_of_files=64) | |
6135 | ||
adb31ebb TL |
6136 | # snapshot subvolume |
6137 | self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolume, snapshot) | |
6138 | ||
522d829b TL |
6139 | # Insert delay at the beginning of snapshot clone |
6140 | self.config_set('mgr', 'mgr/volumes/snapshot_clone_delay', 2) | |
6141 | ||
f67539c2 TL |
6142 | # schedule a clone |
6143 | self._fs_cmd("subvolume", "snapshot", "clone", self.volname, subvolume, snapshot, clone) | |
adb31ebb | 6144 | |
f67539c2 | 6145 | # clone should not be accessible right now |
adb31ebb | 6146 | try: |
f67539c2 | 6147 | self._get_subvolume_path(self.volname, clone) |
adb31ebb | 6148 | except CommandFailedError as ce: |
f67539c2 TL |
6149 | if ce.exitstatus != errno.EAGAIN: |
6150 | raise RuntimeError("invalid error code when fetching path of an pending clone") | |
adb31ebb | 6151 | else: |
f67539c2 | 6152 | raise RuntimeError("expected fetching path of an pending clone to fail") |
adb31ebb | 6153 | |
f67539c2 TL |
6154 | # check clone status |
6155 | self._wait_for_clone_to_complete(clone) | |
adb31ebb | 6156 | |
f67539c2 TL |
6157 | # clone should be accessible now |
6158 | subvolpath = self._get_subvolume_path(self.volname, clone) | |
6159 | self.assertNotEqual(subvolpath, None) | |
adb31ebb | 6160 | |
f67539c2 TL |
6161 | # verify clone |
6162 | self._verify_clone(subvolume, snapshot, clone) | |
6163 | ||
6164 | # remove snapshot | |
adb31ebb TL |
6165 | self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume, snapshot) |
6166 | ||
f67539c2 TL |
6167 | # remove subvolumes |
6168 | self._fs_cmd("subvolume", "rm", self.volname, subvolume) | |
6169 | self._fs_cmd("subvolume", "rm", self.volname, clone) | |
6170 | ||
adb31ebb TL |
6171 | # verify trash dir is clean |
6172 | self._wait_for_trash_empty() | |
6173 | ||
f67539c2 | 6174 | def test_subvolume_clone_in_progress_snapshot_rm(self): |
f38dd50b TL |
6175 | subvolume = self._gen_subvol_name() |
6176 | snapshot = self._gen_subvol_snap_name() | |
6177 | clone = self._gen_subvol_clone_name() | |
adb31ebb TL |
6178 | |
6179 | # create subvolume | |
522d829b | 6180 | self._fs_cmd("subvolume", "create", self.volname, subvolume, "--mode=777") |
adb31ebb | 6181 | |
f67539c2 TL |
6182 | # do some IO |
6183 | self._do_subvolume_io(subvolume, number_of_files=64) | |
6184 | ||
adb31ebb TL |
6185 | # snapshot subvolume |
6186 | self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolume, snapshot) | |
6187 | ||
522d829b TL |
6188 | # Insert delay at the beginning of snapshot clone |
6189 | self.config_set('mgr', 'mgr/volumes/snapshot_clone_delay', 2) | |
6190 | ||
f67539c2 TL |
6191 | # schedule a clone |
6192 | self._fs_cmd("subvolume", "snapshot", "clone", self.volname, subvolume, snapshot, clone) | |
adb31ebb | 6193 | |
f67539c2 | 6194 | # snapshot should not be deletable now |
adb31ebb | 6195 | try: |
f67539c2 | 6196 | self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume, snapshot) |
adb31ebb | 6197 | except CommandFailedError as ce: |
f67539c2 | 6198 | self.assertEqual(ce.exitstatus, errno.EAGAIN, msg="invalid error code when removing source snapshot of a clone") |
adb31ebb | 6199 | else: |
f67539c2 | 6200 | self.fail("expected removing source snapshot of a clone to fail") |
adb31ebb | 6201 | |
f67539c2 TL |
6202 | # check clone status |
6203 | self._wait_for_clone_to_complete(clone) | |
adb31ebb | 6204 | |
f67539c2 TL |
6205 | # clone should be accessible now |
6206 | subvolpath = self._get_subvolume_path(self.volname, clone) | |
6207 | self.assertNotEqual(subvolpath, None) | |
6208 | ||
6209 | # verify clone | |
6210 | self._verify_clone(subvolume, snapshot, clone) | |
adb31ebb TL |
6211 | |
6212 | # remove snapshot | |
6213 | self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume, snapshot) | |
6214 | ||
f67539c2 | 6215 | # remove subvolumes |
adb31ebb | 6216 | self._fs_cmd("subvolume", "rm", self.volname, subvolume) |
f67539c2 | 6217 | self._fs_cmd("subvolume", "rm", self.volname, clone) |
adb31ebb TL |
6218 | |
6219 | # verify trash dir is clean | |
6220 | self._wait_for_trash_empty() | |
6221 | ||
f67539c2 | 6222 | def test_subvolume_clone_in_progress_source(self): |
f38dd50b TL |
6223 | subvolume = self._gen_subvol_name() |
6224 | snapshot = self._gen_subvol_snap_name() | |
6225 | clone = self._gen_subvol_clone_name() | |
adb31ebb TL |
6226 | |
6227 | # create subvolume | |
522d829b | 6228 | self._fs_cmd("subvolume", "create", self.volname, subvolume, "--mode=777") |
adb31ebb | 6229 | |
f67539c2 TL |
6230 | # do some IO |
6231 | self._do_subvolume_io(subvolume, number_of_files=64) | |
6232 | ||
adb31ebb TL |
6233 | # snapshot subvolume |
6234 | self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolume, snapshot) | |
6235 | ||
522d829b TL |
6236 | # Insert delay at the beginning of snapshot clone |
6237 | self.config_set('mgr', 'mgr/volumes/snapshot_clone_delay', 2) | |
6238 | ||
f67539c2 | 6239 | # schedule a clone |
adb31ebb TL |
6240 | self._fs_cmd("subvolume", "snapshot", "clone", self.volname, subvolume, snapshot, clone) |
6241 | ||
f67539c2 TL |
6242 | # verify clone source |
6243 | result = json.loads(self._fs_cmd("clone", "status", self.volname, clone)) | |
6244 | source = result['status']['source'] | |
6245 | self.assertEqual(source['volume'], self.volname) | |
6246 | self.assertEqual(source['subvolume'], subvolume) | |
6247 | self.assertEqual(source.get('group', None), None) | |
6248 | self.assertEqual(source['snapshot'], snapshot) | |
6249 | ||
adb31ebb TL |
6250 | # check clone status |
6251 | self._wait_for_clone_to_complete(clone) | |
6252 | ||
f67539c2 TL |
6253 | # clone should be accessible now |
6254 | subvolpath = self._get_subvolume_path(self.volname, clone) | |
6255 | self.assertNotEqual(subvolpath, None) | |
adb31ebb | 6256 | |
f67539c2 TL |
6257 | # verify clone |
6258 | self._verify_clone(subvolume, snapshot, clone) | |
adb31ebb TL |
6259 | |
6260 | # remove snapshot | |
6261 | self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume, snapshot) | |
adb31ebb | 6262 | |
f67539c2 | 6263 | # remove subvolumes |
adb31ebb TL |
6264 | self._fs_cmd("subvolume", "rm", self.volname, subvolume) |
6265 | self._fs_cmd("subvolume", "rm", self.volname, clone) | |
6266 | ||
6267 | # verify trash dir is clean | |
6268 | self._wait_for_trash_empty() | |
6269 | ||
f67539c2 | 6270 | def test_subvolume_clone_retain_snapshot_with_snapshots(self): |
adb31ebb | 6271 | """ |
f67539c2 | 6272 | retain snapshots of a cloned subvolume and check disallowed operations |
adb31ebb | 6273 | """ |
f38dd50b TL |
6274 | subvolume = self._gen_subvol_name() |
6275 | snapshot1, snapshot2 = self._gen_subvol_snap_name(2) | |
6276 | clone = self._gen_subvol_clone_name() | |
adb31ebb TL |
6277 | |
6278 | # create subvolume | |
522d829b | 6279 | self._fs_cmd("subvolume", "create", self.volname, subvolume, "--mode=777") |
adb31ebb | 6280 | |
f67539c2 TL |
6281 | # store path for clone verification |
6282 | subvol1_path = self._get_subvolume_path(self.volname, subvolume) | |
6283 | ||
6284 | # do some IO | |
6285 | self._do_subvolume_io(subvolume, number_of_files=16) | |
6286 | ||
adb31ebb TL |
6287 | # snapshot subvolume |
6288 | self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolume, snapshot1) | |
6289 | ||
6290 | # remove with snapshot retention | |
6291 | self._fs_cmd("subvolume", "rm", self.volname, subvolume, "--retain-snapshots") | |
6292 | ||
f67539c2 TL |
6293 | # clone retained subvolume snapshot |
6294 | self._fs_cmd("subvolume", "snapshot", "clone", self.volname, subvolume, snapshot1, clone) | |
adb31ebb | 6295 | |
f67539c2 TL |
6296 | # check clone status |
6297 | self._wait_for_clone_to_complete(clone) | |
adb31ebb | 6298 | |
f67539c2 TL |
6299 | # verify clone |
6300 | self._verify_clone(subvolume, snapshot1, clone, subvol_path=subvol1_path) | |
adb31ebb | 6301 | |
f67539c2 TL |
6302 | # create a snapshot on the clone |
6303 | self._fs_cmd("subvolume", "snapshot", "create", self.volname, clone, snapshot2) | |
adb31ebb | 6304 | |
f67539c2 TL |
6305 | # retain a clone |
6306 | self._fs_cmd("subvolume", "rm", self.volname, clone, "--retain-snapshots") | |
adb31ebb TL |
6307 | |
6308 | # list snapshots | |
f67539c2 TL |
6309 | clonesnapshotls = json.loads(self._fs_cmd('subvolume', 'snapshot', 'ls', self.volname, clone)) |
6310 | self.assertEqual(len(clonesnapshotls), 1, "Expected the 'fs subvolume snapshot ls' command to list the" | |
adb31ebb | 6311 | " created subvolume snapshots") |
f67539c2 TL |
6312 | snapshotnames = [snapshot['name'] for snapshot in clonesnapshotls] |
6313 | for snap in [snapshot2]: | |
adb31ebb TL |
6314 | self.assertIn(snap, snapshotnames, "Missing snapshot '{0}' in snapshot list".format(snap)) |
6315 | ||
f67539c2 TL |
6316 | ## check disallowed operations on retained clone |
6317 | # clone-status | |
6318 | try: | |
6319 | self._fs_cmd("clone", "status", self.volname, clone) | |
6320 | except CommandFailedError as ce: | |
6321 | self.assertEqual(ce.exitstatus, errno.ENOENT, "invalid error code on clone status of clone with retained snapshots") | |
6322 | else: | |
6323 | self.fail("expected clone status of clone with retained snapshots to fail") | |
6324 | ||
6325 | # clone-cancel | |
6326 | try: | |
6327 | self._fs_cmd("clone", "cancel", self.volname, clone) | |
6328 | except CommandFailedError as ce: | |
6329 | self.assertEqual(ce.exitstatus, errno.ENOENT, "invalid error code on clone cancel of clone with retained snapshots") | |
6330 | else: | |
6331 | self.fail("expected clone cancel of clone with retained snapshots to fail") | |
6332 | ||
6333 | # remove snapshots (removes subvolumes as all are in retained state) | |
adb31ebb | 6334 | self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume, snapshot1) |
f67539c2 | 6335 | self._fs_cmd("subvolume", "snapshot", "rm", self.volname, clone, snapshot2) |
adb31ebb TL |
6336 | |
6337 | # verify list subvolumes returns an empty list | |
6338 | subvolumels = json.loads(self._fs_cmd('subvolume', 'ls', self.volname)) | |
6339 | self.assertEqual(len(subvolumels), 0) | |
6340 | ||
6341 | # verify trash dir is clean | |
6342 | self._wait_for_trash_empty() | |
6343 | ||
6344 | def test_subvolume_retain_snapshot_clone(self): | |
6345 | """ | |
6346 | clone a snapshot from a snapshot retained subvolume | |
6347 | """ | |
f38dd50b TL |
6348 | subvolume = self._gen_subvol_name() |
6349 | snapshot = self._gen_subvol_snap_name() | |
6350 | clone = self._gen_subvol_clone_name() | |
adb31ebb TL |
6351 | |
6352 | # create subvolume | |
522d829b | 6353 | self._fs_cmd("subvolume", "create", self.volname, subvolume, "--mode=777") |
adb31ebb TL |
6354 | |
6355 | # store path for clone verification | |
6356 | subvol_path = self._get_subvolume_path(self.volname, subvolume) | |
6357 | ||
6358 | # do some IO | |
6359 | self._do_subvolume_io(subvolume, number_of_files=16) | |
6360 | ||
6361 | # snapshot subvolume | |
6362 | self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolume, snapshot) | |
6363 | ||
6364 | # remove with snapshot retention | |
6365 | self._fs_cmd("subvolume", "rm", self.volname, subvolume, "--retain-snapshots") | |
6366 | ||
6367 | # clone retained subvolume snapshot | |
6368 | self._fs_cmd("subvolume", "snapshot", "clone", self.volname, subvolume, snapshot, clone) | |
6369 | ||
6370 | # check clone status | |
6371 | self._wait_for_clone_to_complete(clone) | |
6372 | ||
6373 | # verify clone | |
6374 | self._verify_clone(subvolume, snapshot, clone, subvol_path=subvol_path) | |
6375 | ||
6376 | # remove snapshots (removes retained volume) | |
6377 | self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume, snapshot) | |
6378 | ||
6379 | # remove subvolume | |
6380 | self._fs_cmd("subvolume", "rm", self.volname, clone) | |
6381 | ||
6382 | # verify list subvolumes returns an empty list | |
6383 | subvolumels = json.loads(self._fs_cmd('subvolume', 'ls', self.volname)) | |
6384 | self.assertEqual(len(subvolumels), 0) | |
92f5a8d4 TL |
6385 | |
6386 | # verify trash dir is clean | |
6387 | self._wait_for_trash_empty() | |
6388 | ||
f67539c2 | 6389 | def test_subvolume_retain_snapshot_clone_from_newer_snapshot(self): |
adb31ebb | 6390 | """ |
f67539c2 | 6391 | clone a subvolume from recreated subvolume's latest snapshot |
adb31ebb | 6392 | """ |
f38dd50b TL |
6393 | subvolume = self._gen_subvol_name() |
6394 | snapshot1, snapshot2 = self._gen_subvol_snap_name(2) | |
6395 | clone = self._gen_subvol_clone_name(1) | |
adb31ebb TL |
6396 | |
6397 | # create subvolume | |
522d829b | 6398 | self._fs_cmd("subvolume", "create", self.volname, subvolume, "--mode=777") |
adb31ebb | 6399 | |
adb31ebb TL |
6400 | # do some IO |
6401 | self._do_subvolume_io(subvolume, number_of_files=16) | |
6402 | ||
6403 | # snapshot subvolume | |
f67539c2 | 6404 | self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolume, snapshot1) |
adb31ebb TL |
6405 | |
6406 | # remove with snapshot retention | |
6407 | self._fs_cmd("subvolume", "rm", self.volname, subvolume, "--retain-snapshots") | |
6408 | ||
f67539c2 | 6409 | # recreate subvolume |
522d829b | 6410 | self._fs_cmd("subvolume", "create", self.volname, subvolume, "--mode=777") |
f67539c2 TL |
6411 | |
6412 | # get and store path for clone verification | |
6413 | subvol2_path = self._get_subvolume_path(self.volname, subvolume) | |
6414 | ||
6415 | # do some IO | |
6416 | self._do_subvolume_io(subvolume, number_of_files=16) | |
6417 | ||
6418 | # snapshot newer subvolume | |
6419 | self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolume, snapshot2) | |
6420 | ||
6421 | # remove with snapshot retention | |
6422 | self._fs_cmd("subvolume", "rm", self.volname, subvolume, "--retain-snapshots") | |
6423 | ||
6424 | # clone retained subvolume's newer snapshot | |
6425 | self._fs_cmd("subvolume", "snapshot", "clone", self.volname, subvolume, snapshot2, clone) | |
adb31ebb TL |
6426 | |
6427 | # check clone status | |
f67539c2 | 6428 | self._wait_for_clone_to_complete(clone) |
adb31ebb TL |
6429 | |
6430 | # verify clone | |
f67539c2 | 6431 | self._verify_clone(subvolume, snapshot2, clone, subvol_path=subvol2_path) |
adb31ebb TL |
6432 | |
6433 | # remove snapshot | |
f67539c2 TL |
6434 | self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume, snapshot1) |
6435 | self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume, snapshot2) | |
adb31ebb TL |
6436 | |
6437 | # remove subvolume | |
f67539c2 | 6438 | self._fs_cmd("subvolume", "rm", self.volname, clone) |
adb31ebb TL |
6439 | |
6440 | # verify list subvolumes returns an empty list | |
6441 | subvolumels = json.loads(self._fs_cmd('subvolume', 'ls', self.volname)) | |
6442 | self.assertEqual(len(subvolumels), 0) | |
6443 | ||
6444 | # verify trash dir is clean | |
6445 | self._wait_for_trash_empty() | |
6446 | ||
f67539c2 | 6447 | def test_subvolume_retain_snapshot_recreate(self): |
adb31ebb | 6448 | """ |
f67539c2 | 6449 | recreate a subvolume from one of its retained snapshots |
adb31ebb | 6450 | """ |
f38dd50b TL |
6451 | subvolume = self._gen_subvol_name() |
6452 | snapshot = self._gen_subvol_snap_name() | |
adb31ebb TL |
6453 | |
6454 | # create subvolume | |
522d829b | 6455 | self._fs_cmd("subvolume", "create", self.volname, subvolume, "--mode=777") |
adb31ebb TL |
6456 | |
6457 | # store path for clone verification | |
f67539c2 | 6458 | subvol_path = self._get_subvolume_path(self.volname, subvolume) |
adb31ebb TL |
6459 | |
6460 | # do some IO | |
6461 | self._do_subvolume_io(subvolume, number_of_files=16) | |
6462 | ||
6463 | # snapshot subvolume | |
f67539c2 | 6464 | self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolume, snapshot) |
adb31ebb TL |
6465 | |
6466 | # remove with snapshot retention | |
6467 | self._fs_cmd("subvolume", "rm", self.volname, subvolume, "--retain-snapshots") | |
6468 | ||
f67539c2 TL |
6469 | # recreate retained subvolume using its own snapshot to clone |
6470 | self._fs_cmd("subvolume", "snapshot", "clone", self.volname, subvolume, snapshot, subvolume) | |
adb31ebb TL |
6471 | |
6472 | # check clone status | |
f67539c2 | 6473 | self._wait_for_clone_to_complete(subvolume) |
adb31ebb TL |
6474 | |
6475 | # verify clone | |
f67539c2 | 6476 | self._verify_clone(subvolume, snapshot, subvolume, subvol_path=subvol_path) |
adb31ebb | 6477 | |
f67539c2 TL |
6478 | # remove snapshot |
6479 | self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume, snapshot) | |
6480 | ||
6481 | # remove subvolume | |
6482 | self._fs_cmd("subvolume", "rm", self.volname, subvolume) | |
adb31ebb TL |
6483 | |
6484 | # verify list subvolumes returns an empty list | |
6485 | subvolumels = json.loads(self._fs_cmd('subvolume', 'ls', self.volname)) | |
6486 | self.assertEqual(len(subvolumels), 0) | |
6487 | ||
6488 | # verify trash dir is clean | |
6489 | self._wait_for_trash_empty() | |
6490 | ||
f67539c2 | 6491 | def test_subvolume_retain_snapshot_trash_busy_recreate_clone(self): |
adb31ebb | 6492 | """ |
f67539c2 | 6493 | ensure retained clone recreate fails if its trash is not yet purged |
adb31ebb | 6494 | """ |
f38dd50b TL |
6495 | subvolume = self._gen_subvol_name() |
6496 | snapshot = self._gen_subvol_snap_name() | |
6497 | clone = self._gen_subvol_clone_name() | |
adb31ebb TL |
6498 | |
6499 | # create subvolume | |
6500 | self._fs_cmd("subvolume", "create", self.volname, subvolume) | |
6501 | ||
adb31ebb | 6502 | # snapshot subvolume |
f67539c2 | 6503 | self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolume, snapshot) |
adb31ebb | 6504 | |
f67539c2 TL |
6505 | # clone subvolume snapshot |
6506 | self._fs_cmd("subvolume", "snapshot", "clone", self.volname, subvolume, snapshot, clone) | |
adb31ebb | 6507 | |
f67539c2 TL |
6508 | # check clone status |
6509 | self._wait_for_clone_to_complete(clone) | |
adb31ebb | 6510 | |
f67539c2 TL |
6511 | # snapshot clone |
6512 | self._fs_cmd("subvolume", "snapshot", "create", self.volname, clone, snapshot) | |
adb31ebb | 6513 | |
f67539c2 TL |
6514 | # remove clone with snapshot retention |
6515 | self._fs_cmd("subvolume", "rm", self.volname, clone, "--retain-snapshots") | |
adb31ebb | 6516 | |
f67539c2 TL |
6517 | # fake a trash entry |
6518 | self._update_fake_trash(clone) | |
adb31ebb | 6519 | |
f67539c2 TL |
6520 | # clone subvolume snapshot (recreate) |
6521 | try: | |
6522 | self._fs_cmd("subvolume", "snapshot", "clone", self.volname, subvolume, snapshot, clone) | |
6523 | except CommandFailedError as ce: | |
6524 | self.assertEqual(ce.exitstatus, errno.EAGAIN, "invalid error code on recreate of clone with purge pending") | |
6525 | else: | |
6526 | self.fail("expected recreate of clone with purge pending to fail") | |
adb31ebb | 6527 | |
f67539c2 TL |
6528 | # clear fake trash entry |
6529 | self._update_fake_trash(clone, create=False) | |
6530 | ||
6531 | # recreate subvolume | |
6532 | self._fs_cmd("subvolume", "snapshot", "clone", self.volname, subvolume, snapshot, clone) | |
adb31ebb TL |
6533 | |
6534 | # check clone status | |
6535 | self._wait_for_clone_to_complete(clone) | |
6536 | ||
adb31ebb | 6537 | # remove snapshot |
f67539c2 TL |
6538 | self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume, snapshot) |
6539 | self._fs_cmd("subvolume", "snapshot", "rm", self.volname, clone, snapshot) | |
adb31ebb TL |
6540 | |
6541 | # remove subvolume | |
f67539c2 | 6542 | self._fs_cmd("subvolume", "rm", self.volname, subvolume) |
adb31ebb TL |
6543 | self._fs_cmd("subvolume", "rm", self.volname, clone) |
6544 | ||
adb31ebb TL |
6545 | # verify trash dir is clean |
6546 | self._wait_for_trash_empty() | |
6547 | ||
f67539c2 | 6548 | def test_subvolume_snapshot_attr_clone(self): |
f38dd50b TL |
6549 | subvolume = self._gen_subvol_name() |
6550 | snapshot = self._gen_subvol_snap_name() | |
6551 | clone = self._gen_subvol_clone_name() | |
92f5a8d4 TL |
6552 | |
6553 | # create subvolume | |
522d829b | 6554 | self._fs_cmd("subvolume", "create", self.volname, subvolume, "--mode=777") |
92f5a8d4 TL |
6555 | |
6556 | # do some IO | |
f67539c2 | 6557 | self._do_subvolume_io_mixed(subvolume) |
92f5a8d4 TL |
6558 | |
6559 | # snapshot subvolume | |
6560 | self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolume, snapshot) | |
6561 | ||
92f5a8d4 TL |
6562 | # schedule a clone |
6563 | self._fs_cmd("subvolume", "snapshot", "clone", self.volname, subvolume, snapshot, clone) | |
6564 | ||
92f5a8d4 TL |
6565 | # check clone status |
6566 | self._wait_for_clone_to_complete(clone) | |
6567 | ||
adb31ebb TL |
6568 | # verify clone |
6569 | self._verify_clone(subvolume, snapshot, clone) | |
6570 | ||
92f5a8d4 TL |
6571 | # remove snapshot |
6572 | self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume, snapshot) | |
6573 | ||
adb31ebb TL |
6574 | # remove subvolumes |
6575 | self._fs_cmd("subvolume", "rm", self.volname, subvolume) | |
6576 | self._fs_cmd("subvolume", "rm", self.volname, clone) | |
6577 | ||
6578 | # verify trash dir is clean | |
6579 | self._wait_for_trash_empty() | |
6580 | ||
33c7a0ef TL |
6581 | def test_clone_failure_status_pending_in_progress_complete(self): |
6582 | """ | |
6583 | ensure failure status is not shown when clone is not in failed/cancelled state | |
6584 | """ | |
f38dd50b TL |
6585 | subvolume = self._gen_subvol_name() |
6586 | snapshot = self._gen_subvol_snap_name() | |
6587 | clone1 = self._gen_subvol_clone_name() | |
33c7a0ef TL |
6588 | |
6589 | # create subvolume | |
6590 | self._fs_cmd("subvolume", "create", self.volname, subvolume, "--mode=777") | |
6591 | ||
6592 | # do some IO | |
6593 | self._do_subvolume_io(subvolume, number_of_files=200) | |
6594 | ||
6595 | # snapshot subvolume | |
6596 | self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolume, snapshot) | |
6597 | ||
6598 | # Insert delay at the beginning of snapshot clone | |
6599 | self.config_set('mgr', 'mgr/volumes/snapshot_clone_delay', 5) | |
6600 | ||
6601 | # schedule a clone1 | |
6602 | self._fs_cmd("subvolume", "snapshot", "clone", self.volname, subvolume, snapshot, clone1) | |
6603 | ||
6604 | # pending clone shouldn't show failure status | |
6605 | clone1_result = self._get_clone_status(clone1) | |
6606 | try: | |
6607 | clone1_result["status"]["failure"]["errno"] | |
6608 | except KeyError as e: | |
6609 | self.assertEqual(str(e), "'failure'") | |
6610 | else: | |
6611 | self.fail("clone status shouldn't show failure for pending clone") | |
6612 | ||
6613 | # check clone1 to be in-progress | |
6614 | self._wait_for_clone_to_be_in_progress(clone1) | |
6615 | ||
6616 | # in-progress clone1 shouldn't show failure status | |
6617 | clone1_result = self._get_clone_status(clone1) | |
6618 | try: | |
6619 | clone1_result["status"]["failure"]["errno"] | |
6620 | except KeyError as e: | |
6621 | self.assertEqual(str(e), "'failure'") | |
6622 | else: | |
6623 | self.fail("clone status shouldn't show failure for in-progress clone") | |
6624 | ||
6625 | # wait for clone1 to complete | |
6626 | self._wait_for_clone_to_complete(clone1) | |
6627 | ||
6628 | # complete clone1 shouldn't show failure status | |
6629 | clone1_result = self._get_clone_status(clone1) | |
6630 | try: | |
6631 | clone1_result["status"]["failure"]["errno"] | |
6632 | except KeyError as e: | |
6633 | self.assertEqual(str(e), "'failure'") | |
6634 | else: | |
6635 | self.fail("clone status shouldn't show failure for complete clone") | |
6636 | ||
6637 | # remove snapshot | |
6638 | self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume, snapshot) | |
6639 | ||
6640 | # remove subvolumes | |
6641 | self._fs_cmd("subvolume", "rm", self.volname, subvolume) | |
6642 | self._fs_cmd("subvolume", "rm", self.volname, clone1) | |
6643 | ||
6644 | # verify trash dir is clean | |
6645 | self._wait_for_trash_empty() | |
6646 | ||
6647 | def test_clone_failure_status_failed(self): | |
6648 | """ | |
6649 | ensure failure status is shown when clone is in failed state and validate the reason | |
6650 | """ | |
f38dd50b TL |
6651 | subvolume = self._gen_subvol_name() |
6652 | snapshot = self._gen_subvol_snap_name() | |
6653 | clone1 = self._gen_subvol_clone_name() | |
33c7a0ef TL |
6654 | |
6655 | # create subvolume | |
6656 | self._fs_cmd("subvolume", "create", self.volname, subvolume, "--mode=777") | |
6657 | ||
6658 | # do some IO | |
6659 | self._do_subvolume_io(subvolume, number_of_files=200) | |
6660 | ||
6661 | # snapshot subvolume | |
6662 | self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolume, snapshot) | |
6663 | ||
6664 | # Insert delay at the beginning of snapshot clone | |
6665 | self.config_set('mgr', 'mgr/volumes/snapshot_clone_delay', 5) | |
6666 | ||
6667 | # schedule a clone1 | |
6668 | self._fs_cmd("subvolume", "snapshot", "clone", self.volname, subvolume, snapshot, clone1) | |
6669 | ||
6670 | # remove snapshot from backend to force the clone failure. | |
6671 | snappath = os.path.join(".", "volumes", "_nogroup", subvolume, ".snap", snapshot) | |
1e59de90 | 6672 | self.mount_a.run_shell(['sudo', 'rmdir', snappath], omit_sudo=False) |
33c7a0ef TL |
6673 | |
6674 | # wait for clone1 to fail. | |
6675 | self._wait_for_clone_to_fail(clone1) | |
6676 | ||
6677 | # check clone1 status | |
6678 | clone1_result = self._get_clone_status(clone1) | |
6679 | self.assertEqual(clone1_result["status"]["state"], "failed") | |
6680 | self.assertEqual(clone1_result["status"]["failure"]["errno"], "2") | |
6681 | self.assertEqual(clone1_result["status"]["failure"]["error_msg"], "snapshot '{0}' does not exist".format(snapshot)) | |
6682 | ||
6683 | # clone removal should succeed after failure, remove clone1 | |
6684 | self._fs_cmd("subvolume", "rm", self.volname, clone1, "--force") | |
6685 | ||
6686 | # remove subvolumes | |
6687 | self._fs_cmd("subvolume", "rm", self.volname, subvolume) | |
6688 | ||
6689 | # verify trash dir is clean | |
6690 | self._wait_for_trash_empty() | |
6691 | ||
6692 | def test_clone_failure_status_pending_cancelled(self): | |
6693 | """ | |
6694 | ensure failure status is shown when clone is cancelled during pending state and validate the reason | |
6695 | """ | |
f38dd50b TL |
6696 | subvolume = self._gen_subvol_name() |
6697 | snapshot = self._gen_subvol_snap_name() | |
6698 | clone1 = self._gen_subvol_clone_name() | |
33c7a0ef TL |
6699 | |
6700 | # create subvolume | |
6701 | self._fs_cmd("subvolume", "create", self.volname, subvolume, "--mode=777") | |
6702 | ||
6703 | # do some IO | |
6704 | self._do_subvolume_io(subvolume, number_of_files=200) | |
6705 | ||
6706 | # snapshot subvolume | |
6707 | self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolume, snapshot) | |
6708 | ||
6709 | # Insert delay at the beginning of snapshot clone | |
6710 | self.config_set('mgr', 'mgr/volumes/snapshot_clone_delay', 5) | |
6711 | ||
6712 | # schedule a clone1 | |
6713 | self._fs_cmd("subvolume", "snapshot", "clone", self.volname, subvolume, snapshot, clone1) | |
6714 | ||
6715 | # cancel pending clone1 | |
6716 | self._fs_cmd("clone", "cancel", self.volname, clone1) | |
6717 | ||
6718 | # check clone1 status | |
6719 | clone1_result = self._get_clone_status(clone1) | |
6720 | self.assertEqual(clone1_result["status"]["state"], "canceled") | |
6721 | self.assertEqual(clone1_result["status"]["failure"]["errno"], "4") | |
6722 | self.assertEqual(clone1_result["status"]["failure"]["error_msg"], "user interrupted clone operation") | |
6723 | ||
6724 | # clone removal should succeed with force after cancelled, remove clone1 | |
6725 | self._fs_cmd("subvolume", "rm", self.volname, clone1, "--force") | |
6726 | ||
6727 | # remove snapshot | |
6728 | self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume, snapshot) | |
6729 | ||
6730 | # remove subvolumes | |
6731 | self._fs_cmd("subvolume", "rm", self.volname, subvolume) | |
6732 | ||
6733 | # verify trash dir is clean | |
6734 | self._wait_for_trash_empty() | |
6735 | ||
6736 | def test_clone_failure_status_in_progress_cancelled(self): | |
6737 | """ | |
6738 | ensure failure status is shown when clone is cancelled during in-progress state and validate the reason | |
6739 | """ | |
f38dd50b TL |
6740 | subvolume = self._gen_subvol_name() |
6741 | snapshot = self._gen_subvol_snap_name() | |
6742 | clone1 = self._gen_subvol_clone_name() | |
33c7a0ef TL |
6743 | |
6744 | # create subvolume | |
6745 | self._fs_cmd("subvolume", "create", self.volname, subvolume, "--mode=777") | |
6746 | ||
6747 | # do some IO | |
6748 | self._do_subvolume_io(subvolume, number_of_files=200) | |
6749 | ||
6750 | # snapshot subvolume | |
6751 | self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolume, snapshot) | |
6752 | ||
6753 | # Insert delay at the beginning of snapshot clone | |
6754 | self.config_set('mgr', 'mgr/volumes/snapshot_clone_delay', 5) | |
6755 | ||
6756 | # schedule a clone1 | |
6757 | self._fs_cmd("subvolume", "snapshot", "clone", self.volname, subvolume, snapshot, clone1) | |
6758 | ||
6759 | # wait for clone1 to be in-progress | |
6760 | self._wait_for_clone_to_be_in_progress(clone1) | |
6761 | ||
6762 | # cancel in-progess clone1 | |
6763 | self._fs_cmd("clone", "cancel", self.volname, clone1) | |
6764 | ||
6765 | # check clone1 status | |
6766 | clone1_result = self._get_clone_status(clone1) | |
6767 | self.assertEqual(clone1_result["status"]["state"], "canceled") | |
6768 | self.assertEqual(clone1_result["status"]["failure"]["errno"], "4") | |
6769 | self.assertEqual(clone1_result["status"]["failure"]["error_msg"], "user interrupted clone operation") | |
6770 | ||
6771 | # clone removal should succeed with force after cancelled, remove clone1 | |
6772 | self._fs_cmd("subvolume", "rm", self.volname, clone1, "--force") | |
6773 | ||
6774 | # remove snapshot | |
6775 | self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume, snapshot) | |
6776 | ||
6777 | # remove subvolumes | |
6778 | self._fs_cmd("subvolume", "rm", self.volname, subvolume) | |
6779 | ||
6780 | # verify trash dir is clean | |
6781 | self._wait_for_trash_empty() | |
6782 | ||
adb31ebb | 6783 | def test_subvolume_snapshot_clone(self): |
f38dd50b TL |
6784 | subvolume = self._gen_subvol_name() |
6785 | snapshot = self._gen_subvol_snap_name() | |
6786 | clone = self._gen_subvol_clone_name() | |
adb31ebb TL |
6787 | |
6788 | # create subvolume | |
522d829b | 6789 | self._fs_cmd("subvolume", "create", self.volname, subvolume, "--mode=777") |
adb31ebb TL |
6790 | |
6791 | # do some IO | |
6792 | self._do_subvolume_io(subvolume, number_of_files=64) | |
6793 | ||
6794 | # snapshot subvolume | |
6795 | self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolume, snapshot) | |
6796 | ||
6797 | # schedule a clone | |
6798 | self._fs_cmd("subvolume", "snapshot", "clone", self.volname, subvolume, snapshot, clone) | |
6799 | ||
6800 | # check clone status | |
6801 | self._wait_for_clone_to_complete(clone) | |
6802 | ||
92f5a8d4 | 6803 | # verify clone |
adb31ebb TL |
6804 | self._verify_clone(subvolume, snapshot, clone) |
6805 | ||
6806 | # remove snapshot | |
6807 | self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume, snapshot) | |
92f5a8d4 TL |
6808 | |
6809 | # remove subvolumes | |
6810 | self._fs_cmd("subvolume", "rm", self.volname, subvolume) | |
6811 | self._fs_cmd("subvolume", "rm", self.volname, clone) | |
6812 | ||
6813 | # verify trash dir is clean | |
6814 | self._wait_for_trash_empty() | |
6815 | ||
20effc67 | 6816 | def test_subvolume_snapshot_clone_quota_exceeded(self): |
f38dd50b TL |
6817 | subvolume = self._gen_subvol_name() |
6818 | snapshot = self._gen_subvol_snap_name() | |
6819 | clone = self._gen_subvol_clone_name() | |
20effc67 TL |
6820 | |
6821 | # create subvolume with 20MB quota | |
6822 | osize = self.DEFAULT_FILE_SIZE*1024*1024*20 | |
6823 | self._fs_cmd("subvolume", "create", self.volname, subvolume,"--mode=777", "--size", str(osize)) | |
6824 | ||
6825 | # do IO, write 50 files of 1MB each to exceed quota. This mostly succeeds as quota enforcement takes time. | |
39ae355f TL |
6826 | try: |
6827 | self._do_subvolume_io(subvolume, number_of_files=50) | |
6828 | except CommandFailedError: | |
6829 | # ignore quota enforcement error. | |
6830 | pass | |
20effc67 TL |
6831 | |
6832 | # snapshot subvolume | |
6833 | self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolume, snapshot) | |
6834 | ||
6835 | # schedule a clone | |
6836 | self._fs_cmd("subvolume", "snapshot", "clone", self.volname, subvolume, snapshot, clone) | |
6837 | ||
6838 | # check clone status | |
6839 | self._wait_for_clone_to_complete(clone) | |
6840 | ||
6841 | # verify clone | |
6842 | self._verify_clone(subvolume, snapshot, clone) | |
6843 | ||
6844 | # remove snapshot | |
6845 | self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume, snapshot) | |
6846 | ||
6847 | # remove subvolumes | |
6848 | self._fs_cmd("subvolume", "rm", self.volname, subvolume) | |
6849 | self._fs_cmd("subvolume", "rm", self.volname, clone) | |
6850 | ||
6851 | # verify trash dir is clean | |
6852 | self._wait_for_trash_empty() | |
6853 | ||
6854 | def test_subvolume_snapshot_in_complete_clone_rm(self): | |
6855 | """ | |
6856 | Validates the removal of clone when it is not in 'complete|cancelled|failed' state. | |
6857 | The forceful removl of subvolume clone succeeds only if it's in any of the | |
6858 | 'complete|cancelled|failed' states. It fails with EAGAIN in any other states. | |
6859 | """ | |
6860 | ||
f38dd50b TL |
6861 | subvolume = self._gen_subvol_name() |
6862 | snapshot = self._gen_subvol_snap_name() | |
6863 | clone = self._gen_subvol_clone_name() | |
20effc67 TL |
6864 | |
6865 | # create subvolume | |
6866 | self._fs_cmd("subvolume", "create", self.volname, subvolume, "--mode=777") | |
6867 | ||
6868 | # do some IO | |
6869 | self._do_subvolume_io(subvolume, number_of_files=64) | |
6870 | ||
6871 | # snapshot subvolume | |
6872 | self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolume, snapshot) | |
6873 | ||
6874 | # Insert delay at the beginning of snapshot clone | |
6875 | self.config_set('mgr', 'mgr/volumes/snapshot_clone_delay', 2) | |
6876 | ||
6877 | # schedule a clone | |
6878 | self._fs_cmd("subvolume", "snapshot", "clone", self.volname, subvolume, snapshot, clone) | |
6879 | ||
6880 | # Use --force since clone is not complete. Returns EAGAIN as clone is not either complete or cancelled. | |
6881 | try: | |
6882 | self._fs_cmd("subvolume", "rm", self.volname, clone, "--force") | |
6883 | except CommandFailedError as ce: | |
6884 | if ce.exitstatus != errno.EAGAIN: | |
6885 | raise RuntimeError("invalid error code when trying to remove failed clone") | |
6886 | else: | |
6887 | raise RuntimeError("expected error when removing a failed clone") | |
6888 | ||
6889 | # cancel on-going clone | |
6890 | self._fs_cmd("clone", "cancel", self.volname, clone) | |
6891 | ||
6892 | # verify canceled state | |
6893 | self._check_clone_canceled(clone) | |
6894 | ||
6895 | # clone removal should succeed after cancel | |
6896 | self._fs_cmd("subvolume", "rm", self.volname, clone, "--force") | |
6897 | ||
6898 | # remove snapshot | |
6899 | self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume, snapshot) | |
6900 | ||
6901 | # remove subvolumes | |
6902 | self._fs_cmd("subvolume", "rm", self.volname, subvolume) | |
6903 | ||
6904 | # verify trash dir is clean | |
6905 | self._wait_for_trash_empty() | |
6906 | ||
f67539c2 | 6907 | def test_subvolume_snapshot_clone_retain_suid_guid(self): |
f38dd50b TL |
6908 | subvolume = self._gen_subvol_name() |
6909 | snapshot = self._gen_subvol_snap_name() | |
6910 | clone = self._gen_subvol_clone_name() | |
f91f0fd5 | 6911 | |
f67539c2 | 6912 | # create subvolume |
522d829b | 6913 | self._fs_cmd("subvolume", "create", self.volname, subvolume, "--mode=777") |
f91f0fd5 | 6914 | |
f67539c2 TL |
6915 | # Create a file with suid, guid bits set along with executable bit. |
6916 | args = ["subvolume", "getpath", self.volname, subvolume] | |
6917 | args = tuple(args) | |
6918 | subvolpath = self._fs_cmd(*args) | |
6919 | self.assertNotEqual(subvolpath, None) | |
6920 | subvolpath = subvolpath[1:].rstrip() # remove "/" prefix and any trailing newline | |
f91f0fd5 | 6921 | |
f67539c2 TL |
6922 | file_path = subvolpath |
6923 | file_path = os.path.join(subvolpath, "test_suid_file") | |
6924 | self.mount_a.run_shell(["touch", file_path]) | |
6925 | self.mount_a.run_shell(["chmod", "u+sx,g+sx", file_path]) | |
f91f0fd5 | 6926 | |
f67539c2 TL |
6927 | # snapshot subvolume |
6928 | self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolume, snapshot) | |
6929 | ||
6930 | # schedule a clone | |
6931 | self._fs_cmd("subvolume", "snapshot", "clone", self.volname, subvolume, snapshot, clone) | |
6932 | ||
6933 | # check clone status | |
6934 | self._wait_for_clone_to_complete(clone) | |
6935 | ||
6936 | # verify clone | |
6937 | self._verify_clone(subvolume, snapshot, clone) | |
6938 | ||
6939 | # remove snapshot | |
6940 | self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume, snapshot) | |
6941 | ||
6942 | # remove subvolumes | |
6943 | self._fs_cmd("subvolume", "rm", self.volname, subvolume) | |
6944 | self._fs_cmd("subvolume", "rm", self.volname, clone) | |
6945 | ||
6946 | # verify trash dir is clean | |
6947 | self._wait_for_trash_empty() | |
6948 | ||
6949 | def test_subvolume_snapshot_clone_and_reclone(self): | |
f38dd50b TL |
6950 | subvolume = self._gen_subvol_name() |
6951 | snapshot = self._gen_subvol_snap_name() | |
6952 | clone1, clone2 = self._gen_subvol_clone_name(2) | |
92f5a8d4 TL |
6953 | |
6954 | # create subvolume | |
522d829b | 6955 | self._fs_cmd("subvolume", "create", self.volname, subvolume, "--mode=777") |
92f5a8d4 TL |
6956 | |
6957 | # do some IO | |
6958 | self._do_subvolume_io(subvolume, number_of_files=32) | |
6959 | ||
6960 | # snapshot subvolume | |
6961 | self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolume, snapshot) | |
6962 | ||
92f5a8d4 | 6963 | # schedule a clone |
f67539c2 | 6964 | self._fs_cmd("subvolume", "snapshot", "clone", self.volname, subvolume, snapshot, clone1) |
92f5a8d4 TL |
6965 | |
6966 | # check clone status | |
f67539c2 | 6967 | self._wait_for_clone_to_complete(clone1) |
92f5a8d4 | 6968 | |
adb31ebb | 6969 | # verify clone |
f67539c2 | 6970 | self._verify_clone(subvolume, snapshot, clone1) |
adb31ebb | 6971 | |
92f5a8d4 TL |
6972 | # remove snapshot |
6973 | self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume, snapshot) | |
6974 | ||
f67539c2 TL |
6975 | # now the clone is just like a normal subvolume -- snapshot the clone and fork |
6976 | # another clone. before that do some IO so it's can be differentiated. | |
6977 | self._do_subvolume_io(clone1, create_dir="data", number_of_files=32) | |
6978 | ||
6979 | # snapshot clone -- use same snap name | |
6980 | self._fs_cmd("subvolume", "snapshot", "create", self.volname, clone1, snapshot) | |
6981 | ||
6982 | # schedule a clone | |
6983 | self._fs_cmd("subvolume", "snapshot", "clone", self.volname, clone1, snapshot, clone2) | |
6984 | ||
6985 | # check clone status | |
6986 | self._wait_for_clone_to_complete(clone2) | |
6987 | ||
6988 | # verify clone | |
6989 | self._verify_clone(clone1, snapshot, clone2) | |
6990 | ||
6991 | # remove snapshot | |
6992 | self._fs_cmd("subvolume", "snapshot", "rm", self.volname, clone1, snapshot) | |
92f5a8d4 TL |
6993 | |
6994 | # remove subvolumes | |
6995 | self._fs_cmd("subvolume", "rm", self.volname, subvolume) | |
f67539c2 TL |
6996 | self._fs_cmd("subvolume", "rm", self.volname, clone1) |
6997 | self._fs_cmd("subvolume", "rm", self.volname, clone2) | |
92f5a8d4 TL |
6998 | |
6999 | # verify trash dir is clean | |
7000 | self._wait_for_trash_empty() | |
7001 | ||
f67539c2 | 7002 | def test_subvolume_snapshot_clone_cancel_in_progress(self): |
f38dd50b TL |
7003 | subvolume = self._gen_subvol_name() |
7004 | snapshot = self._gen_subvol_snap_name() | |
7005 | clone = self._gen_subvol_clone_name() | |
92f5a8d4 | 7006 | |
92f5a8d4 | 7007 | # create subvolume |
522d829b | 7008 | self._fs_cmd("subvolume", "create", self.volname, subvolume, "--mode=777") |
92f5a8d4 TL |
7009 | |
7010 | # do some IO | |
f67539c2 | 7011 | self._do_subvolume_io(subvolume, number_of_files=128) |
92f5a8d4 TL |
7012 | |
7013 | # snapshot subvolume | |
7014 | self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolume, snapshot) | |
7015 | ||
522d829b TL |
7016 | # Insert delay at the beginning of snapshot clone |
7017 | self.config_set('mgr', 'mgr/volumes/snapshot_clone_delay', 2) | |
7018 | ||
92f5a8d4 TL |
7019 | # schedule a clone |
7020 | self._fs_cmd("subvolume", "snapshot", "clone", self.volname, subvolume, snapshot, clone) | |
7021 | ||
f67539c2 TL |
7022 | # cancel on-going clone |
7023 | self._fs_cmd("clone", "cancel", self.volname, clone) | |
92f5a8d4 | 7024 | |
f67539c2 TL |
7025 | # verify canceled state |
7026 | self._check_clone_canceled(clone) | |
adb31ebb | 7027 | |
92f5a8d4 TL |
7028 | # remove snapshot |
7029 | self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume, snapshot) | |
7030 | ||
adb31ebb TL |
7031 | # remove subvolumes |
7032 | self._fs_cmd("subvolume", "rm", self.volname, subvolume) | |
f67539c2 | 7033 | self._fs_cmd("subvolume", "rm", self.volname, clone, "--force") |
adb31ebb TL |
7034 | |
7035 | # verify trash dir is clean | |
7036 | self._wait_for_trash_empty() | |
7037 | ||
f67539c2 TL |
7038 | def test_subvolume_snapshot_clone_cancel_pending(self): |
7039 | """ | |
7040 | this test is a bit more involved compared to canceling an in-progress clone. | |
7041 | we'd need to ensure that a to-be canceled clone has still not been picked up | |
7042 | by cloner threads. exploit the fact that clones are picked up in an FCFS | |
7043 | fashion and there are four (4) cloner threads by default. When the number of | |
7044 | cloner threads increase, this test _may_ start tripping -- so, the number of | |
7045 | clone operations would need to be jacked up. | |
7046 | """ | |
7047 | # default number of clone threads | |
7048 | NR_THREADS = 4 | |
7049 | # good enough for 4 threads | |
7050 | NR_CLONES = 5 | |
7051 | # yeh, 1gig -- we need the clone to run for sometime | |
7052 | FILE_SIZE_MB = 1024 | |
7053 | ||
f38dd50b TL |
7054 | subvolume = self._gen_subvol_name() |
7055 | snapshot = self._gen_subvol_snap_name() | |
7056 | clones = self._gen_subvol_snap_name(NR_CLONES) | |
adb31ebb | 7057 | |
f67539c2 | 7058 | # create subvolume |
522d829b | 7059 | self._fs_cmd("subvolume", "create", self.volname, subvolume, "--mode=777") |
adb31ebb TL |
7060 | |
7061 | # do some IO | |
f67539c2 | 7062 | self._do_subvolume_io(subvolume, number_of_files=4, file_size=FILE_SIZE_MB) |
adb31ebb TL |
7063 | |
7064 | # snapshot subvolume | |
7065 | self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolume, snapshot) | |
7066 | ||
f38dd50b TL |
7067 | # Disable the snapshot_clone_no_wait config option |
7068 | self.config_set('mgr', 'mgr/volumes/snapshot_clone_no_wait', False) | |
7069 | threads_available = self.config_get('mgr', 'mgr/volumes/snapshot_clone_no_wait') | |
7070 | self.assertEqual(threads_available, 'false') | |
7071 | ||
f67539c2 TL |
7072 | # schedule clones |
7073 | for clone in clones: | |
7074 | self._fs_cmd("subvolume", "snapshot", "clone", self.volname, subvolume, snapshot, clone) | |
adb31ebb | 7075 | |
f67539c2 TL |
7076 | to_wait = clones[0:NR_THREADS] |
7077 | to_cancel = clones[NR_THREADS:] | |
adb31ebb | 7078 | |
f67539c2 TL |
7079 | # cancel pending clones and verify |
7080 | for clone in to_cancel: | |
7081 | status = json.loads(self._fs_cmd("clone", "status", self.volname, clone)) | |
7082 | self.assertEqual(status["status"]["state"], "pending") | |
7083 | self._fs_cmd("clone", "cancel", self.volname, clone) | |
7084 | self._check_clone_canceled(clone) | |
adb31ebb | 7085 | |
f67539c2 TL |
7086 | # let's cancel on-going clones. handle the case where some of the clones |
7087 | # _just_ complete | |
7088 | for clone in list(to_wait): | |
7089 | try: | |
7090 | self._fs_cmd("clone", "cancel", self.volname, clone) | |
7091 | to_cancel.append(clone) | |
7092 | to_wait.remove(clone) | |
7093 | except CommandFailedError as ce: | |
7094 | if ce.exitstatus != errno.EINVAL: | |
7095 | raise RuntimeError("invalid error code when cancelling on-going clone") | |
adb31ebb TL |
7096 | |
7097 | # remove snapshot | |
7098 | self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume, snapshot) | |
92f5a8d4 TL |
7099 | |
7100 | # remove subvolumes | |
7101 | self._fs_cmd("subvolume", "rm", self.volname, subvolume) | |
f67539c2 TL |
7102 | for clone in to_wait: |
7103 | self._fs_cmd("subvolume", "rm", self.volname, clone) | |
7104 | for clone in to_cancel: | |
7105 | self._fs_cmd("subvolume", "rm", self.volname, clone, "--force") | |
92f5a8d4 TL |
7106 | |
7107 | # verify trash dir is clean | |
7108 | self._wait_for_trash_empty() | |
7109 | ||
f67539c2 | 7110 | def test_subvolume_snapshot_clone_different_groups(self): |
f38dd50b TL |
7111 | subvolume = self._gen_subvol_name() |
7112 | snapshot = self._gen_subvol_snap_name() | |
7113 | clone = self._gen_subvol_clone_name() | |
7114 | s_group, c_group = self._gen_subvol_grp_name(2) | |
f67539c2 TL |
7115 | |
7116 | # create groups | |
7117 | self._fs_cmd("subvolumegroup", "create", self.volname, s_group) | |
7118 | self._fs_cmd("subvolumegroup", "create", self.volname, c_group) | |
92f5a8d4 TL |
7119 | |
7120 | # create subvolume | |
522d829b | 7121 | self._fs_cmd("subvolume", "create", self.volname, subvolume, s_group, "--mode=777") |
92f5a8d4 TL |
7122 | |
7123 | # do some IO | |
f67539c2 | 7124 | self._do_subvolume_io(subvolume, subvolume_group=s_group, number_of_files=32) |
92f5a8d4 TL |
7125 | |
7126 | # snapshot subvolume | |
f67539c2 | 7127 | self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolume, snapshot, s_group) |
92f5a8d4 | 7128 | |
92f5a8d4 | 7129 | # schedule a clone |
f67539c2 TL |
7130 | self._fs_cmd("subvolume", "snapshot", "clone", self.volname, subvolume, snapshot, clone, |
7131 | '--group_name', s_group, '--target_group_name', c_group) | |
92f5a8d4 TL |
7132 | |
7133 | # check clone status | |
f67539c2 | 7134 | self._wait_for_clone_to_complete(clone, clone_group=c_group) |
92f5a8d4 | 7135 | |
adb31ebb | 7136 | # verify clone |
f67539c2 | 7137 | self._verify_clone(subvolume, snapshot, clone, source_group=s_group, clone_group=c_group) |
adb31ebb | 7138 | |
92f5a8d4 | 7139 | # remove snapshot |
f67539c2 | 7140 | self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume, snapshot, s_group) |
92f5a8d4 | 7141 | |
92f5a8d4 | 7142 | # remove subvolumes |
f67539c2 TL |
7143 | self._fs_cmd("subvolume", "rm", self.volname, subvolume, s_group) |
7144 | self._fs_cmd("subvolume", "rm", self.volname, clone, c_group) | |
7145 | ||
7146 | # remove groups | |
7147 | self._fs_cmd("subvolumegroup", "rm", self.volname, s_group) | |
7148 | self._fs_cmd("subvolumegroup", "rm", self.volname, c_group) | |
92f5a8d4 TL |
7149 | |
7150 | # verify trash dir is clean | |
7151 | self._wait_for_trash_empty() | |
7152 | ||
f67539c2 | 7153 | def test_subvolume_snapshot_clone_fail_with_remove(self): |
f38dd50b TL |
7154 | subvolume = self._gen_subvol_name() |
7155 | snapshot = self._gen_subvol_snap_name() | |
7156 | clone1, clone2 = self._gen_subvol_clone_name(2) | |
f67539c2 TL |
7157 | |
7158 | pool_capacity = 32 * 1024 * 1024 | |
7159 | # number of files required to fill up 99% of the pool | |
7160 | nr_files = int((pool_capacity * 0.99) / (TestVolumes.DEFAULT_FILE_SIZE * 1024 * 1024)) | |
92f5a8d4 TL |
7161 | |
7162 | # create subvolume | |
522d829b | 7163 | self._fs_cmd("subvolume", "create", self.volname, subvolume, "--mode=777") |
92f5a8d4 TL |
7164 | |
7165 | # do some IO | |
f67539c2 | 7166 | self._do_subvolume_io(subvolume, number_of_files=nr_files) |
92f5a8d4 TL |
7167 | |
7168 | # snapshot subvolume | |
7169 | self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolume, snapshot) | |
7170 | ||
f67539c2 TL |
7171 | # add data pool |
7172 | new_pool = "new_pool" | |
7173 | self.fs.add_data_pool(new_pool) | |
7174 | ||
f38dd50b TL |
7175 | self.run_ceph_cmd("osd", "pool", "set-quota", new_pool, |
7176 | "max_bytes", f"{pool_capacity // 4}") | |
92f5a8d4 TL |
7177 | |
7178 | # schedule a clone | |
f67539c2 | 7179 | self._fs_cmd("subvolume", "snapshot", "clone", self.volname, subvolume, snapshot, clone1, "--pool_layout", new_pool) |
92f5a8d4 | 7180 | |
f67539c2 TL |
7181 | # check clone status -- this should dramatically overshoot the pool quota |
7182 | self._wait_for_clone_to_complete(clone1) | |
92f5a8d4 | 7183 | |
adb31ebb | 7184 | # verify clone |
f67539c2 TL |
7185 | self._verify_clone(subvolume, snapshot, clone1, clone_pool=new_pool) |
7186 | ||
7187 | # wait a bit so that subsequent I/O will give pool full error | |
7188 | time.sleep(120) | |
7189 | ||
7190 | # schedule a clone | |
7191 | self._fs_cmd("subvolume", "snapshot", "clone", self.volname, subvolume, snapshot, clone2, "--pool_layout", new_pool) | |
7192 | ||
7193 | # check clone status | |
7194 | self._wait_for_clone_to_fail(clone2) | |
adb31ebb | 7195 | |
92f5a8d4 TL |
7196 | # remove snapshot |
7197 | self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume, snapshot) | |
7198 | ||
92f5a8d4 TL |
7199 | # remove subvolumes |
7200 | self._fs_cmd("subvolume", "rm", self.volname, subvolume) | |
f67539c2 TL |
7201 | self._fs_cmd("subvolume", "rm", self.volname, clone1) |
7202 | try: | |
7203 | self._fs_cmd("subvolume", "rm", self.volname, clone2) | |
7204 | except CommandFailedError as ce: | |
7205 | if ce.exitstatus != errno.EAGAIN: | |
7206 | raise RuntimeError("invalid error code when trying to remove failed clone") | |
7207 | else: | |
7208 | raise RuntimeError("expected error when removing a failed clone") | |
92f5a8d4 | 7209 | |
f67539c2 TL |
7210 | # ... and with force, failed clone can be removed |
7211 | self._fs_cmd("subvolume", "rm", self.volname, clone2, "--force") | |
92f5a8d4 TL |
7212 | |
7213 | # verify trash dir is clean | |
7214 | self._wait_for_trash_empty() | |
7215 | ||
f67539c2 | 7216 | def test_subvolume_snapshot_clone_on_existing_subvolumes(self): |
f38dd50b TL |
7217 | subvolume1, subvolume2 = self._gen_subvol_name(2) |
7218 | snapshot = self._gen_subvol_snap_name() | |
7219 | clone = self._gen_subvol_clone_name() | |
92f5a8d4 | 7220 | |
f67539c2 | 7221 | # create subvolumes |
522d829b TL |
7222 | self._fs_cmd("subvolume", "create", self.volname, subvolume1, "--mode=777") |
7223 | self._fs_cmd("subvolume", "create", self.volname, subvolume2, "--mode=777") | |
92f5a8d4 TL |
7224 | |
7225 | # do some IO | |
f67539c2 | 7226 | self._do_subvolume_io(subvolume1, number_of_files=32) |
92f5a8d4 TL |
7227 | |
7228 | # snapshot subvolume | |
f67539c2 | 7229 | self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolume1, snapshot) |
92f5a8d4 | 7230 | |
f67539c2 TL |
7231 | # schedule a clone with target as subvolume2 |
7232 | try: | |
7233 | self._fs_cmd("subvolume", "snapshot", "clone", self.volname, subvolume1, snapshot, subvolume2) | |
7234 | except CommandFailedError as ce: | |
7235 | if ce.exitstatus != errno.EEXIST: | |
7236 | raise RuntimeError("invalid error code when cloning to existing subvolume") | |
7237 | else: | |
7238 | raise RuntimeError("expected cloning to fail if the target is an existing subvolume") | |
7239 | ||
7240 | self._fs_cmd("subvolume", "snapshot", "clone", self.volname, subvolume1, snapshot, clone) | |
7241 | ||
7242 | # schedule a clone with target as clone | |
7243 | try: | |
7244 | self._fs_cmd("subvolume", "snapshot", "clone", self.volname, subvolume1, snapshot, clone) | |
7245 | except CommandFailedError as ce: | |
7246 | if ce.exitstatus != errno.EEXIST: | |
7247 | raise RuntimeError("invalid error code when cloning to existing clone") | |
7248 | else: | |
7249 | raise RuntimeError("expected cloning to fail if the target is an existing clone") | |
92f5a8d4 TL |
7250 | |
7251 | # check clone status | |
7252 | self._wait_for_clone_to_complete(clone) | |
7253 | ||
adb31ebb | 7254 | # verify clone |
f67539c2 | 7255 | self._verify_clone(subvolume1, snapshot, clone) |
adb31ebb | 7256 | |
92f5a8d4 | 7257 | # remove snapshot |
f67539c2 | 7258 | self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume1, snapshot) |
92f5a8d4 | 7259 | |
92f5a8d4 | 7260 | # remove subvolumes |
f67539c2 TL |
7261 | self._fs_cmd("subvolume", "rm", self.volname, subvolume1) |
7262 | self._fs_cmd("subvolume", "rm", self.volname, subvolume2) | |
92f5a8d4 TL |
7263 | self._fs_cmd("subvolume", "rm", self.volname, clone) |
7264 | ||
92f5a8d4 TL |
7265 | # verify trash dir is clean |
7266 | self._wait_for_trash_empty() | |
7267 | ||
    def test_subvolume_snapshot_clone_pool_layout(self):
        """
        Clone a snapshot into a different data pool via --pool_layout and
        verify the clone's directory layout lands on that pool.
        """
        subvolume = self._gen_subvol_name()
        snapshot = self._gen_subvol_snap_name()
        clone = self._gen_subvol_clone_name()

        # add data pool (keep its id too -- older kernels report the id, not the name)
        new_pool = "new_pool"
        newid = self.fs.add_data_pool(new_pool)

        # create subvolume
        self._fs_cmd("subvolume", "create", self.volname, subvolume, "--mode=777")

        # do some IO
        self._do_subvolume_io(subvolume, number_of_files=32)

        # snapshot subvolume
        self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolume, snapshot)

        # schedule a clone, directing its data to the new pool
        self._fs_cmd("subvolume", "snapshot", "clone", self.volname, subvolume, snapshot, clone, "--pool_layout", new_pool)

        # check clone status
        self._wait_for_clone_to_complete(clone)

        # verify clone (contents and pool placement)
        self._verify_clone(subvolume, snapshot, clone, clone_pool=new_pool)

        # remove snapshot
        self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume, snapshot)

        # double-check the layout xattr on the clone path itself
        subvol_path = self._get_subvolume_path(self.volname, clone)
        desired_pool = self.mount_a.getfattr(subvol_path, "ceph.dir.layout.pool")
        try:
            self.assertEqual(desired_pool, new_pool)
        except AssertionError:
            self.assertEqual(int(desired_pool), newid) # old kernel returns id

        # remove subvolumes
        self._fs_cmd("subvolume", "rm", self.volname, subvolume)
        self._fs_cmd("subvolume", "rm", self.volname, clone)

        # verify trash dir is clean
        self._wait_for_trash_empty()
7311 | ||
f67539c2 | 7312 | def test_subvolume_snapshot_clone_under_group(self): |
f38dd50b TL |
7313 | subvolume = self._gen_subvol_name() |
7314 | snapshot = self._gen_subvol_snap_name() | |
7315 | clone = self._gen_subvol_clone_name() | |
7316 | group = self._gen_subvol_grp_name() | |
92f5a8d4 | 7317 | |
f67539c2 | 7318 | # create subvolume |
522d829b | 7319 | self._fs_cmd("subvolume", "create", self.volname, subvolume, "--mode=777") |
adb31ebb | 7320 | |
92f5a8d4 | 7321 | # do some IO |
f67539c2 | 7322 | self._do_subvolume_io(subvolume, number_of_files=32) |
92f5a8d4 TL |
7323 | |
7324 | # snapshot subvolume | |
7325 | self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolume, snapshot) | |
7326 | ||
f67539c2 TL |
7327 | # create group |
7328 | self._fs_cmd("subvolumegroup", "create", self.volname, group) | |
adb31ebb | 7329 | |
92f5a8d4 | 7330 | # schedule a clone |
f67539c2 | 7331 | self._fs_cmd("subvolume", "snapshot", "clone", self.volname, subvolume, snapshot, clone, '--target_group_name', group) |
f6b5b4d7 | 7332 | |
92f5a8d4 | 7333 | # check clone status |
f67539c2 | 7334 | self._wait_for_clone_to_complete(clone, clone_group=group) |
92f5a8d4 | 7335 | |
adb31ebb | 7336 | # verify clone |
f67539c2 | 7337 | self._verify_clone(subvolume, snapshot, clone, clone_group=group) |
adb31ebb | 7338 | |
92f5a8d4 TL |
7339 | # remove snapshot |
7340 | self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume, snapshot) | |
7341 | ||
92f5a8d4 TL |
7342 | # remove subvolumes |
7343 | self._fs_cmd("subvolume", "rm", self.volname, subvolume) | |
f67539c2 TL |
7344 | self._fs_cmd("subvolume", "rm", self.volname, clone, group) |
7345 | ||
7346 | # remove group | |
7347 | self._fs_cmd("subvolumegroup", "rm", self.volname, group) | |
92f5a8d4 TL |
7348 | |
7349 | # verify trash dir is clean | |
7350 | self._wait_for_trash_empty() | |
7351 | ||
f67539c2 | 7352 | def test_subvolume_snapshot_clone_with_attrs(self): |
f38dd50b TL |
7353 | subvolume = self._gen_subvol_name() |
7354 | snapshot = self._gen_subvol_snap_name() | |
7355 | clone = self._gen_subvol_clone_name() | |
92f5a8d4 | 7356 | |
f67539c2 TL |
7357 | mode = "777" |
7358 | uid = "1000" | |
7359 | gid = "1000" | |
7360 | new_uid = "1001" | |
7361 | new_gid = "1001" | |
7362 | new_mode = "700" | |
7363 | ||
92f5a8d4 | 7364 | # create subvolume |
f67539c2 | 7365 | self._fs_cmd("subvolume", "create", self.volname, subvolume, "--mode", mode, "--uid", uid, "--gid", gid) |
92f5a8d4 TL |
7366 | |
7367 | # do some IO | |
f67539c2 | 7368 | self._do_subvolume_io(subvolume, number_of_files=32) |
92f5a8d4 TL |
7369 | |
7370 | # snapshot subvolume | |
7371 | self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolume, snapshot) | |
7372 | ||
f67539c2 TL |
7373 | # change subvolume attrs (to ensure clone picks up snapshot attrs) |
7374 | self._do_subvolume_attr_update(subvolume, new_uid, new_gid, new_mode) | |
92f5a8d4 | 7375 | |
f67539c2 TL |
7376 | # schedule a clone |
7377 | self._fs_cmd("subvolume", "snapshot", "clone", self.volname, subvolume, snapshot, clone) | |
92f5a8d4 TL |
7378 | |
7379 | # check clone status | |
7380 | self._wait_for_clone_to_complete(clone) | |
7381 | ||
adb31ebb TL |
7382 | # verify clone |
7383 | self._verify_clone(subvolume, snapshot, clone) | |
7384 | ||
f6b5b4d7 TL |
7385 | # remove snapshot |
7386 | self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume, snapshot) | |
7387 | ||
f6b5b4d7 TL |
7388 | # remove subvolumes |
7389 | self._fs_cmd("subvolume", "rm", self.volname, subvolume) | |
7390 | self._fs_cmd("subvolume", "rm", self.volname, clone) | |
7391 | ||
7392 | # verify trash dir is clean | |
7393 | self._wait_for_trash_empty() | |
7394 | ||
    def test_subvolume_snapshot_clone_with_upgrade(self):
        """
        Yet another poor man's upgrade test -- rather than going through a full
        upgrade cycle, emulate an old-style subvolume by creating its directory
        tree by hand ("going through the wormhole") and verify the clone
        operation works on it.
        Further, ensure the legacy source stays at v1 while the clone is v2.
        """
        subvolume = self._gen_subvol_name()
        snapshot = self._gen_subvol_snap_name()
        clone = self._gen_subvol_clone_name()

        # emulate a old-fashioned subvolume by creating its path directly
        createpath = os.path.join(".", "volumes", "_nogroup", subvolume)
        self.mount_a.run_shell_payload(f"sudo mkdir -p -m 777 {createpath}", omit_sudo=False)

        # add required xattrs to subvolume (legacy subvolumes carry an explicit pool layout)
        default_pool = self.mount_a.getfattr(".", "ceph.dir.layout.pool")
        self.mount_a.setfattr(createpath, 'ceph.dir.layout.pool', default_pool, sudo=True)

        # do some IO
        self._do_subvolume_io(subvolume, number_of_files=64)

        # snapshot subvolume
        self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolume, snapshot)

        # ensure metadata file is in legacy location, with required version v1
        self._assert_meta_location_and_version(self.volname, subvolume, version=1, legacy=True)

        # Insert delay at the beginning of snapshot clone, so the in-progress
        # window below is long enough to attempt the snapshot removal
        self.config_set('mgr', 'mgr/volumes/snapshot_clone_delay', 2)

        # schedule a clone
        self._fs_cmd("subvolume", "snapshot", "clone", self.volname, subvolume, snapshot, clone)

        # snapshot should not be deletable now (clone still in progress -> EAGAIN)
        try:
            self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume, snapshot)
        except CommandFailedError as ce:
            self.assertEqual(ce.exitstatus, errno.EAGAIN, msg="invalid error code when removing source snapshot of a clone")
        else:
            self.fail("expected removing source snapshot of a clone to fail")

        # check clone status
        self._wait_for_clone_to_complete(clone)

        # verify clone
        self._verify_clone(subvolume, snapshot, clone, source_version=1)

        # remove snapshot
        self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume, snapshot)

        # ensure metadata file is in v2 location, with required version v2
        self._assert_meta_location_and_version(self.volname, clone)

        # remove subvolumes
        self._fs_cmd("subvolume", "rm", self.volname, subvolume)
        self._fs_cmd("subvolume", "rm", self.volname, clone)

        # verify trash dir is clean
        self._wait_for_trash_empty()
7455 | ||
f67539c2 TL |
7456 | def test_subvolume_snapshot_reconf_max_concurrent_clones(self): |
7457 | """ | |
7458 | Validate 'max_concurrent_clones' config option | |
7459 | """ | |
7460 | ||
7461 | # get the default number of cloner threads | |
7462 | default_max_concurrent_clones = int(self.config_get('mgr', 'mgr/volumes/max_concurrent_clones')) | |
7463 | self.assertEqual(default_max_concurrent_clones, 4) | |
7464 | ||
7465 | # Increase number of cloner threads | |
7466 | self.config_set('mgr', 'mgr/volumes/max_concurrent_clones', 6) | |
7467 | max_concurrent_clones = int(self.config_get('mgr', 'mgr/volumes/max_concurrent_clones')) | |
7468 | self.assertEqual(max_concurrent_clones, 6) | |
7469 | ||
7470 | # Decrease number of cloner threads | |
7471 | self.config_set('mgr', 'mgr/volumes/max_concurrent_clones', 2) | |
7472 | max_concurrent_clones = int(self.config_get('mgr', 'mgr/volumes/max_concurrent_clones')) | |
7473 | self.assertEqual(max_concurrent_clones, 2) | |
7474 | ||
522d829b TL |
7475 | def test_subvolume_snapshot_config_snapshot_clone_delay(self): |
7476 | """ | |
7477 | Validate 'snapshot_clone_delay' config option | |
7478 | """ | |
7479 | ||
7480 | # get the default delay before starting the clone | |
7481 | default_timeout = int(self.config_get('mgr', 'mgr/volumes/snapshot_clone_delay')) | |
7482 | self.assertEqual(default_timeout, 0) | |
7483 | ||
7484 | # Insert delay of 2 seconds at the beginning of the snapshot clone | |
7485 | self.config_set('mgr', 'mgr/volumes/snapshot_clone_delay', 2) | |
7486 | default_timeout = int(self.config_get('mgr', 'mgr/volumes/snapshot_clone_delay')) | |
7487 | self.assertEqual(default_timeout, 2) | |
7488 | ||
7489 | # Decrease number of cloner threads | |
7490 | self.config_set('mgr', 'mgr/volumes/max_concurrent_clones', 2) | |
7491 | max_concurrent_clones = int(self.config_get('mgr', 'mgr/volumes/max_concurrent_clones')) | |
7492 | self.assertEqual(max_concurrent_clones, 2) | |
7493 | ||
f67539c2 | 7494 | def test_subvolume_under_group_snapshot_clone(self): |
f38dd50b TL |
7495 | subvolume = self._gen_subvol_name() |
7496 | group = self._gen_subvol_grp_name() | |
7497 | snapshot = self._gen_subvol_snap_name() | |
7498 | clone = self._gen_subvol_clone_name() | |
92f5a8d4 | 7499 | |
f67539c2 TL |
7500 | # create group |
7501 | self._fs_cmd("subvolumegroup", "create", self.volname, group) | |
7502 | ||
92f5a8d4 | 7503 | # create subvolume |
522d829b | 7504 | self._fs_cmd("subvolume", "create", self.volname, subvolume, group, "--mode=777") |
92f5a8d4 TL |
7505 | |
7506 | # do some IO | |
f67539c2 | 7507 | self._do_subvolume_io(subvolume, subvolume_group=group, number_of_files=32) |
92f5a8d4 TL |
7508 | |
7509 | # snapshot subvolume | |
f67539c2 | 7510 | self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolume, snapshot, group) |
92f5a8d4 | 7511 | |
92f5a8d4 | 7512 | # schedule a clone |
f67539c2 | 7513 | self._fs_cmd("subvolume", "snapshot", "clone", self.volname, subvolume, snapshot, clone, '--group_name', group) |
92f5a8d4 TL |
7514 | |
7515 | # check clone status | |
7516 | self._wait_for_clone_to_complete(clone) | |
7517 | ||
adb31ebb | 7518 | # verify clone |
f67539c2 | 7519 | self._verify_clone(subvolume, snapshot, clone, source_group=group) |
adb31ebb | 7520 | |
92f5a8d4 | 7521 | # remove snapshot |
f67539c2 | 7522 | self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume, snapshot, group) |
92f5a8d4 | 7523 | |
92f5a8d4 | 7524 | # remove subvolumes |
f67539c2 | 7525 | self._fs_cmd("subvolume", "rm", self.volname, subvolume, group) |
92f5a8d4 TL |
7526 | self._fs_cmd("subvolume", "rm", self.volname, clone) |
7527 | ||
f67539c2 TL |
7528 | # remove group |
7529 | self._fs_cmd("subvolumegroup", "rm", self.volname, group) | |
7530 | ||
92f5a8d4 TL |
7531 | # verify trash dir is clean |
7532 | self._wait_for_trash_empty() | |
7533 | ||
    def test_subvolume_snapshot_clone_with_no_wait_enabled(self):
        """
        With 'snapshot_clone_no_wait' enabled and all cloner threads busy, a
        further clone request must be rejected immediately with EAGAIN; once a
        thread frees up, the same request succeeds.
        """
        subvolume = self._gen_subvol_name()
        snapshot = self._gen_subvol_snap_name()
        clone1, clone2, clone3 = self._gen_subvol_clone_name(3)

        # create subvolume
        self._fs_cmd("subvolume", "create", self.volname, subvolume, "--mode=777")

        # do some IO
        self._do_subvolume_io(subvolume, number_of_files=10)

        # snapshot subvolume
        self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolume, snapshot)

        # Decrease number of cloner threads so two clones saturate capacity
        self.config_set('mgr', 'mgr/volumes/max_concurrent_clones', 2)
        max_concurrent_clones = int(self.config_get('mgr', 'mgr/volumes/max_concurrent_clones'))
        self.assertEqual(max_concurrent_clones, 2)

        # Enable the snapshot_clone_no_wait config option
        self.config_set('mgr', 'mgr/volumes/snapshot_clone_no_wait', True)
        threads_available = self.config_get('mgr', 'mgr/volumes/snapshot_clone_no_wait')
        self.assertEqual(threads_available, 'true')

        # Insert delay of 15 seconds at the beginning of the snapshot clone,
        # keeping both threads busy long enough to attempt a third clone
        self.config_set('mgr', 'mgr/volumes/snapshot_clone_delay', 15)

        # schedule a clone1
        self._fs_cmd("subvolume", "snapshot", "clone", self.volname, subvolume, snapshot, clone1)

        # schedule a clone2
        self._fs_cmd("subvolume", "snapshot", "clone", self.volname, subvolume, snapshot, clone2)

        # schedule a clone3 -- no thread is free, so this must fail with EAGAIN
        cmd_ret = self.mgr_cluster.mon_manager.run_cluster_cmd(
            args=["fs", "subvolume", "snapshot", "clone", self.volname, subvolume, snapshot, clone3], check_status=False, stdout=StringIO(),
            stderr=StringIO())
        self.assertEqual(cmd_ret.returncode, errno.EAGAIN, "Expecting EAGAIN error")

        # check clone1 status
        self._wait_for_clone_to_complete(clone1)

        # verify clone1
        self._verify_clone(subvolume, snapshot, clone1)

        # check clone2 status
        self._wait_for_clone_to_complete(clone2)

        # verify clone2
        self._verify_clone(subvolume, snapshot, clone2)

        # schedule clone3 again; threads are free, it should be successful this time
        self._fs_cmd("subvolume", "snapshot", "clone", self.volname, subvolume, snapshot, clone3)

        # check clone3 status
        self._wait_for_clone_to_complete(clone3)

        # verify clone3
        self._verify_clone(subvolume, snapshot, clone3)

        # set number of cloner threads to default
        self.config_set('mgr', 'mgr/volumes/max_concurrent_clones', 4)
        max_concurrent_clones = int(self.config_get('mgr', 'mgr/volumes/max_concurrent_clones'))
        self.assertEqual(max_concurrent_clones, 4)

        # set the snapshot_clone_delay to default
        self.config_set('mgr', 'mgr/volumes/snapshot_clone_delay', 0)

        # remove snapshot
        self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume, snapshot)

        # remove subvolumes
        self._fs_cmd("subvolume", "rm", self.volname, subvolume)
        self._fs_cmd("subvolume", "rm", self.volname, clone1)
        self._fs_cmd("subvolume", "rm", self.volname, clone2)
        self._fs_cmd("subvolume", "rm", self.volname, clone3)

        # verify trash dir is clean
        self._wait_for_trash_empty()
7613 | ||
    def test_subvolume_snapshot_clone_with_no_wait_not_enabled(self):
        """
        With 'snapshot_clone_no_wait' disabled, clone requests beyond the
        cloner-thread capacity queue up and eventually complete instead of
        being rejected with EAGAIN.
        """
        subvolume = self._gen_subvol_name()
        snapshot = self._gen_subvol_snap_name()
        clone1, clone2, clone3 = self._gen_subvol_clone_name(3)

        # create subvolume
        self._fs_cmd("subvolume", "create", self.volname, subvolume, "--mode=777")

        # do some IO
        self._do_subvolume_io(subvolume, number_of_files=10)

        # snapshot subvolume
        self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolume, snapshot)

        # Disable the snapshot_clone_no_wait config option
        self.config_set('mgr', 'mgr/volumes/snapshot_clone_no_wait', False)
        threads_available = self.config_get('mgr', 'mgr/volumes/snapshot_clone_no_wait')
        self.assertEqual(threads_available, 'false')

        # Decrease number of cloner threads so the third clone exceeds capacity
        self.config_set('mgr', 'mgr/volumes/max_concurrent_clones', 2)
        max_concurrent_clones = int(self.config_get('mgr', 'mgr/volumes/max_concurrent_clones'))
        self.assertEqual(max_concurrent_clones, 2)

        # schedule a clone1
        self._fs_cmd("subvolume", "snapshot", "clone", self.volname, subvolume, snapshot, clone1)

        # schedule a clone2
        self._fs_cmd("subvolume", "snapshot", "clone", self.volname, subvolume, snapshot, clone2)

        # schedule a clone3 -- queues rather than failing, since no_wait is off
        self._fs_cmd("subvolume", "snapshot", "clone", self.volname, subvolume, snapshot, clone3)

        # check clone1 status
        self._wait_for_clone_to_complete(clone1)

        # verify clone1
        self._verify_clone(subvolume, snapshot, clone1)

        # check clone2 status
        self._wait_for_clone_to_complete(clone2)

        # verify clone2
        self._verify_clone(subvolume, snapshot, clone2)

        # check clone3 status
        self._wait_for_clone_to_complete(clone3)

        # verify clone3
        self._verify_clone(subvolume, snapshot, clone3)

        # set the snapshot_clone_no_wait config option to default
        self.config_set('mgr', 'mgr/volumes/snapshot_clone_no_wait', True)
        threads_available = self.config_get('mgr', 'mgr/volumes/snapshot_clone_no_wait')
        self.assertEqual(threads_available, 'true')

        # set number of cloner threads to default
        self.config_set('mgr', 'mgr/volumes/max_concurrent_clones', 4)
        max_concurrent_clones = int(self.config_get('mgr', 'mgr/volumes/max_concurrent_clones'))
        self.assertEqual(max_concurrent_clones, 4)

        # remove snapshot
        self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume, snapshot)

        # remove subvolumes
        self._fs_cmd("subvolume", "rm", self.volname, subvolume)
        self._fs_cmd("subvolume", "rm", self.volname, clone1)
        self._fs_cmd("subvolume", "rm", self.volname, clone2)
        self._fs_cmd("subvolume", "rm", self.volname, clone3)

        # verify trash dir is clean
        self._wait_for_trash_empty()
7686 | ||
f67539c2 TL |
7687 | |
7688 | class TestMisc(TestVolumesHelper): | |
7689 | """Miscellaneous tests related to FS volume, subvolume group, and subvolume operations.""" | |
7690 | def test_connection_expiration(self): | |
7691 | # unmount any cephfs mounts | |
7692 | for i in range(0, self.CLIENTS_REQUIRED): | |
7693 | self.mounts[i].umount_wait() | |
7694 | sessions = self._session_list() | |
7695 | self.assertLessEqual(len(sessions), 1) # maybe mgr is already mounted | |
7696 | ||
7697 | # Get the mgr to definitely mount cephfs | |
f38dd50b | 7698 | subvolume = self._gen_subvol_name() |
f67539c2 TL |
7699 | self._fs_cmd("subvolume", "create", self.volname, subvolume) |
7700 | sessions = self._session_list() | |
7701 | self.assertEqual(len(sessions), 1) | |
92f5a8d4 | 7702 | |
f67539c2 TL |
7703 | # Now wait for the mgr to expire the connection: |
7704 | self.wait_until_evicted(sessions[0]['id'], timeout=90) | |
7705 | ||
7706 | def test_mgr_eviction(self): | |
7707 | # unmount any cephfs mounts | |
7708 | for i in range(0, self.CLIENTS_REQUIRED): | |
7709 | self.mounts[i].umount_wait() | |
7710 | sessions = self._session_list() | |
7711 | self.assertLessEqual(len(sessions), 1) # maybe mgr is already mounted | |
7712 | ||
7713 | # Get the mgr to definitely mount cephfs | |
f38dd50b | 7714 | subvolume = self._gen_subvol_name() |
92f5a8d4 | 7715 | self._fs_cmd("subvolume", "create", self.volname, subvolume) |
f67539c2 TL |
7716 | sessions = self._session_list() |
7717 | self.assertEqual(len(sessions), 1) | |
92f5a8d4 | 7718 | |
f67539c2 TL |
7719 | # Now fail the mgr, check the session was evicted |
7720 | mgr = self.mgr_cluster.get_active_id() | |
7721 | self.mgr_cluster.mgr_fail(mgr) | |
7722 | self.wait_until_evicted(sessions[0]['id']) | |
92f5a8d4 | 7723 | |
f67539c2 TL |
7724 | def test_names_can_only_be_goodchars(self): |
7725 | """ | |
7726 | Test the creating vols, subvols subvolgroups fails when their names uses | |
7727 | characters beyond [a-zA-Z0-9 -_.]. | |
7728 | """ | |
7729 | volname, badname = 'testvol', 'abcd@#' | |
92f5a8d4 | 7730 | |
f67539c2 TL |
7731 | with self.assertRaises(CommandFailedError): |
7732 | self._fs_cmd('volume', 'create', badname) | |
7733 | self._fs_cmd('volume', 'create', volname) | |
92f5a8d4 | 7734 | |
f67539c2 TL |
7735 | with self.assertRaises(CommandFailedError): |
7736 | self._fs_cmd('subvolumegroup', 'create', volname, badname) | |
92f5a8d4 | 7737 | |
f67539c2 TL |
7738 | with self.assertRaises(CommandFailedError): |
7739 | self._fs_cmd('subvolume', 'create', volname, badname) | |
7740 | self._fs_cmd('volume', 'rm', volname, '--yes-i-really-mean-it') | |
92f5a8d4 | 7741 | |
f67539c2 TL |
    def test_subvolume_ops_on_nonexistent_vol(self):
        """
        Every subvolume, snapshot, clone-status and subvolumegroup operation
        issued against a volume that does not exist must fail with ENOENT.
        """
        # tests the fs subvolume operations on non existing volume

        volname = "non_existent_subvolume"  # deliberately never created

        # try subvolume operations
        for op in ("create", "rm", "getpath", "info", "resize", "pin", "ls"):
            try:
                if op == "resize":
                    self._fs_cmd("subvolume", "resize", volname, "subvolname_1", "inf")
                elif op == "pin":
                    self._fs_cmd("subvolume", "pin", volname, "subvolname_1", "export", "1")
                elif op == "ls":
                    self._fs_cmd("subvolume", "ls", volname)
                else:
                    self._fs_cmd("subvolume", op, volname, "subvolume_1")
            except CommandFailedError as ce:
                self.assertEqual(ce.exitstatus, errno.ENOENT)
            else:
                self.fail("expected the 'fs subvolume {0}' command to fail".format(op))

        # try subvolume snapshot operations and clone create
        for op in ("create", "rm", "info", "protect", "unprotect", "ls", "clone"):
            try:
                if op == "ls":
                    self._fs_cmd("subvolume", "snapshot", op, volname, "subvolume_1")
                elif op == "clone":
                    self._fs_cmd("subvolume", "snapshot", op, volname, "subvolume_1", "snapshot_1", "clone_1")
                else:
                    self._fs_cmd("subvolume", "snapshot", op, volname, "subvolume_1", "snapshot_1")
            except CommandFailedError as ce:
                self.assertEqual(ce.exitstatus, errno.ENOENT)
            else:
                self.fail("expected the 'fs subvolume snapshot {0}' command to fail".format(op))

        # try, clone status
        try:
            self._fs_cmd("clone", "status", volname, "clone_1")
        except CommandFailedError as ce:
            self.assertEqual(ce.exitstatus, errno.ENOENT)
        else:
            self.fail("expected the 'fs clone status' command to fail")

        # try subvolumegroup operations
        for op in ("create", "rm", "getpath", "pin", "ls"):
            try:
                if op == "pin":
                    self._fs_cmd("subvolumegroup", "pin", volname, "group_1", "export", "0")
                elif op == "ls":
                    self._fs_cmd("subvolumegroup", op, volname)
                else:
                    self._fs_cmd("subvolumegroup", op, volname, "group_1")
            except CommandFailedError as ce:
                self.assertEqual(ce.exitstatus, errno.ENOENT)
            else:
                self.fail("expected the 'fs subvolumegroup {0}' command to fail".format(op))

        # try subvolumegroup snapshot operations
        for op in ("create", "rm", "ls"):
            try:
                if op == "ls":
                    self._fs_cmd("subvolumegroup", "snapshot", op, volname, "group_1")
                else:
                    self._fs_cmd("subvolumegroup", "snapshot", op, volname, "group_1", "snapshot_1")
            except CommandFailedError as ce:
                self.assertEqual(ce.exitstatus, errno.ENOENT)
            else:
                self.fail("expected the 'fs subvolumegroup snapshot {0}' command to fail".format(op))
adb31ebb | 7810 | |
    def test_subvolume_upgrade_legacy_to_v1(self):
        """
        poor man's upgrade test -- rather than going through a full upgrade cycle,
        emulate subvolumes by going through the wormhole and verify if they are
        accessible.
        further ensure that a legacy volume is not updated to v2.
        """
        subvolume1, subvolume2 = self._gen_subvol_name(2)
        group = self._gen_subvol_grp_name()

        # emulate a old-fashioned subvolume -- one in the default group and
        # the other in a custom group
        createpath1 = os.path.join(".", "volumes", "_nogroup", subvolume1)
        self.mount_a.run_shell(['sudo', 'mkdir', '-p', createpath1], omit_sudo=False)

        # create group
        createpath2 = os.path.join(".", "volumes", group, subvolume2)
        self.mount_a.run_shell(['sudo', 'mkdir', '-p', createpath2], omit_sudo=False)

        # this would auto-upgrade on access without anyone noticing
        subvolpath1 = self._fs_cmd("subvolume", "getpath", self.volname, subvolume1)
        self.assertNotEqual(subvolpath1, None)
        subvolpath1 = subvolpath1.rstrip() # remove "/" prefix and any trailing newline

        subvolpath2 = self._fs_cmd("subvolume", "getpath", self.volname, subvolume2, group)
        self.assertNotEqual(subvolpath2, None)
        subvolpath2 = subvolpath2.rstrip() # remove "/" prefix and any trailing newline

        # and... the subvolume path returned should be what we created behind the scene
        # (createpath starts with "./", so [1:] drops the leading "." for comparison)
        self.assertEqual(createpath1[1:], subvolpath1)
        self.assertEqual(createpath2[1:], subvolpath2)

        # ensure metadata file is in legacy location, with required version v1
        self._assert_meta_location_and_version(self.volname, subvolume1, version=1, legacy=True)
        self._assert_meta_location_and_version(self.volname, subvolume2, subvol_group=group, version=1, legacy=True)

        # remove subvolume
        self._fs_cmd("subvolume", "rm", self.volname, subvolume1)
        self._fs_cmd("subvolume", "rm", self.volname, subvolume2, group)

        # verify trash dir is clean
        self._wait_for_trash_empty()

        # remove group
        self._fs_cmd("subvolumegroup", "rm", self.volname, group)
92f5a8d4 | 7856 | |
f67539c2 TL |
7857 | def test_subvolume_no_upgrade_v1_sanity(self): |
7858 | """ | |
7859 | poor man's upgrade test -- theme continues... | |
7860 | ||
7861 | This test is to ensure v1 subvolumes are retained as is, due to a snapshot being present, and runs through | |
7862 | a series of operations on the v1 subvolume to ensure they work as expected. | |
7863 | """ | |
7864 | subvol_md = ["atime", "bytes_pcent", "bytes_quota", "bytes_used", "created_at", "ctime", | |
7865 | "data_pool", "gid", "mode", "mon_addrs", "mtime", "path", "pool_namespace", | |
7866 | "type", "uid", "features", "state"] | |
2a845540 | 7867 | snap_md = ["created_at", "data_pool", "has_pending_clones"] |
f67539c2 | 7868 | |
f38dd50b TL |
7869 | subvolume = self._gen_subvol_name() |
7870 | snapshot = self._gen_subvol_snap_name() | |
7871 | clone1, clone2 = self._gen_subvol_clone_name(2) | |
f67539c2 TL |
7872 | mode = "777" |
7873 | uid = "1000" | |
7874 | gid = "1000" | |
92f5a8d4 | 7875 | |
f67539c2 TL |
7876 | # emulate a v1 subvolume -- in the default group |
7877 | subvolume_path = self._create_v1_subvolume(subvolume) | |
92f5a8d4 | 7878 | |
f67539c2 TL |
7879 | # getpath |
7880 | subvolpath = self._get_subvolume_path(self.volname, subvolume) | |
7881 | self.assertEqual(subvolpath, subvolume_path) | |
92f5a8d4 | 7882 | |
f67539c2 TL |
7883 | # ls |
7884 | subvolumes = json.loads(self._fs_cmd('subvolume', 'ls', self.volname)) | |
7885 | self.assertEqual(len(subvolumes), 1, "subvolume ls count mismatch, expected '1', found {0}".format(len(subvolumes))) | |
7886 | self.assertEqual(subvolumes[0]['name'], subvolume, | |
7887 | "subvolume name mismatch in ls output, expected '{0}', found '{1}'".format(subvolume, subvolumes[0]['name'])) | |
92f5a8d4 | 7888 | |
f67539c2 TL |
7889 | # info |
7890 | subvol_info = json.loads(self._get_subvolume_info(self.volname, subvolume)) | |
7891 | for md in subvol_md: | |
7892 | self.assertIn(md, subvol_info, "'{0}' key not present in metadata of subvolume".format(md)) | |
92f5a8d4 | 7893 | |
f67539c2 TL |
7894 | self.assertEqual(subvol_info["state"], "complete", |
7895 | msg="expected state to be 'complete', found '{0}".format(subvol_info["state"])) | |
7896 | self.assertEqual(len(subvol_info["features"]), 2, | |
7897 | msg="expected 1 feature, found '{0}' ({1})".format(len(subvol_info["features"]), subvol_info["features"])) | |
7898 | for feature in ['snapshot-clone', 'snapshot-autoprotect']: | |
7899 | self.assertIn(feature, subvol_info["features"], msg="expected feature '{0}' in subvolume".format(feature)) | |
92f5a8d4 | 7900 | |
f67539c2 TL |
7901 | # resize |
7902 | nsize = self.DEFAULT_FILE_SIZE*1024*1024*10 | |
7903 | self._fs_cmd("subvolume", "resize", self.volname, subvolume, str(nsize)) | |
7904 | subvol_info = json.loads(self._get_subvolume_info(self.volname, subvolume)) | |
7905 | for md in subvol_md: | |
7906 | self.assertIn(md, subvol_info, "'{0}' key not present in metadata of subvolume".format(md)) | |
7907 | self.assertEqual(subvol_info["bytes_quota"], nsize, "bytes_quota should be set to '{0}'".format(nsize)) | |
92f5a8d4 | 7908 | |
f67539c2 TL |
7909 | # create (idempotent) (change some attrs, to ensure attrs are preserved from the snapshot on clone) |
7910 | self._fs_cmd("subvolume", "create", self.volname, subvolume, "--mode", mode, "--uid", uid, "--gid", gid) | |
92f5a8d4 | 7911 | |
f67539c2 TL |
7912 | # do some IO |
7913 | self._do_subvolume_io(subvolume, number_of_files=8) | |
494da23a | 7914 | |
f67539c2 TL |
7915 | # snap-create |
7916 | self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolume, snapshot) | |
9f95a23c | 7917 | |
f67539c2 TL |
7918 | # clone |
7919 | self._fs_cmd("subvolume", "snapshot", "clone", self.volname, subvolume, snapshot, clone1) | |
9f95a23c | 7920 | |
f67539c2 TL |
7921 | # check clone status |
7922 | self._wait_for_clone_to_complete(clone1) | |
9f95a23c | 7923 | |
f67539c2 TL |
7924 | # ensure clone is v2 |
7925 | self._assert_meta_location_and_version(self.volname, clone1, version=2) | |
9f95a23c | 7926 | |
f67539c2 TL |
7927 | # verify clone |
7928 | self._verify_clone(subvolume, snapshot, clone1, source_version=1) | |
9f95a23c | 7929 | |
f67539c2 TL |
7930 | # clone (older snapshot) |
7931 | self._fs_cmd("subvolume", "snapshot", "clone", self.volname, subvolume, 'fake', clone2) | |
9f95a23c TL |
7932 | |
7933 | # check clone status | |
f67539c2 TL |
7934 | self._wait_for_clone_to_complete(clone2) |
7935 | ||
7936 | # ensure clone is v2 | |
7937 | self._assert_meta_location_and_version(self.volname, clone2, version=2) | |
9f95a23c | 7938 | |
adb31ebb | 7939 | # verify clone |
f67539c2 TL |
7940 | # TODO: rentries will mismatch till this is fixed https://tracker.ceph.com/issues/46747 |
7941 | #self._verify_clone(subvolume, 'fake', clone2, source_version=1) | |
adb31ebb | 7942 | |
f67539c2 TL |
7943 | # snap-info |
7944 | snap_info = json.loads(self._get_subvolume_snapshot_info(self.volname, subvolume, snapshot)) | |
7945 | for md in snap_md: | |
7946 | self.assertIn(md, snap_info, "'{0}' key not present in metadata of snapshot".format(md)) | |
7947 | self.assertEqual(snap_info["has_pending_clones"], "no") | |
7948 | ||
7949 | # snap-ls | |
7950 | subvol_snapshots = json.loads(self._fs_cmd('subvolume', 'snapshot', 'ls', self.volname, subvolume)) | |
7951 | self.assertEqual(len(subvol_snapshots), 2, "subvolume ls count mismatch, expected 2', found {0}".format(len(subvol_snapshots))) | |
7952 | snapshotnames = [snapshot['name'] for snapshot in subvol_snapshots] | |
7953 | for name in [snapshot, 'fake']: | |
7954 | self.assertIn(name, snapshotnames, msg="expected snapshot '{0}' in subvolume snapshot ls".format(name)) | |
7955 | ||
7956 | # snap-rm | |
9f95a23c | 7957 | self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume, snapshot) |
f67539c2 | 7958 | self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume, "fake") |
9f95a23c | 7959 | |
f67539c2 TL |
7960 | # ensure volume is still at version 1 |
7961 | self._assert_meta_location_and_version(self.volname, subvolume, version=1) | |
7962 | ||
7963 | # rm | |
9f95a23c | 7964 | self._fs_cmd("subvolume", "rm", self.volname, subvolume) |
f67539c2 TL |
7965 | self._fs_cmd("subvolume", "rm", self.volname, clone1) |
7966 | self._fs_cmd("subvolume", "rm", self.volname, clone2) | |
9f95a23c TL |
7967 | |
7968 | # verify trash dir is clean | |
7969 | self._wait_for_trash_empty() | |
7970 | ||
f67539c2 TL |
7971 | def test_subvolume_no_upgrade_v1_to_v2(self): |
7972 | """ | |
7973 | poor man's upgrade test -- theme continues... | |
7974 | ensure v1 to v2 upgrades are not done automatically due to various states of v1 | |
7975 | """ | |
f38dd50b TL |
7976 | subvolume1, subvolume2, subvolume3 = self._gen_subvol_name(3) |
7977 | group = self._gen_subvol_grp_name() | |
9f95a23c | 7978 | |
f67539c2 TL |
7979 | # emulate a v1 subvolume -- in the default group |
7980 | subvol1_path = self._create_v1_subvolume(subvolume1) | |
9f95a23c | 7981 | |
f67539c2 TL |
7982 | # emulate a v1 subvolume -- in a custom group |
7983 | subvol2_path = self._create_v1_subvolume(subvolume2, subvol_group=group) | |
9f95a23c | 7984 | |
f67539c2 TL |
7985 | # emulate a v1 subvolume -- in a clone pending state |
7986 | self._create_v1_subvolume(subvolume3, subvol_type='clone', has_snapshot=False, state='pending') | |
9f95a23c | 7987 | |
f67539c2 TL |
7988 | # this would attempt auto-upgrade on access, but fail to do so as snapshots exist |
7989 | subvolpath1 = self._get_subvolume_path(self.volname, subvolume1) | |
7990 | self.assertEqual(subvolpath1, subvol1_path) | |
9f95a23c | 7991 | |
f67539c2 TL |
7992 | subvolpath2 = self._get_subvolume_path(self.volname, subvolume2, group_name=group) |
7993 | self.assertEqual(subvolpath2, subvol2_path) | |
9f95a23c | 7994 | |
f67539c2 TL |
7995 | # this would attempt auto-upgrade on access, but fail to do so as volume is not complete |
7996 | # use clone status, as only certain operations are allowed in pending state | |
7997 | status = json.loads(self._fs_cmd("clone", "status", self.volname, subvolume3)) | |
7998 | self.assertEqual(status["status"]["state"], "pending") | |
9f95a23c | 7999 | |
9f95a23c | 8000 | # remove snapshot |
f67539c2 TL |
8001 | self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume1, "fake") |
8002 | self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume2, "fake", group) | |
9f95a23c | 8003 | |
f67539c2 TL |
8004 | # ensure metadata file is in v1 location, with version retained as v1 |
8005 | self._assert_meta_location_and_version(self.volname, subvolume1, version=1) | |
8006 | self._assert_meta_location_and_version(self.volname, subvolume2, subvol_group=group, version=1) | |
8007 | ||
8008 | # remove subvolume | |
8009 | self._fs_cmd("subvolume", "rm", self.volname, subvolume1) | |
8010 | self._fs_cmd("subvolume", "rm", self.volname, subvolume2, group) | |
8011 | try: | |
8012 | self._fs_cmd("subvolume", "rm", self.volname, subvolume3) | |
8013 | except CommandFailedError as ce: | |
8014 | self.assertEqual(ce.exitstatus, errno.EAGAIN, "invalid error code on rm of subvolume undergoing clone") | |
8015 | else: | |
8016 | self.fail("expected rm of subvolume undergoing clone to fail") | |
8017 | ||
8018 | # ensure metadata file is in v1 location, with version retained as v1 | |
8019 | self._assert_meta_location_and_version(self.volname, subvolume3, version=1) | |
8020 | self._fs_cmd("subvolume", "rm", self.volname, subvolume3, "--force") | |
8021 | ||
8022 | # verify list subvolumes returns an empty list | |
8023 | subvolumels = json.loads(self._fs_cmd('subvolume', 'ls', self.volname)) | |
8024 | self.assertEqual(len(subvolumels), 0) | |
9f95a23c TL |
8025 | |
8026 | # verify trash dir is clean | |
8027 | self._wait_for_trash_empty() | |
8028 | ||
f67539c2 | 8029 | def test_subvolume_upgrade_v1_to_v2(self): |
9f95a23c | 8030 | """ |
f67539c2 TL |
8031 | poor man's upgrade test -- theme continues... |
8032 | ensure v1 to v2 upgrades work | |
9f95a23c | 8033 | """ |
f38dd50b TL |
8034 | subvolume1, subvolume2 = self._gen_subvol_name(2) |
8035 | group = self._gen_subvol_grp_name() | |
9f95a23c | 8036 | |
f67539c2 TL |
8037 | # emulate a v1 subvolume -- in the default group |
8038 | subvol1_path = self._create_v1_subvolume(subvolume1, has_snapshot=False) | |
9f95a23c | 8039 | |
f67539c2 TL |
8040 | # emulate a v1 subvolume -- in a custom group |
8041 | subvol2_path = self._create_v1_subvolume(subvolume2, subvol_group=group, has_snapshot=False) | |
9f95a23c | 8042 | |
f67539c2 TL |
8043 | # this would attempt auto-upgrade on access |
8044 | subvolpath1 = self._get_subvolume_path(self.volname, subvolume1) | |
8045 | self.assertEqual(subvolpath1, subvol1_path) | |
9f95a23c | 8046 | |
f67539c2 TL |
8047 | subvolpath2 = self._get_subvolume_path(self.volname, subvolume2, group_name=group) |
8048 | self.assertEqual(subvolpath2, subvol2_path) | |
9f95a23c | 8049 | |
f67539c2 TL |
8050 | # ensure metadata file is in v2 location, with version retained as v2 |
8051 | self._assert_meta_location_and_version(self.volname, subvolume1, version=2) | |
8052 | self._assert_meta_location_and_version(self.volname, subvolume2, subvol_group=group, version=2) | |
9f95a23c | 8053 | |
f67539c2 TL |
8054 | # remove subvolume |
8055 | self._fs_cmd("subvolume", "rm", self.volname, subvolume1) | |
8056 | self._fs_cmd("subvolume", "rm", self.volname, subvolume2, group) | |
9f95a23c TL |
8057 | |
8058 | # verify trash dir is clean | |
8059 | self._wait_for_trash_empty() | |
0948533f TL |
8060 | |
8061 | def test_malicious_metafile_on_legacy_to_v1_upgrade(self): | |
8062 | """ | |
8063 | Validate handcrafted .meta file on legacy subvol root doesn't break the system | |
8064 | on legacy subvol upgrade to v1 | |
8065 | poor man's upgrade test -- theme continues... | |
8066 | """ | |
f38dd50b | 8067 | subvol1, subvol2 = self._gen_subvol_name(2) |
0948533f TL |
8068 | |
8069 | # emulate a old-fashioned subvolume in the default group | |
8070 | createpath1 = os.path.join(".", "volumes", "_nogroup", subvol1) | |
2a845540 | 8071 | self.mount_a.run_shell(['sudo', 'mkdir', '-p', createpath1], omit_sudo=False) |
0948533f TL |
8072 | |
8073 | # add required xattrs to subvolume | |
8074 | default_pool = self.mount_a.getfattr(".", "ceph.dir.layout.pool") | |
8075 | self.mount_a.setfattr(createpath1, 'ceph.dir.layout.pool', default_pool, sudo=True) | |
8076 | ||
8077 | # create v2 subvolume | |
8078 | self._fs_cmd("subvolume", "create", self.volname, subvol2) | |
8079 | ||
8080 | # Create malicious .meta file in legacy subvolume root. Copy v2 subvolume | |
8081 | # .meta into legacy subvol1's root | |
8082 | subvol2_metapath = os.path.join(".", "volumes", "_nogroup", subvol2, ".meta") | |
2a845540 | 8083 | self.mount_a.run_shell(['sudo', 'cp', subvol2_metapath, createpath1], omit_sudo=False) |
0948533f TL |
8084 | |
8085 | # Upgrade legacy subvol1 to v1 | |
8086 | subvolpath1 = self._fs_cmd("subvolume", "getpath", self.volname, subvol1) | |
8087 | self.assertNotEqual(subvolpath1, None) | |
8088 | subvolpath1 = subvolpath1.rstrip() | |
8089 | ||
8090 | # the subvolume path returned should not be of subvol2 from handcrafted | |
8091 | # .meta file | |
8092 | self.assertEqual(createpath1[1:], subvolpath1) | |
8093 | ||
8094 | # ensure metadata file is in legacy location, with required version v1 | |
8095 | self._assert_meta_location_and_version(self.volname, subvol1, version=1, legacy=True) | |
8096 | ||
8097 | # Authorize alice authID read-write access to subvol1. Verify it authorizes subvol1 path and not subvol2 | |
8098 | # path whose '.meta' file is copied to subvol1 root | |
8099 | authid1 = "alice" | |
8100 | self._fs_cmd("subvolume", "authorize", self.volname, subvol1, authid1) | |
8101 | ||
8102 | # Validate that the mds path added is of subvol1 and not of subvol2 | |
f38dd50b | 8103 | out = json.loads(self.get_ceph_cmd_stdout("auth", "get", "client.alice", "--format=json-pretty")) |
0948533f TL |
8104 | self.assertEqual("client.alice", out[0]["entity"]) |
8105 | self.assertEqual("allow rw path={0}".format(createpath1[1:]), out[0]["caps"]["mds"]) | |
8106 | ||
8107 | # remove subvolume | |
8108 | self._fs_cmd("subvolume", "rm", self.volname, subvol1) | |
8109 | self._fs_cmd("subvolume", "rm", self.volname, subvol2) | |
8110 | ||
8111 | # verify trash dir is clean | |
8112 | self._wait_for_trash_empty() | |
8113 | ||
8114 | def test_binary_metafile_on_legacy_to_v1_upgrade(self): | |
8115 | """ | |
8116 | Validate binary .meta file on legacy subvol root doesn't break the system | |
8117 | on legacy subvol upgrade to v1 | |
8118 | poor man's upgrade test -- theme continues... | |
8119 | """ | |
f38dd50b TL |
8120 | subvol = self._gen_subvol_name() |
8121 | group = self._gen_subvol_grp_name() | |
0948533f TL |
8122 | |
8123 | # emulate a old-fashioned subvolume -- in a custom group | |
8124 | createpath = os.path.join(".", "volumes", group, subvol) | |
2a845540 | 8125 | self.mount_a.run_shell(['sudo', 'mkdir', '-p', createpath], omit_sudo=False) |
0948533f TL |
8126 | |
8127 | # add required xattrs to subvolume | |
8128 | default_pool = self.mount_a.getfattr(".", "ceph.dir.layout.pool") | |
8129 | self.mount_a.setfattr(createpath, 'ceph.dir.layout.pool', default_pool, sudo=True) | |
8130 | ||
8131 | # Create unparseable binary .meta file on legacy subvol's root | |
8132 | meta_contents = os.urandom(4096) | |
8133 | meta_filepath = os.path.join(self.mount_a.mountpoint, createpath, ".meta") | |
8134 | self.mount_a.client_remote.write_file(meta_filepath, meta_contents, sudo=True) | |
8135 | ||
8136 | # Upgrade legacy subvol to v1 | |
8137 | subvolpath = self._fs_cmd("subvolume", "getpath", self.volname, subvol, group) | |
8138 | self.assertNotEqual(subvolpath, None) | |
8139 | subvolpath = subvolpath.rstrip() | |
8140 | ||
8141 | # The legacy subvolume path should be returned for subvol. | |
8142 | # Should ignore unparseable binary .meta file in subvol's root | |
8143 | self.assertEqual(createpath[1:], subvolpath) | |
8144 | ||
8145 | # ensure metadata file is in legacy location, with required version v1 | |
8146 | self._assert_meta_location_and_version(self.volname, subvol, subvol_group=group, version=1, legacy=True) | |
8147 | ||
8148 | # remove subvolume | |
8149 | self._fs_cmd("subvolume", "rm", self.volname, subvol, group) | |
8150 | ||
8151 | # verify trash dir is clean | |
8152 | self._wait_for_trash_empty() | |
8153 | ||
8154 | # remove group | |
8155 | self._fs_cmd("subvolumegroup", "rm", self.volname, group) | |
8156 | ||
8157 | def test_unparseable_metafile_on_legacy_to_v1_upgrade(self): | |
8158 | """ | |
8159 | Validate unparseable text .meta file on legacy subvol root doesn't break the system | |
8160 | on legacy subvol upgrade to v1 | |
8161 | poor man's upgrade test -- theme continues... | |
8162 | """ | |
f38dd50b TL |
8163 | subvol = self._gen_subvol_name() |
8164 | group = self._gen_subvol_grp_name() | |
0948533f TL |
8165 | |
8166 | # emulate a old-fashioned subvolume -- in a custom group | |
8167 | createpath = os.path.join(".", "volumes", group, subvol) | |
2a845540 | 8168 | self.mount_a.run_shell(['sudo', 'mkdir', '-p', createpath], omit_sudo=False) |
0948533f TL |
8169 | |
8170 | # add required xattrs to subvolume | |
8171 | default_pool = self.mount_a.getfattr(".", "ceph.dir.layout.pool") | |
8172 | self.mount_a.setfattr(createpath, 'ceph.dir.layout.pool', default_pool, sudo=True) | |
8173 | ||
8174 | # Create unparseable text .meta file on legacy subvol's root | |
8175 | meta_contents = "unparseable config\nfile ...\nunparseable config\nfile ...\n" | |
8176 | meta_filepath = os.path.join(self.mount_a.mountpoint, createpath, ".meta") | |
8177 | self.mount_a.client_remote.write_file(meta_filepath, meta_contents, sudo=True) | |
8178 | ||
8179 | # Upgrade legacy subvol to v1 | |
8180 | subvolpath = self._fs_cmd("subvolume", "getpath", self.volname, subvol, group) | |
8181 | self.assertNotEqual(subvolpath, None) | |
8182 | subvolpath = subvolpath.rstrip() | |
8183 | ||
8184 | # The legacy subvolume path should be returned for subvol. | |
8185 | # Should ignore unparseable binary .meta file in subvol's root | |
8186 | self.assertEqual(createpath[1:], subvolpath) | |
8187 | ||
8188 | # ensure metadata file is in legacy location, with required version v1 | |
8189 | self._assert_meta_location_and_version(self.volname, subvol, subvol_group=group, version=1, legacy=True) | |
8190 | ||
8191 | # remove subvolume | |
8192 | self._fs_cmd("subvolume", "rm", self.volname, subvol, group) | |
8193 | ||
8194 | # verify trash dir is clean | |
8195 | self._wait_for_trash_empty() | |
8196 | ||
8197 | # remove group | |
8198 | self._fs_cmd("subvolumegroup", "rm", self.volname, group) | |
1e59de90 TL |
8199 | |
class TestPerModuleFinsherThread(TestVolumesHelper):
    """
    Per module finisher thread tests related to mgr/volume cmds.
    This is used in conjunction with check_counter with min val being 4
    as four subvolume cmds are run
    """
    def test_volumes_module_finisher_thread(self):
        # three subvolumes in one group; create them all, then tear
        # everything down -- the command count feeds check_counter
        subvols = self._gen_subvol_name(3)
        group = self._gen_subvol_grp_name()

        # create group
        self._fs_cmd("subvolumegroup", "create", self.volname, group)

        # create subvolumes in group
        for name in subvols:
            self._fs_cmd("subvolume", "create", self.volname, name, "--group_name", group)

        # remove subvolumes and group
        for name in subvols:
            self._fs_cmd("subvolume", "rm", self.volname, name, group)
        self._fs_cmd("subvolumegroup", "rm", self.volname, group)

        # verify trash dir is clean
        self._wait_for_trash_empty()