]>
Commit | Line | Data |
---|---|---|
81eedcae TL |
1 | import os |
2 | import json | |
92f5a8d4 | 3 | import time |
81eedcae TL |
4 | import errno |
5 | import random | |
6 | import logging | |
eafe8130 | 7 | import collections |
81eedcae TL |
8 | |
9 | from tasks.cephfs.cephfs_test_case import CephFSTestCase | |
10 | from teuthology.exceptions import CommandFailedError | |
11 | ||
12 | log = logging.getLogger(__name__) | |
13 | ||
14 | class TestVolumes(CephFSTestCase): | |
92f5a8d4 | 15 | TEST_VOLUME_PREFIX = "volume" |
81eedcae TL |
16 | TEST_SUBVOLUME_PREFIX="subvolume" |
17 | TEST_GROUP_PREFIX="group" | |
18 | TEST_SNAPSHOT_PREFIX="snapshot" | |
92f5a8d4 | 19 | TEST_CLONE_PREFIX="clone" |
494da23a TL |
20 | TEST_FILE_NAME_PREFIX="subvolume_file" |
21 | ||
22 | # for filling subvolume with data | |
23 | CLIENTS_REQUIRED = 1 | |
f6b5b4d7 | 24 | MDSS_REQUIRED = 2 |
494da23a TL |
25 | |
26 | # io defaults | |
27 | DEFAULT_FILE_SIZE = 1 # MB | |
28 | DEFAULT_NUMBER_OF_FILES = 1024 | |
81eedcae TL |
29 | |
    def _fs_cmd(self, *args):
        """Run a `ceph fs ...` command via the mon manager and return its stdout."""
        return self.mgr_cluster.mon_manager.raw_cluster_cmd("fs", *args)
32 | ||
f6b5b4d7 TL |
    def _raw_cmd(self, *args):
        """Run an arbitrary `ceph ...` command via the mon manager and return its stdout."""
        return self.mgr_cluster.mon_manager.raw_cluster_cmd(*args)
35 | ||
92f5a8d4 TL |
    def __check_clone_state(self, state, clone, clone_group=None, timo=120):
        """Poll `fs clone status` (once per second, up to `timo` polls) until the
        clone reaches `state`; fail the test if it does not get there in time."""
        check = 0
        args = ["clone", "status", self.volname, clone]
        if clone_group:
            args.append(clone_group)
        args = tuple(args)
        while check < timo:
            result = json.loads(self._fs_cmd(*args))
            if result["status"]["state"] == state:
                break
            check += 1
            time.sleep(1)
        # if the loop exhausted without breaking, the clone never reached `state`
        self.assertTrue(check < timo)
49 | ||
50 | def _wait_for_clone_to_complete(self, clone, clone_group=None, timo=120): | |
51 | self.__check_clone_state("complete", clone, clone_group, timo) | |
52 | ||
53 | def _wait_for_clone_to_fail(self, clone, clone_group=None, timo=120): | |
54 | self.__check_clone_state("failed", clone, clone_group, timo) | |
55 | ||
9f95a23c TL |
56 | def _check_clone_canceled(self, clone, clone_group=None): |
57 | self.__check_clone_state("canceled", clone, clone_group, timo=1) | |
58 | ||
    def _verify_clone_attrs(self, subvolume, clone, source_group=None, clone_group=None):
        """Walk every entry under the source subvolume and verify the matching
        entry in the clone has identical inode attributes: type+mode, uid, gid,
        atime and mtime."""
        path1 = self._get_subvolume_path(self.volname, subvolume, group_name=source_group)
        path2 = self._get_subvolume_path(self.volname, clone, group_name=clone_group)

        p = self.mount_a.run_shell(["find", path1])
        paths = p.stdout.getvalue().strip().split()

        # for each entry in source and clone (sink) verify certain inode attributes:
        # inode type, mode, ownership, [am]time.
        for source_path in paths:
            sink_entry = source_path[len(path1)+1:]
            sink_path = os.path.join(path2, sink_entry)

            # mode+type
            # NOTE: '-c' '%f' are adjacent string literals -- Python concatenates
            # them into the single argument '-c%f', which stat(1) accepts.
            sval = int(self.mount_a.run_shell(['stat', '-c' '%f', source_path]).stdout.getvalue().strip(), 16)
            cval = int(self.mount_a.run_shell(['stat', '-c' '%f', sink_path]).stdout.getvalue().strip(), 16)
            self.assertEqual(sval, cval)

            # ownership (uid)
            sval = int(self.mount_a.run_shell(['stat', '-c' '%u', source_path]).stdout.getvalue().strip())
            cval = int(self.mount_a.run_shell(['stat', '-c' '%u', sink_path]).stdout.getvalue().strip())
            self.assertEqual(sval, cval)

            # ownership (gid)
            sval = int(self.mount_a.run_shell(['stat', '-c' '%g', source_path]).stdout.getvalue().strip())
            cval = int(self.mount_a.run_shell(['stat', '-c' '%g', sink_path]).stdout.getvalue().strip())
            self.assertEqual(sval, cval)

            # inode timestamps (atime)
            sval = int(self.mount_a.run_shell(['stat', '-c' '%X', source_path]).stdout.getvalue().strip())
            cval = int(self.mount_a.run_shell(['stat', '-c' '%X', sink_path]).stdout.getvalue().strip())
            self.assertEqual(sval, cval)

            # inode timestamps (mtime)
            sval = int(self.mount_a.run_shell(['stat', '-c' '%Y', source_path]).stdout.getvalue().strip())
            cval = int(self.mount_a.run_shell(['stat', '-c' '%Y', sink_path]).stdout.getvalue().strip())
            self.assertEqual(sval, cval)
94 | ||
    def _verify_clone(self, subvolume, clone, source_group=None, clone_group=None, timo=120):
        """Wait (up to `timo` seconds) for the clone's recursive entry count to
        catch up with the source, then verify per-inode attributes match."""
        path1 = self._get_subvolume_path(self.volname, subvolume, group_name=source_group)
        path2 = self._get_subvolume_path(self.volname, clone, group_name=clone_group)

        check = 0
        while check < timo:
            # rentries is the recursive count of entries under the directory
            val1 = int(self.mount_a.getfattr(path1, "ceph.dir.rentries"))
            val2 = int(self.mount_a.getfattr(path2, "ceph.dir.rentries"))
            if val1 == val2:
                break
            check += 1
            time.sleep(1)
        self.assertTrue(check < timo)

        self._verify_clone_attrs(subvolume, clone, source_group=source_group, clone_group=clone_group)
110 | ||
92f5a8d4 | 111 | def _generate_random_volume_name(self, count=1): |
f6b5b4d7 TL |
112 | n = self.volume_start |
113 | volumes = [f"{TestVolumes.TEST_VOLUME_PREFIX}_{i:016}" for i in range(n, n+count)] | |
114 | self.volume_start += count | |
92f5a8d4 TL |
115 | return volumes[0] if count == 1 else volumes |
116 | ||
117 | def _generate_random_subvolume_name(self, count=1): | |
f6b5b4d7 TL |
118 | n = self.subvolume_start |
119 | subvolumes = [f"{TestVolumes.TEST_SUBVOLUME_PREFIX}_{i:016}" for i in range(n, n+count)] | |
120 | self.subvolume_start += count | |
92f5a8d4 TL |
121 | return subvolumes[0] if count == 1 else subvolumes |
122 | ||
123 | def _generate_random_group_name(self, count=1): | |
f6b5b4d7 TL |
124 | n = self.group_start |
125 | groups = [f"{TestVolumes.TEST_GROUP_PREFIX}_{i:016}" for i in range(n, n+count)] | |
126 | self.group_start += count | |
92f5a8d4 TL |
127 | return groups[0] if count == 1 else groups |
128 | ||
129 | def _generate_random_snapshot_name(self, count=1): | |
f6b5b4d7 TL |
130 | n = self.snapshot_start |
131 | snaps = [f"{TestVolumes.TEST_SNAPSHOT_PREFIX}_{i:016}" for i in range(n, n+count)] | |
132 | self.snapshot_start += count | |
92f5a8d4 TL |
133 | return snaps[0] if count == 1 else snaps |
134 | ||
135 | def _generate_random_clone_name(self, count=1): | |
f6b5b4d7 TL |
136 | n = self.clone_start |
137 | clones = [f"{TestVolumes.TEST_CLONE_PREFIX}_{i:016}" for i in range(n, n+count)] | |
138 | self.clone_start += count | |
92f5a8d4 | 139 | return clones[0] if count == 1 else clones |
81eedcae TL |
140 | |
    def _enable_multi_fs(self):
        """Allow multiple filesystems on the cluster (required for volume tests)."""
        self._fs_cmd("flag", "set", "enable_multiple", "true", "--yes-i-really-mean-it")
143 | ||
144 | def _create_or_reuse_test_volume(self): | |
145 | result = json.loads(self._fs_cmd("volume", "ls")) | |
146 | if len(result) == 0: | |
147 | self.vol_created = True | |
92f5a8d4 | 148 | self.volname = self._generate_random_volume_name() |
81eedcae TL |
149 | self._fs_cmd("volume", "create", self.volname) |
150 | else: | |
151 | self.volname = result[0]['name'] | |
152 | ||
494da23a TL |
153 | def _get_subvolume_group_path(self, vol_name, group_name): |
154 | args = ("subvolumegroup", "getpath", vol_name, group_name) | |
155 | path = self._fs_cmd(*args) | |
156 | # remove the leading '/', and trailing whitespaces | |
157 | return path[1:].rstrip() | |
158 | ||
81eedcae TL |
159 | def _get_subvolume_path(self, vol_name, subvol_name, group_name=None): |
160 | args = ["subvolume", "getpath", vol_name, subvol_name] | |
161 | if group_name: | |
162 | args.append(group_name) | |
163 | args = tuple(args) | |
164 | path = self._fs_cmd(*args) | |
165 | # remove the leading '/', and trailing whitespaces | |
166 | return path[1:].rstrip() | |
167 | ||
1911f103 TL |
168 | def _get_subvolume_info(self, vol_name, subvol_name, group_name=None): |
169 | args = ["subvolume", "info", vol_name, subvol_name] | |
170 | if group_name: | |
171 | args.append(group_name) | |
172 | args = tuple(args) | |
173 | subvol_md = self._fs_cmd(*args) | |
174 | return subvol_md | |
175 | ||
e306af50 TL |
176 | def _get_subvolume_snapshot_info(self, vol_name, subvol_name, snapname, group_name=None): |
177 | args = ["subvolume", "snapshot", "info", vol_name, subvol_name, snapname] | |
178 | if group_name: | |
179 | args.append(group_name) | |
180 | args = tuple(args) | |
181 | snap_md = self._fs_cmd(*args) | |
182 | return snap_md | |
183 | ||
81eedcae | 184 | def _delete_test_volume(self): |
eafe8130 | 185 | self._fs_cmd("volume", "rm", self.volname, "--yes-i-really-mean-it") |
81eedcae | 186 | |
92f5a8d4 TL |
    def _do_subvolume_io(self, subvolume, subvolume_group=None, create_dir=None,
                         number_of_files=DEFAULT_NUMBER_OF_FILES, file_size=DEFAULT_FILE_SIZE):
        """Fill `subvolume` with `number_of_files` files of `file_size` MB each,
        optionally under a newly created subdirectory `create_dir`."""
        # get subvolume path for IO
        args = ["subvolume", "getpath", self.volname, subvolume]
        if subvolume_group:
            args.append(subvolume_group)
        args = tuple(args)
        subvolpath = self._fs_cmd(*args)
        self.assertNotEqual(subvolpath, None)
        subvolpath = subvolpath[1:].rstrip() # remove "/" prefix and any trailing newline

        io_path = subvolpath
        if create_dir:
            io_path = os.path.join(subvolpath, create_dir)
            self.mount_a.run_shell(["mkdir", "-p", io_path])

        log.debug("filling subvolume {0} with {1} files each {2}MB size under directory {3}".format(subvolume, number_of_files, file_size, io_path))
        for i in range(number_of_files):
            filename = "{0}.{1}".format(TestVolumes.TEST_FILE_NAME_PREFIX, i)
            self.mount_a.write_n_mb(os.path.join(io_path, filename), file_size)
494da23a | 207 | |
9f95a23c TL |
    def _do_subvolume_io_mixed(self, subvolume, subvolume_group=None):
        """Populate the subvolume with a mix of inode types (directory plus two
        symlinks), one of which has its ownership flipped -- used to exercise
        attribute preservation in clone verification."""
        subvolpath = self._get_subvolume_path(self.volname, subvolume, group_name=subvolume_group)

        reg_file = "regfile.0"
        reg_path = os.path.join(subvolpath, reg_file)
        dir_path = os.path.join(subvolpath, "dir.0")
        sym_path1 = os.path.join(subvolpath, "sym.0")
        # this symlink's ownership would be changed
        sym_path2 = os.path.join(dir_path, "sym.0")

        #self.mount_a.write_n_mb(reg_path, TestVolumes.DEFAULT_FILE_SIZE)
        self.mount_a.run_shell(["sudo", "mkdir", dir_path], omit_sudo=False)
        # symlinks deliberately dangle (reg_file is never written) -- only their
        # inode attributes matter for the clone checks
        self.mount_a.run_shell(["sudo", "ln", "-s", "./{}".format(reg_file), sym_path1], omit_sudo=False)
        self.mount_a.run_shell(["sudo", "ln", "-s", "./{}".format(reg_file), sym_path2], omit_sudo=False)
        # flip ownership to nobody. assumption: nobody's id is 65534
        self.mount_a.run_shell(["sudo", "chown", "-h", "65534:65534", sym_path2], omit_sudo=False)
224 | ||
494da23a TL |
    def _wait_for_trash_empty(self, timeout=30):
        """Wait until the volume's trash directory has been purged.

        XXX: construct the trash dir path (note that there is no mgr
        [sub]volume interface for this).
        """
        trashdir = os.path.join("./", "volumes", "_deleting")
        self.mount_a.wait_for_dir_empty(trashdir, timeout=timeout)
494da23a | 230 | |
81eedcae TL |
    def setUp(self):
        """Prepare a test volume and randomized name counters for each test."""
        super(TestVolumes, self).setUp()
        self.volname = None
        self.vol_created = False
        self._enable_multi_fs()
        self._create_or_reuse_test_volume()
        self.config_set('mon', 'mon_allow_pool_delete', True)
        # random starting offsets keep names unique across test runs sharing a volume
        self.volume_start = random.randint(1, (1<<20))
        self.subvolume_start = random.randint(1, (1<<20))
        self.group_start = random.randint(1, (1<<20))
        self.snapshot_start = random.randint(1, (1<<20))
        self.clone_start = random.randint(1, (1<<20))
81eedcae TL |
243 | |
    def tearDown(self):
        """Remove the volume only if this test created it (see setUp)."""
        if self.vol_created:
            self._delete_test_volume()
        super(TestVolumes, self).tearDown()
248 | ||
92f5a8d4 TL |
    def test_connection_expiration(self):
        """That the mgr's idle cephfs connection is eventually evicted."""
        # unmount any cephfs mounts
        self.mount_a.umount_wait()
        sessions = self._session_list()
        self.assertLessEqual(len(sessions), 1) # maybe mgr is already mounted

        # Get the mgr to definitely mount cephfs
        subvolume = self._generate_random_subvolume_name()
        self._fs_cmd("subvolume", "create", self.volname, subvolume)
        sessions = self._session_list()
        self.assertEqual(len(sessions), 1)

        # Now wait for the mgr to expire the connection:
        self.wait_until_evicted(sessions[0]['id'], timeout=90)
263 | ||
    def test_volume_create(self):
        """
        That the volume can be created and then cleans up
        """
        volname = self._generate_random_volume_name()
        self._fs_cmd("volume", "create", volname)
        volumels = json.loads(self._fs_cmd("volume", "ls"))

        if not (volname in ([volume['name'] for volume in volumels])):
            raise RuntimeError("Error creating volume '{0}'".format(volname))
        else:
            # clean up
            self._fs_cmd("volume", "rm", volname, "--yes-i-really-mean-it")
277 | ||
    def test_volume_ls(self):
        """
        That the existing and the newly created volumes can be listed and
        finally cleans up.
        """
        vls = json.loads(self._fs_cmd("volume", "ls"))
        volumes = [volume['name'] for volume in vls]

        #create new volumes and add it to the existing list of volumes
        volumenames = self._generate_random_volume_name(3)
        for volumename in volumenames:
            self._fs_cmd("volume", "create", volumename)
        volumes.extend(volumenames)

        # list volumes
        try:
            volumels = json.loads(self._fs_cmd('volume', 'ls'))
            if len(volumels) == 0:
                raise RuntimeError("Expected the 'fs volume ls' command to list the created volumes.")
            else:
                volnames = [volume['name'] for volume in volumels]
                # Counter comparison: same names, order-insensitive
                if collections.Counter(volnames) != collections.Counter(volumes):
                    raise RuntimeError("Error creating or listing volumes")
        finally:
            # clean up
            for volume in volumenames:
                self._fs_cmd("volume", "rm", volume, "--yes-i-really-mean-it")
305 | ||
eafe8130 | 306 | def test_volume_rm(self): |
92f5a8d4 TL |
307 | """ |
308 | That the volume can only be removed when --yes-i-really-mean-it is used | |
309 | and verify that the deleted volume is not listed anymore. | |
310 | """ | |
eafe8130 TL |
311 | try: |
312 | self._fs_cmd("volume", "rm", self.volname) | |
313 | except CommandFailedError as ce: | |
314 | if ce.exitstatus != errno.EPERM: | |
315 | raise RuntimeError("expected the 'fs volume rm' command to fail with EPERM, " | |
316 | "but it failed with {0}".format(ce.exitstatus)) | |
317 | else: | |
318 | self._fs_cmd("volume", "rm", self.volname, "--yes-i-really-mean-it") | |
319 | ||
320 | #check if it's gone | |
92f5a8d4 | 321 | volumes = json.loads(self._fs_cmd("volume", "ls", "--format=json-pretty")) |
eafe8130 | 322 | if (self.volname in [volume['name'] for volume in volumes]): |
92f5a8d4 TL |
323 | raise RuntimeError("Expected the 'fs volume rm' command to succeed. " |
324 | "The volume {0} not removed.".format(self.volname)) | |
eafe8130 TL |
325 | else: |
326 | raise RuntimeError("expected the 'fs volume rm' command to fail.") | |
327 | ||
f6b5b4d7 TL |
    def test_volume_rm_arbitrary_pool_removal(self):
        """
        That the arbitrary pool added to the volume out of band is removed
        successfully on volume removal.
        """
        new_pool = "new_pool"
        # add arbitrary data pool
        self.fs.add_data_pool(new_pool)
        # capture pool list before removal so we can verify they are deleted
        vol_status = json.loads(self._fs_cmd("status", self.volname, "--format=json-pretty"))
        self._fs_cmd("volume", "rm", self.volname, "--yes-i-really-mean-it")

        #check if fs is gone
        volumes = json.loads(self._fs_cmd("volume", "ls", "--format=json-pretty"))
        volnames = [volume['name'] for volume in volumes]
        self.assertNotIn(self.volname, volnames)

        #check if osd pools are gone
        pools = json.loads(self._raw_cmd("osd", "pool", "ls", "--format=json-pretty"))
        for pool in vol_status["pools"]:
            self.assertNotIn(pool["name"], pools)
348 | ||
    def test_volume_rm_when_mon_delete_pool_false(self):
        """
        That the volume can only be removed when mon_allow_pool_delete is set
        to true and verify that the pools are removed after volume deletion.
        """
        self.config_set('mon', 'mon_allow_pool_delete', False)
        try:
            self._fs_cmd("volume", "rm", self.volname, "--yes-i-really-mean-it")
        except CommandFailedError as ce:
            self.assertEqual(ce.exitstatus, errno.EPERM,
                             "expected the 'fs volume rm' command to fail with EPERM, "
                             "but it failed with {0}".format(ce.exitstatus))
        # capture pool list, then re-enable pool deletion and remove for real
        vol_status = json.loads(self._fs_cmd("status", self.volname, "--format=json-pretty"))
        self.config_set('mon', 'mon_allow_pool_delete', True)
        self._fs_cmd("volume", "rm", self.volname, "--yes-i-really-mean-it")

        #check if fs is gone
        volumes = json.loads(self._fs_cmd("volume", "ls", "--format=json-pretty"))
        volnames = [volume['name'] for volume in volumes]
        self.assertNotIn(self.volname, volnames,
                         "volume {0} exists after removal".format(self.volname))
        #check if pools are gone
        pools = json.loads(self._raw_cmd("osd", "pool", "ls", "--format=json-pretty"))
        for pool in vol_status["pools"]:
            self.assertNotIn(pool["name"], pools,
                             "pool {0} exists after volume removal".format(pool["name"]))
375 | ||
81eedcae TL |
376 | ### basic subvolume operations |
377 | ||
    def test_subvolume_create_and_rm(self):
        """That a subvolume can be created, resolved, removed, and that removal
        makes getpath fail with ENOENT while the trash eventually drains."""
        # create subvolume
        subvolume = self._generate_random_subvolume_name()
        self._fs_cmd("subvolume", "create", self.volname, subvolume)

        # make sure it exists
        subvolpath = self._fs_cmd("subvolume", "getpath", self.volname, subvolume)
        self.assertNotEqual(subvolpath, None)

        # remove subvolume
        self._fs_cmd("subvolume", "rm", self.volname, subvolume)
        # make sure its gone
        try:
            self._fs_cmd("subvolume", "getpath", self.volname, subvolume)
        except CommandFailedError as ce:
            if ce.exitstatus != errno.ENOENT:
                raise
        else:
            raise RuntimeError("expected the 'fs subvolume getpath' command to fail. Subvolume not removed.")

        # verify trash dir is clean
        self._wait_for_trash_empty()
400 | ||
92f5a8d4 TL |
    def test_subvolume_expand(self):
        """
        That a subvolume can be expanded in size and its quota matches the expected size.
        """

        # create subvolume
        subvolname = self._generate_random_subvolume_name()
        osize = self.DEFAULT_FILE_SIZE*1024*1024
        self._fs_cmd("subvolume", "create", self.volname, subvolname, "--size", str(osize))

        # make sure it exists
        subvolpath = self._get_subvolume_path(self.volname, subvolname)
        self.assertNotEqual(subvolpath, None)

        # expand the subvolume
        nsize = osize*2
        self._fs_cmd("subvolume", "resize", self.volname, subvolname, str(nsize))

        # verify the quota
        size = int(self.mount_a.getfattr(subvolpath, "ceph.quota.max_bytes"))
        self.assertEqual(size, nsize)
422 | ||
    def test_subvolume_shrink(self):
        """
        That a subvolume can be shrinked in size and its quota matches the expected size.
        """

        # create subvolume
        subvolname = self._generate_random_subvolume_name()
        osize = self.DEFAULT_FILE_SIZE*1024*1024
        self._fs_cmd("subvolume", "create", self.volname, subvolname, "--size", str(osize))

        # make sure it exists
        subvolpath = self._get_subvolume_path(self.volname, subvolname)
        self.assertNotEqual(subvolpath, None)

        # shrink the subvolume
        nsize = osize // 2
        self._fs_cmd("subvolume", "resize", self.volname, subvolname, str(nsize))

        # verify the quota
        size = int(self.mount_a.getfattr(subvolpath, "ceph.quota.max_bytes"))
        self.assertEqual(size, nsize)
444 | ||
    def test_subvolume_resize_fail_invalid_size(self):
        """
        That a subvolume cannot be resized to an invalid size and the quota did not change
        """

        osize = self.DEFAULT_FILE_SIZE*1024*1024
        # create subvolume
        subvolname = self._generate_random_subvolume_name()
        self._fs_cmd("subvolume", "create", self.volname, subvolname, "--size", str(osize))

        # make sure it exists
        subvolpath = self._get_subvolume_path(self.volname, subvolname)
        self.assertNotEqual(subvolpath, None)

        # try to resize the subvolume with an invalid size -10
        nsize = -10
        try:
            self._fs_cmd("subvolume", "resize", self.volname, subvolname, str(nsize))
        except CommandFailedError as ce:
            # anything other than EINVAL is unexpected
            if ce.exitstatus != errno.EINVAL:
                raise
        else:
            raise RuntimeError("expected the 'fs subvolume resize' command to fail")

        # verify the quota did not change
        size = int(self.mount_a.getfattr(subvolpath, "ceph.quota.max_bytes"))
        self.assertEqual(size, osize)
472 | ||
    def test_subvolume_resize_fail_zero_size(self):
        """
        That a subvolume cannot be resized to a zero size and the quota did not change
        """

        osize = self.DEFAULT_FILE_SIZE*1024*1024
        # create subvolume
        subvolname = self._generate_random_subvolume_name()
        self._fs_cmd("subvolume", "create", self.volname, subvolname, "--size", str(osize))

        # make sure it exists
        subvolpath = self._get_subvolume_path(self.volname, subvolname)
        self.assertNotEqual(subvolpath, None)

        # try to resize the subvolume with size 0
        nsize = 0
        try:
            self._fs_cmd("subvolume", "resize", self.volname, subvolname, str(nsize))
        except CommandFailedError as ce:
            # anything other than EINVAL is unexpected
            if ce.exitstatus != errno.EINVAL:
                raise
        else:
            raise RuntimeError("expected the 'fs subvolume resize' command to fail")

        # verify the quota did not change
        size = int(self.mount_a.getfattr(subvolpath, "ceph.quota.max_bytes"))
        self.assertEqual(size, osize)
500 | ||
    def test_subvolume_resize_quota_lt_used_size(self):
        """
        That a subvolume can be resized to a size smaller than the current used size
        and the resulting quota matches the expected size.
        """

        osize = self.DEFAULT_FILE_SIZE*1024*1024*20
        # create subvolume
        subvolname = self._generate_random_subvolume_name()
        self._fs_cmd("subvolume", "create", self.volname, subvolname, "--size", str(osize))

        # make sure it exists
        subvolpath = self._get_subvolume_path(self.volname, subvolname)
        self.assertNotEqual(subvolpath, None)

        # create one file of 10MB
        file_size=self.DEFAULT_FILE_SIZE*10
        number_of_files=1
        log.debug("filling subvolume {0} with {1} file of size {2}MB".format(subvolname,
                                                                             number_of_files,
                                                                             file_size))
        # file index past DEFAULT_NUMBER_OF_FILES avoids clashing with _do_subvolume_io names
        filename = "{0}.{1}".format(TestVolumes.TEST_FILE_NAME_PREFIX, self.DEFAULT_NUMBER_OF_FILES+1)
        self.mount_a.write_n_mb(os.path.join(subvolpath, filename), file_size)

        usedsize = int(self.mount_a.getfattr(subvolpath, "ceph.dir.rbytes"))
        # stat '%s' on a ceph dir reports rbytes; cross-check the two views agree
        susedsize = int(self.mount_a.run_shell(['stat', '-c' '%s', subvolpath]).stdout.getvalue().strip())
        self.assertEqual(usedsize, susedsize)

        # shrink the subvolume below the used size -- allowed without --no_shrink
        nsize = usedsize // 2
        try:
            self._fs_cmd("subvolume", "resize", self.volname, subvolname, str(nsize))
        except CommandFailedError:
            raise RuntimeError("expected the 'fs subvolume resize' command to succeed")

        # verify the quota
        size = int(self.mount_a.getfattr(subvolpath, "ceph.quota.max_bytes"))
        self.assertEqual(size, nsize)
539 | ||
540 | ||
    def test_subvolume_resize_fail_quota_lt_used_size_no_shrink(self):
        """
        That a subvolume cannot be resized to a size smaller than the current used size
        when --no_shrink is given and the quota did not change.
        """

        osize = self.DEFAULT_FILE_SIZE*1024*1024*20
        # create subvolume
        subvolname = self._generate_random_subvolume_name()
        self._fs_cmd("subvolume", "create", self.volname, subvolname, "--size", str(osize))

        # make sure it exists
        subvolpath = self._get_subvolume_path(self.volname, subvolname)
        self.assertNotEqual(subvolpath, None)

        # create one file of 10MB
        file_size=self.DEFAULT_FILE_SIZE*10
        number_of_files=1
        log.debug("filling subvolume {0} with {1} file of size {2}MB".format(subvolname,
                                                                             number_of_files,
                                                                             file_size))
        filename = "{0}.{1}".format(TestVolumes.TEST_FILE_NAME_PREFIX, self.DEFAULT_NUMBER_OF_FILES+2)
        self.mount_a.write_n_mb(os.path.join(subvolpath, filename), file_size)

        usedsize = int(self.mount_a.getfattr(subvolpath, "ceph.dir.rbytes"))
        susedsize = int(self.mount_a.run_shell(['stat', '-c' '%s', subvolpath]).stdout.getvalue().strip())
        self.assertEqual(usedsize, susedsize)

        # shrink the subvolume -- with --no_shrink this must be rejected with EINVAL
        nsize = usedsize // 2
        try:
            self._fs_cmd("subvolume", "resize", self.volname, subvolname, str(nsize), "--no_shrink")
        except CommandFailedError as ce:
            if ce.exitstatus != errno.EINVAL:
                raise
        else:
            raise RuntimeError("expected the 'fs subvolume resize' command to fail")

        # verify the quota did not change
        size = int(self.mount_a.getfattr(subvolpath, "ceph.quota.max_bytes"))
        self.assertEqual(size, osize)
582 | ||
    def test_subvolume_resize_expand_on_full_subvolume(self):
        """
        That the subvolume can be expanded from a full subvolume and future writes succeed.
        """

        osize = self.DEFAULT_FILE_SIZE*1024*1024*10
        # create subvolume of quota 10MB and make sure it exists
        subvolname = self._generate_random_subvolume_name()
        self._fs_cmd("subvolume", "create", self.volname, subvolname, "--size", str(osize))
        subvolpath = self._get_subvolume_path(self.volname, subvolname)
        self.assertNotEqual(subvolpath, None)

        # create one file of size 10MB and write -- fills the subvolume to quota
        file_size=self.DEFAULT_FILE_SIZE*10
        number_of_files=1
        log.debug("filling subvolume {0} with {1} file of size {2}MB".format(subvolname,
                                                                             number_of_files,
                                                                             file_size))
        filename = "{0}.{1}".format(TestVolumes.TEST_FILE_NAME_PREFIX, self.DEFAULT_NUMBER_OF_FILES+3)
        self.mount_a.write_n_mb(os.path.join(subvolpath, filename), file_size)

        # create a file of size 5MB and try write more -- expected to hit the quota
        file_size=file_size // 2
        number_of_files=1
        log.debug("filling subvolume {0} with {1} file of size {2}MB".format(subvolname,
                                                                             number_of_files,
                                                                             file_size))
        filename = "{0}.{1}".format(TestVolumes.TEST_FILE_NAME_PREFIX, self.DEFAULT_NUMBER_OF_FILES+4)
        try:
            self.mount_a.write_n_mb(os.path.join(subvolpath, filename), file_size)
        except CommandFailedError:
            # Not able to write. So expand the subvolume more and try writing the 5MB file again
            nsize = osize*2
            self._fs_cmd("subvolume", "resize", self.volname, subvolname, str(nsize))
            try:
                self.mount_a.write_n_mb(os.path.join(subvolpath, filename), file_size)
            except CommandFailedError:
                raise RuntimeError("expected filling subvolume {0} with {1} file of size {2}MB"
                                   "to succeed".format(subvolname, number_of_files, file_size))
        else:
            # the write on a full subvolume should not have succeeded
            raise RuntimeError("expected filling subvolume {0} with {1} file of size {2}MB"
                               "to fail".format(subvolname, number_of_files, file_size))
625 | ||
81eedcae TL |
    def test_subvolume_create_idempotence(self):
        """That creating an already-existing subvolume is a no-op, not an error."""
        # create subvolume
        subvolume = self._generate_random_subvolume_name()
        self._fs_cmd("subvolume", "create", self.volname, subvolume)

        # try creating w/ same subvolume name -- should be idempotent
        self._fs_cmd("subvolume", "create", self.volname, subvolume)

        # remove subvolume
        self._fs_cmd("subvolume", "rm", self.volname, subvolume)

        # verify trash dir is clean
        self._wait_for_trash_empty()
639 | ||
e306af50 TL |
    def test_subvolume_create_idempotence_resize(self):
        """That re-creating an existing subvolume with a size acts as a resize."""
        # create subvolume
        subvolume = self._generate_random_subvolume_name()
        self._fs_cmd("subvolume", "create", self.volname, subvolume)

        # try creating w/ same subvolume name with size -- should set quota
        self._fs_cmd("subvolume", "create", self.volname, subvolume, "1000000000")

        # get subvolume metadata
        subvol_info = json.loads(self._get_subvolume_info(self.volname, subvolume))
        self.assertEqual(subvol_info["bytes_quota"], 1000000000)

        # remove subvolume
        self._fs_cmd("subvolume", "rm", self.volname, subvolume)

        # verify trash dir is clean
        self._wait_for_trash_empty()
657 | ||
f6b5b4d7 TL |
    def test_subvolume_pin_export(self):
        """That a subvolume can be export-pinned to a specific MDS rank."""
        self.fs.set_max_mds(2)
        status = self.fs.wait_for_daemons()

        subvolume = self._generate_random_subvolume_name()
        self._fs_cmd("subvolume", "create", self.volname, subvolume)
        self._fs_cmd("subvolume", "pin", self.volname, subvolume, "export", "1")
        path = self._fs_cmd("subvolume", "getpath", self.volname, subvolume)
        path = os.path.dirname(path) # get subvolume path

        self._get_subtrees(status=status, rank=1)
        self._wait_subtrees([(path, 1)], status=status)
670 | ||
    def test_subvolumegroup_pin_distributed(self):
        """That subvolumes under a 'distributed'-pinned group get spread as
        ephemeral subtrees across MDS ranks."""
        self.fs.set_max_mds(2)
        status = self.fs.wait_for_daemons()
        self.config_set('mds', 'mds_export_ephemeral_distributed', True)

        group = "pinme"
        self._fs_cmd("subvolumegroup", "create", self.volname, group)
        self._fs_cmd("subvolumegroup", "pin", self.volname, group, "distributed", "True")
        # (no effect on distribution) pin the group directory to 0 so rank 0 has all subtree bounds visible
        self._fs_cmd("subvolumegroup", "pin", self.volname, group, "export", "0")
        subvolumes = self._generate_random_subvolume_name(10)
        for subvolume in subvolumes:
            self._fs_cmd("subvolume", "create", self.volname, subvolume, "--group_name", group)
        self._wait_distributed_subtrees(10, status=status)
685 | ||
686 | def test_subvolume_pin_random(self): | |
687 | self.fs.set_max_mds(2) | |
688 | self.fs.wait_for_daemons() | |
689 | self.config_set('mds', 'mds_export_ephemeral_random', True) | |
690 | ||
691 | subvolume = self._generate_random_subvolume_name() | |
692 | self._fs_cmd("subvolume", "create", self.volname, subvolume) | |
693 | self._fs_cmd("subvolume", "pin", self.volname, subvolume, "random", ".01") | |
694 | # no verification | |
695 | ||
e306af50 TL |
696 | def test_subvolume_create_isolated_namespace(self): |
697 | """ | |
698 | Create subvolume in separate rados namespace | |
699 | """ | |
700 | ||
701 | # create subvolume | |
702 | subvolume = self._generate_random_subvolume_name() | |
703 | self._fs_cmd("subvolume", "create", self.volname, subvolume, "--namespace-isolated") | |
704 | ||
705 | # get subvolume metadata | |
706 | subvol_info = json.loads(self._get_subvolume_info(self.volname, subvolume)) | |
707 | self.assertNotEqual(len(subvol_info), 0) | |
708 | self.assertEqual(subvol_info["pool_namespace"], "fsvolumens_" + subvolume) | |
709 | ||
710 | # remove subvolumes | |
711 | self._fs_cmd("subvolume", "rm", self.volname, subvolume) | |
712 | ||
713 | # verify trash dir is clean | |
714 | self._wait_for_trash_empty() | |
715 | ||
eafe8130 TL |
716 | def test_subvolume_create_with_invalid_data_pool_layout(self): |
717 | subvolume = self._generate_random_subvolume_name() | |
718 | data_pool = "invalid_pool" | |
719 | # create subvolume with invalid data pool layout | |
720 | try: | |
721 | self._fs_cmd("subvolume", "create", self.volname, subvolume, "--pool_layout", data_pool) | |
722 | except CommandFailedError as ce: | |
723 | if ce.exitstatus != errno.EINVAL: | |
724 | raise | |
725 | else: | |
92f5a8d4 TL |
726 | raise RuntimeError("expected the 'fs subvolume create' command to fail") |
727 | ||
728 | def test_subvolume_rm_force(self): | |
729 | # test removing non-existing subvolume with --force | |
730 | subvolume = self._generate_random_subvolume_name() | |
731 | try: | |
732 | self._fs_cmd("subvolume", "rm", self.volname, subvolume, "--force") | |
9f95a23c | 733 | except CommandFailedError: |
92f5a8d4 | 734 | raise RuntimeError("expected the 'fs subvolume rm --force' command to succeed") |
eafe8130 TL |
735 | |
736 | def test_subvolume_create_with_auto_cleanup_on_fail(self): | |
737 | subvolume = self._generate_random_subvolume_name() | |
738 | data_pool = "invalid_pool" | |
739 | # create subvolume with invalid data pool layout fails | |
740 | with self.assertRaises(CommandFailedError): | |
741 | self._fs_cmd("subvolume", "create", self.volname, subvolume, "--pool_layout", data_pool) | |
742 | ||
743 | # check whether subvol path is cleaned up | |
744 | try: | |
745 | self._fs_cmd("subvolume", "getpath", self.volname, subvolume) | |
746 | except CommandFailedError as ce: | |
747 | if ce.exitstatus != errno.ENOENT: | |
748 | raise | |
749 | else: | |
92f5a8d4 | 750 | raise RuntimeError("expected the 'fs subvolume getpath' command to fail") |
eafe8130 TL |
751 | |
752 | def test_subvolume_create_with_invalid_size(self): | |
753 | # create subvolume with an invalid size -1 | |
754 | subvolume = self._generate_random_subvolume_name() | |
755 | try: | |
756 | self._fs_cmd("subvolume", "create", self.volname, subvolume, "--size", "-1") | |
757 | except CommandFailedError as ce: | |
758 | if ce.exitstatus != errno.EINVAL: | |
759 | raise | |
760 | else: | |
761 | raise RuntimeError("expected the 'fs subvolume create' command to fail") | |
762 | ||
81eedcae TL |
763 | def test_nonexistent_subvolume_rm(self): |
764 | # remove non-existing subvolume | |
765 | subvolume = "non_existent_subvolume" | |
766 | ||
767 | # try, remove subvolume | |
768 | try: | |
769 | self._fs_cmd("subvolume", "rm", self.volname, subvolume) | |
770 | except CommandFailedError as ce: | |
771 | if ce.exitstatus != errno.ENOENT: | |
772 | raise | |
92f5a8d4 TL |
773 | else: |
774 | raise RuntimeError("expected the 'fs subvolume rm' command to fail") | |
81eedcae TL |
775 | |
776 | def test_nonexistent_subvolume_group_create(self): | |
777 | subvolume = self._generate_random_subvolume_name() | |
778 | group = "non_existent_group" | |
779 | ||
780 | # try, creating subvolume in a nonexistent group | |
781 | try: | |
782 | self._fs_cmd("subvolume", "create", self.volname, subvolume, "--group_name", group) | |
783 | except CommandFailedError as ce: | |
784 | if ce.exitstatus != errno.ENOENT: | |
785 | raise | |
92f5a8d4 TL |
786 | else: |
787 | raise RuntimeError("expected the 'fs subvolume create' command to fail") | |
81eedcae | 788 | |
494da23a TL |
789 | def test_default_uid_gid_subvolume(self): |
790 | subvolume = self._generate_random_subvolume_name() | |
791 | expected_uid = 0 | |
792 | expected_gid = 0 | |
793 | ||
794 | # create subvolume | |
795 | self._fs_cmd("subvolume", "create", self.volname, subvolume) | |
796 | subvol_path = self._get_subvolume_path(self.volname, subvolume) | |
797 | ||
798 | # check subvolume's uid and gid | |
799 | stat = self.mount_a.stat(subvol_path) | |
800 | self.assertEqual(stat['st_uid'], expected_uid) | |
801 | self.assertEqual(stat['st_gid'], expected_gid) | |
802 | ||
803 | # remove subvolume | |
804 | self._fs_cmd("subvolume", "rm", self.volname, subvolume) | |
805 | ||
eafe8130 TL |
806 | def test_subvolume_ls(self): |
807 | # tests the 'fs subvolume ls' command | |
808 | ||
809 | subvolumes = [] | |
810 | ||
811 | # create subvolumes | |
92f5a8d4 TL |
812 | subvolumes = self._generate_random_subvolume_name(3) |
813 | for subvolume in subvolumes: | |
814 | self._fs_cmd("subvolume", "create", self.volname, subvolume) | |
eafe8130 TL |
815 | |
816 | # list subvolumes | |
817 | subvolumels = json.loads(self._fs_cmd('subvolume', 'ls', self.volname)) | |
818 | if len(subvolumels) == 0: | |
819 | raise RuntimeError("Expected the 'fs subvolume ls' command to list the created subvolumes.") | |
820 | else: | |
821 | subvolnames = [subvolume['name'] for subvolume in subvolumels] | |
822 | if collections.Counter(subvolnames) != collections.Counter(subvolumes): | |
823 | raise RuntimeError("Error creating or listing subvolumes") | |
824 | ||
825 | def test_subvolume_ls_for_notexistent_default_group(self): | |
826 | # tests the 'fs subvolume ls' command when the default group '_nogroup' doesn't exist | |
827 | # prerequisite: we expect that the volume is created and the default group _nogroup is | |
828 | # NOT created (i.e. a subvolume without group is not created) | |
829 | ||
830 | # list subvolumes | |
831 | subvolumels = json.loads(self._fs_cmd('subvolume', 'ls', self.volname)) | |
832 | if len(subvolumels) > 0: | |
833 | raise RuntimeError("Expected the 'fs subvolume ls' command to output an empty list.") | |
834 | ||
92f5a8d4 TL |
835 | def test_subvolume_resize_infinite_size(self): |
836 | """ | |
837 | That a subvolume can be resized to an infinite size by unsetting its quota. | |
838 | """ | |
839 | ||
840 | # create subvolume | |
841 | subvolname = self._generate_random_subvolume_name() | |
842 | self._fs_cmd("subvolume", "create", self.volname, subvolname, "--size", | |
843 | str(self.DEFAULT_FILE_SIZE*1024*1024)) | |
844 | ||
845 | # make sure it exists | |
846 | subvolpath = self._get_subvolume_path(self.volname, subvolname) | |
847 | self.assertNotEqual(subvolpath, None) | |
848 | ||
849 | # resize inf | |
850 | self._fs_cmd("subvolume", "resize", self.volname, subvolname, "inf") | |
851 | ||
852 | # verify that the quota is None | |
853 | size = self.mount_a.getfattr(subvolpath, "ceph.quota.max_bytes") | |
854 | self.assertEqual(size, None) | |
855 | ||
856 | def test_subvolume_resize_infinite_size_future_writes(self): | |
857 | """ | |
858 | That a subvolume can be resized to an infinite size and the future writes succeed. | |
859 | """ | |
860 | ||
861 | # create subvolume | |
862 | subvolname = self._generate_random_subvolume_name() | |
863 | self._fs_cmd("subvolume", "create", self.volname, subvolname, "--size", | |
864 | str(self.DEFAULT_FILE_SIZE*1024*1024*5)) | |
865 | ||
866 | # make sure it exists | |
867 | subvolpath = self._get_subvolume_path(self.volname, subvolname) | |
868 | self.assertNotEqual(subvolpath, None) | |
869 | ||
870 | # resize inf | |
871 | self._fs_cmd("subvolume", "resize", self.volname, subvolname, "inf") | |
872 | ||
873 | # verify that the quota is None | |
874 | size = self.mount_a.getfattr(subvolpath, "ceph.quota.max_bytes") | |
875 | self.assertEqual(size, None) | |
876 | ||
877 | # create one file of 10MB and try to write | |
878 | file_size=self.DEFAULT_FILE_SIZE*10 | |
879 | number_of_files=1 | |
880 | log.debug("filling subvolume {0} with {1} file of size {2}MB".format(subvolname, | |
881 | number_of_files, | |
882 | file_size)) | |
883 | filename = "{0}.{1}".format(TestVolumes.TEST_FILE_NAME_PREFIX, self.DEFAULT_NUMBER_OF_FILES+5) | |
884 | ||
885 | try: | |
886 | self.mount_a.write_n_mb(os.path.join(subvolpath, filename), file_size) | |
887 | except CommandFailedError: | |
888 | raise RuntimeError("expected filling subvolume {0} with {1} file of size {2}MB " | |
889 | "to succeed".format(subvolname, number_of_files, file_size)) | |
890 | ||
1911f103 TL |
891 | def test_subvolume_info(self): |
892 | # tests the 'fs subvolume info' command | |
893 | ||
894 | subvol_md = ["atime", "bytes_pcent", "bytes_quota", "bytes_used", "created_at", "ctime", | |
e306af50 | 895 | "data_pool", "gid", "mode", "mon_addrs", "mtime", "path", "pool_namespace", |
f6b5b4d7 | 896 | "type", "uid", "features"] |
1911f103 TL |
897 | |
898 | # create subvolume | |
899 | subvolume = self._generate_random_subvolume_name() | |
900 | self._fs_cmd("subvolume", "create", self.volname, subvolume) | |
901 | ||
902 | # get subvolume metadata | |
903 | subvol_info = json.loads(self._get_subvolume_info(self.volname, subvolume)) | |
f6b5b4d7 | 904 | self.assertNotEqual(len(subvol_info), 0, "expected the 'fs subvolume info' command to list metadata of subvolume") |
1911f103 | 905 | for md in subvol_md: |
f6b5b4d7 | 906 | self.assertIn(md, subvol_info.keys(), "'{0}' key not present in metadata of subvolume".format(md)) |
1911f103 | 907 | |
f6b5b4d7 TL |
908 | self.assertEqual(subvol_info["bytes_pcent"], "undefined", "bytes_pcent should be set to undefined if quota is not set") |
909 | self.assertEqual(subvol_info["bytes_quota"], "infinite", "bytes_quota should be set to infinite if quota is not set") | |
910 | self.assertEqual(subvol_info["pool_namespace"], "", "expected pool namespace to be empty") | |
1911f103 | 911 | |
f6b5b4d7 TL |
912 | self.assertEqual(len(subvol_info["features"]), 2, |
913 | msg="expected 2 features, found '{0}' ({1})".format(len(subvol_info["features"]), subvol_info["features"])) | |
914 | for feature in ['snapshot-clone', 'snapshot-autoprotect']: | |
915 | self.assertIn(feature, subvol_info["features"], msg="expected feature '{0}' in subvolume".format(feature)) | |
1911f103 TL |
916 | |
917 | nsize = self.DEFAULT_FILE_SIZE*1024*1024 | |
f6b5b4d7 | 918 | self._fs_cmd("subvolume", "resize", self.volname, subvolume, str(nsize)) |
1911f103 TL |
919 | |
920 | # get subvolume metadata after quota set | |
921 | subvol_info = json.loads(self._get_subvolume_info(self.volname, subvolume)) | |
f6b5b4d7 | 922 | self.assertNotEqual(len(subvol_info), 0, "expected the 'fs subvolume info' command to list metadata of subvolume") |
1911f103 | 923 | |
f6b5b4d7 TL |
924 | self.assertNotEqual(subvol_info["bytes_pcent"], "undefined", "bytes_pcent should not be set to undefined if quota is not set") |
925 | self.assertNotEqual(subvol_info["bytes_quota"], "infinite", "bytes_quota should not be set to infinite if quota is not set") | |
926 | self.assertEqual(subvol_info["type"], "subvolume", "type should be set to subvolume") | |
1911f103 | 927 | |
f6b5b4d7 TL |
928 | self.assertEqual(len(subvol_info["features"]), 2, |
929 | msg="expected 2 features, found '{0}' ({1})".format(len(subvol_info["features"]), subvol_info["features"])) | |
930 | for feature in ['snapshot-clone', 'snapshot-autoprotect']: | |
931 | self.assertIn(feature, subvol_info["features"], msg="expected feature '{0}' in subvolume".format(feature)) | |
1911f103 TL |
932 | |
933 | # remove subvolumes | |
934 | self._fs_cmd("subvolume", "rm", self.volname, subvolume) | |
935 | ||
936 | # verify trash dir is clean | |
937 | self._wait_for_trash_empty() | |
938 | ||
939 | def test_clone_subvolume_info(self): | |
940 | ||
941 | # tests the 'fs subvolume info' command for a clone | |
942 | subvol_md = ["atime", "bytes_pcent", "bytes_quota", "bytes_used", "created_at", "ctime", | |
e306af50 TL |
943 | "data_pool", "gid", "mode", "mon_addrs", "mtime", "path", "pool_namespace", |
944 | "type", "uid"] | |
1911f103 TL |
945 | |
946 | subvolume = self._generate_random_subvolume_name() | |
947 | snapshot = self._generate_random_snapshot_name() | |
948 | clone = self._generate_random_clone_name() | |
949 | ||
950 | # create subvolume | |
951 | self._fs_cmd("subvolume", "create", self.volname, subvolume) | |
952 | ||
953 | # do some IO | |
954 | self._do_subvolume_io(subvolume, number_of_files=1) | |
955 | ||
956 | # snapshot subvolume | |
957 | self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolume, snapshot) | |
958 | ||
1911f103 TL |
959 | # schedule a clone |
960 | self._fs_cmd("subvolume", "snapshot", "clone", self.volname, subvolume, snapshot, clone) | |
961 | ||
962 | # check clone status | |
963 | self._wait_for_clone_to_complete(clone) | |
964 | ||
1911f103 TL |
965 | # remove snapshot |
966 | self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume, snapshot) | |
967 | ||
968 | subvol_info = json.loads(self._get_subvolume_info(self.volname, clone)) | |
969 | if len(subvol_info) == 0: | |
970 | raise RuntimeError("Expected the 'fs subvolume info' command to list metadata of subvolume") | |
971 | for md in subvol_md: | |
972 | if md not in subvol_info.keys(): | |
973 | raise RuntimeError("%s not present in the metadata of subvolume" % md) | |
974 | if subvol_info["type"] != "clone": | |
975 | raise RuntimeError("type should be set to clone") | |
976 | ||
977 | # remove subvolumes | |
978 | self._fs_cmd("subvolume", "rm", self.volname, subvolume) | |
979 | self._fs_cmd("subvolume", "rm", self.volname, clone) | |
980 | ||
981 | # verify trash dir is clean | |
982 | self._wait_for_trash_empty() | |
983 | ||
984 | ||
81eedcae TL |
985 | ### subvolume group operations |
986 | ||
987 | def test_subvolume_create_and_rm_in_group(self): | |
988 | subvolume = self._generate_random_subvolume_name() | |
989 | group = self._generate_random_group_name() | |
990 | ||
991 | # create group | |
992 | self._fs_cmd("subvolumegroup", "create", self.volname, group) | |
993 | ||
994 | # create subvolume in group | |
995 | self._fs_cmd("subvolume", "create", self.volname, subvolume, "--group_name", group) | |
996 | ||
997 | # remove subvolume | |
998 | self._fs_cmd("subvolume", "rm", self.volname, subvolume, group) | |
999 | ||
494da23a TL |
1000 | # verify trash dir is clean |
1001 | self._wait_for_trash_empty() | |
1002 | ||
81eedcae TL |
1003 | # remove group |
1004 | self._fs_cmd("subvolumegroup", "rm", self.volname, group) | |
1005 | ||
1006 | def test_subvolume_group_create_with_desired_data_pool_layout(self): | |
f6b5b4d7 | 1007 | group1, group2 = self._generate_random_group_name(2) |
81eedcae TL |
1008 | |
1009 | # create group | |
1010 | self._fs_cmd("subvolumegroup", "create", self.volname, group1) | |
494da23a | 1011 | group1_path = self._get_subvolume_group_path(self.volname, group1) |
81eedcae TL |
1012 | |
1013 | default_pool = self.mount_a.getfattr(group1_path, "ceph.dir.layout.pool") | |
1014 | new_pool = "new_pool" | |
1015 | self.assertNotEqual(default_pool, new_pool) | |
1016 | ||
1017 | # add data pool | |
1018 | self.fs.add_data_pool(new_pool) | |
1019 | ||
1020 | # create group specifying the new data pool as its pool layout | |
1021 | self._fs_cmd("subvolumegroup", "create", self.volname, group2, | |
1022 | "--pool_layout", new_pool) | |
494da23a | 1023 | group2_path = self._get_subvolume_group_path(self.volname, group2) |
81eedcae TL |
1024 | |
1025 | desired_pool = self.mount_a.getfattr(group2_path, "ceph.dir.layout.pool") | |
1026 | self.assertEqual(desired_pool, new_pool) | |
1027 | ||
1028 | self._fs_cmd("subvolumegroup", "rm", self.volname, group1) | |
1029 | self._fs_cmd("subvolumegroup", "rm", self.volname, group2) | |
1030 | ||
eafe8130 TL |
1031 | def test_subvolume_group_create_with_invalid_data_pool_layout(self): |
1032 | group = self._generate_random_group_name() | |
1033 | data_pool = "invalid_pool" | |
1034 | # create group with invalid data pool layout | |
1035 | try: | |
1036 | self._fs_cmd("subvolumegroup", "create", self.volname, group, "--pool_layout", data_pool) | |
1037 | except CommandFailedError as ce: | |
1038 | if ce.exitstatus != errno.EINVAL: | |
1039 | raise | |
1040 | else: | |
92f5a8d4 TL |
1041 | raise RuntimeError("expected the 'fs subvolumegroup create' command to fail") |
1042 | ||
1043 | def test_subvolume_group_rm_force(self): | |
1044 | # test removing non-existing subvolume group with --force | |
1045 | group = self._generate_random_group_name() | |
1046 | try: | |
1047 | self._fs_cmd("subvolumegroup", "rm", self.volname, group, "--force") | |
9f95a23c | 1048 | except CommandFailedError: |
92f5a8d4 | 1049 | raise RuntimeError("expected the 'fs subvolumegroup rm --force' command to succeed") |
eafe8130 TL |
1050 | |
1051 | def test_subvolume_group_create_with_auto_cleanup_on_fail(self): | |
1052 | group = self._generate_random_group_name() | |
1053 | data_pool = "invalid_pool" | |
1054 | # create group with invalid data pool layout | |
1055 | with self.assertRaises(CommandFailedError): | |
1056 | self._fs_cmd("subvolumegroup", "create", self.volname, group, "--pool_layout", data_pool) | |
1057 | ||
1058 | # check whether group path is cleaned up | |
1059 | try: | |
1060 | self._fs_cmd("subvolumegroup", "getpath", self.volname, group) | |
1061 | except CommandFailedError as ce: | |
1062 | if ce.exitstatus != errno.ENOENT: | |
1063 | raise | |
1064 | else: | |
92f5a8d4 | 1065 | raise RuntimeError("expected the 'fs subvolumegroup getpath' command to fail") |
eafe8130 | 1066 | |
81eedcae | 1067 | def test_subvolume_create_with_desired_data_pool_layout_in_group(self): |
f6b5b4d7 | 1068 | subvol1, subvol2 = self._generate_random_subvolume_name(2) |
81eedcae TL |
1069 | group = self._generate_random_group_name() |
1070 | ||
1071 | # create group. this also helps set default pool layout for subvolumes | |
1072 | # created within the group. | |
1073 | self._fs_cmd("subvolumegroup", "create", self.volname, group) | |
1074 | ||
1075 | # create subvolume in group. | |
1076 | self._fs_cmd("subvolume", "create", self.volname, subvol1, "--group_name", group) | |
1077 | subvol1_path = self._get_subvolume_path(self.volname, subvol1, group_name=group) | |
1078 | ||
1079 | default_pool = self.mount_a.getfattr(subvol1_path, "ceph.dir.layout.pool") | |
1080 | new_pool = "new_pool" | |
1081 | self.assertNotEqual(default_pool, new_pool) | |
1082 | ||
1083 | # add data pool | |
1084 | self.fs.add_data_pool(new_pool) | |
1085 | ||
1086 | # create subvolume specifying the new data pool as its pool layout | |
1087 | self._fs_cmd("subvolume", "create", self.volname, subvol2, "--group_name", group, | |
1088 | "--pool_layout", new_pool) | |
1089 | subvol2_path = self._get_subvolume_path(self.volname, subvol2, group_name=group) | |
1090 | ||
1091 | desired_pool = self.mount_a.getfattr(subvol2_path, "ceph.dir.layout.pool") | |
1092 | self.assertEqual(desired_pool, new_pool) | |
1093 | ||
1094 | self._fs_cmd("subvolume", "rm", self.volname, subvol2, group) | |
1095 | self._fs_cmd("subvolume", "rm", self.volname, subvol1, group) | |
1096 | self._fs_cmd("subvolumegroup", "rm", self.volname, group) | |
1097 | ||
1098 | def test_subvolume_group_create_with_desired_mode(self): | |
f6b5b4d7 | 1099 | group1, group2 = self._generate_random_group_name(2) |
81eedcae TL |
1100 | # default mode |
1101 | expected_mode1 = "755" | |
1102 | # desired mode | |
1103 | expected_mode2 = "777" | |
1104 | ||
1105 | # create group | |
1106 | self._fs_cmd("subvolumegroup", "create", self.volname, group1) | |
1107 | self._fs_cmd("subvolumegroup", "create", self.volname, group2, "--mode", "777") | |
1108 | ||
494da23a TL |
1109 | group1_path = self._get_subvolume_group_path(self.volname, group1) |
1110 | group2_path = self._get_subvolume_group_path(self.volname, group2) | |
81eedcae TL |
1111 | |
1112 | # check group's mode | |
1113 | actual_mode1 = self.mount_a.run_shell(['stat', '-c' '%a', group1_path]).stdout.getvalue().strip() | |
1114 | actual_mode2 = self.mount_a.run_shell(['stat', '-c' '%a', group2_path]).stdout.getvalue().strip() | |
1115 | self.assertEqual(actual_mode1, expected_mode1) | |
1116 | self.assertEqual(actual_mode2, expected_mode2) | |
1117 | ||
1118 | self._fs_cmd("subvolumegroup", "rm", self.volname, group1) | |
1119 | self._fs_cmd("subvolumegroup", "rm", self.volname, group2) | |
1120 | ||
92f5a8d4 TL |
1121 | def test_subvolume_group_create_with_desired_uid_gid(self): |
1122 | """ | |
1123 | That the subvolume group can be created with the desired uid and gid and its uid and gid matches the | |
1124 | expected values. | |
1125 | """ | |
1126 | uid = 1000 | |
1127 | gid = 1000 | |
1128 | ||
1129 | # create subvolume group | |
1130 | subvolgroupname = self._generate_random_group_name() | |
1131 | self._fs_cmd("subvolumegroup", "create", self.volname, subvolgroupname, "--uid", str(uid), "--gid", str(gid)) | |
1132 | ||
1133 | # make sure it exists | |
1134 | subvolgrouppath = self._get_subvolume_group_path(self.volname, subvolgroupname) | |
1135 | self.assertNotEqual(subvolgrouppath, None) | |
1136 | ||
1137 | # verify the uid and gid | |
1138 | suid = int(self.mount_a.run_shell(['stat', '-c' '%u', subvolgrouppath]).stdout.getvalue().strip()) | |
1139 | sgid = int(self.mount_a.run_shell(['stat', '-c' '%g', subvolgrouppath]).stdout.getvalue().strip()) | |
1140 | self.assertEqual(uid, suid) | |
1141 | self.assertEqual(gid, sgid) | |
1142 | ||
1143 | # remove group | |
1144 | self._fs_cmd("subvolumegroup", "rm", self.volname, subvolgroupname) | |
1145 | ||
81eedcae | 1146 | def test_subvolume_create_with_desired_mode_in_group(self): |
f6b5b4d7 TL |
1147 | subvol1, subvol2, subvol3 = self._generate_random_subvolume_name(3) |
1148 | ||
81eedcae TL |
1149 | group = self._generate_random_group_name() |
1150 | # default mode | |
1151 | expected_mode1 = "755" | |
1152 | # desired mode | |
1153 | expected_mode2 = "777" | |
1154 | ||
1155 | # create group | |
1156 | self._fs_cmd("subvolumegroup", "create", self.volname, group) | |
1157 | ||
1158 | # create subvolume in group | |
1159 | self._fs_cmd("subvolume", "create", self.volname, subvol1, "--group_name", group) | |
1160 | self._fs_cmd("subvolume", "create", self.volname, subvol2, "--group_name", group, "--mode", "777") | |
1161 | # check whether mode 0777 also works | |
1162 | self._fs_cmd("subvolume", "create", self.volname, subvol3, "--group_name", group, "--mode", "0777") | |
1163 | ||
1164 | subvol1_path = self._get_subvolume_path(self.volname, subvol1, group_name=group) | |
1165 | subvol2_path = self._get_subvolume_path(self.volname, subvol2, group_name=group) | |
1166 | subvol3_path = self._get_subvolume_path(self.volname, subvol3, group_name=group) | |
1167 | ||
1168 | # check subvolume's mode | |
1169 | actual_mode1 = self.mount_a.run_shell(['stat', '-c' '%a', subvol1_path]).stdout.getvalue().strip() | |
1170 | actual_mode2 = self.mount_a.run_shell(['stat', '-c' '%a', subvol2_path]).stdout.getvalue().strip() | |
1171 | actual_mode3 = self.mount_a.run_shell(['stat', '-c' '%a', subvol3_path]).stdout.getvalue().strip() | |
1172 | self.assertEqual(actual_mode1, expected_mode1) | |
1173 | self.assertEqual(actual_mode2, expected_mode2) | |
1174 | self.assertEqual(actual_mode3, expected_mode2) | |
1175 | ||
81eedcae | 1176 | self._fs_cmd("subvolume", "rm", self.volname, subvol1, group) |
494da23a TL |
1177 | self._fs_cmd("subvolume", "rm", self.volname, subvol2, group) |
1178 | self._fs_cmd("subvolume", "rm", self.volname, subvol3, group) | |
81eedcae TL |
1179 | self._fs_cmd("subvolumegroup", "rm", self.volname, group) |
1180 | ||
92f5a8d4 TL |
1181 | def test_subvolume_create_with_desired_uid_gid(self): |
1182 | """ | |
1183 | That the subvolume can be created with the desired uid and gid and its uid and gid matches the | |
1184 | expected values. | |
1185 | """ | |
1186 | uid = 1000 | |
1187 | gid = 1000 | |
1188 | ||
1189 | # create subvolume | |
1190 | subvolname = self._generate_random_subvolume_name() | |
1191 | self._fs_cmd("subvolume", "create", self.volname, subvolname, "--uid", str(uid), "--gid", str(gid)) | |
1192 | ||
1193 | # make sure it exists | |
1194 | subvolpath = self._get_subvolume_path(self.volname, subvolname) | |
1195 | self.assertNotEqual(subvolpath, None) | |
1196 | ||
1197 | # verify the uid and gid | |
1198 | suid = int(self.mount_a.run_shell(['stat', '-c' '%u', subvolpath]).stdout.getvalue().strip()) | |
1199 | sgid = int(self.mount_a.run_shell(['stat', '-c' '%g', subvolpath]).stdout.getvalue().strip()) | |
1200 | self.assertEqual(uid, suid) | |
1201 | self.assertEqual(gid, sgid) | |
1202 | ||
1203 | # remove subvolume | |
1204 | self._fs_cmd("subvolume", "rm", self.volname, subvolname) | |
1205 | ||
1206 | def test_nonexistent_subvolume_group_rm(self): | |
81eedcae TL |
1207 | group = "non_existent_group" |
1208 | ||
1209 | # try, remove subvolume group | |
1210 | try: | |
1211 | self._fs_cmd("subvolumegroup", "rm", self.volname, group) | |
1212 | except CommandFailedError as ce: | |
1213 | if ce.exitstatus != errno.ENOENT: | |
1214 | raise | |
92f5a8d4 TL |
1215 | else: |
1216 | raise RuntimeError("expected the 'fs subvolumegroup rm' command to fail") | |
81eedcae | 1217 | |
494da23a TL |
1218 | def test_default_uid_gid_subvolume_group(self): |
1219 | group = self._generate_random_group_name() | |
1220 | expected_uid = 0 | |
1221 | expected_gid = 0 | |
1222 | ||
1223 | # create group | |
1224 | self._fs_cmd("subvolumegroup", "create", self.volname, group) | |
1225 | group_path = self._get_subvolume_group_path(self.volname, group) | |
1226 | ||
1227 | # check group's uid and gid | |
1228 | stat = self.mount_a.stat(group_path) | |
1229 | self.assertEqual(stat['st_uid'], expected_uid) | |
1230 | self.assertEqual(stat['st_gid'], expected_gid) | |
1231 | ||
1232 | # remove group | |
1233 | self._fs_cmd("subvolumegroup", "rm", self.volname, group) | |
1234 | ||
eafe8130 TL |
1235 | def test_subvolume_group_ls(self): |
1236 | # tests the 'fs subvolumegroup ls' command | |
1237 | ||
1238 | subvolumegroups = [] | |
1239 | ||
1240 | #create subvolumegroups | |
92f5a8d4 TL |
1241 | subvolumegroups = self._generate_random_group_name(3) |
1242 | for groupname in subvolumegroups: | |
eafe8130 | 1243 | self._fs_cmd("subvolumegroup", "create", self.volname, groupname) |
eafe8130 TL |
1244 | |
1245 | subvolumegroupls = json.loads(self._fs_cmd('subvolumegroup', 'ls', self.volname)) | |
1246 | if len(subvolumegroupls) == 0: | |
1247 | raise RuntimeError("Expected the 'fs subvolumegroup ls' command to list the created subvolume groups") | |
1248 | else: | |
1249 | subvolgroupnames = [subvolumegroup['name'] for subvolumegroup in subvolumegroupls] | |
1250 | if collections.Counter(subvolgroupnames) != collections.Counter(subvolumegroups): | |
1251 | raise RuntimeError("Error creating or listing subvolume groups") | |
1252 | ||
1253 | def test_subvolume_group_ls_for_nonexistent_volume(self): | |
1254 | # tests the 'fs subvolumegroup ls' command when /volume doesn't exist | |
1255 | # prerequisite: we expect that the test volume is created and a subvolumegroup is NOT created | |
1256 | ||
1257 | # list subvolume groups | |
1258 | subvolumegroupls = json.loads(self._fs_cmd('subvolumegroup', 'ls', self.volname)) | |
1259 | if len(subvolumegroupls) > 0: | |
1260 | raise RuntimeError("Expected the 'fs subvolumegroup ls' command to output an empty list") | |
1261 | ||
81eedcae TL |
1262 | ### snapshot operations |
1263 | ||
1264 | def test_subvolume_snapshot_create_and_rm(self): | |
1265 | subvolume = self._generate_random_subvolume_name() | |
1266 | snapshot = self._generate_random_snapshot_name() | |
1267 | ||
1268 | # create subvolume | |
1269 | self._fs_cmd("subvolume", "create", self.volname, subvolume) | |
1270 | ||
1271 | # snapshot subvolume | |
1272 | self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolume, snapshot) | |
1273 | ||
1274 | # remove snapshot | |
1275 | self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume, snapshot) | |
1276 | ||
1277 | # remove subvolume | |
1278 | self._fs_cmd("subvolume", "rm", self.volname, subvolume) | |
1279 | ||
494da23a TL |
1280 | # verify trash dir is clean |
1281 | self._wait_for_trash_empty() | |
1282 | ||
e306af50 TL |
1283 | def test_subvolume_snapshot_info(self): |
1284 | ||
1285 | """ | |
1286 | tests the 'fs subvolume snapshot info' command | |
1287 | """ | |
1288 | ||
f6b5b4d7 | 1289 | snap_metadata = ["created_at", "data_pool", "has_pending_clones", "size"] |
e306af50 TL |
1290 | |
1291 | subvolume = self._generate_random_subvolume_name() | |
1292 | snapshot = self._generate_random_snapshot_name() | |
1293 | ||
1294 | # create subvolume | |
1295 | self._fs_cmd("subvolume", "create", self.volname, subvolume) | |
1296 | ||
1297 | # do some IO | |
1298 | self._do_subvolume_io(subvolume, number_of_files=1) | |
1299 | ||
1300 | # snapshot subvolume | |
1301 | self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolume, snapshot) | |
1302 | ||
e306af50 TL |
1303 | snap_info = json.loads(self._get_subvolume_snapshot_info(self.volname, subvolume, snapshot)) |
1304 | self.assertNotEqual(len(snap_info), 0) | |
1305 | for md in snap_metadata: | |
1306 | if md not in snap_info: | |
1307 | raise RuntimeError("%s not present in the metadata of subvolume snapshot" % md) | |
e306af50 TL |
1308 | self.assertEqual(snap_info["has_pending_clones"], "no") |
1309 | ||
e306af50 TL |
1310 | # remove snapshot |
1311 | self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume, snapshot) | |
1312 | ||
1313 | # remove subvolume | |
1314 | self._fs_cmd("subvolume", "rm", self.volname, subvolume) | |
1315 | ||
1316 | # verify trash dir is clean | |
1317 | self._wait_for_trash_empty() | |
1318 | ||
81eedcae TL |
1319 | def test_subvolume_snapshot_create_idempotence(self): |
1320 | subvolume = self._generate_random_subvolume_name() | |
1321 | snapshot = self._generate_random_snapshot_name() | |
1322 | ||
1323 | # create subvolume | |
1324 | self._fs_cmd("subvolume", "create", self.volname, subvolume) | |
1325 | ||
1326 | # snapshot subvolume | |
1327 | self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolume, snapshot) | |
1328 | ||
1329 | # try creating w/ same subvolume snapshot name -- should be idempotent | |
1330 | self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolume, snapshot) | |
1331 | ||
1332 | # remove snapshot | |
1333 | self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume, snapshot) | |
1334 | ||
1335 | # remove subvolume | |
1336 | self._fs_cmd("subvolume", "rm", self.volname, subvolume) | |
1337 | ||
494da23a TL |
1338 | # verify trash dir is clean |
1339 | self._wait_for_trash_empty() | |
1340 | ||
81eedcae TL |
1341 | def test_nonexistent_subvolume_snapshot_rm(self): |
1342 | subvolume = self._generate_random_subvolume_name() | |
1343 | snapshot = self._generate_random_snapshot_name() | |
1344 | ||
1345 | # create subvolume | |
1346 | self._fs_cmd("subvolume", "create", self.volname, subvolume) | |
1347 | ||
1348 | # snapshot subvolume | |
1349 | self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolume, snapshot) | |
1350 | ||
1351 | # remove snapshot | |
1352 | self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume, snapshot) | |
1353 | ||
1354 | # remove snapshot again | |
1355 | try: | |
1356 | self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume, snapshot) | |
1357 | except CommandFailedError as ce: | |
1358 | if ce.exitstatus != errno.ENOENT: | |
1359 | raise | |
92f5a8d4 TL |
1360 | else: |
1361 | raise RuntimeError("expected the 'fs subvolume snapshot rm' command to fail") | |
81eedcae TL |
1362 | |
1363 | # remove subvolume | |
1364 | self._fs_cmd("subvolume", "rm", self.volname, subvolume) | |
1365 | ||
494da23a TL |
1366 | # verify trash dir is clean |
1367 | self._wait_for_trash_empty() | |
1368 | ||
92f5a8d4 TL |
1369 | def test_subvolume_snapshot_rm_force(self): |
1370 | # test removing non existing subvolume snapshot with --force | |
1371 | subvolume = self._generate_random_subvolume_name() | |
1372 | snapshot = self._generate_random_snapshot_name() | |
1373 | ||
1374 | # remove snapshot | |
1375 | try: | |
1376 | self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume, snapshot, "--force") | |
9f95a23c | 1377 | except CommandFailedError: |
92f5a8d4 TL |
1378 | raise RuntimeError("expected the 'fs subvolume snapshot rm --force' command to succeed") |
1379 | ||
81eedcae TL |
1380 | def test_subvolume_snapshot_in_group(self): |
1381 | subvolume = self._generate_random_subvolume_name() | |
1382 | group = self._generate_random_group_name() | |
1383 | snapshot = self._generate_random_snapshot_name() | |
1384 | ||
1385 | # create group | |
1386 | self._fs_cmd("subvolumegroup", "create", self.volname, group) | |
1387 | ||
1388 | # create subvolume in group | |
1389 | self._fs_cmd("subvolume", "create", self.volname, subvolume, "--group_name", group) | |
1390 | ||
1391 | # snapshot subvolume in group | |
1392 | self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolume, snapshot, group) | |
1393 | ||
1394 | # remove snapshot | |
1395 | self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume, snapshot, group) | |
1396 | ||
1397 | # remove subvolume | |
1398 | self._fs_cmd("subvolume", "rm", self.volname, subvolume, group) | |
1399 | ||
494da23a TL |
1400 | # verify trash dir is clean |
1401 | self._wait_for_trash_empty() | |
1402 | ||
81eedcae TL |
1403 | # remove group |
1404 | self._fs_cmd("subvolumegroup", "rm", self.volname, group) | |
1405 | ||
eafe8130 TL |
1406 | def test_subvolume_snapshot_ls(self): |
1407 | # tests the 'fs subvolume snapshot ls' command | |
1408 | ||
1409 | snapshots = [] | |
1410 | ||
1411 | # create subvolume | |
1412 | subvolume = self._generate_random_subvolume_name() | |
1413 | self._fs_cmd("subvolume", "create", self.volname, subvolume) | |
1414 | ||
1415 | # create subvolume snapshots | |
92f5a8d4 TL |
1416 | snapshots = self._generate_random_snapshot_name(3) |
1417 | for snapshot in snapshots: | |
1418 | self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolume, snapshot) | |
eafe8130 TL |
1419 | |
1420 | subvolsnapshotls = json.loads(self._fs_cmd('subvolume', 'snapshot', 'ls', self.volname, subvolume)) | |
1421 | if len(subvolsnapshotls) == 0: | |
1422 | raise RuntimeError("Expected the 'fs subvolume snapshot ls' command to list the created subvolume snapshots") | |
1423 | else: | |
1424 | snapshotnames = [snapshot['name'] for snapshot in subvolsnapshotls] | |
1425 | if collections.Counter(snapshotnames) != collections.Counter(snapshots): | |
1426 | raise RuntimeError("Error creating or listing subvolume snapshots") | |
1427 | ||
81eedcae TL |
1428 | def test_subvolume_group_snapshot_create_and_rm(self): |
1429 | subvolume = self._generate_random_subvolume_name() | |
1430 | group = self._generate_random_group_name() | |
1431 | snapshot = self._generate_random_snapshot_name() | |
1432 | ||
1433 | # create group | |
1434 | self._fs_cmd("subvolumegroup", "create", self.volname, group) | |
1435 | ||
1436 | # create subvolume in group | |
1437 | self._fs_cmd("subvolume", "create", self.volname, subvolume, "--group_name", group) | |
1438 | ||
1439 | # snapshot group | |
1440 | self._fs_cmd("subvolumegroup", "snapshot", "create", self.volname, group, snapshot) | |
1441 | ||
1442 | # remove snapshot | |
1443 | self._fs_cmd("subvolumegroup", "snapshot", "rm", self.volname, group, snapshot) | |
1444 | ||
1445 | # remove subvolume | |
1446 | self._fs_cmd("subvolume", "rm", self.volname, subvolume, group) | |
1447 | ||
494da23a TL |
1448 | # verify trash dir is clean |
1449 | self._wait_for_trash_empty() | |
1450 | ||
81eedcae TL |
1451 | # remove group |
1452 | self._fs_cmd("subvolumegroup", "rm", self.volname, group) | |
1453 | ||
1454 | def test_subvolume_group_snapshot_idempotence(self): | |
1455 | subvolume = self._generate_random_subvolume_name() | |
1456 | group = self._generate_random_group_name() | |
1457 | snapshot = self._generate_random_snapshot_name() | |
1458 | ||
1459 | # create group | |
1460 | self._fs_cmd("subvolumegroup", "create", self.volname, group) | |
1461 | ||
1462 | # create subvolume in group | |
1463 | self._fs_cmd("subvolume", "create", self.volname, subvolume, "--group_name", group) | |
1464 | ||
1465 | # snapshot group | |
1466 | self._fs_cmd("subvolumegroup", "snapshot", "create", self.volname, group, snapshot) | |
1467 | ||
1468 | # try creating snapshot w/ same snapshot name -- shoule be idempotent | |
1469 | self._fs_cmd("subvolumegroup", "snapshot", "create", self.volname, group, snapshot) | |
1470 | ||
1471 | # remove snapshot | |
1472 | self._fs_cmd("subvolumegroup", "snapshot", "rm", self.volname, group, snapshot) | |
1473 | ||
1474 | # remove subvolume | |
1475 | self._fs_cmd("subvolume", "rm", self.volname, subvolume, group) | |
1476 | ||
494da23a TL |
1477 | # verify trash dir is clean |
1478 | self._wait_for_trash_empty() | |
1479 | ||
81eedcae TL |
1480 | # remove group |
1481 | self._fs_cmd("subvolumegroup", "rm", self.volname, group) | |
1482 | ||
1483 | def test_nonexistent_subvolume_group_snapshot_rm(self): | |
1484 | subvolume = self._generate_random_subvolume_name() | |
1485 | group = self._generate_random_group_name() | |
1486 | snapshot = self._generate_random_snapshot_name() | |
1487 | ||
1488 | # create group | |
1489 | self._fs_cmd("subvolumegroup", "create", self.volname, group) | |
1490 | ||
1491 | # create subvolume in group | |
1492 | self._fs_cmd("subvolume", "create", self.volname, subvolume, "--group_name", group) | |
1493 | ||
1494 | # snapshot group | |
1495 | self._fs_cmd("subvolumegroup", "snapshot", "create", self.volname, group, snapshot) | |
1496 | ||
1497 | # remove snapshot | |
1498 | self._fs_cmd("subvolumegroup", "snapshot", "rm", self.volname, group, snapshot) | |
1499 | ||
1500 | # remove snapshot | |
1501 | try: | |
1502 | self._fs_cmd("subvolumegroup", "snapshot", "rm", self.volname, group, snapshot) | |
1503 | except CommandFailedError as ce: | |
1504 | if ce.exitstatus != errno.ENOENT: | |
1505 | raise | |
92f5a8d4 TL |
1506 | else: |
1507 | raise RuntimeError("expected the 'fs subvolumegroup snapshot rm' command to fail") | |
81eedcae TL |
1508 | |
1509 | # remove subvolume | |
1510 | self._fs_cmd("subvolume", "rm", self.volname, subvolume, group) | |
1511 | ||
494da23a TL |
1512 | # verify trash dir is clean |
1513 | self._wait_for_trash_empty() | |
1514 | ||
81eedcae TL |
1515 | # remove group |
1516 | self._fs_cmd("subvolumegroup", "rm", self.volname, group) | |
494da23a | 1517 | |
92f5a8d4 TL |
1518 | def test_subvolume_group_snapshot_rm_force(self): |
1519 | # test removing non-existing subvolume group snapshot with --force | |
1520 | group = self._generate_random_group_name() | |
1521 | snapshot = self._generate_random_snapshot_name() | |
1522 | # remove snapshot | |
1523 | try: | |
1524 | self._fs_cmd("subvolumegroup", "snapshot", "rm", self.volname, group, snapshot, "--force") | |
9f95a23c | 1525 | except CommandFailedError: |
92f5a8d4 TL |
1526 | raise RuntimeError("expected the 'fs subvolumegroup snapshot rm --force' command to succeed") |
1527 | ||
eafe8130 TL |
1528 | def test_subvolume_group_snapshot_ls(self): |
1529 | # tests the 'fs subvolumegroup snapshot ls' command | |
1530 | ||
1531 | snapshots = [] | |
1532 | ||
1533 | # create group | |
1534 | group = self._generate_random_group_name() | |
1535 | self._fs_cmd("subvolumegroup", "create", self.volname, group) | |
1536 | ||
1537 | # create subvolumegroup snapshots | |
92f5a8d4 TL |
1538 | snapshots = self._generate_random_snapshot_name(3) |
1539 | for snapshot in snapshots: | |
1540 | self._fs_cmd("subvolumegroup", "snapshot", "create", self.volname, group, snapshot) | |
eafe8130 TL |
1541 | |
1542 | subvolgrpsnapshotls = json.loads(self._fs_cmd('subvolumegroup', 'snapshot', 'ls', self.volname, group)) | |
1543 | if len(subvolgrpsnapshotls) == 0: | |
1544 | raise RuntimeError("Expected the 'fs subvolumegroup snapshot ls' command to list the created subvolume group snapshots") | |
1545 | else: | |
1546 | snapshotnames = [snapshot['name'] for snapshot in subvolgrpsnapshotls] | |
1547 | if collections.Counter(snapshotnames) != collections.Counter(snapshots): | |
1548 | raise RuntimeError("Error creating or listing subvolume group snapshots") | |
1549 | ||
494da23a | 1550 | def test_async_subvolume_rm(self): |
92f5a8d4 TL |
1551 | subvolumes = self._generate_random_subvolume_name(100) |
1552 | ||
1553 | # create subvolumes | |
1554 | for subvolume in subvolumes: | |
1555 | self._fs_cmd("subvolume", "create", self.volname, subvolume) | |
1556 | self._do_subvolume_io(subvolume, number_of_files=10) | |
1557 | ||
1558 | self.mount_a.umount_wait() | |
1559 | ||
1560 | # remove subvolumes | |
1561 | for subvolume in subvolumes: | |
1562 | self._fs_cmd("subvolume", "rm", self.volname, subvolume) | |
1563 | ||
e306af50 | 1564 | self.mount_a.mount_wait() |
92f5a8d4 TL |
1565 | |
1566 | # verify trash dir is clean | |
1567 | self._wait_for_trash_empty(timeout=300) | |
1568 | ||
9f95a23c TL |
1569 | def test_mgr_eviction(self): |
1570 | # unmount any cephfs mounts | |
1571 | self.mount_a.umount_wait() | |
1572 | sessions = self._session_list() | |
1573 | self.assertLessEqual(len(sessions), 1) # maybe mgr is already mounted | |
1574 | ||
1575 | # Get the mgr to definitely mount cephfs | |
1576 | subvolume = self._generate_random_subvolume_name() | |
1577 | self._fs_cmd("subvolume", "create", self.volname, subvolume) | |
1578 | sessions = self._session_list() | |
1579 | self.assertEqual(len(sessions), 1) | |
1580 | ||
1581 | # Now fail the mgr, check the session was evicted | |
1582 | mgr = self.mgr_cluster.get_active_id() | |
1583 | self.mgr_cluster.mgr_fail(mgr) | |
1584 | self.wait_until_evicted(sessions[0]['id']) | |
1585 | ||
92f5a8d4 TL |
1586 | def test_subvolume_upgrade(self): |
1587 | """ | |
1588 | poor man's upgrade test -- rather than going through a full upgrade cycle, | |
1589 | emulate subvolumes by going through the wormhole and verify if they are | |
1590 | accessible. | |
1591 | """ | |
1592 | subvolume1, subvolume2 = self._generate_random_subvolume_name(2) | |
1593 | group = self._generate_random_group_name() | |
1594 | ||
1595 | # emulate a old-fashioned subvolume -- one in the default group and | |
1596 | # the other in a custom group | |
1597 | createpath1 = os.path.join(".", "volumes", "_nogroup", subvolume1) | |
1598 | self.mount_a.run_shell(['mkdir', '-p', createpath1]) | |
1599 | ||
1600 | # create group | |
1601 | createpath2 = os.path.join(".", "volumes", group, subvolume2) | |
1602 | self.mount_a.run_shell(['mkdir', '-p', createpath2]) | |
1603 | ||
1604 | # this would auto-upgrade on access without anyone noticing | |
1605 | subvolpath1 = self._fs_cmd("subvolume", "getpath", self.volname, subvolume1) | |
1606 | self.assertNotEqual(subvolpath1, None) | |
1607 | subvolpath1 = subvolpath1.rstrip() # remove "/" prefix and any trailing newline | |
1608 | ||
1609 | subvolpath2 = self._fs_cmd("subvolume", "getpath", self.volname, subvolume2, group) | |
1610 | self.assertNotEqual(subvolpath2, None) | |
1611 | subvolpath2 = subvolpath2.rstrip() # remove "/" prefix and any trailing newline | |
1612 | ||
1613 | # and... the subvolume path returned should be what we created behind the scene | |
1614 | self.assertEqual(createpath1[1:], subvolpath1) | |
1615 | self.assertEqual(createpath2[1:], subvolpath2) | |
1616 | ||
1617 | # remove subvolume | |
1618 | self._fs_cmd("subvolume", "rm", self.volname, subvolume1) | |
1619 | self._fs_cmd("subvolume", "rm", self.volname, subvolume2, group) | |
1620 | ||
1621 | # verify trash dir is clean | |
1622 | self._wait_for_trash_empty() | |
1623 | ||
1624 | # remove group | |
1625 | self._fs_cmd("subvolumegroup", "rm", self.volname, group) | |
1626 | ||
1627 | def test_subvolume_rm_with_snapshots(self): | |
494da23a | 1628 | subvolume = self._generate_random_subvolume_name() |
92f5a8d4 | 1629 | snapshot = self._generate_random_snapshot_name() |
494da23a TL |
1630 | |
1631 | # create subvolume | |
1632 | self._fs_cmd("subvolume", "create", self.volname, subvolume) | |
1633 | ||
92f5a8d4 TL |
1634 | # snapshot subvolume |
1635 | self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolume, snapshot) | |
494da23a | 1636 | |
92f5a8d4 TL |
1637 | # remove subvolume -- should fail with ENOTEMPTY since it has snapshots |
1638 | try: | |
1639 | self._fs_cmd("subvolume", "rm", self.volname, subvolume) | |
1640 | except CommandFailedError as ce: | |
1641 | if ce.exitstatus != errno.ENOTEMPTY: | |
1642 | raise RuntimeError("invalid error code returned when deleting subvolume with snapshots") | |
1643 | else: | |
1644 | raise RuntimeError("expected subvolume deletion to fail") | |
1645 | ||
1646 | # remove snapshot | |
1647 | self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume, snapshot) | |
494da23a TL |
1648 | |
1649 | # remove subvolume | |
1650 | self._fs_cmd("subvolume", "rm", self.volname, subvolume) | |
1651 | ||
92f5a8d4 TL |
1652 | # verify trash dir is clean |
1653 | self._wait_for_trash_empty() | |
1654 | ||
f6b5b4d7 TL |
1655 | def test_subvolume_snapshot_protect_unprotect_sanity(self): |
1656 | """ | |
1657 | Snapshot protect/unprotect commands are deprecated. This test exists to ensure that | |
1658 | invoking the command does not cause errors, till they are removed from a subsequent release. | |
1659 | """ | |
92f5a8d4 TL |
1660 | subvolume = self._generate_random_subvolume_name() |
1661 | snapshot = self._generate_random_snapshot_name() | |
f6b5b4d7 | 1662 | clone = self._generate_random_clone_name() |
92f5a8d4 TL |
1663 | |
1664 | # create subvolume | |
1665 | self._fs_cmd("subvolume", "create", self.volname, subvolume) | |
1666 | ||
f6b5b4d7 TL |
1667 | # do some IO |
1668 | self._do_subvolume_io(subvolume, number_of_files=64) | |
92f5a8d4 TL |
1669 | |
1670 | # snapshot subvolume | |
1671 | self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolume, snapshot) | |
1672 | ||
1673 | # now, protect snapshot | |
1674 | self._fs_cmd("subvolume", "snapshot", "protect", self.volname, subvolume, snapshot) | |
1675 | ||
f6b5b4d7 TL |
1676 | # schedule a clone |
1677 | self._fs_cmd("subvolume", "snapshot", "clone", self.volname, subvolume, snapshot, clone) | |
92f5a8d4 | 1678 | |
f6b5b4d7 TL |
1679 | # check clone status |
1680 | self._wait_for_clone_to_complete(clone) | |
92f5a8d4 TL |
1681 | |
1682 | # now, unprotect snapshot | |
1683 | self._fs_cmd("subvolume", "snapshot", "unprotect", self.volname, subvolume, snapshot) | |
1684 | ||
1685 | # remove snapshot | |
1686 | self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume, snapshot) | |
1687 | ||
f6b5b4d7 TL |
1688 | # verify clone |
1689 | self._verify_clone(subvolume, clone) | |
92f5a8d4 TL |
1690 | |
1691 | # remove subvolumes | |
1692 | self._fs_cmd("subvolume", "rm", self.volname, subvolume) | |
f6b5b4d7 | 1693 | self._fs_cmd("subvolume", "rm", self.volname, clone) |
92f5a8d4 TL |
1694 | |
1695 | # verify trash dir is clean | |
1696 | self._wait_for_trash_empty() | |
1697 | ||
1698 | def test_subvolume_snapshot_clone(self): | |
1699 | subvolume = self._generate_random_subvolume_name() | |
1700 | snapshot = self._generate_random_snapshot_name() | |
1701 | clone = self._generate_random_clone_name() | |
1702 | ||
1703 | # create subvolume | |
1704 | self._fs_cmd("subvolume", "create", self.volname, subvolume) | |
1705 | ||
1706 | # do some IO | |
1707 | self._do_subvolume_io(subvolume, number_of_files=64) | |
1708 | ||
1709 | # snapshot subvolume | |
1710 | self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolume, snapshot) | |
1711 | ||
92f5a8d4 TL |
1712 | # schedule a clone |
1713 | self._fs_cmd("subvolume", "snapshot", "clone", self.volname, subvolume, snapshot, clone) | |
1714 | ||
92f5a8d4 TL |
1715 | # check clone status |
1716 | self._wait_for_clone_to_complete(clone) | |
1717 | ||
92f5a8d4 TL |
1718 | # remove snapshot |
1719 | self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume, snapshot) | |
1720 | ||
1721 | # verify clone | |
1722 | self._verify_clone(subvolume, clone) | |
1723 | ||
1724 | # remove subvolumes | |
1725 | self._fs_cmd("subvolume", "rm", self.volname, subvolume) | |
1726 | self._fs_cmd("subvolume", "rm", self.volname, clone) | |
1727 | ||
1728 | # verify trash dir is clean | |
1729 | self._wait_for_trash_empty() | |
1730 | ||
1731 | def test_subvolume_snapshot_clone_pool_layout(self): | |
1732 | subvolume = self._generate_random_subvolume_name() | |
1733 | snapshot = self._generate_random_snapshot_name() | |
1734 | clone = self._generate_random_clone_name() | |
1735 | ||
1736 | # add data pool | |
1737 | new_pool = "new_pool" | |
1738 | self.fs.add_data_pool(new_pool) | |
1739 | ||
1740 | # create subvolume | |
1741 | self._fs_cmd("subvolume", "create", self.volname, subvolume) | |
1742 | ||
1743 | # do some IO | |
1744 | self._do_subvolume_io(subvolume, number_of_files=32) | |
1745 | ||
1746 | # snapshot subvolume | |
1747 | self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolume, snapshot) | |
1748 | ||
92f5a8d4 TL |
1749 | # schedule a clone |
1750 | self._fs_cmd("subvolume", "snapshot", "clone", self.volname, subvolume, snapshot, clone, "--pool_layout", new_pool) | |
1751 | ||
1752 | # check clone status | |
1753 | self._wait_for_clone_to_complete(clone) | |
1754 | ||
92f5a8d4 TL |
1755 | # remove snapshot |
1756 | self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume, snapshot) | |
1757 | ||
1758 | # verify clone | |
1759 | self._verify_clone(subvolume, clone) | |
1760 | ||
1761 | subvol_path = self._get_subvolume_path(self.volname, clone) | |
1762 | desired_pool = self.mount_a.getfattr(subvol_path, "ceph.dir.layout.pool") | |
1763 | self.assertEqual(desired_pool, new_pool) | |
1764 | ||
1765 | # remove subvolumes | |
1766 | self._fs_cmd("subvolume", "rm", self.volname, subvolume) | |
1767 | self._fs_cmd("subvolume", "rm", self.volname, clone) | |
1768 | ||
1769 | # verify trash dir is clean | |
1770 | self._wait_for_trash_empty() | |
1771 | ||
1772 | def test_subvolume_snapshot_clone_with_attrs(self): | |
1773 | subvolume = self._generate_random_subvolume_name() | |
1774 | snapshot = self._generate_random_snapshot_name() | |
1775 | clone = self._generate_random_clone_name() | |
1776 | ||
1777 | mode = "777" | |
1778 | uid = "1000" | |
1779 | gid = "1000" | |
1780 | ||
1781 | # create subvolume | |
1782 | self._fs_cmd("subvolume", "create", self.volname, subvolume, "--mode", mode, "--uid", uid, "--gid", gid) | |
1783 | ||
1784 | # do some IO | |
1785 | self._do_subvolume_io(subvolume, number_of_files=32) | |
1786 | ||
1787 | # snapshot subvolume | |
1788 | self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolume, snapshot) | |
1789 | ||
92f5a8d4 TL |
1790 | # schedule a clone |
1791 | self._fs_cmd("subvolume", "snapshot", "clone", self.volname, subvolume, snapshot, clone) | |
1792 | ||
1793 | # check clone status | |
1794 | self._wait_for_clone_to_complete(clone) | |
1795 | ||
92f5a8d4 TL |
1796 | # remove snapshot |
1797 | self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume, snapshot) | |
1798 | ||
1799 | # verify clone | |
1800 | self._verify_clone(subvolume, clone) | |
1801 | ||
1802 | # remove subvolumes | |
1803 | self._fs_cmd("subvolume", "rm", self.volname, subvolume) | |
1804 | self._fs_cmd("subvolume", "rm", self.volname, clone) | |
1805 | ||
1806 | # verify trash dir is clean | |
1807 | self._wait_for_trash_empty() | |
1808 | ||
1809 | def test_subvolume_snapshot_clone_and_reclone(self): | |
1810 | subvolume = self._generate_random_subvolume_name() | |
1811 | snapshot = self._generate_random_snapshot_name() | |
1812 | clone1, clone2 = self._generate_random_clone_name(2) | |
1813 | ||
1814 | # create subvolume | |
1815 | self._fs_cmd("subvolume", "create", self.volname, subvolume) | |
1816 | ||
1817 | # do some IO | |
1818 | self._do_subvolume_io(subvolume, number_of_files=32) | |
1819 | ||
1820 | # snapshot subvolume | |
1821 | self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolume, snapshot) | |
1822 | ||
92f5a8d4 TL |
1823 | # schedule a clone |
1824 | self._fs_cmd("subvolume", "snapshot", "clone", self.volname, subvolume, snapshot, clone1) | |
1825 | ||
1826 | # check clone status | |
1827 | self._wait_for_clone_to_complete(clone1) | |
1828 | ||
92f5a8d4 TL |
1829 | # remove snapshot |
1830 | self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume, snapshot) | |
1831 | ||
1832 | # verify clone | |
1833 | self._verify_clone(subvolume, clone1) | |
1834 | ||
1835 | # now the clone is just like a normal subvolume -- snapshot the clone and fork | |
1836 | # another clone. before that do some IO so it's can be differentiated. | |
1837 | self._do_subvolume_io(clone1, create_dir="data", number_of_files=32) | |
1838 | ||
1839 | # snapshot clone -- use same snap name | |
1840 | self._fs_cmd("subvolume", "snapshot", "create", self.volname, clone1, snapshot) | |
1841 | ||
92f5a8d4 TL |
1842 | # schedule a clone |
1843 | self._fs_cmd("subvolume", "snapshot", "clone", self.volname, clone1, snapshot, clone2) | |
1844 | ||
1845 | # check clone status | |
1846 | self._wait_for_clone_to_complete(clone2) | |
1847 | ||
92f5a8d4 TL |
1848 | # remove snapshot |
1849 | self._fs_cmd("subvolume", "snapshot", "rm", self.volname, clone1, snapshot) | |
1850 | ||
1851 | # verify clone | |
1852 | self._verify_clone(clone1, clone2) | |
1853 | ||
1854 | # remove subvolumes | |
1855 | self._fs_cmd("subvolume", "rm", self.volname, subvolume) | |
1856 | self._fs_cmd("subvolume", "rm", self.volname, clone1) | |
1857 | self._fs_cmd("subvolume", "rm", self.volname, clone2) | |
1858 | ||
1859 | # verify trash dir is clean | |
1860 | self._wait_for_trash_empty() | |
1861 | ||
1862 | def test_subvolume_snapshot_clone_under_group(self): | |
1863 | subvolume = self._generate_random_subvolume_name() | |
1864 | snapshot = self._generate_random_snapshot_name() | |
1865 | clone = self._generate_random_clone_name() | |
1866 | group = self._generate_random_group_name() | |
1867 | ||
1868 | # create subvolume | |
1869 | self._fs_cmd("subvolume", "create", self.volname, subvolume) | |
1870 | ||
1871 | # do some IO | |
1872 | self._do_subvolume_io(subvolume, number_of_files=32) | |
1873 | ||
1874 | # snapshot subvolume | |
1875 | self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolume, snapshot) | |
1876 | ||
92f5a8d4 TL |
1877 | # create group |
1878 | self._fs_cmd("subvolumegroup", "create", self.volname, group) | |
1879 | ||
1880 | # schedule a clone | |
1881 | self._fs_cmd("subvolume", "snapshot", "clone", self.volname, subvolume, snapshot, clone, '--target_group_name', group) | |
1882 | ||
1883 | # check clone status | |
1884 | self._wait_for_clone_to_complete(clone, clone_group=group) | |
1885 | ||
92f5a8d4 TL |
1886 | # remove snapshot |
1887 | self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume, snapshot) | |
1888 | ||
1889 | # verify clone | |
1890 | self._verify_clone(subvolume, clone, clone_group=group) | |
1891 | ||
1892 | # remove subvolumes | |
1893 | self._fs_cmd("subvolume", "rm", self.volname, subvolume) | |
1894 | self._fs_cmd("subvolume", "rm", self.volname, clone, group) | |
1895 | ||
1896 | # remove group | |
1897 | self._fs_cmd("subvolumegroup", "rm", self.volname, group) | |
1898 | ||
1899 | # verify trash dir is clean | |
1900 | self._wait_for_trash_empty() | |
1901 | ||
1902 | def test_subvolume_under_group_snapshot_clone(self): | |
1903 | subvolume = self._generate_random_subvolume_name() | |
1904 | group = self._generate_random_group_name() | |
1905 | snapshot = self._generate_random_snapshot_name() | |
1906 | clone = self._generate_random_clone_name() | |
1907 | ||
1908 | # create group | |
1909 | self._fs_cmd("subvolumegroup", "create", self.volname, group) | |
1910 | ||
1911 | # create subvolume | |
1912 | self._fs_cmd("subvolume", "create", self.volname, subvolume, group) | |
1913 | ||
1914 | # do some IO | |
1915 | self._do_subvolume_io(subvolume, subvolume_group=group, number_of_files=32) | |
1916 | ||
1917 | # snapshot subvolume | |
1918 | self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolume, snapshot, group) | |
1919 | ||
92f5a8d4 TL |
1920 | # schedule a clone |
1921 | self._fs_cmd("subvolume", "snapshot", "clone", self.volname, subvolume, snapshot, clone, '--group_name', group) | |
1922 | ||
1923 | # check clone status | |
1924 | self._wait_for_clone_to_complete(clone) | |
1925 | ||
92f5a8d4 TL |
1926 | # remove snapshot |
1927 | self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume, snapshot, group) | |
1928 | ||
1929 | # verify clone | |
1930 | self._verify_clone(subvolume, clone, source_group=group) | |
1931 | ||
1932 | # remove subvolumes | |
1933 | self._fs_cmd("subvolume", "rm", self.volname, subvolume, group) | |
1934 | self._fs_cmd("subvolume", "rm", self.volname, clone) | |
1935 | ||
1936 | # remove group | |
1937 | self._fs_cmd("subvolumegroup", "rm", self.volname, group) | |
1938 | ||
1939 | # verify trash dir is clean | |
1940 | self._wait_for_trash_empty() | |
1941 | ||
1942 | def test_subvolume_snapshot_clone_different_groups(self): | |
1943 | subvolume = self._generate_random_subvolume_name() | |
1944 | snapshot = self._generate_random_snapshot_name() | |
1945 | clone = self._generate_random_clone_name() | |
1946 | s_group, c_group = self._generate_random_group_name(2) | |
1947 | ||
1948 | # create groups | |
1949 | self._fs_cmd("subvolumegroup", "create", self.volname, s_group) | |
1950 | self._fs_cmd("subvolumegroup", "create", self.volname, c_group) | |
1951 | ||
1952 | # create subvolume | |
1953 | self._fs_cmd("subvolume", "create", self.volname, subvolume, s_group) | |
1954 | ||
1955 | # do some IO | |
1956 | self._do_subvolume_io(subvolume, subvolume_group=s_group, number_of_files=32) | |
1957 | ||
1958 | # snapshot subvolume | |
1959 | self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolume, snapshot, s_group) | |
1960 | ||
92f5a8d4 TL |
1961 | # schedule a clone |
1962 | self._fs_cmd("subvolume", "snapshot", "clone", self.volname, subvolume, snapshot, clone, | |
1963 | '--group_name', s_group, '--target_group_name', c_group) | |
1964 | ||
1965 | # check clone status | |
1966 | self._wait_for_clone_to_complete(clone, clone_group=c_group) | |
1967 | ||
92f5a8d4 TL |
1968 | # remove snapshot |
1969 | self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume, snapshot, s_group) | |
1970 | ||
1971 | # verify clone | |
1972 | self._verify_clone(subvolume, clone, source_group=s_group, clone_group=c_group) | |
1973 | ||
1974 | # remove subvolumes | |
1975 | self._fs_cmd("subvolume", "rm", self.volname, subvolume, s_group) | |
1976 | self._fs_cmd("subvolume", "rm", self.volname, clone, c_group) | |
1977 | ||
1978 | # remove groups | |
1979 | self._fs_cmd("subvolumegroup", "rm", self.volname, s_group) | |
1980 | self._fs_cmd("subvolumegroup", "rm", self.volname, c_group) | |
1981 | ||
1982 | # verify trash dir is clean | |
1983 | self._wait_for_trash_empty() | |
1984 | ||
1985 | def test_subvolume_snapshot_clone_with_upgrade(self): | |
1986 | """ | |
1987 | yet another poor man's upgrade test -- rather than going through a full | |
1988 | upgrade cycle, emulate old types subvolumes by going through the wormhole | |
1989 | and verify clone operation. | |
1990 | """ | |
1991 | subvolume = self._generate_random_subvolume_name() | |
1992 | snapshot = self._generate_random_snapshot_name() | |
1993 | clone = self._generate_random_clone_name() | |
1994 | ||
1995 | # emulate a old-fashioned subvolume | |
1996 | createpath = os.path.join(".", "volumes", "_nogroup", subvolume) | |
1997 | self.mount_a.run_shell(['mkdir', '-p', createpath]) | |
1998 | ||
1999 | # do some IO | |
f6b5b4d7 | 2000 | self._do_subvolume_io(subvolume, number_of_files=64) |
92f5a8d4 TL |
2001 | |
2002 | # snapshot subvolume | |
2003 | self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolume, snapshot) | |
2004 | ||
92f5a8d4 TL |
2005 | # schedule a clone |
2006 | self._fs_cmd("subvolume", "snapshot", "clone", self.volname, subvolume, snapshot, clone) | |
2007 | ||
f6b5b4d7 TL |
2008 | # snapshot should not be deletable now |
2009 | try: | |
2010 | self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume, snapshot) | |
2011 | except CommandFailedError as ce: | |
2012 | self.assertEqual(ce.exitstatus, errno.EAGAIN, msg="invalid error code when removing source snapshot of a clone") | |
2013 | else: | |
2014 | self.fail("expected removing source snapshot of a clone to fail") | |
2015 | ||
92f5a8d4 TL |
2016 | # check clone status |
2017 | self._wait_for_clone_to_complete(clone) | |
2018 | ||
92f5a8d4 TL |
2019 | # remove snapshot |
2020 | self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume, snapshot) | |
2021 | ||
2022 | # verify clone | |
2023 | self._verify_clone(subvolume, clone) | |
2024 | ||
2025 | # remove subvolumes | |
2026 | self._fs_cmd("subvolume", "rm", self.volname, subvolume) | |
2027 | self._fs_cmd("subvolume", "rm", self.volname, clone) | |
2028 | ||
2029 | # verify trash dir is clean | |
2030 | self._wait_for_trash_empty() | |
2031 | ||
2032 | def test_subvolume_clone_in_progress_getpath(self): | |
2033 | subvolume = self._generate_random_subvolume_name() | |
2034 | snapshot = self._generate_random_snapshot_name() | |
2035 | clone = self._generate_random_clone_name() | |
2036 | ||
2037 | # create subvolume | |
2038 | self._fs_cmd("subvolume", "create", self.volname, subvolume) | |
2039 | ||
2040 | # do some IO | |
2041 | self._do_subvolume_io(subvolume, number_of_files=64) | |
2042 | ||
2043 | # snapshot subvolume | |
2044 | self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolume, snapshot) | |
2045 | ||
92f5a8d4 TL |
2046 | # schedule a clone |
2047 | self._fs_cmd("subvolume", "snapshot", "clone", self.volname, subvolume, snapshot, clone) | |
2048 | ||
2049 | # clone should not be accessible right now | |
2050 | try: | |
2051 | self._get_subvolume_path(self.volname, clone) | |
2052 | except CommandFailedError as ce: | |
2053 | if ce.exitstatus != errno.EAGAIN: | |
f6b5b4d7 | 2054 | raise RuntimeError("invalid error code when fetching path of an pending clone") |
92f5a8d4 TL |
2055 | else: |
2056 | raise RuntimeError("expected fetching path of an pending clone to fail") | |
2057 | ||
2058 | # check clone status | |
2059 | self._wait_for_clone_to_complete(clone) | |
2060 | ||
2061 | # clone should be accessible now | |
2062 | subvolpath = self._get_subvolume_path(self.volname, clone) | |
2063 | self.assertNotEqual(subvolpath, None) | |
2064 | ||
f6b5b4d7 TL |
2065 | # remove snapshot |
2066 | self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume, snapshot) | |
2067 | ||
2068 | # verify clone | |
2069 | self._verify_clone(subvolume, clone) | |
2070 | ||
2071 | # remove subvolumes | |
2072 | self._fs_cmd("subvolume", "rm", self.volname, subvolume) | |
2073 | self._fs_cmd("subvolume", "rm", self.volname, clone) | |
2074 | ||
2075 | # verify trash dir is clean | |
2076 | self._wait_for_trash_empty() | |
2077 | ||
2078 | def test_subvolume_clone_in_progress_snapshot_rm(self): | |
2079 | subvolume = self._generate_random_subvolume_name() | |
2080 | snapshot = self._generate_random_snapshot_name() | |
2081 | clone = self._generate_random_clone_name() | |
2082 | ||
2083 | # create subvolume | |
2084 | self._fs_cmd("subvolume", "create", self.volname, subvolume) | |
2085 | ||
2086 | # do some IO | |
2087 | self._do_subvolume_io(subvolume, number_of_files=64) | |
2088 | ||
2089 | # snapshot subvolume | |
2090 | self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolume, snapshot) | |
2091 | ||
2092 | # schedule a clone | |
2093 | self._fs_cmd("subvolume", "snapshot", "clone", self.volname, subvolume, snapshot, clone) | |
2094 | ||
2095 | # snapshot should not be deletable now | |
2096 | try: | |
2097 | self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume, snapshot) | |
2098 | except CommandFailedError as ce: | |
2099 | self.assertEqual(ce.exitstatus, errno.EAGAIN, msg="invalid error code when removing source snapshot of a clone") | |
2100 | else: | |
2101 | self.fail("expected removing source snapshot of a clone to fail") | |
2102 | ||
2103 | # check clone status | |
2104 | self._wait_for_clone_to_complete(clone) | |
2105 | ||
2106 | # clone should be accessible now | |
2107 | subvolpath = self._get_subvolume_path(self.volname, clone) | |
2108 | self.assertNotEqual(subvolpath, None) | |
92f5a8d4 TL |
2109 | |
2110 | # remove snapshot | |
2111 | self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume, snapshot) | |
2112 | ||
2113 | # verify clone | |
2114 | self._verify_clone(subvolume, clone) | |
2115 | ||
2116 | # remove subvolumes | |
2117 | self._fs_cmd("subvolume", "rm", self.volname, subvolume) | |
2118 | self._fs_cmd("subvolume", "rm", self.volname, clone) | |
2119 | ||
2120 | # verify trash dir is clean | |
2121 | self._wait_for_trash_empty() | |
2122 | ||
2123 | def test_subvolume_clone_in_progress_source(self): | |
2124 | subvolume = self._generate_random_subvolume_name() | |
2125 | snapshot = self._generate_random_snapshot_name() | |
2126 | clone = self._generate_random_clone_name() | |
2127 | ||
2128 | # create subvolume | |
2129 | self._fs_cmd("subvolume", "create", self.volname, subvolume) | |
2130 | ||
2131 | # do some IO | |
2132 | self._do_subvolume_io(subvolume, number_of_files=64) | |
2133 | ||
2134 | # snapshot subvolume | |
2135 | self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolume, snapshot) | |
2136 | ||
92f5a8d4 TL |
2137 | # schedule a clone |
2138 | self._fs_cmd("subvolume", "snapshot", "clone", self.volname, subvolume, snapshot, clone) | |
2139 | ||
2140 | # verify clone source | |
2141 | result = json.loads(self._fs_cmd("clone", "status", self.volname, clone)) | |
2142 | source = result['status']['source'] | |
2143 | self.assertEqual(source['volume'], self.volname) | |
2144 | self.assertEqual(source['subvolume'], subvolume) | |
2145 | self.assertEqual(source.get('group', None), None) | |
2146 | self.assertEqual(source['snapshot'], snapshot) | |
2147 | ||
2148 | # check clone status | |
2149 | self._wait_for_clone_to_complete(clone) | |
2150 | ||
2151 | # clone should be accessible now | |
2152 | subvolpath = self._get_subvolume_path(self.volname, clone) | |
2153 | self.assertNotEqual(subvolpath, None) | |
2154 | ||
92f5a8d4 TL |
2155 | # remove snapshot |
2156 | self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume, snapshot) | |
2157 | ||
2158 | # verify clone | |
2159 | self._verify_clone(subvolume, clone) | |
2160 | ||
2161 | # remove subvolumes | |
2162 | self._fs_cmd("subvolume", "rm", self.volname, subvolume) | |
2163 | self._fs_cmd("subvolume", "rm", self.volname, clone) | |
2164 | ||
2165 | # verify trash dir is clean | |
2166 | self._wait_for_trash_empty() | |
2167 | ||
2168 | def test_non_clone_status(self): | |
2169 | subvolume = self._generate_random_subvolume_name() | |
2170 | ||
2171 | # create subvolume | |
2172 | self._fs_cmd("subvolume", "create", self.volname, subvolume) | |
2173 | ||
2174 | try: | |
2175 | self._fs_cmd("clone", "status", self.volname, subvolume) | |
2176 | except CommandFailedError as ce: | |
2177 | if ce.exitstatus != errno.ENOTSUP: | |
2178 | raise RuntimeError("invalid error code when fetching status of a non cloned subvolume") | |
2179 | else: | |
2180 | raise RuntimeError("expected fetching of clone status of a subvolume to fail") | |
2181 | ||
2182 | # remove subvolume | |
2183 | self._fs_cmd("subvolume", "rm", self.volname, subvolume) | |
2184 | ||
2185 | # verify trash dir is clean | |
2186 | self._wait_for_trash_empty() | |
2187 | ||
2188 | def test_subvolume_snapshot_clone_on_existing_subvolumes(self): | |
2189 | subvolume1, subvolume2 = self._generate_random_subvolume_name(2) | |
2190 | snapshot = self._generate_random_snapshot_name() | |
2191 | clone = self._generate_random_clone_name() | |
2192 | ||
2193 | # create subvolumes | |
2194 | self._fs_cmd("subvolume", "create", self.volname, subvolume1) | |
2195 | self._fs_cmd("subvolume", "create", self.volname, subvolume2) | |
2196 | ||
2197 | # do some IO | |
2198 | self._do_subvolume_io(subvolume1, number_of_files=32) | |
2199 | ||
2200 | # snapshot subvolume | |
2201 | self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolume1, snapshot) | |
2202 | ||
92f5a8d4 TL |
2203 | # schedule a clone with target as subvolume2 |
2204 | try: | |
2205 | self._fs_cmd("subvolume", "snapshot", "clone", self.volname, subvolume1, snapshot, subvolume2) | |
2206 | except CommandFailedError as ce: | |
2207 | if ce.exitstatus != errno.EEXIST: | |
2208 | raise RuntimeError("invalid error code when cloning to existing subvolume") | |
2209 | else: | |
2210 | raise RuntimeError("expected cloning to fail if the target is an existing subvolume") | |
2211 | ||
2212 | self._fs_cmd("subvolume", "snapshot", "clone", self.volname, subvolume1, snapshot, clone) | |
2213 | ||
2214 | # schedule a clone with target as clone | |
2215 | try: | |
2216 | self._fs_cmd("subvolume", "snapshot", "clone", self.volname, subvolume1, snapshot, clone) | |
2217 | except CommandFailedError as ce: | |
2218 | if ce.exitstatus != errno.EEXIST: | |
2219 | raise RuntimeError("invalid error code when cloning to existing clone") | |
2220 | else: | |
2221 | raise RuntimeError("expected cloning to fail if the target is an existing clone") | |
2222 | ||
2223 | # check clone status | |
2224 | self._wait_for_clone_to_complete(clone) | |
2225 | ||
92f5a8d4 TL |
2226 | # remove snapshot |
2227 | self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume1, snapshot) | |
2228 | ||
2229 | # verify clone | |
2230 | self._verify_clone(subvolume1, clone) | |
2231 | ||
2232 | # remove subvolumes | |
2233 | self._fs_cmd("subvolume", "rm", self.volname, subvolume1) | |
2234 | self._fs_cmd("subvolume", "rm", self.volname, subvolume2) | |
2235 | self._fs_cmd("subvolume", "rm", self.volname, clone) | |
2236 | ||
2237 | # verify trash dir is clean | |
2238 | self._wait_for_trash_empty() | |
2239 | ||
2240 | def test_subvolume_snapshot_clone_fail_with_remove(self): | |
2241 | subvolume = self._generate_random_subvolume_name() | |
2242 | snapshot = self._generate_random_snapshot_name() | |
2243 | clone1, clone2 = self._generate_random_clone_name(2) | |
2244 | ||
2245 | pool_capacity = 32 * 1024 * 1024 | |
2246 | # number of files required to fill up 99% of the pool | |
2247 | nr_files = int((pool_capacity * 0.99) / (TestVolumes.DEFAULT_FILE_SIZE * 1024 * 1024)) | |
2248 | ||
2249 | # create subvolume | |
2250 | self._fs_cmd("subvolume", "create", self.volname, subvolume) | |
2251 | ||
2252 | # do some IO | |
2253 | self._do_subvolume_io(subvolume, number_of_files=nr_files) | |
2254 | ||
2255 | # snapshot subvolume | |
2256 | self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolume, snapshot) | |
2257 | ||
92f5a8d4 TL |
2258 | # add data pool |
2259 | new_pool = "new_pool" | |
2260 | self.fs.add_data_pool(new_pool) | |
2261 | ||
2262 | self.fs.mon_manager.raw_cluster_cmd("osd", "pool", "set-quota", new_pool, | |
e306af50 | 2263 | "max_bytes", "{0}".format(pool_capacity // 4)) |
92f5a8d4 TL |
2264 | |
2265 | # schedule a clone | |
2266 | self._fs_cmd("subvolume", "snapshot", "clone", self.volname, subvolume, snapshot, clone1, "--pool_layout", new_pool) | |
2267 | ||
2268 | # check clone status -- this should dramatically overshoot the pool quota | |
2269 | self._wait_for_clone_to_complete(clone1) | |
2270 | ||
2271 | # verify clone | |
2272 | self._verify_clone(subvolume, clone1) | |
2273 | ||
2274 | # wait a bit so that subsequent I/O will give pool full error | |
2275 | time.sleep(120) | |
2276 | ||
2277 | # schedule a clone | |
2278 | self._fs_cmd("subvolume", "snapshot", "clone", self.volname, subvolume, snapshot, clone2, "--pool_layout", new_pool) | |
2279 | ||
2280 | # check clone status | |
2281 | self._wait_for_clone_to_fail(clone2) | |
2282 | ||
92f5a8d4 TL |
2283 | # remove snapshot |
2284 | self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume, snapshot) | |
2285 | ||
2286 | # remove subvolumes | |
2287 | self._fs_cmd("subvolume", "rm", self.volname, subvolume) | |
2288 | self._fs_cmd("subvolume", "rm", self.volname, clone1) | |
2289 | try: | |
2290 | self._fs_cmd("subvolume", "rm", self.volname, clone2) | |
2291 | except CommandFailedError as ce: | |
2292 | if ce.exitstatus != errno.EAGAIN: | |
2293 | raise RuntimeError("invalid error code when trying to remove failed clone") | |
2294 | else: | |
2295 | raise RuntimeError("expected error when removing a failed clone") | |
2296 | ||
2297 | # ... and with force, failed clone can be removed | |
2298 | self._fs_cmd("subvolume", "rm", self.volname, clone2, "--force") | |
494da23a TL |
2299 | |
2300 | # verify trash dir is clean | |
2301 | self._wait_for_trash_empty() | |
9f95a23c TL |
2302 | |
2303 | def test_subvolume_snapshot_attr_clone(self): | |
2304 | subvolume = self._generate_random_subvolume_name() | |
2305 | snapshot = self._generate_random_snapshot_name() | |
2306 | clone = self._generate_random_clone_name() | |
2307 | ||
2308 | # create subvolume | |
2309 | self._fs_cmd("subvolume", "create", self.volname, subvolume) | |
2310 | ||
2311 | # do some IO | |
2312 | self._do_subvolume_io_mixed(subvolume) | |
2313 | ||
2314 | # snapshot subvolume | |
2315 | self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolume, snapshot) | |
2316 | ||
9f95a23c TL |
2317 | # schedule a clone |
2318 | self._fs_cmd("subvolume", "snapshot", "clone", self.volname, subvolume, snapshot, clone) | |
2319 | ||
2320 | # check clone status | |
2321 | self._wait_for_clone_to_complete(clone) | |
2322 | ||
9f95a23c TL |
2323 | # remove snapshot |
2324 | self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume, snapshot) | |
2325 | ||
2326 | # verify clone | |
2327 | self._verify_clone(subvolume, clone) | |
2328 | ||
2329 | # remove subvolumes | |
2330 | self._fs_cmd("subvolume", "rm", self.volname, subvolume) | |
2331 | self._fs_cmd("subvolume", "rm", self.volname, clone) | |
2332 | ||
2333 | # verify trash dir is clean | |
2334 | self._wait_for_trash_empty() | |
2335 | ||
2336 | def test_subvolume_snapshot_clone_cancel_in_progress(self): | |
2337 | subvolume = self._generate_random_subvolume_name() | |
2338 | snapshot = self._generate_random_snapshot_name() | |
2339 | clone = self._generate_random_clone_name() | |
2340 | ||
2341 | # create subvolume | |
2342 | self._fs_cmd("subvolume", "create", self.volname, subvolume) | |
2343 | ||
2344 | # do some IO | |
2345 | self._do_subvolume_io(subvolume, number_of_files=128) | |
2346 | ||
2347 | # snapshot subvolume | |
2348 | self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolume, snapshot) | |
2349 | ||
9f95a23c TL |
2350 | # schedule a clone |
2351 | self._fs_cmd("subvolume", "snapshot", "clone", self.volname, subvolume, snapshot, clone) | |
2352 | ||
2353 | # cancel on-going clone | |
2354 | self._fs_cmd("clone", "cancel", self.volname, clone) | |
2355 | ||
2356 | # verify canceled state | |
2357 | self._check_clone_canceled(clone) | |
2358 | ||
9f95a23c TL |
2359 | # remove snapshot |
2360 | self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume, snapshot) | |
2361 | ||
2362 | # remove subvolumes | |
2363 | self._fs_cmd("subvolume", "rm", self.volname, subvolume) | |
2364 | self._fs_cmd("subvolume", "rm", self.volname, clone, "--force") | |
2365 | ||
2366 | # verify trash dir is clean | |
2367 | self._wait_for_trash_empty() | |
2368 | ||
2369 | def test_subvolume_snapshot_clone_cancel_pending(self): | |
2370 | """ | |
2371 | this test is a bit more involved compared to canceling an in-progress clone. | |
2372 | we'd need to ensure that a to-be canceled clone has still not been picked up | |
2373 | by cloner threads. exploit the fact that clones are picked up in an FCFS | |
2374 | fashion and there are four (4) cloner threads by default. When the number of | |
2375 | cloner threads increase, this test _may_ start tripping -- so, the number of | |
2376 | clone operations would need to be jacked up. | |
2377 | """ | |
2378 | # default number of clone threads | |
2379 | NR_THREADS = 4 | |
2380 | # good enough for 4 threads | |
2381 | NR_CLONES = 5 | |
2382 | # yeh, 1gig -- we need the clone to run for sometime | |
2383 | FILE_SIZE_MB = 1024 | |
2384 | ||
2385 | subvolume = self._generate_random_subvolume_name() | |
2386 | snapshot = self._generate_random_snapshot_name() | |
2387 | clones = self._generate_random_clone_name(NR_CLONES) | |
2388 | ||
2389 | # create subvolume | |
2390 | self._fs_cmd("subvolume", "create", self.volname, subvolume) | |
2391 | ||
2392 | # do some IO | |
2393 | self._do_subvolume_io(subvolume, number_of_files=4, file_size=FILE_SIZE_MB) | |
2394 | ||
2395 | # snapshot subvolume | |
2396 | self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolume, snapshot) | |
2397 | ||
9f95a23c TL |
2398 | # schedule clones |
2399 | for clone in clones: | |
2400 | self._fs_cmd("subvolume", "snapshot", "clone", self.volname, subvolume, snapshot, clone) | |
2401 | ||
2402 | to_wait = clones[0:NR_THREADS] | |
2403 | to_cancel = clones[NR_THREADS:] | |
2404 | ||
2405 | # cancel pending clones and verify | |
2406 | for clone in to_cancel: | |
2407 | status = json.loads(self._fs_cmd("clone", "status", self.volname, clone)) | |
2408 | self.assertEqual(status["status"]["state"], "pending") | |
2409 | self._fs_cmd("clone", "cancel", self.volname, clone) | |
2410 | self._check_clone_canceled(clone) | |
2411 | ||
2412 | # let's cancel on-going clones. handle the case where some of the clones | |
2413 | # _just_ complete | |
2414 | for clone in list(to_wait): | |
2415 | try: | |
2416 | self._fs_cmd("clone", "cancel", self.volname, clone) | |
2417 | to_cancel.append(clone) | |
2418 | to_wait.remove(clone) | |
2419 | except CommandFailedError as ce: | |
2420 | if ce.exitstatus != errno.EINVAL: | |
2421 | raise RuntimeError("invalid error code when cancelling on-going clone") | |
2422 | ||
9f95a23c TL |
2423 | # remove snapshot |
2424 | self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume, snapshot) | |
2425 | ||
2426 | # remove subvolumes | |
2427 | self._fs_cmd("subvolume", "rm", self.volname, subvolume) | |
2428 | for clone in to_wait: | |
2429 | self._fs_cmd("subvolume", "rm", self.volname, clone) | |
2430 | for clone in to_cancel: | |
2431 | self._fs_cmd("subvolume", "rm", self.volname, clone, "--force") | |
2432 | ||
2433 | # verify trash dir is clean | |
2434 | self._wait_for_trash_empty() |