import errno
import json
import logging
import time
import uuid
from io import StringIO
from os.path import join as os_path_join

from teuthology.orchestra.run import CommandFailedError, Raw

from tasks.cephfs.cephfs_test_case import CephFSTestCase
from tasks.cephfs.filesystem import FileLayout, FSMissing
from tasks.cephfs.fuse_mount import FuseMount
from tasks.cephfs.caps_helper import CapsHelper

log = logging.getLogger(__name__)

class TestAdminCommands(CephFSTestCase):
    """
    Tests for administration commands.
    """

    CLIENTS_REQUIRED = 1
    MDSS_REQUIRED = 3

    def test_fsnames_can_only_by_goodchars(self):
        n = 'test_fsnames_can_only_by_goodchars'
        metapoolname, datapoolname = n+'-testmetapool', n+'-testdatapool'
        badname = n+'badname@#'

        self.fs.mon_manager.raw_cluster_cmd('osd', 'pool', 'create',
                                            metapoolname)
        self.fs.mon_manager.raw_cluster_cmd('osd', 'pool', 'create',
                                            datapoolname)

        # test that a fs name containing characters outside the allowed set fails
        args = ['fs', 'new', badname, metapoolname, datapoolname]
        proc = self.fs.mon_manager.run_cluster_cmd(args=args, stderr=StringIO(),
                                                   check_status=False)
        self.assertIn('invalid chars', proc.stderr.getvalue().lower())

        self.fs.mon_manager.raw_cluster_cmd('osd', 'pool', 'rm', metapoolname,
                                            metapoolname,
                                            '--yes-i-really-really-mean-it-not-faking')
        self.fs.mon_manager.raw_cluster_cmd('osd', 'pool', 'rm', datapoolname,
                                            datapoolname,
                                            '--yes-i-really-really-mean-it-not-faking')

    def test_fs_status(self):
        """
        That `ceph fs status` command functions.
        """

        s = self.fs.mon_manager.raw_cluster_cmd("fs", "status")
        self.assertTrue("active" in s)

        mdsmap = json.loads(self.fs.mon_manager.raw_cluster_cmd("fs", "status", "--format=json-pretty"))["mdsmap"]
        self.assertEqual(mdsmap[0]["state"], "active")

        mdsmap = json.loads(self.fs.mon_manager.raw_cluster_cmd("fs", "status", "--format=json"))["mdsmap"]
        self.assertEqual(mdsmap[0]["state"], "active")

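    # A minimal sketch of the JSON shape the assertions above rely on: the
    # "mdsmap" value is assumed to be a list with one entry per MDS, each
    # carrying a "state" field.  Keys other than "state" (and the daemon name
    # shown here) are illustrative, not verified output.
    #
    #   {"mdsmap": [{"name": "mds.a", "state": "active", ...}], ...}
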
    def _setup_ec_pools(self, n, metadata=True, overwrites=True):
        if metadata:
            self.fs.mon_manager.raw_cluster_cmd('osd', 'pool', 'create', n+"-meta", "8")
        cmd = ['osd', 'erasure-code-profile', 'set', n+"-profile", "m=2", "k=2", "crush-failure-domain=osd"]
        self.fs.mon_manager.raw_cluster_cmd(*cmd)
        self.fs.mon_manager.raw_cluster_cmd('osd', 'pool', 'create', n+"-data", "8", "erasure", n+"-profile")
        if overwrites:
            self.fs.mon_manager.raw_cluster_cmd('osd', 'pool', 'set', n+"-data", 'allow_ec_overwrites', 'true')

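    # For reference, with n="example" (an illustrative name) the helper above
    # issues the equivalent of the following CLI commands:
    #
    #   ceph osd pool create example-meta 8
    #   ceph osd erasure-code-profile set example-profile m=2 k=2 crush-failure-domain=osd
    #   ceph osd pool create example-data 8 erasure example-profile
    #   ceph osd pool set example-data allow_ec_overwrites true
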
    def _check_pool_application_metadata_key_value(self, pool, app, key, value):
        output = self.fs.mon_manager.raw_cluster_cmd(
            'osd', 'pool', 'application', 'get', pool, app, key)
        self.assertEqual(str(output.strip()), value)

    def test_add_data_pool_root(self):
        """
        That a new data pool can be added and used for the root directory.
        """

        p = self.fs.add_data_pool("foo")
        self.fs.set_dir_layout(self.mount_a, ".", FileLayout(pool=p))

    def test_add_data_pool_application_metadata(self):
        """
        That the application metadata set on a newly added data pool is as expected.
        """
        pool_name = "foo"
        mon_cmd = self.fs.mon_manager.raw_cluster_cmd
        mon_cmd('osd', 'pool', 'create', pool_name, '--pg_num_min',
                str(self.fs.pg_num_min))
        # Check whether https://tracker.ceph.com/issues/43061 is fixed
        mon_cmd('osd', 'pool', 'application', 'enable', pool_name, 'cephfs')
        self.fs.add_data_pool(pool_name, create=False)
        self._check_pool_application_metadata_key_value(
            pool_name, 'cephfs', 'data', self.fs.name)

    def test_add_data_pool_subdir(self):
        """
        That a new data pool can be added and used for a sub-directory.
        """

        p = self.fs.add_data_pool("foo")
        self.mount_a.run_shell("mkdir subdir")
        self.fs.set_dir_layout(self.mount_a, "subdir", FileLayout(pool=p))

    def test_add_data_pool_non_alphamueric_name_as_subdir(self):
        """
        That a new data pool with a non-alphanumeric name can be added and used for a sub-directory.
        """
        p = self.fs.add_data_pool("I-am-data_pool00.")
        self.mount_a.run_shell("mkdir subdir")
        self.fs.set_dir_layout(self.mount_a, "subdir", FileLayout(pool=p))

    def test_add_data_pool_ec(self):
        """
        That a new EC data pool can be added.
        """

        n = "test_add_data_pool_ec"
        self._setup_ec_pools(n, metadata=False)
        self.fs.add_data_pool(n+"-data", create=False)

    def test_new_default_ec(self):
        """
        That a new file system warns/fails with an EC default data pool.
        """

        self.mount_a.umount_wait(require_clean=True)
        self.mds_cluster.delete_all_filesystems()
        n = "test_new_default_ec"
        self._setup_ec_pools(n)
        try:
            self.fs.mon_manager.raw_cluster_cmd('fs', 'new', n, n+"-meta", n+"-data")
        except CommandFailedError as e:
            if e.exitstatus == 22:
                pass
            else:
                raise
        else:
            raise RuntimeError("expected failure")

    def test_new_default_ec_force(self):
        """
        That a new file system succeeds with an EC default data pool with --force.
        """

        self.mount_a.umount_wait(require_clean=True)
        self.mds_cluster.delete_all_filesystems()
        n = "test_new_default_ec_force"
        self._setup_ec_pools(n)
        self.fs.mon_manager.raw_cluster_cmd('fs', 'new', n, n+"-meta", n+"-data", "--force")

    def test_new_default_ec_no_overwrite(self):
        """
        That a new file system fails with an EC default data pool without overwrite.
        """

        self.mount_a.umount_wait(require_clean=True)
        self.mds_cluster.delete_all_filesystems()
        n = "test_new_default_ec_no_overwrite"
        self._setup_ec_pools(n, overwrites=False)
        try:
            self.fs.mon_manager.raw_cluster_cmd('fs', 'new', n, n+"-meta", n+"-data")
        except CommandFailedError as e:
            if e.exitstatus == 22:
                pass
            else:
                raise
        else:
            raise RuntimeError("expected failure")
        # and even with --force !
        try:
            self.fs.mon_manager.raw_cluster_cmd('fs', 'new', n, n+"-meta", n+"-data", "--force")
        except CommandFailedError as e:
            if e.exitstatus == 22:
                pass
            else:
                raise
        else:
            raise RuntimeError("expected failure")

    def test_fs_new_pool_application_metadata(self):
        """
        That the application metadata set on the pools of a newly created filesystem is as expected.
        """
        self.mount_a.umount_wait(require_clean=True)
        self.mds_cluster.delete_all_filesystems()
        fs_name = "test_fs_new_pool_application"
        keys = ['metadata', 'data']
        pool_names = [fs_name+'-'+key for key in keys]
        mon_cmd = self.fs.mon_manager.raw_cluster_cmd
        for p in pool_names:
            mon_cmd('osd', 'pool', 'create', p, '--pg_num_min', str(self.fs.pg_num_min))
            mon_cmd('osd', 'pool', 'application', 'enable', p, 'cephfs')
        mon_cmd('fs', 'new', fs_name, pool_names[0], pool_names[1])
        for i in range(2):
            self._check_pool_application_metadata_key_value(
                pool_names[i], 'cephfs', keys[i], fs_name)

    def test_fs_new_with_specific_id(self):
        """
        That a file system can be created with a specific ID.
        """
        fs_name = "test_fs_specific_id"
        fscid = 100
        keys = ['metadata', 'data']
        pool_names = [fs_name+'-'+key for key in keys]
        for p in pool_names:
            self.run_cluster_cmd(f'osd pool create {p}')
        self.run_cluster_cmd(f'fs new {fs_name} {pool_names[0]} {pool_names[1]} --fscid {fscid} --force')
        self.fs.status().get_fsmap(fscid)
        for i in range(2):
            self._check_pool_application_metadata_key_value(pool_names[i], 'cephfs', keys[i], fs_name)

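    # The command exercised above corresponds to the CLI form
    #   ceph fs new <fs_name> <metadata_pool> <data_pool> --fscid <id> --force
    # As the tests below verify, an explicit --fscid is rejected unless --force
    # is also given, and an ID that is already in use is rejected.
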
    def test_fs_new_with_specific_id_idempotency(self):
        """
        That the command to create a file system with a specific ID is idempotent.
        """
        fs_name = "test_fs_specific_id"
        fscid = 100
        keys = ['metadata', 'data']
        pool_names = [fs_name+'-'+key for key in keys]
        for p in pool_names:
            self.run_cluster_cmd(f'osd pool create {p}')
        self.run_cluster_cmd(f'fs new {fs_name} {pool_names[0]} {pool_names[1]} --fscid {fscid} --force')
        self.run_cluster_cmd(f'fs new {fs_name} {pool_names[0]} {pool_names[1]} --fscid {fscid} --force')
        self.fs.status().get_fsmap(fscid)

    def test_fs_new_with_specific_id_fails_without_force_flag(self):
        """
        That the command to create a file system with a specific ID fails without the '--force' flag.
        """
        fs_name = "test_fs_specific_id"
        fscid = 100
        keys = ['metadata', 'data']
        pool_names = [fs_name+'-'+key for key in keys]
        for p in pool_names:
            self.run_cluster_cmd(f'osd pool create {p}')
        try:
            self.run_cluster_cmd(f'fs new {fs_name} {pool_names[0]} {pool_names[1]} --fscid {fscid}')
        except CommandFailedError as ce:
            self.assertEqual(ce.exitstatus, errno.EINVAL,
                             "invalid error code on creating a file system with a specific ID without the --force flag")
        else:
            self.fail("expected creating file system with specific ID without '--force' flag to fail")

    def test_fs_new_with_specific_id_fails_already_in_use(self):
        """
        That creating a file system with an ID already in use fails.
        """
        fs_name = "test_fs_specific_id"
        # file system ID already in use
        fscid = self.fs.status().map['filesystems'][0]['id']
        keys = ['metadata', 'data']
        pool_names = [fs_name+'-'+key for key in keys]
        for p in pool_names:
            self.run_cluster_cmd(f'osd pool create {p}')
        try:
            self.run_cluster_cmd(f'fs new {fs_name} {pool_names[0]} {pool_names[1]} --fscid {fscid} --force')
        except CommandFailedError as ce:
            self.assertEqual(ce.exitstatus, errno.EINVAL,
                             "invalid error code on creating a file system with a specific ID that is already in use")
        else:
            self.fail("expected creating file system with ID already in use to fail")


class TestDump(CephFSTestCase):
    CLIENTS_REQUIRED = 0
    MDSS_REQUIRED = 1

    def test_fs_dump_epoch(self):
        """
        That dumping a specific epoch works.
        """

        status1 = self.fs.status()
        status2 = self.fs.status(epoch=status1["epoch"]-1)
        self.assertEqual(status1["epoch"], status2["epoch"]+1)

    def test_fsmap_trim(self):
        """
        That the fsmap is trimmed normally.
        """

        paxos_service_trim_min = 25
        self.config_set('mon', 'paxos_service_trim_min', paxos_service_trim_min)
        mon_max_mdsmap_epochs = 20
        self.config_set('mon', 'mon_max_mdsmap_epochs', mon_max_mdsmap_epochs)

        status = self.fs.status()
        epoch = status["epoch"]

        # for N mutations
        mutations = paxos_service_trim_min + mon_max_mdsmap_epochs
        b = False
        for i in range(mutations):
            self.fs.set_joinable(b)
            b = not b

        time.sleep(10) # for tick/compaction

        try:
            self.fs.status(epoch=epoch)
        except CommandFailedError as e:
            self.assertEqual(e.exitstatus, errno.ENOENT, "invalid error code when trying to fetch FSMap that was trimmed")
        else:
            self.fail("trimming did not occur as expected")

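    # Why paxos_service_trim_min + mon_max_mdsmap_epochs mutations: each
    # set_joinable() toggle publishes a new FSMap epoch, so after 25 + 20 = 45
    # new epochs the starting epoch lies outside the mon_max_mdsmap_epochs
    # retention window and past the minimum batch size the mons will trim.
    # The original epoch should therefore no longer be fetchable, hence the
    # expected ENOENT above.
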
    def test_fsmap_force_trim(self):
        """
        That the fsmap is trimmed forcefully.
        """

        status = self.fs.status()
        epoch = status["epoch"]

        paxos_service_trim_min = 1
        self.config_set('mon', 'paxos_service_trim_min', paxos_service_trim_min)
        mon_mds_force_trim_to = epoch+1
        self.config_set('mon', 'mon_mds_force_trim_to', mon_mds_force_trim_to)

        # force a new fsmap
        self.fs.set_joinable(False)
        time.sleep(10) # for tick/compaction

        status = self.fs.status()
        log.debug(f"new epoch is {status['epoch']}")
        self.fs.status(epoch=epoch+1) # epoch+1 is not trimmed, may not == status["epoch"]

        try:
            self.fs.status(epoch=epoch)
        except CommandFailedError as e:
            self.assertEqual(e.exitstatus, errno.ENOENT, "invalid error code when trying to fetch FSMap that was trimmed")
        else:
            self.fail("trimming did not occur as expected")

class TestRequiredClientFeatures(CephFSTestCase):
    CLIENTS_REQUIRED = 0
    MDSS_REQUIRED = 1

    def test_required_client_features(self):
        """
        That `ceph fs required_client_features` command functions.
        """

        def is_required(index):
            out = self.fs.mon_manager.raw_cluster_cmd('fs', 'get', self.fs.name, '--format=json-pretty')
            features = json.loads(out)['mdsmap']['required_client_features']
            if "feature_{0}".format(index) in features:
                return True
            return False

        features = json.loads(self.fs.mon_manager.raw_cluster_cmd('fs', 'feature', 'ls', '--format=json-pretty'))
        self.assertGreater(len(features), 0)

        for f in features:
            self.fs.required_client_features('rm', str(f['index']))

        for f in features:
            index = f['index']
            feature = f['name']
            if feature == 'reserved':
                feature = str(index)

            if index % 3 == 0:
                continue
            self.fs.required_client_features('add', feature)
            self.assertTrue(is_required(index))

            if index % 2 == 0:
                continue
            self.fs.required_client_features('rm', feature)
            self.assertFalse(is_required(index))

    def test_required_client_feature_add_reserved(self):
        """
        That `ceph fs required_client_features X add reserved` fails.
        """

        p = self.fs.required_client_features('add', 'reserved', check_status=False, stderr=StringIO())
        self.assertIn('Invalid feature name', p.stderr.getvalue())

    def test_required_client_feature_rm_reserved(self):
        """
        That `ceph fs required_client_features X rm reserved` fails.
        """

        p = self.fs.required_client_features('rm', 'reserved', check_status=False, stderr=StringIO())
        self.assertIn('Invalid feature name', p.stderr.getvalue())

    def test_required_client_feature_add_reserved_bit(self):
        """
        That `ceph fs required_client_features X add <reserved_bit>` passes.
        """

        p = self.fs.required_client_features('add', '1', stderr=StringIO())
        self.assertIn("added feature 'reserved' to required_client_features", p.stderr.getvalue())

    def test_required_client_feature_rm_reserved_bit(self):
        """
        That `ceph fs required_client_features X rm <reserved_bit>` passes.
        """

        self.fs.required_client_features('add', '1')
        p = self.fs.required_client_features('rm', '1', stderr=StringIO())
        self.assertIn("removed feature 'reserved' from required_client_features", p.stderr.getvalue())

class TestCompatCommands(CephFSTestCase):
    """
    Tests for manipulating the MDSMap compat/incompat feature sets.
    """

    CLIENTS_REQUIRED = 0
    MDSS_REQUIRED = 3

    def test_add_compat(self):
        """
        Test adding a compat.
        """

        self.fs.fail()
        self.fs.add_compat(63, 'placeholder')
        mdsmap = self.fs.get_mds_map()
        self.assertIn("feature_63", mdsmap['compat']['compat'])

    def test_add_incompat(self):
        """
        Test adding an incompat.
        """

        self.fs.fail()
        self.fs.add_incompat(63, 'placeholder')
        mdsmap = self.fs.get_mds_map()
        log.info(f"{mdsmap}")
        self.assertIn("feature_63", mdsmap['compat']['incompat'])

    def test_rm_compat(self):
        """
        Test removing a compat.
        """

        self.fs.fail()
        self.fs.add_compat(63, 'placeholder')
        self.fs.rm_compat(63)
        mdsmap = self.fs.get_mds_map()
        self.assertNotIn("feature_63", mdsmap['compat']['compat'])

    def test_rm_incompat(self):
        """
        Test removing an incompat.
        """

        self.fs.fail()
        self.fs.add_incompat(63, 'placeholder')
        self.fs.rm_incompat(63)
        mdsmap = self.fs.get_mds_map()
        self.assertNotIn("feature_63", mdsmap['compat']['incompat'])

    def test_standby_compat(self):
        """
        That adding a compat does not prevent standbys from joining.
        """

        self.fs.fail()
        self.fs.add_compat(63, "placeholder")
        self.fs.set_joinable()
        self.fs.wait_for_daemons()
        mdsmap = self.fs.get_mds_map()
        self.assertIn("feature_63", mdsmap['compat']['compat'])

    def test_standby_incompat_reject(self):
        """
        That adding an incompat feature prevents incompatible daemons from joining.
        """

        self.fs.fail()
        self.fs.add_incompat(63, "placeholder")
        self.fs.set_joinable()
        try:
            self.fs.wait_for_daemons(timeout=60)
        except RuntimeError as e:
            if "Timed out waiting for MDS daemons to become healthy" in str(e):
                pass
            else:
                raise
        else:
            self.fail()

    def test_standby_incompat_upgrade(self):
        """
        That an MDS can upgrade the compat of a fs.
        """

        self.fs.fail()
        self.fs.rm_incompat(1)
        self.fs.set_joinable()
        self.fs.wait_for_daemons()
        mdsmap = self.fs.get_mds_map()
        self.assertIn("feature_1", mdsmap['compat']['incompat'])

    def test_standby_replay_not_upgradeable(self):
        """
        That the mons will not upgrade the MDSMap compat if standby-replay is
        enabled.
        """

        self.fs.fail()
        self.fs.rm_incompat(1)
        self.fs.set_allow_standby_replay(True)
        self.fs.set_joinable()
        try:
            self.fs.wait_for_daemons(timeout=60)
        except RuntimeError as e:
            if "Timed out waiting for MDS daemons to become healthy" in str(e):
                pass
            else:
                raise
        else:
            self.fail()

    def test_standby_incompat_reject_multifs(self):
        """
        Like test_standby_incompat_reject but with a second fs.
        """

        fs2 = self.mds_cluster.newfs(name="cephfs2", create=True)
        fs2.fail()
        fs2.add_incompat(63, 'placeholder')
        fs2.set_joinable()
        try:
            fs2.wait_for_daemons(timeout=60)
        except RuntimeError as e:
            if "Timed out waiting for MDS daemons to become healthy" in str(e):
                pass
            else:
                raise
        else:
            self.fail()
        # did self.fs lose MDS or standbys suicide?
        self.fs.wait_for_daemons()
        mdsmap = fs2.get_mds_map()
        self.assertIn("feature_63", mdsmap['compat']['incompat'])

class TestConfigCommands(CephFSTestCase):
    """
    Test that daemons and clients respond to the otherwise rarely-used
    runtime config modification operations.
    """

    CLIENTS_REQUIRED = 1
    MDSS_REQUIRED = 1

    def test_ceph_config_show(self):
        """
        That I can successfully show MDS configuration.
        """

        names = self.fs.get_rank_names()
        for n in names:
            s = self.fs.mon_manager.raw_cluster_cmd("config", "show", "mds."+n)
            self.assertTrue("NAME" in s)
            self.assertTrue("mon_host" in s)

    def test_client_config(self):
        """
        That I can successfully issue asok "config set" commands.
        """

        if not isinstance(self.mount_a, FuseMount):
            self.skipTest("Test only applies to FUSE clients")

        test_key = "client_cache_size"
        test_val = "123"
        self.mount_a.admin_socket(['config', 'set', test_key, test_val])
        out = self.mount_a.admin_socket(['config', 'get', test_key])
        self.assertEqual(out[test_key], test_val)

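    # Roughly equivalent to poking the client's admin socket by hand, e.g.
    # (the socket path below is illustrative; it varies per host and client):
    #   ceph --admin-daemon /var/run/ceph/ceph-client.<id>.<pid>.asok config set client_cache_size 123
    #   ceph --admin-daemon /var/run/ceph/ceph-client.<id>.<pid>.asok config get client_cache_size
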
    def test_mds_config_asok(self):
        test_key = "mds_max_purge_ops"
        test_val = "123"
        self.fs.mds_asok(['config', 'set', test_key, test_val])
        out = self.fs.mds_asok(['config', 'get', test_key])
        self.assertEqual(out[test_key], test_val)

    def test_mds_config_tell(self):
        test_key = "mds_max_purge_ops"
        test_val = "123"

        self.fs.rank_tell(['injectargs', "--{0}={1}".format(test_key, test_val)])

        # Read it back with `config get` via tell to verify the injected value
        out = self.fs.rank_tell(['config', 'get', test_key])
        self.assertEqual(out[test_key], test_val)

class TestMirroringCommands(CephFSTestCase):
    CLIENTS_REQUIRED = 1
    MDSS_REQUIRED = 1

    def _enable_mirroring(self, fs_name):
        self.fs.mon_manager.raw_cluster_cmd("fs", "mirror", "enable", fs_name)

    def _disable_mirroring(self, fs_name):
        self.fs.mon_manager.raw_cluster_cmd("fs", "mirror", "disable", fs_name)

    def _add_peer(self, fs_name, peer_spec, remote_fs_name):
        peer_uuid = str(uuid.uuid4())
        self.fs.mon_manager.raw_cluster_cmd("fs", "mirror", "peer_add", fs_name, peer_uuid, peer_spec, remote_fs_name)

    def _remove_peer(self, fs_name, peer_uuid):
        self.fs.mon_manager.raw_cluster_cmd("fs", "mirror", "peer_remove", fs_name, peer_uuid)

    def _verify_mirroring(self, fs_name, flag_str):
        status = self.fs.status()
        fs_map = status.get_fsmap_byname(fs_name)
        if flag_str == 'enabled':
            self.assertTrue('mirror_info' in fs_map)
        elif flag_str == 'disabled':
            self.assertTrue('mirror_info' not in fs_map)
        else:
            raise RuntimeError(f'invalid flag_str {flag_str}')

    def _get_peer_uuid(self, fs_name, peer_spec):
        status = self.fs.status()
        fs_map = status.get_fsmap_byname(fs_name)
        mirror_info = fs_map.get('mirror_info', None)
        self.assertTrue(mirror_info is not None)
        for peer_uuid, remote in mirror_info['peers'].items():
            client_name = remote['remote']['client_name']
            cluster_name = remote['remote']['cluster_name']
            spec = f'{client_name}@{cluster_name}'
            if spec == peer_spec:
                return peer_uuid
        return None

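    # _get_peer_uuid() above assumes a 'mirror_info' entry in the fsmap shaped
    # roughly like the sketch below; the UUID and peer names are illustrative,
    # and only the keys the helper actually reads are shown.
    #
    #   "mirror_info": {"peers": {"3a92...": {"remote": {"client_name": "client.site-b",
    #                                                    "cluster_name": "site-b"}}}}
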
    def test_mirroring_command(self):
        """basic mirroring command test -- enable, disable mirroring on a
        filesystem"""
        self._enable_mirroring(self.fs.name)
        self._verify_mirroring(self.fs.name, "enabled")
        self._disable_mirroring(self.fs.name)
        self._verify_mirroring(self.fs.name, "disabled")

    def test_mirroring_peer_commands(self):
        """test adding and removing peers to a mirror enabled filesystem"""
        self._enable_mirroring(self.fs.name)
        self._add_peer(self.fs.name, "client.site-b@site-b", "fs_b")
        self._add_peer(self.fs.name, "client.site-c@site-c", "fs_c")
        self._verify_mirroring(self.fs.name, "enabled")
        uuid_peer_b = self._get_peer_uuid(self.fs.name, "client.site-b@site-b")
        uuid_peer_c = self._get_peer_uuid(self.fs.name, "client.site-c@site-c")
        self.assertTrue(uuid_peer_b is not None)
        self.assertTrue(uuid_peer_c is not None)
        self._remove_peer(self.fs.name, uuid_peer_b)
        self._remove_peer(self.fs.name, uuid_peer_c)
        self._disable_mirroring(self.fs.name)
        self._verify_mirroring(self.fs.name, "disabled")

    def test_mirroring_command_idempotency(self):
        """test to check idempotency of the mirroring family of commands"""
        self._enable_mirroring(self.fs.name)
        self._verify_mirroring(self.fs.name, "enabled")
        self._enable_mirroring(self.fs.name)
        # add peer
        self._add_peer(self.fs.name, "client.site-b@site-b", "fs_b")
        uuid_peer_b1 = self._get_peer_uuid(self.fs.name, "client.site-b@site-b")
        self.assertTrue(uuid_peer_b1 is not None)
        # adding the peer again should be idempotent
        self._add_peer(self.fs.name, "client.site-b@site-b", "fs_b")
        uuid_peer_b2 = self._get_peer_uuid(self.fs.name, "client.site-b@site-b")
        self.assertTrue(uuid_peer_b2 is not None)
        self.assertTrue(uuid_peer_b1 == uuid_peer_b2)
        # remove peer
        self._remove_peer(self.fs.name, uuid_peer_b1)
        uuid_peer_b3 = self._get_peer_uuid(self.fs.name, "client.site-b@site-b")
        self.assertTrue(uuid_peer_b3 is None)
        # removing the peer again should be idempotent
        self._remove_peer(self.fs.name, uuid_peer_b1)
        self._disable_mirroring(self.fs.name)
        self._verify_mirroring(self.fs.name, "disabled")
        self._disable_mirroring(self.fs.name)

    def test_mirroring_disable_with_peers(self):
        """test disabling mirroring for a filesystem with active peers"""
        self._enable_mirroring(self.fs.name)
        self._add_peer(self.fs.name, "client.site-b@site-b", "fs_b")
        self._verify_mirroring(self.fs.name, "enabled")
        uuid_peer_b = self._get_peer_uuid(self.fs.name, "client.site-b@site-b")
        self.assertTrue(uuid_peer_b is not None)
        self._disable_mirroring(self.fs.name)
        self._verify_mirroring(self.fs.name, "disabled")
        # re-enable mirroring to check that the old peers are gone
        self._enable_mirroring(self.fs.name)
        self._verify_mirroring(self.fs.name, "enabled")
        # peer should be gone
        uuid_peer_b = self._get_peer_uuid(self.fs.name, "client.site-b@site-b")
        self.assertTrue(uuid_peer_b is None)
        self._disable_mirroring(self.fs.name)
        self._verify_mirroring(self.fs.name, "disabled")

    def test_mirroring_with_filesystem_reset(self):
        """test to verify mirroring state post filesystem reset"""
        self._enable_mirroring(self.fs.name)
        self._add_peer(self.fs.name, "client.site-b@site-b", "fs_b")
        self._verify_mirroring(self.fs.name, "enabled")
        uuid_peer_b = self._get_peer_uuid(self.fs.name, "client.site-b@site-b")
        self.assertTrue(uuid_peer_b is not None)
        # reset filesystem
        self.fs.fail()
        self.fs.reset()
        self.fs.wait_for_daemons()
        self._verify_mirroring(self.fs.name, "disabled")

class TestSubCmdFsAuthorize(CapsHelper):
    client_id = 'testuser'
    client_name = 'client.' + client_id

    def test_single_path_r(self):
        perm = 'r'
        filepaths, filedata, mounts, keyring = self.setup_test_env(perm)
        moncap = self.get_mon_cap_from_keyring(self.client_name)

        self.run_mon_cap_tests(moncap, keyring)
        self.run_mds_cap_tests(filepaths, filedata, mounts, perm)

    def test_single_path_rw(self):
        perm = 'rw'
        filepaths, filedata, mounts, keyring = self.setup_test_env(perm)
        moncap = self.get_mon_cap_from_keyring(self.client_name)

        self.run_mon_cap_tests(moncap, keyring)
        self.run_mds_cap_tests(filepaths, filedata, mounts, perm)

    def test_single_path_rootsquash(self):
        filedata, filename = 'some data on fs 1', 'file_on_fs1'
        filepath = os_path_join(self.mount_a.hostfs_mntpt, filename)
        self.mount_a.write_file(filepath, filedata)

        keyring = self.fs.authorize(self.client_id, ('/', 'rw', 'root_squash'))
        keyring_path = self.create_keyring_file(self.mount_a.client_remote,
                                                keyring)
        self.mount_a.remount(client_id=self.client_id,
                             client_keyring_path=keyring_path,
                             cephfs_mntpt='/')

        if filepath.find(self.mount_a.hostfs_mntpt) != -1:
            # can read, but not write as root
            contents = self.mount_a.read_file(filepath)
            self.assertEqual(filedata, contents)
            cmdargs = ['echo', 'some random data', Raw('|'), 'sudo', 'tee', filepath]
            self.mount_a.negtestcmd(args=cmdargs, retval=1, errmsg='permission denied')

    def test_single_path_authorize_on_nonalphanumeric_fsname(self):
        """
        That fs authorize command works on filesystems with names having [_.-] characters
        """
        self.mount_a.umount_wait(require_clean=True)
        self.mds_cluster.delete_all_filesystems()
        fs_name = "cephfs-_."
        self.fs = self.mds_cluster.newfs(name=fs_name)
        self.fs.wait_for_daemons()
        self.run_cluster_cmd(f'auth caps client.{self.mount_a.client_id} '
                             f'mon "allow r" '
                             f'osd "allow rw pool={self.fs.get_data_pool_name()}" '
                             f'mds allow')
        self.mount_a.remount(cephfs_name=self.fs.name)
        perm = 'rw'
        filepaths, filedata, mounts, keyring = self.setup_test_env(perm)
        self.run_mds_cap_tests(filepaths, filedata, mounts, perm)

    def test_multiple_path_r(self):
        perm, paths = 'r', ('/dir1', '/dir2/dir22')
        filepaths, filedata, mounts, keyring = self.setup_test_env(perm, paths)
        moncap = self.get_mon_cap_from_keyring(self.client_name)

        keyring_path = self.create_keyring_file(self.mount_a.client_remote,
                                                keyring)
        for path in paths:
            self.mount_a.remount(client_id=self.client_id,
                                 client_keyring_path=keyring_path,
                                 cephfs_mntpt=path)

            # actual tests...
            self.run_mon_cap_tests(moncap, keyring)
            self.run_mds_cap_tests(filepaths, filedata, mounts, perm)

    def test_multiple_path_rw(self):
        perm, paths = 'rw', ('/dir1', '/dir2/dir22')
        filepaths, filedata, mounts, keyring = self.setup_test_env(perm, paths)
        moncap = self.get_mon_cap_from_keyring(self.client_name)

        keyring_path = self.create_keyring_file(self.mount_a.client_remote,
                                                keyring)
        for path in paths:
            self.mount_a.remount(client_id=self.client_id,
                                 client_keyring_path=keyring_path,
                                 cephfs_mntpt=path)

            # actual tests...
            self.run_mon_cap_tests(moncap, keyring)
            self.run_mds_cap_tests(filepaths, filedata, mounts, perm)

    def tearDown(self):
        self.mount_a.umount_wait()
        self.run_cluster_cmd(f'auth rm {self.client_name}')

        super(type(self), self).tearDown()

    def setup_for_single_path(self, perm):
        filedata, filename = 'some data on fs 1', 'file_on_fs1'

        filepath = os_path_join(self.mount_a.hostfs_mntpt, filename)
        self.mount_a.write_file(filepath, filedata)

        keyring = self.fs.authorize(self.client_id, ('/', perm))
        keyring_path = self.create_keyring_file(self.mount_a.client_remote,
                                                keyring)

        self.mount_a.remount(client_id=self.client_id,
                             client_keyring_path=keyring_path,
                             cephfs_mntpt='/')

        return filepath, filedata, keyring

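    # self.fs.authorize(...) above wraps the `ceph fs authorize` command; for a
    # filesystem named "cephfs" (the name here is illustrative) the single-path
    # setup corresponds roughly to:
    #   ceph fs authorize cephfs client.testuser / rw
    # which returns a keyring for client.testuser with mon/mds/osd caps scoped
    # to that path.
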
    def setup_for_multiple_paths(self, perm, paths):
        filedata, filename = 'some data on fs 1', 'file_on_fs1'

        self.mount_a.run_shell('mkdir -p dir1/dir12/dir13 dir2/dir22/dir23')

        filepaths = []
        for path in paths:
            filepath = os_path_join(self.mount_a.hostfs_mntpt, path[1:], filename)
            self.mount_a.write_file(filepath, filedata)
            filepaths.append(filepath.replace(path, ''))
        filepaths = tuple(filepaths)

        keyring = self.fs.authorize(self.client_id, (paths[0], perm, paths[1],
                                                     perm))

        return filepaths, filedata, keyring

    def setup_test_env(self, perm, paths=()):
        filepaths, filedata, keyring = self.setup_for_multiple_paths(perm, paths) if paths \
            else self.setup_for_single_path(perm)

        if not isinstance(filepaths, tuple):
            filepaths = (filepaths, )
        if not isinstance(filedata, tuple):
            filedata = (filedata, )
        mounts = (self.mount_a, )

        return filepaths, filedata, mounts, keyring

class TestAdminCommandIdempotency(CephFSTestCase):
    """
    Tests for administration command idempotency.
    """

    CLIENTS_REQUIRED = 0
    MDSS_REQUIRED = 1

    def test_rm_idempotency(self):
        """
        That removing a fs twice is idempotent.
        """

        data_pools = self.fs.get_data_pool_names(refresh=True)
        self.fs.fail()
        self.fs.rm()
        try:
            self.fs.get_mds_map()
        except FSMissing:
            pass
        else:
            self.fail("get_mds_map should raise")
        p = self.fs.rm()
        self.assertIn("does not exist", p.stderr.getvalue())
        self.fs.remove_pools(data_pools)