ceph/qa/tasks/cephfs/test_admin.py
522d829b 1import errno
1911f103 2import json
522d829b
TL
3import logging
4import time
f67539c2
TL
5import uuid
6from io import StringIO
7from os.path import join as os_path_join
1911f103 8
20effc67 9from teuthology.exceptions import CommandFailedError
92f5a8d4 10
aee94f69 11from tasks.cephfs.cephfs_test_case import CephFSTestCase, classhook
f67539c2 12from tasks.cephfs.filesystem import FileLayout, FSMissing
92f5a8d4 13from tasks.cephfs.fuse_mount import FuseMount
1e59de90 14from tasks.cephfs.caps_helper import CapTester
92f5a8d4 15
522d829b 16log = logging.getLogger(__name__)
e306af50 17
92f5a8d4
TL
18class TestAdminCommands(CephFSTestCase):
19 """
20 Tests for administration commands.
21 """
22
23 CLIENTS_REQUIRED = 1
20effc67 24 MDSS_REQUIRED = 1
92f5a8d4 25
20effc67
TL
26 def check_pool_application_metadata_key_value(self, pool, app, key, value):
27 output = self.fs.mon_manager.raw_cluster_cmd(
28 'osd', 'pool', 'application', 'get', pool, app, key)
29 self.assertEqual(str(output.strip()), value)
f67539c2 30
20effc67
TL
31 def setup_ec_pools(self, n, metadata=True, overwrites=True):
32 if metadata:
33 self.fs.mon_manager.raw_cluster_cmd('osd', 'pool', 'create', n+"-meta", "8")
34 cmd = ['osd', 'erasure-code-profile', 'set', n+"-profile", "m=2", "k=2", "crush-failure-domain=osd"]
35 self.fs.mon_manager.raw_cluster_cmd(*cmd)
36 self.fs.mon_manager.raw_cluster_cmd('osd', 'pool', 'create', n+"-data", "8", "erasure", n+"-profile")
37 if overwrites:
38 self.fs.mon_manager.raw_cluster_cmd('osd', 'pool', 'set', n+"-data", 'allow_ec_overwrites', 'true')
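    # A call such as setup_ec_pools('foo') creates a replicated pool 'foo-meta',
    # an EC profile 'foo-profile' (k=2, m=2, crush-failure-domain=osd) and an EC
    # pool 'foo-data' with overwrites allowed.
    #
    # Several tests below repeat a "run a cluster command and expect EINVAL"
    # try/except block. A minimal sketch of a shared helper for that pattern is
    # shown here for illustration only; the helper name is hypothetical and the
    # tests below do not call it.
    def _assert_cluster_cmd_fails_with_einval(self, *args, msg=None):
        """Run a 'ceph' command and assert that it fails with EINVAL."""
        try:
            self.fs.mon_manager.raw_cluster_cmd(*args)
        except CommandFailedError as e:
            self.assertEqual(e.exitstatus, errno.EINVAL, msg)
        else:
            self.fail(msg or "expected command to fail with EINVAL")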
f67539c2 39
aee94f69
TL
40@classhook('_add_valid_tell')
41class TestValidTell(TestAdminCommands):
42 @classmethod
43 def _add_valid_tell(cls):
44 tells = [
45 ['cache', 'status'],
46 ['damage', 'ls'],
47 ['dump_blocked_ops'],
48 ['dump_blocked_ops_count'],
49 ['dump_historic_ops'],
50 ['dump_historic_ops_by_duration'],
51 ['dump_mempools'],
52 ['dump_ops_in_flight'],
53 ['flush', 'journal'],
54 ['get', 'subtrees'],
55 ['ops', 'locks'],
56 ['ops'],
57 ['status'],
58 ['version'],
59 ]
60 def test(c):
61 def f(self):
62 J = self.fs.rank_tell(c)
63 json.dumps(J)
64 log.debug("dumped:\n%s", str(J))
65 return f
66 for c in tells:
67 setattr(cls, 'test_valid_' + '_'.join(c), test(c))
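        # The loop above attaches one generated test per tell command, named
        # after its arguments (e.g. test_valid_cache_status, test_valid_damage_ls,
        # test_valid_flush_journal, test_valid_version); each generated test only
        # checks that the rank's tell output is JSON-serializable.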
f67539c2 68
20effc67
TL
69class TestFsStatus(TestAdminCommands):
70 """
71 Test "ceph fs status subcommand.
72 """
f67539c2 73
92f5a8d4
TL
74 def test_fs_status(self):
75 """
76 That `ceph fs status` command functions.
77 """
78
79 s = self.fs.mon_manager.raw_cluster_cmd("fs", "status")
80 self.assertTrue("active" in s)
81
e306af50
TL
82 mdsmap = json.loads(self.fs.mon_manager.raw_cluster_cmd("fs", "status", "--format=json-pretty"))["mdsmap"]
83 self.assertEqual(mdsmap[0]["state"], "active")
84
85 mdsmap = json.loads(self.fs.mon_manager.raw_cluster_cmd("fs", "status", "--format=json"))["mdsmap"]
86 self.assertEqual(mdsmap[0]["state"], "active")
87
92f5a8d4 88
20effc67
TL
89class TestAddDataPool(TestAdminCommands):
90 """
91 Test "ceph fs add_data_pool" subcommand.
92 """
1911f103 93
92f5a8d4
TL
94 def test_add_data_pool_root(self):
95 """
96 That a new data pool can be added and used for the root directory.
97 """
98
99 p = self.fs.add_data_pool("foo")
100 self.fs.set_dir_layout(self.mount_a, ".", FileLayout(pool=p))
101
1911f103
TL
102 def test_add_data_pool_application_metadata(self):
103 """
104 That the application metadata set on a newly added data pool is as expected.
105 """
106 pool_name = "foo"
107 mon_cmd = self.fs.mon_manager.raw_cluster_cmd
522d829b
TL
108 mon_cmd('osd', 'pool', 'create', pool_name, '--pg_num_min',
109 str(self.fs.pg_num_min))
1911f103
TL
110 # Check whether https://tracker.ceph.com/issues/43061 is fixed
111 mon_cmd('osd', 'pool', 'application', 'enable', pool_name, 'cephfs')
112 self.fs.add_data_pool(pool_name, create=False)
20effc67 113 self.check_pool_application_metadata_key_value(
1911f103
TL
114 pool_name, 'cephfs', 'data', self.fs.name)
115
92f5a8d4
TL
116 def test_add_data_pool_subdir(self):
117 """
118 That a new data pool can be added and used for a sub-directory.
119 """
120
121 p = self.fs.add_data_pool("foo")
9f95a23c 122 self.mount_a.run_shell("mkdir subdir")
92f5a8d4
TL
123 self.fs.set_dir_layout(self.mount_a, "subdir", FileLayout(pool=p))
124
e306af50
TL
125 def test_add_data_pool_non_alphamueric_name_as_subdir(self):
126 """
127 That a new data pool with non-alphanumeric name can be added and used for a sub-directory.
128 """
129 p = self.fs.add_data_pool("I-am-data_pool00.")
130 self.mount_a.run_shell("mkdir subdir")
131 self.fs.set_dir_layout(self.mount_a, "subdir", FileLayout(pool=p))
132
92f5a8d4
TL
133 def test_add_data_pool_ec(self):
134 """
135 That a new EC data pool can be added.
136 """
137
138 n = "test_add_data_pool_ec"
20effc67 139 self.setup_ec_pools(n, metadata=False)
f67539c2 140 self.fs.add_data_pool(n+"-data", create=False)
92f5a8d4 141
1e59de90
TL
142 def test_add_already_in_use_data_pool(self):
143 """
144 That adding a data pool that is already in use by another fs fails.
145 """
146
147 # create first data pool, metadata pool and add with filesystem
148 first_fs = "first_fs"
149 first_metadata_pool = "first_metadata_pool"
150 first_data_pool = "first_data_pool"
151 self.fs.mon_manager.raw_cluster_cmd('osd', 'pool', 'create', first_metadata_pool)
152 self.fs.mon_manager.raw_cluster_cmd('osd', 'pool', 'create', first_data_pool)
153 self.fs.mon_manager.raw_cluster_cmd('fs', 'new', first_fs, first_metadata_pool, first_data_pool)
154
155 # create second data pool, metadata pool and add with filesystem
156 second_fs = "second_fs"
157 second_metadata_pool = "second_metadata_pool"
158 second_data_pool = "second_data_pool"
159 self.fs.mon_manager.raw_cluster_cmd('osd', 'pool', 'create', second_metadata_pool)
160 self.fs.mon_manager.raw_cluster_cmd('osd', 'pool', 'create', second_data_pool)
161 self.fs.mon_manager.raw_cluster_cmd('fs', 'new', second_fs, second_metadata_pool, second_data_pool)
162
163 # try to add 'first_data_pool' with 'second_fs'
164 # Expecting EINVAL exit status because 'first_data_pool' is already in use with 'first_fs'
165 try:
166 self.fs.mon_manager.raw_cluster_cmd('fs', 'add_data_pool', second_fs, first_data_pool)
167 except CommandFailedError as e:
168 self.assertEqual(e.exitstatus, errno.EINVAL)
169 else:
170 self.fail("Expected EINVAL because data pool is already in use as data pool for first_fs")
171
172 def test_add_already_in_use_metadata_pool(self):
173 """
174 That adding a metadata pool that is already in use by another fs fails.
175 """
176
177 # create first data pool, metadata pool and add with filesystem
178 first_fs = "first_fs"
179 first_metadata_pool = "first_metadata_pool"
180 first_data_pool = "first_data_pool"
181 self.fs.mon_manager.raw_cluster_cmd('osd', 'pool', 'create', first_metadata_pool)
182 self.fs.mon_manager.raw_cluster_cmd('osd', 'pool', 'create', first_data_pool)
183 self.fs.mon_manager.raw_cluster_cmd('fs', 'new', first_fs, first_metadata_pool, first_data_pool)
184
185 # create second data pool, metadata pool and add with filesystem
186 second_fs = "second_fs"
187 second_metadata_pool = "second_metadata_pool"
188 second_data_pool = "second_data_pool"
189 self.fs.mon_manager.raw_cluster_cmd('osd', 'pool', 'create', second_metadata_pool)
190 self.fs.mon_manager.raw_cluster_cmd('osd', 'pool', 'create', second_data_pool)
191 self.fs.mon_manager.raw_cluster_cmd('fs', 'new', second_fs, second_metadata_pool, second_data_pool)
192
193 # try to add 'second_metadata_pool' with 'first_fs' as a data pool
194 # Expecting EINVAL exit status because 'second_metadata_pool'
195 # is already in use with 'second_fs' as a metadata pool
196 try:
197 self.fs.mon_manager.raw_cluster_cmd('fs', 'add_data_pool', first_fs, second_metadata_pool)
198 except CommandFailedError as e:
199 self.assertEqual(e.exitstatus, errno.EINVAL)
200 else:
201 self.fail("Expected EINVAL because data pool is already in use as metadata pool for 'second_fs'")
20effc67
TL
202
203class TestFsNew(TestAdminCommands):
204 """
205 Test "ceph fs new" subcommand.
206 """
207 MDSS_REQUIRED = 3
208
209 def test_fsnames_can_only_by_goodchars(self):
210 n = 'test_fsnames_can_only_by_goodchars'
211 metapoolname, datapoolname = n+'-testmetapool', n+'-testdatapool'
212 badname = n+'badname@#'
213
214 self.fs.mon_manager.raw_cluster_cmd('osd', 'pool', 'create',
215 metapoolname)
216 self.fs.mon_manager.raw_cluster_cmd('osd', 'pool', 'create',
217 datapoolname)
218
219 # test that an fs name containing chars not in "goodchars" fails
220 args = ['fs', 'new', badname, metapoolname, datapoolname]
221 proc = self.fs.mon_manager.run_cluster_cmd(args=args,stderr=StringIO(),
222 check_status=False)
223 self.assertIn('invalid chars', proc.stderr.getvalue().lower())
224
225 self.fs.mon_manager.raw_cluster_cmd('osd', 'pool', 'rm', metapoolname,
226 metapoolname,
227 '--yes-i-really-really-mean-it-not-faking')
228 self.fs.mon_manager.raw_cluster_cmd('osd', 'pool', 'rm', datapoolname,
229 datapoolname,
230 '--yes-i-really-really-mean-it-not-faking')
231
92f5a8d4
TL
232 def test_new_default_ec(self):
233 """
234 That a new file system warns/fails with an EC default data pool.
235 """
236
f91f0fd5 237 self.mount_a.umount_wait(require_clean=True)
f67539c2 238 self.mds_cluster.delete_all_filesystems()
92f5a8d4 239 n = "test_new_default_ec"
20effc67 240 self.setup_ec_pools(n)
92f5a8d4
TL
241 try:
242 self.fs.mon_manager.raw_cluster_cmd('fs', 'new', n, n+"-meta", n+"-data")
243 except CommandFailedError as e:
244 if e.exitstatus == 22:
245 pass
246 else:
247 raise
248 else:
249 raise RuntimeError("expected failure")
250
251 def test_new_default_ec_force(self):
252 """
253 That a new file system succeeds with an EC default data pool with --force.
254 """
255
f91f0fd5 256 self.mount_a.umount_wait(require_clean=True)
f67539c2 257 self.mds_cluster.delete_all_filesystems()
92f5a8d4 258 n = "test_new_default_ec_force"
20effc67 259 self.setup_ec_pools(n)
92f5a8d4
TL
260 self.fs.mon_manager.raw_cluster_cmd('fs', 'new', n, n+"-meta", n+"-data", "--force")
261
262 def test_new_default_ec_no_overwrite(self):
263 """
264 That a new file system fails with an EC default data pool without overwrite.
265 """
266
f91f0fd5 267 self.mount_a.umount_wait(require_clean=True)
f67539c2 268 self.mds_cluster.delete_all_filesystems()
92f5a8d4 269 n = "test_new_default_ec_no_overwrite"
20effc67 270 self.setup_ec_pools(n, overwrites=False)
92f5a8d4
TL
271 try:
272 self.fs.mon_manager.raw_cluster_cmd('fs', 'new', n, n+"-meta", n+"-data")
273 except CommandFailedError as e:
274 if e.exitstatus == 22:
275 pass
276 else:
277 raise
278 else:
279 raise RuntimeError("expected failure")
280 # and even with --force !
281 try:
282 self.fs.mon_manager.raw_cluster_cmd('fs', 'new', n, n+"-meta", n+"-data", "--force")
283 except CommandFailedError as e:
284 if e.exitstatus == 22:
285 pass
286 else:
287 raise
288 else:
289 raise RuntimeError("expected failure")
290
1911f103
TL
291 def test_fs_new_pool_application_metadata(self):
292 """
293 That the application metadata set on the pools of a newly created filesystem is as expected.
294 """
f91f0fd5 295 self.mount_a.umount_wait(require_clean=True)
f67539c2 296 self.mds_cluster.delete_all_filesystems()
1911f103
TL
297 fs_name = "test_fs_new_pool_application"
298 keys = ['metadata', 'data']
299 pool_names = [fs_name+'-'+key for key in keys]
300 mon_cmd = self.fs.mon_manager.raw_cluster_cmd
301 for p in pool_names:
522d829b 302 mon_cmd('osd', 'pool', 'create', p, '--pg_num_min', str(self.fs.pg_num_min))
1911f103
TL
303 mon_cmd('osd', 'pool', 'application', 'enable', p, 'cephfs')
304 mon_cmd('fs', 'new', fs_name, pool_names[0], pool_names[1])
305 for i in range(2):
20effc67 306 self.check_pool_application_metadata_key_value(
1911f103
TL
307 pool_names[i], 'cephfs', keys[i], fs_name)
308
522d829b
TL
309 def test_fs_new_with_specific_id(self):
310 """
311 That a file system can be created with a specific ID.
312 """
313 fs_name = "test_fs_specific_id"
314 fscid = 100
315 keys = ['metadata', 'data']
316 pool_names = [fs_name+'-'+key for key in keys]
317 for p in pool_names:
318 self.run_cluster_cmd(f'osd pool create {p}')
319 self.run_cluster_cmd(f'fs new {fs_name} {pool_names[0]} {pool_names[1]} --fscid {fscid} --force')
320 self.fs.status().get_fsmap(fscid)
321 for i in range(2):
20effc67 322 self.check_pool_application_metadata_key_value(pool_names[i], 'cephfs', keys[i], fs_name)
522d829b
TL
323
324 def test_fs_new_with_specific_id_idempotency(self):
325 """
326 That command to create file system with specific ID is idempotent.
327 """
328 fs_name = "test_fs_specific_id"
329 fscid = 100
330 keys = ['metadata', 'data']
331 pool_names = [fs_name+'-'+key for key in keys]
332 for p in pool_names:
333 self.run_cluster_cmd(f'osd pool create {p}')
334 self.run_cluster_cmd(f'fs new {fs_name} {pool_names[0]} {pool_names[1]} --fscid {fscid} --force')
335 self.run_cluster_cmd(f'fs new {fs_name} {pool_names[0]} {pool_names[1]} --fscid {fscid} --force')
336 self.fs.status().get_fsmap(fscid)
337
338 def test_fs_new_with_specific_id_fails_without_force_flag(self):
339 """
340 That command to create file system with specific ID fails without '--force' flag.
341 """
342 fs_name = "test_fs_specific_id"
343 fscid = 100
344 keys = ['metadata', 'data']
345 pool_names = [fs_name+'-'+key for key in keys]
346 for p in pool_names:
347 self.run_cluster_cmd(f'osd pool create {p}')
348 try:
349 self.run_cluster_cmd(f'fs new {fs_name} {pool_names[0]} {pool_names[1]} --fscid {fscid}')
350 except CommandFailedError as ce:
351 self.assertEqual(ce.exitstatus, errno.EINVAL,
352 "invalid error code on creating a file system with specifc ID without --force flag")
353 else:
354 self.fail("expected creating file system with specific ID without '--force' flag to fail")
355
356 def test_fs_new_with_specific_id_fails_already_in_use(self):
357 """
358 That creating file system with ID already in use fails.
359 """
360 fs_name = "test_fs_specific_id"
361 # file system ID already in use
362 fscid = self.fs.status().map['filesystems'][0]['id']
363 keys = ['metadata', 'data']
364 pool_names = [fs_name+'-'+key for key in keys]
365 for p in pool_names:
366 self.run_cluster_cmd(f'osd pool create {p}')
367 try:
368 self.run_cluster_cmd(f'fs new {fs_name} {pool_names[0]} {pool_names[1]} --fscid {fscid} --force')
369 except CommandFailedError as ce:
370 self.assertEqual(ce.exitstatus, errno.EINVAL,
371 "invalid error code on creating a file system with specifc ID that is already in use")
372 else:
373 self.fail("expected creating file system with ID already in use to fail")
374
1e59de90
TL
375 def test_fs_new_metadata_pool_already_in_use(self):
376 """
377 That creating a file system with a metadata pool that is already in use fails.
378 """
379
380 # create first data pool, metadata pool and add with filesystem
381 first_fs = "first_fs"
382 first_metadata_pool = "first_metadata_pool"
383 first_data_pool = "first_data_pool"
384 self.fs.mon_manager.raw_cluster_cmd('osd', 'pool', 'create', first_metadata_pool)
385 self.fs.mon_manager.raw_cluster_cmd('osd', 'pool', 'create', first_data_pool)
386 self.fs.mon_manager.raw_cluster_cmd('fs', 'new', first_fs, first_metadata_pool, first_data_pool)
387
388 second_fs = "second_fs"
389 second_data_pool = "second_data_pool"
390 self.fs.mon_manager.raw_cluster_cmd('osd', 'pool', 'create', second_data_pool)
391
392 # try to create new fs 'second_fs' with following configuration
393 # metadata pool -> 'first_metadata_pool'
394 # data pool -> 'second_data_pool'
395 # Expecting EINVAL exit status because 'first_metadata_pool'
396 # is already in use with 'first_fs'
397 try:
398 self.fs.mon_manager.raw_cluster_cmd('fs', 'new', second_fs, first_metadata_pool, second_data_pool)
399 except CommandFailedError as e:
400 self.assertEqual(e.exitstatus, errno.EINVAL)
401 else:
402 self.fail("Expected EINVAL because metadata pool is already in use for 'first_fs'")
403
404 def test_fs_new_data_pool_already_in_use(self):
405 """
406 That creating a file system with a data pool that is already in use fails.
407 """
408
409 # create first data pool, metadata pool and add with filesystem
410 first_fs = "first_fs"
411 first_metadata_pool = "first_metadata_pool"
412 first_data_pool = "first_data_pool"
413 self.fs.mon_manager.raw_cluster_cmd('osd', 'pool', 'create', first_metadata_pool)
414 self.fs.mon_manager.raw_cluster_cmd('osd', 'pool', 'create', first_data_pool)
415 self.fs.mon_manager.raw_cluster_cmd('fs', 'new', first_fs, first_metadata_pool, first_data_pool)
416
417 second_fs = "second_fs"
418 second_metadata_pool = "second_metadata_pool"
419 self.fs.mon_manager.raw_cluster_cmd('osd', 'pool', 'create', second_metadata_pool)
420
421 # try to create new fs 'second_fs' with following configuration
422 # metadata pool -> 'second_metadata_pool'
423 # data pool -> 'first_data_pool'
424 # Expecting EINVAL exit status because 'first_data_pool'
425 # is already in use with 'first_fs'
426 try:
427 self.fs.mon_manager.raw_cluster_cmd('fs', 'new', second_fs, second_metadata_pool, first_data_pool)
428 except CommandFailedError as e:
429 self.assertEqual(e.exitstatus, errno.EINVAL)
430 else:
431 self.fail("Expected EINVAL because data pool is already in use for 'first_fs'")
432
433 def test_fs_new_metadata_and_data_pool_in_use_by_another_same_fs(self):
434 """
435 That creating a file system whose metadata and data pools are both already in use by the same other fs fails.
436 """
437
438 # create first data pool, metadata pool and add with filesystem
439 first_fs = "first_fs"
440 first_metadata_pool = "first_metadata_pool"
441 first_data_pool = "first_data_pool"
442 self.fs.mon_manager.raw_cluster_cmd('osd', 'pool', 'create', first_metadata_pool)
443 self.fs.mon_manager.raw_cluster_cmd('osd', 'pool', 'create', first_data_pool)
444 self.fs.mon_manager.raw_cluster_cmd('fs', 'new', first_fs, first_metadata_pool, first_data_pool)
445
446 second_fs = "second_fs"
447
448 # try to create new fs 'second_fs' with following configuration
449 # metadata pool -> 'first_metadata_pool'
450 # data pool -> 'first_data_pool'
451 # Expecting EINVAL exit status because 'first_metadata_pool' and 'first_data_pool'
452 # are already in use with 'first_fs'
453 try:
454 self.fs.mon_manager.raw_cluster_cmd('fs', 'new', second_fs, first_metadata_pool, first_data_pool)
455 except CommandFailedError as e:
456 self.assertEqual(e.exitstatus, errno.EINVAL)
457 else:
458 self.fail("Expected EINVAL because metadata and data pool is already in use for 'first_fs'")
459
460 def test_fs_new_metadata_and_data_pool_in_use_by_different_fs(self):
461 """
462 That creating a file system whose metadata and data pools are already in use by different file systems fails.
463 """
464
465 # create first data pool, metadata pool and add with filesystem
466 first_fs = "first_fs"
467 first_metadata_pool = "first_metadata_pool"
468 first_data_pool = "first_data_pool"
469 self.fs.mon_manager.raw_cluster_cmd('osd', 'pool', 'create', first_metadata_pool)
470 self.fs.mon_manager.raw_cluster_cmd('osd', 'pool', 'create', first_data_pool)
471 self.fs.mon_manager.raw_cluster_cmd('fs', 'new', first_fs, first_metadata_pool, first_data_pool)
472
473 # create second data pool, metadata pool and add with filesystem
474 second_fs = "second_fs"
475 second_metadata_pool = "second_metadata_pool"
476 second_data_pool = "second_data_pool"
477 self.fs.mon_manager.raw_cluster_cmd('osd', 'pool', 'create', second_metadata_pool)
478 self.fs.mon_manager.raw_cluster_cmd('osd', 'pool', 'create', second_data_pool)
479 self.fs.mon_manager.raw_cluster_cmd('fs', 'new', second_fs, second_metadata_pool, second_data_pool)
480
481 third_fs = "third_fs"
482
483 # try to create new fs 'third_fs' with following configuration
484 # metadata pool -> 'first_metadata_pool'
485 # data pool -> 'second_data_pool'
486 # Expecting EINVAL exit status because 'first_metadata_pool' and 'second_data_pool'
487 # are already in use with 'first_fs' and 'second_fs'
488 try:
489 self.fs.mon_manager.raw_cluster_cmd('fs', 'new', third_fs, first_metadata_pool, second_data_pool)
490 except CommandFailedError as e:
491 self.assertEqual(e.exitstatus, errno.EINVAL)
492 else:
493 self.fail("Expected EINVAL because metadata and data pool is already in use for 'first_fs' and 'second_fs'")
494
495 def test_fs_new_interchange_already_in_use_metadata_and_data_pool_of_same_fs(self):
496 """
497 That creating a file system with the metadata and data pools of an existing fs interchanged fails.
498 """
499
500 # create first data pool, metadata pool and add with filesystem
501 first_fs = "first_fs"
502 first_metadata_pool = "first_metadata_pool"
503 first_data_pool = "first_data_pool"
504 self.fs.mon_manager.raw_cluster_cmd('osd', 'pool', 'create', first_metadata_pool)
505 self.fs.mon_manager.raw_cluster_cmd('osd', 'pool', 'create', first_data_pool)
506 self.fs.mon_manager.raw_cluster_cmd('fs', 'new', first_fs, first_metadata_pool, first_data_pool)
507
508 second_fs = "second_fs"
509
510 # try to create new fs 'second_fs' with following configuration
511 # metadata pool -> 'first_data_pool' (already used as data pool for 'first_fs')
512 # data pool -> 'first_metadata_pool' (already used as metadata pool for 'first_fs')
513 # Expecting EINVAL exit status because 'first_data_pool' and 'first_metadata_pool'
514 # are already in use with 'first_fs'
515 try:
516 self.fs.mon_manager.raw_cluster_cmd('fs', 'new', second_fs, first_data_pool, first_metadata_pool)
517 except CommandFailedError as e:
518 self.assertEqual(e.exitstatus, errno.EINVAL)
519 else:
520 self.fail("Expected EINVAL because metadata and data pool is already in use for 'first_fs'")
521
522 def test_fs_new_interchange_already_in_use_metadata_and_data_pool_of_different_fs(self):
523 """
524 That creating a file system by interchanging metadata and data pools that are already in use by different file systems fails.
525 """
526
527 # create first data pool, metadata pool and add with filesystem
528 first_fs = "first_fs"
529 first_metadata_pool = "first_metadata_pool"
530 first_data_pool = "first_data_pool"
531 self.fs.mon_manager.raw_cluster_cmd('osd', 'pool', 'create', first_metadata_pool)
532 self.fs.mon_manager.raw_cluster_cmd('osd', 'pool', 'create', first_data_pool)
533 self.fs.mon_manager.raw_cluster_cmd('fs', 'new', first_fs, first_metadata_pool, first_data_pool)
534
535 # create second data pool, metadata pool and add with filesystem
536 second_fs = "second_fs"
537 second_metadata_pool = "second_metadata_pool"
538 second_data_pool = "second_data_pool"
539 self.fs.mon_manager.raw_cluster_cmd('osd', 'pool', 'create', second_metadata_pool)
540 self.fs.mon_manager.raw_cluster_cmd('osd', 'pool', 'create', second_data_pool)
541 self.fs.mon_manager.raw_cluster_cmd('fs', 'new', second_fs, second_metadata_pool, second_data_pool)
542
543 third_fs = "third_fs"
544
545 # try to create new fs 'third_fs' with following configuration
546 # metadata pool -> 'first_data_pool' (already used as data pool for 'first_fs')
547 # data pool -> 'second_metadata_pool' (already used as metadata pool for 'second_fs')
548 # Expecting EINVAL exit status because 'first_data_pool' and 'second_metadata_pool'
549 # are already in use with 'first_fs' and 'second_fs'
550 try:
551 self.fs.mon_manager.raw_cluster_cmd('fs', 'new', third_fs, first_data_pool, second_metadata_pool)
552 except CommandFailedError as e:
553 self.assertEqual(e.exitstatus, errno.EINVAL)
554 else:
555 self.fail("Expected EINVAL because metadata and data pool is already in use for 'first_fs' and 'second_fs'")
556
557 def test_fs_new_metadata_pool_already_in_use_with_rbd(self):
558 """
559 That creating a new file system with a metadata pool already used by rbd fails.
560 """
561
562 # create pool and initialise with rbd
563 new_pool = "new_pool"
564 self.fs.mon_manager.raw_cluster_cmd('osd', 'pool', 'create', new_pool)
565 self.ctx.cluster.run(args=['rbd', 'pool', 'init', new_pool])
566
567 new_fs = "new_fs"
568 new_data_pool = "new_data_pool"
569
570 self.fs.mon_manager.raw_cluster_cmd('osd', 'pool', 'create', new_data_pool)
571
572 # try to create new fs 'new_fs' with following configuration
573 # metadata pool -> 'new_pool' (already used by rbd app)
574 # data pool -> 'new_data_pool'
575 # Expecting EINVAL exit status because 'new_pool' is already in use with 'rbd' app
576 try:
577 self.fs.mon_manager.raw_cluster_cmd('fs', 'new', new_fs, new_pool, new_data_pool)
578 except CommandFailedError as e:
579 self.assertEqual(e.exitstatus, errno.EINVAL)
580 else:
581 self.fail("Expected EINVAL because metadata pool is already in use for rbd")
582
583 def test_fs_new_data_pool_already_in_use_with_rbd(self):
584 """
585 That creating a new file system with a data pool already used by rbd fails.
586 """
587
588 # create pool and initialise with rbd
589 new_pool = "new_pool"
590 self.fs.mon_manager.raw_cluster_cmd('osd', 'pool', 'create', new_pool)
591 self.ctx.cluster.run(args=['rbd', 'pool', 'init', new_pool])
592
593 new_fs = "new_fs"
594 new_metadata_pool = "new_metadata_pool"
595
596 self.fs.mon_manager.raw_cluster_cmd('osd', 'pool', 'create', new_metadata_pool)
597
598 # try to create new fs 'new_fs' with following configuration
599 # metadata pool -> 'new_metadata_pool'
600 # data pool -> 'new_pool' (already used by rbd app)
601 # Expecting EINVAL exit status because 'new_pool' is already in use with 'rbd' app
602 try:
603 self.fs.mon_manager.raw_cluster_cmd('fs', 'new', new_fs, new_metadata_pool, new_pool)
604 except CommandFailedError as e:
605 self.assertEqual(e.exitstatus, errno.EINVAL)
606 else:
607 self.fail("Expected EINVAL because data pool is already in use for rbd")
522d829b 608
20effc67
TL
609class TestRenameCommand(TestAdminCommands):
610 """
611 Tests for rename command.
612 """
613
614 CLIENTS_REQUIRED = 1
615 MDSS_REQUIRED = 2
616
617 def test_fs_rename(self):
618 """
619 That the file system can be renamed, and the application metadata set on its pools are as expected.
620 """
621 # Renaming the file system breaks this mount as the client uses
622 # file system specific authorization. The client cannot read
623 # or write even if the client's cephx ID caps are updated to access
624 # the new file system name without the client being unmounted and
625 # re-mounted.
626 self.mount_a.umount_wait(require_clean=True)
627 orig_fs_name = self.fs.name
628 new_fs_name = 'new_cephfs'
629 client_id = 'test_new_cephfs'
630
631 self.run_cluster_cmd(f'fs rename {orig_fs_name} {new_fs_name} --yes-i-really-mean-it')
632
633 # authorize a cephx ID access to the renamed file system.
634 # use the ID to write to the file system.
635 self.fs.name = new_fs_name
636 keyring = self.fs.authorize(client_id, ('/', 'rw'))
637 keyring_path = self.mount_a.client_remote.mktemp(data=keyring)
638 self.mount_a.remount(client_id=client_id,
639 client_keyring_path=keyring_path,
640 cephfs_mntpt='/',
641 cephfs_name=self.fs.name)
642 filedata, filename = 'some data on fs', 'file_on_fs'
643 filepath = os_path_join(self.mount_a.hostfs_mntpt, filename)
644 self.mount_a.write_file(filepath, filedata)
645 self.check_pool_application_metadata_key_value(
646 self.fs.get_data_pool_name(), 'cephfs', 'data', new_fs_name)
647 self.check_pool_application_metadata_key_value(
648 self.fs.get_metadata_pool_name(), 'cephfs', 'metadata', new_fs_name)
649
650 # cleanup
651 self.mount_a.umount_wait()
652 self.run_cluster_cmd(f'auth rm client.{client_id}')
653
654 def test_fs_rename_idempotency(self):
655 """
656 That the file system rename operation is idempotent.
657 """
658 # Renaming the file system breaks this mount as the client uses
659 # file system specific authorization.
660 self.mount_a.umount_wait(require_clean=True)
661 orig_fs_name = self.fs.name
662 new_fs_name = 'new_cephfs'
663
664 self.run_cluster_cmd(f'fs rename {orig_fs_name} {new_fs_name} --yes-i-really-mean-it')
665 self.run_cluster_cmd(f'fs rename {orig_fs_name} {new_fs_name} --yes-i-really-mean-it')
666
667 # original file system name does not appear in `fs ls` command
668 self.assertFalse(self.fs.exists())
669 self.fs.name = new_fs_name
670 self.assertTrue(self.fs.exists())
671
672 def test_fs_rename_fs_new_fails_with_old_fsname_existing_pools(self):
673 """
674 That after renaming a file system, creating a file system with
675 old name and existing FS pools fails.
676 """
677 # Renaming the file system breaks this mount as the client uses
678 # file system specific authorization.
679 self.mount_a.umount_wait(require_clean=True)
680 orig_fs_name = self.fs.name
681 new_fs_name = 'new_cephfs'
682 data_pool = self.fs.get_data_pool_name()
683 metadata_pool = self.fs.get_metadata_pool_name()
684 self.run_cluster_cmd(f'fs rename {orig_fs_name} {new_fs_name} --yes-i-really-mean-it')
685
686 try:
687 self.run_cluster_cmd(f"fs new {orig_fs_name} {metadata_pool} {data_pool}")
688 except CommandFailedError as ce:
689 self.assertEqual(ce.exitstatus, errno.EINVAL,
690 "invalid error code on creating a new file system with old "
691 "name and existing pools.")
692 else:
693 self.fail("expected creating new file system with old name and "
694 "existing pools to fail.")
695
696 try:
697 self.run_cluster_cmd(f"fs new {orig_fs_name} {metadata_pool} {data_pool} --force")
698 except CommandFailedError as ce:
1e59de90 699 self.assertEqual(ce.exitstatus, errno.EINVAL,
20effc67
TL
700 "invalid error code on creating a new file system with old "
701 "name, existing pools and --force flag.")
702 else:
703 self.fail("expected creating new file system with old name, "
704 "existing pools, and --force flag to fail.")
705
706 try:
707 self.run_cluster_cmd(f"fs new {orig_fs_name} {metadata_pool} {data_pool} "
708 "--allow-dangerous-metadata-overlay")
709 except CommandFailedError as ce:
710 self.assertEqual(ce.exitstatus, errno.EINVAL,
711 "invalid error code on creating a new file system with old name, "
712 "existing pools and --allow-dangerous-metadata-overlay flag.")
713 else:
714 self.fail("expected creating new file system with old name, "
715 "existing pools, and --allow-dangerous-metadata-overlay flag to fail.")
716
717 def test_fs_rename_fails_without_yes_i_really_mean_it_flag(self):
718 """
719 That renaming a file system without '--yes-i-really-mean-it' flag fails.
720 """
721 try:
722 self.run_cluster_cmd(f"fs rename {self.fs.name} new_fs")
723 except CommandFailedError as ce:
724 self.assertEqual(ce.exitstatus, errno.EPERM,
725 "invalid error code on renaming a file system without the "
726 "'--yes-i-really-mean-it' flag")
727 else:
728 self.fail("expected renaming of file system without the "
729 "'--yes-i-really-mean-it' flag to fail ")
730
731 def test_fs_rename_fails_for_non_existent_fs(self):
732 """
733 That renaming a non-existent file system fails.
734 """
735 try:
736 self.run_cluster_cmd("fs rename non_existent_fs new_fs --yes-i-really-mean-it")
737 except CommandFailedError as ce:
738 self.assertEqual(ce.exitstatus, errno.ENOENT, "invalid error code on renaming a non-existent fs")
739 else:
740 self.fail("expected renaming of a non-existent file system to fail")
741
742 def test_fs_rename_fails_new_name_already_in_use(self):
743 """
744 That renaming a file system fails if the new name refers to an existing file system.
745 """
746 self.fs2 = self.mds_cluster.newfs(name='cephfs2', create=True)
747
748 try:
749 self.run_cluster_cmd(f"fs rename {self.fs.name} {self.fs2.name} --yes-i-really-mean-it")
750 except CommandFailedError as ce:
751 self.assertEqual(ce.exitstatus, errno.EINVAL,
752 "invalid error code on renaming to a fs name that is already in use")
753 else:
754 self.fail("expected renaming to a new file system name that is already in use to fail.")
755
756 def test_fs_rename_fails_with_mirroring_enabled(self):
757 """
758 That renaming a file system fails if mirroring is enabled on it.
759 """
760 orig_fs_name = self.fs.name
761 new_fs_name = 'new_cephfs'
762
763 self.run_cluster_cmd(f'fs mirror enable {orig_fs_name}')
764 try:
765 self.run_cluster_cmd(f'fs rename {orig_fs_name} {new_fs_name} --yes-i-really-mean-it')
766 except CommandFailedError as ce:
767 self.assertEqual(ce.exitstatus, errno.EPERM, "invalid error code on renaming a mirrored file system")
768 else:
769 self.fail("expected renaming of a mirrored file system to fail")
770 self.run_cluster_cmd(f'fs mirror disable {orig_fs_name}')
771
772
522d829b
TL
773class TestDump(CephFSTestCase):
774 CLIENTS_REQUIRED = 0
775 MDSS_REQUIRED = 1
776
777 def test_fs_dump_epoch(self):
778 """
779 That dumping a specific epoch works.
780 """
781
782 status1 = self.fs.status()
783 status2 = self.fs.status(epoch=status1["epoch"]-1)
784 self.assertEqual(status1["epoch"], status2["epoch"]+1)
785
786 def test_fsmap_trim(self):
787 """
788 That the fsmap is trimmed normally.
789 """
790
791 paxos_service_trim_min = 25
792 self.config_set('mon', 'paxos_service_trim_min', paxos_service_trim_min)
793 mon_max_mdsmap_epochs = 20
794 self.config_set('mon', 'mon_max_mdsmap_epochs', mon_max_mdsmap_epochs)
795
796 status = self.fs.status()
797 epoch = status["epoch"]
798
799 # Issue enough FSMap mutations (paxos_service_trim_min + mon_max_mdsmap_epochs) so the starting epoch falls outside the retained window and gets trimmed.
800 mutations = paxos_service_trim_min + mon_max_mdsmap_epochs
801 b = False
802 for i in range(mutations):
803 self.fs.set_joinable(b)
804 b = not b
805
806 time.sleep(10) # for tick/compaction
807
808 try:
809 self.fs.status(epoch=epoch)
810 except CommandFailedError as e:
811 self.assertEqual(e.exitstatus, errno.ENOENT, "invalid error code when trying to fetch FSMap that was trimmed")
812 else:
813 self.fail("trimming did not occur as expected")
814
815 def test_fsmap_force_trim(self):
816 """
817 That the fsmap is trimmed forcefully.
818 """
819
820 status = self.fs.status()
821 epoch = status["epoch"]
822
823 paxos_service_trim_min = 1
824 self.config_set('mon', 'paxos_service_trim_min', paxos_service_trim_min)
825 mon_mds_force_trim_to = epoch+1
826 self.config_set('mon', 'mon_mds_force_trim_to', mon_mds_force_trim_to)
827
828 # force a new fsmap
829 self.fs.set_joinable(False)
830 time.sleep(10) # for tick/compaction
831
832 status = self.fs.status()
833 log.debug(f"new epoch is {status['epoch']}")
834 self.fs.status(epoch=epoch+1) # epoch+1 is not trimmed, may not == status["epoch"]
835
836 try:
837 self.fs.status(epoch=epoch)
838 except CommandFailedError as e:
839 self.assertEqual(e.exitstatus, errno.ENOENT, "invalid error code when trying to fetch FSMap that was trimmed")
840 else:
841 self.fail("trimming did not occur as expected")
842
20effc67 843
f67539c2
TL
844class TestRequiredClientFeatures(CephFSTestCase):
845 CLIENTS_REQUIRED = 0
846 MDSS_REQUIRED = 1
847
848 def test_required_client_features(self):
849 """
850 That `ceph fs required_client_features` command functions.
851 """
852
853 def is_required(index):
854 out = self.fs.mon_manager.raw_cluster_cmd('fs', 'get', self.fs.name, '--format=json-pretty')
855 features = json.loads(out)['mdsmap']['required_client_features']
856 if "feature_{0}".format(index) in features:
857 return True
858 return False
859
860 features = json.loads(self.fs.mon_manager.raw_cluster_cmd('fs', 'feature', 'ls', '--format=json-pretty'))
861 self.assertGreater(len(features), 0)
862
863 for f in features:
864 self.fs.required_client_features('rm', str(f['index']))
865
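        # Add back the features whose index is not divisible by 3 and verify
        # each becomes required; then, for the odd-indexed ones among those,
        # remove the feature again and verify it is no longer required.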
866 for f in features:
867 index = f['index']
868 feature = f['name']
869 if feature == 'reserved':
870 feature = str(index)
871
872 if index % 3 == 0:
873 continue
874 self.fs.required_client_features('add', feature)
875 self.assertTrue(is_required(index))
876
877 if index % 2 == 0:
878 continue
879 self.fs.required_client_features('rm', feature)
880 self.assertFalse(is_required(index))
881
882 def test_required_client_feature_add_reserved(self):
883 """
884 That `ceph fs required_client_features X add reserved` fails.
885 """
886
887 p = self.fs.required_client_features('add', 'reserved', check_status=False, stderr=StringIO())
888 self.assertIn('Invalid feature name', p.stderr.getvalue())
889
890 def test_required_client_feature_rm_reserved(self):
891 """
892 That `ceph fs required_client_features X rm reserved` fails.
893 """
894
895 p = self.fs.required_client_features('rm', 'reserved', check_status=False, stderr=StringIO())
896 self.assertIn('Invalid feature name', p.stderr.getvalue())
897
898 def test_required_client_feature_add_reserved_bit(self):
899 """
900 That `ceph fs required_client_features X add <reserved_bit>` passes.
901 """
902
903 p = self.fs.required_client_features('add', '1', stderr=StringIO())
904 self.assertIn("added feature 'reserved' to required_client_features", p.stderr.getvalue())
905
906 def test_required_client_feature_rm_reserved_bit(self):
907 """
908 That `ceph fs required_client_features X rm <reserved_bit>` passes.
909 """
910
911 self.fs.required_client_features('add', '1')
912 p = self.fs.required_client_features('rm', '1', stderr=StringIO())
913 self.assertIn("removed feature 'reserved' from required_client_features", p.stderr.getvalue())
1911f103 914
522d829b
TL
915class TestCompatCommands(CephFSTestCase):
916 """
917 """
918
919 CLIENTS_REQUIRED = 0
920 MDSS_REQUIRED = 3
921
922 def test_add_compat(self):
923 """
924 Test adding a compat.
925 """
926
927 self.fs.fail()
928 self.fs.add_compat(63, 'placeholder')
929 mdsmap = self.fs.get_mds_map()
930 self.assertIn("feature_63", mdsmap['compat']['compat'])
931
932 def test_add_incompat(self):
933 """
934 Test adding an incompat.
935 """
936
937 self.fs.fail()
938 self.fs.add_incompat(63, 'placeholder')
939 mdsmap = self.fs.get_mds_map()
940 log.info(f"{mdsmap}")
941 self.assertIn("feature_63", mdsmap['compat']['incompat'])
942
943 def test_rm_compat(self):
944 """
945 Test removing a compat.
946 """
947
948 self.fs.fail()
949 self.fs.add_compat(63, 'placeholder')
950 self.fs.rm_compat(63)
951 mdsmap = self.fs.get_mds_map()
952 self.assertNotIn("feature_63", mdsmap['compat']['compat'])
953
954 def test_rm_incompat(self):
955 """
956 Test removing an incompat.
957 """
958
959 self.fs.fail()
960 self.fs.add_incompat(63, 'placeholder')
961 self.fs.rm_incompat(63)
962 mdsmap = self.fs.get_mds_map()
963 self.assertNotIn("feature_63", mdsmap['compat']['incompat'])
964
965 def test_standby_compat(self):
966 """
967 That adding a compat does not prevent standbys from joining.
968 """
969
970 self.fs.fail()
971 self.fs.add_compat(63, "placeholder")
972 self.fs.set_joinable()
973 self.fs.wait_for_daemons()
974 mdsmap = self.fs.get_mds_map()
975 self.assertIn("feature_63", mdsmap['compat']['compat'])
976
977 def test_standby_incompat_reject(self):
978 """
979 That adding an incompat feature prevents incompatible daemons from joining.
980 """
981
982 self.fs.fail()
983 self.fs.add_incompat(63, "placeholder")
984 self.fs.set_joinable()
985 try:
986 self.fs.wait_for_daemons(timeout=60)
987 except RuntimeError as e:
988 if "Timed out waiting for MDS daemons to become healthy" in str(e):
989 pass
990 else:
991 raise
992 else:
993 self.fail()
994
995 def test_standby_incompat_upgrade(self):
996 """
997 That an MDS can upgrade the compat of a fs.
998 """
999
1000 self.fs.fail()
1001 self.fs.rm_incompat(1)
1002 self.fs.set_joinable()
1003 self.fs.wait_for_daemons()
1004 mdsmap = self.fs.get_mds_map()
1005 self.assertIn("feature_1", mdsmap['compat']['incompat'])
1006
1007 def test_standby_replay_not_upgradeable(self):
1008 """
1009 That the mons will not upgrade the MDSMap compat if standby-replay is
1010 enabled.
1011 """
1012
1013 self.fs.fail()
1014 self.fs.rm_incompat(1)
1015 self.fs.set_allow_standby_replay(True)
1016 self.fs.set_joinable()
1017 try:
1018 self.fs.wait_for_daemons(timeout=60)
1019 except RuntimeError as e:
1020 if "Timed out waiting for MDS daemons to become healthy" in str(e):
1021 pass
1022 else:
1023 raise
1024 else:
1025 self.fail()
1026
1027 def test_standby_incompat_reject_multifs(self):
1028 """
1029 Like test_standby_incompat_reject but with a second fs.
1030 """
1031
1032 fs2 = self.mds_cluster.newfs(name="cephfs2", create=True)
1033 fs2.fail()
1034 fs2.add_incompat(63, 'placeholder')
1035 fs2.set_joinable()
1036 try:
1037 fs2.wait_for_daemons(timeout=60)
1038 except RuntimeError as e:
1039 if "Timed out waiting for MDS daemons to become healthy" in str(e):
1040 pass
1041 else:
1042 raise
1043 else:
1044 self.fail()
1045 # did self.fs lose MDS or standbys suicide?
1046 self.fs.wait_for_daemons()
1047 mdsmap = fs2.get_mds_map()
1048 self.assertIn("feature_63", mdsmap['compat']['incompat'])
1049
92f5a8d4
TL
1050class TestConfigCommands(CephFSTestCase):
1051 """
1052 Test that daemons and clients respond to the otherwise rarely-used
1053 runtime config modification operations.
1054 """
1055
1056 CLIENTS_REQUIRED = 1
1057 MDSS_REQUIRED = 1
1058
1059 def test_ceph_config_show(self):
1060 """
1061 That I can successfully show MDS configuration.
1062 """
1063
1064 names = self.fs.get_rank_names()
1065 for n in names:
1066 s = self.fs.mon_manager.raw_cluster_cmd("config", "show", "mds."+n)
1067 self.assertTrue("NAME" in s)
1068 self.assertTrue("mon_host" in s)
1069
f67539c2 1070
92f5a8d4
TL
1071 def test_client_config(self):
1072 """
1073 That I can successfully issue asok "config set" commands
1074
1075 :return:
1076 """
1077
1078 if not isinstance(self.mount_a, FuseMount):
9f95a23c 1079 self.skipTest("Test only applies to FUSE clients")
92f5a8d4
TL
1080
1081 test_key = "client_cache_size"
1082 test_val = "123"
1083 self.mount_a.admin_socket(['config', 'set', test_key, test_val])
1084 out = self.mount_a.admin_socket(['config', 'get', test_key])
1085 self.assertEqual(out[test_key], test_val)
1086
92f5a8d4
TL
1087
1088 def test_mds_config_asok(self):
1089 test_key = "mds_max_purge_ops"
1090 test_val = "123"
1091 self.fs.mds_asok(['config', 'set', test_key, test_val])
1092 out = self.fs.mds_asok(['config', 'get', test_key])
1093 self.assertEqual(out[test_key], test_val)
1094
20effc67
TL
1095 def test_mds_dump_cache_asok(self):
1096 cache_file = "cache_file"
1097 timeout = "1"
1098 self.fs.rank_asok(['dump', 'cache', cache_file, timeout])
1099
92f5a8d4
TL
1100 def test_mds_config_tell(self):
1101 test_key = "mds_max_purge_ops"
1102 test_val = "123"
1103
f67539c2 1104 self.fs.rank_tell(['injectargs', "--{0}={1}".format(test_key, test_val)])
92f5a8d4
TL
1105
1106 # Read it back with asok because there is no `tell` equivalent
f67539c2 1107 out = self.fs.rank_tell(['config', 'get', test_key])
92f5a8d4
TL
1108 self.assertEqual(out[test_key], test_val)
1109
f67539c2
TL
1110
1111class TestMirroringCommands(CephFSTestCase):
1112 CLIENTS_REQUIRED = 1
1113 MDSS_REQUIRED = 1
1114
1115 def _enable_mirroring(self, fs_name):
1116 self.fs.mon_manager.raw_cluster_cmd("fs", "mirror", "enable", fs_name)
1117
1118 def _disable_mirroring(self, fs_name):
1119 self.fs.mon_manager.raw_cluster_cmd("fs", "mirror", "disable", fs_name)
1120
1121 def _add_peer(self, fs_name, peer_spec, remote_fs_name):
1122 peer_uuid = str(uuid.uuid4())
1123 self.fs.mon_manager.raw_cluster_cmd("fs", "mirror", "peer_add", fs_name, peer_uuid, peer_spec, remote_fs_name)
1124
1125 def _remove_peer(self, fs_name, peer_uuid):
1126 self.fs.mon_manager.raw_cluster_cmd("fs", "mirror", "peer_remove", fs_name, peer_uuid)
1127
1128 def _verify_mirroring(self, fs_name, flag_str):
1129 status = self.fs.status()
1130 fs_map = status.get_fsmap_byname(fs_name)
1131 if flag_str == 'enabled':
1132 self.assertTrue('mirror_info' in fs_map)
1133 elif flag_str == 'disabled':
1134 self.assertTrue('mirror_info' not in fs_map)
1135 else:
1136 raise RuntimeError(f'invalid flag_str {flag_str}')
1137
1138 def _get_peer_uuid(self, fs_name, peer_spec):
1139 status = self.fs.status()
1140 fs_map = status.get_fsmap_byname(fs_name)
1141 mirror_info = fs_map.get('mirror_info', None)
1142 self.assertTrue(mirror_info is not None)
1143 for peer_uuid, remote in mirror_info['peers'].items():
1144 client_name = remote['remote']['client_name']
1145 cluster_name = remote['remote']['cluster_name']
1146 spec = f'{client_name}@{cluster_name}'
1147 if spec == peer_spec:
1148 return peer_uuid
1149 return None
1150
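    # _get_peer_uuid() resolves a peer spec of the form
    # '<client_name>@<cluster_name>' (e.g. 'client.site-b@site-b') to the UUID
    # that peer_add registered for it, or returns None if no such peer exists.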
1151 def test_mirroring_command(self):
1152 """basic mirroring command test -- enable, disable mirroring on a
1153 filesystem"""
1154 self._enable_mirroring(self.fs.name)
1155 self._verify_mirroring(self.fs.name, "enabled")
1156 self._disable_mirroring(self.fs.name)
1157 self._verify_mirroring(self.fs.name, "disabled")
1158
1159 def test_mirroring_peer_commands(self):
1160 """test adding and removing peers to a mirror enabled filesystem"""
1161 self._enable_mirroring(self.fs.name)
1162 self._add_peer(self.fs.name, "client.site-b@site-b", "fs_b")
1163 self._add_peer(self.fs.name, "client.site-c@site-c", "fs_c")
1164 self._verify_mirroring(self.fs.name, "enabled")
1165 uuid_peer_b = self._get_peer_uuid(self.fs.name, "client.site-b@site-b")
1166 uuid_peer_c = self._get_peer_uuid(self.fs.name, "client.site-c@site-c")
1167 self.assertTrue(uuid_peer_b is not None)
1168 self.assertTrue(uuid_peer_c is not None)
1169 self._remove_peer(self.fs.name, uuid_peer_b)
1170 self._remove_peer(self.fs.name, uuid_peer_c)
1171 self._disable_mirroring(self.fs.name)
1172 self._verify_mirroring(self.fs.name, "disabled")
1173
1174 def test_mirroring_command_idempotency(self):
1175 """test to check idempotency of mirroring family of commands """
1176 self._enable_mirroring(self.fs.name)
1177 self._verify_mirroring(self.fs.name, "enabled")
1178 self._enable_mirroring(self.fs.name)
1179 # add peer
1180 self._add_peer(self.fs.name, "client.site-b@site-b", "fs_b")
1181 uuid_peer_b1 = self._get_peer_uuid(self.fs.name, "client.site-b@site-b")
1182 self.assertTrue(uuid_peer_b1 is not None)
1183 # adding the peer again should be idempotent
1184 self._add_peer(self.fs.name, "client.site-b@site-b", "fs_b")
1185 uuid_peer_b2 = self._get_peer_uuid(self.fs.name, "client.site-b@site-b")
1186 self.assertTrue(uuid_peer_b2 is not None)
1187 self.assertTrue(uuid_peer_b1 == uuid_peer_b2)
1188 # remove peer
1189 self._remove_peer(self.fs.name, uuid_peer_b1)
1190 uuid_peer_b3 = self._get_peer_uuid(self.fs.name, "client.site-b@site-b")
1191 self.assertTrue(uuid_peer_b3 is None)
1192 # removing the peer again should be idempotent
1193 self._remove_peer(self.fs.name, uuid_peer_b1)
1194 self._disable_mirroring(self.fs.name)
1195 self._verify_mirroring(self.fs.name, "disabled")
1196 self._disable_mirroring(self.fs.name)
1197
1198 def test_mirroring_disable_with_peers(self):
1199 """test disabling mirroring for a filesystem with active peers"""
1200 self._enable_mirroring(self.fs.name)
1201 self._add_peer(self.fs.name, "client.site-b@site-b", "fs_b")
1202 self._verify_mirroring(self.fs.name, "enabled")
1203 uuid_peer_b = self._get_peer_uuid(self.fs.name, "client.site-b@site-b")
1204 self.assertTrue(uuid_peer_b is not None)
1205 self._disable_mirroring(self.fs.name)
1206 self._verify_mirroring(self.fs.name, "disabled")
1207 # enable mirroring to check old peers
1208 self._enable_mirroring(self.fs.name)
1209 self._verify_mirroring(self.fs.name, "enabled")
1210 # peer should be gone
1211 uuid_peer_b = self._get_peer_uuid(self.fs.name, "client.site-b@site-b")
1212 self.assertTrue(uuid_peer_b is None)
1213 self._disable_mirroring(self.fs.name)
1214 self._verify_mirroring(self.fs.name, "disabled")
1215
1216 def test_mirroring_with_filesystem_reset(self):
1217 """test to verify mirroring state post filesystem reset"""
1218 self._enable_mirroring(self.fs.name)
1219 self._add_peer(self.fs.name, "client.site-b@site-b", "fs_b")
1220 self._verify_mirroring(self.fs.name, "enabled")
1221 uuid_peer_b = self._get_peer_uuid(self.fs.name, "client.site-b@site-b")
1222 self.assertTrue(uuid_peer_b is not None)
1223 # reset filesystem
1224 self.fs.fail()
1225 self.fs.reset()
1226 self.fs.wait_for_daemons()
1227 self._verify_mirroring(self.fs.name, "disabled")
1228
1229
1e59de90 1230class TestFsAuthorize(CephFSTestCase):
f67539c2
TL
1231 client_id = 'testuser'
1232 client_name = 'client.' + client_id
1233
1234 def test_single_path_r(self):
1e59de90
TL
1235 PERM = 'r'
1236 FS_AUTH_CAPS = (('/', PERM),)
1237 self.captester = CapTester()
1238 self.setup_test_env(FS_AUTH_CAPS)
f67539c2 1239
1e59de90
TL
1240 self.captester.run_mon_cap_tests(self.fs, self.client_id)
1241 self.captester.run_mds_cap_tests(PERM)
f67539c2
TL
1242
1243 def test_single_path_rw(self):
1e59de90
TL
1244 PERM = 'rw'
1245 FS_AUTH_CAPS = (('/', PERM),)
1246 self.captester = CapTester()
1247 self.setup_test_env(FS_AUTH_CAPS)
f67539c2 1248
1e59de90
TL
1249 self.captester.run_mon_cap_tests(self.fs, self.client_id)
1250 self.captester.run_mds_cap_tests(PERM)
f67539c2
TL
1251
1252 def test_single_path_rootsquash(self):
1e59de90
TL
1253 PERM = 'rw'
1254 FS_AUTH_CAPS = (('/', PERM, 'root_squash'),)
1255 self.captester = CapTester()
1256 self.setup_test_env(FS_AUTH_CAPS)
f67539c2 1257
1e59de90
TL
1258 # testing MDS caps...
1259 # Since root_squash is set in client caps, client can read but not
1260 # write even though access level is set to "rw".
1261 self.captester.conduct_pos_test_for_read_caps()
1262 self.captester.conduct_neg_test_for_write_caps(sudo_write=True)
f67539c2
TL
1263
1264 def test_single_path_authorize_on_nonalphanumeric_fsname(self):
1265 """
1e59de90
TL
1266 That fs authorize command works on filesystems with names having [_.-]
1267 characters
f67539c2 1268 """
92f5a8d4 1269 self.mount_a.umount_wait(require_clean=True)
f67539c2
TL
1270 self.mds_cluster.delete_all_filesystems()
1271 fs_name = "cephfs-_."
1272 self.fs = self.mds_cluster.newfs(name=fs_name)
1273 self.fs.wait_for_daemons()
1274 self.run_cluster_cmd(f'auth caps client.{self.mount_a.client_id} '
1275 f'mon "allow r" '
1276 f'osd "allow rw pool={self.fs.get_data_pool_name()}" '
1277 f'mds allow')
1278 self.mount_a.remount(cephfs_name=self.fs.name)
1e59de90
TL
1279 PERM = 'rw'
1280 FS_AUTH_CAPS = (('/', PERM),)
1281 self.captester = CapTester()
1282 self.setup_test_env(FS_AUTH_CAPS)
1283 self.captester.run_mds_cap_tests(PERM)
f67539c2
TL
1284
1285 def test_multiple_path_r(self):
1e59de90
TL
1286 PERM = 'r'
1287 FS_AUTH_CAPS = (('/dir1/dir12', PERM), ('/dir2/dir22', PERM))
1288 for c in FS_AUTH_CAPS:
1289 self.mount_a.run_shell(f'mkdir -p .{c[0]}')
1290 self.captesters = (CapTester(), CapTester())
1291 self.setup_test_env(FS_AUTH_CAPS)
f67539c2 1292
1e59de90 1293 self.run_cap_test_one_by_one(FS_AUTH_CAPS)
f67539c2
TL
1294
1295 def test_multiple_path_rw(self):
1e59de90
TL
1296 PERM = 'rw'
1297 FS_AUTH_CAPS = (('/dir1/dir12', PERM), ('/dir2/dir22', PERM))
1298 for c in FS_AUTH_CAPS:
1299 self.mount_a.run_shell(f'mkdir -p .{c[0]}')
1300 self.captesters = (CapTester(), CapTester())
1301 self.setup_test_env(FS_AUTH_CAPS)
1302
1303 self.run_cap_test_one_by_one(FS_AUTH_CAPS)
1304
1305 def run_cap_test_one_by_one(self, fs_auth_caps):
1306 keyring = self.run_cluster_cmd(f'auth get {self.client_name}')
1307 for i, c in enumerate(fs_auth_caps):
1308 self.assertIn(i, (0, 1))
1309 PATH = c[0]
1310 PERM = c[1]
1311 self._remount(keyring, PATH)
f67539c2 1312 # actual tests...
1e59de90
TL
1313 self.captesters[i].run_mon_cap_tests(self.fs, self.client_id)
1314 self.captesters[i].run_mds_cap_tests(PERM, PATH)
f67539c2
TL
1315
1316 def tearDown(self):
1317 self.mount_a.umount_wait()
1318 self.run_cluster_cmd(f'auth rm {self.client_name}')
1319
1320 super(type(self), self).tearDown()
1321
1e59de90 1322 def _remount(self, keyring, path='/'):
20effc67 1323 keyring_path = self.mount_a.client_remote.mktemp(data=keyring)
f67539c2
TL
1324 self.mount_a.remount(client_id=self.client_id,
1325 client_keyring_path=keyring_path,
1e59de90 1326 cephfs_mntpt=path)
f67539c2 1327
1e59de90
TL
1328 def setup_for_single_path(self, fs_auth_caps):
1329 self.captester.write_test_files((self.mount_a,), '/')
1330 keyring = self.fs.authorize(self.client_id, fs_auth_caps)
1331 self._remount(keyring)
f67539c2 1332
1e59de90
TL
1333 def setup_for_multiple_paths(self, fs_auth_caps):
1334 for i, c in enumerate(fs_auth_caps):
1335 PATH = c[0]
1336 self.captesters[i].write_test_files((self.mount_a,), PATH)
f67539c2 1337
1e59de90 1338 self.fs.authorize(self.client_id, fs_auth_caps)
f67539c2 1339
1e59de90
TL
1340 def setup_test_env(self, fs_auth_caps):
1341 if len(fs_auth_caps) == 1:
1342 self.setup_for_single_path(fs_auth_caps[0])
1343 else:
1344 self.setup_for_multiple_paths(fs_auth_caps)
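    # Each FS_AUTH_CAPS entry used by the tests above mirrors an argument to
    # "ceph fs authorize": a (path, perm) or (path, perm, 'root_squash') tuple,
    # e.g. (('/dir1/dir12', 'r'), ('/dir2/dir22', 'r')) grants read-only access
    # to two paths, each of which is then exercised by its own CapTester.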
f67539c2 1345
20effc67 1346
f67539c2
TL
1347class TestAdminCommandIdempotency(CephFSTestCase):
1348 """
1349 Tests for administration command idempotency.
1350 """
1351
1352 CLIENTS_REQUIRED = 0
1353 MDSS_REQUIRED = 1
1354
1355 def test_rm_idempotency(self):
1356 """
1357 That removing a fs twice is idempotent.
1358 """
1359
1360 data_pools = self.fs.get_data_pool_names(refresh=True)
1361 self.fs.fail()
1362 self.fs.rm()
1363 try:
1364 self.fs.get_mds_map()
1365 except FSMissing:
1366 pass
1367 else:
1368 self.fail("get_mds_map should raise")
1369 p = self.fs.rm()
1370 self.assertIn("does not exist", p.stderr.getvalue())
1371 self.fs.remove_pools(data_pools)
20effc67
TL
1372
1373
1374class TestAdminCommandDumpTree(CephFSTestCase):
1375 """
1376 Tests for administration command subtrees.
1377 """
1378
1379 CLIENTS_REQUIRED = 0
1380 MDSS_REQUIRED = 1
1381
1382 def test_dump_subtrees(self):
1383 """
1384 Dump all the subtrees to make sure the MDS daemon won't crash.
1385 """
1386
1387 subtrees = self.fs.mds_asok(['get', 'subtrees'])
1388 log.info(f"dumping {len(subtrees)} subtrees:")
1389 for subtree in subtrees:
1390 log.info(f" subtree: '{subtree['dir']['path']}'")
1391 self.fs.mds_asok(['dump', 'tree', subtree['dir']['path']])
1392
1393 log.info("dumping 2 special subtrees:")
1394 log.info(" subtree: '/'")
1395 self.fs.mds_asok(['dump', 'tree', '/'])
1396 log.info(" subtree: '~mdsdir'")
1397 self.fs.mds_asok(['dump', 'tree', '~mdsdir'])
1e59de90
TL
1398
1399class TestAdminCommandDumpLoads(CephFSTestCase):
1400 """
1401 Tests for administration command dump loads.
1402 """
1403
1404 CLIENTS_REQUIRED = 0
1405 MDSS_REQUIRED = 1
1406
1407 def test_dump_loads(self):
1408 """
1409 Make sure the depth limit param is honored when dumping loads for an MDS daemon; with depth 1, dumped dirfrag paths contain at most one '/'.
1410 """
1411
1412 log.info("dumping loads")
1413 loads = self.fs.mds_asok(['dump', 'loads', '1'])
1414 self.assertIsNotNone(loads)
1415 self.assertIn("dirfrags", loads)
1416 for d in loads["dirfrags"]:
1417 self.assertLessEqual(d["path"].count("/"), 1)
1418
1419class TestFsBalRankMask(CephFSTestCase):
1420 """
1421 Tests ceph fs set <fs_name> bal_rank_mask
1422 """
1423
1424 CLIENTS_REQUIRED = 0
1425 MDSS_REQUIRED = 2
1426
1427 def test_bal_rank_mask(self):
1428 """
1429 Check that valid bal_rank_mask values are accepted and invalid ones rejected.
1430 """
1431 bal_rank_mask = '0x0'
1432 log.info(f"set bal_rank_mask {bal_rank_mask}")
1433 self.fs.set_bal_rank_mask(bal_rank_mask)
1434 self.assertEqual(bal_rank_mask, self.fs.get_var('bal_rank_mask'))
1435
1436 bal_rank_mask = '0'
1437 log.info(f"set bal_rank_mask {bal_rank_mask}")
1438 self.fs.set_bal_rank_mask(bal_rank_mask)
1439 self.assertEqual(bal_rank_mask, self.fs.get_var('bal_rank_mask'))
1440
1441 bal_rank_mask = '-1'
1442 log.info(f"set bal_rank_mask {bal_rank_mask}")
1443 self.fs.set_bal_rank_mask(bal_rank_mask)
1444 self.assertEqual(bal_rank_mask, self.fs.get_var('bal_rank_mask'))
1445
1446 bal_rank_mask = 'all'
1447 log.info(f"set bal_rank_mask {bal_rank_mask}")
1448 self.fs.set_bal_rank_mask(bal_rank_mask)
1449 self.assertEqual(bal_rank_mask, self.fs.get_var('bal_rank_mask'))
1450
1451 bal_rank_mask = '0x1'
1452 log.info(f"set bal_rank_mask {bal_rank_mask}")
1453 self.fs.set_bal_rank_mask(bal_rank_mask)
1454 self.assertEqual(bal_rank_mask, self.fs.get_var('bal_rank_mask'))
1455
1456 bal_rank_mask = '1'
1457 log.info(f"set bal_rank_mask {bal_rank_mask}")
1458 self.fs.set_bal_rank_mask(bal_rank_mask)
1459 self.assertEqual(bal_rank_mask, self.fs.get_var('bal_rank_mask'))
1460
1461 bal_rank_mask = 'f0'
1462 log.info(f"set bal_rank_mask {bal_rank_mask}")
1463 self.fs.set_bal_rank_mask(bal_rank_mask)
1464 self.assertEqual(bal_rank_mask, self.fs.get_var('bal_rank_mask'))
1465
1466 bal_rank_mask = 'ab'
1467 log.info(f"set bal_rank_mask {bal_rank_mask}")
1468 self.fs.set_bal_rank_mask(bal_rank_mask)
1469 self.assertEqual(bal_rank_mask, self.fs.get_var('bal_rank_mask'))
1470
1471 bal_rank_mask = '0xfff0'
1472 log.info(f"set bal_rank_mask {bal_rank_mask}")
1473 self.fs.set_bal_rank_mask(bal_rank_mask)
1474 self.assertEqual(bal_rank_mask, self.fs.get_var('bal_rank_mask'))
1475
1476 MAX_MDS = 256
1477 bal_rank_mask = '0x' + 'f' * int(MAX_MDS / 4)
1478 log.info(f"set bal_rank_mask {bal_rank_mask}")
1479 self.fs.set_bal_rank_mask(bal_rank_mask)
1480 self.assertEqual(bal_rank_mask, self.fs.get_var('bal_rank_mask'))
1481
1482 bal_rank_mask = ''
1483 log.info("set bal_rank_mask to empty string")
1484 try:
1485 self.fs.set_bal_rank_mask(bal_rank_mask)
1486 except CommandFailedError as e:
1487 self.assertEqual(e.exitstatus, errno.EINVAL)
1488
1489 bal_rank_mask = '0x1' + 'f' * int(MAX_MDS / 4)
1490 log.info(f"set bal_rank_mask {bal_rank_mask}")
1491 try:
1492 self.fs.set_bal_rank_mask(bal_rank_mask)
1493 except CommandFailedError as e:
1494 self.assertEqual(e.exitstatus, errno.EINVAL)