# ceph/qa/tasks/cephfs/test_admin.py
import errno
import json
import logging
import time
from io import StringIO
from os.path import join as os_path_join

from teuthology.exceptions import CommandFailedError

from tasks.cephfs.cephfs_test_case import CephFSTestCase
from tasks.cephfs.filesystem import FileLayout, FSMissing
from tasks.cephfs.fuse_mount import FuseMount
from tasks.cephfs.caps_helper import CapTester
# Module-level logger, per teuthology convention.
log = logging.getLogger(__name__)
class TestAdminCommands(CephFSTestCase):
    """
    Tests for administration command.
    """

    def check_pool_application_metadata_key_value(self, pool, app, key, value):
        """
        Assert that `osd pool application get` for the given pool/app/key
        returns exactly `value`.
        """
        output = self.fs.mon_manager.raw_cluster_cmd(
            'osd', 'pool', 'application', 'get', pool, app, key)
        self.assertEqual(str(output.strip()), value)

    def setup_ec_pools(self, n, metadata=True, overwrites=True):
        """
        Create an erasure-coded data pool (and optionally a replicated
        metadata pool) named after prefix `n`.

        :param n: name prefix for the pools and the EC profile.
        :param metadata: also create a replicated "<n>-meta" pool.
        :param overwrites: enable allow_ec_overwrites on the data pool.
        """
        # NOTE(review): the `if metadata:` / `if overwrites:` guards were
        # dropped by the extraction; restored because the parameters would
        # otherwise be unused — confirm against upstream.
        if metadata:
            self.fs.mon_manager.raw_cluster_cmd('osd', 'pool', 'create', n+"-meta", "8")
        cmd = ['osd', 'erasure-code-profile', 'set', n+"-profile", "m=2", "k=2", "crush-failure-domain=osd"]
        self.fs.mon_manager.raw_cluster_cmd(*cmd)
        self.fs.mon_manager.raw_cluster_cmd('osd', 'pool', 'create', n+"-data", "8", "erasure", n+"-profile")
        if overwrites:
            self.fs.mon_manager.raw_cluster_cmd('osd', 'pool', 'set', n+"-data", 'allow_ec_overwrites', 'true')
class TestFsStatus(TestAdminCommands):
    """
    Test "ceph fs status" subcommand.
    """

    def test_fs_status(self):
        """
        That `ceph fs status` command functions.
        """
        # Plain-text output must mention an active MDS.
        s = self.fs.mon_manager.raw_cluster_cmd("fs", "status")
        self.assertTrue("active" in s)

        # Both JSON flavours must report the first MDS as active.
        mdsmap = json.loads(self.fs.mon_manager.raw_cluster_cmd("fs", "status", "--format=json-pretty"))["mdsmap"]
        self.assertEqual(mdsmap[0]["state"], "active")

        mdsmap = json.loads(self.fs.mon_manager.raw_cluster_cmd("fs", "status", "--format=json"))["mdsmap"]
        self.assertEqual(mdsmap[0]["state"], "active")
class TestAddDataPool(TestAdminCommands):
    """
    Test "ceph fs add_data_pool" subcommand.
    """

    def test_add_data_pool_root(self):
        """
        That a new data pool can be added and used for the root directory.
        """
        p = self.fs.add_data_pool("foo")
        self.fs.set_dir_layout(self.mount_a, ".", FileLayout(pool=p))

    def test_add_data_pool_application_metadata(self):
        """
        That the application metadata set on a newly added data pool is as expected.
        """
        # NOTE(review): the pool_name assignment was dropped by the
        # extraction; "foo" restored from sibling tests — confirm upstream.
        pool_name = "foo"
        mon_cmd = self.fs.mon_manager.raw_cluster_cmd
        mon_cmd('osd', 'pool', 'create', pool_name, '--pg_num_min',
                str(self.fs.pg_num_min))
        # Check whether https://tracker.ceph.com/issues/43061 is fixed
        mon_cmd('osd', 'pool', 'application', 'enable', pool_name, 'cephfs')
        self.fs.add_data_pool(pool_name, create=False)
        self.check_pool_application_metadata_key_value(
            pool_name, 'cephfs', 'data', self.fs.name)

    def test_add_data_pool_subdir(self):
        """
        That a new data pool can be added and used for a sub-directory.
        """
        p = self.fs.add_data_pool("foo")
        self.mount_a.run_shell("mkdir subdir")
        self.fs.set_dir_layout(self.mount_a, "subdir", FileLayout(pool=p))

    def test_add_data_pool_non_alphamueric_name_as_subdir(self):
        """
        That a new data pool with non-alphanumeric name can be added and used for a sub-directory.
        """
        p = self.fs.add_data_pool("I-am-data_pool00.")
        self.mount_a.run_shell("mkdir subdir")
        self.fs.set_dir_layout(self.mount_a, "subdir", FileLayout(pool=p))

    def test_add_data_pool_ec(self):
        """
        That a new EC data pool can be added.
        """
        n = "test_add_data_pool_ec"
        self.setup_ec_pools(n, metadata=False)
        self.fs.add_data_pool(n+"-data", create=False)

    def test_add_already_in_use_data_pool(self):
        """
        That command try to add data pool which is already in use with another fs.
        """
        # create first data pool, metadata pool and add with filesystem
        first_fs = "first_fs"
        first_metadata_pool = "first_metadata_pool"
        first_data_pool = "first_data_pool"
        self.fs.mon_manager.raw_cluster_cmd('osd', 'pool', 'create', first_metadata_pool)
        self.fs.mon_manager.raw_cluster_cmd('osd', 'pool', 'create', first_data_pool)
        self.fs.mon_manager.raw_cluster_cmd('fs', 'new', first_fs, first_metadata_pool, first_data_pool)

        # create second data pool, metadata pool and add with filesystem
        second_fs = "second_fs"
        second_metadata_pool = "second_metadata_pool"
        second_data_pool = "second_data_pool"
        self.fs.mon_manager.raw_cluster_cmd('osd', 'pool', 'create', second_metadata_pool)
        self.fs.mon_manager.raw_cluster_cmd('osd', 'pool', 'create', second_data_pool)
        self.fs.mon_manager.raw_cluster_cmd('fs', 'new', second_fs, second_metadata_pool, second_data_pool)

        # try to add 'first_data_pool' with 'second_fs'
        # Expecting EINVAL exit status because 'first_data_pool' is already in use with 'first_fs'
        try:
            self.fs.mon_manager.raw_cluster_cmd('fs', 'add_data_pool', second_fs, first_data_pool)
        except CommandFailedError as e:
            self.assertEqual(e.exitstatus, errno.EINVAL)
        else:
            self.fail("Expected EINVAL because data pool is already in use as data pool for first_fs")

    def test_add_already_in_use_metadata_pool(self):
        """
        That command try to add metadata pool which is already in use with another fs.
        """
        # create first data pool, metadata pool and add with filesystem
        first_fs = "first_fs"
        first_metadata_pool = "first_metadata_pool"
        first_data_pool = "first_data_pool"
        self.fs.mon_manager.raw_cluster_cmd('osd', 'pool', 'create', first_metadata_pool)
        self.fs.mon_manager.raw_cluster_cmd('osd', 'pool', 'create', first_data_pool)
        self.fs.mon_manager.raw_cluster_cmd('fs', 'new', first_fs, first_metadata_pool, first_data_pool)

        # create second data pool, metadata pool and add with filesystem
        second_fs = "second_fs"
        second_metadata_pool = "second_metadata_pool"
        second_data_pool = "second_data_pool"
        self.fs.mon_manager.raw_cluster_cmd('osd', 'pool', 'create', second_metadata_pool)
        self.fs.mon_manager.raw_cluster_cmd('osd', 'pool', 'create', second_data_pool)
        self.fs.mon_manager.raw_cluster_cmd('fs', 'new', second_fs, second_metadata_pool, second_data_pool)

        # try to add 'second_metadata_pool' with 'first_fs' as a data pool
        # Expecting EINVAL exit status because 'second_metadata_pool'
        # is already in use with 'second_fs' as a metadata pool
        try:
            self.fs.mon_manager.raw_cluster_cmd('fs', 'add_data_pool', first_fs, second_metadata_pool)
        except CommandFailedError as e:
            self.assertEqual(e.exitstatus, errno.EINVAL)
        else:
            self.fail("Expected EINVAL because data pool is already in use as metadata pool for 'second_fs'")
class TestFsNew(TestAdminCommands):
    """
    Test "ceph fs new" subcommand.
    """

    def test_fsnames_can_only_by_goodchars(self):
        """
        That file system names are restricted to the allowed character set.
        """
        n = 'test_fsnames_can_only_by_goodchars'
        metapoolname, datapoolname = n+'-testmetapool', n+'-testdatapool'
        badname = n+'badname@#'
        # NOTE(review): the pool-create argument lines were dropped by the
        # extraction; pool names restored to match the rm calls below.
        self.fs.mon_manager.raw_cluster_cmd('osd', 'pool', 'create',
                                            metapoolname)
        self.fs.mon_manager.raw_cluster_cmd('osd', 'pool', 'create',
                                            datapoolname)

        # test that fsname not with "goodchars" fails
        args = ['fs', 'new', badname, metapoolname, datapoolname]
        proc = self.fs.mon_manager.run_cluster_cmd(args=args, stderr=StringIO(),
                                                   check_status=False)
        self.assertIn('invalid chars', proc.stderr.getvalue().lower())

        self.fs.mon_manager.raw_cluster_cmd('osd', 'pool', 'rm', metapoolname,
                                            metapoolname,
                                            '--yes-i-really-really-mean-it-not-faking')
        self.fs.mon_manager.raw_cluster_cmd('osd', 'pool', 'rm', datapoolname,
                                            datapoolname,
                                            '--yes-i-really-really-mean-it-not-faking')

    def test_new_default_ec(self):
        """
        That a new file system warns/fails with an EC default data pool.
        """
        self.mount_a.umount_wait(require_clean=True)
        self.mds_cluster.delete_all_filesystems()
        n = "test_new_default_ec"
        self.setup_ec_pools(n)
        try:
            self.fs.mon_manager.raw_cluster_cmd('fs', 'new', n, n+"-meta", n+"-data")
        except CommandFailedError as e:
            if e.exitstatus == 22:
                pass
            else:
                raise
        else:
            raise RuntimeError("expected failure")

    def test_new_default_ec_force(self):
        """
        That a new file system succeeds with an EC default data pool with --force.
        """
        self.mount_a.umount_wait(require_clean=True)
        self.mds_cluster.delete_all_filesystems()
        n = "test_new_default_ec_force"
        self.setup_ec_pools(n)
        self.fs.mon_manager.raw_cluster_cmd('fs', 'new', n, n+"-meta", n+"-data", "--force")

    def test_new_default_ec_no_overwrite(self):
        """
        That a new file system fails with an EC default data pool without overwrite.
        """
        self.mount_a.umount_wait(require_clean=True)
        self.mds_cluster.delete_all_filesystems()
        n = "test_new_default_ec_no_overwrite"
        self.setup_ec_pools(n, overwrites=False)
        try:
            self.fs.mon_manager.raw_cluster_cmd('fs', 'new', n, n+"-meta", n+"-data")
        except CommandFailedError as e:
            if e.exitstatus == 22:
                pass
            else:
                raise
        else:
            raise RuntimeError("expected failure")
        # and even with --force !
        try:
            self.fs.mon_manager.raw_cluster_cmd('fs', 'new', n, n+"-meta", n+"-data", "--force")
        except CommandFailedError as e:
            if e.exitstatus == 22:
                pass
            else:
                raise
        else:
            raise RuntimeError("expected failure")

    def test_fs_new_pool_application_metadata(self):
        """
        That the application metadata set on the pools of a newly created filesystem are as expected.
        """
        self.mount_a.umount_wait(require_clean=True)
        self.mds_cluster.delete_all_filesystems()
        fs_name = "test_fs_new_pool_application"
        keys = ['metadata', 'data']
        pool_names = [fs_name+'-'+key for key in keys]
        mon_cmd = self.fs.mon_manager.raw_cluster_cmd
        # NOTE(review): loop headers below were dropped by the extraction;
        # restored so the loop bodies visible in the source make sense.
        for p in pool_names:
            mon_cmd('osd', 'pool', 'create', p, '--pg_num_min', str(self.fs.pg_num_min))
            mon_cmd('osd', 'pool', 'application', 'enable', p, 'cephfs')
        mon_cmd('fs', 'new', fs_name, pool_names[0], pool_names[1])
        for i in range(2):
            self.check_pool_application_metadata_key_value(
                pool_names[i], 'cephfs', keys[i], fs_name)

    def test_fs_new_with_specific_id(self):
        """
        That a file system can be created with a specific ID.
        """
        fs_name = "test_fs_specific_id"
        # NOTE(review): fscid assignment dropped by the extraction; an
        # arbitrary unused FSCID restored — confirm against upstream.
        fscid = 100
        keys = ['metadata', 'data']
        pool_names = [fs_name+'-'+key for key in keys]
        for p in pool_names:
            self.run_cluster_cmd(f'osd pool create {p}')
        self.run_cluster_cmd(f'fs new {fs_name} {pool_names[0]} {pool_names[1]} --fscid {fscid} --force')
        self.fs.status().get_fsmap(fscid)
        for i in range(2):
            self.check_pool_application_metadata_key_value(pool_names[i], 'cephfs', keys[i], fs_name)

    def test_fs_new_with_specific_id_idempotency(self):
        """
        That command to create file system with specific ID is idempotent.
        """
        fs_name = "test_fs_specific_id"
        fscid = 100
        keys = ['metadata', 'data']
        pool_names = [fs_name+'-'+key for key in keys]
        for p in pool_names:
            self.run_cluster_cmd(f'osd pool create {p}')
        self.run_cluster_cmd(f'fs new {fs_name} {pool_names[0]} {pool_names[1]} --fscid {fscid} --force')
        self.run_cluster_cmd(f'fs new {fs_name} {pool_names[0]} {pool_names[1]} --fscid {fscid} --force')
        self.fs.status().get_fsmap(fscid)

    def test_fs_new_with_specific_id_fails_without_force_flag(self):
        """
        That command to create file system with specific ID fails without '--force' flag.
        """
        fs_name = "test_fs_specific_id"
        fscid = 100
        keys = ['metadata', 'data']
        pool_names = [fs_name+'-'+key for key in keys]
        for p in pool_names:
            self.run_cluster_cmd(f'osd pool create {p}')
        try:
            self.run_cluster_cmd(f'fs new {fs_name} {pool_names[0]} {pool_names[1]} --fscid {fscid}')
        except CommandFailedError as ce:
            self.assertEqual(ce.exitstatus, errno.EINVAL,
                "invalid error code on creating a file system with specifc ID without --force flag")
        else:
            self.fail("expected creating file system with specific ID without '--force' flag to fail")

    def test_fs_new_with_specific_id_fails_already_in_use(self):
        """
        That creating file system with ID already in use fails.
        """
        fs_name = "test_fs_specific_id"
        # file system ID already in use
        fscid = self.fs.status().map['filesystems'][0]['id']
        keys = ['metadata', 'data']
        pool_names = [fs_name+'-'+key for key in keys]
        for p in pool_names:
            self.run_cluster_cmd(f'osd pool create {p}')
        try:
            self.run_cluster_cmd(f'fs new {fs_name} {pool_names[0]} {pool_names[1]} --fscid {fscid} --force')
        except CommandFailedError as ce:
            self.assertEqual(ce.exitstatus, errno.EINVAL,
                "invalid error code on creating a file system with specifc ID that is already in use")
        else:
            self.fail("expected creating file system with ID already in use to fail")

    def test_fs_new_metadata_pool_already_in_use(self):
        """
        That creating file system with metadata pool already in use.
        """
        # create first data pool, metadata pool and add with filesystem
        first_fs = "first_fs"
        first_metadata_pool = "first_metadata_pool"
        first_data_pool = "first_data_pool"
        self.fs.mon_manager.raw_cluster_cmd('osd', 'pool', 'create', first_metadata_pool)
        self.fs.mon_manager.raw_cluster_cmd('osd', 'pool', 'create', first_data_pool)
        self.fs.mon_manager.raw_cluster_cmd('fs', 'new', first_fs, first_metadata_pool, first_data_pool)

        second_fs = "second_fs"
        second_data_pool = "second_data_pool"
        self.fs.mon_manager.raw_cluster_cmd('osd', 'pool', 'create', second_data_pool)

        # try to create new fs 'second_fs' with following configuration
        # metadata pool -> 'first_metadata_pool'
        # data pool -> 'second_data_pool'
        # Expecting EINVAL exit status because 'first_metadata_pool'
        # is already in use with 'first_fs'
        try:
            self.fs.mon_manager.raw_cluster_cmd('fs', 'new', second_fs, first_metadata_pool, second_data_pool)
        except CommandFailedError as e:
            self.assertEqual(e.exitstatus, errno.EINVAL)
        else:
            self.fail("Expected EINVAL because metadata pool is already in use for 'first_fs'")

    def test_fs_new_data_pool_already_in_use(self):
        """
        That creating file system with data pool already in use.
        """
        # create first data pool, metadata pool and add with filesystem
        first_fs = "first_fs"
        first_metadata_pool = "first_metadata_pool"
        first_data_pool = "first_data_pool"
        self.fs.mon_manager.raw_cluster_cmd('osd', 'pool', 'create', first_metadata_pool)
        self.fs.mon_manager.raw_cluster_cmd('osd', 'pool', 'create', first_data_pool)
        self.fs.mon_manager.raw_cluster_cmd('fs', 'new', first_fs, first_metadata_pool, first_data_pool)

        second_fs = "second_fs"
        second_metadata_pool = "second_metadata_pool"
        self.fs.mon_manager.raw_cluster_cmd('osd', 'pool', 'create', second_metadata_pool)

        # try to create new fs 'second_fs' with following configuration
        # metadata pool -> 'second_metadata_pool'
        # data pool -> 'first_data_pool'
        # Expecting EINVAL exit status because 'first_data_pool'
        # is already in use with 'first_fs'
        try:
            self.fs.mon_manager.raw_cluster_cmd('fs', 'new', second_fs, second_metadata_pool, first_data_pool)
        except CommandFailedError as e:
            self.assertEqual(e.exitstatus, errno.EINVAL)
        else:
            self.fail("Expected EINVAL because data pool is already in use for 'first_fs'")

    def test_fs_new_metadata_and_data_pool_in_use_by_another_same_fs(self):
        """
        That creating file system with metadata and data pool which is already in use by another same fs.
        """
        # create first data pool, metadata pool and add with filesystem
        first_fs = "first_fs"
        first_metadata_pool = "first_metadata_pool"
        first_data_pool = "first_data_pool"
        self.fs.mon_manager.raw_cluster_cmd('osd', 'pool', 'create', first_metadata_pool)
        self.fs.mon_manager.raw_cluster_cmd('osd', 'pool', 'create', first_data_pool)
        self.fs.mon_manager.raw_cluster_cmd('fs', 'new', first_fs, first_metadata_pool, first_data_pool)

        second_fs = "second_fs"

        # try to create new fs 'second_fs' with following configuration
        # metadata pool -> 'first_metadata_pool'
        # data pool -> 'first_data_pool'
        # Expecting EINVAL exit status because 'first_metadata_pool' and 'first_data_pool'
        # is already in use with 'first_fs'
        try:
            self.fs.mon_manager.raw_cluster_cmd('fs', 'new', second_fs, first_metadata_pool, first_data_pool)
        except CommandFailedError as e:
            self.assertEqual(e.exitstatus, errno.EINVAL)
        else:
            self.fail("Expected EINVAL because metadata and data pool is already in use for 'first_fs'")

    def test_fs_new_metadata_and_data_pool_in_use_by_different_fs(self):
        """
        That creating file system with metadata and data pool which is already in use by different fs.
        """
        # create first data pool, metadata pool and add with filesystem
        first_fs = "first_fs"
        first_metadata_pool = "first_metadata_pool"
        first_data_pool = "first_data_pool"
        self.fs.mon_manager.raw_cluster_cmd('osd', 'pool', 'create', first_metadata_pool)
        self.fs.mon_manager.raw_cluster_cmd('osd', 'pool', 'create', first_data_pool)
        self.fs.mon_manager.raw_cluster_cmd('fs', 'new', first_fs, first_metadata_pool, first_data_pool)

        # create second data pool, metadata pool and add with filesystem
        second_fs = "second_fs"
        second_metadata_pool = "second_metadata_pool"
        second_data_pool = "second_data_pool"
        self.fs.mon_manager.raw_cluster_cmd('osd', 'pool', 'create', second_metadata_pool)
        self.fs.mon_manager.raw_cluster_cmd('osd', 'pool', 'create', second_data_pool)
        self.fs.mon_manager.raw_cluster_cmd('fs', 'new', second_fs, second_metadata_pool, second_data_pool)

        third_fs = "third_fs"

        # try to create new fs 'third_fs' with following configuration
        # metadata pool -> 'first_metadata_pool'
        # data pool -> 'second_data_pool'
        # Expecting EINVAL exit status because 'first_metadata_pool' and 'second_data_pool'
        # is already in use with 'first_fs' and 'second_fs'
        try:
            self.fs.mon_manager.raw_cluster_cmd('fs', 'new', third_fs, first_metadata_pool, second_data_pool)
        except CommandFailedError as e:
            self.assertEqual(e.exitstatus, errno.EINVAL)
        else:
            self.fail("Expected EINVAL because metadata and data pool is already in use for 'first_fs' and 'second_fs'")

    def test_fs_new_interchange_already_in_use_metadata_and_data_pool_of_same_fs(self):
        """
        That creating file system with interchanging metadata and data pool which is already in use by same fs.
        """
        # create first data pool, metadata pool and add with filesystem
        first_fs = "first_fs"
        first_metadata_pool = "first_metadata_pool"
        first_data_pool = "first_data_pool"
        self.fs.mon_manager.raw_cluster_cmd('osd', 'pool', 'create', first_metadata_pool)
        self.fs.mon_manager.raw_cluster_cmd('osd', 'pool', 'create', first_data_pool)
        self.fs.mon_manager.raw_cluster_cmd('fs', 'new', first_fs, first_metadata_pool, first_data_pool)

        second_fs = "second_fs"

        # try to create new fs 'second_fs' with following configuration
        # metadata pool -> 'first_data_pool' (already used as data pool for 'first_fs')
        # data pool -> 'first_metadata_pool' (already used as metadata pool for 'first_fs')
        # Expecting EINVAL exit status because 'first_data_pool' and 'first_metadata_pool'
        # is already in use with 'first_fs'
        try:
            self.fs.mon_manager.raw_cluster_cmd('fs', 'new', second_fs, first_data_pool, first_metadata_pool)
        except CommandFailedError as e:
            self.assertEqual(e.exitstatus, errno.EINVAL)
        else:
            self.fail("Expected EINVAL because metadata and data pool is already in use for 'first_fs'")

    def test_fs_new_interchange_already_in_use_metadata_and_data_pool_of_different_fs(self):
        """
        That creating file system with interchanging metadata and data pool which is already in use by defferent fs.
        """
        # create first data pool, metadata pool and add with filesystem
        first_fs = "first_fs"
        first_metadata_pool = "first_metadata_pool"
        first_data_pool = "first_data_pool"
        self.fs.mon_manager.raw_cluster_cmd('osd', 'pool', 'create', first_metadata_pool)
        self.fs.mon_manager.raw_cluster_cmd('osd', 'pool', 'create', first_data_pool)
        self.fs.mon_manager.raw_cluster_cmd('fs', 'new', first_fs, first_metadata_pool, first_data_pool)

        # create second data pool, metadata pool and add with filesystem
        second_fs = "second_fs"
        second_metadata_pool = "second_metadata_pool"
        second_data_pool = "second_data_pool"
        self.fs.mon_manager.raw_cluster_cmd('osd', 'pool', 'create', second_metadata_pool)
        self.fs.mon_manager.raw_cluster_cmd('osd', 'pool', 'create', second_data_pool)
        self.fs.mon_manager.raw_cluster_cmd('fs', 'new', second_fs, second_metadata_pool, second_data_pool)

        third_fs = "third_fs"

        # try to create new fs 'third_fs' with following configuration
        # metadata pool -> 'first_data_pool' (already used as data pool for 'first_fs')
        # data pool -> 'second_metadata_pool' (already used as metadata pool for 'second_fs')
        # Expecting EINVAL exit status because 'first_data_pool' and 'second_metadata_pool'
        # is already in use with 'first_fs' and 'second_fs'
        try:
            self.fs.mon_manager.raw_cluster_cmd('fs', 'new', third_fs, first_data_pool, second_metadata_pool)
        except CommandFailedError as e:
            self.assertEqual(e.exitstatus, errno.EINVAL)
        else:
            self.fail("Expected EINVAL because metadata and data pool is already in use for 'first_fs' and 'second_fs'")

    def test_fs_new_metadata_pool_already_in_use_with_rbd(self):
        """
        That creating new file system with metadata pool already used by rbd.
        """
        # create pool and initialise with rbd
        new_pool = "new_pool"
        self.fs.mon_manager.raw_cluster_cmd('osd', 'pool', 'create', new_pool)
        self.ctx.cluster.run(args=['rbd', 'pool', 'init', new_pool])

        # NOTE(review): the new_fs assignment was dropped by the extraction;
        # restored from its use below — confirm against upstream.
        new_fs = "new_fs"
        new_data_pool = "new_data_pool"
        self.fs.mon_manager.raw_cluster_cmd('osd', 'pool', 'create', new_data_pool)

        # try to create new fs 'new_fs' with following configuration
        # metadata pool -> 'new_pool' (already used by rbd app)
        # data pool -> 'new_data_pool'
        # Expecting EINVAL exit status because 'new_pool' is already in use with 'rbd' app
        try:
            self.fs.mon_manager.raw_cluster_cmd('fs', 'new', new_fs, new_pool, new_data_pool)
        except CommandFailedError as e:
            self.assertEqual(e.exitstatus, errno.EINVAL)
        else:
            self.fail("Expected EINVAL because metadata pool is already in use for rbd")

    def test_fs_new_data_pool_already_in_use_with_rbd(self):
        """
        That creating new file system with data pool already used by rbd.
        """
        # create pool and initialise with rbd
        new_pool = "new_pool"
        self.fs.mon_manager.raw_cluster_cmd('osd', 'pool', 'create', new_pool)
        self.ctx.cluster.run(args=['rbd', 'pool', 'init', new_pool])

        new_fs = "new_fs"
        new_metadata_pool = "new_metadata_pool"
        self.fs.mon_manager.raw_cluster_cmd('osd', 'pool', 'create', new_metadata_pool)

        # try to create new fs 'new_fs' with following configuration
        # metadata pool -> 'new_metadata_pool'
        # data pool -> 'new_pool' (already used by rbd app)
        # Expecting EINVAL exit status because 'new_pool' is already in use with 'rbd' app
        try:
            self.fs.mon_manager.raw_cluster_cmd('fs', 'new', new_fs, new_metadata_pool, new_pool)
        except CommandFailedError as e:
            self.assertEqual(e.exitstatus, errno.EINVAL)
        else:
            self.fail("Expected EINVAL because data pool is already in use for rbd")
class TestRenameCommand(TestAdminCommands):
    """
    Tests for rename command.
    """

    def test_fs_rename(self):
        """
        That the file system can be renamed, and the application metadata set on its pools are as expected.
        """
        # Renaming the file system breaks this mount as the client uses
        # file system specific authorization. The client cannot read
        # or write even if the client's cephx ID caps are updated to access
        # the new file system name without the client being unmounted and
        # remounted.
        self.mount_a.umount_wait(require_clean=True)
        orig_fs_name = self.fs.name
        new_fs_name = 'new_cephfs'
        client_id = 'test_new_cephfs'

        self.run_cluster_cmd(f'fs rename {orig_fs_name} {new_fs_name} --yes-i-really-mean-it')

        # authorize a cephx ID access to the renamed file system.
        # use the ID to write to the file system.
        self.fs.name = new_fs_name
        keyring = self.fs.authorize(client_id, ('/', 'rw'))
        keyring_path = self.mount_a.client_remote.mktemp(data=keyring)
        # NOTE(review): the cephfs_mntpt kwarg line was dropped by the
        # extraction; '/' restored — confirm against upstream.
        self.mount_a.remount(client_id=client_id,
                             client_keyring_path=keyring_path,
                             cephfs_mntpt='/',
                             cephfs_name=self.fs.name)
        filedata, filename = 'some data on fs', 'file_on_fs'
        filepath = os_path_join(self.mount_a.hostfs_mntpt, filename)
        self.mount_a.write_file(filepath, filedata)
        self.check_pool_application_metadata_key_value(
            self.fs.get_data_pool_name(), 'cephfs', 'data', new_fs_name)
        self.check_pool_application_metadata_key_value(
            self.fs.get_metadata_pool_name(), 'cephfs', 'metadata', new_fs_name)

        # cleanup
        self.mount_a.umount_wait()
        self.run_cluster_cmd(f'auth rm client.{client_id}')

    def test_fs_rename_idempotency(self):
        """
        That the file system rename operation is idempotent.
        """
        # Renaming the file system breaks this mount as the client uses
        # file system specific authorization.
        self.mount_a.umount_wait(require_clean=True)
        orig_fs_name = self.fs.name
        new_fs_name = 'new_cephfs'

        self.run_cluster_cmd(f'fs rename {orig_fs_name} {new_fs_name} --yes-i-really-mean-it')
        self.run_cluster_cmd(f'fs rename {orig_fs_name} {new_fs_name} --yes-i-really-mean-it')

        # original file system name does not appear in `fs ls` command
        self.assertFalse(self.fs.exists())
        self.fs.name = new_fs_name
        self.assertTrue(self.fs.exists())

    def test_fs_rename_fs_new_fails_with_old_fsname_existing_pools(self):
        """
        That after renaming a file system, creating a file system with
        old name and existing FS pools fails.
        """
        # Renaming the file system breaks this mount as the client uses
        # file system specific authorization.
        self.mount_a.umount_wait(require_clean=True)
        orig_fs_name = self.fs.name
        new_fs_name = 'new_cephfs'
        data_pool = self.fs.get_data_pool_name()
        metadata_pool = self.fs.get_metadata_pool_name()
        self.run_cluster_cmd(f'fs rename {orig_fs_name} {new_fs_name} --yes-i-really-mean-it')

        try:
            self.run_cluster_cmd(f"fs new {orig_fs_name} {metadata_pool} {data_pool}")
        except CommandFailedError as ce:
            self.assertEqual(ce.exitstatus, errno.EINVAL,
                "invalid error code on creating a new file system with old "
                "name and existing pools.")
        else:
            self.fail("expected creating new file system with old name and "
                      "existing pools to fail.")

        try:
            self.run_cluster_cmd(f"fs new {orig_fs_name} {metadata_pool} {data_pool} --force")
        except CommandFailedError as ce:
            self.assertEqual(ce.exitstatus, errno.EINVAL,
                "invalid error code on creating a new file system with old "
                "name, existing pools and --force flag.")
        else:
            self.fail("expected creating new file system with old name, "
                      "existing pools, and --force flag to fail.")

        try:
            self.run_cluster_cmd(f"fs new {orig_fs_name} {metadata_pool} {data_pool} "
                                 "--allow-dangerous-metadata-overlay")
        except CommandFailedError as ce:
            self.assertEqual(ce.exitstatus, errno.EINVAL,
                "invalid error code on creating a new file system with old name, "
                "existing pools and --allow-dangerous-metadata-overlay flag.")
        else:
            self.fail("expected creating new file system with old name, "
                      "existing pools, and --allow-dangerous-metadata-overlay flag to fail.")

    def test_fs_rename_fails_without_yes_i_really_mean_it_flag(self):
        """
        That renaming a file system without '--yes-i-really-mean-it' flag fails.
        """
        try:
            self.run_cluster_cmd(f"fs rename {self.fs.name} new_fs")
        except CommandFailedError as ce:
            self.assertEqual(ce.exitstatus, errno.EPERM,
                "invalid error code on renaming a file system without the "
                "'--yes-i-really-mean-it' flag")
        else:
            self.fail("expected renaming of file system without the "
                      "'--yes-i-really-mean-it' flag to fail ")

    def test_fs_rename_fails_for_non_existent_fs(self):
        """
        That renaming a non-existent file system fails.
        """
        try:
            self.run_cluster_cmd("fs rename non_existent_fs new_fs --yes-i-really-mean-it")
        except CommandFailedError as ce:
            self.assertEqual(ce.exitstatus, errno.ENOENT, "invalid error code on renaming a non-existent fs")
        else:
            self.fail("expected renaming of a non-existent file system to fail")

    def test_fs_rename_fails_new_name_already_in_use(self):
        """
        That renaming a file system fails if the new name refers to an existing file system.
        """
        self.fs2 = self.mds_cluster.newfs(name='cephfs2', create=True)

        try:
            self.run_cluster_cmd(f"fs rename {self.fs.name} {self.fs2.name} --yes-i-really-mean-it")
        except CommandFailedError as ce:
            self.assertEqual(ce.exitstatus, errno.EINVAL,
                "invalid error code on renaming to a fs name that is already in use")
        else:
            self.fail("expected renaming to a new file system name that is already in use to fail.")

    def test_fs_rename_fails_with_mirroring_enabled(self):
        """
        That renaming a file system fails if mirroring is enabled on it.
        """
        orig_fs_name = self.fs.name
        new_fs_name = 'new_cephfs'

        self.run_cluster_cmd(f'fs mirror enable {orig_fs_name}')
        try:
            self.run_cluster_cmd(f'fs rename {orig_fs_name} {new_fs_name} --yes-i-really-mean-it')
        except CommandFailedError as ce:
            self.assertEqual(ce.exitstatus, errno.EPERM, "invalid error code on renaming a mirrored file system")
        else:
            self.fail("expected renaming of a mirrored file system to fail")
        self.run_cluster_cmd(f'fs mirror disable {orig_fs_name}')
class TestDump(CephFSTestCase):
    """
    Tests for FSMap dump and trimming.
    """

    def test_fs_dump_epoch(self):
        """
        That dumping a specific epoch works.
        """
        status1 = self.fs.status()
        status2 = self.fs.status(epoch=status1["epoch"]-1)
        self.assertEqual(status1["epoch"], status2["epoch"]+1)

    def test_fsmap_trim(self):
        """
        That the fsmap is trimmed normally.
        """
        paxos_service_trim_min = 25
        self.config_set('mon', 'paxos_service_trim_min', paxos_service_trim_min)
        mon_max_mdsmap_epochs = 20
        self.config_set('mon', 'mon_max_mdsmap_epochs', mon_max_mdsmap_epochs)

        status = self.fs.status()
        epoch = status["epoch"]

        # enough mutations to force a trim past the saved epoch
        mutations = paxos_service_trim_min + mon_max_mdsmap_epochs
        # NOTE(review): the `b` toggle lines were dropped by the extraction;
        # restored from the set_joinable(b) call — confirm against upstream.
        b = False
        for i in range(mutations):
            self.fs.set_joinable(b)
            b = not b

        time.sleep(10) # for tick/compaction

        try:
            self.fs.status(epoch=epoch)
        except CommandFailedError as e:
            self.assertEqual(e.exitstatus, errno.ENOENT, "invalid error code when trying to fetch FSMap that was trimmed")
        else:
            self.fail("trimming did not occur as expected")

    def test_fsmap_force_trim(self):
        """
        That the fsmap is trimmed forcefully.
        """
        status = self.fs.status()
        epoch = status["epoch"]

        paxos_service_trim_min = 1
        self.config_set('mon', 'paxos_service_trim_min', paxos_service_trim_min)
        mon_mds_force_trim_to = epoch+1
        self.config_set('mon', 'mon_mds_force_trim_to', mon_mds_force_trim_to)

        # force a new fsmap epoch
        self.fs.set_joinable(False)
        time.sleep(10) # for tick/compaction

        status = self.fs.status()
        log.debug(f"new epoch is {status['epoch']}")
        self.fs.status(epoch=epoch+1) # epoch+1 is not trimmed, may not == status["epoch"]

        try:
            self.fs.status(epoch=epoch)
        except CommandFailedError as e:
            self.assertEqual(e.exitstatus, errno.ENOENT, "invalid error code when trying to fetch FSMap that was trimmed")
        else:
            self.fail("trimming did not occur as expected")
class TestRequiredClientFeatures(CephFSTestCase):
    """
    Tests for the `ceph fs required_client_features` command.
    """

    def test_required_client_features(self):
        """
        That `ceph fs required_client_features` command functions.
        """
        def is_required(index):
            # True iff feature bit `index` appears in the fs's required set.
            out = self.fs.mon_manager.raw_cluster_cmd('fs', 'get', self.fs.name, '--format=json-pretty')
            features = json.loads(out)['mdsmap']['required_client_features']
            if "feature_{0}".format(index) in features:
                return True
            return False

        features = json.loads(self.fs.mon_manager.raw_cluster_cmd('fs', 'feature', 'ls', '--format=json-pretty'))
        self.assertGreater(len(features), 0)

        # NOTE(review): loop headers and index/feature bindings below were
        # dropped by the extraction; restored from the loop bodies visible
        # in the source — confirm against upstream.
        for f in features:
            self.fs.required_client_features('rm', str(f['index']))

        for f in features:
            index = f['index']
            feature = f['name']
            if feature == 'reserved':
                feature = str(index)

            self.fs.required_client_features('add', feature)
            self.assertTrue(is_required(index))

            self.fs.required_client_features('rm', feature)
            self.assertFalse(is_required(index))

    def test_required_client_feature_add_reserved(self):
        """
        That `ceph fs required_client_features X add reserved` fails.
        """
        p = self.fs.required_client_features('add', 'reserved', check_status=False, stderr=StringIO())
        self.assertIn('Invalid feature name', p.stderr.getvalue())

    def test_required_client_feature_rm_reserved(self):
        """
        That `ceph fs required_client_features X rm reserved` fails.
        """
        p = self.fs.required_client_features('rm', 'reserved', check_status=False, stderr=StringIO())
        self.assertIn('Invalid feature name', p.stderr.getvalue())

    def test_required_client_feature_add_reserved_bit(self):
        """
        That `ceph fs required_client_features X add <reserved_bit>` passes.
        """
        p = self.fs.required_client_features('add', '1', stderr=StringIO())
        self.assertIn("added feature 'reserved' to required_client_features", p.stderr.getvalue())

    def test_required_client_feature_rm_reserved_bit(self):
        """
        That `ceph fs required_client_features X rm <reserved_bit>` passes.
        """
        self.fs.required_client_features('add', '1')
        p = self.fs.required_client_features('rm', '1', stderr=StringIO())
        self.assertIn("removed feature 'reserved' from required_client_features", p.stderr.getvalue())
887 class TestCompatCommands(CephFSTestCase
):
894 def test_add_compat(self
):
896 Test adding a compat.
900 self
.fs
.add_compat(63, 'placeholder')
901 mdsmap
= self
.fs
.get_mds_map()
902 self
.assertIn("feature_63", mdsmap
['compat']['compat'])
904 def test_add_incompat(self
):
906 Test adding an incompat.
910 self
.fs
.add_incompat(63, 'placeholder')
911 mdsmap
= self
.fs
.get_mds_map()
912 log
.info(f
"{mdsmap}")
913 self
.assertIn("feature_63", mdsmap
['compat']['incompat'])
915 def test_rm_compat(self
):
917 Test removing a compat.
921 self
.fs
.add_compat(63, 'placeholder')
922 self
.fs
.rm_compat(63)
923 mdsmap
= self
.fs
.get_mds_map()
924 self
.assertNotIn("feature_63", mdsmap
['compat']['compat'])
926 def test_rm_incompat(self
):
928 Test removing an incompat.
932 self
.fs
.add_incompat(63, 'placeholder')
933 self
.fs
.rm_incompat(63)
934 mdsmap
= self
.fs
.get_mds_map()
935 self
.assertNotIn("feature_63", mdsmap
['compat']['incompat'])
937 def test_standby_compat(self
):
939 That adding a compat does not prevent standbys from joining.
943 self
.fs
.add_compat(63, "placeholder")
944 self
.fs
.set_joinable()
945 self
.fs
.wait_for_daemons()
946 mdsmap
= self
.fs
.get_mds_map()
947 self
.assertIn("feature_63", mdsmap
['compat']['compat'])
949 def test_standby_incompat_reject(self
):
951 That adding an incompat feature prevents incompatible daemons from joining.
955 self
.fs
.add_incompat(63, "placeholder")
956 self
.fs
.set_joinable()
958 self
.fs
.wait_for_daemons(timeout
=60)
959 except RuntimeError as e
:
960 if "Timed out waiting for MDS daemons to become healthy" in str(e
):
967 def test_standby_incompat_upgrade(self
):
969 That an MDS can upgrade the compat of a fs.
973 self
.fs
.rm_incompat(1)
974 self
.fs
.set_joinable()
975 self
.fs
.wait_for_daemons()
976 mdsmap
= self
.fs
.get_mds_map()
977 self
.assertIn("feature_1", mdsmap
['compat']['incompat'])
979 def test_standby_replay_not_upgradeable(self
):
981 That the mons will not upgrade the MDSMap compat if standby-replay is
986 self
.fs
.rm_incompat(1)
987 self
.fs
.set_allow_standby_replay(True)
988 self
.fs
.set_joinable()
990 self
.fs
.wait_for_daemons(timeout
=60)
991 except RuntimeError as e
:
992 if "Timed out waiting for MDS daemons to become healthy" in str(e
):
999 def test_standby_incompat_reject_multifs(self
):
1001 Like test_standby_incompat_reject but with a second fs.
1004 fs2
= self
.mds_cluster
.newfs(name
="cephfs2", create
=True)
1006 fs2
.add_incompat(63, 'placeholder')
1009 fs2
.wait_for_daemons(timeout
=60)
1010 except RuntimeError as e
:
1011 if "Timed out waiting for MDS daemons to become healthy" in str(e
):
1017 # did self.fs lose MDS or standbys suicide?
1018 self
.fs
.wait_for_daemons()
1019 mdsmap
= fs2
.get_mds_map()
1020 self
.assertIn("feature_63", mdsmap
['compat']['incompat'])
1022 class TestConfigCommands(CephFSTestCase
):
1024 Test that daemons and clients respond to the otherwise rarely-used
1025 runtime config modification operations.
1028 CLIENTS_REQUIRED
= 1
1031 def test_ceph_config_show(self
):
1033 That I can successfully show MDS configuration.
1036 names
= self
.fs
.get_rank_names()
1038 s
= self
.fs
.mon_manager
.raw_cluster_cmd("config", "show", "mds."+n
)
1039 self
.assertTrue("NAME" in s
)
1040 self
.assertTrue("mon_host" in s
)
1043 def test_client_config(self
):
1045 That I can successfully issue asok "config set" commands
1050 if not isinstance(self
.mount_a
, FuseMount
):
1051 self
.skipTest("Test only applies to FUSE clients")
1053 test_key
= "client_cache_size"
1055 self
.mount_a
.admin_socket(['config', 'set', test_key
, test_val
])
1056 out
= self
.mount_a
.admin_socket(['config', 'get', test_key
])
1057 self
.assertEqual(out
[test_key
], test_val
)
1060 def test_mds_config_asok(self
):
1061 test_key
= "mds_max_purge_ops"
1063 self
.fs
.mds_asok(['config', 'set', test_key
, test_val
])
1064 out
= self
.fs
.mds_asok(['config', 'get', test_key
])
1065 self
.assertEqual(out
[test_key
], test_val
)
1067 def test_mds_dump_cache_asok(self
):
1068 cache_file
= "cache_file"
1070 self
.fs
.rank_asok(['dump', 'cache', cache_file
, timeout
])
1072 def test_mds_config_tell(self
):
1073 test_key
= "mds_max_purge_ops"
1076 self
.fs
.rank_tell(['injectargs', "--{0}={1}".format(test_key
, test_val
)])
1078 # Read it back with asok because there is no `tell` equivalent
1079 out
= self
.fs
.rank_tell(['config', 'get', test_key
])
1080 self
.assertEqual(out
[test_key
], test_val
)
1083 class TestMirroringCommands(CephFSTestCase
):
1084 CLIENTS_REQUIRED
= 1
1087 def _enable_mirroring(self
, fs_name
):
1088 self
.fs
.mon_manager
.raw_cluster_cmd("fs", "mirror", "enable", fs_name
)
1090 def _disable_mirroring(self
, fs_name
):
1091 self
.fs
.mon_manager
.raw_cluster_cmd("fs", "mirror", "disable", fs_name
)
1093 def _add_peer(self
, fs_name
, peer_spec
, remote_fs_name
):
1094 peer_uuid
= str(uuid
.uuid4())
1095 self
.fs
.mon_manager
.raw_cluster_cmd("fs", "mirror", "peer_add", fs_name
, peer_uuid
, peer_spec
, remote_fs_name
)
1097 def _remove_peer(self
, fs_name
, peer_uuid
):
1098 self
.fs
.mon_manager
.raw_cluster_cmd("fs", "mirror", "peer_remove", fs_name
, peer_uuid
)
1100 def _verify_mirroring(self
, fs_name
, flag_str
):
1101 status
= self
.fs
.status()
1102 fs_map
= status
.get_fsmap_byname(fs_name
)
1103 if flag_str
== 'enabled':
1104 self
.assertTrue('mirror_info' in fs_map
)
1105 elif flag_str
== 'disabled':
1106 self
.assertTrue('mirror_info' not in fs_map
)
1108 raise RuntimeError(f
'invalid flag_str {flag_str}')
1110 def _get_peer_uuid(self
, fs_name
, peer_spec
):
1111 status
= self
.fs
.status()
1112 fs_map
= status
.get_fsmap_byname(fs_name
)
1113 mirror_info
= fs_map
.get('mirror_info', None)
1114 self
.assertTrue(mirror_info
is not None)
1115 for peer_uuid
, remote
in mirror_info
['peers'].items():
1116 client_name
= remote
['remote']['client_name']
1117 cluster_name
= remote
['remote']['cluster_name']
1118 spec
= f
'{client_name}@{cluster_name}'
1119 if spec
== peer_spec
:
1123 def test_mirroring_command(self
):
1124 """basic mirroring command test -- enable, disable mirroring on a
1126 self
._enable
_mirroring
(self
.fs
.name
)
1127 self
._verify
_mirroring
(self
.fs
.name
, "enabled")
1128 self
._disable
_mirroring
(self
.fs
.name
)
1129 self
._verify
_mirroring
(self
.fs
.name
, "disabled")
1131 def test_mirroring_peer_commands(self
):
1132 """test adding and removing peers to a mirror enabled filesystem"""
1133 self
._enable
_mirroring
(self
.fs
.name
)
1134 self
._add
_peer
(self
.fs
.name
, "client.site-b@site-b", "fs_b")
1135 self
._add
_peer
(self
.fs
.name
, "client.site-c@site-c", "fs_c")
1136 self
._verify
_mirroring
(self
.fs
.name
, "enabled")
1137 uuid_peer_b
= self
._get
_peer
_uuid
(self
.fs
.name
, "client.site-b@site-b")
1138 uuid_peer_c
= self
._get
_peer
_uuid
(self
.fs
.name
, "client.site-c@site-c")
1139 self
.assertTrue(uuid_peer_b
is not None)
1140 self
.assertTrue(uuid_peer_c
is not None)
1141 self
._remove
_peer
(self
.fs
.name
, uuid_peer_b
)
1142 self
._remove
_peer
(self
.fs
.name
, uuid_peer_c
)
1143 self
._disable
_mirroring
(self
.fs
.name
)
1144 self
._verify
_mirroring
(self
.fs
.name
, "disabled")
1146 def test_mirroring_command_idempotency(self
):
1147 """test to check idempotency of mirroring family of commands """
1148 self
._enable
_mirroring
(self
.fs
.name
)
1149 self
._verify
_mirroring
(self
.fs
.name
, "enabled")
1150 self
._enable
_mirroring
(self
.fs
.name
)
1152 self
._add
_peer
(self
.fs
.name
, "client.site-b@site-b", "fs_b")
1153 uuid_peer_b1
= self
._get
_peer
_uuid
(self
.fs
.name
, "client.site-b@site-b")
1154 self
.assertTrue(uuid_peer_b1
is not None)
1155 # adding the peer again should be idempotent
1156 self
._add
_peer
(self
.fs
.name
, "client.site-b@site-b", "fs_b")
1157 uuid_peer_b2
= self
._get
_peer
_uuid
(self
.fs
.name
, "client.site-b@site-b")
1158 self
.assertTrue(uuid_peer_b2
is not None)
1159 self
.assertTrue(uuid_peer_b1
== uuid_peer_b2
)
1161 self
._remove
_peer
(self
.fs
.name
, uuid_peer_b1
)
1162 uuid_peer_b3
= self
._get
_peer
_uuid
(self
.fs
.name
, "client.site-b@site-b")
1163 self
.assertTrue(uuid_peer_b3
is None)
1164 # removing the peer again should be idempotent
1165 self
._remove
_peer
(self
.fs
.name
, uuid_peer_b1
)
1166 self
._disable
_mirroring
(self
.fs
.name
)
1167 self
._verify
_mirroring
(self
.fs
.name
, "disabled")
1168 self
._disable
_mirroring
(self
.fs
.name
)
1170 def test_mirroring_disable_with_peers(self
):
1171 """test disabling mirroring for a filesystem with active peers"""
1172 self
._enable
_mirroring
(self
.fs
.name
)
1173 self
._add
_peer
(self
.fs
.name
, "client.site-b@site-b", "fs_b")
1174 self
._verify
_mirroring
(self
.fs
.name
, "enabled")
1175 uuid_peer_b
= self
._get
_peer
_uuid
(self
.fs
.name
, "client.site-b@site-b")
1176 self
.assertTrue(uuid_peer_b
is not None)
1177 self
._disable
_mirroring
(self
.fs
.name
)
1178 self
._verify
_mirroring
(self
.fs
.name
, "disabled")
1179 # enable mirroring to check old peers
1180 self
._enable
_mirroring
(self
.fs
.name
)
1181 self
._verify
_mirroring
(self
.fs
.name
, "enabled")
1182 # peer should be gone
1183 uuid_peer_b
= self
._get
_peer
_uuid
(self
.fs
.name
, "client.site-b@site-b")
1184 self
.assertTrue(uuid_peer_b
is None)
1185 self
._disable
_mirroring
(self
.fs
.name
)
1186 self
._verify
_mirroring
(self
.fs
.name
, "disabled")
1188 def test_mirroring_with_filesystem_reset(self
):
1189 """test to verify mirroring state post filesystem reset"""
1190 self
._enable
_mirroring
(self
.fs
.name
)
1191 self
._add
_peer
(self
.fs
.name
, "client.site-b@site-b", "fs_b")
1192 self
._verify
_mirroring
(self
.fs
.name
, "enabled")
1193 uuid_peer_b
= self
._get
_peer
_uuid
(self
.fs
.name
, "client.site-b@site-b")
1194 self
.assertTrue(uuid_peer_b
is not None)
1198 self
.fs
.wait_for_daemons()
1199 self
._verify
_mirroring
(self
.fs
.name
, "disabled")
1202 class TestFsAuthorize(CephFSTestCase
):
1203 client_id
= 'testuser'
1204 client_name
= 'client.' + client_id
1206 def test_single_path_r(self
):
1208 FS_AUTH_CAPS
= (('/', PERM
),)
1209 self
.captester
= CapTester()
1210 self
.setup_test_env(FS_AUTH_CAPS
)
1212 self
.captester
.run_mon_cap_tests(self
.fs
, self
.client_id
)
1213 self
.captester
.run_mds_cap_tests(PERM
)
1215 def test_single_path_rw(self
):
1217 FS_AUTH_CAPS
= (('/', PERM
),)
1218 self
.captester
= CapTester()
1219 self
.setup_test_env(FS_AUTH_CAPS
)
1221 self
.captester
.run_mon_cap_tests(self
.fs
, self
.client_id
)
1222 self
.captester
.run_mds_cap_tests(PERM
)
1224 def test_single_path_rootsquash(self
):
1226 FS_AUTH_CAPS
= (('/', PERM
, 'root_squash'),)
1227 self
.captester
= CapTester()
1228 self
.setup_test_env(FS_AUTH_CAPS
)
1230 # testing MDS caps...
1231 # Since root_squash is set in client caps, client can read but not
1232 # write even thought access level is set to "rw".
1233 self
.captester
.conduct_pos_test_for_read_caps()
1234 self
.captester
.conduct_neg_test_for_write_caps(sudo_write
=True)
1236 def test_single_path_authorize_on_nonalphanumeric_fsname(self
):
1238 That fs authorize command works on filesystems with names having [_.-]
1241 self
.mount_a
.umount_wait(require_clean
=True)
1242 self
.mds_cluster
.delete_all_filesystems()
1243 fs_name
= "cephfs-_."
1244 self
.fs
= self
.mds_cluster
.newfs(name
=fs_name
)
1245 self
.fs
.wait_for_daemons()
1246 self
.run_cluster_cmd(f
'auth caps client.{self.mount_a.client_id} '
1248 f
'osd "allow rw pool={self.fs.get_data_pool_name()}" '
1250 self
.mount_a
.remount(cephfs_name
=self
.fs
.name
)
1252 FS_AUTH_CAPS
= (('/', PERM
),)
1253 self
.captester
= CapTester()
1254 self
.setup_test_env(FS_AUTH_CAPS
)
1255 self
.captester
.run_mds_cap_tests(PERM
)
1257 def test_multiple_path_r(self
):
1259 FS_AUTH_CAPS
= (('/dir1/dir12', PERM
), ('/dir2/dir22', PERM
))
1260 for c
in FS_AUTH_CAPS
:
1261 self
.mount_a
.run_shell(f
'mkdir -p .{c[0]}')
1262 self
.captesters
= (CapTester(), CapTester())
1263 self
.setup_test_env(FS_AUTH_CAPS
)
1265 self
.run_cap_test_one_by_one(FS_AUTH_CAPS
)
1267 def test_multiple_path_rw(self
):
1269 FS_AUTH_CAPS
= (('/dir1/dir12', PERM
), ('/dir2/dir22', PERM
))
1270 for c
in FS_AUTH_CAPS
:
1271 self
.mount_a
.run_shell(f
'mkdir -p .{c[0]}')
1272 self
.captesters
= (CapTester(), CapTester())
1273 self
.setup_test_env(FS_AUTH_CAPS
)
1275 self
.run_cap_test_one_by_one(FS_AUTH_CAPS
)
1277 def run_cap_test_one_by_one(self
, fs_auth_caps
):
1278 keyring
= self
.run_cluster_cmd(f
'auth get {self.client_name}')
1279 for i
, c
in enumerate(fs_auth_caps
):
1280 self
.assertIn(i
, (0, 1))
1283 self
._remount
(keyring
, PATH
)
1285 self
.captesters
[i
].run_mon_cap_tests(self
.fs
, self
.client_id
)
1286 self
.captesters
[i
].run_mds_cap_tests(PERM
, PATH
)
1289 self
.mount_a
.umount_wait()
1290 self
.run_cluster_cmd(f
'auth rm {self.client_name}')
1292 super(type(self
), self
).tearDown()
1294 def _remount(self
, keyring
, path
='/'):
1295 keyring_path
= self
.mount_a
.client_remote
.mktemp(data
=keyring
)
1296 self
.mount_a
.remount(client_id
=self
.client_id
,
1297 client_keyring_path
=keyring_path
,
1300 def setup_for_single_path(self
, fs_auth_caps
):
1301 self
.captester
.write_test_files((self
.mount_a
,), '/')
1302 keyring
= self
.fs
.authorize(self
.client_id
, fs_auth_caps
)
1303 self
._remount
(keyring
)
1305 def setup_for_multiple_paths(self
, fs_auth_caps
):
1306 for i
, c
in enumerate(fs_auth_caps
):
1308 self
.captesters
[i
].write_test_files((self
.mount_a
,), PATH
)
1310 self
.fs
.authorize(self
.client_id
, fs_auth_caps
)
1312 def setup_test_env(self
, fs_auth_caps
):
1313 if len(fs_auth_caps
) == 1:
1314 self
.setup_for_single_path(fs_auth_caps
[0])
1316 self
.setup_for_multiple_paths(fs_auth_caps
)
1319 class TestAdminCommandIdempotency(CephFSTestCase
):
1321 Tests for administration command idempotency.
1324 CLIENTS_REQUIRED
= 0
1327 def test_rm_idempotency(self
):
1329 That a removing a fs twice is idempotent.
1332 data_pools
= self
.fs
.get_data_pool_names(refresh
=True)
1336 self
.fs
.get_mds_map()
1340 self
.fail("get_mds_map should raise")
1342 self
.assertIn("does not exist", p
.stderr
.getvalue())
1343 self
.fs
.remove_pools(data_pools
)
1346 class TestAdminCommandDumpTree(CephFSTestCase
):
1348 Tests for administration command subtrees.
1351 CLIENTS_REQUIRED
= 0
1354 def test_dump_subtrees(self
):
1356 Dump all the subtrees to make sure the MDS daemon won't crash.
1359 subtrees
= self
.fs
.mds_asok(['get', 'subtrees'])
1360 log
.info(f
"dumping {len(subtrees)} subtrees:")
1361 for subtree
in subtrees
:
1362 log
.info(f
" subtree: '{subtree['dir']['path']}'")
1363 self
.fs
.mds_asok(['dump', 'tree', subtree
['dir']['path']])
1365 log
.info("dumping 2 special subtrees:")
1366 log
.info(" subtree: '/'")
1367 self
.fs
.mds_asok(['dump', 'tree', '/'])
1368 log
.info(" subtree: '~mdsdir'")
1369 self
.fs
.mds_asok(['dump', 'tree', '~mdsdir'])
1371 class TestAdminCommandDumpLoads(CephFSTestCase
):
1373 Tests for administration command dump loads.
1376 CLIENTS_REQUIRED
= 0
1379 def test_dump_loads(self
):
1381 make sure depth limit param is considered when dump loads for a MDS daemon.
1384 log
.info("dumping loads")
1385 loads
= self
.fs
.mds_asok(['dump', 'loads', '1'])
1386 self
.assertIsNotNone(loads
)
1387 self
.assertIn("dirfrags", loads
)
1388 for d
in loads
["dirfrags"]:
1389 self
.assertLessEqual(d
["path"].count("/"), 1)
1391 class TestFsBalRankMask(CephFSTestCase
):
1393 Tests ceph fs set <fs_name> bal_rank_mask
1396 CLIENTS_REQUIRED
= 0
1399 def test_bal_rank_mask(self
):
1401 check whether a specified bal_rank_mask value is valid or not.
1403 bal_rank_mask
= '0x0'
1404 log
.info(f
"set bal_rank_mask {bal_rank_mask}")
1405 self
.fs
.set_bal_rank_mask(bal_rank_mask
)
1406 self
.assertEqual(bal_rank_mask
, self
.fs
.get_var('bal_rank_mask'))
1409 log
.info(f
"set bal_rank_mask {bal_rank_mask}")
1410 self
.fs
.set_bal_rank_mask(bal_rank_mask
)
1411 self
.assertEqual(bal_rank_mask
, self
.fs
.get_var('bal_rank_mask'))
1413 bal_rank_mask
= '-1'
1414 log
.info(f
"set bal_rank_mask {bal_rank_mask}")
1415 self
.fs
.set_bal_rank_mask(bal_rank_mask
)
1416 self
.assertEqual(bal_rank_mask
, self
.fs
.get_var('bal_rank_mask'))
1418 bal_rank_mask
= 'all'
1419 log
.info(f
"set bal_rank_mask {bal_rank_mask}")
1420 self
.fs
.set_bal_rank_mask(bal_rank_mask
)
1421 self
.assertEqual(bal_rank_mask
, self
.fs
.get_var('bal_rank_mask'))
1423 bal_rank_mask
= '0x1'
1424 log
.info(f
"set bal_rank_mask {bal_rank_mask}")
1425 self
.fs
.set_bal_rank_mask(bal_rank_mask
)
1426 self
.assertEqual(bal_rank_mask
, self
.fs
.get_var('bal_rank_mask'))
1429 log
.info(f
"set bal_rank_mask {bal_rank_mask}")
1430 self
.fs
.set_bal_rank_mask(bal_rank_mask
)
1431 self
.assertEqual(bal_rank_mask
, self
.fs
.get_var('bal_rank_mask'))
1433 bal_rank_mask
= 'f0'
1434 log
.info(f
"set bal_rank_mask {bal_rank_mask}")
1435 self
.fs
.set_bal_rank_mask(bal_rank_mask
)
1436 self
.assertEqual(bal_rank_mask
, self
.fs
.get_var('bal_rank_mask'))
1438 bal_rank_mask
= 'ab'
1439 log
.info(f
"set bal_rank_mask {bal_rank_mask}")
1440 self
.fs
.set_bal_rank_mask(bal_rank_mask
)
1441 self
.assertEqual(bal_rank_mask
, self
.fs
.get_var('bal_rank_mask'))
1443 bal_rank_mask
= '0xfff0'
1444 log
.info(f
"set bal_rank_mask {bal_rank_mask}")
1445 self
.fs
.set_bal_rank_mask(bal_rank_mask
)
1446 self
.assertEqual(bal_rank_mask
, self
.fs
.get_var('bal_rank_mask'))
1449 bal_rank_mask
= '0x' + 'f' * int(MAX_MDS
/ 4)
1450 log
.info(f
"set bal_rank_mask {bal_rank_mask}")
1451 self
.fs
.set_bal_rank_mask(bal_rank_mask
)
1452 self
.assertEqual(bal_rank_mask
, self
.fs
.get_var('bal_rank_mask'))
1455 log
.info("set bal_rank_mask to empty string")
1457 self
.fs
.set_bal_rank_mask(bal_rank_mask
)
1458 except CommandFailedError
as e
:
1459 self
.assertEqual(e
.exitstatus
, errno
.EINVAL
)
1461 bal_rank_mask
= '0x1' + 'f' * int(MAX_MDS
/ 4)
1462 log
.info(f
"set bal_rank_mask {bal_rank_mask}")
1464 self
.fs
.set_bal_rank_mask(bal_rank_mask
)
1465 except CommandFailedError
as e
:
1466 self
.assertEqual(e
.exitstatus
, errno
.EINVAL
)