ceph/qa/tasks/cephfs/test_admin.py
1 import errno
2 import json
3 import logging
4 import time
5 import uuid
6 from io import StringIO
7 from os.path import join as os_path_join
8
9 from teuthology.exceptions import CommandFailedError
10
11 from tasks.cephfs.cephfs_test_case import CephFSTestCase
12 from tasks.cephfs.filesystem import FileLayout, FSMissing
13 from tasks.cephfs.fuse_mount import FuseMount
14 from tasks.cephfs.caps_helper import CapTester
15
16 log = logging.getLogger(__name__)
17
18 class TestAdminCommands(CephFSTestCase):
19 """
20 Tests for administration commands.
21 """
22
23 CLIENTS_REQUIRED = 1
24 MDSS_REQUIRED = 1
25
26 def check_pool_application_metadata_key_value(self, pool, app, key, value):
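# Helper: read back a single application-metadata key for a pool and assert
# its value. The raw_cluster_cmd() call below corresponds roughly to this
# CLI invocation (illustrative sketch):
#   ceph osd pool application get <pool> <app> <key>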
27 output = self.fs.mon_manager.raw_cluster_cmd(
28 'osd', 'pool', 'application', 'get', pool, app, key)
29 self.assertEqual(str(output.strip()), value)
30
31 def setup_ec_pools(self, n, metadata=True, overwrites=True):
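# Helper: create "<n>-meta" (replicated) and "<n>-data" (erasure-coded)
# pools. The EC profile uses k=2 data chunks and m=2 coding chunks with an
# OSD-level failure domain; allow_ec_overwrites must be enabled before an EC
# pool can serve as a CephFS data pool (see the EC tests further below).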
32 if metadata:
33 self.fs.mon_manager.raw_cluster_cmd('osd', 'pool', 'create', n+"-meta", "8")
34 cmd = ['osd', 'erasure-code-profile', 'set', n+"-profile", "m=2", "k=2", "crush-failure-domain=osd"]
35 self.fs.mon_manager.raw_cluster_cmd(*cmd)
36 self.fs.mon_manager.raw_cluster_cmd('osd', 'pool', 'create', n+"-data", "8", "erasure", n+"-profile")
37 if overwrites:
38 self.fs.mon_manager.raw_cluster_cmd('osd', 'pool', 'set', n+"-data", 'allow_ec_overwrites', 'true')
39
40
41 class TestFsStatus(TestAdminCommands):
42 """
43 Test "ceph fs status" subcommand.
44 """
45
46 def test_fs_status(self):
47 """
48 That `ceph fs status` command functions.
49 """
50
51 s = self.fs.mon_manager.raw_cluster_cmd("fs", "status")
52 self.assertTrue("active" in s)
53
54 mdsmap = json.loads(self.fs.mon_manager.raw_cluster_cmd("fs", "status", "--format=json-pretty"))["mdsmap"]
55 self.assertEqual(mdsmap[0]["state"], "active")
56
57 mdsmap = json.loads(self.fs.mon_manager.raw_cluster_cmd("fs", "status", "--format=json"))["mdsmap"]
58 self.assertEqual(mdsmap[0]["state"], "active")
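# Note: the JSON assertions above only rely on the top-level "mdsmap" list
# and each entry's "state" field, i.e. a shape roughly like (abridged):
#   {"mdsmap": [{"state": "active", ...}], ...}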
59
60
61 class TestAddDataPool(TestAdminCommands):
62 """
63 Test "ceph fs add_data_pool" subcommand.
64 """
65
66 def test_add_data_pool_root(self):
67 """
68 That a new data pool can be added and used for the root directory.
69 """
70
71 p = self.fs.add_data_pool("foo")
72 self.fs.set_dir_layout(self.mount_a, ".", FileLayout(pool=p))
73
74 def test_add_data_pool_application_metadata(self):
75 """
76 That the application metadata set on a newly added data pool is as expected.
77 """
78 pool_name = "foo"
79 mon_cmd = self.fs.mon_manager.raw_cluster_cmd
80 mon_cmd('osd', 'pool', 'create', pool_name, '--pg_num_min',
81 str(self.fs.pg_num_min))
82 # Check whether https://tracker.ceph.com/issues/43061 is fixed
83 mon_cmd('osd', 'pool', 'application', 'enable', pool_name, 'cephfs')
84 self.fs.add_data_pool(pool_name, create=False)
85 self.check_pool_application_metadata_key_value(
86 pool_name, 'cephfs', 'data', self.fs.name)
87
88 def test_add_data_pool_subdir(self):
89 """
90 That a new data pool can be added and used for a sub-directory.
91 """
92
93 p = self.fs.add_data_pool("foo")
94 self.mount_a.run_shell("mkdir subdir")
95 self.fs.set_dir_layout(self.mount_a, "subdir", FileLayout(pool=p))
96
97 def test_add_data_pool_non_alphanumeric_name_as_subdir(self):
98 """
99 That a new data pool with non-alphanumeric name can be added and used for a sub-directory.
100 """
101 p = self.fs.add_data_pool("I-am-data_pool00.")
102 self.mount_a.run_shell("mkdir subdir")
103 self.fs.set_dir_layout(self.mount_a, "subdir", FileLayout(pool=p))
104
105 def test_add_data_pool_ec(self):
106 """
107 That a new EC data pool can be added.
108 """
109
110 n = "test_add_data_pool_ec"
111 self.setup_ec_pools(n, metadata=False)
112 self.fs.add_data_pool(n+"-data", create=False)
113
114 def test_add_already_in_use_data_pool(self):
115 """
116 That adding a data pool which is already in use by another fs fails.
117 """
118
119 # create metadata and data pools, then create the first filesystem with them
120 first_fs = "first_fs"
121 first_metadata_pool = "first_metadata_pool"
122 first_data_pool = "first_data_pool"
123 self.fs.mon_manager.raw_cluster_cmd('osd', 'pool', 'create', first_metadata_pool)
124 self.fs.mon_manager.raw_cluster_cmd('osd', 'pool', 'create', first_data_pool)
125 self.fs.mon_manager.raw_cluster_cmd('fs', 'new', first_fs, first_metadata_pool, first_data_pool)
126
127 # create metadata and data pools, then create the second filesystem with them
128 second_fs = "second_fs"
129 second_metadata_pool = "second_metadata_pool"
130 second_data_pool = "second_data_pool"
131 self.fs.mon_manager.raw_cluster_cmd('osd', 'pool', 'create', second_metadata_pool)
132 self.fs.mon_manager.raw_cluster_cmd('osd', 'pool', 'create', second_data_pool)
133 self.fs.mon_manager.raw_cluster_cmd('fs', 'new', second_fs, second_metadata_pool, second_data_pool)
134
135 # try to add 'first_data_pool' to 'second_fs'
136 # Expecting EINVAL exit status because 'first_data_pool' is already in use by 'first_fs'
137 try:
138 self.fs.mon_manager.raw_cluster_cmd('fs', 'add_data_pool', second_fs, first_data_pool)
139 except CommandFailedError as e:
140 self.assertEqual(e.exitstatus, errno.EINVAL)
141 else:
142 self.fail("Expected EINVAL because data pool is already in use as data pool for first_fs")
143
144 def test_add_already_in_use_metadata_pool(self):
145 """
146 That adding a pool which is already in use as another fs's metadata pool fails.
147 """
148
149 # create metadata and data pools, then create the first filesystem with them
150 first_fs = "first_fs"
151 first_metadata_pool = "first_metadata_pool"
152 first_data_pool = "first_data_pool"
153 self.fs.mon_manager.raw_cluster_cmd('osd', 'pool', 'create', first_metadata_pool)
154 self.fs.mon_manager.raw_cluster_cmd('osd', 'pool', 'create', first_data_pool)
155 self.fs.mon_manager.raw_cluster_cmd('fs', 'new', first_fs, first_metadata_pool, first_data_pool)
156
157 # create metadata and data pools, then create the second filesystem with them
158 second_fs = "second_fs"
159 second_metadata_pool = "second_metadata_pool"
160 second_data_pool = "second_data_pool"
161 self.fs.mon_manager.raw_cluster_cmd('osd', 'pool', 'create', second_metadata_pool)
162 self.fs.mon_manager.raw_cluster_cmd('osd', 'pool', 'create', second_data_pool)
163 self.fs.mon_manager.raw_cluster_cmd('fs', 'new', second_fs, second_metadata_pool, second_data_pool)
164
165 # try to add 'second_metadata_pool' to 'first_fs' as a data pool
166 # Expecting EINVAL exit status because 'second_metadata_pool'
167 # is already in use by 'second_fs' as a metadata pool
168 try:
169 self.fs.mon_manager.raw_cluster_cmd('fs', 'add_data_pool', first_fs, second_metadata_pool)
170 except CommandFailedError as e:
171 self.assertEqual(e.exitstatus, errno.EINVAL)
172 else:
173 self.fail("Expected EINVAL because data pool is already in use as metadata pool for 'second_fs'")
174
175 class TestFsNew(TestAdminCommands):
176 """
177 Test "ceph fs new" subcommand.
178 """
179 MDSS_REQUIRED = 3
180
181 def test_fsnames_can_only_be_goodchars(self):
182 n = 'test_fsnames_can_only_be_goodchars'
183 metapoolname, datapoolname = n+'-testmetapool', n+'-testdatapool'
184 badname = n+'badname@#'
185
186 self.fs.mon_manager.raw_cluster_cmd('osd', 'pool', 'create',
187 metapoolname)
188 self.fs.mon_manager.raw_cluster_cmd('osd', 'pool', 'create',
189 datapoolname)
190
191 # test that a file system name containing chars outside "goodchars" fails
192 args = ['fs', 'new', badname, metapoolname, datapoolname]
193 proc = self.fs.mon_manager.run_cluster_cmd(args=args, stderr=StringIO(),
194 check_status=False)
195 self.assertIn('invalid chars', proc.stderr.getvalue().lower())
196
197 self.fs.mon_manager.raw_cluster_cmd('osd', 'pool', 'rm', metapoolname,
198 metapoolname,
199 '--yes-i-really-really-mean-it-not-faking')
200 self.fs.mon_manager.raw_cluster_cmd('osd', 'pool', 'rm', datapoolname,
201 datapoolname,
202 '--yes-i-really-really-mean-it-not-faking')
203
204 def test_new_default_ec(self):
205 """
206 That a new file system warns/fails with an EC default data pool.
207 """
208
209 self.mount_a.umount_wait(require_clean=True)
210 self.mds_cluster.delete_all_filesystems()
211 n = "test_new_default_ec"
212 self.setup_ec_pools(n)
213 try:
214 self.fs.mon_manager.raw_cluster_cmd('fs', 'new', n, n+"-meta", n+"-data")
215 except CommandFailedError as e:
216 if e.exitstatus == 22:
217 pass
218 else:
219 raise
220 else:
221 raise RuntimeError("expected failure")
222
223 def test_new_default_ec_force(self):
224 """
225 That a new file system succeeds with an EC default data pool with --force.
226 """
227
228 self.mount_a.umount_wait(require_clean=True)
229 self.mds_cluster.delete_all_filesystems()
230 n = "test_new_default_ec_force"
231 self.setup_ec_pools(n)
232 self.fs.mon_manager.raw_cluster_cmd('fs', 'new', n, n+"-meta", n+"-data", "--force")
233
234 def test_new_default_ec_no_overwrite(self):
235 """
236 That a new file system fails with an EC default data pool without overwrite.
237 """
238
239 self.mount_a.umount_wait(require_clean=True)
240 self.mds_cluster.delete_all_filesystems()
241 n = "test_new_default_ec_no_overwrite"
242 self.setup_ec_pools(n, overwrites=False)
243 try:
244 self.fs.mon_manager.raw_cluster_cmd('fs', 'new', n, n+"-meta", n+"-data")
245 except CommandFailedError as e:
246 if e.exitstatus == 22:
247 pass
248 else:
249 raise
250 else:
251 raise RuntimeError("expected failure")
252 # and even with --force !
253 try:
254 self.fs.mon_manager.raw_cluster_cmd('fs', 'new', n, n+"-meta", n+"-data", "--force")
255 except CommandFailedError as e:
256 if e.exitstatus == 22:
257 pass
258 else:
259 raise
260 else:
261 raise RuntimeError("expected failure")
262
263 def test_fs_new_pool_application_metadata(self):
264 """
265 That the application metadata set on the pools of a newly created filesystem are as expected.
266 """
267 self.mount_a.umount_wait(require_clean=True)
268 self.mds_cluster.delete_all_filesystems()
269 fs_name = "test_fs_new_pool_application"
270 keys = ['metadata', 'data']
271 pool_names = [fs_name+'-'+key for key in keys]
272 mon_cmd = self.fs.mon_manager.raw_cluster_cmd
273 for p in pool_names:
274 mon_cmd('osd', 'pool', 'create', p, '--pg_num_min', str(self.fs.pg_num_min))
275 mon_cmd('osd', 'pool', 'application', 'enable', p, 'cephfs')
276 mon_cmd('fs', 'new', fs_name, pool_names[0], pool_names[1])
277 for i in range(2):
278 self.check_pool_application_metadata_key_value(
279 pool_names[i], 'cephfs', keys[i], fs_name)
280
281 def test_fs_new_with_specific_id(self):
282 """
283 That a file system can be created with a specific ID.
284 """
285 fs_name = "test_fs_specific_id"
286 fscid = 100
287 keys = ['metadata', 'data']
288 pool_names = [fs_name+'-'+key for key in keys]
289 for p in pool_names:
290 self.run_cluster_cmd(f'osd pool create {p}')
291 self.run_cluster_cmd(f'fs new {fs_name} {pool_names[0]} {pool_names[1]} --fscid {fscid} --force')
292 self.fs.status().get_fsmap(fscid)
293 for i in range(2):
294 self.check_pool_application_metadata_key_value(pool_names[i], 'cephfs', keys[i], fs_name)
295
296 def test_fs_new_with_specific_id_idempotency(self):
297 """
298 That the command to create a file system with a specific ID is idempotent.
299 """
300 fs_name = "test_fs_specific_id"
301 fscid = 100
302 keys = ['metadata', 'data']
303 pool_names = [fs_name+'-'+key for key in keys]
304 for p in pool_names:
305 self.run_cluster_cmd(f'osd pool create {p}')
306 self.run_cluster_cmd(f'fs new {fs_name} {pool_names[0]} {pool_names[1]} --fscid {fscid} --force')
307 self.run_cluster_cmd(f'fs new {fs_name} {pool_names[0]} {pool_names[1]} --fscid {fscid} --force')
308 self.fs.status().get_fsmap(fscid)
309
310 def test_fs_new_with_specific_id_fails_without_force_flag(self):
311 """
312 That the command to create a file system with a specific ID fails without the '--force' flag.
313 """
314 fs_name = "test_fs_specific_id"
315 fscid = 100
316 keys = ['metadata', 'data']
317 pool_names = [fs_name+'-'+key for key in keys]
318 for p in pool_names:
319 self.run_cluster_cmd(f'osd pool create {p}')
320 try:
321 self.run_cluster_cmd(f'fs new {fs_name} {pool_names[0]} {pool_names[1]} --fscid {fscid}')
322 except CommandFailedError as ce:
323 self.assertEqual(ce.exitstatus, errno.EINVAL,
324 "invalid error code on creating a file system with a specific ID without the --force flag")
325 else:
326 self.fail("expected creating file system with specific ID without '--force' flag to fail")
327
328 def test_fs_new_with_specific_id_fails_already_in_use(self):
329 """
330 That creating a file system with an ID that is already in use fails.
331 """
332 fs_name = "test_fs_specific_id"
333 # file system ID already in use
334 fscid = self.fs.status().map['filesystems'][0]['id']
335 keys = ['metadata', 'data']
336 pool_names = [fs_name+'-'+key for key in keys]
337 for p in pool_names:
338 self.run_cluster_cmd(f'osd pool create {p}')
339 try:
340 self.run_cluster_cmd(f'fs new {fs_name} {pool_names[0]} {pool_names[1]} --fscid {fscid} --force')
341 except CommandFailedError as ce:
342 self.assertEqual(ce.exitstatus, errno.EINVAL,
343 "invalid error code on creating a file system with a specific ID that is already in use")
344 else:
345 self.fail("expected creating file system with ID already in use to fail")
346
347 def test_fs_new_metadata_pool_already_in_use(self):
348 """
349 That creating a file system with a metadata pool that is already in use fails.
350 """
351
352 # create metadata and data pools, then create the first filesystem with them
353 first_fs = "first_fs"
354 first_metadata_pool = "first_metadata_pool"
355 first_data_pool = "first_data_pool"
356 self.fs.mon_manager.raw_cluster_cmd('osd', 'pool', 'create', first_metadata_pool)
357 self.fs.mon_manager.raw_cluster_cmd('osd', 'pool', 'create', first_data_pool)
358 self.fs.mon_manager.raw_cluster_cmd('fs', 'new', first_fs, first_metadata_pool, first_data_pool)
359
360 second_fs = "second_fs"
361 second_data_pool = "second_data_pool"
362 self.fs.mon_manager.raw_cluster_cmd('osd', 'pool', 'create', second_data_pool)
363
364 # try to create new fs 'second_fs' with the following configuration
365 # metadata pool -> 'first_metadata_pool'
366 # data pool -> 'second_data_pool'
367 # Expecting EINVAL exit status because 'first_metadata_pool'
368 # is already in use by 'first_fs'
369 try:
370 self.fs.mon_manager.raw_cluster_cmd('fs', 'new', second_fs, first_metadata_pool, second_data_pool)
371 except CommandFailedError as e:
372 self.assertEqual(e.exitstatus, errno.EINVAL)
373 else:
374 self.fail("Expected EINVAL because metadata pool is already in use for 'first_fs'")
375
376 def test_fs_new_data_pool_already_in_use(self):
377 """
378 That creating a file system with a data pool that is already in use fails.
379 """
380
381 # create metadata and data pools, then create the first filesystem with them
382 first_fs = "first_fs"
383 first_metadata_pool = "first_metadata_pool"
384 first_data_pool = "first_data_pool"
385 self.fs.mon_manager.raw_cluster_cmd('osd', 'pool', 'create', first_metadata_pool)
386 self.fs.mon_manager.raw_cluster_cmd('osd', 'pool', 'create', first_data_pool)
387 self.fs.mon_manager.raw_cluster_cmd('fs', 'new', first_fs, first_metadata_pool, first_data_pool)
388
389 second_fs = "second_fs"
390 second_metadata_pool = "second_metadata_pool"
391 self.fs.mon_manager.raw_cluster_cmd('osd', 'pool', 'create', second_metadata_pool)
392
393 # try to create new fs 'second_fs' with the following configuration
394 # metadata pool -> 'second_metadata_pool'
395 # data pool -> 'first_data_pool'
396 # Expecting EINVAL exit status because 'first_data_pool'
397 # is already in use by 'first_fs'
398 try:
399 self.fs.mon_manager.raw_cluster_cmd('fs', 'new', second_fs, second_metadata_pool, first_data_pool)
400 except CommandFailedError as e:
401 self.assertEqual(e.exitstatus, errno.EINVAL)
402 else:
403 self.fail("Expected EINVAL because data pool is already in use for 'first_fs'")
404
405 def test_fs_new_metadata_and_data_pool_in_use_by_another_same_fs(self):
406 """
407 That creating a file system with metadata and data pools that are already in use by the same existing file system fails.
408 """
409
410 # create metadata and data pools, then create the first filesystem with them
411 first_fs = "first_fs"
412 first_metadata_pool = "first_metadata_pool"
413 first_data_pool = "first_data_pool"
414 self.fs.mon_manager.raw_cluster_cmd('osd', 'pool', 'create', first_metadata_pool)
415 self.fs.mon_manager.raw_cluster_cmd('osd', 'pool', 'create', first_data_pool)
416 self.fs.mon_manager.raw_cluster_cmd('fs', 'new', first_fs, first_metadata_pool, first_data_pool)
417
418 second_fs = "second_fs"
419
420 # try to create new fs 'second_fs' with the following configuration
421 # metadata pool -> 'first_metadata_pool'
422 # data pool -> 'first_data_pool'
423 # Expecting EINVAL exit status because 'first_metadata_pool' and 'first_data_pool'
424 # are already in use by 'first_fs'
425 try:
426 self.fs.mon_manager.raw_cluster_cmd('fs', 'new', second_fs, first_metadata_pool, first_data_pool)
427 except CommandFailedError as e:
428 self.assertEqual(e.exitstatus, errno.EINVAL)
429 else:
430 self.fail("Expected EINVAL because metadata and data pool is already in use for 'first_fs'")
431
432 def test_fs_new_metadata_and_data_pool_in_use_by_different_fs(self):
433 """
434 That creating a file system with metadata and data pools that are already in use by different file systems fails.
435 """
436
437 # create metadata and data pools, then create the first filesystem with them
438 first_fs = "first_fs"
439 first_metadata_pool = "first_metadata_pool"
440 first_data_pool = "first_data_pool"
441 self.fs.mon_manager.raw_cluster_cmd('osd', 'pool', 'create', first_metadata_pool)
442 self.fs.mon_manager.raw_cluster_cmd('osd', 'pool', 'create', first_data_pool)
443 self.fs.mon_manager.raw_cluster_cmd('fs', 'new', first_fs, first_metadata_pool, first_data_pool)
444
445 # create metadata and data pools, then create the second filesystem with them
446 second_fs = "second_fs"
447 second_metadata_pool = "second_metadata_pool"
448 second_data_pool = "second_data_pool"
449 self.fs.mon_manager.raw_cluster_cmd('osd', 'pool', 'create', second_metadata_pool)
450 self.fs.mon_manager.raw_cluster_cmd('osd', 'pool', 'create', second_data_pool)
451 self.fs.mon_manager.raw_cluster_cmd('fs', 'new', second_fs, second_metadata_pool, second_data_pool)
452
453 third_fs = "third_fs"
454
455 # try to create new fs 'third_fs' with the following configuration
456 # metadata pool -> 'first_metadata_pool'
457 # data pool -> 'second_data_pool'
458 # Expecting EINVAL exit status because 'first_metadata_pool' and 'second_data_pool'
459 # are already in use by 'first_fs' and 'second_fs'
460 try:
461 self.fs.mon_manager.raw_cluster_cmd('fs', 'new', third_fs, first_metadata_pool, second_data_pool)
462 except CommandFailedError as e:
463 self.assertEqual(e.exitstatus, errno.EINVAL)
464 else:
465 self.fail("Expected EINVAL because metadata and data pool is already in use for 'first_fs' and 'second_fs'")
466
467 def test_fs_new_interchange_already_in_use_metadata_and_data_pool_of_same_fs(self):
468 """
469 That creating a file system reusing an existing file system's metadata and data pools with their roles swapped fails.
470 """
471
472 # create metadata and data pools, then create the first filesystem with them
473 first_fs = "first_fs"
474 first_metadata_pool = "first_metadata_pool"
475 first_data_pool = "first_data_pool"
476 self.fs.mon_manager.raw_cluster_cmd('osd', 'pool', 'create', first_metadata_pool)
477 self.fs.mon_manager.raw_cluster_cmd('osd', 'pool', 'create', first_data_pool)
478 self.fs.mon_manager.raw_cluster_cmd('fs', 'new', first_fs, first_metadata_pool, first_data_pool)
479
480 second_fs = "second_fs"
481
482 # try to create new fs 'second_fs' with the following configuration
483 # metadata pool -> 'first_data_pool' (already used as data pool for 'first_fs')
484 # data pool -> 'first_metadata_pool' (already used as metadata pool for 'first_fs')
485 # Expecting EINVAL exit status because 'first_data_pool' and 'first_metadata_pool'
486 # are already in use by 'first_fs'
487 try:
488 self.fs.mon_manager.raw_cluster_cmd('fs', 'new', second_fs, first_data_pool, first_metadata_pool)
489 except CommandFailedError as e:
490 self.assertEqual(e.exitstatus, errno.EINVAL)
491 else:
492 self.fail("Expected EINVAL because metadata and data pool is already in use for 'first_fs'")
493
494 def test_fs_new_interchange_already_in_use_metadata_and_data_pool_of_different_fs(self):
495 """
496 That creating a file system with pools from two different file systems, with their metadata/data roles swapped, fails.
497 """
498
499 # create metadata and data pools, then create the first filesystem with them
500 first_fs = "first_fs"
501 first_metadata_pool = "first_metadata_pool"
502 first_data_pool = "first_data_pool"
503 self.fs.mon_manager.raw_cluster_cmd('osd', 'pool', 'create', first_metadata_pool)
504 self.fs.mon_manager.raw_cluster_cmd('osd', 'pool', 'create', first_data_pool)
505 self.fs.mon_manager.raw_cluster_cmd('fs', 'new', first_fs, first_metadata_pool, first_data_pool)
506
507 # create metadata and data pools, then create the second filesystem with them
508 second_fs = "second_fs"
509 second_metadata_pool = "second_metadata_pool"
510 second_data_pool = "second_data_pool"
511 self.fs.mon_manager.raw_cluster_cmd('osd', 'pool', 'create', second_metadata_pool)
512 self.fs.mon_manager.raw_cluster_cmd('osd', 'pool', 'create', second_data_pool)
513 self.fs.mon_manager.raw_cluster_cmd('fs', 'new', second_fs, second_metadata_pool, second_data_pool)
514
515 third_fs = "third_fs"
516
517 # try to create new fs 'third_fs' with the following configuration
518 # metadata pool -> 'first_data_pool' (already used as data pool for 'first_fs')
519 # data pool -> 'second_metadata_pool' (already used as metadata pool for 'second_fs')
520 # Expecting EINVAL exit status because 'first_data_pool' and 'second_metadata_pool'
521 # are already in use by 'first_fs' and 'second_fs'
522 try:
523 self.fs.mon_manager.raw_cluster_cmd('fs', 'new', third_fs, first_data_pool, second_metadata_pool)
524 except CommandFailedError as e:
525 self.assertEqual(e.exitstatus, errno.EINVAL)
526 else:
527 self.fail("Expected EINVAL because metadata and data pool is already in use for 'first_fs' and 'second_fs'")
528
529 def test_fs_new_metadata_pool_already_in_use_with_rbd(self):
530 """
531 That creating a new file system with a metadata pool already used by rbd fails.
532 """
533
534 # create pool and initialise with rbd
535 new_pool = "new_pool"
536 self.fs.mon_manager.raw_cluster_cmd('osd', 'pool', 'create', new_pool)
537 self.ctx.cluster.run(args=['rbd', 'pool', 'init', new_pool])
538
539 new_fs = "new_fs"
540 new_data_pool = "new_data_pool"
541
542 self.fs.mon_manager.raw_cluster_cmd('osd', 'pool', 'create', new_data_pool)
543
544 # try to create new fs 'new_fs' with the following configuration
545 # metadata pool -> 'new_pool' (already used by rbd app)
546 # data pool -> 'new_data_pool'
547 # Expecting EINVAL exit status because 'new_pool' is already in use by the 'rbd' app
548 try:
549 self.fs.mon_manager.raw_cluster_cmd('fs', 'new', new_fs, new_pool, new_data_pool)
550 except CommandFailedError as e:
551 self.assertEqual(e.exitstatus, errno.EINVAL)
552 else:
553 self.fail("Expected EINVAL because metadata pool is already in use for rbd")
554
555 def test_fs_new_data_pool_already_in_use_with_rbd(self):
556 """
557 That creating a new file system with a data pool already used by rbd fails.
558 """
559
560 # create pool and initialise with rbd
561 new_pool = "new_pool"
562 self.fs.mon_manager.raw_cluster_cmd('osd', 'pool', 'create', new_pool)
563 self.ctx.cluster.run(args=['rbd', 'pool', 'init', new_pool])
564
565 new_fs = "new_fs"
566 new_metadata_pool = "new_metadata_pool"
567
568 self.fs.mon_manager.raw_cluster_cmd('osd', 'pool', 'create', new_metadata_pool)
569
570 # try to create new fs 'new_fs' with the following configuration
571 # metadata pool -> 'new_metadata_pool'
572 # data pool -> 'new_pool' (already used by rbd app)
573 # Expecting EINVAL exit status because 'new_pool' is already in use by the 'rbd' app
574 try:
575 self.fs.mon_manager.raw_cluster_cmd('fs', 'new', new_fs, new_metadata_pool, new_pool)
576 except CommandFailedError as e:
577 self.assertEqual(e.exitstatus, errno.EINVAL)
578 else:
579 self.fail("Expected EINVAL because data pool is already in use for rbd")
580
581 class TestRenameCommand(TestAdminCommands):
582 """
583 Tests for rename command.
584 """
585
586 CLIENTS_REQUIRED = 1
587 MDSS_REQUIRED = 2
588
589 def test_fs_rename(self):
590 """
591 That the file system can be renamed, and the application metadata set on its pools are as expected.
592 """
593 # Renaming the file system breaks this mount as the client uses
594 # file system specific authorization. The client cannot read
595 # or write even if the client's cephx ID caps are updated to access
596 # the new file system name without the client being unmounted and
597 # re-mounted.
598 self.mount_a.umount_wait(require_clean=True)
599 orig_fs_name = self.fs.name
600 new_fs_name = 'new_cephfs'
601 client_id = 'test_new_cephfs'
602
603 self.run_cluster_cmd(f'fs rename {orig_fs_name} {new_fs_name} --yes-i-really-mean-it')
604
605 # authorize a cephx ID access to the renamed file system.
606 # use the ID to write to the file system.
607 self.fs.name = new_fs_name
608 keyring = self.fs.authorize(client_id, ('/', 'rw'))
609 keyring_path = self.mount_a.client_remote.mktemp(data=keyring)
610 self.mount_a.remount(client_id=client_id,
611 client_keyring_path=keyring_path,
612 cephfs_mntpt='/',
613 cephfs_name=self.fs.name)
614 filedata, filename = 'some data on fs', 'file_on_fs'
615 filepath = os_path_join(self.mount_a.hostfs_mntpt, filename)
616 self.mount_a.write_file(filepath, filedata)
617 self.check_pool_application_metadata_key_value(
618 self.fs.get_data_pool_name(), 'cephfs', 'data', new_fs_name)
619 self.check_pool_application_metadata_key_value(
620 self.fs.get_metadata_pool_name(), 'cephfs', 'metadata', new_fs_name)
621
622 # cleanup
623 self.mount_a.umount_wait()
624 self.run_cluster_cmd(f'auth rm client.{client_id}')
625
626 def test_fs_rename_idempotency(self):
627 """
628 That the file system rename operation is idempotent.
629 """
630 # Renaming the file system breaks this mount as the client uses
631 # file system specific authorization.
632 self.mount_a.umount_wait(require_clean=True)
633 orig_fs_name = self.fs.name
634 new_fs_name = 'new_cephfs'
635
636 self.run_cluster_cmd(f'fs rename {orig_fs_name} {new_fs_name} --yes-i-really-mean-it')
637 self.run_cluster_cmd(f'fs rename {orig_fs_name} {new_fs_name} --yes-i-really-mean-it')
638
639 # original file system name does not appear in `fs ls` command
640 self.assertFalse(self.fs.exists())
641 self.fs.name = new_fs_name
642 self.assertTrue(self.fs.exists())
643
644 def test_fs_rename_fs_new_fails_with_old_fsname_existing_pools(self):
645 """
646 That after renaming a file system, creating a file system with
647 old name and existing FS pools fails.
648 """
649 # Renaming the file system breaks this mount as the client uses
650 # file system specific authorization.
651 self.mount_a.umount_wait(require_clean=True)
652 orig_fs_name = self.fs.name
653 new_fs_name = 'new_cephfs'
654 data_pool = self.fs.get_data_pool_name()
655 metadata_pool = self.fs.get_metadata_pool_name()
656 self.run_cluster_cmd(f'fs rename {orig_fs_name} {new_fs_name} --yes-i-really-mean-it')
657
658 try:
659 self.run_cluster_cmd(f"fs new {orig_fs_name} {metadata_pool} {data_pool}")
660 except CommandFailedError as ce:
661 self.assertEqual(ce.exitstatus, errno.EINVAL,
662 "invalid error code on creating a new file system with old "
663 "name and existing pools.")
664 else:
665 self.fail("expected creating new file system with old name and "
666 "existing pools to fail.")
667
668 try:
669 self.run_cluster_cmd(f"fs new {orig_fs_name} {metadata_pool} {data_pool} --force")
670 except CommandFailedError as ce:
671 self.assertEqual(ce.exitstatus, errno.EINVAL,
672 "invalid error code on creating a new file system with old "
673 "name, existing pools and --force flag.")
674 else:
675 self.fail("expected creating new file system with old name, "
676 "existing pools, and --force flag to fail.")
677
678 try:
679 self.run_cluster_cmd(f"fs new {orig_fs_name} {metadata_pool} {data_pool} "
680 "--allow-dangerous-metadata-overlay")
681 except CommandFailedError as ce:
682 self.assertEqual(ce.exitstatus, errno.EINVAL,
683 "invalid error code on creating a new file system with old name, "
684 "existing pools and --allow-dangerous-metadata-overlay flag.")
685 else:
686 self.fail("expected creating new file system with old name, "
687 "existing pools, and --allow-dangerous-metadata-overlay flag to fail.")
688
689 def test_fs_rename_fails_without_yes_i_really_mean_it_flag(self):
690 """
691 That renaming a file system without '--yes-i-really-mean-it' flag fails.
692 """
693 try:
694 self.run_cluster_cmd(f"fs rename {self.fs.name} new_fs")
695 except CommandFailedError as ce:
696 self.assertEqual(ce.exitstatus, errno.EPERM,
697 "invalid error code on renaming a file system without the "
698 "'--yes-i-really-mean-it' flag")
699 else:
700 self.fail("expected renaming of file system without the "
701 "'--yes-i-really-mean-it' flag to fail")
702
703 def test_fs_rename_fails_for_non_existent_fs(self):
704 """
705 That renaming a non-existent file system fails.
706 """
707 try:
708 self.run_cluster_cmd("fs rename non_existent_fs new_fs --yes-i-really-mean-it")
709 except CommandFailedError as ce:
710 self.assertEqual(ce.exitstatus, errno.ENOENT, "invalid error code on renaming a non-existent fs")
711 else:
712 self.fail("expected renaming of a non-existent file system to fail")
713
714 def test_fs_rename_fails_new_name_already_in_use(self):
715 """
716 That renaming a file system fails if the new name refers to an existing file system.
717 """
718 self.fs2 = self.mds_cluster.newfs(name='cephfs2', create=True)
719
720 try:
721 self.run_cluster_cmd(f"fs rename {self.fs.name} {self.fs2.name} --yes-i-really-mean-it")
722 except CommandFailedError as ce:
723 self.assertEqual(ce.exitstatus, errno.EINVAL,
724 "invalid error code on renaming to a fs name that is already in use")
725 else:
726 self.fail("expected renaming to a new file system name that is already in use to fail.")
727
728 def test_fs_rename_fails_with_mirroring_enabled(self):
729 """
730 That renaming a file system fails if mirroring is enabled on it.
731 """
732 orig_fs_name = self.fs.name
733 new_fs_name = 'new_cephfs'
734
735 self.run_cluster_cmd(f'fs mirror enable {orig_fs_name}')
736 try:
737 self.run_cluster_cmd(f'fs rename {orig_fs_name} {new_fs_name} --yes-i-really-mean-it')
738 except CommandFailedError as ce:
739 self.assertEqual(ce.exitstatus, errno.EPERM, "invalid error code on renaming a mirrored file system")
740 else:
741 self.fail("expected renaming of a mirrored file system to fail")
742 self.run_cluster_cmd(f'fs mirror disable {orig_fs_name}')
743
744
745 class TestDump(CephFSTestCase):
746 CLIENTS_REQUIRED = 0
747 MDSS_REQUIRED = 1
748
749 def test_fs_dump_epoch(self):
750 """
751 That dumping a specific epoch works.
752 """
753
754 status1 = self.fs.status()
755 status2 = self.fs.status(epoch=status1["epoch"]-1)
756 self.assertEqual(status1["epoch"], status2["epoch"]+1)
757
758 def test_fsmap_trim(self):
759 """
760 That the fsmap is trimmed normally.
761 """
762
763 paxos_service_trim_min = 25
764 self.config_set('mon', 'paxos_service_trim_min', paxos_service_trim_min)
765 mon_max_mdsmap_epochs = 20
766 self.config_set('mon', 'mon_max_mdsmap_epochs', mon_max_mdsmap_epochs)
767
768 status = self.fs.status()
769 epoch = status["epoch"]
770
771 # for N mutations
772 mutations = paxos_service_trim_min + mon_max_mdsmap_epochs
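# Rationale (sketch, based on the option semantics): the mons retain at most
# mon_max_mdsmap_epochs FSMap epochs and only trim once at least
# paxos_service_trim_min versions are trimmable, so generating the sum of the
# two should push the starting epoch out of the retained window and let a
# trim actually happen.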
773 b = False
774 for i in range(mutations):
775 self.fs.set_joinable(b)
776 b = not b
777
778 time.sleep(10) # for tick/compaction
779
780 try:
781 self.fs.status(epoch=epoch)
782 except CommandFailedError as e:
783 self.assertEqual(e.exitstatus, errno.ENOENT, "invalid error code when trying to fetch FSMap that was trimmed")
784 else:
785 self.fail("trimming did not occur as expected")
786
787 def test_fsmap_force_trim(self):
788 """
789 That the fsmap is trimmed forcefully.
790 """
791
792 status = self.fs.status()
793 epoch = status["epoch"]
794
795 paxos_service_trim_min = 1
796 self.config_set('mon', 'paxos_service_trim_min', paxos_service_trim_min)
797 mon_mds_force_trim_to = epoch+1
798 self.config_set('mon', 'mon_mds_force_trim_to', mon_mds_force_trim_to)
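# mon_mds_force_trim_to asks the mons to trim FSMap epochs up to the given
# epoch even when the normal retention rules would keep them (hence
# "forcefully" in the docstring); the map mutation and sleep below give the
# trim a chance to run. This is a sketch of the intent based on the option's
# semantics.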
799
800 # force a new fsmap
801 self.fs.set_joinable(False)
802 time.sleep(10) # for tick/compaction
803
804 status = self.fs.status()
805 log.debug(f"new epoch is {status['epoch']}")
806 self.fs.status(epoch=epoch+1) # epoch+1 is not trimmed, may not == status["epoch"]
807
808 try:
809 self.fs.status(epoch=epoch)
810 except CommandFailedError as e:
811 self.assertEqual(e.exitstatus, errno.ENOENT, "invalid error code when trying to fetch FSMap that was trimmed")
812 else:
813 self.fail("trimming did not occur as expected")
814
815
816 class TestRequiredClientFeatures(CephFSTestCase):
817 CLIENTS_REQUIRED = 0
818 MDSS_REQUIRED = 1
819
820 def test_required_client_features(self):
821 """
822 That `ceph fs required_client_features` command functions.
823 """
824
825 def is_required(index):
826 out = self.fs.mon_manager.raw_cluster_cmd('fs', 'get', self.fs.name, '--format=json-pretty')
827 features = json.loads(out)['mdsmap']['required_client_features']
828 if "feature_{0}".format(index) in features:
829 return True
830 return False
831
832 features = json.loads(self.fs.mon_manager.raw_cluster_cmd('fs', 'feature', 'ls', '--format=json-pretty'))
833 self.assertGreater(len(features), 0)
834
835 for f in features:
836 self.fs.required_client_features('rm', str(f['index']))
837
838 for f in features:
839 index = f['index']
840 feature = f['name']
841 if feature == 'reserved':
842 feature = str(index)
843
844 if index % 3 == 0:
845 continue
846 self.fs.required_client_features('add', feature)
847 self.assertTrue(is_required(index))
848
849 if index % 2 == 0:
850 continue
851 self.fs.required_client_features('rm', feature)
852 self.assertFalse(is_required(index))
853
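# Note on the 'reserved' tests below: `ceph fs feature ls` reports unassigned
# feature bits under the name 'reserved'; as the following tests exercise,
# such bits cannot be added or removed by name, only by their numeric index.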
854 def test_required_client_feature_add_reserved(self):
855 """
856 That `ceph fs required_client_features X add reserved` fails.
857 """
858
859 p = self.fs.required_client_features('add', 'reserved', check_status=False, stderr=StringIO())
860 self.assertIn('Invalid feature name', p.stderr.getvalue())
861
862 def test_required_client_feature_rm_reserved(self):
863 """
864 That `ceph fs required_client_features X rm reserved` fails.
865 """
866
867 p = self.fs.required_client_features('rm', 'reserved', check_status=False, stderr=StringIO())
868 self.assertIn('Invalid feature name', p.stderr.getvalue())
869
870 def test_required_client_feature_add_reserved_bit(self):
871 """
872 That `ceph fs required_client_features X add <reserved_bit>` passes.
873 """
874
875 p = self.fs.required_client_features('add', '1', stderr=StringIO())
876 self.assertIn("added feature 'reserved' to required_client_features", p.stderr.getvalue())
877
878 def test_required_client_feature_rm_reserved_bit(self):
879 """
880 That `ceph fs required_client_features X rm <reserved_bit>` passes.
881 """
882
883 self.fs.required_client_features('add', '1')
884 p = self.fs.required_client_features('rm', '1', stderr=StringIO())
885 self.assertIn("removed feature 'reserved' from required_client_features", p.stderr.getvalue())
886
887 class TestCompatCommands(CephFSTestCase):
888 """Tests for manipulating the MDSMap compat and incompat feature sets.
889 """
890
891 CLIENTS_REQUIRED = 0
892 MDSS_REQUIRED = 3
893
894 def test_add_compat(self):
895 """
896 Test adding a compat.
897 """
898
899 self.fs.fail()
900 self.fs.add_compat(63, 'placeholder')
901 mdsmap = self.fs.get_mds_map()
902 self.assertIn("feature_63", mdsmap['compat']['compat'])
903
904 def test_add_incompat(self):
905 """
906 Test adding an incompat.
907 """
908
909 self.fs.fail()
910 self.fs.add_incompat(63, 'placeholder')
911 mdsmap = self.fs.get_mds_map()
912 log.info(f"{mdsmap}")
913 self.assertIn("feature_63", mdsmap['compat']['incompat'])
914
915 def test_rm_compat(self):
916 """
917 Test removing a compat.
918 """
919
920 self.fs.fail()
921 self.fs.add_compat(63, 'placeholder')
922 self.fs.rm_compat(63)
923 mdsmap = self.fs.get_mds_map()
924 self.assertNotIn("feature_63", mdsmap['compat']['compat'])
925
926 def test_rm_incompat(self):
927 """
928 Test removing an incompat.
929 """
930
931 self.fs.fail()
932 self.fs.add_incompat(63, 'placeholder')
933 self.fs.rm_incompat(63)
934 mdsmap = self.fs.get_mds_map()
935 self.assertNotIn("feature_63", mdsmap['compat']['incompat'])
936
937 def test_standby_compat(self):
938 """
939 That adding a compat does not prevent standbys from joining.
940 """
941
942 self.fs.fail()
943 self.fs.add_compat(63, "placeholder")
944 self.fs.set_joinable()
945 self.fs.wait_for_daemons()
946 mdsmap = self.fs.get_mds_map()
947 self.assertIn("feature_63", mdsmap['compat']['compat'])
948
949 def test_standby_incompat_reject(self):
950 """
951 That adding an incompat feature prevents incompatible daemons from joining.
952 """
953
954 self.fs.fail()
955 self.fs.add_incompat(63, "placeholder")
956 self.fs.set_joinable()
957 try:
958 self.fs.wait_for_daemons(timeout=60)
959 except RuntimeError as e:
960 if "Timed out waiting for MDS daemons to become healthy" in str(e):
961 pass
962 else:
963 raise
964 else:
965 self.fail()
966
967 def test_standby_incompat_upgrade(self):
968 """
969 That an MDS can upgrade the compat of a fs.
970 """
971
972 self.fs.fail()
973 self.fs.rm_incompat(1)
974 self.fs.set_joinable()
975 self.fs.wait_for_daemons()
976 mdsmap = self.fs.get_mds_map()
977 self.assertIn("feature_1", mdsmap['compat']['incompat'])
978
979 def test_standby_replay_not_upgradeable(self):
980 """
981 That the mons will not upgrade the MDSMap compat if standby-replay is
982 enabled.
983 """
984
985 self.fs.fail()
986 self.fs.rm_incompat(1)
987 self.fs.set_allow_standby_replay(True)
988 self.fs.set_joinable()
989 try:
990 self.fs.wait_for_daemons(timeout=60)
991 except RuntimeError as e:
992 if "Timed out waiting for MDS daemons to become healthy" in str(e):
993 pass
994 else:
995 raise
996 else:
997 self.fail()
998
999 def test_standby_incompat_reject_multifs(self):
1000 """
1001 Like test_standby_incompat_reject but with a second fs.
1002 """
1003
1004 fs2 = self.mds_cluster.newfs(name="cephfs2", create=True)
1005 fs2.fail()
1006 fs2.add_incompat(63, 'placeholder')
1007 fs2.set_joinable()
1008 try:
1009 fs2.wait_for_daemons(timeout=60)
1010 except RuntimeError as e:
1011 if "Timed out waiting for MDS daemons to become healthy" in str(e):
1012 pass
1013 else:
1014 raise
1015 else:
1016 self.fail()
1017 # check that self.fs did not lose its MDS and that the standbys did not suicide
1018 self.fs.wait_for_daemons()
1019 mdsmap = fs2.get_mds_map()
1020 self.assertIn("feature_63", mdsmap['compat']['incompat'])
1021
1022 class TestConfigCommands(CephFSTestCase):
1023 """
1024 Test that daemons and clients respond to the otherwise rarely-used
1025 runtime config modification operations.
1026 """
1027
1028 CLIENTS_REQUIRED = 1
1029 MDSS_REQUIRED = 1
1030
1031 def test_ceph_config_show(self):
1032 """
1033 That I can successfully show MDS configuration.
1034 """
1035
1036 names = self.fs.get_rank_names()
1037 for n in names:
1038 s = self.fs.mon_manager.raw_cluster_cmd("config", "show", "mds."+n)
1039 self.assertTrue("NAME" in s)
1040 self.assertTrue("mon_host" in s)
1041
1042
1043 def test_client_config(self):
1044 """
1045 That I can successfully issue asok "config set" commands
1046
1047
1048 """
1049
1050 if not isinstance(self.mount_a, FuseMount):
1051 self.skipTest("Test only applies to FUSE clients")
1052
1053 test_key = "client_cache_size"
1054 test_val = "123"
1055 self.mount_a.admin_socket(['config', 'set', test_key, test_val])
1056 out = self.mount_a.admin_socket(['config', 'get', test_key])
1057 self.assertEqual(out[test_key], test_val)
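# Note: admin_socket() drives the client's asok file; outside teuthology the
# rough CLI equivalent is `ceph daemon <client-name> config set/get <key>`
# (illustrative; the exact daemon name depends on the deployment).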
1058
1059
1060 def test_mds_config_asok(self):
1061 test_key = "mds_max_purge_ops"
1062 test_val = "123"
1063 self.fs.mds_asok(['config', 'set', test_key, test_val])
1064 out = self.fs.mds_asok(['config', 'get', test_key])
1065 self.assertEqual(out[test_key], test_val)
1066
1067 def test_mds_dump_cache_asok(self):
1068 cache_file = "cache_file"
1069 timeout = "1"
1070 self.fs.rank_asok(['dump', 'cache', cache_file, timeout])
1071
1072 def test_mds_config_tell(self):
1073 test_key = "mds_max_purge_ops"
1074 test_val = "123"
1075
1076 self.fs.rank_tell(['injectargs', "--{0}={1}".format(test_key, test_val)])
1077
1078 # Read it back with "config get" via tell to verify the injected value took effect
1079 out = self.fs.rank_tell(['config', 'get', test_key])
1080 self.assertEqual(out[test_key], test_val)
1081
1082
1083 class TestMirroringCommands(CephFSTestCase):
1084 CLIENTS_REQUIRED = 1
1085 MDSS_REQUIRED = 1
1086
1087 def _enable_mirroring(self, fs_name):
1088 self.fs.mon_manager.raw_cluster_cmd("fs", "mirror", "enable", fs_name)
1089
1090 def _disable_mirroring(self, fs_name):
1091 self.fs.mon_manager.raw_cluster_cmd("fs", "mirror", "disable", fs_name)
1092
1093 def _add_peer(self, fs_name, peer_spec, remote_fs_name):
1094 peer_uuid = str(uuid.uuid4())
1095 self.fs.mon_manager.raw_cluster_cmd("fs", "mirror", "peer_add", fs_name, peer_uuid, peer_spec, remote_fs_name)
1096
1097 def _remove_peer(self, fs_name, peer_uuid):
1098 self.fs.mon_manager.raw_cluster_cmd("fs", "mirror", "peer_remove", fs_name, peer_uuid)
1099
1100 def _verify_mirroring(self, fs_name, flag_str):
1101 status = self.fs.status()
1102 fs_map = status.get_fsmap_byname(fs_name)
1103 if flag_str == 'enabled':
1104 self.assertTrue('mirror_info' in fs_map)
1105 elif flag_str == 'disabled':
1106 self.assertTrue('mirror_info' not in fs_map)
1107 else:
1108 raise RuntimeError(f'invalid flag_str {flag_str}')
1109
1110 def _get_peer_uuid(self, fs_name, peer_spec):
1111 status = self.fs.status()
1112 fs_map = status.get_fsmap_byname(fs_name)
1113 mirror_info = fs_map.get('mirror_info', None)
1114 self.assertTrue(mirror_info is not None)
1115 for peer_uuid, remote in mirror_info['peers'].items():
1116 client_name = remote['remote']['client_name']
1117 cluster_name = remote['remote']['cluster_name']
1118 spec = f'{client_name}@{cluster_name}'
1119 if spec == peer_spec:
1120 return peer_uuid
1121 return None
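# _get_peer_uuid() assumes each entry of mirror_info['peers'] looks roughly
# like (abridged): {"<peer-uuid>": {"remote": {"client_name": ...,
# "cluster_name": ...}}}; only client_name and cluster_name are consulted.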
1122
1123 def test_mirroring_command(self):
1124 """basic mirroring command test -- enable, disable mirroring on a
1125 filesystem"""
1126 self._enable_mirroring(self.fs.name)
1127 self._verify_mirroring(self.fs.name, "enabled")
1128 self._disable_mirroring(self.fs.name)
1129 self._verify_mirroring(self.fs.name, "disabled")
1130
1131 def test_mirroring_peer_commands(self):
1132 """test adding and removing peers to a mirror enabled filesystem"""
1133 self._enable_mirroring(self.fs.name)
1134 self._add_peer(self.fs.name, "client.site-b@site-b", "fs_b")
1135 self._add_peer(self.fs.name, "client.site-c@site-c", "fs_c")
1136 self._verify_mirroring(self.fs.name, "enabled")
1137 uuid_peer_b = self._get_peer_uuid(self.fs.name, "client.site-b@site-b")
1138 uuid_peer_c = self._get_peer_uuid(self.fs.name, "client.site-c@site-c")
1139 self.assertTrue(uuid_peer_b is not None)
1140 self.assertTrue(uuid_peer_c is not None)
1141 self._remove_peer(self.fs.name, uuid_peer_b)
1142 self._remove_peer(self.fs.name, uuid_peer_c)
1143 self._disable_mirroring(self.fs.name)
1144 self._verify_mirroring(self.fs.name, "disabled")
1145
1146 def test_mirroring_command_idempotency(self):
1147 """test to check idempotency of mirroring family of commands """
1148 self._enable_mirroring(self.fs.name)
1149 self._verify_mirroring(self.fs.name, "enabled")
1150 self._enable_mirroring(self.fs.name)
1151 # add peer
1152 self._add_peer(self.fs.name, "client.site-b@site-b", "fs_b")
1153 uuid_peer_b1 = self._get_peer_uuid(self.fs.name, "client.site-b@site-b")
1154 self.assertTrue(uuid_peer_b1 is not None)
1155 # adding the peer again should be idempotent
1156 self._add_peer(self.fs.name, "client.site-b@site-b", "fs_b")
1157 uuid_peer_b2 = self._get_peer_uuid(self.fs.name, "client.site-b@site-b")
1158 self.assertTrue(uuid_peer_b2 is not None)
1159 self.assertTrue(uuid_peer_b1 == uuid_peer_b2)
1160 # remove peer
1161 self._remove_peer(self.fs.name, uuid_peer_b1)
1162 uuid_peer_b3 = self._get_peer_uuid(self.fs.name, "client.site-b@site-b")
1163 self.assertTrue(uuid_peer_b3 is None)
1164 # removing the peer again should be idempotent
1165 self._remove_peer(self.fs.name, uuid_peer_b1)
1166 self._disable_mirroring(self.fs.name)
1167 self._verify_mirroring(self.fs.name, "disabled")
1168 self._disable_mirroring(self.fs.name)
1169
1170 def test_mirroring_disable_with_peers(self):
1171 """test disabling mirroring for a filesystem with active peers"""
1172 self._enable_mirroring(self.fs.name)
1173 self._add_peer(self.fs.name, "client.site-b@site-b", "fs_b")
1174 self._verify_mirroring(self.fs.name, "enabled")
1175 uuid_peer_b = self._get_peer_uuid(self.fs.name, "client.site-b@site-b")
1176 self.assertTrue(uuid_peer_b is not None)
1177 self._disable_mirroring(self.fs.name)
1178 self._verify_mirroring(self.fs.name, "disabled")
1179 # enable mirroring to check old peers
1180 self._enable_mirroring(self.fs.name)
1181 self._verify_mirroring(self.fs.name, "enabled")
1182 # peer should be gone
1183 uuid_peer_b = self._get_peer_uuid(self.fs.name, "client.site-b@site-b")
1184 self.assertTrue(uuid_peer_b is None)
1185 self._disable_mirroring(self.fs.name)
1186 self._verify_mirroring(self.fs.name, "disabled")
1187
1188 def test_mirroring_with_filesystem_reset(self):
1189 """test to verify mirroring state post filesystem reset"""
1190 self._enable_mirroring(self.fs.name)
1191 self._add_peer(self.fs.name, "client.site-b@site-b", "fs_b")
1192 self._verify_mirroring(self.fs.name, "enabled")
1193 uuid_peer_b = self._get_peer_uuid(self.fs.name, "client.site-b@site-b")
1194 self.assertTrue(uuid_peer_b is not None)
1195 # reset filesystem
1196 self.fs.fail()
1197 self.fs.reset()
1198 self.fs.wait_for_daemons()
1199 self._verify_mirroring(self.fs.name, "disabled")
1200
1201
1202 class TestFsAuthorize(CephFSTestCase):
1203 client_id = 'testuser'
1204 client_name = 'client.' + client_id
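# The FS_AUTH_CAPS tuples used throughout these tests are (path, perms[, extra])
# entries passed to Filesystem.authorize(), e.g. ('/', 'rw') or
# ('/', 'rw', 'root_squash'); the returned keyring is then used to remount
# mount_a as the test client.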
1205
1206 def test_single_path_r(self):
1207 PERM = 'r'
1208 FS_AUTH_CAPS = (('/', PERM),)
1209 self.captester = CapTester()
1210 self.setup_test_env(FS_AUTH_CAPS)
1211
1212 self.captester.run_mon_cap_tests(self.fs, self.client_id)
1213 self.captester.run_mds_cap_tests(PERM)
1214
1215 def test_single_path_rw(self):
1216 PERM = 'rw'
1217 FS_AUTH_CAPS = (('/', PERM),)
1218 self.captester = CapTester()
1219 self.setup_test_env(FS_AUTH_CAPS)
1220
1221 self.captester.run_mon_cap_tests(self.fs, self.client_id)
1222 self.captester.run_mds_cap_tests(PERM)
1223
1224 def test_single_path_rootsquash(self):
1225 PERM = 'rw'
1226 FS_AUTH_CAPS = (('/', PERM, 'root_squash'),)
1227 self.captester = CapTester()
1228 self.setup_test_env(FS_AUTH_CAPS)
1229
1230 # testing MDS caps...
1231 # Since root_squash is set in client caps, client can read but not
1232 # write even though the access level is set to "rw".
1233 self.captester.conduct_pos_test_for_read_caps()
1234 self.captester.conduct_neg_test_for_write_caps(sudo_write=True)
1235
1236 def test_single_path_authorize_on_nonalphanumeric_fsname(self):
1237 """
1238 That fs authorize command works on filesystems with names having [_.-]
1239 characters
1240 """
1241 self.mount_a.umount_wait(require_clean=True)
1242 self.mds_cluster.delete_all_filesystems()
1243 fs_name = "cephfs-_."
1244 self.fs = self.mds_cluster.newfs(name=fs_name)
1245 self.fs.wait_for_daemons()
1246 self.run_cluster_cmd(f'auth caps client.{self.mount_a.client_id} '
1247 f'mon "allow r" '
1248 f'osd "allow rw pool={self.fs.get_data_pool_name()}" '
1249 f'mds allow')
1250 self.mount_a.remount(cephfs_name=self.fs.name)
1251 PERM = 'rw'
1252 FS_AUTH_CAPS = (('/', PERM),)
1253 self.captester = CapTester()
1254 self.setup_test_env(FS_AUTH_CAPS)
1255 self.captester.run_mds_cap_tests(PERM)
1256
1257 def test_multiple_path_r(self):
1258 PERM = 'r'
1259 FS_AUTH_CAPS = (('/dir1/dir12', PERM), ('/dir2/dir22', PERM))
1260 for c in FS_AUTH_CAPS:
1261 self.mount_a.run_shell(f'mkdir -p .{c[0]}')
1262 self.captesters = (CapTester(), CapTester())
1263 self.setup_test_env(FS_AUTH_CAPS)
1264
1265 self.run_cap_test_one_by_one(FS_AUTH_CAPS)
1266
1267 def test_multiple_path_rw(self):
1268 PERM = 'rw'
1269 FS_AUTH_CAPS = (('/dir1/dir12', PERM), ('/dir2/dir22', PERM))
1270 for c in FS_AUTH_CAPS:
1271 self.mount_a.run_shell(f'mkdir -p .{c[0]}')
1272 self.captesters = (CapTester(), CapTester())
1273 self.setup_test_env(FS_AUTH_CAPS)
1274
1275 self.run_cap_test_one_by_one(FS_AUTH_CAPS)
1276
1277 def run_cap_test_one_by_one(self, fs_auth_caps):
1278 keyring = self.run_cluster_cmd(f'auth get {self.client_name}')
1279 for i, c in enumerate(fs_auth_caps):
1280 self.assertIn(i, (0, 1))
1281 PATH = c[0]
1282 PERM = c[1]
1283 self._remount(keyring, PATH)
1284 # actual tests...
1285 self.captesters[i].run_mon_cap_tests(self.fs, self.client_id)
1286 self.captesters[i].run_mds_cap_tests(PERM, PATH)
1287
1288 def tearDown(self):
1289 self.mount_a.umount_wait()
1290 self.run_cluster_cmd(f'auth rm {self.client_name}')
1291
1292 super(type(self), self).tearDown()
1293
1294 def _remount(self, keyring, path='/'):
1295 keyring_path = self.mount_a.client_remote.mktemp(data=keyring)
1296 self.mount_a.remount(client_id=self.client_id,
1297 client_keyring_path=keyring_path,
1298 cephfs_mntpt=path)
1299
1300 def setup_for_single_path(self, fs_auth_caps):
1301 self.captester.write_test_files((self.mount_a,), '/')
1302 keyring = self.fs.authorize(self.client_id, fs_auth_caps)
1303 self._remount(keyring)
1304
1305 def setup_for_multiple_paths(self, fs_auth_caps):
1306 for i, c in enumerate(fs_auth_caps):
1307 PATH = c[0]
1308 self.captesters[i].write_test_files((self.mount_a,), PATH)
1309
1310 self.fs.authorize(self.client_id, fs_auth_caps)
1311
1312 def setup_test_env(self, fs_auth_caps):
1313 if len(fs_auth_caps) == 1:
1314 self.setup_for_single_path(fs_auth_caps[0])
1315 else:
1316 self.setup_for_multiple_paths(fs_auth_caps)
1317
1318
1319 class TestAdminCommandIdempotency(CephFSTestCase):
1320 """
1321 Tests for administration command idempotency.
1322 """
1323
1324 CLIENTS_REQUIRED = 0
1325 MDSS_REQUIRED = 1
1326
1327 def test_rm_idempotency(self):
1328 """
1329 That removing a fs twice is idempotent.
1330 """
1331
1332 data_pools = self.fs.get_data_pool_names(refresh=True)
1333 self.fs.fail()
1334 self.fs.rm()
1335 try:
1336 self.fs.get_mds_map()
1337 except FSMissing:
1338 pass
1339 else:
1340 self.fail("get_mds_map should raise")
1341 p = self.fs.rm()
1342 self.assertIn("does not exist", p.stderr.getvalue())
1343 self.fs.remove_pools(data_pools)
1344
1345
1346 class TestAdminCommandDumpTree(CephFSTestCase):
1347 """
1348 Tests for administration commands that dump subtrees.
1349 """
1350
1351 CLIENTS_REQUIRED = 0
1352 MDSS_REQUIRED = 1
1353
1354 def test_dump_subtrees(self):
1355 """
1356 Dump all the subtrees to make sure the MDS daemon won't crash.
1357 """
1358
1359 subtrees = self.fs.mds_asok(['get', 'subtrees'])
1360 log.info(f"dumping {len(subtrees)} subtrees:")
1361 for subtree in subtrees:
1362 log.info(f" subtree: '{subtree['dir']['path']}'")
1363 self.fs.mds_asok(['dump', 'tree', subtree['dir']['path']])
1364
1365 log.info("dumping 2 special subtrees:")
1366 log.info(" subtree: '/'")
1367 self.fs.mds_asok(['dump', 'tree', '/'])
1368 log.info(" subtree: '~mdsdir'")
1369 self.fs.mds_asok(['dump', 'tree', '~mdsdir'])
1370
1371 class TestAdminCommandDumpLoads(CephFSTestCase):
1372 """
1373 Tests for the "dump loads" administration command.
1374 """
1375
1376 CLIENTS_REQUIRED = 0
1377 MDSS_REQUIRED = 1
1378
1379 def test_dump_loads(self):
1380 """
1381 Make sure the depth limit param is considered when dumping loads for an MDS daemon.
1382 """
1383
1384 log.info("dumping loads")
1385 loads = self.fs.mds_asok(['dump', 'loads', '1'])
1386 self.assertIsNotNone(loads)
1387 self.assertIn("dirfrags", loads)
1388 for d in loads["dirfrags"]:
1389 self.assertLessEqual(d["path"].count("/"), 1)
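# The trailing '1' passed to "dump loads" above is the depth limit, so every
# dumped dirfrag path is expected to sit at most one level below the root,
# which is what the count("/") check verifies.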
1390
1391 class TestFsBalRankMask(CephFSTestCase):
1392 """
1393 Tests ceph fs set <fs_name> bal_rank_mask
1394 """
1395
1396 CLIENTS_REQUIRED = 0
1397 MDSS_REQUIRED = 2
1398
1399 def test_bal_rank_mask(self):
1400 """
1401 check whether a specified bal_rank_mask value is valid or not.
1402 """
1403 bal_rank_mask = '0x0'
1404 log.info(f"set bal_rank_mask {bal_rank_mask}")
1405 self.fs.set_bal_rank_mask(bal_rank_mask)
1406 self.assertEqual(bal_rank_mask, self.fs.get_var('bal_rank_mask'))
1407
1408 bal_rank_mask = '0'
1409 log.info(f"set bal_rank_mask {bal_rank_mask}")
1410 self.fs.set_bal_rank_mask(bal_rank_mask)
1411 self.assertEqual(bal_rank_mask, self.fs.get_var('bal_rank_mask'))
1412
1413 bal_rank_mask = '-1'
1414 log.info(f"set bal_rank_mask {bal_rank_mask}")
1415 self.fs.set_bal_rank_mask(bal_rank_mask)
1416 self.assertEqual(bal_rank_mask, self.fs.get_var('bal_rank_mask'))
1417
1418 bal_rank_mask = 'all'
1419 log.info(f"set bal_rank_mask {bal_rank_mask}")
1420 self.fs.set_bal_rank_mask(bal_rank_mask)
1421 self.assertEqual(bal_rank_mask, self.fs.get_var('bal_rank_mask'))
1422
1423 bal_rank_mask = '0x1'
1424 log.info(f"set bal_rank_mask {bal_rank_mask}")
1425 self.fs.set_bal_rank_mask(bal_rank_mask)
1426 self.assertEqual(bal_rank_mask, self.fs.get_var('bal_rank_mask'))
1427
1428 bal_rank_mask = '1'
1429 log.info(f"set bal_rank_mask {bal_rank_mask}")
1430 self.fs.set_bal_rank_mask(bal_rank_mask)
1431 self.assertEqual(bal_rank_mask, self.fs.get_var('bal_rank_mask'))
1432
1433 bal_rank_mask = 'f0'
1434 log.info(f"set bal_rank_mask {bal_rank_mask}")
1435 self.fs.set_bal_rank_mask(bal_rank_mask)
1436 self.assertEqual(bal_rank_mask, self.fs.get_var('bal_rank_mask'))
1437
1438 bal_rank_mask = 'ab'
1439 log.info(f"set bal_rank_mask {bal_rank_mask}")
1440 self.fs.set_bal_rank_mask(bal_rank_mask)
1441 self.assertEqual(bal_rank_mask, self.fs.get_var('bal_rank_mask'))
1442
1443 bal_rank_mask = '0xfff0'
1444 log.info(f"set bal_rank_mask {bal_rank_mask}")
1445 self.fs.set_bal_rank_mask(bal_rank_mask)
1446 self.assertEqual(bal_rank_mask, self.fs.get_var('bal_rank_mask'))
1447
1448 MAX_MDS = 256
1449 bal_rank_mask = '0x' + 'f' * int(MAX_MDS / 4)
1450 log.info(f"set bal_rank_mask {bal_rank_mask}")
1451 self.fs.set_bal_rank_mask(bal_rank_mask)
1452 self.assertEqual(bal_rank_mask, self.fs.get_var('bal_rank_mask'))
1453
1454 bal_rank_mask = ''
1455 log.info("set bal_rank_mask to empty string")
1456 try:
1457 self.fs.set_bal_rank_mask(bal_rank_mask)
1458 except CommandFailedError as e:
1459 self.assertEqual(e.exitstatus, errno.EINVAL)
1460
1461 bal_rank_mask = '0x1' + 'f' * int(MAX_MDS / 4)
1462 log.info(f"set bal_rank_mask {bal_rank_mask}")
1463 try:
1464 self.fs.set_bal_rank_mask(bal_rank_mask)
1465 except CommandFailedError as e:
1466 self.assertEqual(e.exitstatus, errno.EINVAL)