import json

from teuthology.orchestra.run import CommandFailedError

from tasks.cephfs.cephfs_test_case import CephFSTestCase
from tasks.cephfs.fuse_mount import FuseMount
from tasks.cephfs.filesystem import FileLayout


class TestAdminCommands(CephFSTestCase):
    """
    Tests for administration commands.
    """

    def test_fs_status(self):
        """
        That `ceph fs status` command functions.
        """

        s = self.fs.mon_manager.raw_cluster_cmd("fs", "status")
        self.assertTrue("active" in s)

        mdsmap = json.loads(self.fs.mon_manager.raw_cluster_cmd(
            "fs", "status", "--format=json-pretty"))["mdsmap"]
        self.assertEqual(mdsmap[0]["state"], "active")

        mdsmap = json.loads(self.fs.mon_manager.raw_cluster_cmd(
            "fs", "status", "--format=json"))["mdsmap"]
        self.assertEqual(mdsmap[0]["state"], "active")

    def _setup_ec_pools(self, n, metadata=True, overwrites=True):
        # Create a replicated metadata pool and an erasure-coded data pool
        # named after `n`, optionally enabling EC overwrites on the data pool.
        if metadata:
            self.fs.mon_manager.raw_cluster_cmd('osd', 'pool', 'create', n+"-meta", "8")
        cmd = ['osd', 'erasure-code-profile', 'set', n+"-profile", "m=2", "k=2", "crush-failure-domain=osd"]
        self.fs.mon_manager.raw_cluster_cmd(*cmd)
        self.fs.mon_manager.raw_cluster_cmd('osd', 'pool', 'create', n+"-data", "8", "erasure", n+"-profile")
        if overwrites:
            self.fs.mon_manager.raw_cluster_cmd('osd', 'pool', 'set', n+"-data", 'allow_ec_overwrites', 'true')

    def _check_pool_application_metadata_key_value(self, pool, app, key, value):
        output = self.fs.mon_manager.raw_cluster_cmd(
            'osd', 'pool', 'application', 'get', pool, app, key)
        self.assertEqual(str(output.strip()), value)

    def test_add_data_pool_root(self):
        """
        That a new data pool can be added and used for the root directory.
        """

        p = self.fs.add_data_pool("foo")
        self.fs.set_dir_layout(self.mount_a, ".", FileLayout(pool=p))

    def test_add_data_pool_application_metadata(self):
        """
        That the application metadata set on a newly added data pool is as expected.
        """
        pool_name = "foo"
        mon_cmd = self.fs.mon_manager.raw_cluster_cmd
        mon_cmd('osd', 'pool', 'create', pool_name, str(self.fs.pgs_per_fs_pool))
        # Check whether https://tracker.ceph.com/issues/43061 is fixed
        mon_cmd('osd', 'pool', 'application', 'enable', pool_name, 'cephfs')
        self.fs.add_data_pool(pool_name, create=False)
        self._check_pool_application_metadata_key_value(
            pool_name, 'cephfs', 'data', self.fs.name)

    def test_add_data_pool_subdir(self):
        """
        That a new data pool can be added and used for a sub-directory.
        """

        p = self.fs.add_data_pool("foo")
        self.mount_a.run_shell("mkdir subdir")
        self.fs.set_dir_layout(self.mount_a, "subdir", FileLayout(pool=p))

    def test_add_data_pool_non_alphanumeric_name_as_subdir(self):
        """
        That a new data pool with a non-alphanumeric name can be added and used for a sub-directory.
        """
        p = self.fs.add_data_pool("I-am-data_pool00.")
        self.mount_a.run_shell("mkdir subdir")
        self.fs.set_dir_layout(self.mount_a, "subdir", FileLayout(pool=p))

    def test_add_data_pool_ec(self):
        """
        That a new EC data pool can be added.
        """

        n = "test_add_data_pool_ec"
        self._setup_ec_pools(n, metadata=False)
        p = self.fs.add_data_pool(n+"-data", create=False)

    def test_new_default_ec(self):
        """
        That a new file system warns/fails with an EC default data pool.
        """

        self.mount_a.umount_wait(require_clean=True)
        self.fs.delete_all_filesystems()
        n = "test_new_default_ec"
        self._setup_ec_pools(n)
        try:
            self.fs.mon_manager.raw_cluster_cmd('fs', 'new', n, n+"-meta", n+"-data")
        except CommandFailedError as e:
            if e.exitstatus == 22:
                # EINVAL: the monitor rejects an EC default data pool
                pass
            else:
                raise
        else:
            raise RuntimeError("expected failure")

    def test_new_default_ec_force(self):
        """
        That a new file system succeeds with an EC default data pool with --force.
        """

        self.mount_a.umount_wait(require_clean=True)
        self.fs.delete_all_filesystems()
        n = "test_new_default_ec_force"
        self._setup_ec_pools(n)
        self.fs.mon_manager.raw_cluster_cmd('fs', 'new', n, n+"-meta", n+"-data", "--force")

    def test_new_default_ec_no_overwrite(self):
        """
        That a new file system fails with an EC default data pool without overwrite.
        """

        self.mount_a.umount_wait(require_clean=True)
        self.fs.delete_all_filesystems()
        n = "test_new_default_ec_no_overwrite"
        self._setup_ec_pools(n, overwrites=False)
        try:
            self.fs.mon_manager.raw_cluster_cmd('fs', 'new', n, n+"-meta", n+"-data")
        except CommandFailedError as e:
            if e.exitstatus == 22:
                # EINVAL: an EC pool without allow_ec_overwrites is not usable
                pass
            else:
                raise
        else:
            raise RuntimeError("expected failure")
        # and even with --force !
        try:
            self.fs.mon_manager.raw_cluster_cmd('fs', 'new', n, n+"-meta", n+"-data", "--force")
        except CommandFailedError as e:
            if e.exitstatus == 22:
                pass
            else:
                raise
        else:
            raise RuntimeError("expected failure")

    def test_fs_new_pool_application_metadata(self):
        """
        That the application metadata set on the pools of a newly created filesystem is as expected.
        """
        self.mount_a.umount_wait(require_clean=True)
        self.fs.delete_all_filesystems()
        fs_name = "test_fs_new_pool_application"
        keys = ['metadata', 'data']
        pool_names = [fs_name+'-'+key for key in keys]
        mon_cmd = self.fs.mon_manager.raw_cluster_cmd
        for p in pool_names:
            mon_cmd('osd', 'pool', 'create', p, str(self.fs.pgs_per_fs_pool))
            mon_cmd('osd', 'pool', 'application', 'enable', p, 'cephfs')
        mon_cmd('fs', 'new', fs_name, pool_names[0], pool_names[1])
        for i in range(2):
            self._check_pool_application_metadata_key_value(
                pool_names[i], 'cephfs', keys[i], fs_name)


class TestConfigCommands(CephFSTestCase):
    """
    Test that daemons and clients respond to the otherwise rarely-used
    runtime config modification operations.
    """

    def test_ceph_config_show(self):
        """
        That I can successfully show MDS configuration.
        """

        names = self.fs.get_rank_names()
        for n in names:
            s = self.fs.mon_manager.raw_cluster_cmd("config", "show", "mds."+n)
            self.assertTrue("NAME" in s)
            self.assertTrue("mon_host" in s)

    def test_client_config(self):
        """
        That I can successfully issue asok "config set" commands.
        """

        if not isinstance(self.mount_a, FuseMount):
            self.skipTest("Test only applies to FUSE clients")

        test_key = "client_cache_size"
        test_val = "123"
        self.mount_a.admin_socket(['config', 'set', test_key, test_val])
        out = self.mount_a.admin_socket(['config', 'get', test_key])
        self.assertEqual(out[test_key], test_val)

        self.mount_a.write_n_mb("file.bin", 1)

        # Implicitly asserting that things don't have lockdep error in shutdown
        self.mount_a.umount_wait(require_clean=True)
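
    # The asok calls above correspond roughly to the CLI form (socket path is
    # illustrative, not taken from this file):
    #   ceph --admin-daemon /var/run/ceph/ceph-client.<id>.asok config set client_cache_size 123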

    def test_mds_config_asok(self):
        test_key = "mds_max_purge_ops"
        test_val = "123"
        self.fs.mds_asok(['config', 'set', test_key, test_val])
        out = self.fs.mds_asok(['config', 'get', test_key])
        self.assertEqual(out[test_key], test_val)

        # Implicitly asserting that things don't have lockdep error in shutdown
        self.mount_a.umount_wait(require_clean=True)

    def test_mds_config_tell(self):
        test_key = "mds_max_purge_ops"
        test_val = "123"

        mds_id = self.fs.get_lone_mds_id()
        self.fs.mon_manager.raw_cluster_cmd("tell", "mds.{0}".format(mds_id), "injectargs",
                                            "--{0}={1}".format(test_key, test_val))

        # Read it back with asok because there is no `tell` equivalent
        out = self.fs.mds_asok(['config', 'get', test_key])
        self.assertEqual(out[test_key], test_val)

        # Implicitly asserting that things don't have lockdep error in shutdown
        self.mount_a.umount_wait(require_clean=True)
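
    # The tell path above amounts to the CLI form:
    #   ceph tell mds.<id> injectargs --mds_max_purge_ops=123
    # while the asok read-back is the per-daemon equivalent of
    #   ceph daemon mds.<id> config get mds_max_purge_ops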