from teuthology.orchestra.run import CommandFailedError

from tasks.cephfs.cephfs_test_case import CephFSTestCase
from tasks.cephfs.fuse_mount import FuseMount

from tasks.cephfs.filesystem import FileLayout


class TestAdminCommands(CephFSTestCase):
    """
    Tests for administration commands.
    """

    CLIENTS_REQUIRED = 1
    MDSS_REQUIRED = 1

    def test_fs_status(self):
        """
        That the `ceph fs status` command functions.
        """

        s = self.fs.mon_manager.raw_cluster_cmd("fs", "status")
        self.assertTrue("active" in s)

    def _setup_ec_pools(self, n, metadata=True, overwrites=True):
        # Create a replicated metadata pool and an erasure-coded data pool,
        # both named after the test; optionally enable EC overwrites.
        if metadata:
            self.fs.mon_manager.raw_cluster_cmd('osd', 'pool', 'create', n+"-meta", "8")
        cmd = ['osd', 'erasure-code-profile', 'set', n+"-profile", "m=2", "k=2", "crush-failure-domain=osd"]
        self.fs.mon_manager.raw_cluster_cmd(*cmd)
        self.fs.mon_manager.raw_cluster_cmd('osd', 'pool', 'create', n+"-data", "8", "erasure", n+"-profile")
        if overwrites:
            self.fs.mon_manager.raw_cluster_cmd('osd', 'pool', 'set', n+"-data", 'allow_ec_overwrites', 'true')

    def _check_pool_application_metadata_key_value(self, pool, app, key, value):
        output = self.fs.mon_manager.raw_cluster_cmd(
            'osd', 'pool', 'application', 'get', pool, app, key)
        self.assertEqual(str(output.strip()), value)
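
    # For example (hypothetical names): after pool "foo" is attached to a
    # filesystem called "cephfs_a", `ceph osd pool application get foo cephfs
    # data` prints "cephfs_a", which is the value this helper asserts.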

    def test_add_data_pool_root(self):
        """
        That a new data pool can be added and used for the root directory.
        """

        p = self.fs.add_data_pool("foo")
        self.fs.set_dir_layout(self.mount_a, ".", FileLayout(pool=p))
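
    # set_dir_layout() steers new files under the directory into the given
    # pool, presumably by setting the ceph.dir.layout.pool vxattr on it.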

    def test_add_data_pool_application_metadata(self):
        """
        That the application metadata set on a newly added data pool is as expected.
        """
        pool_name = "foo"
        mon_cmd = self.fs.mon_manager.raw_cluster_cmd
        mon_cmd('osd', 'pool', 'create', pool_name, str(self.fs.pgs_per_fs_pool))
        # Check whether https://tracker.ceph.com/issues/43061 is fixed:
        # enabling the 'cephfs' application before the pool is added must not
        # stop add_data_pool from setting the 'data' application key.
        mon_cmd('osd', 'pool', 'application', 'enable', pool_name, 'cephfs')
        self.fs.add_data_pool(pool_name, create=False)
        self._check_pool_application_metadata_key_value(
            pool_name, 'cephfs', 'data', self.fs.name)

    def test_add_data_pool_subdir(self):
        """
        That a new data pool can be added and used for a sub-directory.
        """

        p = self.fs.add_data_pool("foo")
        self.mount_a.run_shell("mkdir subdir")
        self.fs.set_dir_layout(self.mount_a, "subdir", FileLayout(pool=p))

    def test_add_data_pool_ec(self):
        """
        That a new EC data pool can be added.
        """

        n = "test_add_data_pool_ec"
        self._setup_ec_pools(n, metadata=False)
        p = self.fs.add_data_pool(n+"-data", create=False)
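
    # Note the contrast with test_new_default_ec below: an EC pool is
    # acceptable as an *additional* data pool, but using one as the default
    # data pool of a new filesystem is refused unless --force is given.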

    def test_new_default_ec(self):
        """
        That a new file system warns/fails with an EC default data pool.
        """

        self.fs.delete_all_filesystems()
        n = "test_new_default_ec"
        self._setup_ec_pools(n)
        try:
            self.fs.mon_manager.raw_cluster_cmd('fs', 'new', n, n+"-meta", n+"-data")
        except CommandFailedError as e:
            if e.exitstatus == 22:
                pass
            else:
                raise
        else:
            raise RuntimeError("expected failure")
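
    # The mon refuses with EINVAL (exit status 22); any other outcome is
    # unexpected. This try/except/else pattern recurs below; a small helper
    # like the following (a sketch, not part of the original suite) would
    # express it directly:
    #
    #   def _expect_einval(self, *cmd):
    #       try:
    #           self.fs.mon_manager.raw_cluster_cmd(*cmd)
    #       except CommandFailedError as e:
    #           if e.exitstatus != 22:  # 22 == EINVAL
    #               raise
    #       else:
    #           raise RuntimeError("expected failure")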

    def test_new_default_ec_force(self):
        """
        That a new file system succeeds with an EC default data pool with --force.
        """

        self.fs.delete_all_filesystems()
        n = "test_new_default_ec_force"
        self._setup_ec_pools(n)
        self.fs.mon_manager.raw_cluster_cmd('fs', 'new', n, n+"-meta", n+"-data", "--force")

    def test_new_default_ec_no_overwrite(self):
        """
        That a new file system fails with an EC default data pool without overwrite.
        """

        self.fs.delete_all_filesystems()
        n = "test_new_default_ec_no_overwrite"
        self._setup_ec_pools(n, overwrites=False)
        try:
            self.fs.mon_manager.raw_cluster_cmd('fs', 'new', n, n+"-meta", n+"-data")
        except CommandFailedError as e:
            if e.exitstatus == 22:
                pass
            else:
                raise
        else:
            raise RuntimeError("expected failure")
        # and even with --force!
        try:
            self.fs.mon_manager.raw_cluster_cmd('fs', 'new', n, n+"-meta", n+"-data", "--force")
        except CommandFailedError as e:
            if e.exitstatus == 22:
                pass
            else:
                raise
        else:
            raise RuntimeError("expected failure")
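
    # Even --force cannot help here: an EC pool without allow_ec_overwrites
    # cannot store CephFS data at all, so the mon refuses it outright.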

    def test_fs_new_pool_application_metadata(self):
        """
        That the application metadata set on the pools of a newly created filesystem is as expected.
        """
        self.fs.delete_all_filesystems()
        fs_name = "test_fs_new_pool_application"
        keys = ['metadata', 'data']
        pool_names = [fs_name+'-'+key for key in keys]
        mon_cmd = self.fs.mon_manager.raw_cluster_cmd
        for p in pool_names:
            mon_cmd('osd', 'pool', 'create', p, str(self.fs.pgs_per_fs_pool))
            mon_cmd('osd', 'pool', 'application', 'enable', p, 'cephfs')
        mon_cmd('fs', 'new', fs_name, pool_names[0], pool_names[1])
        for i in range(2):
            self._check_pool_application_metadata_key_value(
                pool_names[i], 'cephfs', keys[i], fs_name)


class TestConfigCommands(CephFSTestCase):
    """
    Test that daemons and clients respond to the otherwise rarely-used
    runtime config modification operations.
    """

    CLIENTS_REQUIRED = 1
    MDSS_REQUIRED = 1

    def test_ceph_config_show(self):
        """
        That I can successfully show MDS configuration.
        """

        names = self.fs.get_rank_names()
        for n in names:
            s = self.fs.mon_manager.raw_cluster_cmd("config", "show", "mds."+n)
            self.assertTrue("NAME" in s)
            self.assertTrue("mon_host" in s)
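
    # `ceph config show mds.<name>` prints a table whose header row includes
    # NAME, and mon_host is expected among the listed options, hence the two
    # substring assertions above.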

    def test_client_config(self):
        """
        That I can successfully issue asok "config set" commands.
        """

        if not isinstance(self.mount_a, FuseMount):
            self.skipTest("Test only applies to FUSE clients")

        test_key = "client_cache_size"
        test_val = "123"
        self.mount_a.admin_socket(['config', 'set', test_key, test_val])
        out = self.mount_a.admin_socket(['config', 'get', test_key])
        self.assertEqual(out[test_key], test_val)

        self.mount_a.write_n_mb("file.bin", 1)

        # Implicitly asserting that things don't have lockdep error in shutdown
        self.mount_a.umount_wait(require_clean=True)
        self.fs.mds_stop()
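
    # admin_socket(['config', 'get', key]) returns the daemon's reply decoded
    # into a dict, which is why the assertion above can index out[test_key].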

    def test_mds_config_asok(self):
        test_key = "mds_max_purge_ops"
        test_val = "123"
        self.fs.mds_asok(['config', 'set', test_key, test_val])
        out = self.fs.mds_asok(['config', 'get', test_key])
        self.assertEqual(out[test_key], test_val)

        # Implicitly asserting that things don't have lockdep error in shutdown
        self.mount_a.umount_wait(require_clean=True)
        self.fs.mds_stop()

    def test_mds_config_tell(self):
        test_key = "mds_max_purge_ops"
        test_val = "123"

        mds_id = self.fs.get_lone_mds_id()
        self.fs.mon_manager.raw_cluster_cmd("tell", "mds.{0}".format(mds_id), "injectargs",
                                            "--{0}={1}".format(test_key, test_val))

        # Read it back with asok because there is no `tell` equivalent
        out = self.fs.mds_asok(['config', 'get', test_key])
        self.assertEqual(out[test_key], test_val)

        # Implicitly asserting that things don't have lockdep error in shutdown
        self.mount_a.umount_wait(require_clean=True)
        self.fs.mds_stop()