import json

from teuthology.orchestra.run import CommandFailedError

from tasks.cephfs.cephfs_test_case import CephFSTestCase
from tasks.cephfs.fuse_mount import FuseMount

from tasks.cephfs.filesystem import FileLayout


class TestAdminCommands(CephFSTestCase):
    """
    Tests for administration commands.
    """

    CLIENTS_REQUIRED = 1
    MDSS_REQUIRED = 1

    def test_fs_status(self):
        """
        That the `ceph fs status` command functions.
        """

        s = self.fs.mon_manager.raw_cluster_cmd("fs", "status")
        self.assertTrue("active" in s)

        mdsmap = json.loads(self.fs.mon_manager.raw_cluster_cmd("fs", "status", "--format=json-pretty"))["mdsmap"]
        self.assertEqual(mdsmap[0]["state"], "active")

        mdsmap = json.loads(self.fs.mon_manager.raw_cluster_cmd("fs", "status", "--format=json"))["mdsmap"]
        self.assertEqual(mdsmap[0]["state"], "active")

    def _setup_ec_pools(self, n, metadata=True, overwrites=True):
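        """
        Create the pools backing an erasure-coded file system: an optional
        replicated metadata pool, a k=2/m=2 erasure-code profile, an EC data
        pool and, unless overwrites is False, allow_ec_overwrites on that data
        pool (CephFS requires overwrites to be enabled on an EC data pool).
        """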
        if metadata:
            self.fs.mon_manager.raw_cluster_cmd('osd', 'pool', 'create', n+"-meta", "8")
        cmd = ['osd', 'erasure-code-profile', 'set', n+"-profile", "m=2", "k=2", "crush-failure-domain=osd"]
        self.fs.mon_manager.raw_cluster_cmd(*cmd)
        self.fs.mon_manager.raw_cluster_cmd('osd', 'pool', 'create', n+"-data", "8", "erasure", n+"-profile")
        if overwrites:
            self.fs.mon_manager.raw_cluster_cmd('osd', 'pool', 'set', n+"-data", 'allow_ec_overwrites', 'true')

    def _check_pool_application_metadata_key_value(self, pool, app, key, value):
        output = self.fs.mon_manager.raw_cluster_cmd(
            'osd', 'pool', 'application', 'get', pool, app, key)
        self.assertEqual(str(output.strip()), value)

    def test_add_data_pool_root(self):
        """
        That a new data pool can be added and used for the root directory.
        """

        p = self.fs.add_data_pool("foo")
        self.fs.set_dir_layout(self.mount_a, ".", FileLayout(pool=p))

    def test_add_data_pool_application_metadata(self):
        """
        That the application metadata set on a newly added data pool is as expected.
        """
        pool_name = "foo"
        mon_cmd = self.fs.mon_manager.raw_cluster_cmd
        mon_cmd('osd', 'pool', 'create', pool_name, str(self.fs.pgs_per_fs_pool))
        # Check whether https://tracker.ceph.com/issues/43061 is fixed
        mon_cmd('osd', 'pool', 'application', 'enable', pool_name, 'cephfs')
        self.fs.add_data_pool(pool_name, create=False)
        self._check_pool_application_metadata_key_value(
            pool_name, 'cephfs', 'data', self.fs.name)

    def test_add_data_pool_subdir(self):
        """
        That a new data pool can be added and used for a sub-directory.
        """

        p = self.fs.add_data_pool("foo")
        self.mount_a.run_shell("mkdir subdir")
        self.fs.set_dir_layout(self.mount_a, "subdir", FileLayout(pool=p))

    def test_add_data_pool_non_alphanumeric_name_as_subdir(self):
        """
        That a new data pool with a non-alphanumeric name can be added and used for a sub-directory.
        """
        p = self.fs.add_data_pool("I-am-data_pool00.")
        self.mount_a.run_shell("mkdir subdir")
        self.fs.set_dir_layout(self.mount_a, "subdir", FileLayout(pool=p))

    def test_add_data_pool_ec(self):
        """
        That a new EC data pool can be added.
        """

        n = "test_add_data_pool_ec"
        self._setup_ec_pools(n, metadata=False)
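        # The EC data pool already exists from _setup_ec_pools, so attach it without creating it.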
        p = self.fs.add_data_pool(n+"-data", create=False)

    def test_new_default_ec(self):
        """
        That a new file system warns/fails with an EC default data pool.
        """

        self.fs.delete_all_filesystems()
        n = "test_new_default_ec"
        self._setup_ec_pools(n)
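        # 'fs new' with an EC default data pool is expected to be rejected (EINVAL, exit status 22) unless --force is given.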
        try:
            self.fs.mon_manager.raw_cluster_cmd('fs', 'new', n, n+"-meta", n+"-data")
        except CommandFailedError as e:
            if e.exitstatus == 22:
                pass
            else:
                raise
        else:
            raise RuntimeError("expected failure")

    def test_new_default_ec_force(self):
        """
        That a new file system succeeds with an EC default data pool with --force.
        """

        self.fs.delete_all_filesystems()
        n = "test_new_default_ec_force"
        self._setup_ec_pools(n)
        self.fs.mon_manager.raw_cluster_cmd('fs', 'new', n, n+"-meta", n+"-data", "--force")

    def test_new_default_ec_no_overwrite(self):
        """
        That a new file system fails with an EC default data pool without overwrite.
        """

        self.fs.delete_all_filesystems()
        n = "test_new_default_ec_no_overwrite"
        self._setup_ec_pools(n, overwrites=False)
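        # With allow_ec_overwrites disabled the EC pool cannot hold CephFS data, so 'fs new' is expected to fail with EINVAL (22).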
        try:
            self.fs.mon_manager.raw_cluster_cmd('fs', 'new', n, n+"-meta", n+"-data")
        except CommandFailedError as e:
            if e.exitstatus == 22:
                pass
            else:
                raise
        else:
            raise RuntimeError("expected failure")
        # and even with --force !
        try:
            self.fs.mon_manager.raw_cluster_cmd('fs', 'new', n, n+"-meta", n+"-data", "--force")
        except CommandFailedError as e:
            if e.exitstatus == 22:
                pass
            else:
                raise
        else:
            raise RuntimeError("expected failure")

    def test_fs_new_pool_application_metadata(self):
        """
        That the application metadata set on the pools of a newly created filesystem is as expected.
        """
        self.fs.delete_all_filesystems()
        fs_name = "test_fs_new_pool_application"
        keys = ['metadata', 'data']
        pool_names = [fs_name+'-'+key for key in keys]
        mon_cmd = self.fs.mon_manager.raw_cluster_cmd
        for p in pool_names:
            mon_cmd('osd', 'pool', 'create', p, str(self.fs.pgs_per_fs_pool))
            mon_cmd('osd', 'pool', 'application', 'enable', p, 'cephfs')
        mon_cmd('fs', 'new', fs_name, pool_names[0], pool_names[1])
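        # 'fs new' should tag both pools with the cephfs application, mapping each pool's role ('metadata'/'data') to the fs name.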
        for i in range(2):
            self._check_pool_application_metadata_key_value(
                pool_names[i], 'cephfs', keys[i], fs_name)


class TestConfigCommands(CephFSTestCase):
    """
    Test that daemons and clients respond to the otherwise rarely-used
    runtime config modification operations.
    """

    CLIENTS_REQUIRED = 1
    MDSS_REQUIRED = 1

    def test_ceph_config_show(self):
        """
        That I can successfully show MDS configuration.
        """

        names = self.fs.get_rank_names()
        for n in names:
            s = self.fs.mon_manager.raw_cluster_cmd("config", "show", "mds."+n)
            self.assertTrue("NAME" in s)
            self.assertTrue("mon_host" in s)

    def test_client_config(self):
        """
        That I can successfully issue asok "config set" commands.
        """

        if not isinstance(self.mount_a, FuseMount):
            self.skipTest("Test only applies to FUSE clients")

        test_key = "client_cache_size"
        test_val = "123"
        self.mount_a.admin_socket(['config', 'set', test_key, test_val])
        out = self.mount_a.admin_socket(['config', 'get', test_key])
        self.assertEqual(out[test_key], test_val)

        self.mount_a.write_n_mb("file.bin", 1)
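        # A small write to exercise the client before the clean-shutdown check below.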

        # Implicitly asserting that things don't have lockdep error in shutdown
        self.mount_a.umount_wait(require_clean=True)
        self.fs.mds_stop()

    def test_mds_config_asok(self):
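        """
        That an MDS config value can be set and read back over the admin socket.
        """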
        test_key = "mds_max_purge_ops"
        test_val = "123"
        self.fs.mds_asok(['config', 'set', test_key, test_val])
        out = self.fs.mds_asok(['config', 'get', test_key])
        self.assertEqual(out[test_key], test_val)

        # Implicitly asserting that things don't have lockdep error in shutdown
        self.mount_a.umount_wait(require_clean=True)
        self.fs.mds_stop()

    def test_mds_config_tell(self):
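        """
        That an MDS config value can be set via `ceph tell mds.<id> injectargs` and read back over the admin socket.
        """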
        test_key = "mds_max_purge_ops"
        test_val = "123"

        mds_id = self.fs.get_lone_mds_id()
        self.fs.mon_manager.raw_cluster_cmd("tell", "mds.{0}".format(mds_id), "injectargs",
                                            "--{0}={1}".format(test_key, test_val))

        # Read it back with asok because there is no `tell` equivalent
        out = self.fs.mds_asok(['config', 'get', test_key])
        self.assertEqual(out[test_key], test_val)

        # Implicitly asserting that things don't have lockdep error in shutdown
        self.mount_a.umount_wait(require_clean=True)
        self.fs.mds_stop()