# ceph/qa/tasks/cephfs/test_admin.py
import json

from teuthology.orchestra.run import CommandFailedError

from tasks.cephfs.cephfs_test_case import CephFSTestCase
from tasks.cephfs.fuse_mount import FuseMount

from tasks.cephfs.filesystem import FileLayout

class TestAdminCommands(CephFSTestCase):
    """
    Tests for administration commands.
    """

    CLIENTS_REQUIRED = 1
    MDSS_REQUIRED = 1

    def test_fs_status(self):
        """
        That the `ceph fs status` command functions.
        """

        s = self.fs.mon_manager.raw_cluster_cmd("fs", "status")
        self.assertTrue("active" in s)

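    # Helper for the EC tests below: creates a replicated "<n>-meta" pool and a
    # k=2/m=2 erasure-coded "<n>-data" pool, optionally enabling
    # allow_ec_overwrites on the data pool (CephFS only accepts an EC pool as a
    # data pool when overwrites are allowed, as the `fs new` tests exercise).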
    def _setup_ec_pools(self, n, metadata=True, overwrites=True):
        if metadata:
            self.fs.mon_manager.raw_cluster_cmd('osd', 'pool', 'create', n+"-meta", "8")
        cmd = ['osd', 'erasure-code-profile', 'set', n+"-profile", "m=2", "k=2", "crush-failure-domain=osd"]
        self.fs.mon_manager.raw_cluster_cmd(*cmd)
        self.fs.mon_manager.raw_cluster_cmd('osd', 'pool', 'create', n+"-data", "8", "erasure", n+"-profile")
        if overwrites:
            self.fs.mon_manager.raw_cluster_cmd('osd', 'pool', 'set', n+"-data", 'allow_ec_overwrites', 'true')

    def _check_pool_application_metadata_key_value(self, pool, app, key, value):
        output = self.fs.mon_manager.raw_cluster_cmd(
            'osd', 'pool', 'application', 'get', pool, app, key)
        self.assertEqual(str(output.strip()), value)

    def test_add_data_pool_root(self):
        """
        That a new data pool can be added and used for the root directory.
        """

        p = self.fs.add_data_pool("foo")
        self.fs.set_dir_layout(self.mount_a, ".", FileLayout(pool=p))

    def test_add_data_pool_application_metadata(self):
        """
        That the application metadata set on a newly added data pool is as expected.
        """
        pool_name = "foo"
        mon_cmd = self.fs.mon_manager.raw_cluster_cmd
        mon_cmd('osd', 'pool', 'create', pool_name, str(self.fs.pgs_per_fs_pool))
        # Check whether https://tracker.ceph.com/issues/43061 is fixed
        mon_cmd('osd', 'pool', 'application', 'enable', pool_name, 'cephfs')
        self.fs.add_data_pool(pool_name, create=False)
        self._check_pool_application_metadata_key_value(
            pool_name, 'cephfs', 'data', self.fs.name)

    def test_add_data_pool_subdir(self):
        """
        That a new data pool can be added and used for a sub-directory.
        """

        p = self.fs.add_data_pool("foo")
        self.mount_a.run_shell("mkdir subdir")
        self.fs.set_dir_layout(self.mount_a, "subdir", FileLayout(pool=p))

    def test_add_data_pool_ec(self):
        """
        That a new EC data pool can be added.
        """

        n = "test_add_data_pool_ec"
        self._setup_ec_pools(n, metadata=False)
        p = self.fs.add_data_pool(n+"-data", create=False)

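    # The next three tests cover how `fs new` treats an erasure-coded default
    # data pool: rejected by default, accepted with --force when overwrites are
    # enabled, and rejected even with --force when overwrites are disabled.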
    def test_new_default_ec(self):
        """
        That a new file system warns/fails with an EC default data pool.
        """

        self.fs.delete_all_filesystems()
        n = "test_new_default_ec"
        self._setup_ec_pools(n)
        try:
            self.fs.mon_manager.raw_cluster_cmd('fs', 'new', n, n+"-meta", n+"-data")
        except CommandFailedError as e:
            if e.exitstatus == 22:
                # EINVAL: the mons refuse an EC default data pool without --force
                pass
            else:
                raise
        else:
            raise RuntimeError("expected failure")

    def test_new_default_ec_force(self):
        """
        That a new file system succeeds with an EC default data pool with --force.
        """

        self.fs.delete_all_filesystems()
        n = "test_new_default_ec_force"
        self._setup_ec_pools(n)
        self.fs.mon_manager.raw_cluster_cmd('fs', 'new', n, n+"-meta", n+"-data", "--force")

    def test_new_default_ec_no_overwrite(self):
        """
        That a new file system fails with an EC default data pool without overwrites enabled.
        """

        self.fs.delete_all_filesystems()
        n = "test_new_default_ec_no_overwrite"
        self._setup_ec_pools(n, overwrites=False)
        try:
            self.fs.mon_manager.raw_cluster_cmd('fs', 'new', n, n+"-meta", n+"-data")
        except CommandFailedError as e:
            if e.exitstatus == 22:
                # EINVAL: allow_ec_overwrites is not enabled on the data pool
                pass
            else:
                raise
        else:
            raise RuntimeError("expected failure")
        # ...and it should fail even with --force
        try:
            self.fs.mon_manager.raw_cluster_cmd('fs', 'new', n, n+"-meta", n+"-data", "--force")
        except CommandFailedError as e:
            if e.exitstatus == 22:
                pass
            else:
                raise
        else:
            raise RuntimeError("expected failure")

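    # `fs new` should tag both pre-created pools with the "cephfs" application
    # and record the file system name under the "metadata" and "data" keys,
    # which is what the assertions below check.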
    def test_fs_new_pool_application_metadata(self):
        """
        That the application metadata set on the pools of a newly created filesystem is as expected.
        """
        self.fs.delete_all_filesystems()
        fs_name = "test_fs_new_pool_application"
        keys = ['metadata', 'data']
        pool_names = [fs_name+'-'+key for key in keys]
        mon_cmd = self.fs.mon_manager.raw_cluster_cmd
        for p in pool_names:
            mon_cmd('osd', 'pool', 'create', p, str(self.fs.pgs_per_fs_pool))
            mon_cmd('osd', 'pool', 'application', 'enable', p, 'cephfs')
        mon_cmd('fs', 'new', fs_name, pool_names[0], pool_names[1])
        for i in range(2):
            self._check_pool_application_metadata_key_value(
                pool_names[i], 'cephfs', keys[i], fs_name)


class TestConfigCommands(CephFSTestCase):
    """
    Test that daemons and clients respond to the otherwise rarely-used
    runtime config modification operations.
    """

    CLIENTS_REQUIRED = 1
    MDSS_REQUIRED = 1

    def test_ceph_config_show(self):
        """
        That I can successfully show MDS configuration.
        """

        names = self.fs.get_rank_names()
        for n in names:
            s = self.fs.mon_manager.raw_cluster_cmd("config", "show", "mds."+n)
            self.assertTrue("NAME" in s)
            self.assertTrue("mon_host" in s)

    def test_client_config(self):
        """
        That I can successfully issue asok "config set" commands.
        """

        if not isinstance(self.mount_a, FuseMount):
            self.skipTest("Test only applies to FUSE clients")

        test_key = "client_cache_size"
        test_val = "123"
        self.mount_a.admin_socket(['config', 'set', test_key, test_val])
        out = self.mount_a.admin_socket(['config', 'get', test_key])
        self.assertEqual(out[test_key], test_val)

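        # Presumably exercises the client with some I/O (and cached data) under
        # the new cache size before the clean-unmount check below.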
        self.mount_a.write_n_mb("file.bin", 1)

        # Implicitly assert that shutdown does not hit a lockdep error
        self.mount_a.umount_wait(require_clean=True)
        self.fs.mds_stop()

    def test_mds_config_asok(self):
        test_key = "mds_max_purge_ops"
        test_val = "123"
        self.fs.mds_asok(['config', 'set', test_key, test_val])
        out = self.fs.mds_asok(['config', 'get', test_key])
        self.assertEqual(out[test_key], test_val)

        # Implicitly assert that shutdown does not hit a lockdep error
        self.mount_a.umount_wait(require_clean=True)
        self.fs.mds_stop()

    def test_mds_config_tell(self):
        test_key = "mds_max_purge_ops"
        test_val = "123"

        mds_id = self.fs.get_lone_mds_id()
        self.fs.mon_manager.raw_cluster_cmd("tell", "mds.{0}".format(mds_id), "injectargs",
                                            "--{0}={1}".format(test_key, test_val))

        # Read it back with asok because there is no `tell` equivalent
        out = self.fs.mds_asok(['config', 'get', test_key])
        self.assertEqual(out[test_key], test_val)

        # Implicitly assert that shutdown does not hit a lockdep error
        self.mount_a.umount_wait(require_clean=True)
        self.fs.mds_stop()