from unittest import SkipTest
from tasks.cephfs.fuse_mount import FuseMount
from tasks.cephfs.cephfs_test_case import CephFSTestCase
from teuthology.orchestra.run import CommandFailedError, ConnectionLostError
import errno
import time
import json
import logging

log = logging.getLogger(__name__)

class TestMisc(CephFSTestCase):
    CLIENTS_REQUIRED = 2

    def test_getattr_caps(self):
        """
        Check that the MDS recognizes the 'mask' parameter of an open request.
        The parameter allows the client to request caps when opening a file.
        """

        if not isinstance(self.mount_a, FuseMount):
            raise SkipTest("Require FUSE client")

        # Enable debug. The client will request CEPH_CAP_XATTR_SHARED
        # on lookup/open
        self.mount_b.umount_wait()
        self.set_conf('client', 'client debug getattr caps', 'true')
        self.mount_b.mount()
        self.mount_b.wait_until_mounted()

        # create a file and hold it open. MDS will issue CEPH_CAP_EXCL_*
        # to mount_a
        p = self.mount_a.open_background("testfile")
        self.mount_b.wait_for_visible("testfile")

        # this triggers a lookup request and an open request. The debug
        # code will check whether the lookup/open reply contains xattrs
        self.mount_b.run_shell(["cat", "testfile"])

        self.mount_a.kill_background(p)

    def test_root_rctime(self):
        """
        Check that the root inode has a non-default rctime on startup.
        """

        t = time.time()
        rctime = self.mount_a.getfattr(".", "ceph.dir.rctime")
        log.info("rctime = {}".format(rctime))
        # getfattr returns the xattr value as a string; compare it numerically
        self.assertGreaterEqual(float(rctime), t - 10)

    def test_fs_new(self):
        self.mount_a.umount_wait()
        self.mount_b.umount_wait()

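        # Tear down the existing filesystem and recreate its metadata pool
        # from scratch, so that 'fs new' can later be pointed at a metadata
        # pool that already contains objects.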
        data_pool_name = self.fs.get_data_pool_name()

        self.fs.mds_stop()
        self.fs.mds_fail()

        self.fs.mon_manager.raw_cluster_cmd('fs', 'rm', self.fs.name,
                                            '--yes-i-really-mean-it')

        self.fs.mon_manager.raw_cluster_cmd('osd', 'pool', 'delete',
                                            self.fs.metadata_pool_name,
                                            self.fs.metadata_pool_name,
                                            '--yes-i-really-really-mean-it')
        self.fs.mon_manager.raw_cluster_cmd('osd', 'pool', 'create',
                                            self.fs.metadata_pool_name,
                                            str(self.fs.get_pgs_per_fs_pool()))

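        # Seed the freshly created metadata pool with an arbitrary object so
        # that it is no longer empty.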
        dummyfile = '/etc/fstab'

        self.fs.put_metadata_object_raw("key", dummyfile)

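        # Wait until the pool stats register the new object before attempting
        # 'fs new'.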
        def get_pool_df(fs, name):
            try:
                return fs.get_pool_df(name)['objects'] > 0
            except RuntimeError:
                return False

        self.wait_until_true(lambda: get_pool_df(self.fs, self.fs.metadata_pool_name),
                             timeout=30)

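        # Creating a filesystem on a metadata pool that already contains
        # objects should be rejected with EINVAL unless --force is given.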
        try:
            self.fs.mon_manager.raw_cluster_cmd('fs', 'new', self.fs.name,
                                                self.fs.metadata_pool_name,
                                                data_pool_name)
        except CommandFailedError as e:
            self.assertEqual(e.exitstatus, errno.EINVAL)
        else:
            raise AssertionError("Expected EINVAL")

        self.fs.mon_manager.raw_cluster_cmd('fs', 'new', self.fs.name,
                                            self.fs.metadata_pool_name,
                                            data_pool_name, "--force")

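        # Clean up: remove the filesystem again, recreate an empty metadata
        # pool, and verify that 'fs new' succeeds on it without --force.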
        self.fs.mon_manager.raw_cluster_cmd('fs', 'rm', self.fs.name,
                                            '--yes-i-really-mean-it')

        self.fs.mon_manager.raw_cluster_cmd('osd', 'pool', 'delete',
                                            self.fs.metadata_pool_name,
                                            self.fs.metadata_pool_name,
                                            '--yes-i-really-really-mean-it')
        self.fs.mon_manager.raw_cluster_cmd('osd', 'pool', 'create',
                                            self.fs.metadata_pool_name,
                                            str(self.fs.get_pgs_per_fs_pool()))
        self.fs.mon_manager.raw_cluster_cmd('fs', 'new', self.fs.name,
                                            self.fs.metadata_pool_name,
                                            data_pool_name)

    def test_evict_client(self):
        """
        Check that a slow client session won't get evicted if it's the
        only session
        """

        session_autoclose = self.fs.get_var("session_autoclose")

        self.mount_b.umount_wait()
        ls_data = self.fs.mds_asok(['session', 'ls'])
        self.assert_session_count(1, ls_data)

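        # Kill the only remaining client without unmounting. Even after
        # session_autoclose has elapsed, the stale session should not be
        # evicted because it is the only session.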
        self.mount_a.kill()
        self.mount_a.kill_cleanup()

        time.sleep(session_autoclose * 1.5)
        ls_data = self.fs.mds_asok(['session', 'ls'])
        self.assert_session_count(1, ls_data)

        self.mount_a.mount()
        self.mount_a.wait_until_mounted()
        self.mount_b.mount()
        self.mount_b.wait_until_mounted()

        ls_data = self._session_list()
        self.assert_session_count(2, ls_data)

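        # Kill one of the two clients. With another session present, the
        # stale session is expected to be closed once session_autoclose has
        # elapsed.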
        self.mount_a.kill()
        self.mount_a.kill_cleanup()

        time.sleep(session_autoclose * 1.5)
        ls_data = self.fs.mds_asok(['session', 'ls'])
        self.assert_session_count(1, ls_data)

    def test_cap_revoke_nonresponder(self):
        """
        Check that a client is evicted if it has not responded to a cap
        revoke request for the configured number of seconds.
        """
        session_timeout = self.fs.get_var("session_timeout")
        eviction_timeout = session_timeout / 2.0

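        # Use an eviction timeout shorter than the session timeout so that an
        # eviction by the cap-revoke checker can be distinguished from an
        # ordinary stale-session eviction (see the check further down).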
        self.fs.mds_asok(['config', 'set', 'mds_cap_revoke_eviction_timeout',
                          str(eviction_timeout)])

        cap_holder = self.mount_a.open_background()

        # Wait for the file to be visible from another client, indicating
        # that mount_a has completed its network ops
        self.mount_b.wait_for_visible()

        # Simulate client death
        self.mount_a.kill()

        try:
            # The waiter should get stuck waiting for the capability
            # held on the MDS by the now-dead client A
            cap_waiter = self.mount_b.write_background()

            a = time.time()
            time.sleep(eviction_timeout)
            cap_waiter.wait()
            b = time.time()
            cap_waited = b - a
            log.info("cap_waiter waited {0}s".format(cap_waited))

            # check that the cap is transferred before the session timeout
            # kicks in. This is a good enough check to ensure that the client
            # got evicted by the cap auto-evicter rather than transitioning to
            # the stale state and then getting evicted.
            self.assertLess(cap_waited, session_timeout,
                            "Capability handover took {0}, expected less than {1}".format(
                                cap_waited, session_timeout
                            ))

            cap_holder.stdin.close()
            try:
                cap_holder.wait()
            except (CommandFailedError, ConnectionLostError):
                # We killed it (and possibly its node), so it raises an error
                pass
        finally:
            self.mount_a.kill_cleanup()

        self.mount_a.mount()
        self.mount_a.wait_until_mounted()

    def test_filtered_df(self):
        pool_name = self.fs.get_data_pool_name()
        raw_df = self.fs.get_pool_df(pool_name)
        raw_avail = float(raw_df["max_avail"])
        out = self.fs.mon_manager.raw_cluster_cmd('osd', 'pool', 'get',
                                                  pool_name, 'size',
                                                  '-f', 'json-pretty')
        j = json.loads(out)
        pool_size = int(j['size'])

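        # 'df' reports sizes in 1 KiB blocks by default; the fourth column of
        # the data row is the available space seen by the client.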
        proc = self.mount_a.run_shell(['df', '.'])
        output = proc.stdout.getvalue()
        fs_avail = output.split('\n')[1].split()[3]
        fs_avail = float(fs_avail) * 1024

        ratio = raw_avail / fs_avail
        assert 0.9 < ratio < 1.1

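    # Helpers for the "cache drop" tests below. The command can be issued
    # either via the MDS admin socket or via "ceph tell mds.<id> cache drop
    # <timeout>"; both paths are exercised.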
    def _run_drop_cache_cmd(self, timeout, use_tell):
        drop_res = None
        if use_tell:
            mds_id = self.fs.get_lone_mds_id()
            drop_res = json.loads(
                self.fs.mon_manager.raw_cluster_cmd("tell", "mds.{0}".format(mds_id),
                                                    "cache", "drop", str(timeout)))
        else:
            drop_res = self.fs.mds_asok(["cache", "drop", str(timeout)])
        return drop_res

    def _drop_cache_command(self, timeout, use_tell=True):
        self.mount_b.umount_wait()
        ls_data = self.fs.mds_asok(['session', 'ls'])
        self.assert_session_count(1, ls_data)

        # create some files
        self.mount_a.create_n_files("dc-dir/dc-file", 1000)
        # drop cache
        drop_res = self._run_drop_cache_cmd(timeout, use_tell)

        self.assertEqual(drop_res['client_recall']['return_code'], 0)
        self.assertEqual(drop_res['flush_journal']['return_code'], 0)

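    # Variant with a dead cap-holding client: the client recall step of
    # "cache drop" is expected to time out (-ETIMEDOUT) while the journal
    # flush still succeeds.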
    def _drop_cache_command_timeout(self, timeout, use_tell=True):
        self.mount_b.umount_wait()
        ls_data = self.fs.mds_asok(['session', 'ls'])
        self.assert_session_count(1, ls_data)

        # create some files
        self.mount_a.create_n_files("dc-dir/dc-file-t", 1000)

        # simulate client death and try drop cache
        self.mount_a.kill()
        drop_res = self._run_drop_cache_cmd(timeout, use_tell)

        self.assertEqual(drop_res['client_recall']['return_code'], -errno.ETIMEDOUT)
        self.assertEqual(drop_res['flush_journal']['return_code'], 0)

        self.mount_a.kill_cleanup()
        self.mount_a.mount()
        self.mount_a.wait_until_mounted()

    def test_drop_cache_command_asok(self):
        """
        Basic test for the drop cache command using the admin socket.
        Note that the cache size post trimming is not checked here.
        """
        self._drop_cache_command(10, use_tell=False)

    def test_drop_cache_command_tell(self):
        """
        Basic test for the drop cache command using the tell interface.
        Note that the cache size post trimming is not checked here.
        """
        self._drop_cache_command(10)

    def test_drop_cache_command_timeout_asok(self):
        """
        Check the drop cache command with a non-responding client using the
        admin socket. Note that the cache size post trimming is not checked
        here.
        """
        self._drop_cache_command_timeout(5, use_tell=False)

    def test_drop_cache_command_timeout_tell(self):
        """
        Check the drop cache command with a non-responding client using the
        tell interface. Note that the cache size post trimming is not checked
        here.
        """
        self._drop_cache_command_timeout(5)