from tasks.cephfs.fuse_mount import FuseMount
from tasks.cephfs.cephfs_test_case import CephFSTestCase
from teuthology.orchestra.run import CommandFailedError, ConnectionLostError
import errno
import time
import json
import logging

log = logging.getLogger(__name__)

class TestMisc(CephFSTestCase):
    CLIENTS_REQUIRED = 2

    def test_getattr_caps(self):
        """
        Check that the MDS recognizes the 'mask' parameter of the open
        request. The parameter allows the client to request caps when
        opening a file.
        """

        if not isinstance(self.mount_a, FuseMount):
            self.skipTest("Require FUSE client")

        # Enable debug. The client will request CEPH_CAP_XATTR_SHARED
        # on lookup/open
        self.mount_b.umount_wait()
        self.set_conf('client', 'client debug getattr caps', 'true')
        self.mount_b.mount()
        self.mount_b.wait_until_mounted()

        # create a file and hold it open. The MDS will issue CEPH_CAP_EXCL_*
        # to mount_a
        p = self.mount_a.open_background("testfile")
        self.mount_b.wait_for_visible("testfile")

        # this triggers a lookup request and an open request. The debug
        # code will check if the lookup/open reply contains xattrs
        self.mount_b.run_shell(["cat", "testfile"])

        self.mount_a.kill_background(p)

    def test_root_rctime(self):
        """
        Check that the root inode has a non-default rctime on startup.
        """

        t = time.time()
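        # ceph.dir.rctime is the recursive ctime: the newest ctime of any
        # inode in the subtree below (and including) this directory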
        rctime = self.mount_a.getfattr(".", "ceph.dir.rctime")
        log.info("rctime = {}".format(rctime))
        self.assertGreaterEqual(rctime, t - 10)

    def test_fs_new(self):
        self.mount_a.umount_wait()
        self.mount_b.umount_wait()

        data_pool_name = self.fs.get_data_pool_name()

        self.fs.mds_stop()
        self.fs.mds_fail()

        self.fs.mon_manager.raw_cluster_cmd('fs', 'rm', self.fs.name,
                                            '--yes-i-really-mean-it')

        self.fs.mon_manager.raw_cluster_cmd('osd', 'pool', 'delete',
                                            self.fs.metadata_pool_name,
                                            self.fs.metadata_pool_name,
                                            '--yes-i-really-really-mean-it')
        self.fs.mon_manager.raw_cluster_cmd('osd', 'pool', 'create',
                                            self.fs.metadata_pool_name,
                                            str(self.fs.pgs_per_fs_pool))

        dummyfile = '/etc/fstab'

        self.fs.put_metadata_object_raw("key", dummyfile)

        def get_pool_df(fs, name):
            try:
                return fs.get_pool_df(name)['objects'] > 0
            except RuntimeError:
                return False

        self.wait_until_true(lambda: get_pool_df(self.fs, self.fs.metadata_pool_name), timeout=30)

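        # 'fs new' on a metadata pool that already contains objects must be
        # rejected with EINVAL unless --force is supplied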
        try:
            self.fs.mon_manager.raw_cluster_cmd('fs', 'new', self.fs.name,
                                                self.fs.metadata_pool_name,
                                                data_pool_name)
        except CommandFailedError as e:
            self.assertEqual(e.exitstatus, errno.EINVAL)
        else:
            raise AssertionError("Expected EINVAL")

        self.fs.mon_manager.raw_cluster_cmd('fs', 'new', self.fs.name,
                                            self.fs.metadata_pool_name,
                                            data_pool_name, "--force")

        self.fs.mon_manager.raw_cluster_cmd('fs', 'rm', self.fs.name,
                                            '--yes-i-really-mean-it')

        self.fs.mon_manager.raw_cluster_cmd('osd', 'pool', 'delete',
                                            self.fs.metadata_pool_name,
                                            self.fs.metadata_pool_name,
                                            '--yes-i-really-really-mean-it')
        self.fs.mon_manager.raw_cluster_cmd('osd', 'pool', 'create',
                                            self.fs.metadata_pool_name,
                                            str(self.fs.pgs_per_fs_pool))
        self.fs.mon_manager.raw_cluster_cmd('fs', 'new', self.fs.name,
                                            self.fs.metadata_pool_name,
                                            data_pool_name)

    def test_cap_revoke_nonresponder(self):
        """
        Check that a client is evicted if it has not responded to a cap
        revoke request for the configured number of seconds.
        """
        session_timeout = self.fs.get_var("session_timeout")
        eviction_timeout = session_timeout / 2.0

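        # evict clients that ignore a cap revoke for longer than half the
        # session timeout, so auto-eviction fires well before the session
        # would otherwise be marked stale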
        self.fs.mds_asok(['config', 'set', 'mds_cap_revoke_eviction_timeout',
                          str(eviction_timeout)])

        cap_holder = self.mount_a.open_background()

        # Wait for the file to be visible from another client, indicating
        # that mount_a has completed its network ops
        self.mount_b.wait_for_visible()

        # Simulate client death
        self.mount_a.kill()

        try:
            # The waiter should get stuck waiting for the capability
            # held on the MDS by the now-dead client A
            cap_waiter = self.mount_b.write_background()

            a = time.time()
            time.sleep(eviction_timeout)
            cap_waiter.wait()
            b = time.time()
            cap_waited = b - a
            log.info("cap_waiter waited {0}s".format(cap_waited))

            # check that the cap is transferred before the session timeout
            # kicks in. This is a good enough check to ensure that the client
            # got evicted by the cap auto-evicter rather than transitioning
            # to the stale state and then getting evicted.
            self.assertLess(cap_waited, session_timeout,
                            "Capability handover took {0}, expected less than {1}".format(
                                cap_waited, session_timeout
                            ))

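            # eviction should have blacklisted the dead client on the OSDs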
            self.assertTrue(self.mount_a.is_blacklisted())
            cap_holder.stdin.close()
            try:
                cap_holder.wait()
            except (CommandFailedError, ConnectionLostError):
                # We killed it (and possibly its node), so it raises an error
                pass
        finally:
            self.mount_a.kill_cleanup()

        self.mount_a.mount()
        self.mount_a.wait_until_mounted()

    def test_filtered_df(self):
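        # the pool's 'max_avail' already accounts for the replication factor,
        # so it should roughly match the free space that df reports inside
        # the mount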
        pool_name = self.fs.get_data_pool_name()
        raw_df = self.fs.get_pool_df(pool_name)
        raw_avail = float(raw_df["max_avail"])
        out = self.fs.mon_manager.raw_cluster_cmd('osd', 'pool', 'get',
                                                  pool_name, 'size',
                                                  '-f', 'json-pretty')
        _ = json.loads(out)

        proc = self.mount_a.run_shell(['df', '.'])
        output = proc.stdout.getvalue()
        fs_avail = output.split('\n')[1].split()[3]
        fs_avail = float(fs_avail) * 1024

        ratio = raw_avail / fs_avail
        assert 0.9 < ratio < 1.1

    def test_dump_inode(self):
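        # inode number 1 is the root directory of the filesystem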
        info = self.fs.mds_asok(['dump', 'inode', '1'])
        assert info['path'] == "/"

    def test_dump_inode_hexademical(self):
        self.mount_a.run_shell(["mkdir", "-p", "foo"])
        ino = self.mount_a.path_to_ino("foo")
        assert type(ino) is int
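        # 'dump inode' should also accept the inode number in hexadecimal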
        info = self.fs.mds_asok(['dump', 'inode', hex(ino)])
        assert info['path'] == "/foo"


class TestCacheDrop(CephFSTestCase):
    CLIENTS_REQUIRED = 1

    def _run_drop_cache_cmd(self, timeout=None):
        result = None
        mds_id = self.fs.get_lone_mds_id()
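        # 'cache drop' is sent over the tell interface; its output is a JSON
        # summary of the client_recall, flush_journal and trim_cache phases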
        if timeout is not None:
            result = self.fs.mon_manager.raw_cluster_cmd("tell", "mds.{0}".format(mds_id),
                                                         "cache", "drop", str(timeout))
        else:
            result = self.fs.mon_manager.raw_cluster_cmd("tell", "mds.{0}".format(mds_id),
                                                         "cache", "drop")
        return json.loads(result)

    def _setup(self, max_caps=20, threshold=400):
        # create some files
        self.mount_a.create_n_files("dc-dir/dc-file", 1000, sync=True)

        # Reduce these so the MDS doesn't recall the maximum for simple tests
        self.fs.rank_asok(['config', 'set', 'mds_recall_max_caps', str(max_caps)])
        self.fs.rank_asok(['config', 'set', 'mds_recall_max_decay_threshold', str(threshold)])

    def test_drop_cache_command(self):
        """
        Basic test of the drop cache command.
        Confirm it completes when no timeout is given.
        Note that the cache size after trimming is not checked here.
        """
        mds_min_caps_per_client = int(self.fs.get_config("mds_min_caps_per_client"))
        self._setup()
        result = self._run_drop_cache_cmd()
        self.assertEqual(result['client_recall']['return_code'], 0)
        self.assertEqual(result['flush_journal']['return_code'], 0)
        # It should take at least 1 second
        self.assertGreater(result['duration'], 1)
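        # most of the caps for the 1000 files created by _setup() should have
        # been recalled and the corresponding cache entries trimmed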
        self.assertGreaterEqual(result['trim_cache']['trimmed'], 1000 - 2 * mds_min_caps_per_client)

    def test_drop_cache_command_timeout(self):
        """
        Basic test of the drop cache command.
        Confirm recall halts early via a timeout.
        Note that the cache size after trimming is not checked here.
        """
        self._setup()
        result = self._run_drop_cache_cmd(timeout=10)
        self.assertEqual(result['client_recall']['return_code'], -errno.ETIMEDOUT)
        self.assertEqual(result['flush_journal']['return_code'], 0)
        self.assertGreater(result['duration'], 10)
        self.assertGreaterEqual(result['trim_cache']['trimmed'], 100)  # we did something, right?

    def test_drop_cache_command_dead_timeout(self):
        """
        Check the drop cache command with a non-responding client, using the
        tell interface. Note that the cache size after trimming is not
        checked here.
        """
        self._setup()
        self.mount_a.kill()
        # Note: recall is subject to the timeout. The journal flush will
        # be delayed due to the client being dead.
        result = self._run_drop_cache_cmd(timeout=5)
        self.assertEqual(result['client_recall']['return_code'], -errno.ETIMEDOUT)
        self.assertEqual(result['flush_journal']['return_code'], 0)
        self.assertGreater(result['duration'], 5)
        self.assertLess(result['duration'], 120)
        # Note: result['trim_cache']['trimmed'] may be >0 because dropping the
        # cache now causes the Locker to drive eviction of stale clients (a
        # stale session will be autoclosed at mdsmap['session_timeout']). The
        # particular operation causing this is the journal flush, which makes
        # the MDS wait for the cap revoke.
        #self.assertEqual(0, result['trim_cache']['trimmed'])
        self.mount_a.kill_cleanup()
        self.mount_a.mount()
        self.mount_a.wait_until_mounted()

    def test_drop_cache_command_dead(self):
        """
        Check the drop cache command with a non-responding client, using the
        tell interface. Note that the cache size after trimming is not
        checked here.
        """
        self._setup()
        self.mount_a.kill()
        result = self._run_drop_cache_cmd()
        self.assertEqual(result['client_recall']['return_code'], 0)
        self.assertEqual(result['flush_journal']['return_code'], 0)
        self.assertGreater(result['duration'], 5)
        self.assertLess(result['duration'], 120)
        # Note: result['trim_cache']['trimmed'] may be >0 because dropping the
        # cache now causes the Locker to drive eviction of stale clients (a
        # stale session will be autoclosed at mdsmap['session_timeout']). The
        # particular operation causing this is the journal flush, which makes
        # the MDS wait for the cap revoke.
        self.mount_a.kill_cleanup()
        self.mount_a.mount()
        self.mount_a.wait_until_mounted()