from io import StringIO

from tasks.cephfs.fuse_mount import FuseMount
from tasks.cephfs.cephfs_test_case import CephFSTestCase
from teuthology.orchestra.run import CommandFailedError
import errno
import time
import json
import logging

log = logging.getLogger(__name__)

class TestMisc(CephFSTestCase):
    CLIENTS_REQUIRED = 2

    def test_statfs_on_deleted_fs(self):
        """
        That statfs does not cause monitors to SIGSEGV after fs deletion.
        """

        self.mount_b.umount_wait()
        self.mount_a.run_shell_payload("stat -f .")
        self.fs.delete_all_filesystems()
        # This will hang either way, run in background.
        p = self.mount_a.run_shell_payload("stat -f .", wait=False, timeout=60, check_status=False)
        time.sleep(30)
        self.assertFalse(p.finished)
        # the process is stuck in uninterruptible sleep, just kill the mount
        self.mount_a.umount_wait(force=True)
        p.wait()

    def test_getattr_caps(self):
33 """
34 Check if MDS recognizes the 'mask' parameter of open request.
35 The parameter allows client to request caps when opening file
36 """
37
38 if not isinstance(self.mount_a, FuseMount):
39 self.skipTest("Require FUSE client")
40
41 # Enable debug. Client will requests CEPH_CAP_XATTR_SHARED
42 # on lookup/open
43 self.mount_b.umount_wait()
44 self.set_conf('client', 'client debug getattr caps', 'true')
45 self.mount_b.mount_wait()
46
47 # create a file and hold it open. MDS will issue CEPH_CAP_EXCL_*
48 # to mount_a
49 p = self.mount_a.open_background("testfile")
50 self.mount_b.wait_for_visible("testfile")
51
52 # this triggers a lookup request and an open request. The debug
53 # code will check if lookup/open reply contains xattrs
54 self.mount_b.run_shell(["cat", "testfile"])
55
56 self.mount_a.kill_background(p)
57
58 def test_root_rctime(self):
59 """
60 Check that the root inode has a non-default rctime on startup.
61 """
62
63 t = time.time()
64 rctime = self.mount_a.getfattr(".", "ceph.dir.rctime")
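        # Roughly the shell equivalent on the mount:
        #   getfattr -n ceph.dir.rctime .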
        log.info("rctime = {}".format(rctime))
        self.assertGreaterEqual(float(rctime), t - 10)

    def test_fs_new(self):
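        """
        Check that 'fs new' refuses a metadata pool that already contains
        objects unless --force is given, and accepts a freshly created,
        empty pool afterwards.
        """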
        self.mount_a.umount_wait()
        self.mount_b.umount_wait()

        data_pool_name = self.fs.get_data_pool_name()

        self.fs.fail()

        self.fs.mon_manager.raw_cluster_cmd('fs', 'rm', self.fs.name,
                                            '--yes-i-really-mean-it')

        self.fs.mon_manager.raw_cluster_cmd('osd', 'pool', 'delete',
                                            self.fs.metadata_pool_name,
                                            self.fs.metadata_pool_name,
                                            '--yes-i-really-really-mean-it')
        self.fs.mon_manager.raw_cluster_cmd('osd', 'pool', 'create',
                                            self.fs.metadata_pool_name,
                                            '--pg_num_min', str(self.fs.pg_num_min))

        # insert a garbage object
        self.fs.radosm(["put", "foo", "-"], stdin=StringIO("bar"))

        def get_pool_df(fs, name):
            try:
                return fs.get_pool_df(name)['objects'] > 0
            except RuntimeError:
                return False

        self.wait_until_true(lambda: get_pool_df(self.fs, self.fs.metadata_pool_name), timeout=30)

        try:
            self.fs.mon_manager.raw_cluster_cmd('fs', 'new', self.fs.name,
                                                self.fs.metadata_pool_name,
                                                data_pool_name)
        except CommandFailedError as e:
            self.assertEqual(e.exitstatus, errno.EINVAL)
        else:
            raise AssertionError("Expected EINVAL")
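        # The command above corresponds to the CLI form
        #   ceph fs new <fs_name> <metadata_pool> <data_pool>
        # and is expected to fail with EINVAL because the metadata pool is
        # not empty; the retry below passes --force to override that check.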

        self.fs.mon_manager.raw_cluster_cmd('fs', 'new', self.fs.name,
                                            self.fs.metadata_pool_name,
                                            data_pool_name, "--force")

        self.fs.mon_manager.raw_cluster_cmd('fs', 'fail', self.fs.name)

        self.fs.mon_manager.raw_cluster_cmd('fs', 'rm', self.fs.name,
                                            '--yes-i-really-mean-it')

        self.fs.mon_manager.raw_cluster_cmd('osd', 'pool', 'delete',
                                            self.fs.metadata_pool_name,
                                            self.fs.metadata_pool_name,
                                            '--yes-i-really-really-mean-it')
        self.fs.mon_manager.raw_cluster_cmd('osd', 'pool', 'create',
                                            self.fs.metadata_pool_name,
                                            '--pg_num_min', str(self.fs.pg_num_min))
        self.fs.mon_manager.raw_cluster_cmd('fs', 'new', self.fs.name,
                                            self.fs.metadata_pool_name,
                                            data_pool_name)

    def test_cap_revoke_nonresponder(self):
        """
        Check that a client is evicted if it has not responded to a cap
        revoke request for the configured number of seconds.
        """
        session_timeout = self.fs.get_var("session_timeout")
        eviction_timeout = session_timeout / 2.0

        self.fs.mds_asok(['config', 'set', 'mds_cap_revoke_eviction_timeout',
                          str(eviction_timeout)])
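        # Roughly the admin-socket equivalent of:
        #   ceph daemon mds.<id> config set mds_cap_revoke_eviction_timeout <seconds>
        # Using half the session timeout means cap-revoke auto-eviction fires
        # well before the stale-session path would kick in.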

        cap_holder = self.mount_a.open_background()

        # Wait for the file to be visible from another client, indicating
        # that mount_a has completed its network ops
        self.mount_b.wait_for_visible()

        # Simulate client death
        self.mount_a.suspend_netns()

        try:
            # The waiter should get stuck waiting for the capability
            # held on the MDS by the now-dead client A
            cap_waiter = self.mount_b.write_background()

            a = time.time()
            time.sleep(eviction_timeout)
            cap_waiter.wait()
            b = time.time()
            cap_waited = b - a
            log.info("cap_waiter waited {0}s".format(cap_waited))

            # Check that the cap is transferred before the session timeout
            # kicks in. This is a good enough check to ensure that the client
            # got evicted by the cap auto-evicter rather than transitioning to
            # the stale state and then getting evicted.
            self.assertLess(cap_waited, session_timeout,
                            "Capability handover took {0}, expected less than {1}".format(
                                cap_waited, session_timeout
                            ))

            self.assertTrue(self.mds_cluster.is_addr_blocklisted(
                self.mount_a.get_global_addr()))
            self.mount_a._kill_background(cap_holder)
        finally:
            self.mount_a.resume_netns()

    def test_filtered_df(self):
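        """
        Check that the free space reported by df(1) on the mount agrees
        (within 10%) with the data pool's max_avail as reported by the
        cluster.
        """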
        pool_name = self.fs.get_data_pool_name()
        raw_df = self.fs.get_pool_df(pool_name)
        raw_avail = float(raw_df["max_avail"])
        out = self.fs.mon_manager.raw_cluster_cmd('osd', 'pool', 'get',
                                                  pool_name, 'size',
                                                  '-f', 'json-pretty')
        _ = json.loads(out)

        proc = self.mount_a.run_shell(['df', '.'])
        output = proc.stdout.getvalue()
        fs_avail = output.split('\n')[1].split()[3]
        fs_avail = float(fs_avail) * 1024

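        # Illustrative numbers: if df reports 10485760 KiB available, fs_avail
        # becomes 10737418240 bytes; with a pool max_avail of ~10 GiB the
        # ratio is ~1.0, well within the 10% tolerance asserted below.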
        ratio = raw_avail / fs_avail
        assert 0.9 < ratio < 1.1

    def test_dump_inode(self):
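        # mds_asok drives the MDS admin socket, roughly:
        #   ceph daemon mds.<id> dump inode 1
        # Inode 1 is the CephFS root inode, so its path should be "/".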
        info = self.fs.mds_asok(['dump', 'inode', '1'])
        assert(info['path'] == "/")

    def test_dump_inode_hexademical(self):
        self.mount_a.run_shell(["mkdir", "-p", "foo"])
        ino = self.mount_a.path_to_ino("foo")
        assert type(ino) is int
        info = self.fs.mds_asok(['dump', 'inode', hex(ino)])
        assert info['path'] == "/foo"


class TestCacheDrop(CephFSTestCase):
    CLIENTS_REQUIRED = 1

    def _run_drop_cache_cmd(self, timeout=None):
        result = None
        args = ["cache", "drop"]
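        # This drives the MDS tell interface, roughly:
        #   ceph tell mds.<rank> cache drop [<timeout>]
        # The reply is a JSON report with 'client_recall', 'flush_journal',
        # 'trim_cache' and 'duration' sections that the tests below assert on.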
        if timeout is not None:
            args.append(str(timeout))
        result = self.fs.rank_tell(args)
        return result

    def _setup(self, max_caps=20, threshold=400):
        # create some files
        self.mount_a.create_n_files("dc-dir/dc-file", 1000, sync=True)

        # Reduce this so the MDS doesn't recall the maximum for simple tests
        self.fs.rank_asok(['config', 'set', 'mds_recall_max_caps', str(max_caps)])
        self.fs.rank_asok(['config', 'set', 'mds_recall_max_decay_threshold', str(threshold)])

    def test_drop_cache_command(self):
        """
        Basic test for checking the drop cache command.
        Confirm it halts without a timeout.
        Note that the cache size post trimming is not checked here.
        """
        mds_min_caps_per_client = int(self.fs.get_config("mds_min_caps_per_client"))
        self._setup()
        result = self._run_drop_cache_cmd()
        self.assertEqual(result['client_recall']['return_code'], 0)
        self.assertEqual(result['flush_journal']['return_code'], 0)
        # It should take at least 1 second
        self.assertGreater(result['duration'], 1)
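        # _setup() created 1000 files, so roughly that many cache entries
        # should be recallable; the MDS keeps at least mds_min_caps_per_client
        # caps per client, so the check below leaves 2*mds_min_caps_per_client
        # of slack rather than expecting all 1000 to be trimmed.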
        self.assertGreaterEqual(result['trim_cache']['trimmed'], 1000-2*mds_min_caps_per_client)

    def test_drop_cache_command_timeout(self):
        """
        Basic test for checking the drop cache command.
        Confirm recall halts early via a timeout.
        Note that the cache size post trimming is not checked here.
        """
        self._setup()
        result = self._run_drop_cache_cmd(timeout=10)
        self.assertEqual(result['client_recall']['return_code'], -errno.ETIMEDOUT)
        self.assertEqual(result['flush_journal']['return_code'], 0)
        self.assertGreater(result['duration'], 10)
        self.assertGreaterEqual(result['trim_cache']['trimmed'], 100) # we did something, right?

    def test_drop_cache_command_dead_timeout(self):
251 """
252 Check drop cache command with non-responding client using tell
253 interface. Note that the cache size post trimming is not checked
254 here.
255 """
256 self._setup()
257 self.mount_a.suspend_netns()
258 # Note: recall is subject to the timeout. The journal flush will
259 # be delayed due to the client being dead.
260 result = self._run_drop_cache_cmd(timeout=5)
261 self.assertEqual(result['client_recall']['return_code'], -errno.ETIMEDOUT)
262 self.assertEqual(result['flush_journal']['return_code'], 0)
263 self.assertGreater(result['duration'], 5)
264 self.assertLess(result['duration'], 120)
265 # Note: result['trim_cache']['trimmed'] may be >0 because dropping the
266 # cache now causes the Locker to drive eviction of stale clients (a
267 # stale session will be autoclosed at mdsmap['session_timeout']). The
268 # particular operation causing this is journal flush which causes the
269 # MDS to wait wait for cap revoke.
270 #self.assertEqual(0, result['trim_cache']['trimmed'])
271 self.mount_a.resume_netns()
272
    def test_drop_cache_command_dead(self):
        """
        Check the drop cache command with a non-responding client, using the
        tell interface. Note that the cache size post trimming is not checked
        here.
        """
        self._setup()
        self.mount_a.suspend_netns()
        result = self._run_drop_cache_cmd()
        self.assertEqual(result['client_recall']['return_code'], 0)
        self.assertEqual(result['flush_journal']['return_code'], 0)
        self.assertGreater(result['duration'], 5)
        self.assertLess(result['duration'], 120)
        # Note: result['trim_cache']['trimmed'] may be >0 because dropping the
        # cache now causes the Locker to drive eviction of stale clients (a
        # stale session will be autoclosed at mdsmap['session_timeout']). The
        # particular operation causing this is the journal flush, which causes
        # the MDS to wait for cap revoke.
        self.mount_a.resume_netns()