# ceph/qa/tasks/cephfs/test_misc.py
from unittest import SkipTest
from tasks.cephfs.fuse_mount import FuseMount
from tasks.cephfs.cephfs_test_case import CephFSTestCase
from teuthology.orchestra.run import CommandFailedError, ConnectionLostError

import errno
import time
import json
import logging

log = logging.getLogger(__name__)


class TestMisc(CephFSTestCase):
    CLIENTS_REQUIRED = 2

    def test_getattr_caps(self):
        """
        Check that the MDS recognizes the 'mask' parameter of an open
        request. The parameter allows the client to request caps when
        opening a file.
        """

        if not isinstance(self.mount_a, FuseMount):
            raise SkipTest("Require FUSE client")

        # Enable debug: the client will request CEPH_CAP_XATTR_SHARED
        # on lookup/open. The config is picked up at mount time, hence
        # the remount of mount_b.
        self.mount_b.umount_wait()
        self.set_conf('client', 'client debug getattr caps', 'true')
        self.mount_b.mount()
        self.mount_b.wait_until_mounted()

        # Create a file and hold it open. The MDS will issue CEPH_CAP_EXCL_*
        # caps to mount_a.
        p = self.mount_a.open_background("testfile")
        self.mount_b.wait_for_visible("testfile")

        # This triggers a lookup request and an open request. The debug
        # code will check whether the lookup/open reply contains xattrs.
        self.mount_b.run_shell(["cat", "testfile"])

        self.mount_a.kill_background(p)

    def test_root_rctime(self):
        """
        Check that the root inode has a non-default rctime on startup.
        """

        t = time.time()
        rctime = self.mount_a.getfattr(".", "ceph.dir.rctime")
        log.info("rctime = {}".format(rctime))
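
        # ceph.dir.rctime is the recursive ctime vxattr; from a shell on a
        # client mount it can be read with (illustrative mount path):
        #     getfattr -n ceph.dir.rctime /mnt/cephfs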
        # getfattr() returns a string, so convert before comparing.
        self.assertGreaterEqual(float(rctime), t - 10)

    def test_fs_new(self):
        self.mount_a.umount_wait()
        self.mount_b.umount_wait()

        data_pool_name = self.fs.get_data_pool_name()

        # Stop the MDS before tearing the filesystem down.
        self.fs.mds_stop()
        self.fs.mds_fail()

        self.fs.mon_manager.raw_cluster_cmd('fs', 'rm', self.fs.name,
                                            '--yes-i-really-mean-it')

        self.fs.mon_manager.raw_cluster_cmd('osd', 'pool', 'delete',
                                            self.fs.metadata_pool_name,
                                            self.fs.metadata_pool_name,
                                            '--yes-i-really-really-mean-it')
        self.fs.mon_manager.raw_cluster_cmd('osd', 'pool', 'create',
                                            self.fs.metadata_pool_name,
                                            str(self.fs.get_pgs_per_fs_pool()))

        dummyfile = '/etc/fstab'

        self.fs.put_metadata_object_raw("key", dummyfile)
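        # Writing any object into the new metadata pool makes it non-empty,
        # which is exactly the condition the 'fs new' safety check below
        # trips on.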

        def get_pool_df(fs, name):
            try:
                return fs.get_pool_df(name)['objects'] > 0
            except RuntimeError:
                return False

        # Pool stats are updated asynchronously, so poll until the object
        # shows up.
        self.wait_until_true(lambda: get_pool_df(self.fs, self.fs.metadata_pool_name),
                             timeout=30)

        try:
            self.fs.mon_manager.raw_cluster_cmd('fs', 'new', self.fs.name,
                                                self.fs.metadata_pool_name,
                                                data_pool_name)
        except CommandFailedError as e:
            self.assertEqual(e.exitstatus, errno.EINVAL)
        else:
            raise AssertionError("Expected EINVAL")
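
        # With "--force", the in-use-pool check is overridden and the same
        # command succeeds.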
        self.fs.mon_manager.raw_cluster_cmd('fs', 'new', self.fs.name,
                                            self.fs.metadata_pool_name,
                                            data_pool_name, "--force")

        self.fs.mon_manager.raw_cluster_cmd('fs', 'rm', self.fs.name,
                                            '--yes-i-really-mean-it')

        self.fs.mon_manager.raw_cluster_cmd('osd', 'pool', 'delete',
                                            self.fs.metadata_pool_name,
                                            self.fs.metadata_pool_name,
                                            '--yes-i-really-really-mean-it')
        self.fs.mon_manager.raw_cluster_cmd('osd', 'pool', 'create',
                                            self.fs.metadata_pool_name,
                                            str(self.fs.get_pgs_per_fs_pool()))
        # A freshly created (empty) metadata pool is accepted without --force.
        self.fs.mon_manager.raw_cluster_cmd('fs', 'new', self.fs.name,
                                            self.fs.metadata_pool_name,
                                            data_pool_name)

    def test_evict_client(self):
        """
        Check that a slow client session won't get evicted if it's the
        only session.
        """

        session_autoclose = self.fs.get_var("session_autoclose")

        self.mount_b.umount_wait()
        ls_data = self.fs.mds_asok(['session', 'ls'])
        self.assert_session_count(1, ls_data)

        mount_a_client_id = self.mount_a.get_global_id()

        self.mount_a.kill()
        self.mount_a.kill_cleanup()

        # The sole remaining session must survive well past session_autoclose.
        time.sleep(session_autoclose * 1.5)
        ls_data = self.fs.mds_asok(['session', 'ls'])
        self.assert_session_count(1, ls_data)

        self.fs.mds_asok(['session', 'evict', "%s" % mount_a_client_id])
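        # This asok call is roughly `ceph daemon mds.<id> session evict
        # <client_id>` from a shell (sketch; <id> and <client_id> are
        # placeholders for the MDS name and the client's global id).
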
        self.mount_a.mount()
        self.mount_a.wait_until_mounted()
        self.mount_b.mount()
        self.mount_b.wait_until_mounted()

        ls_data = self._session_list()
        self.assert_session_count(2, ls_data)

        # With a second session present, the dead client is auto-evicted
        # after session_autoclose.
        self.mount_a.kill()
        self.mount_a.kill_cleanup()

        time.sleep(session_autoclose * 1.5)
        ls_data = self.fs.mds_asok(['session', 'ls'])
        self.assert_session_count(1, ls_data)

    def test_cap_revoke_nonresponder(self):
        """
        Check that a client is evicted if it has not responded to a cap
        revoke request for the configured number of seconds.
        """

        session_timeout = self.fs.get_var("session_timeout")
        eviction_timeout = session_timeout / 2.0

        self.fs.mds_asok(['config', 'set', 'mds_cap_revoke_eviction_timeout',
                          str(eviction_timeout)])
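        # mds_cap_revoke_eviction_timeout is how long the MDS waits for a
        # client to respond to a cap revoke before evicting it; it is set to
        # half the session timeout so that cap-revoke eviction demonstrably
        # fires before stale-session handling would.
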
        cap_holder = self.mount_a.open_background()

        # Wait for the file to be visible from another client, indicating
        # that mount_a has completed its network ops.
        self.mount_b.wait_for_visible()

        # Simulate client death.
        self.mount_a.kill()

        # The waiter should get stuck waiting for the capability
        # held on the MDS by the now-dead client A.
        cap_waiter = self.mount_b.write_background()

        a = time.time()
        time.sleep(eviction_timeout)
        cap_waiter.wait()
        b = time.time()
        cap_waited = b - a
        log.info("cap_waiter waited {0}s".format(cap_waited))

        # Check that the cap was transferred before the session timeout
        # kicked in. This is a good enough check to ensure that the client
        # was evicted by the cap auto-evicter rather than transitioning to
        # the stale state and then getting evicted.
        self.assertLess(cap_waited, session_timeout,
                        "Capability handover took {0}, expected less than {1}".format(
                            cap_waited, session_timeout
                        ))

        self.assertTrue(self.mount_a.is_blacklisted())
        cap_holder.stdin.close()
        try:
            cap_holder.wait()
        except (CommandFailedError, ConnectionLostError):
            # We killed it (and possibly its node), so it raises an error.
            pass

        self.mount_a.kill_cleanup()

        self.mount_a.mount()
        self.mount_a.wait_until_mounted()

    def test_filtered_df(self):
        pool_name = self.fs.get_data_pool_name()
        raw_df = self.fs.get_pool_df(pool_name)
        raw_avail = float(raw_df["max_avail"])
        out = self.fs.mon_manager.raw_cluster_cmd('osd', 'pool', 'get',
                                                  pool_name, 'size',
                                                  '-f', 'json-pretty')
        j = json.loads(out)
        pool_size = int(j['size'])

        proc = self.mount_a.run_shell(['df', '.'])
        output = proc.stdout.getvalue()
        fs_avail = output.split('\n')[1].split()[3]
        fs_avail = float(fs_avail) * 1024
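
        # The df output parsed above looks like (illustrative numbers; token
        # [3] of line [1] is the Available column, reported in 1K blocks):
        #     Filesystem     1K-blocks   Used Available Use% Mounted on
        #     ceph-fuse       10485760 204800  10280960   2% /mnt/cephfs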
        ratio = raw_avail / fs_avail
        assert 0.9 < ratio < 1.1

    def test_dump_inode(self):
        info = self.fs.mds_asok(['dump', 'inode', '1'])
        assert info['path'] == "/"

    def test_dump_inode_hexadecimal(self):
        self.mount_a.run_shell(["mkdir", "-p", "foo"])
        ino = self.mount_a.path_to_ino("foo")
        assert type(ino) is int
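
        # hex() renders the inode with a '0x' prefix, e.g. hex(256) == '0x100';
        # 'dump inode' accepts this hexadecimal form as well.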
        info = self.fs.mds_asok(['dump', 'inode', hex(ino)])
        assert info['path'] == "/foo"


class TestCacheDrop(CephFSTestCase):
    CLIENTS_REQUIRED = 1

    def _run_drop_cache_cmd(self, timeout=None):
        mds_id = self.fs.get_lone_mds_id()
        if timeout is not None:
            result = self.fs.mon_manager.raw_cluster_cmd("tell", "mds.{0}".format(mds_id),
                                                         "cache", "drop", str(timeout))
        else:
            result = self.fs.mon_manager.raw_cluster_cmd("tell", "mds.{0}".format(mds_id),
                                                         "cache", "drop")
        return json.loads(result)

    def _setup(self, max_caps=20, threshold=400):
        # Create files for the client to hold caps on.
        self.mount_a.create_n_files("dc-dir/dc-file", 1000, sync=True)

        # Reduce this so the MDS doesn't recall the maximum for simple tests.
        self.fs.rank_asok(['config', 'set', 'mds_recall_max_caps', str(max_caps)])
        self.fs.rank_asok(['config', 'set', 'mds_recall_max_decay_threshold', str(threshold)])

    def test_drop_cache_command(self):
        """
        Basic test for checking drop cache command.
        Confirm it halts without a timeout.
        Note that the cache size post-trimming is not checked here.
        """
        mds_min_caps_per_client = int(self.fs.get_config("mds_min_caps_per_client"))
        self._setup()
        result = self._run_drop_cache_cmd()
        self.assertEqual(result['client_recall']['return_code'], 0)
        self.assertEqual(result['flush_journal']['return_code'], 0)
        # It should take at least 1 second.
        self.assertGreater(result['duration'], 1)
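        # _setup() created 1000 files, so the client held roughly 1000 caps;
        # after a full drop it should retain no more than about
        # 2 * mds_min_caps_per_client, hence the lower bound on trimmed caps.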
        self.assertGreaterEqual(result['trim_cache']['trimmed'],
                                1000 - 2 * mds_min_caps_per_client)

    def test_drop_cache_command_timeout(self):
        """
        Basic test for checking drop cache command.
        Confirm recall halts early via a timeout.
        Note that the cache size post-trimming is not checked here.
        """
        self._setup()
        result = self._run_drop_cache_cmd(timeout=10)
        self.assertEqual(result['client_recall']['return_code'], -errno.ETIMEDOUT)
        self.assertEqual(result['flush_journal']['return_code'], 0)
        self.assertGreater(result['duration'], 10)
        self.assertGreaterEqual(result['trim_cache']['trimmed'], 100)  # we did something, right?

    def test_drop_cache_command_dead_timeout(self):
        """
        Check drop cache command with a non-responding client using the tell
        interface. Note that the cache size post-trimming is not checked
        here.
        """
        self._setup()
        self.mount_a.kill()
        # Note: recall is subject to the timeout. The journal flush will
        # be delayed due to the client being dead.
        result = self._run_drop_cache_cmd(timeout=5)
        self.assertEqual(result['client_recall']['return_code'], -errno.ETIMEDOUT)
        self.assertEqual(result['flush_journal']['return_code'], 0)
        self.assertGreater(result['duration'], 5)
        self.assertLess(result['duration'], 120)
        self.assertEqual(0, result['trim_cache']['trimmed'])
        self.mount_a.kill_cleanup()
        self.mount_a.mount()
        self.mount_a.wait_until_mounted()

    def test_drop_cache_command_dead(self):
        """
        Check drop cache command with a non-responding client using the tell
        interface. Note that the cache size post-trimming is not checked
        here.
        """
        self._setup()
        self.mount_a.kill()
        result = self._run_drop_cache_cmd()
        self.assertEqual(result['client_recall']['return_code'], 0)
        self.assertEqual(result['flush_journal']['return_code'], 0)
        self.assertGreater(result['duration'], 5)
        self.assertLess(result['duration'], 120)
        self.assertEqual(0, result['trim_cache']['trimmed'])
        self.mount_a.kill_cleanup()
        self.mount_a.mount()
        self.mount_a.wait_until_mounted()