from unittest import SkipTest
from tasks.cephfs.fuse_mount import FuseMount
from tasks.cephfs.cephfs_test_case import CephFSTestCase
from teuthology.orchestra.run import CommandFailedError, ConnectionLostError
import errno
import time
import json
import logging

log = logging.getLogger(__name__)


class TestMisc(CephFSTestCase):
    # Both mount_a and mount_b are used by the tests below
    CLIENTS_REQUIRED = 2

    LOAD_SETTINGS = ["mds_session_autoclose"]
    mds_session_autoclose = None
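
    # Note: self.fs, self.mount_a and self.mount_b are assumed to be set up
    # by the CephFSTestCase base class; the tests below expect two mounted
    # clients against a single filesystem.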

    def test_getattr_caps(self):
        """
        Check that the MDS recognizes the 'mask' parameter of the open
        request. The parameter allows the client to request caps when
        opening a file.
        """

        if not isinstance(self.mount_a, FuseMount):
            raise SkipTest("Require FUSE client")

        # Enable debug. Client will request CEPH_CAP_XATTR_SHARED on
        # lookup/open.
        self.mount_b.umount_wait()
        self.set_conf('client', 'client debug getattr caps', 'true')
        self.mount_b.mount()
        self.mount_b.wait_until_mounted()

        # Create a file and hold it open. MDS will issue CEPH_CAP_EXCL_*
        p = self.mount_a.open_background("testfile")
        self.mount_b.wait_for_visible("testfile")

        # This triggers a lookup request and an open request. The debug
        # code will check whether the lookup/open reply contains xattrs.
        self.mount_b.run_shell(["cat", "testfile"])

        self.mount_a.kill_background(p)
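
    # test_fs_new exercises recreating the filesystem's metadata pool: once
    # the pool contains objects, 'fs new' is expected to fail with EINVAL
    # unless '--force' is supplied.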

    def test_fs_new(self):
        data_pool_name = self.fs.get_data_pool_name()

        # The MDS daemons must be stopped and marked failed before the
        # filesystem can be removed.
        self.fs.mds_stop()
        self.fs.mds_fail()

        self.fs.mon_manager.raw_cluster_cmd('fs', 'rm', self.fs.name,
                                            '--yes-i-really-mean-it')

        self.fs.mon_manager.raw_cluster_cmd('osd', 'pool', 'delete',
                                            self.fs.metadata_pool_name,
                                            self.fs.metadata_pool_name,
                                            '--yes-i-really-really-mean-it')
        self.fs.mon_manager.raw_cluster_cmd('osd', 'pool', 'create',
                                            self.fs.metadata_pool_name,
                                            self.fs.get_pgs_per_fs_pool().__str__())

        dummyfile = '/etc/fstab'

        self.fs.put_metadata_object_raw("key", dummyfile)

        def get_pool_df(fs, name):
            try:
                # The object written above makes the pool non-empty once
                # it lands.
                return fs.get_pool_df(name)['objects'] > 0
            except RuntimeError:
                return False

        self.wait_until_true(lambda: get_pool_df(self.fs, self.fs.metadata_pool_name),
                             timeout=30)

        try:
            self.fs.mon_manager.raw_cluster_cmd('fs', 'new', self.fs.name,
                                                self.fs.metadata_pool_name,
                                                data_pool_name)
        except CommandFailedError as e:
            self.assertEqual(e.exitstatus, errno.EINVAL)
        else:
            raise AssertionError("Expected EINVAL")

        self.fs.mon_manager.raw_cluster_cmd('fs', 'new', self.fs.name,
                                            self.fs.metadata_pool_name,
                                            data_pool_name, "--force")

        self.fs.mon_manager.raw_cluster_cmd('fs', 'rm', self.fs.name,
                                            '--yes-i-really-mean-it')

        # Recreate a fresh (empty) metadata pool; 'fs new' should now
        # succeed without --force.
        self.fs.mon_manager.raw_cluster_cmd('osd', 'pool', 'delete',
                                            self.fs.metadata_pool_name,
                                            self.fs.metadata_pool_name,
                                            '--yes-i-really-really-mean-it')
        self.fs.mon_manager.raw_cluster_cmd('osd', 'pool', 'create',
                                            self.fs.metadata_pool_name,
                                            self.fs.get_pgs_per_fs_pool().__str__())
        self.fs.mon_manager.raw_cluster_cmd('fs', 'new', self.fs.name,
                                            self.fs.metadata_pool_name,
                                            data_pool_name)
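
    # mds_session_autoclose (loaded via LOAD_SETTINGS above) is the MDS
    # timeout after which an unresponsive client session may be evicted;
    # the sleeps below wait 1.5x that value to give the MDS time to react.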

    def test_evict_client(self):
        """
        Check that a slow client session won't get evicted if it's the
        only session
        """

        self.mount_b.umount_wait()
        ls_data = self.fs.mds_asok(['session', 'ls'])
        self.assert_session_count(1, ls_data)

        self.mount_a.kill()
        self.mount_a.kill_cleanup()

        time.sleep(self.mds_session_autoclose * 1.5)
        ls_data = self.fs.mds_asok(['session', 'ls'])
        self.assert_session_count(1, ls_data)

        self.mount_a.mount()
        self.mount_a.wait_until_mounted()
        self.mount_b.mount()
        self.mount_b.wait_until_mounted()

        ls_data = self._session_list()
        self.assert_session_count(2, ls_data)

        self.mount_a.kill()
        self.mount_a.kill_cleanup()

        time.sleep(self.mds_session_autoclose * 1.5)
        ls_data = self.fs.mds_asok(['session', 'ls'])
        self.assert_session_count(1, ls_data)
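
    # mds_cap_revoke_eviction_timeout is how long the MDS waits for a client
    # to answer a cap revoke before evicting it. Setting it to half the
    # session timeout lets the test distinguish cap-revoke eviction from the
    # ordinary stale-session eviction path.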

    def test_cap_revoke_nonresponder(self):
        """
        Check that a client is evicted if it has not responded to cap revoke
        request for configured number of seconds.
        """
        session_timeout = self.fs.get_var("session_timeout")
        eviction_timeout = session_timeout / 2.0

        self.fs.mds_asok(['config', 'set', 'mds_cap_revoke_eviction_timeout',
                          str(eviction_timeout)])

        cap_holder = self.mount_a.open_background()

        # Wait for the file to be visible from another client, indicating
        # that mount_a has completed its network ops
        self.mount_b.wait_for_visible()

        # Simulate client death
        self.mount_a.kill()

        try:
            # The waiter should get stuck waiting for the capability
            # held on the MDS by the now-dead client A
            cap_waiter = self.mount_b.write_background()

            a = time.time()
            time.sleep(eviction_timeout)
            cap_waiter.wait()
            cap_waited = time.time() - a
            log.info("cap_waiter waited {0}s".format(cap_waited))

            # Check that the cap was transferred before the session timeout
            # kicked in. This is a good enough check to ensure that the
            # client got evicted by the cap auto-evicter rather than
            # transitioning to the stale state and then getting evicted.
            self.assertLess(cap_waited, session_timeout,
                            "Capability handover took {0}, expected less than {1}".format(
                                cap_waited, session_timeout
                            ))

            cap_holder.stdin.close()
            try:
                cap_holder.wait()
            except (CommandFailedError, ConnectionLostError):
                # We killed it (and possibly its node), so it raises an error
                pass
        finally:
            self.mount_a.kill_cleanup()

        self.mount_a.mount()
        self.mount_a.wait_until_mounted()
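
    # test_filtered_df compares the available space reported by 'df' inside
    # the mount against the data pool's max_avail from the cluster; the two
    # should agree to within 10%.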

    def test_filtered_df(self):
        pool_name = self.fs.get_data_pool_name()
        raw_df = self.fs.get_pool_df(pool_name)
        raw_avail = float(raw_df["max_avail"])
        out = self.fs.mon_manager.raw_cluster_cmd('osd', 'pool', 'get',
                                                  pool_name, 'size',
                                                  '-f', 'json-pretty')
        j = json.loads(out)
        pool_size = int(j['size'])

        proc = self.mount_a.run_shell(['df', '.'])
        output = proc.stdout.getvalue()
        # 'df' reports 1K blocks; the fourth column of the data row is the
        # available space.
        fs_avail = output.split('\n')[1].split()[3]
        fs_avail = float(fs_avail) * 1024

        ratio = raw_avail / fs_avail
        assert 0.9 < ratio < 1.1