import errno
import json
import logging
import time

from tasks.cephfs.fuse_mount import FuseMount
from tasks.cephfs.cephfs_test_case import CephFSTestCase
from teuthology.orchestra.run import CommandFailedError, ConnectionLostError

log = logging.getLogger(__name__)


class TestMisc(CephFSTestCase):
    def test_getattr_caps(self):
        """
        Check if the MDS recognizes the 'mask' parameter of an open request.
        The parameter allows the client to request caps when opening a file.
        """

        if not isinstance(self.mount_a, FuseMount):
            self.skipTest("Require FUSE client")

        # Enable debug. Client will request CEPH_CAP_XATTR_SHARED.
        self.mount_b.umount_wait()
        self.set_conf('client', 'client debug getattr caps', 'true')
        self.mount_b.mount_wait()

        # Create a file and hold it open. MDS will issue CEPH_CAP_EXCL_*.
        p = self.mount_a.open_background("testfile")
        self.mount_b.wait_for_visible("testfile")

        # This triggers a lookup request and an open request. The debug
        # code will check if the lookup/open reply contains xattrs.
        self.mount_b.run_shell(["cat", "testfile"])

        self.mount_a.kill_background(p)
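
        # For reference: the 'mask' in a lookup/open request lists the caps
        # the client wants issued along with the reply. With the debug option
        # above, the FUSE client adds CEPH_CAP_XATTR_SHARED to that mask, so
        # the reply must also carry the inode's xattrs, which is what
        # cat-ing the file from mount_b ends up exercising.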

    def test_root_rctime(self):
        """
        Check that the root inode has a non-default rctime on startup.
        """

        t = time.time()
        rctime = self.mount_a.getfattr(".", "ceph.dir.rctime")
        log.info("rctime = {}".format(rctime))
        self.assertGreaterEqual(float(rctime), t - 10)
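
        # Background sketch: ceph.dir.rctime is the recursive ctime vxattr,
        # i.e. the newest ctime of any inode beneath the directory. Outside
        # the test harness it can be read the same way (hypothetical
        # mountpoint):
        #
        #   getfattr -n ceph.dir.rctime /mnt/cephfs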

    def test_fs_new(self):
        self.mount_a.umount_wait()
        self.mount_b.umount_wait()

        data_pool_name = self.fs.get_data_pool_name()

        self.fs.mon_manager.raw_cluster_cmd('fs', 'rm', self.fs.name,
                                            '--yes-i-really-mean-it')

        self.fs.mon_manager.raw_cluster_cmd('osd', 'pool', 'delete',
                                            self.fs.metadata_pool_name,
                                            self.fs.metadata_pool_name,
                                            '--yes-i-really-really-mean-it')
        self.fs.mon_manager.raw_cluster_cmd('osd', 'pool', 'create',
                                            self.fs.metadata_pool_name,
                                            str(self.fs.pgs_per_fs_pool))

        dummyfile = '/etc/fstab'

        self.fs.put_metadata_object_raw("key", dummyfile)

        def get_pool_df(fs, name):
            return fs.get_pool_df(name)['objects'] > 0

        self.wait_until_true(lambda: get_pool_df(self.fs, self.fs.metadata_pool_name),
                             timeout=30)

        try:
            self.fs.mon_manager.raw_cluster_cmd('fs', 'new', self.fs.name,
                                                self.fs.metadata_pool_name,
                                                data_pool_name)
        except CommandFailedError as e:
            self.assertEqual(e.exitstatus, errno.EINVAL)
        else:
            raise AssertionError("Expected EINVAL")

        self.fs.mon_manager.raw_cluster_cmd('fs', 'new', self.fs.name,
                                            self.fs.metadata_pool_name,
                                            data_pool_name, "--force")

        self.fs.mon_manager.raw_cluster_cmd('fs', 'rm', self.fs.name,
                                            '--yes-i-really-mean-it')

        self.fs.mon_manager.raw_cluster_cmd('osd', 'pool', 'delete',
                                            self.fs.metadata_pool_name,
                                            self.fs.metadata_pool_name,
                                            '--yes-i-really-really-mean-it')
        self.fs.mon_manager.raw_cluster_cmd('osd', 'pool', 'create',
                                            self.fs.metadata_pool_name,
                                            str(self.fs.pgs_per_fs_pool))
        self.fs.mon_manager.raw_cluster_cmd('fs', 'new', self.fs.name,
                                            self.fs.metadata_pool_name,
                                            data_pool_name)
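
        # Design note: the sequence relies on 'fs new' returning EINVAL when
        # the metadata pool already contains objects (the raw object written
        # via put_metadata_object_raw above) and on --force overriding that
        # check; the final 'fs new' succeeds because the pool was recreated
        # empty.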

    def test_cap_revoke_nonresponder(self):
        """
        Check that a client is evicted if it has not responded to a cap
        revoke request for the configured number of seconds.
        """
        session_timeout = self.fs.get_var("session_timeout")
        eviction_timeout = session_timeout / 2.0

        self.fs.mds_asok(['config', 'set', 'mds_cap_revoke_eviction_timeout',
                          str(eviction_timeout)])

        cap_holder = self.mount_a.open_background()

        # Wait for the file to be visible from another client, indicating
        # that mount_a has completed its network ops
        self.mount_b.wait_for_visible()

        # Simulate client death
        self.mount_a.kill()

        # The waiter should get stuck waiting for the capability
        # held on the MDS by the now-dead client A
        cap_waiter = self.mount_b.write_background()

        a = time.time()
        time.sleep(eviction_timeout)
        cap_waiter.wait()
        cap_waited = time.time() - a
        log.info("cap_waiter waited {0}s".format(cap_waited))

        # Check if the cap was transferred before the session timeout kicked
        # in. This is a good enough check to ensure that the client got
        # evicted by the cap auto-evicter rather than transitioning to the
        # stale state and then getting evicted.
        self.assertLess(cap_waited, session_timeout,
                        "Capability handover took {0}, expected less than {1}".format(
                            cap_waited, session_timeout
                        ))

        self.assertTrue(self.mount_a.is_blacklisted())
        cap_holder.stdin.close()
        try:
            cap_holder.wait()
        except (CommandFailedError, ConnectionLostError):
            # We killed it (and possibly its node), so it raises an error
            pass

        self.mount_a.kill_cleanup()

        self.mount_a.mount()
        self.mount_a.wait_until_mounted()
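
        # Sketch of the knob under test, for reference (value assumed, not
        # taken from this test): outside the harness the same timeout could
        # be set cluster-wide with:
        #
        #   ceph config set mds mds_cap_revoke_eviction_timeout 300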

    def test_filtered_df(self):
        pool_name = self.fs.get_data_pool_name()
        raw_df = self.fs.get_pool_df(pool_name)
        raw_avail = float(raw_df["max_avail"])
        out = self.fs.mon_manager.raw_cluster_cmd('osd', 'pool', 'get',
                                                  pool_name, 'size',
                                                  '-f', 'json-pretty')
        pool_size = int(json.loads(out)['size'])

        proc = self.mount_a.run_shell(['df', '.'])
        output = proc.stdout.getvalue()
        fs_avail = output.split('\n')[1].split()[3]
        fs_avail = float(fs_avail) * 1024

        ratio = raw_avail / fs_avail
        assert 0.9 < ratio < 1.1
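
        # The parsing above assumes POSIX/GNU df output, where row 1,
        # column 4 is "Available" in 1K blocks, e.g. (illustrative numbers):
        #
        #   Filesystem     1K-blocks  Used Available Use% Mounted on
        #   ceph-fuse      104857600     0 104857600   0% /mnt/cephfs
        #
        # hence the * 1024 conversion before comparing with the pool's
        # max_avail, which is reported in bytes.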

    def test_dump_inode(self):
        info = self.fs.mds_asok(['dump', 'inode', '1'])
        assert info['path'] == "/"

    def test_dump_inode_hexademical(self):
        self.mount_a.run_shell(["mkdir", "-p", "foo"])
        ino = self.mount_a.path_to_ino("foo")
        assert type(ino) is int
        info = self.fs.mds_asok(['dump', 'inode', hex(ino)])
        assert info['path'] == "/foo"
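
    # Taken together, these two tests document that the MDS 'dump inode'
    # asok command accepts the inode number both in decimal ('1', the root
    # inode) and in the hex form produced by hex(ino).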


class TestCacheDrop(CephFSTestCase):
    def _run_drop_cache_cmd(self, timeout=None):
        mds_id = self.fs.get_lone_mds_id()
        if timeout is not None:
            result = self.fs.mon_manager.raw_cluster_cmd("tell", "mds.{0}".format(mds_id),
                                                         "cache", "drop", str(timeout))
        else:
            result = self.fs.mon_manager.raw_cluster_cmd("tell", "mds.{0}".format(mds_id),
                                                         "cache", "drop")
        return json.loads(result)
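
    # Callers below index into the decoded reply; an illustrative (not
    # authoritative) shape, with made-up values:
    #
    #   {
    #       "client_recall": {"return_code": 0},
    #       "flush_journal": {"return_code": 0},
    #       "trim_cache": {"trimmed": 1000},
    #       "duration": 3.2
    #   }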

    def _setup(self, max_caps=20, threshold=400):
        # Create some files so the MDS has caps to recall
        self.mount_a.create_n_files("dc-dir/dc-file", 1000, sync=True)

        # Reduce this so the MDS doesn't recall the maximum for simple tests
        self.fs.rank_asok(['config', 'set', 'mds_recall_max_caps', str(max_caps)])
        self.fs.rank_asok(['config', 'set', 'mds_recall_max_decay_threshold', str(threshold)])
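
    # Rationale sketch: mds_recall_max_caps bounds how many caps a single
    # recall message asks a client to release, and
    # mds_recall_max_decay_threshold throttles the aggregate recall rate;
    # lowering both lets the modest 1000-file workload above exercise the
    # recall path without waiting on defaults tuned for large caches.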

    def test_drop_cache_command(self):
        """
        Basic test for checking drop cache command.
        Confirm it halts without a timeout.
        Note that the cache size post trimming is not checked here.
        """
        mds_min_caps_per_client = int(self.fs.get_config("mds_min_caps_per_client"))
        self._setup()
        result = self._run_drop_cache_cmd()
        self.assertEqual(result['client_recall']['return_code'], 0)
        self.assertEqual(result['flush_journal']['return_code'], 0)
        # It should take at least 1 second
        self.assertGreater(result['duration'], 1)
        self.assertGreaterEqual(result['trim_cache']['trimmed'], 1000-2*mds_min_caps_per_client)
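
        # Reading of the bound above: recall should reclaim nearly all of the
        # 1000 caps created by _setup(); mds_min_caps_per_client is the floor
        # below which the MDS will not recall a client, and the factor of two
        # is presumably slack on top of that floor.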

    def test_drop_cache_command_timeout(self):
        """
        Basic test for checking drop cache command.
        Confirm recall halts early via a timeout.
        Note that the cache size post trimming is not checked here.
        """
        self._setup()
        result = self._run_drop_cache_cmd(timeout=10)
        self.assertEqual(result['client_recall']['return_code'], -errno.ETIMEDOUT)
        self.assertEqual(result['flush_journal']['return_code'], 0)
        self.assertGreater(result['duration'], 10)
        self.assertGreaterEqual(result['trim_cache']['trimmed'], 100) # we did something, right?

    def test_drop_cache_command_dead_timeout(self):
        """
        Check drop cache command with a non-responding client using the tell
        interface. Note that the cache size post trimming is not checked
        here.
        """
        self._setup()
        self.mount_a.kill()
        # Note: recall is subject to the timeout. The journal flush will
        # be delayed due to the client being dead.
        result = self._run_drop_cache_cmd(timeout=5)
        self.assertEqual(result['client_recall']['return_code'], -errno.ETIMEDOUT)
        self.assertEqual(result['flush_journal']['return_code'], 0)
        self.assertGreater(result['duration'], 5)
        self.assertLess(result['duration'], 120)
        # Note: result['trim_cache']['trimmed'] may be >0 because dropping the
        # cache now causes the Locker to drive eviction of stale clients (a
        # stale session will be autoclosed at mdsmap['session_timeout']). The
        # particular operation causing this is the journal flush, which makes
        # the MDS wait for cap revoke.
        #self.assertEqual(0, result['trim_cache']['trimmed'])
        self.mount_a.kill_cleanup()

        self.mount_a.mount()
        self.mount_a.wait_until_mounted()

    def test_drop_cache_command_dead(self):
        """
        Check drop cache command with a non-responding client using the tell
        interface. Note that the cache size post trimming is not checked
        here.
        """
        self._setup()
        self.mount_a.kill()
        result = self._run_drop_cache_cmd()
        self.assertEqual(result['client_recall']['return_code'], 0)
        self.assertEqual(result['flush_journal']['return_code'], 0)
        self.assertGreater(result['duration'], 5)
        self.assertLess(result['duration'], 120)
        # Note: result['trim_cache']['trimmed'] may be >0 because dropping the
        # cache now causes the Locker to drive eviction of stale clients (a
        # stale session will be autoclosed at mdsmap['session_timeout']). The
        # particular operation causing this is the journal flush, which makes
        # the MDS wait for cap revoke.
        self.mount_a.kill_cleanup()

        self.mount_a.mount()
        self.mount_a.wait_until_mounted()
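

# These tests normally run under teuthology. Against a vstart cluster they
# can also be driven directly with the vstart runner, e.g. (path assumes the
# usual build-dir layout):
#
#   python ../qa/tasks/vstart_runner.py tasks.cephfs.test_misc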