from io import StringIO
from tasks.cephfs.fuse_mount import FuseMount
from tasks.cephfs.cephfs_test_case import CephFSTestCase
from teuthology.exceptions import CommandFailedError
from textwrap import dedent
from threading import Thread

import errno
import json
import logging
import os
import platform
import time
log = logging.getLogger(__name__)
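
# These tests run inside the teuthology harness: CephFSTestCase provides the
# file system handle (self.fs), the MDS cluster helpers (self.mds_cluster) and
# the mounted clients (self.mount_a, self.mount_b) that the cases below drive.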

class TestMisc(CephFSTestCase):
    CLIENTS_REQUIRED = 2

    def test_statfs_on_deleted_fs(self):
        """
        That statfs does not cause monitors to SIGSEGV after fs deletion.
        """

        self.mount_b.umount_wait()
        self.mount_a.run_shell_payload("stat -f .")
        self.fs.delete_all_filesystems()
        # This will hang either way, run in background.
        p = self.mount_a.run_shell_payload("stat -f .", wait=False, timeout=60,
                                           check_status=False)
        time.sleep(30)
        self.assertFalse(p.finished)
        # the process is stuck in uninterruptible sleep, just kill the mount
        self.mount_a.umount_wait(force=True)

    def test_fuse_mount_on_already_mounted_path(self):
        if platform.system() != "Linux":
            self.skipTest("Require Linux platform")

        if not isinstance(self.mount_a, FuseMount):
            self.skipTest("Require FUSE client")

        # Try to mount already mounted path
        # expecting EBUSY error
        try:
            mount_cmd = ['sudo'] + self.mount_a._mount_bin + [self.mount_a.hostfs_mntpt]
            self.mount_a.client_remote.run(args=mount_cmd, stderr=StringIO(),
                                           stdout=StringIO(), timeout=60, omit_sudo=False)
        except CommandFailedError as e:
            self.assertEqual(e.exitstatus, errno.EBUSY)
        else:
            self.fail("Expected EBUSY")

    def test_getattr_caps(self):
        """
        Check if MDS recognizes the 'mask' parameter of open request.
        The parameter allows a client to request caps when opening a file.
        """

        if not isinstance(self.mount_a, FuseMount):
            self.skipTest("Require FUSE client")

        # Enable debug. Client will request CEPH_CAP_XATTR_SHARED
        # on lookup/open.
        self.mount_b.umount_wait()
        self.set_conf('client', 'client debug getattr caps', 'true')
        self.mount_b.mount_wait()

        # create a file and hold it open. MDS will issue CEPH_CAP_EXCL_*
        # caps to mount_a.
        p = self.mount_a.open_background("testfile")
        self.mount_b.wait_for_visible("testfile")

        # this triggers a lookup request and an open request. The debug
        # code will check if lookup/open reply contains xattrs
        self.mount_b.run_shell(["cat", "testfile"])

        self.mount_a.kill_background(p)

    def test_root_rctime(self):
        """
        Check that the root inode has a non-default rctime on startup.
        """

        t = time.time()
        rctime = self.mount_a.getfattr(".", "ceph.dir.rctime")
        log.info("rctime = {}".format(rctime))
        self.assertGreaterEqual(float(rctime), t - 10)

    def test_fs_new(self):
        self.mount_a.umount_wait()
        self.mount_b.umount_wait()

        data_pool_name = self.fs.get_data_pool_name()

        self.fs.fail()

        self.fs.mon_manager.raw_cluster_cmd('fs', 'rm', self.fs.name,
                                            '--yes-i-really-mean-it')

        self.fs.mon_manager.raw_cluster_cmd('osd', 'pool', 'delete',
                                            self.fs.metadata_pool_name,
                                            self.fs.metadata_pool_name,
                                            '--yes-i-really-really-mean-it')
        self.fs.mon_manager.raw_cluster_cmd('osd', 'pool', 'create',
                                            self.fs.metadata_pool_name,
                                            '--pg_num_min', str(self.fs.pg_num_min))

        # insert a garbage object into the new metadata pool
        self.fs.radosm(["put", "foo", "-"], stdin=StringIO("bar"))

        def get_pool_df(fs, name):
            try:
                return fs.get_pool_df(name)['objects'] > 0
            except RuntimeError:
                return False

        self.wait_until_true(lambda: get_pool_df(self.fs, self.fs.metadata_pool_name), timeout=30)

        # 'fs new' must refuse a metadata pool that already contains objects
        try:
            self.fs.mon_manager.raw_cluster_cmd('fs', 'new', self.fs.name,
                                                self.fs.metadata_pool_name,
                                                data_pool_name)
        except CommandFailedError as e:
            self.assertEqual(e.exitstatus, errno.EINVAL)
        else:
            raise AssertionError("Expected EINVAL")

        self.fs.mon_manager.raw_cluster_cmd('fs', 'new', self.fs.name,
                                            self.fs.metadata_pool_name,
                                            data_pool_name, "--force")

        self.fs.mon_manager.raw_cluster_cmd('fs', 'fail', self.fs.name)

        self.fs.mon_manager.raw_cluster_cmd('fs', 'rm', self.fs.name,
                                            '--yes-i-really-mean-it')

        self.fs.mon_manager.raw_cluster_cmd('osd', 'pool', 'delete',
                                            self.fs.metadata_pool_name,
                                            self.fs.metadata_pool_name,
                                            '--yes-i-really-really-mean-it')
        self.fs.mon_manager.raw_cluster_cmd('osd', 'pool', 'create',
                                            self.fs.metadata_pool_name,
                                            '--pg_num_min', str(self.fs.pg_num_min))
        self.fs.mon_manager.raw_cluster_cmd('fs', 'new', self.fs.name,
                                            self.fs.metadata_pool_name,
                                            data_pool_name)

    def test_cap_revoke_nonresponder(self):
        """
        Check that a client is evicted if it has not responded to cap revoke
        request for the configured number of seconds.
        """
        session_timeout = self.fs.get_var("session_timeout")
        eviction_timeout = session_timeout / 2.0

        self.fs.mds_asok(['config', 'set', 'mds_cap_revoke_eviction_timeout',
                          str(eviction_timeout)])

        cap_holder = self.mount_a.open_background()

        # Wait for the file to be visible from another client, indicating
        # that mount_a has completed its network ops
        self.mount_b.wait_for_visible()

        # Simulate client death
        self.mount_a.suspend_netns()

        try:
            # The waiter should get stuck waiting for the capability
            # held on the MDS by the now-dead client A
            cap_waiter = self.mount_b.write_background()

            a = time.time()
            time.sleep(eviction_timeout)
            cap_waiter.wait()
            b = time.time()
            cap_waited = b - a
            log.info("cap_waiter waited {0}s".format(cap_waited))

            # check if the cap is transferred before session timeout kicked in.
            # this is a good enough check to ensure that the client got evicted
            # by the cap auto evicter rather than transitioning to stale state
            # and then getting evicted.
            self.assertLess(cap_waited, session_timeout,
                            "Capability handover took {0}, expected less than {1}".format(
                                cap_waited, session_timeout
                            ))

            self.assertTrue(self.mds_cluster.is_addr_blocklisted(
                self.mount_a.get_global_addr()))
            self.mount_a._kill_background(cap_holder)
        finally:
            self.mount_a.resume_netns()

    def test_filtered_df(self):
        pool_name = self.fs.get_data_pool_name()
        raw_df = self.fs.get_pool_df(pool_name)
        raw_avail = float(raw_df["max_avail"])
        out = self.fs.mon_manager.raw_cluster_cmd('osd', 'pool', 'get',
                                                  pool_name, 'size',
                                                  '-f', 'json-pretty')
        _ = json.loads(out)

        proc = self.mount_a.run_shell(['df', '.'])
        output = proc.stdout.getvalue()
        fs_avail = output.split('\n')[1].split()[3]
        # df reports 1K blocks; the client's "available" figure should match
        # the pool's max_avail to within 10%.
        fs_avail = float(fs_avail) * 1024

        ratio = raw_avail / fs_avail
        assert 0.9 < ratio < 1.1

    def test_dump_inode(self):
        # inode number 1 is the CephFS root inode
        info = self.fs.mds_asok(['dump', 'inode', '1'])
        assert(info['path'] == "/")

    def test_dump_inode_hexademical(self):
        self.mount_a.run_shell(["mkdir", "-p", "foo"])
        ino = self.mount_a.path_to_ino("foo")
        assert type(ino) is int
        info = self.fs.mds_asok(['dump', 'inode', hex(ino)])
        assert info['path'] == "/foo"

    def test_fs_lsflags(self):
        """
        Check that 'fs lsflags' displays both the default state and the new
        state of the flags.
        """
        self.fs.set_joinable(False)
        self.fs.set_allow_new_snaps(False)
        self.fs.set_allow_standby_replay(True)

        lsflags = json.loads(self.fs.mon_manager.raw_cluster_cmd('fs', 'lsflags',
                                                                 self.fs.name,
                                                                 "--format=json-pretty"))
        self.assertEqual(lsflags["joinable"], False)
        self.assertEqual(lsflags["allow_snaps"], False)
        self.assertEqual(lsflags["allow_multimds_snaps"], True)
        self.assertEqual(lsflags["allow_standby_replay"], True)

    def _test_sync_stuck_for_around_5s(self, dir_path, file_sync=False):
        self.mount_a.run_shell(["mkdir", dir_path])

        sync_dir_pyscript = dedent("""
                import os

                path = "{path}"
                dfd = os.open(path, os.O_DIRECTORY)
                os.fsync(dfd)
                os.close(dfd)
            """.format(path=dir_path))
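
        # The helper script above fsyncs a directory file descriptor, so only
        # that directory's dirty metadata needs to be flushed; the plain
        # "sync" branch below flushes the whole filesystem instead. Either
        # way the call is expected to return well within 5 seconds.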
        # run create/delete directories and test the sync time duration
        for i in range(300):
            for j in range(5):
                self.mount_a.run_shell(["mkdir", os.path.join(dir_path, f"{i}_{j}")])
            start = time.time()
            if file_sync:
                self.mount_a.run_shell(['python3', '-c', sync_dir_pyscript])
            else:
                self.mount_a.run_shell(["sync"])
            duration = time.time() - start
            log.info(f"sync mkdir i = {i}, duration = {duration}")
            self.assertLess(duration, 4)

            for j in range(5):
                self.mount_a.run_shell(["rm", "-rf", os.path.join(dir_path, f"{i}_{j}")])
            start = time.time()
            if file_sync:
                self.mount_a.run_shell(['python3', '-c', sync_dir_pyscript])
            else:
                self.mount_a.run_shell(["sync"])
            duration = time.time() - start
            log.info(f"sync rmdir i = {i}, duration = {duration}")
            self.assertLess(duration, 4)

        self.mount_a.run_shell(["rm", "-rf", dir_path])

    def test_filesystem_sync_stuck_for_around_5s(self):
        """
        Check that a filesystem-wide sync does not get stuck waiting for the
        mdlog to be flushed for more than about 5 seconds.
        """

        dir_path = "filesystem_sync_do_not_wait_mdlog_testdir"
        self._test_sync_stuck_for_around_5s(dir_path)

    def test_file_sync_stuck_for_around_5s(self):
        """
        Check that an fsync of a directory does not get stuck waiting for the
        mdlog to be flushed for more than about 5 seconds.
        """

        dir_path = "file_sync_do_not_wait_mdlog_testdir"
        self._test_sync_stuck_for_around_5s(dir_path, True)

    def test_file_filesystem_sync_crash(self):
        """
        Check that the kernel does not crash while file sync and filesystem
        sync run concurrently with directory create/delete.
        """

        stop_thread = False
        dir_path = "file_filesystem_sync_crash_testdir"
        self.mount_a.run_shell(["mkdir", dir_path])

        def mkdir_rmdir_thread(mount, path):
            log.info(" mkdir_rmdir_thread starting...")
            num = 0
            while not stop_thread:
                n = num
                m = num - 10
                num += 1
                mount.run_shell(["mkdir", os.path.join(path, f"{n}")])
                if m >= 0:
                    mount.run_shell(["rm", "-rf", os.path.join(path, f"{m}")])
            log.info(" mkdir_rmdir_thread stopped")

        def filesystem_sync_thread(mount, path):
            log.info(" filesystem_sync_thread starting...")
            while not stop_thread:
                mount.run_shell(["sync"])
            log.info(" filesystem_sync_thread stopped")

        def file_sync_thread(mount, path):
            log.info(" file_sync_thread starting...")
            pyscript = dedent("""
                    import os

                    path = "{path}"
                    dfd = os.open(path, os.O_DIRECTORY)
                    os.fsync(dfd)
                    os.close(dfd)
                """.format(path=path))

            while not stop_thread:
                mount.run_shell(['python3', '-c', pyscript])
            log.info(" file_sync_thread stopped")

        td1 = Thread(target=mkdir_rmdir_thread, args=(self.mount_a, dir_path,))
        td2 = Thread(target=filesystem_sync_thread, args=(self.mount_a, dir_path,))
        td3 = Thread(target=file_sync_thread, args=(self.mount_a, dir_path,))

        td1.start()
        td2.start()
        td3.start()
        time.sleep(1200)  # run 20 minutes

        stop_thread = True
        td1.join()
        td2.join()
        td3.join()
        self.mount_a.run_shell(["rm", "-rf", dir_path])


class TestCacheDrop(CephFSTestCase):
    CLIENTS_REQUIRED = 1

    def _run_drop_cache_cmd(self, timeout=None):
        args = ["cache", "drop"]
        if timeout is not None:
            args.append(str(timeout))
        result = self.fs.rank_tell(args)
        return result
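
    # The assertions in the tests below read the dict returned by the
    # "cache drop" rank tell: result['client_recall']['return_code'],
    # result['flush_journal']['return_code'], result['duration'] and
    # result['trim_cache']['trimmed'].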

    def _setup(self, max_caps=20, threshold=400):
        # create some files
        self.mount_a.create_n_files("dc-dir/dc-file", 1000, sync=True)

        # Reduce this so the MDS doesn't recall the maximum for simple tests
        self.fs.rank_asok(['config', 'set', 'mds_recall_max_caps', str(max_caps)])
        self.fs.rank_asok(['config', 'set', 'mds_recall_max_decay_threshold', str(threshold)])

    def test_drop_cache_command(self):
        """
        Basic test for checking drop cache command.
        Confirm it halts without a timeout.
        Note that the cache size post trimming is not checked here.
        """
        mds_min_caps_per_client = int(self.fs.get_config("mds_min_caps_per_client"))
        self._setup()
        result = self._run_drop_cache_cmd()
        self.assertEqual(result['client_recall']['return_code'], 0)
        self.assertEqual(result['flush_journal']['return_code'], 0)
        # It should take at least 1 second
        self.assertGreater(result['duration'], 1)
        self.assertGreaterEqual(result['trim_cache']['trimmed'], 1000-2*mds_min_caps_per_client)

    def test_drop_cache_command_timeout(self):
        """
        Basic test for checking drop cache command.
        Confirm recall halts early via a timeout.
        Note that the cache size post trimming is not checked here.
        """
        self._setup()
        result = self._run_drop_cache_cmd(timeout=10)
        self.assertEqual(result['client_recall']['return_code'], -errno.ETIMEDOUT)
        self.assertEqual(result['flush_journal']['return_code'], 0)
        self.assertGreater(result['duration'], 10)
        self.assertGreaterEqual(result['trim_cache']['trimmed'], 100)  # we did something, right?

    def test_drop_cache_command_dead_timeout(self):
        """
        Check drop cache command with a non-responding client using the tell
        interface. Note that the cache size post trimming is not checked
        here.
        """
        self._setup()
        self.mount_a.suspend_netns()
        # Note: recall is subject to the timeout. The journal flush will
        # be delayed due to the client being dead.
        result = self._run_drop_cache_cmd(timeout=5)
        self.assertEqual(result['client_recall']['return_code'], -errno.ETIMEDOUT)
        self.assertEqual(result['flush_journal']['return_code'], 0)
        self.assertGreater(result['duration'], 5)
        self.assertLess(result['duration'], 120)
        # Note: result['trim_cache']['trimmed'] may be >0 because dropping the
        # cache now causes the Locker to drive eviction of stale clients (a
        # stale session will be autoclosed at mdsmap['session_timeout']). The
        # particular operation causing this is the journal flush, which makes
        # the MDS wait for cap revoke.
        #self.assertEqual(0, result['trim_cache']['trimmed'])
        self.mount_a.resume_netns()

    def test_drop_cache_command_dead(self):
        """
        Check drop cache command with a non-responding client using the tell
        interface. Note that the cache size post trimming is not checked
        here.
        """
        self._setup()
        self.mount_a.suspend_netns()
        result = self._run_drop_cache_cmd()
        self.assertEqual(result['client_recall']['return_code'], 0)
        self.assertEqual(result['flush_journal']['return_code'], 0)
        self.assertGreater(result['duration'], 5)
        self.assertLess(result['duration'], 120)
        # Note: result['trim_cache']['trimmed'] may be >0 because dropping the
        # cache now causes the Locker to drive eviction of stale clients (a
        # stale session will be autoclosed at mdsmap['session_timeout']). The
        # particular operation causing this is the journal flush, which makes
        # the MDS wait for cap revoke.
        self.mount_a.resume_netns()