"""
Teuthology task for exercising CephFS client recovery
"""

import logging
import os
import re
import time
from textwrap import dedent
import distutils.version as version

from teuthology.orchestra import run
from teuthology.exceptions import CommandFailedError
from tasks.cephfs.fuse_mount import FuseMount
from tasks.cephfs.cephfs_test_case import CephFSTestCase
from teuthology.packaging import get_package_version

log = logging.getLogger(__name__)


# Arbitrary timeouts for operations involving restarting
# an MDS or waiting for it to come up
MDS_RESTART_GRACE = 60


class TestClientNetworkRecovery(CephFSTestCase):
    REQUIRE_ONE_CLIENT_REMOTE = True

    LOAD_SETTINGS = ["mds_reconnect_timeout", "ms_max_backoff"]

    # Environment references
    mds_reconnect_timeout = None
    ms_max_backoff = None

    def test_network_death(self):
        """
        Simulate software freeze or temporary network failure.

        Check that the client blocks I/O during failure, and completes
        the I/O after failure.
        """
        session_timeout = self.fs.get_var("session_timeout")
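        # Assumption about this option's intent: mds_defer_session_stale lets
        # the MDS defer marking an unresponsive session stale, so disable it
        # here to make the stale-state timing assertions below deterministic.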
        self.fs.mds_asok(['config', 'set', 'mds_defer_session_stale', 'false'])

        # We only need one client
        self.mount_b.umount_wait()

        # Initially our one client session should be visible
        client_id = self.mount_a.get_global_id()
        ls_data = self._session_list()
        self.assert_session_count(1, ls_data)
        self.assertEqual(ls_data[0]['id'], client_id)
        self.assert_session_state(client_id, "open")

        # ...and capable of doing I/O without blocking
        self.mount_a.create_files()

        # ...but if we turn off the network
        self.fs.set_clients_block(True)

        # ...and try and start an I/O
        write_blocked = self.mount_a.write_background()

        # ...then it should block
        self.assertFalse(write_blocked.finished)
        self.assert_session_state(client_id, "open")
        time.sleep(session_timeout * 1.5)  # Long enough for MDS to consider session stale
        self.assertFalse(write_blocked.finished)
        self.assert_session_state(client_id, "stale")

        # ...until we re-enable I/O
        self.fs.set_clients_block(False)

        # ...when it should complete promptly
        a = time.time()
        self.wait_until_true(lambda: write_blocked.finished, self.ms_max_backoff * 2)
        write_blocked.wait()  # Already know we're finished, wait() to raise exception on errors
        recovery_time = time.time() - a
        log.info("recovery time: {0}".format(recovery_time))
        self.assert_session_state(client_id, "open")


class TestClientRecovery(CephFSTestCase):
    CLIENTS_REQUIRED = 2

    LOAD_SETTINGS = ["mds_reconnect_timeout", "ms_max_backoff"]

    # Environment references
    mds_reconnect_timeout = None
    ms_max_backoff = None

    def test_basic(self):
        # Check that two clients come up healthy and see each others' files
        # =====================================================
        self.mount_a.create_files()
        self.mount_a.check_files()
        self.mount_a.umount_wait()

        self.mount_b.check_files()

        self.mount_a.mount_wait()

        # Check that the admin socket interface is correctly reporting
        # two sessions
        # =====================================================
        ls_data = self._session_list()
        self.assert_session_count(2, ls_data)

        self.assertEqual(
            set([l['id'] for l in ls_data]),
            {self.mount_a.get_global_id(), self.mount_b.get_global_id()}
        )

    def test_restart(self):
        # Check that after an MDS restart both clients reconnect and continue
        # to do I/O
        # =====================================================
        self.fs.mds_fail_restart()
        self.fs.wait_for_state('up:active', timeout=MDS_RESTART_GRACE)

        self.mount_a.create_destroy()
        self.mount_b.create_destroy()

    def _session_num_caps(self, client_id):
        # Look up `client_id` in the `session ls` output and return its cap count
        ls_data = self.fs.mds_asok(['session', 'ls'])
        return int(self._session_by_id(ls_data).get(client_id, {'num_caps': None})['num_caps'])

    def test_reconnect_timeout(self):
        # Reconnect timeout
        # =================
        # Check that if I stop an MDS and a client goes away, the MDS waits
        # for the reconnect period
        mount_a_client_id = self.mount_a.get_global_id()

        self.fs.fail()

        self.mount_a.umount_wait(force=True)

        self.fs.set_joinable()

        self.fs.wait_for_state('up:reconnect', reject='up:active', timeout=MDS_RESTART_GRACE)
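        # (reject='up:active' makes the wait fail immediately if the MDS were
        # to skip the reconnect phase and jump straight to active)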
        # Check that the MDS locally reports its state correctly
        status = self.fs.mds_asok(['status'])
        self.assertIn("reconnect_status", status)

        ls_data = self._session_list()
        self.assert_session_count(2, ls_data)

        # The session for the dead client should have the 'reconnect' flag set
        self.assertTrue(self.get_session(mount_a_client_id)['reconnecting'])

        # Wait for the reconnect state to clear, this should take the
        # reconnect timeout period.
        in_reconnect_for = self.fs.wait_for_state('up:active', timeout=self.mds_reconnect_timeout * 2)
        # Check that the period we waited to enter active is within a factor
        # of two of the reconnect timeout.
        self.assertGreater(in_reconnect_for, self.mds_reconnect_timeout // 2,
                           "Should have been in reconnect phase for {0} but only took {1}".format(
                               self.mds_reconnect_timeout, in_reconnect_for
                           ))

        self.assert_session_count(1)

        # Check that the client that timed out during reconnect can
        # mount again and do I/O
        self.mount_a.mount_wait()
        self.mount_a.create_destroy()

        self.assert_session_count(2)

    def test_reconnect_eviction(self):
        # Eviction during reconnect
        # =========================
        mount_a_client_id = self.mount_a.get_global_id()

        self.fs.fail()

        # The mount goes away while the MDS is offline
        self.mount_a.kill()

        # wait for it to die
        time.sleep(5)

        self.fs.set_joinable()

        # Enter reconnect phase
        self.fs.wait_for_state('up:reconnect', reject='up:active', timeout=MDS_RESTART_GRACE)
        self.assert_session_count(2)
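        # Both sessions are still listed even though mount_a's process is
        # already dead: session state survives the restart, presumably
        # recovered from the MDS's journaled session table.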

        # Evict the stuck client
        self.fs.mds_asok(['session', 'evict', "%s" % mount_a_client_id])
        self.assert_session_count(1)

        # Observe that we proceed to active phase without waiting full reconnect timeout
        evict_til_active = self.fs.wait_for_state('up:active', timeout=MDS_RESTART_GRACE)
        # Once we evict the troublemaker, the reconnect phase should complete
        # in well under the reconnect timeout.
        self.assertLess(evict_til_active, self.mds_reconnect_timeout * 0.5,
                        "reconnect did not complete soon enough after eviction, took {0}".format(
                            evict_til_active
                        ))

        # We killed earlier so must clean up before trying to use again
        self.mount_a.kill_cleanup()

        # Bring the client back
        self.mount_a.mount_wait()
        self.mount_a.create_destroy()

    def _test_stale_caps(self, write):
        session_timeout = self.fs.get_var("session_timeout")

        # Capability release from stale session
        # =====================================
        if write:
            cap_holder = self.mount_a.open_background()
        else:
            # Create the file first and remount, so that mount_a holds only
            # a read capability when it reopens the file read-only below
            self.mount_a.run_shell(["touch", "background_file"])
            self.mount_a.umount_wait()
            self.mount_a.mount_wait()
            cap_holder = self.mount_a.open_background(write=False)

        self.assert_session_count(2)
        mount_a_gid = self.mount_a.get_global_id()
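        # Expectation encoded below: when another client wants the caps, a
        # stale session holding a write cap is removed outright, while one
        # holding only a read cap is merely marked stale.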

        # Wait for the file to be visible from another client, indicating
        # that mount_a has completed its network ops
        self.mount_b.wait_for_visible()

        # Simulate client death
        self.mount_a.suspend_netns()

        # wait for it to die so it doesn't voluntarily release buffer cap
        time.sleep(5)

        # Now, after session_timeout seconds, the waiter should
        # complete their operation when the MDS marks the holder's
        # session stale.
        cap_waiter = self.mount_b.write_background()
        a = time.time()
        cap_waiter.wait()
        b = time.time()

        # Should have succeeded
        self.assertEqual(cap_waiter.exitstatus, 0)

        if write:
            self.assert_session_count(1)
        else:
            self.assert_session_state(mount_a_gid, "stale")

        cap_waited = b - a
        log.info("cap_waiter waited {0}s".format(cap_waited))
        self.assertTrue(session_timeout / 2.0 <= cap_waited <= session_timeout * 2.0,
                        "Capability handover took {0}, expected approx {1}".format(
                            cap_waited, session_timeout
                        ))

        self.mount_a._kill_background(cap_holder)

        # teardown() doesn't quite handle this case cleanly, so help it out
        self.mount_a.resume_netns()

    def test_stale_read_caps(self):
        self._test_stale_caps(False)

    def test_stale_write_caps(self):
        self._test_stale_caps(True)

    def test_evicted_caps(self):
        # Eviction while holding a capability
        # ===================================

        session_timeout = self.fs.get_var("session_timeout")

        # Take out a write capability on a file on client A,
        # and then immediately kill it.
        cap_holder = self.mount_a.open_background()
        mount_a_client_id = self.mount_a.get_global_id()

        # Wait for the file to be visible from another client, indicating
        # that mount_a has completed its network ops
        self.mount_b.wait_for_visible()

        # Simulate client death
        self.mount_a.suspend_netns()

        # wait for it to die so it doesn't voluntarily release buffer cap
        time.sleep(5)

        # The waiter should get stuck waiting for the capability
        # held on the MDS by the now-dead client A
        cap_waiter = self.mount_b.write_background()
        time.sleep(5)
        self.assertFalse(cap_waiter.finished)

        self.fs.mds_asok(['session', 'evict', "%s" % mount_a_client_id])
        # Now, because I evicted the old holder of the capability, it should
        # immediately get handed over to the waiter
        a = time.time()
        cap_waiter.wait()
        b = time.time()
        cap_waited = b - a
        log.info("cap_waiter waited {0}s".format(cap_waited))
        # This is the check that it happened 'now' rather than waiting
        # for the session timeout
        self.assertLess(cap_waited, session_timeout / 2.0,
                        "Capability handover took {0}, expected less than {1}".format(
                            cap_waited, session_timeout / 2.0
                        ))

        self.mount_a._kill_background(cap_holder)

        self.mount_a.resume_netns()

    def test_trim_caps(self):
        # Trim capability when reconnecting MDS
        # ===================================

        count = 500
        # Create lots of files
        for i in range(count):
            self.mount_a.run_shell(["touch", "f{0}".format(i)])

        # Populate mount_b's cache
        self.mount_b.run_shell(["ls", "-l"])

        client_id = self.mount_b.get_global_id()
        num_caps = self._session_num_caps(client_id)
        self.assertGreaterEqual(num_caps, count)

        # Restart MDS. client should trim its cache when reconnecting to the MDS
        self.fs.mds_fail_restart()
        self.fs.wait_for_state('up:active', timeout=MDS_RESTART_GRACE)

        num_caps = self._session_num_caps(client_id)
        self.assertLess(num_caps, count,
                        "should have less than {0} capabilities, have {1}".format(
                            count, num_caps
                        ))

    def _is_flockable(self):
        a_version_str = get_package_version(self.mount_a.client_remote, "fuse")
        b_version_str = get_package_version(self.mount_b.client_remote, "fuse")
        # flock support arrived in fuse 2.9, per the check below
        flock_version_str = "2.9"

        version_regex = re.compile(r"[0-9\.]+")
        a_result = version_regex.match(a_version_str)
        self.assertTrue(a_result)
        b_result = version_regex.match(b_version_str)
        self.assertTrue(b_result)
        a_version = version.StrictVersion(a_result.group())
        b_version = version.StrictVersion(b_result.group())
        flock_version = version.StrictVersion(flock_version_str)

        if (a_version >= flock_version and b_version >= flock_version):
            log.info("flock locks are available")
            return True
        else:
            log.info("not testing flock locks, machines have versions {av} and {bv}".format(
                av=a_version_str, bv=b_version_str))
            return False

    def test_filelock(self):
        """
        Check that file lock doesn't get lost after an MDS restart
        """

        flockable = self._is_flockable()
        lock_holder = self.mount_a.lock_background(do_flock=flockable)

        self.mount_b.wait_for_visible("background_file-2")
        self.mount_b.check_filelock(do_flock=flockable)

        self.fs.mds_fail_restart()
        self.fs.wait_for_state('up:active', timeout=MDS_RESTART_GRACE)

        self.mount_b.check_filelock(do_flock=flockable)

        # Tear down the background process
        self.mount_a._kill_background(lock_holder)

    def test_filelock_eviction(self):
        """
        Check that file lock held by evicted client is given to
        waiting client.
        """
        if not self._is_flockable():
            self.skipTest("flock is not available")

        lock_holder = self.mount_a.lock_background()
        self.mount_b.wait_for_visible("background_file-2")
        self.mount_b.check_filelock()

        lock_taker = self.mount_b.lock_and_release()
        # Check the taker is waiting (doesn't get it immediately)
        time.sleep(2)
        self.assertFalse(lock_holder.finished)
        self.assertFalse(lock_taker.finished)

        mount_a_client_id = self.mount_a.get_global_id()
        self.fs.mds_asok(['session', 'evict', "%s" % mount_a_client_id])

        # Evicting mount_a should let mount_b's attempt to take the lock
        # succeed
        self.wait_until_true(lambda: lock_taker.finished, timeout=10)

        # Tear down the background process
        self.mount_a._kill_background(lock_holder)

        # teardown() doesn't quite handle this case cleanly, so help it out
        self.mount_a.kill()
        self.mount_a.kill_cleanup()

        # Bring the client back
        self.mount_a.mount_wait()

    def test_dir_fsync(self):
        self._test_fsync(True)

    def test_create_fsync(self):
        self._test_fsync(False)

    def _test_fsync(self, dirfsync):
        """
        Test that calls to fsync guarantee visibility of metadata to another
        client immediately after the fsyncing client dies.
        """

        # Leave this guy out until he's needed
        self.mount_b.umount_wait()

        # Create dir + child dentry on client A, and fsync the dir
        path = os.path.join(self.mount_a.mountpoint, "subdir")
        self.mount_a.run_python(
            dedent("""
                import os
                import time

                path = "{path}"

                print("Starting creation...")
                start = time.time()

                os.mkdir(path)
                dfd = os.open(path, os.O_DIRECTORY)

                fd = open(os.path.join(path, "childfile"), "w")
                print("Finished creation in {{0}}s".format(time.time() - start))

                print("Starting fsync...")
                start = time.time()
                # fsync either the directory or the child file, per dirfsync
                if {dirfsync}:
                    os.fsync(dfd)
                else:
                    os.fsync(fd.fileno())
                print("Finished fsync in {{0}}s".format(time.time() - start))
                """.format(path=path, dirfsync=str(dirfsync))))

        # Immediately kill the MDS and then client A
        self.fs.fail()
        self.mount_a.kill()
        self.mount_a.kill_cleanup()

        # Restart the MDS.  Wait for it to come up, it'll have to time out in clientreplay
        self.fs.set_joinable()
        log.info("Waiting for reconnect...")
        self.fs.wait_for_state("up:reconnect")
        log.info("Waiting for active...")
        self.fs.wait_for_state("up:active", timeout=MDS_RESTART_GRACE + self.mds_reconnect_timeout)
        log.info("Reached active...")

        # Is the child dentry visible from mount B?
        self.mount_b.mount_wait()
        self.mount_b.run_shell(["ls", "subdir/childfile"])

    def test_unmount_for_evicted_client(self):
        """Test if client hangs on unmount after evicting the client."""
        mount_a_client_id = self.mount_a.get_global_id()
        self.fs.mds_asok(['session', 'evict', "%s" % mount_a_client_id])

        self.mount_a.umount_wait(require_clean=True, timeout=30)

    def test_mount_after_evicted_client(self):
        """Test if a new mount of same fs works after client eviction."""

        # trash this : we need it to use same remote as mount_a
        self.mount_b.umount_wait()

        cl = self.mount_a.__class__

        # create a new instance of mount_a's class with most of the
        # same settings, but mounted on mount_b's mountpoint.
        m = cl(ctx=self.mount_a.ctx,
               client_config=self.mount_a.client_config,
               test_dir=self.mount_a.test_dir,
               client_id=self.mount_a.client_id,
               client_remote=self.mount_a.client_remote,
               client_keyring_path=self.mount_a.client_keyring_path,
               cephfs_name=self.mount_a.cephfs_name,
               cephfs_mntpt=self.mount_a.cephfs_mntpt,
               hostfs_mntpt=self.mount_b.hostfs_mntpt,
               brxnet=self.mount_a.ceph_brx_net)

        # evict mount_a
        mount_a_client_id = self.mount_a.get_global_id()
        self.fs.mds_asok(['session', 'evict', "%s" % mount_a_client_id])

        # mounting the new instance of the same client should still work
        m.mount_wait()
        m.create_files()
        m.check_files()
        m.umount_wait(require_clean=True)

    def test_stale_renew(self):
        if not isinstance(self.mount_a, FuseMount):
            self.skipTest("Require FUSE client to handle signal STOP/CONT")

        session_timeout = self.fs.get_var("session_timeout")

        self.mount_a.run_shell(["mkdir", "testdir"])
        self.mount_a.run_shell(["touch", "testdir/file1"])
        # populate readdir cache
        self.mount_a.run_shell(["ls", "testdir"])
        self.mount_b.run_shell(["ls", "testdir"])

        # check if readdir cache is effective
        initial_readdirs = self.fs.mds_asok(['perf', 'dump', 'mds_server', 'req_readdir_latency'])
        self.mount_b.run_shell(["ls", "testdir"])
        current_readdirs = self.fs.mds_asok(['perf', 'dump', 'mds_server', 'req_readdir_latency'])
        self.assertEqual(current_readdirs, initial_readdirs)
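        # Unchanged req_readdir_latency counters mean mount_b's second ls
        # never reached the MDS, i.e. it was served from the readdir cache.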

        mount_b_gid = self.mount_b.get_global_id()
        # make mount_b unresponsive by suspending its network namespace
        self.mount_b.suspend_netns()

        self.assert_session_state(mount_b_gid, "open")
        time.sleep(session_timeout * 1.5)  # Long enough for MDS to consider session stale

        self.mount_a.run_shell(["touch", "testdir/file2"])
        self.assert_session_state(mount_b_gid, "stale")

        # restore mount_b's network
        self.mount_b.resume_netns()
        # Is the new file visible from mount_b? (caps become invalid after session stale)
        self.mount_b.run_shell(["ls", "testdir/file2"])

    def test_abort_conn(self):
        """
        Check that abort_conn() skips closing mds sessions.
        """
        if not isinstance(self.mount_a, FuseMount):
            self.skipTest("Testing libcephfs function")

        self.fs.mds_asok(['config', 'set', 'mds_defer_session_stale', 'false'])
        session_timeout = self.fs.get_var("session_timeout")

        self.mount_a.umount_wait()
        self.mount_b.umount_wait()

        gid_str = self.mount_a.run_python(dedent("""
            import cephfs as libcephfs
            cephfs = libcephfs.LibCephFS(conffile='')
            cephfs.mount()
            client_id = cephfs.get_instance_id()
            cephfs.abort_conn()
            print(client_id)
            """))
        gid = int(gid_str)

        self.assert_session_state(gid, "open")
        time.sleep(session_timeout * 1.5)  # Long enough for MDS to consider session stale
        self.assert_session_state(gid, "stale")

    def test_dont_mark_unresponsive_client_stale(self):
        """
        Test that an unresponsive client holding caps is not marked stale or
        evicted unless another client wants its caps.
        """
        if not isinstance(self.mount_a, FuseMount):
            self.skipTest("Require FUSE client to handle signal STOP/CONT")

        # XXX: To conduct this test we need at least two clients since a
        # single client is never evicted by MDS.
        SESSION_TIMEOUT = 30
        SESSION_AUTOCLOSE = 50
        time_at_beg = time.time()
        mount_a_gid = self.mount_a.get_global_id()
        _ = self.mount_a.client_pid
        self.fs.set_var('session_timeout', SESSION_TIMEOUT)
        self.fs.set_var('session_autoclose', SESSION_AUTOCLOSE)
        self.assert_session_count(2, self.fs.mds_asok(['session', 'ls']))

        # test that client holding cap not required by any other client is not
        # marked stale when it becomes unresponsive.
        self.mount_a.run_shell(['mkdir', 'dir'])
        self.mount_a.send_signal('sigstop')
        time.sleep(SESSION_TIMEOUT + 2)
        self.assert_session_state(mount_a_gid, "open")

        # test that other clients have to wait to get the caps from
        # unresponsive client until session_autoclose.
        self.mount_b.run_shell(['stat', 'dir'])
        self.assert_session_count(1, self.fs.mds_asok(['session', 'ls']))
        self.assertLess(time.time(), time_at_beg + SESSION_AUTOCLOSE)

        self.mount_a.send_signal('sigcont')

    def test_config_session_timeout(self):
        self.fs.mds_asok(['config', 'set', 'mds_defer_session_stale', 'false'])
        session_timeout = self.fs.get_var("session_timeout")
        mount_a_gid = self.mount_a.get_global_id()

        self.fs.mds_asok(['session', 'config', '%s' % mount_a_gid, 'timeout', '%s' % (session_timeout * 2)])
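        # 'session config <gid> timeout' overrides session_timeout for this
        # one session: with a 2x timeout, the killed client should still be
        # 'open' after 1.5x the default and only disappear after roughly 2x.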

        self.mount_a.kill()

        self.assert_session_count(2)

        time.sleep(session_timeout * 1.5)
        self.assert_session_state(mount_a_gid, "open")

        time.sleep(session_timeout)
        self.assert_session_count(1)

        self.mount_a.kill_cleanup()

    def test_reconnect_after_blocklisted(self):
        """
        Test reconnect after blocklisted.
        - writing to a fd that was opened before blocklist should return -EBADF
        - reading/writing to a file with lost file locks should return -EIO
        - readonly fd should continue to work
        """

        self.mount_a.umount_wait()

        if isinstance(self.mount_a, FuseMount):
            self.mount_a.mount_wait(mntargs=['--client_reconnect_stale=1', '--fuse_disable_pagecache=1'])
        else:
            try:
                self.mount_a.mount_wait(mntopts=['recover_session=clean'])
            except CommandFailedError:
                self.mount_a.kill_cleanup()
                self.skipTest("Not implemented in current kernel")

        self.mount_a.wait_until_mounted()

        path = os.path.join(self.mount_a.mountpoint, 'testfile_reconnect_after_blocklisted')
        pyscript = dedent("""
            import os
            import sys
            import fcntl
            import errno
            import time

            fd1 = os.open("{path}.1", os.O_RDWR | os.O_CREAT, 0o666)
            fd2 = os.open("{path}.1", os.O_RDONLY)
            fd3 = os.open("{path}.2", os.O_RDWR | os.O_CREAT, 0o666)
            fd4 = os.open("{path}.2", os.O_RDONLY)

            os.write(fd1, b'content')
            os.read(fd2, 1)

            os.write(fd3, b'content')
            os.read(fd4, 1)
            fcntl.flock(fd4, fcntl.LOCK_SH | fcntl.LOCK_NB)

            print("blocking")
            sys.stdout.flush()
            sys.stdin.readline()

            # wait for mds to close session
            time.sleep(10)

            # trigger 'open session' message. kclient relies on 'session reject' message
            # to detect if itself is blocklisted
            try:
                os.stat("{path}.1")
            except OSError:
                pass

            # wait for auto reconnect
            time.sleep(10)

            try:
                os.write(fd1, b'content')
            except OSError as e:
                if e.errno != errno.EBADF:
                    raise
            else:
                raise RuntimeError("write() failed to raise error")

            os.read(fd2, 1)

            try:
                os.read(fd4, 1)
            except OSError as e:
                if e.errno != errno.EIO:
                    raise
            else:
                raise RuntimeError("read() failed to raise error")
            """).format(path=path)
        rproc = self.mount_a.client_remote.run(
            args=['python3', '-c', pyscript],
            wait=False, stdin=run.PIPE, stdout=run.PIPE)

        # wait until the script has set up its fds and lock ("blocking" is printed)
        rproc.stdout.readline()

        mount_a_client_id = self.mount_a.get_global_id()
        self.fs.mds_asok(['session', 'evict', "%s" % mount_a_client_id])

        rproc.stdin.writelines(['done\n'])
        rproc.stdin.flush()

        rproc.wait()
        self.assertEqual(rproc.exitstatus, 0)

    def test_refuse_client_session(self):
        """
        Test that client cannot start session when file system flag
        refuse_client_session is set
        """

        self.mount_a.umount_wait()
        self.fs.set_refuse_client_session(True)
        with self.assertRaises(CommandFailedError):
            self.mount_a.mount_wait()

    def test_refuse_client_session_on_reconnect(self):
        """
        Test that client cannot reconnect when filesystem comes online and
        file system flag refuse_client_session is set
        """

        self.mount_a.create_files()
        self.mount_a.check_files()

        self.fs.fail()
        self.fs.set_refuse_client_session(True)
        self.fs.set_joinable()
        with self.assert_cluster_log('client could not reconnect as'
                                     ' file system flag'
                                     ' refuse_client_session is set'):
            time.sleep(self.fs.get_var("session_timeout") * 1.5)
        self.assertEqual(len(self.fs.mds_tell(["session", "ls"])), 0)
        self.mount_a.umount_wait(force=True)