# ceph/qa/tasks/cephfs/test_flush.py
from textwrap import dedent
from tasks.cephfs.cephfs_test_case import CephFSTestCase
from tasks.cephfs.filesystem import ObjectNotFound, ROOT_INO


class TestFlush(CephFSTestCase):
    def test_flush(self):
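        """
        Check that "flush journal" on the MDS admin socket persists metadata
        to RADOS (dirfrag object, backtrace, root dentries) and truncates the
        journal, and that deleting the tree afterwards purges those objects.
        """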
        self.mount_a.run_shell(["mkdir", "mydir"])
        self.mount_a.run_shell(["touch", "mydir/alpha"])
        dir_ino = self.mount_a.path_to_ino("mydir")
        file_ino = self.mount_a.path_to_ino("mydir/alpha")

        # Unmount the client so that it isn't still holding caps
        self.mount_a.umount_wait()

        # Before flush, the dirfrag object does not exist
        with self.assertRaises(ObjectNotFound):
            self.fs.list_dirfrag(dir_ino)

        # Before flush, the file's backtrace has not been written
        with self.assertRaises(ObjectNotFound):
            self.fs.read_backtrace(file_ino)

        # Before flush, there are no dentries in the root
        self.assertEqual(self.fs.list_dirfrag(ROOT_INO), [])
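
        # Execute the flush via the MDS admin socket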
        flush_data = self.fs.mds_asok(["flush", "journal"])
        self.assertEqual(flush_data['return_code'], 0)

        # After flush, the dirfrag object has been created
        dir_list = self.fs.list_dirfrag(dir_ino)
        self.assertEqual(dir_list, ["alpha_head"])

        # And the 'mydir' dentry is in the root
        self.assertEqual(self.fs.list_dirfrag(ROOT_INO), ['mydir_head'])

        # ...and the data object has its backtrace
        backtrace = self.fs.read_backtrace(file_ino)
        self.assertEqual(['alpha', 'mydir'], [a['dname'] for a in backtrace['ancestors']])
        self.assertEqual([dir_ino, 1], [a['dirino'] for a in backtrace['ancestors']])
        self.assertEqual(file_ino, backtrace['ino'])

        # ...and the journal is truncated to just a single subtreemap from the
        # newly created segment
        summary_output = self.fs.journal_tool(["event", "get", "summary"])
        try:
            self.assertEqual(summary_output,
                             dedent(
                                 """Events by type:
  SUBTREEMAP: 1
Errors: 0
"""
                             ).strip())
        except AssertionError:
            # In some states, flushing the journal will leave you
            # an extra event from locks a client held.  This is
            # correct behaviour: the MDS is flushing the journal,
            # it's just that new events are getting added too.
            # In this case, we should nevertheless see a fully
            # empty journal after a second flush.
            self.assertEqual(summary_output,
                             dedent(
                                 """Events by type:
  SUBTREEMAP: 1
  UPDATE: 1
Errors: 0
"""
                             ).strip())
            flush_data = self.fs.mds_asok(["flush", "journal"])
            self.assertEqual(flush_data['return_code'], 0)
            self.assertEqual(self.fs.journal_tool(["event", "get", "summary"]),
                             dedent(
                                 """Events by type:
  SUBTREEMAP: 1
Errors: 0
"""
                             ).strip())
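
        # Now delete the tree again and verify that the objects created by
        # the flush are purged from RADOS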

        # We will count the RADOS deletions and MDS file purges, to verify
        # that the expected behaviour is happening as a result of the purge
        initial_dels = self.fs.mds_asok(['perf', 'dump', 'objecter'])['objecter']['osdop_delete']
        initial_purges = self.fs.mds_asok(['perf', 'dump', 'mds_cache'])['mds_cache']['strays_enqueued']

        # Use a client to delete a file
        self.mount_a.mount()
        self.mount_a.wait_until_mounted()
        self.mount_a.run_shell(["rm", "-rf", "mydir"])

        # Flush the journal so that the directory inode can be purged
        flush_data = self.fs.mds_asok(["flush", "journal"])
        self.assertEqual(flush_data['return_code'], 0)
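
        # Purging happens asynchronously after the flush, so poll the perf
        # counters rather than asserting on them immediately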

        # We expect to see both the file and the directory enqueued for purge
        self.wait_until_true(
            lambda: self.fs.mds_asok(['perf', 'dump', 'mds_cache'])['mds_cache']['strays_enqueued'] - initial_purges >= 2,
            60)

        # We expect two deletions, one of the dirfrag and one of the backtrace
        self.wait_until_true(
            lambda: self.fs.mds_asok(['perf', 'dump', 'objecter'])['objecter']['osdop_delete'] - initial_dels >= 2,
            60)  # timeout is fairly long to allow for tick+rados latencies
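
        # The dirfrag and backtrace objects created earlier should now be gone
        # from RADOS, and the root dirfrag should be empty again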
        with self.assertRaises(ObjectNotFound):
            self.fs.list_dirfrag(dir_ino)
        with self.assertRaises(ObjectNotFound):
            self.fs.read_backtrace(file_ino)
        self.assertEqual(self.fs.list_dirfrag(ROOT_INO), [])