from textwrap import dedent
from tasks.cephfs.cephfs_test_case import CephFSTestCase
from tasks.cephfs.filesystem import ObjectNotFound, ROOT_INO

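# Test the MDS "flush journal" admin socket command: flushing should write
# journaled metadata back to the metadata pool (dirfrag objects and
# backtraces), trim the journal down to a single subtree map, and allow
# deleted files to be purged from RADOS.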
class TestFlush(CephFSTestCase):
    def test_flush(self):
        self.mount_a.run_shell(["mkdir", "mydir"])
        self.mount_a.run_shell(["touch", "mydir/alpha"])
        dir_ino = self.mount_a.path_to_ino("mydir")
        file_ino = self.mount_a.path_to_ino("mydir/alpha")

        # Unmount the client so that it isn't still holding caps
        self.mount_a.umount_wait()

        # Before flush, the dirfrag object does not exist
        with self.assertRaises(ObjectNotFound):
            self.fs.list_dirfrag(dir_ino)

        # Before flush, the file's backtrace has not been written
        with self.assertRaises(ObjectNotFound):
            self.fs.read_backtrace(file_ino)

        # Before flush, there are no dentries in the root
        self.assertEqual(self.fs.list_dirfrag(ROOT_INO), [])

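        # ("flush journal" asks the MDS over its admin socket to write back
        # journaled metadata and trim the journal; the command's output is a
        # dict whose 'return_code' field is zero on success)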
        # Execute flush
        flush_data = self.fs.mds_asok(["flush", "journal"])
        self.assertEqual(flush_data['return_code'], 0)

        # After flush, the dirfrag object has been created
        dir_list = self.fs.list_dirfrag(dir_ino)
        self.assertEqual(dir_list, ["alpha_head"])
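        # (dirfrag omap entries are keyed "<name>_<snapid>"; "head" is the
        # live, non-snapshot version of the dentry)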

        # And the 'mydir' dentry is in the root
        self.assertEqual(self.fs.list_dirfrag(ROOT_INO), ['mydir_head'])

        # ...and the data object has its backtrace
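        # (a backtrace pairs each dentry name with the inode of the directory
        # containing it, ordered from the file up to the root, which is ino 1)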
        backtrace = self.fs.read_backtrace(file_ino)
        self.assertEqual(['alpha', 'mydir'], [a['dname'] for a in backtrace['ancestors']])
        self.assertEqual([dir_ino, 1], [a['dirino'] for a in backtrace['ancestors']])
        self.assertEqual(file_ino, backtrace['ino'])

        # ...and the journal is truncated to just a single subtreemap from the
        # newly created segment
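        # (the MDS rank is failed first because cephfs-journal-tool must not
        # run against a live journal; set_joinable() restores it afterwards)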
        self.fs.fail()
        summary_output = self.fs.journal_tool(["event", "get", "summary"], 0)
        self.fs.set_joinable()
        self.fs.wait_for_daemons()
        try:
            self.assertEqual(summary_output,
                             dedent(
                                 """
                                 Events by type:
                                   SUBTREEMAP: 1
                                 Errors: 0
                                 """
                             ).strip())
        except AssertionError:
            # In some states, flushing the journal will leave an extra
            # event in it from locks a client held.  This is correct
            # behaviour: the MDS is flushing the journal, it's just that
            # new events are getting added too.  In this case, we should
            # nevertheless see a journal containing only the subtree map
            # after a second flush.
            self.assertEqual(summary_output,
                             dedent(
                                 """
                                 Events by type:
                                   SUBTREEMAP: 1
                                   UPDATE: 1
                                 Errors: 0
                                 """
                             ).strip())
            flush_data = self.fs.mds_asok(["flush", "journal"])
            self.assertEqual(flush_data['return_code'], 0)

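        # Take the MDS offline again so cephfs-journal-tool can confirm that,
        # after any second flush, only the subtree map remains in the journal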
        self.fs.fail()
        self.assertEqual(self.fs.journal_tool(["event", "get", "summary"], 0),
                         dedent(
                             """
                             Events by type:
                               SUBTREEMAP: 1
                             Errors: 0
                             """
                         ).strip())
        self.fs.set_joinable()
        self.fs.wait_for_daemons()

        # Now for deletion!
        # We will count the RADOS deletions and MDS file purges, to verify
        # that the expected behaviour is happening as a result of the purge
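        # (objecter.osdop_delete counts RADOS delete operations issued by the
        # MDS; mds_cache.strays_enqueued counts inodes handed to the purge
        # queue)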
        initial_dels = self.fs.mds_asok(['perf', 'dump', 'objecter'])['objecter']['osdop_delete']
        initial_purges = self.fs.mds_asok(['perf', 'dump', 'mds_cache'])['mds_cache']['strays_enqueued']

        # Use a client to delete the file and its directory
        self.mount_a.mount_wait()
        self.mount_a.run_shell(["rm", "-rf", "mydir"])

        # Flush the journal so that the directory inode can be purged
        flush_data = self.fs.mds_asok(["flush", "journal"])
        self.assertEqual(flush_data['return_code'], 0)

        # We expect both the file and its directory to be enqueued for purging
        self.wait_until_true(
            lambda: self.fs.mds_asok(['perf', 'dump', 'mds_cache'])['mds_cache']['strays_enqueued'] - initial_purges >= 2,
            60)

        # We expect two RADOS deletions: the dirfrag object and the file's
        # data object (which carried the backtrace)
        self.wait_until_true(
            lambda: self.fs.mds_asok(['perf', 'dump', 'objecter'])['objecter']['osdop_delete'] - initial_dels >= 2,
            60)  # timeout is fairly long to allow for tick+rados latencies

        with self.assertRaises(ObjectNotFound):
            self.fs.list_dirfrag(dir_ino)
        with self.assertRaises(ObjectNotFound):
            self.fs.read_backtrace(file_ino)
        self.assertEqual(self.fs.list_dirfrag(ROOT_INO), [])