import logging

from tasks.cephfs.cephfs_test_case import CephFSTestCase
from teuthology.orchestra import run

log = logging.getLogger(__name__)

class TestFragmentation(CephFSTestCase):
    def get_splits(self):
        # Number of directory splits the MDS has performed, from its perf counters.
        return self.fs.mds_asok(['perf', 'dump', 'mds'])['mds']['dir_split']

    def get_merges(self):
        # Number of directory merges the MDS has performed, from its perf counters.
        return self.fs.mds_asok(['perf', 'dump', 'mds'])['mds']['dir_merge']
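    # For reference, the counters read above are the same ones an operator can
    # inspect from the MDS admin socket; the output shape is inferred from the
    # lookups in get_splits()/get_merges() and the values are illustrative:
    #
    #   ceph daemon mds.<id> perf dump mds
    #   {"mds": {..., "dir_split": 1, "dir_merge": 0, ...}}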
    def get_dir_ino(self, path):
        # Dump the MDS cache for `path` and return the cache entry for the
        # directory inode itself, so that callers can inspect its dirfrags.
        dir_cache = self.fs.read_cache(path, 0)
        dir_ino = None
        dir_inono = self.mount_a.path_to_ino(path.strip("/"))
        for ino in dir_cache:
            if ino['ino'] == dir_inono:
                dir_ino = ino
                break

        self.assertIsNotNone(dir_ino)

        return dir_ino
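    # The structure consumed from get_dir_ino() elsewhere in this file (keys
    # inferred from the assertions below; values illustrative) looks roughly
    # like:
    #
    #   {
    #     "ino": 1099511627776,          # 0x10000000000
    #     "dirfrags": [
    #       {"dirfrag": "0x10000000000.0*", "dentries": [...]},
    #       {"dirfrag": "0x10000000000.1*", "dentries": [...]}
    #     ],
    #     ...
    #   }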
    def _configure(self, **kwargs):
        """
        Apply kwargs as MDS configuration settings, enable dirfrags
        and restart the MDSs so that the new settings take effect.
        """
        for k, v in kwargs.items():
            self.ceph_cluster.set_ceph_conf("mds", k, v.__str__())

        self.fs.set_allow_dirfrags(True)

        self.mds_cluster.mds_fail_restart()
        self.fs.wait_for_daemons()
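    # _configure() is roughly equivalent to setting the options in the [mds]
    # section of ceph.conf on the MDS hosts and restarting the daemons; the
    # option names below are the ones exercised by the tests in this file,
    # values illustrative:
    #
    #   [mds]
    #   mds_bal_split_size = 20
    #   mds_bal_merge_size = 5
    #   mds_bal_split_bits = 1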
    def test_oversize(self):
        """
        That a directory is split when it becomes too large.
        """

        split_size = 20
        merge_size = 5

        self._configure(
            mds_bal_split_size=split_size,
            mds_bal_merge_size=merge_size,
            mds_bal_split_bits=1
        )

        self.assertEqual(self.get_splits(), 0)

        self.mount_a.create_n_files("splitdir/file", split_size + 1)

        self.wait_until_true(
            lambda: self.get_splits() == 1,
            timeout=30
        )

        frags = self.get_dir_ino("/splitdir")['dirfrags']
        self.assertEqual(len(frags), 2)
        self.assertEqual(frags[0]['dirfrag'], "0x10000000000.0*")
        self.assertEqual(frags[1]['dirfrag'], "0x10000000000.1*")
        self.assertEqual(
            sum([len(f['dentries']) for f in frags]),
            split_size + 1
        )

        self.assertEqual(self.get_merges(), 0)

        self.mount_a.run_shell(["rm", "-f", run.Raw("splitdir/file*")])

        self.wait_until_true(
            lambda: self.get_merges() == 1,
            timeout=30
        )

        self.assertEqual(len(self.get_dir_ino("/splitdir")["dirfrags"]), 1)
    def test_rapid_creation(self):
        """
        That the fast-splitting threshold of 1.5x the normal split
        size is applied when creating dentries quickly.
        """

        split_size = 100
        merge_size = 1

        self._configure(
            mds_bal_split_size=split_size,
            mds_bal_merge_size=merge_size,
            mds_bal_split_bits=3,
            mds_bal_fragment_size_max=int(split_size * 1.5 + 2)
        )

        # We test this only at a single split level.  If a client was sending
        # IO so fast that it hit a second split before the first split
        # was complete, it could violate mds_bal_fragment_size_max -- there
        # is a window where the child dirfrags of a split are unfrozen
        # (so they can grow), but still have STATE_FRAGMENTING (so they
        # cannot themselves be split yet).

        # By writing 4x the split size when the split bits are set
        # to 3 (i.e. an 8-way split), I am reasonably sure to see precisely
        # one split.  The test is to check whether that split
        # happens soon enough that the client doesn't exceed
        # 2x the split_size (the "immediate" split mode should
        # kick in at 1.5x the split size).

        self.assertEqual(self.get_splits(), 0)
        self.mount_a.create_n_files("splitdir/file", split_size * 4)
        self.wait_until_equal(
            self.get_splits,
            1,
            reject_fn=lambda s: s > 1,
            timeout=30
        )
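    # Worked numbers for the thresholds above: the "immediate" split mode is
    # expected to kick in once a fragment holds 1.5 * split_size dentries, and
    # mds_bal_fragment_size_max is deliberately set only just above that point
    # (int(1.5 * split_size + 2)).  With split bits of 3, the one expected
    # split is 2**3 = 8 ways, so even though 4 * split_size files are written
    # in total, each child fragment should end up with roughly split_size / 2
    # dentries, well below the split and fragment-size limits.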
    def test_deep_split(self):
        """
        That when the directory grows many times larger than the split size,
        the fragments get split again.
        """

        split_size = 100
        merge_size = 1  # i.e. don't merge a frag unless it's empty
        split_bits = 1

        branch_factor = 2**split_bits

        # Arbitrary: how many levels shall we try fragmenting before
        # ending the test?
        max_depth = 5

        self._configure(
            mds_bal_split_size=split_size,
            mds_bal_merge_size=merge_size,
            mds_bal_split_bits=split_bits
        )
        # Each iteration we will create another level of fragments.  The
        # placement of dentries into fragments is by hashes (i.e. pseudo
        # random), so we rely on statistics to get the behaviour that
        # by writing about 1.5x as many dentries as the split_size times
        # the number of frags, we will get them all to exceed their
        # split size and trigger a split.
        depth = 0
        files_written = 0
        splits_expected = 0
        while depth < max_depth:
            log.info("Writing files for depth {0}".format(depth))
            target_files = branch_factor**depth * int(split_size * 1.5)
            create_files = target_files - files_written

            self.ceph_cluster.mon_manager.raw_cluster_cmd("log",
                "{0} Writing {1} files (depth={2})".format(
                    self.__class__.__name__, create_files, depth
                ))
            self.mount_a.create_n_files("splitdir/file_{0}".format(depth),
                                        create_files)
            self.ceph_cluster.mon_manager.raw_cluster_cmd("log",
                "{0} Done".format(self.__class__.__name__))

            files_written += create_files
            log.info("Now have {0} files".format(files_written))

            splits_expected += branch_factor**depth
            log.info("Waiting to see {0} splits".format(splits_expected))
            try:
                self.wait_until_equal(
                    self.get_splits,
                    splits_expected,
                    timeout=30,
                    reject_fn=lambda x: x > splits_expected
                )

                frags = self.get_dir_ino("/splitdir")['dirfrags']
                self.assertEqual(len(frags), branch_factor**(depth+1))
                self.assertEqual(
                    sum([len(f['dentries']) for f in frags]),
                    target_files
                )
            except Exception:
                # On failures, log what fragmentation we actually ended
                # up with.  This block is just for logging; at the end
                # we raise the exception again.
                frags = self.get_dir_ino("/splitdir")['dirfrags']
                log.info("depth={0} splits_expected={1} files_written={2}".format(
                    depth, splits_expected, files_written
                ))
                log.info("Dirfrags:")
                for f in frags:
                    log.info("{0}: {1}".format(
                        f['dirfrag'], len(f['dentries'])
                    ))

                raise

            depth += 1
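        # Walking through the loop above with branch_factor = 2: depth 0 writes
        # 1.5 * split_size files and expects 1 split (2 frags); depth 1 tops the
        # directory up to 2 * 1.5 * split_size files and expects 2 more splits
        # (4 frags); in general, after depth d there should be 2**(d+1)
        # fragments and 2**(d+1) - 1 splits in total.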
        # Remember the inode number because we will be checking for
        # its objects in the metadata pool later.
        dir_inode_no = self.mount_a.path_to_ino("splitdir")

        self.mount_a.run_shell(["rm", "-rf", "splitdir/"])
        self.mount_a.umount_wait()

        self.fs.mds_asok(['flush', 'journal'])

        # Wait for all strays to purge
        self.wait_until_equal(
            lambda: self.fs.mds_asok(['perf', 'dump', 'mds_cache']
                                     )['mds_cache']['num_strays'],
            expect_val=0,
            timeout=1200
        )

        # Check that the metadata pool objects for all the myriad
        # child fragments are gone
        metadata_objs = self.fs.rados(["ls"])
        frag_objs = []
        for o in metadata_objs:
            if o.startswith("{0:x}.".format(dir_inode_no)):
                frag_objs.append(o)
        self.assertListEqual(frag_objs, [])