4 from textwrap
import dedent
5 from tasks
.cephfs
.fuse_mount
import FuseMount
6 from tasks
.cephfs
.cephfs_test_case
import CephFSTestCase
7 from teuthology
.orchestra
.run
import CommandFailedError
, Raw
log = logging.getLogger(__name__)

# Seconds allowed for an MDS to restart / reach a target state
# (used as the timeout for wait_for_state / wait_for_daemons below).
MDS_RESTART_GRACE = 60
class TestSnapshots(CephFSTestCase):
    # Config options to load from the running cluster; self.mds_max_snaps_per_dir
    # is read by the limit tests below — presumably CephFSTestCase exposes each
    # listed setting as an instance attribute (TODO confirm in CephFSTestCase).
    LOAD_SETTINGS = ["mds_max_snaps_per_dir"]
17 def _check_subtree(self
, rank
, path
, status
=None):
18 got_subtrees
= self
.fs
.rank_asok(["get", "subtrees"], rank
=rank
, status
=status
)
19 for s
in got_subtrees
:
20 if s
['dir']['path'] == path
and s
['auth_first'] == rank
:
24 def _get_snapclient_dump(self
, rank
=0, status
=None):
25 return self
.fs
.rank_asok(["dump", "snaps"], rank
=rank
, status
=status
)
27 def _get_snapserver_dump(self
, rank
=0, status
=None):
28 return self
.fs
.rank_asok(["dump", "snaps", "--server"], rank
=rank
, status
=status
)
30 def _get_last_created_snap(self
, rank
=0, status
=None):
31 return int(self
._get
_snapserver
_dump
(rank
,status
=status
)["last_created"])
33 def _get_last_destroyed_snap(self
, rank
=0, status
=None):
34 return int(self
._get
_snapserver
_dump
(rank
,status
=status
)["last_destroyed"])
36 def _get_pending_snap_update(self
, rank
=0, status
=None):
37 return self
._get
_snapserver
_dump
(rank
,status
=status
)["pending_update"]
39 def _get_pending_snap_destroy(self
, rank
=0, status
=None):
40 return self
._get
_snapserver
_dump
(rank
,status
=status
)["pending_destroy"]
42 def test_kill_mdstable(self
):
44 check snaptable transcation
46 if not isinstance(self
.mount_a
, FuseMount
):
47 self
.skipTest("Require FUSE client to forcibly kill mount")
49 self
.fs
.set_allow_new_snaps(True);
50 self
.fs
.set_max_mds(2)
51 status
= self
.fs
.wait_for_daemons()
53 grace
= float(self
.fs
.get_config("mds_beacon_grace", service_type
="mon"))
56 self
.mount_a
.run_shell(["mkdir", "-p", "d1/dir"])
57 self
.mount_a
.setfattr("d1", "ceph.dir.pin", "1")
58 self
.wait_until_true(lambda: self
._check
_subtree
(1, '/d1', status
=status
), timeout
=30)
60 last_created
= self
._get
_last
_created
_snap
(rank
=0,status
=status
)
62 # mds_kill_mdstable_at:
63 # 1: MDSTableServer::handle_prepare
64 # 2: MDSTableServer::_prepare_logged
65 # 5: MDSTableServer::handle_commit
66 # 6: MDSTableServer::_commit_logged
68 log
.info("testing snapserver mds_kill_mdstable_at={0}".format(i
))
70 status
= self
.fs
.status()
71 rank0
= self
.fs
.get_rank(rank
=0, status
=status
)
72 self
.fs
.rank_freeze(True, rank
=0)
73 self
.fs
.rank_asok(['config', 'set', "mds_kill_mdstable_at", "{0}".format(i
)], rank
=0, status
=status
)
74 proc
= self
.mount_a
.run_shell(["mkdir", "d1/dir/.snap/s1{0}".format(i
)], wait
=False)
75 self
.wait_until_true(lambda: "laggy_since" in self
.fs
.get_rank(rank
=0), timeout
=grace
*2);
76 self
.delete_mds_coredump(rank0
['name']);
78 self
.fs
.rank_fail(rank
=0)
79 self
.fs
.mds_restart(rank0
['name'])
80 self
.wait_for_daemon_start([rank0
['name']])
81 status
= self
.fs
.wait_for_daemons()
85 self
.wait_until_true(lambda: self
._get
_last
_created
_snap
(rank
=0) == last_created
, timeout
=30)
87 self
.set_conf("mds", "mds_reconnect_timeout", "5")
89 self
.mount_a
.run_shell(["rmdir", Raw("d1/dir/.snap/*")])
91 # set mds_kill_mdstable_at, also kill snapclient
93 log
.info("testing snapserver mds_kill_mdstable_at={0}, also kill snapclient".format(i
))
94 status
= self
.fs
.status()
95 last_created
= self
._get
_last
_created
_snap
(rank
=0, status
=status
)
97 rank0
= self
.fs
.get_rank(rank
=0, status
=status
)
98 rank1
= self
.fs
.get_rank(rank
=1, status
=status
)
99 self
.fs
.rank_freeze(True, rank
=0) # prevent failover...
100 self
.fs
.rank_freeze(True, rank
=1) # prevent failover...
101 self
.fs
.rank_asok(['config', 'set', "mds_kill_mdstable_at", "{0}".format(i
)], rank
=0, status
=status
)
102 proc
= self
.mount_a
.run_shell(["mkdir", "d1/dir/.snap/s2{0}".format(i
)], wait
=False)
103 self
.wait_until_true(lambda: "laggy_since" in self
.fs
.get_rank(rank
=0), timeout
=grace
*2);
104 self
.delete_mds_coredump(rank0
['name']);
106 self
.fs
.rank_signal(signal
.SIGKILL
, rank
=1)
109 self
.mount_a
.kill_cleanup()
111 self
.fs
.rank_fail(rank
=0)
112 self
.fs
.mds_restart(rank0
['name'])
113 self
.wait_for_daemon_start([rank0
['name']])
115 self
.fs
.wait_for_state('up:resolve', rank
=0, timeout
=MDS_RESTART_GRACE
)
117 self
.assertEqual(len(self
._get
_pending
_snap
_update
(rank
=0)), 1)
119 self
.assertEqual(len(self
._get
_pending
_snap
_update
(rank
=0)), 0)
120 self
.assertGreater(self
._get
_last
_created
_snap
(rank
=0), last_created
)
122 self
.fs
.rank_fail(rank
=1)
123 self
.fs
.mds_restart(rank1
['name'])
124 self
.wait_for_daemon_start([rank1
['name']])
125 self
.fs
.wait_for_state('up:active', rank
=0, timeout
=MDS_RESTART_GRACE
)
128 self
.wait_until_true(lambda: len(self
._get
_pending
_snap
_update
(rank
=0)) == 0, timeout
=30)
130 self
.assertEqual(self
._get
_last
_created
_snap
(rank
=0), last_created
)
132 self
.assertGreater(self
._get
_last
_created
_snap
(rank
=0), last_created
)
134 self
.mount_a
.mount_wait()
136 self
.mount_a
.run_shell(["rmdir", Raw("d1/dir/.snap/*")])
138 # mds_kill_mdstable_at:
139 # 3: MDSTableClient::handle_request (got agree)
140 # 4: MDSTableClient::commit
141 # 7: MDSTableClient::handle_request (got ack)
143 log
.info("testing snapclient mds_kill_mdstable_at={0}".format(i
))
144 last_created
= self
._get
_last
_created
_snap
(rank
=0)
146 status
= self
.fs
.status()
147 rank1
= self
.fs
.get_rank(rank
=1, status
=status
)
148 self
.fs
.rank_freeze(True, rank
=1) # prevent failover...
149 self
.fs
.rank_asok(['config', 'set', "mds_kill_mdstable_at", "{0}".format(i
)], rank
=1, status
=status
)
150 proc
= self
.mount_a
.run_shell(["mkdir", "d1/dir/.snap/s3{0}".format(i
)], wait
=False)
151 self
.wait_until_true(lambda: "laggy_since" in self
.fs
.get_rank(rank
=1), timeout
=grace
*2);
152 self
.delete_mds_coredump(rank1
['name']);
155 self
.mount_a
.kill_cleanup()
158 self
.assertEqual(len(self
._get
_pending
_snap
_update
(rank
=0)), 1)
160 self
.assertEqual(len(self
._get
_pending
_snap
_update
(rank
=0)), 0)
161 self
.assertGreater(self
._get
_last
_created
_snap
(rank
=0), last_created
)
163 self
.fs
.rank_fail(rank
=1)
164 self
.fs
.mds_restart(rank1
['name'])
165 self
.wait_for_daemon_start([rank1
['name']])
166 status
= self
.fs
.wait_for_daemons(timeout
=MDS_RESTART_GRACE
)
169 self
.wait_until_true(lambda: len(self
._get
_pending
_snap
_update
(rank
=0)) == 0, timeout
=30)
171 self
.assertEqual(self
._get
_last
_created
_snap
(rank
=0), last_created
)
173 self
.assertGreater(self
._get
_last
_created
_snap
(rank
=0), last_created
)
175 self
.mount_a
.mount_wait()
177 self
.mount_a
.run_shell(["rmdir", Raw("d1/dir/.snap/*")])
179 # mds_kill_mdstable_at:
180 # 3: MDSTableClient::handle_request (got agree)
181 # 8: MDSTableServer::handle_rollback
182 log
.info("testing snapclient mds_kill_mdstable_at=3, snapserver mds_kill_mdstable_at=8")
183 last_created
= self
._get
_last
_created
_snap
(rank
=0)
185 status
= self
.fs
.status()
186 rank0
= self
.fs
.get_rank(rank
=0, status
=status
)
187 rank1
= self
.fs
.get_rank(rank
=1, status
=status
)
188 self
.fs
.rank_freeze(True, rank
=0)
189 self
.fs
.rank_freeze(True, rank
=1)
190 self
.fs
.rank_asok(['config', 'set', "mds_kill_mdstable_at", "8"], rank
=0, status
=status
)
191 self
.fs
.rank_asok(['config', 'set', "mds_kill_mdstable_at", "3"], rank
=1, status
=status
)
192 proc
= self
.mount_a
.run_shell(["mkdir", "d1/dir/.snap/s4"], wait
=False)
193 self
.wait_until_true(lambda: "laggy_since" in self
.fs
.get_rank(rank
=1), timeout
=grace
*2);
194 self
.delete_mds_coredump(rank1
['name']);
197 self
.mount_a
.kill_cleanup()
199 self
.assertEqual(len(self
._get
_pending
_snap
_update
(rank
=0)), 1)
201 self
.fs
.rank_fail(rank
=1)
202 self
.fs
.mds_restart(rank1
['name'])
203 self
.wait_for_daemon_start([rank1
['name']])
205 # rollback triggers assertion
206 self
.wait_until_true(lambda: "laggy_since" in self
.fs
.get_rank(rank
=0), timeout
=grace
*2);
207 self
.delete_mds_coredump(rank0
['name']);
208 self
.fs
.rank_fail(rank
=0)
209 self
.fs
.mds_restart(rank0
['name'])
210 self
.wait_for_daemon_start([rank0
['name']])
211 self
.fs
.wait_for_state('up:active', rank
=0, timeout
=MDS_RESTART_GRACE
)
213 # mds.1 should re-send rollback message
214 self
.wait_until_true(lambda: len(self
._get
_pending
_snap
_update
(rank
=0)) == 0, timeout
=30)
215 self
.assertEqual(self
._get
_last
_created
_snap
(rank
=0), last_created
)
217 self
.mount_a
.mount_wait()
219 def test_snapclient_cache(self
):
221 check if snapclient cache gets synced properly
223 self
.fs
.set_allow_new_snaps(True);
224 self
.fs
.set_max_mds(3)
225 status
= self
.fs
.wait_for_daemons()
227 grace
= float(self
.fs
.get_config("mds_beacon_grace", service_type
="mon"))
229 self
.mount_a
.run_shell(["mkdir", "-p", "d0/d1/dir"])
230 self
.mount_a
.run_shell(["mkdir", "-p", "d0/d2/dir"])
231 self
.mount_a
.setfattr("d0", "ceph.dir.pin", "0")
232 self
.mount_a
.setfattr("d0/d1", "ceph.dir.pin", "1")
233 self
.mount_a
.setfattr("d0/d2", "ceph.dir.pin", "2")
234 self
.wait_until_true(lambda: self
._check
_subtree
(2, '/d0/d2', status
=status
), timeout
=30)
235 self
.wait_until_true(lambda: self
._check
_subtree
(1, '/d0/d1', status
=status
), timeout
=5)
236 self
.wait_until_true(lambda: self
._check
_subtree
(0, '/d0', status
=status
), timeout
=5)
238 def _check_snapclient_cache(snaps_dump
, cache_dump
=None, rank
=0):
239 if cache_dump
is None:
240 cache_dump
= self
._get
_snapclient
_dump
(rank
=rank
)
241 for key
, value
in cache_dump
.items():
242 if value
!= snaps_dump
[key
]:
247 last_created
= self
._get
_last
_created
_snap
(rank
=0)
248 self
.mount_a
.run_shell(["mkdir", "d0/d1/dir/.snap/s1", "d0/d1/dir/.snap/s2"])
249 self
.wait_until_true(lambda: len(self
._get
_pending
_snap
_update
(rank
=0)) == 0, timeout
=30)
250 self
.assertGreater(self
._get
_last
_created
_snap
(rank
=0), last_created
)
252 snaps_dump
= self
._get
_snapserver
_dump
(rank
=0)
253 self
.assertTrue(_check_snapclient_cache(snaps_dump
, rank
=0));
254 self
.assertTrue(_check_snapclient_cache(snaps_dump
, rank
=1));
255 self
.assertTrue(_check_snapclient_cache(snaps_dump
, rank
=2));
258 last_destroyed
= self
._get
_last
_destroyed
_snap
(rank
=0)
259 self
.mount_a
.run_shell(["rmdir", "d0/d1/dir/.snap/s1"])
260 self
.wait_until_true(lambda: len(self
._get
_pending
_snap
_destroy
(rank
=0)) == 0, timeout
=30)
261 self
.assertGreater(self
._get
_last
_destroyed
_snap
(rank
=0), last_destroyed
)
263 snaps_dump
= self
._get
_snapserver
_dump
(rank
=0)
264 self
.assertTrue(_check_snapclient_cache(snaps_dump
, rank
=0));
265 self
.assertTrue(_check_snapclient_cache(snaps_dump
, rank
=1));
266 self
.assertTrue(_check_snapclient_cache(snaps_dump
, rank
=2));
268 # sync during mds recovers
269 self
.fs
.rank_fail(rank
=2)
270 status
= self
.fs
.wait_for_daemons(timeout
=MDS_RESTART_GRACE
)
271 self
.assertTrue(_check_snapclient_cache(snaps_dump
, rank
=2));
273 self
.fs
.rank_fail(rank
=0)
274 self
.fs
.rank_fail(rank
=1)
275 status
= self
.fs
.wait_for_daemons()
276 self
.fs
.wait_for_state('up:active', rank
=0, timeout
=MDS_RESTART_GRACE
)
277 self
.assertTrue(_check_snapclient_cache(snaps_dump
, rank
=0));
278 self
.assertTrue(_check_snapclient_cache(snaps_dump
, rank
=1));
279 self
.assertTrue(_check_snapclient_cache(snaps_dump
, rank
=2));
281 # kill at MDSTableClient::handle_notify_prep
282 status
= self
.fs
.status()
283 rank2
= self
.fs
.get_rank(rank
=2, status
=status
)
284 self
.fs
.rank_freeze(True, rank
=2)
285 self
.fs
.rank_asok(['config', 'set', "mds_kill_mdstable_at", "9"], rank
=2, status
=status
)
286 proc
= self
.mount_a
.run_shell(["mkdir", "d0/d1/dir/.snap/s3"], wait
=False)
287 self
.wait_until_true(lambda: "laggy_since" in self
.fs
.get_rank(rank
=2), timeout
=grace
*2);
288 self
.delete_mds_coredump(rank2
['name']);
290 # mksnap should wait for notify ack from mds.2
291 self
.assertFalse(proc
.finished
);
293 # mksnap should proceed after mds.2 fails
294 self
.fs
.rank_fail(rank
=2)
295 self
.wait_until_true(lambda: proc
.finished
, timeout
=30);
297 self
.fs
.mds_restart(rank2
['name'])
298 self
.wait_for_daemon_start([rank2
['name']])
299 status
= self
.fs
.wait_for_daemons(timeout
=MDS_RESTART_GRACE
)
301 self
.mount_a
.run_shell(["rmdir", Raw("d0/d1/dir/.snap/*")])
303 # kill at MDSTableClient::commit
304 # the recovering mds should sync all mds' cache when it enters resolve stage
305 self
.set_conf("mds", "mds_reconnect_timeout", "5")
306 for i
in range(1, 4):
307 status
= self
.fs
.status()
308 rank2
= self
.fs
.get_rank(rank
=2, status
=status
)
309 self
.fs
.rank_freeze(True, rank
=2)
310 self
.fs
.rank_asok(['config', 'set', "mds_kill_mdstable_at", "4"], rank
=2, status
=status
)
311 last_created
= self
._get
_last
_created
_snap
(rank
=0)
312 proc
= self
.mount_a
.run_shell(["mkdir", "d0/d2/dir/.snap/s{0}".format(i
)], wait
=False)
313 self
.wait_until_true(lambda: "laggy_since" in self
.fs
.get_rank(rank
=2), timeout
=grace
*2);
314 self
.delete_mds_coredump(rank2
['name']);
317 self
.mount_a
.kill_cleanup()
319 self
.assertEqual(len(self
._get
_pending
_snap
_update
(rank
=0)), 1)
322 self
.fs
.rank_fail(rank
=0)
324 self
.fs
.rank_fail(rank
=1)
326 self
.fs
.rank_fail(rank
=2)
327 self
.fs
.mds_restart(rank2
['name'])
328 self
.wait_for_daemon_start([rank2
['name']])
329 status
= self
.fs
.wait_for_daemons(timeout
=MDS_RESTART_GRACE
)
331 rank0_cache
= self
._get
_snapclient
_dump
(rank
=0)
332 rank1_cache
= self
._get
_snapclient
_dump
(rank
=1)
333 rank2_cache
= self
._get
_snapclient
_dump
(rank
=2)
335 self
.assertGreater(int(rank0_cache
["last_created"]), last_created
)
336 self
.assertEqual(rank0_cache
, rank1_cache
);
337 self
.assertEqual(rank0_cache
, rank2_cache
);
339 self
.wait_until_true(lambda: len(self
._get
_pending
_snap
_update
(rank
=0)) == 0, timeout
=30)
341 snaps_dump
= self
._get
_snapserver
_dump
(rank
=0)
342 self
.assertEqual(snaps_dump
["last_created"], rank0_cache
["last_created"])
343 self
.assertTrue(_check_snapclient_cache(snaps_dump
, cache_dump
=rank0_cache
));
345 self
.mount_a
.mount_wait()
347 self
.mount_a
.run_shell(["rmdir", Raw("d0/d2/dir/.snap/*")])
    def test_multimds_mksnap(self):
        """
        check if snapshot takes effect across authority subtrees
        """
        self.fs.set_allow_new_snaps(True);
        self.fs.set_max_mds(2)
        status = self.fs.wait_for_daemons()

        # pin d0 to rank 0 and d0/d1 to rank 1 so the snapshotted tree
        # spans two authorities
        self.mount_a.run_shell(["mkdir", "-p", "d0/d1"])
        self.mount_a.setfattr("d0", "ceph.dir.pin", "0")
        self.mount_a.setfattr("d0/d1", "ceph.dir.pin", "1")
        self.wait_until_true(lambda: self._check_subtree(1, '/d0/d1', status=status), timeout=30)
        self.wait_until_true(lambda: self._check_subtree(0, '/d0', status=status), timeout=5)

        # snapshot taken at d0 (rank 0) must preserve the contents of a
        # file whose authority is rank 1, even after the live file is gone
        self.mount_a.write_test_pattern("d0/d1/file_a", 8 * 1024 * 1024)
        self.mount_a.run_shell(["mkdir", "d0/.snap/s1"])
        self.mount_a.run_shell(["rm", "-f", "d0/d1/file_a"])
        self.mount_a.validate_test_pattern("d0/.snap/s1/d1/file_a", 8 * 1024 * 1024)

        # cleanup
        self.mount_a.run_shell(["rmdir", "d0/.snap/s1"])
        self.mount_a.run_shell(["rm", "-rf", "d0"])
    def test_multimds_past_parents(self):
        """
        check if past parents are properly recorded during across authority rename
        """
        self.fs.set_allow_new_snaps(True);
        self.fs.set_max_mds(2)
        status = self.fs.wait_for_daemons()

        # pin d0 and d1 to different ranks
        self.mount_a.run_shell(["mkdir", "d0", "d1"])
        self.mount_a.setfattr("d0", "ceph.dir.pin", "0")
        self.mount_a.setfattr("d1", "ceph.dir.pin", "1")
        self.wait_until_true(lambda: self._check_subtree(1, '/d1', status=status), timeout=30)
        self.wait_until_true(lambda: self._check_subtree(0, '/d0', status=status), timeout=5)

        self.mount_a.run_shell(["mkdir", "d0/d3"])
        self.mount_a.run_shell(["mkdir", "d0/.snap/s1"])
        snap_name = self.mount_a.run_shell(["ls", "d0/d3/.snap"]).stdout.getvalue()

        # rename across authorities: the snapshot visible under d0/d3 must
        # still be visible under its new location d1/d3
        self.mount_a.run_shell(["mv", "d0/d3", "d1/d3"])
        snap_name1 = self.mount_a.run_shell(["ls", "d1/d3/.snap"]).stdout.getvalue()
        self.assertEqual(snap_name1, snap_name);

        # removing the snapshot at the old parent must remove it at d1/d3 too
        self.mount_a.run_shell(["rmdir", "d0/.snap/s1"])
        snap_name1 = self.mount_a.run_shell(["ls", "d1/d3/.snap"]).stdout.getvalue()
        self.assertEqual(snap_name1, "");

        self.mount_a.run_shell(["rm", "-rf", "d0", "d1"])
399 def test_multimds_hardlink(self
):
401 check if hardlink snapshot works in multimds setup
403 self
.fs
.set_allow_new_snaps(True);
404 self
.fs
.set_max_mds(2)
405 status
= self
.fs
.wait_for_daemons()
407 self
.mount_a
.run_shell(["mkdir", "d0", "d1"])
409 self
.mount_a
.setfattr("d0", "ceph.dir.pin", "0")
410 self
.mount_a
.setfattr("d1", "ceph.dir.pin", "1")
411 self
.wait_until_true(lambda: self
._check
_subtree
(1, '/d1', status
=status
), timeout
=30)
412 self
.wait_until_true(lambda: self
._check
_subtree
(0, '/d0', status
=status
), timeout
=5)
414 self
.mount_a
.run_python(dedent("""
416 open(os.path.join("{path}", "d0/file1"), 'w').write("asdf")
417 open(os.path.join("{path}", "d0/file2"), 'w').write("asdf")
418 """.format(path
=self
.mount_a
.mountpoint
)
421 self
.mount_a
.run_shell(["ln", "d0/file1", "d1/file1"])
422 self
.mount_a
.run_shell(["ln", "d0/file2", "d1/file2"])
424 self
.mount_a
.run_shell(["mkdir", "d1/.snap/s1"])
426 self
.mount_a
.run_python(dedent("""
428 open(os.path.join("{path}", "d0/file1"), 'w').write("qwer")
429 """.format(path
=self
.mount_a
.mountpoint
)
432 self
.mount_a
.run_shell(["grep", "asdf", "d1/.snap/s1/file1"])
434 self
.mount_a
.run_shell(["rm", "-f", "d0/file2"])
435 self
.mount_a
.run_shell(["grep", "asdf", "d1/.snap/s1/file2"])
437 self
.mount_a
.run_shell(["rm", "-f", "d1/file2"])
438 self
.mount_a
.run_shell(["grep", "asdf", "d1/.snap/s1/file2"])
440 self
.mount_a
.run_shell(["rmdir", "d1/.snap/s1"])
441 self
.mount_a
.run_shell(["rm", "-rf", "d0", "d1"])
    class SnapLimitViolationException(Exception):
        # Ordinal of the snapshot whose creation exceeded
        # mds_max_snaps_per_dir; -1 until set by __init__.
        failed_snapshot_number = -1

        def __init__(self, num):
            self.failed_snapshot_number = num
449 def get_snap_name(self
, dir_name
, sno
):
450 sname
= "{dir_name}/.snap/s_{sno}".format(dir_name
=dir_name
, sno
=sno
)
453 def create_snap_dir(self
, sname
):
454 self
.mount_a
.run_shell(["mkdir", sname
])
456 def delete_dir_and_snaps(self
, dir_name
, snaps
):
457 for sno
in range(1, snaps
+1, 1):
458 sname
= self
.get_snap_name(dir_name
, sno
)
459 self
.mount_a
.run_shell(["rmdir", sname
])
460 self
.mount_a
.run_shell(["rmdir", dir_name
])
462 def create_dir_and_snaps(self
, dir_name
, snaps
):
463 self
.mount_a
.run_shell(["mkdir", dir_name
])
465 for sno
in range(1, snaps
+1, 1):
466 sname
= self
.get_snap_name(dir_name
, sno
)
468 self
.create_snap_dir(sname
)
469 except CommandFailedError
as e
:
470 # failing at the last mkdir beyond the limit is expected
472 log
.info("failed while creating snap #{}: {}".format(sno
, repr(e
)))
473 raise TestSnapshots
.SnapLimitViolationException(sno
)
    def test_mds_max_snaps_per_dir_default_limit(self):
        """
        Test the newly introduced option named mds_max_snaps_per_dir
        Default snaps limit is 100
        Test if the default number of snapshot directories can be created
        """
        self.create_dir_and_snaps("accounts", int(self.mds_max_snaps_per_dir))
        self.delete_dir_and_snaps("accounts", int(self.mds_max_snaps_per_dir))
484 def test_mds_max_snaps_per_dir_with_increased_limit(self
):
486 Test the newly introudced option named mds_max_snaps_per_dir
487 First create 101 directories and ensure that the 101st directory
488 creation fails. Then increase the default by one and see if the
489 additional directory creation succeeds
491 # first test the default limit
492 new_limit
= int(self
.mds_max_snaps_per_dir
)
493 self
.fs
.rank_asok(['config', 'set', 'mds_max_snaps_per_dir', repr(new_limit
)])
495 self
.create_dir_and_snaps("accounts", new_limit
+ 1)
496 except TestSnapshots
.SnapLimitViolationException
as e
:
497 if e
.failed_snapshot_number
== (new_limit
+ 1):
499 # then increase the limit by one and test
500 new_limit
= new_limit
+ 1
501 self
.fs
.rank_asok(['config', 'set', 'mds_max_snaps_per_dir', repr(new_limit
)])
502 sname
= self
.get_snap_name("accounts", new_limit
)
503 self
.create_snap_dir(sname
)
504 self
.delete_dir_and_snaps("accounts", new_limit
)
506 def test_mds_max_snaps_per_dir_with_reduced_limit(self
):
508 Test the newly introudced option named mds_max_snaps_per_dir
509 First create 99 directories. Then reduce the limit to 98. Then try
510 creating another directory and ensure that additional directory
513 # first test the new limit
514 new_limit
= int(self
.mds_max_snaps_per_dir
) - 1
515 self
.create_dir_and_snaps("accounts", new_limit
)
516 sname
= self
.get_snap_name("accounts", new_limit
+ 1)
517 # then reduce the limit by one and test
518 new_limit
= new_limit
- 1
519 self
.fs
.rank_asok(['config', 'set', 'mds_max_snaps_per_dir', repr(new_limit
)])
521 self
.create_snap_dir(sname
)
522 except CommandFailedError
:
523 # after reducing limit we expect the new snapshot creation to fail
525 self
.delete_dir_and_snaps("accounts", new_limit
+ 1)