import errno
import logging
import signal

from textwrap import dedent

from tasks.cephfs.fuse_mount import FuseMount
from tasks.cephfs.cephfs_test_case import CephFSTestCase
from teuthology.orchestra.run import Raw
from teuthology.exceptions import CommandFailedError
# Module-level logger for this test module.
log = logging.getLogger(__name__)

# Seconds to wait for an MDS daemon to restart and reach an expected state.
MDS_RESTART_GRACE = 60
class TestSnapshots(CephFSTestCase):
    """
    Tests for CephFS snapshots: snaptable transaction recovery, snapclient
    cache synchronization, multi-MDS snapshot behaviour and the per-directory
    snapshot limit (mds_max_snaps_per_dir).
    """

    # MDS config values loaded at test setup; each becomes an attribute on
    # the test instance (used below as self.mds_max_snaps_per_dir).
    LOAD_SETTINGS = ["mds_max_snaps_per_dir"]
18 def _check_subtree(self
, rank
, path
, status
=None):
19 got_subtrees
= self
.fs
.rank_asok(["get", "subtrees"], rank
=rank
, status
=status
)
20 for s
in got_subtrees
:
21 if s
['dir']['path'] == path
and s
['auth_first'] == rank
:
25 def _get_snapclient_dump(self
, rank
=0, status
=None):
26 return self
.fs
.rank_asok(["dump", "snaps"], rank
=rank
, status
=status
)
28 def _get_snapserver_dump(self
, rank
=0, status
=None):
29 return self
.fs
.rank_asok(["dump", "snaps", "--server"], rank
=rank
, status
=status
)
31 def _get_last_created_snap(self
, rank
=0, status
=None):
32 return int(self
._get
_snapserver
_dump
(rank
,status
=status
)["last_created"])
34 def _get_last_destroyed_snap(self
, rank
=0, status
=None):
35 return int(self
._get
_snapserver
_dump
(rank
,status
=status
)["last_destroyed"])
37 def _get_pending_snap_update(self
, rank
=0, status
=None):
38 return self
._get
_snapserver
_dump
(rank
,status
=status
)["pending_update"]
40 def _get_pending_snap_destroy(self
, rank
=0, status
=None):
41 return self
._get
_snapserver
_dump
(rank
,status
=status
)["pending_destroy"]
43 def test_allow_new_snaps_config(self
):
45 Check whether 'allow_new_snaps' setting works
47 self
.mount_a
.run_shell(["mkdir", "test-allow-snaps"])
49 self
.fs
.set_allow_new_snaps(False);
51 self
.mount_a
.run_shell(["mkdir", "test-allow-snaps/.snap/snap00"])
52 except CommandFailedError
as ce
:
53 self
.assertEqual(ce
.exitstatus
, errno
.EPERM
, "expected EPERM")
55 self
.fail("expected snap creatiion to fail")
57 self
.fs
.set_allow_new_snaps(True);
58 self
.mount_a
.run_shell(["mkdir", "test-allow-snaps/.snap/snap00"])
59 self
.mount_a
.run_shell(["rmdir", "test-allow-snaps/.snap/snap00"])
60 self
.mount_a
.run_shell(["rmdir", "test-allow-snaps"])
62 def test_kill_mdstable(self
):
64 check snaptable transcation
66 if not isinstance(self
.mount_a
, FuseMount
):
67 self
.skipTest("Require FUSE client to forcibly kill mount")
69 self
.fs
.set_allow_new_snaps(True);
70 self
.fs
.set_max_mds(2)
71 status
= self
.fs
.wait_for_daemons()
74 self
.mount_a
.run_shell(["mkdir", "-p", "d1/dir"])
75 self
.mount_a
.setfattr("d1", "ceph.dir.pin", "1")
76 self
._wait
_subtrees
([("/d1", 1)], rank
=1, path
="/d1")
78 last_created
= self
._get
_last
_created
_snap
(rank
=0,status
=status
)
80 # mds_kill_mdstable_at:
81 # 1: MDSTableServer::handle_prepare
82 # 2: MDSTableServer::_prepare_logged
83 # 5: MDSTableServer::handle_commit
84 # 6: MDSTableServer::_commit_logged
86 log
.info("testing snapserver mds_kill_mdstable_at={0}".format(i
))
88 status
= self
.fs
.status()
89 rank0
= self
.fs
.get_rank(rank
=0, status
=status
)
90 self
.fs
.rank_freeze(True, rank
=0)
91 self
.fs
.rank_asok(['config', 'set', "mds_kill_mdstable_at", "{0}".format(i
)], rank
=0, status
=status
)
92 proc
= self
.mount_a
.run_shell(["mkdir", "d1/dir/.snap/s1{0}".format(i
)], wait
=False)
93 self
.wait_until_true(lambda: "laggy_since" in self
.fs
.get_rank(rank
=0), timeout
=self
.fs
.beacon_timeout
);
94 self
.delete_mds_coredump(rank0
['name']);
96 self
.fs
.rank_fail(rank
=0)
97 self
.fs
.mds_restart(rank0
['name'])
98 self
.wait_for_daemon_start([rank0
['name']])
99 status
= self
.fs
.wait_for_daemons()
103 self
.wait_until_true(lambda: self
._get
_last
_created
_snap
(rank
=0) == last_created
, timeout
=30)
105 self
.set_conf("mds", "mds_reconnect_timeout", "5")
107 self
.mount_a
.run_shell(["rmdir", Raw("d1/dir/.snap/*")])
109 # set mds_kill_mdstable_at, also kill snapclient
111 log
.info("testing snapserver mds_kill_mdstable_at={0}, also kill snapclient".format(i
))
112 status
= self
.fs
.status()
113 last_created
= self
._get
_last
_created
_snap
(rank
=0, status
=status
)
115 rank0
= self
.fs
.get_rank(rank
=0, status
=status
)
116 rank1
= self
.fs
.get_rank(rank
=1, status
=status
)
117 self
.fs
.rank_freeze(True, rank
=0) # prevent failover...
118 self
.fs
.rank_freeze(True, rank
=1) # prevent failover...
119 self
.fs
.rank_asok(['config', 'set', "mds_kill_mdstable_at", "{0}".format(i
)], rank
=0, status
=status
)
120 proc
= self
.mount_a
.run_shell(["mkdir", "d1/dir/.snap/s2{0}".format(i
)], wait
=False)
121 self
.wait_until_true(lambda: "laggy_since" in self
.fs
.get_rank(rank
=0), timeout
=self
.fs
.beacon_timeout
);
122 self
.delete_mds_coredump(rank0
['name']);
124 self
.fs
.rank_signal(signal
.SIGKILL
, rank
=1)
127 self
.mount_a
.kill_cleanup()
129 self
.fs
.rank_fail(rank
=0)
130 self
.fs
.mds_restart(rank0
['name'])
131 self
.wait_for_daemon_start([rank0
['name']])
133 self
.fs
.wait_for_state('up:resolve', rank
=0, timeout
=MDS_RESTART_GRACE
)
135 self
.assertEqual(len(self
._get
_pending
_snap
_update
(rank
=0)), 1)
137 self
.assertEqual(len(self
._get
_pending
_snap
_update
(rank
=0)), 0)
138 self
.assertGreater(self
._get
_last
_created
_snap
(rank
=0), last_created
)
140 self
.fs
.rank_fail(rank
=1)
141 self
.fs
.mds_restart(rank1
['name'])
142 self
.wait_for_daemon_start([rank1
['name']])
143 self
.fs
.wait_for_state('up:active', rank
=0, timeout
=MDS_RESTART_GRACE
)
146 self
.wait_until_true(lambda: len(self
._get
_pending
_snap
_update
(rank
=0)) == 0, timeout
=30)
148 self
.assertEqual(self
._get
_last
_created
_snap
(rank
=0), last_created
)
150 self
.assertGreater(self
._get
_last
_created
_snap
(rank
=0), last_created
)
152 self
.mount_a
.mount_wait()
154 self
.mount_a
.run_shell(["rmdir", Raw("d1/dir/.snap/*")])
156 # mds_kill_mdstable_at:
157 # 3: MDSTableClient::handle_request (got agree)
158 # 4: MDSTableClient::commit
159 # 7: MDSTableClient::handle_request (got ack)
161 log
.info("testing snapclient mds_kill_mdstable_at={0}".format(i
))
162 last_created
= self
._get
_last
_created
_snap
(rank
=0)
164 status
= self
.fs
.status()
165 rank1
= self
.fs
.get_rank(rank
=1, status
=status
)
166 self
.fs
.rank_freeze(True, rank
=1) # prevent failover...
167 self
.fs
.rank_asok(['config', 'set', "mds_kill_mdstable_at", "{0}".format(i
)], rank
=1, status
=status
)
168 proc
= self
.mount_a
.run_shell(["mkdir", "d1/dir/.snap/s3{0}".format(i
)], wait
=False)
169 self
.wait_until_true(lambda: "laggy_since" in self
.fs
.get_rank(rank
=1), timeout
=self
.fs
.beacon_timeout
);
170 self
.delete_mds_coredump(rank1
['name']);
173 self
.mount_a
.kill_cleanup()
176 self
.assertEqual(len(self
._get
_pending
_snap
_update
(rank
=0)), 1)
178 self
.assertEqual(len(self
._get
_pending
_snap
_update
(rank
=0)), 0)
179 self
.assertGreater(self
._get
_last
_created
_snap
(rank
=0), last_created
)
181 self
.fs
.rank_fail(rank
=1)
182 self
.fs
.mds_restart(rank1
['name'])
183 self
.wait_for_daemon_start([rank1
['name']])
184 status
= self
.fs
.wait_for_daemons(timeout
=MDS_RESTART_GRACE
)
187 self
.wait_until_true(lambda: len(self
._get
_pending
_snap
_update
(rank
=0)) == 0, timeout
=30)
189 self
.assertEqual(self
._get
_last
_created
_snap
(rank
=0), last_created
)
191 self
.assertGreater(self
._get
_last
_created
_snap
(rank
=0), last_created
)
193 self
.mount_a
.mount_wait()
195 self
.mount_a
.run_shell(["rmdir", Raw("d1/dir/.snap/*")])
197 # mds_kill_mdstable_at:
198 # 3: MDSTableClient::handle_request (got agree)
199 # 8: MDSTableServer::handle_rollback
200 log
.info("testing snapclient mds_kill_mdstable_at=3, snapserver mds_kill_mdstable_at=8")
201 last_created
= self
._get
_last
_created
_snap
(rank
=0)
203 status
= self
.fs
.status()
204 rank0
= self
.fs
.get_rank(rank
=0, status
=status
)
205 rank1
= self
.fs
.get_rank(rank
=1, status
=status
)
206 self
.fs
.rank_freeze(True, rank
=0)
207 self
.fs
.rank_freeze(True, rank
=1)
208 self
.fs
.rank_asok(['config', 'set', "mds_kill_mdstable_at", "8"], rank
=0, status
=status
)
209 self
.fs
.rank_asok(['config', 'set', "mds_kill_mdstable_at", "3"], rank
=1, status
=status
)
210 proc
= self
.mount_a
.run_shell(["mkdir", "d1/dir/.snap/s4"], wait
=False)
211 self
.wait_until_true(lambda: "laggy_since" in self
.fs
.get_rank(rank
=1), timeout
=self
.fs
.beacon_timeout
);
212 self
.delete_mds_coredump(rank1
['name']);
215 self
.mount_a
.kill_cleanup()
217 self
.assertEqual(len(self
._get
_pending
_snap
_update
(rank
=0)), 1)
219 self
.fs
.rank_fail(rank
=1)
220 self
.fs
.mds_restart(rank1
['name'])
221 self
.wait_for_daemon_start([rank1
['name']])
223 # rollback triggers assertion
224 self
.wait_until_true(lambda: "laggy_since" in self
.fs
.get_rank(rank
=0), timeout
=self
.fs
.beacon_timeout
);
225 self
.delete_mds_coredump(rank0
['name']);
226 self
.fs
.rank_fail(rank
=0)
227 self
.fs
.mds_restart(rank0
['name'])
228 self
.wait_for_daemon_start([rank0
['name']])
229 self
.fs
.wait_for_state('up:active', rank
=0, timeout
=MDS_RESTART_GRACE
)
231 # mds.1 should re-send rollback message
232 self
.wait_until_true(lambda: len(self
._get
_pending
_snap
_update
(rank
=0)) == 0, timeout
=30)
233 self
.assertEqual(self
._get
_last
_created
_snap
(rank
=0), last_created
)
235 self
.mount_a
.mount_wait()
237 def test_snapclient_cache(self
):
239 check if snapclient cache gets synced properly
241 self
.fs
.set_allow_new_snaps(True);
242 self
.fs
.set_max_mds(3)
243 status
= self
.fs
.wait_for_daemons()
245 self
.mount_a
.run_shell(["mkdir", "-p", "d0/d1/dir"])
246 self
.mount_a
.run_shell(["mkdir", "-p", "d0/d2/dir"])
247 self
.mount_a
.setfattr("d0", "ceph.dir.pin", "0")
248 self
.mount_a
.setfattr("d0/d1", "ceph.dir.pin", "1")
249 self
.mount_a
.setfattr("d0/d2", "ceph.dir.pin", "2")
250 self
._wait
_subtrees
([("/d0", 0), ("/d0/d1", 1), ("/d0/d2", 2)], rank
="all", status
=status
, path
="/d0")
252 def _check_snapclient_cache(snaps_dump
, cache_dump
=None, rank
=0):
253 if cache_dump
is None:
254 cache_dump
= self
._get
_snapclient
_dump
(rank
=rank
)
255 for key
, value
in cache_dump
.items():
256 if value
!= snaps_dump
[key
]:
261 last_created
= self
._get
_last
_created
_snap
(rank
=0)
262 self
.mount_a
.run_shell(["mkdir", "d0/d1/dir/.snap/s1", "d0/d1/dir/.snap/s2"])
263 self
.wait_until_true(lambda: len(self
._get
_pending
_snap
_update
(rank
=0)) == 0, timeout
=30)
264 self
.assertGreater(self
._get
_last
_created
_snap
(rank
=0), last_created
)
266 snaps_dump
= self
._get
_snapserver
_dump
(rank
=0)
267 self
.assertTrue(_check_snapclient_cache(snaps_dump
, rank
=0));
268 self
.assertTrue(_check_snapclient_cache(snaps_dump
, rank
=1));
269 self
.assertTrue(_check_snapclient_cache(snaps_dump
, rank
=2));
272 last_destroyed
= self
._get
_last
_destroyed
_snap
(rank
=0)
273 self
.mount_a
.run_shell(["rmdir", "d0/d1/dir/.snap/s1"])
274 self
.wait_until_true(lambda: len(self
._get
_pending
_snap
_destroy
(rank
=0)) == 0, timeout
=30)
275 self
.assertGreater(self
._get
_last
_destroyed
_snap
(rank
=0), last_destroyed
)
277 snaps_dump
= self
._get
_snapserver
_dump
(rank
=0)
278 self
.assertTrue(_check_snapclient_cache(snaps_dump
, rank
=0));
279 self
.assertTrue(_check_snapclient_cache(snaps_dump
, rank
=1));
280 self
.assertTrue(_check_snapclient_cache(snaps_dump
, rank
=2));
282 # sync during mds recovers
283 self
.fs
.rank_fail(rank
=2)
284 status
= self
.fs
.wait_for_daemons(timeout
=MDS_RESTART_GRACE
)
285 self
.assertTrue(_check_snapclient_cache(snaps_dump
, rank
=2));
287 self
.fs
.rank_fail(rank
=0)
288 self
.fs
.rank_fail(rank
=1)
289 status
= self
.fs
.wait_for_daemons()
290 self
.fs
.wait_for_state('up:active', rank
=0, timeout
=MDS_RESTART_GRACE
)
291 self
.assertTrue(_check_snapclient_cache(snaps_dump
, rank
=0));
292 self
.assertTrue(_check_snapclient_cache(snaps_dump
, rank
=1));
293 self
.assertTrue(_check_snapclient_cache(snaps_dump
, rank
=2));
295 # kill at MDSTableClient::handle_notify_prep
296 status
= self
.fs
.status()
297 rank2
= self
.fs
.get_rank(rank
=2, status
=status
)
298 self
.fs
.rank_freeze(True, rank
=2)
299 self
.fs
.rank_asok(['config', 'set', "mds_kill_mdstable_at", "9"], rank
=2, status
=status
)
300 proc
= self
.mount_a
.run_shell(["mkdir", "d0/d1/dir/.snap/s3"], wait
=False)
301 self
.wait_until_true(lambda: "laggy_since" in self
.fs
.get_rank(rank
=2), timeout
=self
.fs
.beacon_timeout
);
302 self
.delete_mds_coredump(rank2
['name']);
304 # mksnap should wait for notify ack from mds.2
305 self
.assertFalse(proc
.finished
);
307 # mksnap should proceed after mds.2 fails
308 self
.fs
.rank_fail(rank
=2)
309 self
.wait_until_true(lambda: proc
.finished
, timeout
=30);
311 self
.fs
.mds_restart(rank2
['name'])
312 self
.wait_for_daemon_start([rank2
['name']])
313 status
= self
.fs
.wait_for_daemons(timeout
=MDS_RESTART_GRACE
)
315 self
.mount_a
.run_shell(["rmdir", Raw("d0/d1/dir/.snap/*")])
317 # kill at MDSTableClient::commit
318 # the recovering mds should sync all mds' cache when it enters resolve stage
319 self
.set_conf("mds", "mds_reconnect_timeout", "5")
320 for i
in range(1, 4):
321 status
= self
.fs
.status()
322 rank2
= self
.fs
.get_rank(rank
=2, status
=status
)
323 self
.fs
.rank_freeze(True, rank
=2)
324 self
.fs
.rank_asok(['config', 'set', "mds_kill_mdstable_at", "4"], rank
=2, status
=status
)
325 last_created
= self
._get
_last
_created
_snap
(rank
=0)
326 proc
= self
.mount_a
.run_shell(["mkdir", "d0/d2/dir/.snap/s{0}".format(i
)], wait
=False)
327 self
.wait_until_true(lambda: "laggy_since" in self
.fs
.get_rank(rank
=2), timeout
=self
.fs
.beacon_timeout
);
328 self
.delete_mds_coredump(rank2
['name']);
331 self
.mount_a
.kill_cleanup()
333 self
.assertEqual(len(self
._get
_pending
_snap
_update
(rank
=0)), 1)
336 self
.fs
.rank_fail(rank
=0)
338 self
.fs
.rank_fail(rank
=1)
340 self
.fs
.rank_fail(rank
=2)
341 self
.fs
.mds_restart(rank2
['name'])
342 self
.wait_for_daemon_start([rank2
['name']])
343 status
= self
.fs
.wait_for_daemons(timeout
=MDS_RESTART_GRACE
)
345 rank0_cache
= self
._get
_snapclient
_dump
(rank
=0)
346 rank1_cache
= self
._get
_snapclient
_dump
(rank
=1)
347 rank2_cache
= self
._get
_snapclient
_dump
(rank
=2)
349 self
.assertGreater(int(rank0_cache
["last_created"]), last_created
)
350 self
.assertEqual(rank0_cache
, rank1_cache
);
351 self
.assertEqual(rank0_cache
, rank2_cache
);
353 self
.wait_until_true(lambda: len(self
._get
_pending
_snap
_update
(rank
=0)) == 0, timeout
=30)
355 snaps_dump
= self
._get
_snapserver
_dump
(rank
=0)
356 self
.assertEqual(snaps_dump
["last_created"], rank0_cache
["last_created"])
357 self
.assertTrue(_check_snapclient_cache(snaps_dump
, cache_dump
=rank0_cache
));
359 self
.mount_a
.mount_wait()
361 self
.mount_a
.run_shell(["rmdir", Raw("d0/d2/dir/.snap/*")])
    def test_multimds_mksnap(self):
        """
        check if snapshot takes effect across authority subtrees
        """
        self.fs.set_allow_new_snaps(True);
        self.fs.set_max_mds(2)
        status = self.fs.wait_for_daemons()

        # pin d0 to rank 0 and d0/d1 to rank 1 so the snapshotted tree
        # spans two authorities
        self.mount_a.run_shell(["mkdir", "-p", "d0/d1/empty"])
        self.mount_a.setfattr("d0", "ceph.dir.pin", "0")
        self.mount_a.setfattr("d0/d1", "ceph.dir.pin", "1")
        self._wait_subtrees([("/d0", 0), ("/d0/d1", 1)], rank="all", status=status, path="/d0")

        # a snapshot taken at d0 (rank 0) must preserve file data living
        # in the rank-1 subtree, even after the live file is deleted
        self.mount_a.write_test_pattern("d0/d1/file_a", 8 * 1024 * 1024)
        self.mount_a.run_shell(["mkdir", "d0/.snap/s1"])
        self.mount_a.run_shell(["rm", "-f", "d0/d1/file_a"])
        self.mount_a.validate_test_pattern("d0/.snap/s1/d1/file_a", 8 * 1024 * 1024)

        # cleanup
        self.mount_a.run_shell(["rmdir", "d0/.snap/s1"])
        self.mount_a.run_shell(["rm", "-rf", "d0"])
    def test_multimds_past_parents(self):
        """
        check if past parents are properly recorded during across authority rename
        """
        self.fs.set_allow_new_snaps(True);
        self.fs.set_max_mds(2)
        status = self.fs.wait_for_daemons()

        # pin d0 and d1 to different ranks
        self.mount_a.run_shell_payload("mkdir -p {d0,d1}/empty")
        self.mount_a.setfattr("d0", "ceph.dir.pin", "0")
        self.mount_a.setfattr("d1", "ceph.dir.pin", "1")
        self._wait_subtrees([("/d0", 0), ("/d1", 1)], rank=0, status=status)

        self.mount_a.run_shell(["mkdir", "d0/d3"])
        self.mount_a.run_shell(["mkdir", "d0/.snap/s1"])
        snap_name = self.mount_a.run_shell(["ls", "d0/d3/.snap"]).stdout.getvalue()

        # rename d3 across authorities; the snapshot must still be visible
        # on the moved directory
        self.mount_a.run_shell(["mv", "d0/d3", "d1/d3"])
        snap_name1 = self.mount_a.run_shell(["ls", "d1/d3/.snap"]).stdout.getvalue()
        self.assertEqual(snap_name1, snap_name);

        # removing the snapshot at the original location must also remove
        # it from the moved directory
        self.mount_a.run_shell(["rmdir", "d0/.snap/s1"])
        snap_name1 = self.mount_a.run_shell(["ls", "d1/d3/.snap"]).stdout.getvalue()
        self.assertEqual(snap_name1, "");

        # cleanup
        self.mount_a.run_shell(["rm", "-rf", "d0", "d1"])
411 def test_multimds_hardlink(self
):
413 check if hardlink snapshot works in multimds setup
415 self
.fs
.set_allow_new_snaps(True);
416 self
.fs
.set_max_mds(2)
417 status
= self
.fs
.wait_for_daemons()
419 self
.mount_a
.run_shell_payload("mkdir -p {d0,d1}/empty")
421 self
.mount_a
.setfattr("d0", "ceph.dir.pin", "0")
422 self
.mount_a
.setfattr("d1", "ceph.dir.pin", "1")
423 self
._wait
_subtrees
([("/d0", 0), ("/d1", 1)], rank
=0, status
=status
)
425 self
.mount_a
.run_python(dedent("""
427 open(os.path.join("{path}", "d0/file1"), 'w').write("asdf")
428 open(os.path.join("{path}", "d0/file2"), 'w').write("asdf")
429 """.format(path
=self
.mount_a
.mountpoint
)
432 self
.mount_a
.run_shell(["ln", "d0/file1", "d1/file1"])
433 self
.mount_a
.run_shell(["ln", "d0/file2", "d1/file2"])
435 self
.mount_a
.run_shell(["mkdir", "d1/.snap/s1"])
437 self
.mount_a
.run_python(dedent("""
439 open(os.path.join("{path}", "d0/file1"), 'w').write("qwer")
440 """.format(path
=self
.mount_a
.mountpoint
)
443 self
.mount_a
.run_shell(["grep", "asdf", "d1/.snap/s1/file1"])
445 self
.mount_a
.run_shell(["rm", "-f", "d0/file2"])
446 self
.mount_a
.run_shell(["grep", "asdf", "d1/.snap/s1/file2"])
448 self
.mount_a
.run_shell(["rm", "-f", "d1/file2"])
449 self
.mount_a
.run_shell(["grep", "asdf", "d1/.snap/s1/file2"])
451 self
.mount_a
.run_shell(["rmdir", "d1/.snap/s1"])
452 self
.mount_a
.run_shell(["rm", "-rf", "d0", "d1"])
    class SnapLimitViolationException(Exception):
        """Raised when snapshot creation fails at the per-directory limit."""

        # ordinal of the snapshot whose creation failed; -1 until set
        failed_snapshot_number = -1

        def __init__(self, num):
            self.failed_snapshot_number = num
460 def get_snap_name(self
, dir_name
, sno
):
461 sname
= "{dir_name}/.snap/s_{sno}".format(dir_name
=dir_name
, sno
=sno
)
    def create_snap_dir(self, sname):
        """Create snapshot `sname` via mkdir on its .snap path."""
        self.mount_a.run_shell(["mkdir", sname])
    def delete_dir_and_snaps(self, dir_name, snaps):
        """Remove snapshots 1..`snaps` under `dir_name`, then the dir itself."""
        for sno in range(1, snaps+1, 1):
            sname = self.get_snap_name(dir_name, sno)
            self.mount_a.run_shell(["rmdir", sname])
        self.mount_a.run_shell(["rmdir", dir_name])
473 def create_dir_and_snaps(self
, dir_name
, snaps
):
474 self
.mount_a
.run_shell(["mkdir", dir_name
])
476 for sno
in range(1, snaps
+1, 1):
477 sname
= self
.get_snap_name(dir_name
, sno
)
479 self
.create_snap_dir(sname
)
480 except CommandFailedError
as e
:
481 # failing at the last mkdir beyond the limit is expected
483 log
.info("failed while creating snap #{}: {}".format(sno
, repr(e
)))
484 raise TestSnapshots
.SnapLimitViolationException(sno
)
    def test_mds_max_snaps_per_dir_default_limit(self):
        """
        Test the newly introduced option named mds_max_snaps_per_dir
        Default snaps limit is 100
        Test if the default number of snapshot directories can be created
        """
        self.create_dir_and_snaps("accounts", int(self.mds_max_snaps_per_dir))
        self.delete_dir_and_snaps("accounts", int(self.mds_max_snaps_per_dir))
495 def test_mds_max_snaps_per_dir_with_increased_limit(self
):
497 Test the newly introudced option named mds_max_snaps_per_dir
498 First create 101 directories and ensure that the 101st directory
499 creation fails. Then increase the default by one and see if the
500 additional directory creation succeeds
502 # first test the default limit
503 new_limit
= int(self
.mds_max_snaps_per_dir
)
504 self
.fs
.rank_asok(['config', 'set', 'mds_max_snaps_per_dir', repr(new_limit
)])
506 self
.create_dir_and_snaps("accounts", new_limit
+ 1)
507 except TestSnapshots
.SnapLimitViolationException
as e
:
508 if e
.failed_snapshot_number
== (new_limit
+ 1):
510 # then increase the limit by one and test
511 new_limit
= new_limit
+ 1
512 self
.fs
.rank_asok(['config', 'set', 'mds_max_snaps_per_dir', repr(new_limit
)])
513 sname
= self
.get_snap_name("accounts", new_limit
)
514 self
.create_snap_dir(sname
)
515 self
.delete_dir_and_snaps("accounts", new_limit
)
517 def test_mds_max_snaps_per_dir_with_reduced_limit(self
):
519 Test the newly introudced option named mds_max_snaps_per_dir
520 First create 99 directories. Then reduce the limit to 98. Then try
521 creating another directory and ensure that additional directory
524 # first test the new limit
525 new_limit
= int(self
.mds_max_snaps_per_dir
) - 1
526 self
.create_dir_and_snaps("accounts", new_limit
)
527 sname
= self
.get_snap_name("accounts", new_limit
+ 1)
528 # then reduce the limit by one and test
529 new_limit
= new_limit
- 1
530 self
.fs
.rank_asok(['config', 'set', 'mds_max_snaps_per_dir', repr(new_limit
)])
532 self
.create_snap_dir(sname
)
533 except CommandFailedError
:
534 # after reducing limit we expect the new snapshot creation to fail
536 self
.delete_dir_and_snaps("accounts", new_limit
+ 1)