4 from textwrap
import dedent
5 from tasks
.cephfs
.fuse_mount
import FuseMount
6 from tasks
.cephfs
.cephfs_test_case
import CephFSTestCase
7 from teuthology
.orchestra
.run
import CommandFailedError
, Raw
# Module-level logger for this test module.
log = logging.getLogger(__name__)

# Seconds to wait for an MDS to restart and reach a target state.
MDS_RESTART_GRACE = 60
class TestSnapshots(CephFSTestCase):
    # Config values read from the running MDS during test setup; exposed as
    # instance attributes (e.g. self.mds_max_snaps_per_dir).
    # NOTE(review): assumes CephFSTestCase populates these — confirm.
    LOAD_SETTINGS = ["mds_max_snaps_per_dir"]
17 def _check_subtree(self
, rank
, path
, status
=None):
18 got_subtrees
= self
.fs
.rank_asok(["get", "subtrees"], rank
=rank
, status
=status
)
19 for s
in got_subtrees
:
20 if s
['dir']['path'] == path
and s
['auth_first'] == rank
:
24 def _get_snapclient_dump(self
, rank
=0, status
=None):
25 return self
.fs
.rank_asok(["dump", "snaps"], rank
=rank
, status
=status
)
27 def _get_snapserver_dump(self
, rank
=0, status
=None):
28 return self
.fs
.rank_asok(["dump", "snaps", "--server"], rank
=rank
, status
=status
)
30 def _get_last_created_snap(self
, rank
=0, status
=None):
31 return int(self
._get
_snapserver
_dump
(rank
,status
=status
)["last_created"])
33 def _get_last_destroyed_snap(self
, rank
=0, status
=None):
34 return int(self
._get
_snapserver
_dump
(rank
,status
=status
)["last_destroyed"])
36 def _get_pending_snap_update(self
, rank
=0, status
=None):
37 return self
._get
_snapserver
_dump
(rank
,status
=status
)["pending_update"]
39 def _get_pending_snap_destroy(self
, rank
=0, status
=None):
40 return self
._get
_snapserver
_dump
(rank
,status
=status
)["pending_destroy"]
def test_allow_new_snaps_config(self):
    """
    Check whether the 'allow_new_snaps' setting works: snapshot creation
    must fail with EPERM while it is disabled and succeed once enabled.
    """
    self.mount_a.run_shell(["mkdir", "test-allow-snaps"])

    self.fs.set_allow_new_snaps(False)
    try:
        self.mount_a.run_shell(["mkdir", "test-allow-snaps/.snap/snap00"])
    except CommandFailedError as ce:
        # mkdir in .snap must be rejected with EPERM while snaps are disabled
        self.assertEqual(ce.exitstatus, errno.EPERM, "expected EPERM")
    else:
        self.fail("expected snap creation to fail")

    self.fs.set_allow_new_snaps(True)
    self.mount_a.run_shell(["mkdir", "test-allow-snaps/.snap/snap00"])
    self.mount_a.run_shell(["rmdir", "test-allow-snaps/.snap/snap00"])
    self.mount_a.run_shell(["rmdir", "test-allow-snaps"])
def test_kill_mdstable(self):
    """
    Check snap table transaction resilience: kill the snap table server
    (rank 0) and/or the snap table client (rank 1) at various pinch points
    (via the mds_kill_mdstable_at debug hook) and verify that pending
    snapshot updates are replayed, committed, or rolled back correctly.

    NOTE(review): the loop headers and branch conditions below were
    reconstructed from the kill-point comments — confirm against upstream.
    """
    if not isinstance(self.mount_a, FuseMount):
        self.skipTest("Require FUSE client to forcibly kill mount")

    self.fs.set_allow_new_snaps(True)
    self.fs.set_max_mds(2)
    status = self.fs.wait_for_daemons()

    grace = float(self.fs.get_config("mds_beacon_grace", service_type="mon"))

    # setup: pin d1 to rank 1 so snap requests cross ranks
    self.mount_a.run_shell(["mkdir", "-p", "d1/dir"])
    self.mount_a.setfattr("d1", "ceph.dir.pin", "1")
    self._wait_subtrees([("/d1", 1)], rank=1, path="/d1")

    last_created = self._get_last_created_snap(rank=0, status=status)

    # mds_kill_mdstable_at:
    #  1: MDSTableServer::handle_prepare
    #  2: MDSTableServer::_prepare_logged
    #  5: MDSTableServer::handle_commit
    #  6: MDSTableServer::_commit_logged
    for i in [1, 2, 5, 6]:
        log.info("testing snapserver mds_kill_mdstable_at={0}".format(i))

        status = self.fs.status()
        rank0 = self.fs.get_rank(rank=0, status=status)
        self.fs.rank_freeze(True, rank=0)
        self.fs.rank_asok(['config', 'set', "mds_kill_mdstable_at", "{0}".format(i)], rank=0, status=status)
        proc = self.mount_a.run_shell(["mkdir", "d1/dir/.snap/s1{0}".format(i)], wait=False)
        self.wait_until_true(lambda: "laggy_since" in self.fs.get_rank(rank=0), timeout=grace*2)
        self.delete_mds_coredump(rank0['name'])

        self.fs.rank_fail(rank=0)
        self.fs.mds_restart(rank0['name'])
        self.wait_for_daemon_start([rank0['name']])
        status = self.fs.wait_for_daemons()

        # the mkdir issued before the kill must eventually complete, and
        # exactly one new snapshot must be recorded by the snap server
        proc.wait()
        last_created += 1
        self.wait_until_true(lambda: self._get_last_created_snap(rank=0) == last_created, timeout=30)

    self.set_conf("mds", "mds_reconnect_timeout", "5")

    self.mount_a.run_shell(["rmdir", Raw("d1/dir/.snap/*")])

    # set mds_kill_mdstable_at, also kill snapclient
    for i in [2, 5, 6]:
        log.info("testing snapserver mds_kill_mdstable_at={0}, also kill snapclient".format(i))
        status = self.fs.status()
        last_created = self._get_last_created_snap(rank=0, status=status)

        rank0 = self.fs.get_rank(rank=0, status=status)
        rank1 = self.fs.get_rank(rank=1, status=status)
        self.fs.rank_freeze(True, rank=0)  # prevent failover...
        self.fs.rank_freeze(True, rank=1)  # prevent failover...
        self.fs.rank_asok(['config', 'set', "mds_kill_mdstable_at", "{0}".format(i)], rank=0, status=status)
        proc = self.mount_a.run_shell(["mkdir", "d1/dir/.snap/s2{0}".format(i)], wait=False)
        self.wait_until_true(lambda: "laggy_since" in self.fs.get_rank(rank=0), timeout=grace*3)
        self.delete_mds_coredump(rank0['name'])

        self.fs.rank_signal(signal.SIGKILL, rank=1)

        # the client mount is dead after both MDS ranks went away; clean up
        self.mount_a.kill()
        self.mount_a.kill_cleanup()

        self.fs.rank_fail(rank=0)
        self.fs.mds_restart(rank0['name'])
        self.wait_for_daemon_start([rank0['name']])

        self.fs.wait_for_state('up:resolve', rank=0, timeout=MDS_RESTART_GRACE)
        if i in [2, 5]:
            # prepare logged but not committed: the update is still pending
            self.assertEqual(len(self._get_pending_snap_update(rank=0)), 1)
        elif i == 6:
            # commit was logged: nothing pending, snapshot was created
            self.assertEqual(len(self._get_pending_snap_update(rank=0)), 0)
            self.assertGreater(self._get_last_created_snap(rank=0), last_created)

        self.fs.rank_fail(rank=1)
        self.fs.mds_restart(rank1['name'])
        self.wait_for_daemon_start([rank1['name']])
        self.fs.wait_for_state('up:active', rank=0, timeout=MDS_RESTART_GRACE)

        if i in [2, 5]:
            self.wait_until_true(lambda: len(self._get_pending_snap_update(rank=0)) == 0, timeout=30)
            if i == 2:
                # rolled back: no new snapshot
                self.assertEqual(self._get_last_created_snap(rank=0), last_created)
            else:
                # replayed to completion: snapshot was created
                self.assertGreater(self._get_last_created_snap(rank=0), last_created)

        self.mount_a.mount_wait()

        self.mount_a.run_shell(["rmdir", Raw("d1/dir/.snap/*")])

    # mds_kill_mdstable_at:
    #  3: MDSTableClient::handle_request (got agree)
    #  4: MDSTableClient::commit
    #  7: MDSTableClient::handle_request (got ack)
    for i in [3, 4, 7]:
        log.info("testing snapclient mds_kill_mdstable_at={0}".format(i))
        last_created = self._get_last_created_snap(rank=0)

        status = self.fs.status()
        rank1 = self.fs.get_rank(rank=1, status=status)
        self.fs.rank_freeze(True, rank=1)  # prevent failover...
        self.fs.rank_asok(['config', 'set', "mds_kill_mdstable_at", "{0}".format(i)], rank=1, status=status)
        proc = self.mount_a.run_shell(["mkdir", "d1/dir/.snap/s3{0}".format(i)], wait=False)
        self.wait_until_true(lambda: "laggy_since" in self.fs.get_rank(rank=1), timeout=grace*2)
        self.delete_mds_coredump(rank1['name'])

        # the client mount is dead after its auth MDS went away; clean up
        self.mount_a.kill()
        self.mount_a.kill_cleanup()

        if i in [3, 4]:
            # agree received / commit sent but not acked: update still pending
            self.assertEqual(len(self._get_pending_snap_update(rank=0)), 1)
        elif i == 7:
            # ack was received: transaction fully committed
            self.assertEqual(len(self._get_pending_snap_update(rank=0)), 0)
            self.assertGreater(self._get_last_created_snap(rank=0), last_created)

        self.fs.rank_fail(rank=1)
        self.fs.mds_restart(rank1['name'])
        self.wait_for_daemon_start([rank1['name']])
        status = self.fs.wait_for_daemons(timeout=MDS_RESTART_GRACE)

        if i in [3, 4]:
            self.wait_until_true(lambda: len(self._get_pending_snap_update(rank=0)) == 0, timeout=30)
            if i == 3:
                # rolled back: no new snapshot
                self.assertEqual(self._get_last_created_snap(rank=0), last_created)
            else:
                # committed: snapshot was created
                self.assertGreater(self._get_last_created_snap(rank=0), last_created)

        self.mount_a.mount_wait()

        self.mount_a.run_shell(["rmdir", Raw("d1/dir/.snap/*")])

    # mds_kill_mdstable_at:
    #  3: MDSTableClient::handle_request (got agree)
    #  8: MDSTableServer::handle_rollback
    log.info("testing snapclient mds_kill_mdstable_at=3, snapserver mds_kill_mdstable_at=8")
    last_created = self._get_last_created_snap(rank=0)

    status = self.fs.status()
    rank0 = self.fs.get_rank(rank=0, status=status)
    rank1 = self.fs.get_rank(rank=1, status=status)
    self.fs.rank_freeze(True, rank=0)
    self.fs.rank_freeze(True, rank=1)
    self.fs.rank_asok(['config', 'set', "mds_kill_mdstable_at", "8"], rank=0, status=status)
    self.fs.rank_asok(['config', 'set', "mds_kill_mdstable_at", "3"], rank=1, status=status)
    proc = self.mount_a.run_shell(["mkdir", "d1/dir/.snap/s4"], wait=False)
    self.wait_until_true(lambda: "laggy_since" in self.fs.get_rank(rank=1), timeout=grace*2)
    self.delete_mds_coredump(rank1['name'])

    # the client mount is dead; clean up
    self.mount_a.kill()
    self.mount_a.kill_cleanup()

    self.assertEqual(len(self._get_pending_snap_update(rank=0)), 1)

    self.fs.rank_fail(rank=1)
    self.fs.mds_restart(rank1['name'])
    self.wait_for_daemon_start([rank1['name']])

    # rollback triggers assertion
    self.wait_until_true(lambda: "laggy_since" in self.fs.get_rank(rank=0), timeout=grace*2)
    self.delete_mds_coredump(rank0['name'])
    self.fs.rank_fail(rank=0)
    self.fs.mds_restart(rank0['name'])
    self.wait_for_daemon_start([rank0['name']])
    self.fs.wait_for_state('up:active', rank=0, timeout=MDS_RESTART_GRACE)

    # mds.1 should re-send rollback message; table must drain with no new snap
    self.wait_until_true(lambda: len(self._get_pending_snap_update(rank=0)) == 0, timeout=30)
    self.assertEqual(self._get_last_created_snap(rank=0), last_created)

    self.mount_a.mount_wait()
def test_snapclient_cache(self):
    """
    Check if the snapclient cache gets synced properly: after mksnap,
    after rmsnap, across MDS recovery, and when an MDS is killed at
    snap-table notification/commit pinch points.

    NOTE(review): missing loop/branch headers were reconstructed — confirm
    against upstream.
    """
    self.fs.set_allow_new_snaps(True)
    self.fs.set_max_mds(3)
    status = self.fs.wait_for_daemons()

    grace = float(self.fs.get_config("mds_beacon_grace", service_type="mon"))

    # pin three subtrees to three different ranks
    self.mount_a.run_shell(["mkdir", "-p", "d0/d1/dir"])
    self.mount_a.run_shell(["mkdir", "-p", "d0/d2/dir"])
    self.mount_a.setfattr("d0", "ceph.dir.pin", "0")
    self.mount_a.setfattr("d0/d1", "ceph.dir.pin", "1")
    self.mount_a.setfattr("d0/d2", "ceph.dir.pin", "2")
    self._wait_subtrees([("/d0", 0), ("/d0/d1", 1), ("/d0/d2", 2)], rank="all", status=status, path="/d0")

    def _check_snapclient_cache(snaps_dump, cache_dump=None, rank=0):
        # Compare a rank's snapclient cache against the server's dump;
        # every cached key must match the authoritative server value.
        if cache_dump is None:
            cache_dump = self._get_snapclient_dump(rank=rank)
        for key, value in cache_dump.items():
            if value != snaps_dump[key]:
                return False
        return True

    # sync after mksnap
    last_created = self._get_last_created_snap(rank=0)
    self.mount_a.run_shell(["mkdir", "d0/d1/dir/.snap/s1", "d0/d1/dir/.snap/s2"])
    self.wait_until_true(lambda: len(self._get_pending_snap_update(rank=0)) == 0, timeout=30)
    self.assertGreater(self._get_last_created_snap(rank=0), last_created)

    snaps_dump = self._get_snapserver_dump(rank=0)
    self.assertTrue(_check_snapclient_cache(snaps_dump, rank=0))
    self.assertTrue(_check_snapclient_cache(snaps_dump, rank=1))
    self.assertTrue(_check_snapclient_cache(snaps_dump, rank=2))

    # sync after rmsnap
    last_destroyed = self._get_last_destroyed_snap(rank=0)
    self.mount_a.run_shell(["rmdir", "d0/d1/dir/.snap/s1"])
    self.wait_until_true(lambda: len(self._get_pending_snap_destroy(rank=0)) == 0, timeout=30)
    self.assertGreater(self._get_last_destroyed_snap(rank=0), last_destroyed)

    snaps_dump = self._get_snapserver_dump(rank=0)
    self.assertTrue(_check_snapclient_cache(snaps_dump, rank=0))
    self.assertTrue(_check_snapclient_cache(snaps_dump, rank=1))
    self.assertTrue(_check_snapclient_cache(snaps_dump, rank=2))

    # sync during mds recovers
    self.fs.rank_fail(rank=2)
    status = self.fs.wait_for_daemons(timeout=MDS_RESTART_GRACE)
    self.assertTrue(_check_snapclient_cache(snaps_dump, rank=2))

    self.fs.rank_fail(rank=0)
    self.fs.rank_fail(rank=1)
    status = self.fs.wait_for_daemons()
    self.fs.wait_for_state('up:active', rank=0, timeout=MDS_RESTART_GRACE)
    self.assertTrue(_check_snapclient_cache(snaps_dump, rank=0))
    self.assertTrue(_check_snapclient_cache(snaps_dump, rank=1))
    self.assertTrue(_check_snapclient_cache(snaps_dump, rank=2))

    # kill at MDSTableClient::handle_notify_prep
    status = self.fs.status()
    rank2 = self.fs.get_rank(rank=2, status=status)
    self.fs.rank_freeze(True, rank=2)
    self.fs.rank_asok(['config', 'set', "mds_kill_mdstable_at", "9"], rank=2, status=status)
    proc = self.mount_a.run_shell(["mkdir", "d0/d1/dir/.snap/s3"], wait=False)
    self.wait_until_true(lambda: "laggy_since" in self.fs.get_rank(rank=2), timeout=grace*2)
    self.delete_mds_coredump(rank2['name'])

    # mksnap should wait for notify ack from mds.2
    self.assertFalse(proc.finished)

    # mksnap should proceed after mds.2 fails
    self.fs.rank_fail(rank=2)
    self.wait_until_true(lambda: proc.finished, timeout=30)

    self.fs.mds_restart(rank2['name'])
    self.wait_for_daemon_start([rank2['name']])
    status = self.fs.wait_for_daemons(timeout=MDS_RESTART_GRACE)

    self.mount_a.run_shell(["rmdir", Raw("d0/d1/dir/.snap/*")])

    # kill at MDSTableClient::commit
    # the recovering mds should sync all mds' cache when it enters resolve stage
    self.set_conf("mds", "mds_reconnect_timeout", "5")
    for i in range(1, 4):
        status = self.fs.status()
        rank2 = self.fs.get_rank(rank=2, status=status)
        self.fs.rank_freeze(True, rank=2)
        self.fs.rank_asok(['config', 'set', "mds_kill_mdstable_at", "4"], rank=2, status=status)
        last_created = self._get_last_created_snap(rank=0)
        proc = self.mount_a.run_shell(["mkdir", "d0/d2/dir/.snap/s{0}".format(i)], wait=False)
        self.wait_until_true(lambda: "laggy_since" in self.fs.get_rank(rank=2), timeout=grace*2)
        self.delete_mds_coredump(rank2['name'])

        # the client mount is dead; clean up
        self.mount_a.kill()
        self.mount_a.kill_cleanup()

        self.assertEqual(len(self._get_pending_snap_update(rank=0)), 1)

        # vary which additional ranks get restarted on each iteration
        # NOTE(review): reconstructed conditions — confirm against upstream
        if i in [2, 4]:
            self.fs.rank_fail(rank=0)
        if i in [3, 4]:
            self.fs.rank_fail(rank=1)

        self.fs.rank_fail(rank=2)
        self.fs.mds_restart(rank2['name'])
        self.wait_for_daemon_start([rank2['name']])
        status = self.fs.wait_for_daemons(timeout=MDS_RESTART_GRACE)

        rank0_cache = self._get_snapclient_dump(rank=0)
        rank1_cache = self._get_snapclient_dump(rank=1)
        rank2_cache = self._get_snapclient_dump(rank=2)

        # all ranks must agree, and the new snapshot must be visible
        self.assertGreater(int(rank0_cache["last_created"]), last_created)
        self.assertEqual(rank0_cache, rank1_cache)
        self.assertEqual(rank0_cache, rank2_cache)

        self.wait_until_true(lambda: len(self._get_pending_snap_update(rank=0)) == 0, timeout=30)

        snaps_dump = self._get_snapserver_dump(rank=0)
        self.assertEqual(snaps_dump["last_created"], rank0_cache["last_created"])
        self.assertTrue(_check_snapclient_cache(snaps_dump, cache_dump=rank0_cache))

        self.mount_a.mount_wait()

        self.mount_a.run_shell(["rmdir", Raw("d0/d2/dir/.snap/*")])
def test_multimds_mksnap(self):
    """
    Check if snapshot takes effect across authority subtrees: a snapshot
    taken at a subtree root must preserve data living in a child subtree
    delegated to a different MDS rank.
    """
    self.fs.set_allow_new_snaps(True)
    self.fs.set_max_mds(2)
    status = self.fs.wait_for_daemons()

    # pin d0 to rank 0 and d0/d1 to rank 1 so the snapshot spans two ranks
    self.mount_a.run_shell(["mkdir", "-p", "d0/d1/empty"])
    self.mount_a.setfattr("d0", "ceph.dir.pin", "0")
    self.mount_a.setfattr("d0/d1", "ceph.dir.pin", "1")
    self._wait_subtrees([("/d0", 0), ("/d0/d1", 1)], rank="all", status=status, path="/d0")

    # snapshot at d0 must retain file data from rank 1's subtree after deletion
    self.mount_a.write_test_pattern("d0/d1/file_a", 8 * 1024 * 1024)
    self.mount_a.run_shell(["mkdir", "d0/.snap/s1"])
    self.mount_a.run_shell(["rm", "-f", "d0/d1/file_a"])
    self.mount_a.validate_test_pattern("d0/.snap/s1/d1/file_a", 8 * 1024 * 1024)

    self.mount_a.run_shell(["rmdir", "d0/.snap/s1"])
    self.mount_a.run_shell(["rm", "-rf", "d0"])
def test_multimds_past_parents(self):
    """
    Check if past parents are properly recorded during an across-authority
    rename: a snapshot taken on the source subtree must remain visible on
    the renamed directory, and disappear once the snapshot is removed.
    """
    self.fs.set_allow_new_snaps(True)
    self.fs.set_max_mds(2)
    status = self.fs.wait_for_daemons()

    self.mount_a.run_shell_payload("mkdir -p {d0,d1}/empty")
    self.mount_a.setfattr("d0", "ceph.dir.pin", "0")
    self.mount_a.setfattr("d1", "ceph.dir.pin", "1")
    self._wait_subtrees([("/d0", 0), ("/d1", 1)], rank=0, status=status)

    self.mount_a.run_shell(["mkdir", "d0/d3"])
    self.mount_a.run_shell(["mkdir", "d0/.snap/s1"])
    snap_name = self.mount_a.run_shell(["ls", "d0/d3/.snap"]).stdout.getvalue()

    # rename across the auth boundary; the snapshot must follow via past parents
    self.mount_a.run_shell(["mv", "d0/d3", "d1/d3"])
    snap_name1 = self.mount_a.run_shell(["ls", "d1/d3/.snap"]).stdout.getvalue()
    self.assertEqual(snap_name1, snap_name)

    # removing the snapshot at the old location must hide it at the new one
    self.mount_a.run_shell(["rmdir", "d0/.snap/s1"])
    snap_name1 = self.mount_a.run_shell(["ls", "d1/d3/.snap"]).stdout.getvalue()
    self.assertEqual(snap_name1, "")

    self.mount_a.run_shell(["rm", "-rf", "d0", "d1"])
def test_multimds_hardlink(self):
    """
    Check if hardlink snapshots work in a multimds setup: content captured
    by a snapshot must survive overwrites and unlinks of either link, even
    when the links live in subtrees owned by different MDS ranks.
    """
    self.fs.set_allow_new_snaps(True)
    self.fs.set_max_mds(2)
    status = self.fs.wait_for_daemons()

    self.mount_a.run_shell_payload("mkdir -p {d0,d1}/empty")

    self.mount_a.setfattr("d0", "ceph.dir.pin", "0")
    self.mount_a.setfattr("d1", "ceph.dir.pin", "1")
    self._wait_subtrees([("/d0", 0), ("/d1", 1)], rank=0, status=status)

    self.mount_a.run_python(dedent("""
        import os
        open(os.path.join("{path}", "d0/file1"), 'w').write("asdf")
        open(os.path.join("{path}", "d0/file2"), 'w').write("asdf")
        """.format(path=self.mount_a.mountpoint)
    ))

    # hardlink across the two pinned subtrees
    self.mount_a.run_shell(["ln", "d0/file1", "d1/file1"])
    self.mount_a.run_shell(["ln", "d0/file2", "d1/file2"])

    self.mount_a.run_shell(["mkdir", "d1/.snap/s1"])

    # overwrite one link target; the snapshot must retain the old content
    self.mount_a.run_python(dedent("""
        import os
        open(os.path.join("{path}", "d0/file1"), 'w').write("qwer")
        """.format(path=self.mount_a.mountpoint)
    ))
    self.mount_a.run_shell(["grep", "asdf", "d1/.snap/s1/file1"])

    # unlinking either link must not lose the snapshotted content
    self.mount_a.run_shell(["rm", "-f", "d0/file2"])
    self.mount_a.run_shell(["grep", "asdf", "d1/.snap/s1/file2"])

    self.mount_a.run_shell(["rm", "-f", "d1/file2"])
    self.mount_a.run_shell(["grep", "asdf", "d1/.snap/s1/file2"])

    self.mount_a.run_shell(["rmdir", "d1/.snap/s1"])
    self.mount_a.run_shell(["rm", "-rf", "d0", "d1"])
class SnapLimitViolationException(Exception):
    # Ordinal of the snapshot whose creation failed; -1 until __init__ sets it.
    failed_snapshot_number = -1

    def __init__(self, num):
        # num: the 1-based snapshot number at which mkdir in .snap failed.
        self.failed_snapshot_number = num
def get_snap_name(self, dir_name, sno):
    """
    Return the .snap path for snapshot number `sno` under `dir_name`,
    e.g. "accounts/.snap/s_3".

    Fix: the computed name was never returned (the function yielded None);
    added the missing return.
    """
    sname = "{dir_name}/.snap/s_{sno}".format(dir_name=dir_name, sno=sno)
    return sname
def create_snap_dir(self, sname):
    """Create the snapshot directory `sname` (a path inside .snap) via mkdir."""
    mkdir_cmd = ["mkdir", sname]
    self.mount_a.run_shell(mkdir_cmd)
def delete_dir_and_snaps(self, dir_name, snaps):
    """Remove snapshots 1..`snaps` under `dir_name`, then `dir_name` itself."""
    snap_numbers = range(1, snaps + 1, 1)
    for number in snap_numbers:
        snap_path = self.get_snap_name(dir_name, number)
        self.mount_a.run_shell(["rmdir", snap_path])
    self.mount_a.run_shell(["rmdir", dir_name])
def create_dir_and_snaps(self, dir_name, snaps):
    """
    Create `dir_name` and snapshots 1..`snaps` beneath it.

    Raises TestSnapshots.SnapLimitViolationException if the final snapshot
    (the one expected to exceed mds_max_snaps_per_dir) fails to create;
    any earlier failure propagates as CommandFailedError.

    Fix: restored the try/except structure and the `sno == snaps` guard
    that were garbled in the source — only failure at the last (beyond
    limit) mkdir is the expected outcome.
    """
    self.mount_a.run_shell(["mkdir", dir_name])

    for sno in range(1, snaps + 1, 1):
        sname = self.get_snap_name(dir_name, sno)
        try:
            self.create_snap_dir(sname)
        except CommandFailedError as e:
            # failing at the last mkdir beyond the limit is expected
            if sno == snaps:
                log.info("failed while creating snap #{}: {}".format(sno, repr(e)))
                raise TestSnapshots.SnapLimitViolationException(sno)
            raise
489 def test_mds_max_snaps_per_dir_default_limit(self
):
491 Test the newly introudced option named mds_max_snaps_per_dir
492 Default snaps limit is 100
493 Test if the default number of snapshot directories can be created
495 self
.create_dir_and_snaps("accounts", int(self
.mds_max_snaps_per_dir
))
496 self
.delete_dir_and_snaps("accounts", int(self
.mds_max_snaps_per_dir
))
498 def test_mds_max_snaps_per_dir_with_increased_limit(self
):
500 Test the newly introudced option named mds_max_snaps_per_dir
501 First create 101 directories and ensure that the 101st directory
502 creation fails. Then increase the default by one and see if the
503 additional directory creation succeeds
505 # first test the default limit
506 new_limit
= int(self
.mds_max_snaps_per_dir
)
507 self
.fs
.rank_asok(['config', 'set', 'mds_max_snaps_per_dir', repr(new_limit
)])
509 self
.create_dir_and_snaps("accounts", new_limit
+ 1)
510 except TestSnapshots
.SnapLimitViolationException
as e
:
511 if e
.failed_snapshot_number
== (new_limit
+ 1):
513 # then increase the limit by one and test
514 new_limit
= new_limit
+ 1
515 self
.fs
.rank_asok(['config', 'set', 'mds_max_snaps_per_dir', repr(new_limit
)])
516 sname
= self
.get_snap_name("accounts", new_limit
)
517 self
.create_snap_dir(sname
)
518 self
.delete_dir_and_snaps("accounts", new_limit
)
520 def test_mds_max_snaps_per_dir_with_reduced_limit(self
):
522 Test the newly introudced option named mds_max_snaps_per_dir
523 First create 99 directories. Then reduce the limit to 98. Then try
524 creating another directory and ensure that additional directory
527 # first test the new limit
528 new_limit
= int(self
.mds_max_snaps_per_dir
) - 1
529 self
.create_dir_and_snaps("accounts", new_limit
)
530 sname
= self
.get_snap_name("accounts", new_limit
+ 1)
531 # then reduce the limit by one and test
532 new_limit
= new_limit
- 1
533 self
.fs
.rank_asok(['config', 'set', 'mds_max_snaps_per_dir', repr(new_limit
)])
535 self
.create_snap_dir(sname
)
536 except CommandFailedError
:
537 # after reducing limit we expect the new snapshot creation to fail
539 self
.delete_dir_and_snaps("accounts", new_limit
+ 1)