import logging
import signal

from textwrap import dedent

from tasks.cephfs.fuse_mount import FuseMount
from tasks.cephfs.cephfs_test_case import CephFSTestCase
from teuthology.orchestra.run import CommandFailedError, Raw
# Module-level logger for this test module.
log = logging.getLogger(__name__)

# Seconds to wait for an MDS to restart / reach a target state.
MDS_RESTART_GRACE = 60
class TestSnapshots(CephFSTestCase):
    """Snapshot behaviour tests: snaptable transactions, snapclient cache
    sync, multi-MDS snapshots, and per-directory snapshot limits."""

    # Settings loaded from the MDS config by the test-case framework; exposed
    # as self.mds_max_snaps_per_dir (used by the limit tests below).
    LOAD_SETTINGS = ["mds_max_snaps_per_dir"]
17 def _check_subtree(self
, rank
, path
, status
=None):
18 got_subtrees
= self
.fs
.rank_asok(["get", "subtrees"], rank
=rank
, status
=status
)
19 for s
in got_subtrees
:
20 if s
['dir']['path'] == path
and s
['auth_first'] == rank
:
    def _get_snapclient_dump(self, rank=0, status=None):
        # Dump the given rank's snapclient cache via its admin socket.
        return self.fs.rank_asok(["dump", "snaps"], rank=rank, status=status)
    def _get_snapserver_dump(self, rank=0, status=None):
        # Dump the snapserver (snap table) state of the given rank.
        return self.fs.rank_asok(["dump", "snaps", "--server"], rank=rank, status=status)
30 def _get_last_created_snap(self
, rank
=0, status
=None):
31 return int(self
._get
_snapserver
_dump
(rank
,status
=status
)["last_created"])
33 def _get_last_destroyed_snap(self
, rank
=0, status
=None):
34 return int(self
._get
_snapserver
_dump
(rank
,status
=status
)["last_destroyed"])
    def _get_pending_snap_update(self, rank=0, status=None):
        # Snap-table transactions that are prepared but not yet committed.
        return self._get_snapserver_dump(rank,status=status)["pending_update"]
    def _get_pending_snap_destroy(self, rank=0, status=None):
        # Snapshot destroys that have been requested but not yet finished.
        return self._get_snapserver_dump(rank,status=status)["pending_destroy"]
42 def test_kill_mdstable(self
):
44 check snaptable transcation
46 if not isinstance(self
.mount_a
, FuseMount
):
47 self
.skipTest("Require FUSE client to forcibly kill mount")
49 self
.fs
.set_allow_new_snaps(True);
50 self
.fs
.set_max_mds(2)
51 status
= self
.fs
.wait_for_daemons()
53 grace
= float(self
.fs
.get_config("mds_beacon_grace", service_type
="mon"))
56 self
.mount_a
.run_shell(["mkdir", "-p", "d1/dir"])
57 self
.mount_a
.setfattr("d1", "ceph.dir.pin", "1")
58 self
._wait
_subtrees
([("/d1", 1)], rank
=1, path
="/d1")
60 last_created
= self
._get
_last
_created
_snap
(rank
=0,status
=status
)
62 # mds_kill_mdstable_at:
63 # 1: MDSTableServer::handle_prepare
64 # 2: MDSTableServer::_prepare_logged
65 # 5: MDSTableServer::handle_commit
66 # 6: MDSTableServer::_commit_logged
68 log
.info("testing snapserver mds_kill_mdstable_at={0}".format(i
))
70 status
= self
.fs
.status()
71 rank0
= self
.fs
.get_rank(rank
=0, status
=status
)
72 self
.fs
.rank_freeze(True, rank
=0)
73 self
.fs
.rank_asok(['config', 'set', "mds_kill_mdstable_at", "{0}".format(i
)], rank
=0, status
=status
)
74 proc
= self
.mount_a
.run_shell(["mkdir", "d1/dir/.snap/s1{0}".format(i
)], wait
=False)
75 self
.wait_until_true(lambda: "laggy_since" in self
.fs
.get_rank(rank
=0), timeout
=grace
*2);
76 self
.delete_mds_coredump(rank0
['name']);
78 self
.fs
.rank_fail(rank
=0)
79 self
.fs
.mds_restart(rank0
['name'])
80 self
.wait_for_daemon_start([rank0
['name']])
81 status
= self
.fs
.wait_for_daemons()
85 self
.wait_until_true(lambda: self
._get
_last
_created
_snap
(rank
=0) == last_created
, timeout
=30)
87 self
.set_conf("mds", "mds_reconnect_timeout", "5")
89 self
.mount_a
.run_shell(["rmdir", Raw("d1/dir/.snap/*")])
91 # set mds_kill_mdstable_at, also kill snapclient
93 log
.info("testing snapserver mds_kill_mdstable_at={0}, also kill snapclient".format(i
))
94 status
= self
.fs
.status()
95 last_created
= self
._get
_last
_created
_snap
(rank
=0, status
=status
)
97 rank0
= self
.fs
.get_rank(rank
=0, status
=status
)
98 rank1
= self
.fs
.get_rank(rank
=1, status
=status
)
99 self
.fs
.rank_freeze(True, rank
=0) # prevent failover...
100 self
.fs
.rank_freeze(True, rank
=1) # prevent failover...
101 self
.fs
.rank_asok(['config', 'set', "mds_kill_mdstable_at", "{0}".format(i
)], rank
=0, status
=status
)
102 proc
= self
.mount_a
.run_shell(["mkdir", "d1/dir/.snap/s2{0}".format(i
)], wait
=False)
103 self
.wait_until_true(lambda: "laggy_since" in self
.fs
.get_rank(rank
=0), timeout
=grace
*2);
104 self
.delete_mds_coredump(rank0
['name']);
106 self
.fs
.rank_signal(signal
.SIGKILL
, rank
=1)
109 self
.mount_a
.kill_cleanup()
111 self
.fs
.rank_fail(rank
=0)
112 self
.fs
.mds_restart(rank0
['name'])
113 self
.wait_for_daemon_start([rank0
['name']])
115 self
.fs
.wait_for_state('up:resolve', rank
=0, timeout
=MDS_RESTART_GRACE
)
117 self
.assertEqual(len(self
._get
_pending
_snap
_update
(rank
=0)), 1)
119 self
.assertEqual(len(self
._get
_pending
_snap
_update
(rank
=0)), 0)
120 self
.assertGreater(self
._get
_last
_created
_snap
(rank
=0), last_created
)
122 self
.fs
.rank_fail(rank
=1)
123 self
.fs
.mds_restart(rank1
['name'])
124 self
.wait_for_daemon_start([rank1
['name']])
125 self
.fs
.wait_for_state('up:active', rank
=0, timeout
=MDS_RESTART_GRACE
)
128 self
.wait_until_true(lambda: len(self
._get
_pending
_snap
_update
(rank
=0)) == 0, timeout
=30)
130 self
.assertEqual(self
._get
_last
_created
_snap
(rank
=0), last_created
)
132 self
.assertGreater(self
._get
_last
_created
_snap
(rank
=0), last_created
)
134 self
.mount_a
.mount_wait()
136 self
.mount_a
.run_shell(["rmdir", Raw("d1/dir/.snap/*")])
138 # mds_kill_mdstable_at:
139 # 3: MDSTableClient::handle_request (got agree)
140 # 4: MDSTableClient::commit
141 # 7: MDSTableClient::handle_request (got ack)
143 log
.info("testing snapclient mds_kill_mdstable_at={0}".format(i
))
144 last_created
= self
._get
_last
_created
_snap
(rank
=0)
146 status
= self
.fs
.status()
147 rank1
= self
.fs
.get_rank(rank
=1, status
=status
)
148 self
.fs
.rank_freeze(True, rank
=1) # prevent failover...
149 self
.fs
.rank_asok(['config', 'set', "mds_kill_mdstable_at", "{0}".format(i
)], rank
=1, status
=status
)
150 proc
= self
.mount_a
.run_shell(["mkdir", "d1/dir/.snap/s3{0}".format(i
)], wait
=False)
151 self
.wait_until_true(lambda: "laggy_since" in self
.fs
.get_rank(rank
=1), timeout
=grace
*2);
152 self
.delete_mds_coredump(rank1
['name']);
155 self
.mount_a
.kill_cleanup()
158 self
.assertEqual(len(self
._get
_pending
_snap
_update
(rank
=0)), 1)
160 self
.assertEqual(len(self
._get
_pending
_snap
_update
(rank
=0)), 0)
161 self
.assertGreater(self
._get
_last
_created
_snap
(rank
=0), last_created
)
163 self
.fs
.rank_fail(rank
=1)
164 self
.fs
.mds_restart(rank1
['name'])
165 self
.wait_for_daemon_start([rank1
['name']])
166 status
= self
.fs
.wait_for_daemons(timeout
=MDS_RESTART_GRACE
)
169 self
.wait_until_true(lambda: len(self
._get
_pending
_snap
_update
(rank
=0)) == 0, timeout
=30)
171 self
.assertEqual(self
._get
_last
_created
_snap
(rank
=0), last_created
)
173 self
.assertGreater(self
._get
_last
_created
_snap
(rank
=0), last_created
)
175 self
.mount_a
.mount_wait()
177 self
.mount_a
.run_shell(["rmdir", Raw("d1/dir/.snap/*")])
179 # mds_kill_mdstable_at:
180 # 3: MDSTableClient::handle_request (got agree)
181 # 8: MDSTableServer::handle_rollback
182 log
.info("testing snapclient mds_kill_mdstable_at=3, snapserver mds_kill_mdstable_at=8")
183 last_created
= self
._get
_last
_created
_snap
(rank
=0)
185 status
= self
.fs
.status()
186 rank0
= self
.fs
.get_rank(rank
=0, status
=status
)
187 rank1
= self
.fs
.get_rank(rank
=1, status
=status
)
188 self
.fs
.rank_freeze(True, rank
=0)
189 self
.fs
.rank_freeze(True, rank
=1)
190 self
.fs
.rank_asok(['config', 'set', "mds_kill_mdstable_at", "8"], rank
=0, status
=status
)
191 self
.fs
.rank_asok(['config', 'set', "mds_kill_mdstable_at", "3"], rank
=1, status
=status
)
192 proc
= self
.mount_a
.run_shell(["mkdir", "d1/dir/.snap/s4"], wait
=False)
193 self
.wait_until_true(lambda: "laggy_since" in self
.fs
.get_rank(rank
=1), timeout
=grace
*2);
194 self
.delete_mds_coredump(rank1
['name']);
197 self
.mount_a
.kill_cleanup()
199 self
.assertEqual(len(self
._get
_pending
_snap
_update
(rank
=0)), 1)
201 self
.fs
.rank_fail(rank
=1)
202 self
.fs
.mds_restart(rank1
['name'])
203 self
.wait_for_daemon_start([rank1
['name']])
205 # rollback triggers assertion
206 self
.wait_until_true(lambda: "laggy_since" in self
.fs
.get_rank(rank
=0), timeout
=grace
*2);
207 self
.delete_mds_coredump(rank0
['name']);
208 self
.fs
.rank_fail(rank
=0)
209 self
.fs
.mds_restart(rank0
['name'])
210 self
.wait_for_daemon_start([rank0
['name']])
211 self
.fs
.wait_for_state('up:active', rank
=0, timeout
=MDS_RESTART_GRACE
)
213 # mds.1 should re-send rollback message
214 self
.wait_until_true(lambda: len(self
._get
_pending
_snap
_update
(rank
=0)) == 0, timeout
=30)
215 self
.assertEqual(self
._get
_last
_created
_snap
(rank
=0), last_created
)
217 self
.mount_a
.mount_wait()
219 def test_snapclient_cache(self
):
221 check if snapclient cache gets synced properly
223 self
.fs
.set_allow_new_snaps(True);
224 self
.fs
.set_max_mds(3)
225 status
= self
.fs
.wait_for_daemons()
227 grace
= float(self
.fs
.get_config("mds_beacon_grace", service_type
="mon"))
229 self
.mount_a
.run_shell(["mkdir", "-p", "d0/d1/dir"])
230 self
.mount_a
.run_shell(["mkdir", "-p", "d0/d2/dir"])
231 self
.mount_a
.setfattr("d0", "ceph.dir.pin", "0")
232 self
.mount_a
.setfattr("d0/d1", "ceph.dir.pin", "1")
233 self
.mount_a
.setfattr("d0/d2", "ceph.dir.pin", "2")
234 self
._wait
_subtrees
([("/d0", 0), ("/d0/d1", 1), ("/d0/d2", 2)], rank
="all", status
=status
, path
="/d0")
236 def _check_snapclient_cache(snaps_dump
, cache_dump
=None, rank
=0):
237 if cache_dump
is None:
238 cache_dump
= self
._get
_snapclient
_dump
(rank
=rank
)
239 for key
, value
in cache_dump
.items():
240 if value
!= snaps_dump
[key
]:
245 last_created
= self
._get
_last
_created
_snap
(rank
=0)
246 self
.mount_a
.run_shell(["mkdir", "d0/d1/dir/.snap/s1", "d0/d1/dir/.snap/s2"])
247 self
.wait_until_true(lambda: len(self
._get
_pending
_snap
_update
(rank
=0)) == 0, timeout
=30)
248 self
.assertGreater(self
._get
_last
_created
_snap
(rank
=0), last_created
)
250 snaps_dump
= self
._get
_snapserver
_dump
(rank
=0)
251 self
.assertTrue(_check_snapclient_cache(snaps_dump
, rank
=0));
252 self
.assertTrue(_check_snapclient_cache(snaps_dump
, rank
=1));
253 self
.assertTrue(_check_snapclient_cache(snaps_dump
, rank
=2));
256 last_destroyed
= self
._get
_last
_destroyed
_snap
(rank
=0)
257 self
.mount_a
.run_shell(["rmdir", "d0/d1/dir/.snap/s1"])
258 self
.wait_until_true(lambda: len(self
._get
_pending
_snap
_destroy
(rank
=0)) == 0, timeout
=30)
259 self
.assertGreater(self
._get
_last
_destroyed
_snap
(rank
=0), last_destroyed
)
261 snaps_dump
= self
._get
_snapserver
_dump
(rank
=0)
262 self
.assertTrue(_check_snapclient_cache(snaps_dump
, rank
=0));
263 self
.assertTrue(_check_snapclient_cache(snaps_dump
, rank
=1));
264 self
.assertTrue(_check_snapclient_cache(snaps_dump
, rank
=2));
266 # sync during mds recovers
267 self
.fs
.rank_fail(rank
=2)
268 status
= self
.fs
.wait_for_daemons(timeout
=MDS_RESTART_GRACE
)
269 self
.assertTrue(_check_snapclient_cache(snaps_dump
, rank
=2));
271 self
.fs
.rank_fail(rank
=0)
272 self
.fs
.rank_fail(rank
=1)
273 status
= self
.fs
.wait_for_daemons()
274 self
.fs
.wait_for_state('up:active', rank
=0, timeout
=MDS_RESTART_GRACE
)
275 self
.assertTrue(_check_snapclient_cache(snaps_dump
, rank
=0));
276 self
.assertTrue(_check_snapclient_cache(snaps_dump
, rank
=1));
277 self
.assertTrue(_check_snapclient_cache(snaps_dump
, rank
=2));
279 # kill at MDSTableClient::handle_notify_prep
280 status
= self
.fs
.status()
281 rank2
= self
.fs
.get_rank(rank
=2, status
=status
)
282 self
.fs
.rank_freeze(True, rank
=2)
283 self
.fs
.rank_asok(['config', 'set', "mds_kill_mdstable_at", "9"], rank
=2, status
=status
)
284 proc
= self
.mount_a
.run_shell(["mkdir", "d0/d1/dir/.snap/s3"], wait
=False)
285 self
.wait_until_true(lambda: "laggy_since" in self
.fs
.get_rank(rank
=2), timeout
=grace
*2);
286 self
.delete_mds_coredump(rank2
['name']);
288 # mksnap should wait for notify ack from mds.2
289 self
.assertFalse(proc
.finished
);
291 # mksnap should proceed after mds.2 fails
292 self
.fs
.rank_fail(rank
=2)
293 self
.wait_until_true(lambda: proc
.finished
, timeout
=30);
295 self
.fs
.mds_restart(rank2
['name'])
296 self
.wait_for_daemon_start([rank2
['name']])
297 status
= self
.fs
.wait_for_daemons(timeout
=MDS_RESTART_GRACE
)
299 self
.mount_a
.run_shell(["rmdir", Raw("d0/d1/dir/.snap/*")])
301 # kill at MDSTableClient::commit
302 # the recovering mds should sync all mds' cache when it enters resolve stage
303 self
.set_conf("mds", "mds_reconnect_timeout", "5")
304 for i
in range(1, 4):
305 status
= self
.fs
.status()
306 rank2
= self
.fs
.get_rank(rank
=2, status
=status
)
307 self
.fs
.rank_freeze(True, rank
=2)
308 self
.fs
.rank_asok(['config', 'set', "mds_kill_mdstable_at", "4"], rank
=2, status
=status
)
309 last_created
= self
._get
_last
_created
_snap
(rank
=0)
310 proc
= self
.mount_a
.run_shell(["mkdir", "d0/d2/dir/.snap/s{0}".format(i
)], wait
=False)
311 self
.wait_until_true(lambda: "laggy_since" in self
.fs
.get_rank(rank
=2), timeout
=grace
*2);
312 self
.delete_mds_coredump(rank2
['name']);
315 self
.mount_a
.kill_cleanup()
317 self
.assertEqual(len(self
._get
_pending
_snap
_update
(rank
=0)), 1)
320 self
.fs
.rank_fail(rank
=0)
322 self
.fs
.rank_fail(rank
=1)
324 self
.fs
.rank_fail(rank
=2)
325 self
.fs
.mds_restart(rank2
['name'])
326 self
.wait_for_daemon_start([rank2
['name']])
327 status
= self
.fs
.wait_for_daemons(timeout
=MDS_RESTART_GRACE
)
329 rank0_cache
= self
._get
_snapclient
_dump
(rank
=0)
330 rank1_cache
= self
._get
_snapclient
_dump
(rank
=1)
331 rank2_cache
= self
._get
_snapclient
_dump
(rank
=2)
333 self
.assertGreater(int(rank0_cache
["last_created"]), last_created
)
334 self
.assertEqual(rank0_cache
, rank1_cache
);
335 self
.assertEqual(rank0_cache
, rank2_cache
);
337 self
.wait_until_true(lambda: len(self
._get
_pending
_snap
_update
(rank
=0)) == 0, timeout
=30)
339 snaps_dump
= self
._get
_snapserver
_dump
(rank
=0)
340 self
.assertEqual(snaps_dump
["last_created"], rank0_cache
["last_created"])
341 self
.assertTrue(_check_snapclient_cache(snaps_dump
, cache_dump
=rank0_cache
));
343 self
.mount_a
.mount_wait()
345 self
.mount_a
.run_shell(["rmdir", Raw("d0/d2/dir/.snap/*")])
    def test_multimds_mksnap(self):
        """
        check if snapshot takes effect across authority subtrees
        """
        self.fs.set_allow_new_snaps(True);
        self.fs.set_max_mds(2)
        status = self.fs.wait_for_daemons()

        # Pin d0 to rank 0 and d0/d1 to rank 1, so the snapshot spans two
        # authority subtrees.
        self.mount_a.run_shell(["mkdir", "-p", "d0/d1/empty"])
        self.mount_a.setfattr("d0", "ceph.dir.pin", "0")
        self.mount_a.setfattr("d0/d1", "ceph.dir.pin", "1")
        self._wait_subtrees([("/d0", 0), ("/d0/d1", 1)], rank="all", status=status, path="/d0")

        # A snapshot taken at d0 (rank 0) must preserve file data that lives
        # under d0/d1 (rank 1), even after the live file is removed.
        self.mount_a.write_test_pattern("d0/d1/file_a", 8 * 1024 * 1024)
        self.mount_a.run_shell(["mkdir", "d0/.snap/s1"])
        self.mount_a.run_shell(["rm", "-f", "d0/d1/file_a"])
        self.mount_a.validate_test_pattern("d0/.snap/s1/d1/file_a", 8 * 1024 * 1024)

        # cleanup
        self.mount_a.run_shell(["rmdir", "d0/.snap/s1"])
        self.mount_a.run_shell(["rm", "-rf", "d0"])
    def test_multimds_past_parents(self):
        """
        check if past parents are properly recorded during across authority rename
        """
        self.fs.set_allow_new_snaps(True);
        self.fs.set_max_mds(2)
        status = self.fs.wait_for_daemons()

        # Pin d0 and d1 to different ranks.
        self.mount_a.run_shell_payload("mkdir -p {d0,d1}/empty")
        self.mount_a.setfattr("d0", "ceph.dir.pin", "0")
        self.mount_a.setfattr("d1", "ceph.dir.pin", "1")
        self._wait_subtrees([("/d0", 0), ("/d1", 1)], rank=0, status=status)

        self.mount_a.run_shell(["mkdir", "d0/d3"])
        self.mount_a.run_shell(["mkdir", "d0/.snap/s1"])
        snap_name = self.mount_a.run_shell(["ls", "d0/d3/.snap"]).stdout.getvalue()

        # Rename across authority subtrees: d0/d3's snapshot must remain
        # visible at the new location via the recorded past parent.
        self.mount_a.run_shell(["mv", "d0/d3", "d1/d3"])
        snap_name1 = self.mount_a.run_shell(["ls", "d1/d3/.snap"]).stdout.getvalue()
        self.assertEqual(snap_name1, snap_name);

        # Removing the snapshot at the old root must also remove it from the
        # renamed directory's .snap listing.
        self.mount_a.run_shell(["rmdir", "d0/.snap/s1"])
        snap_name1 = self.mount_a.run_shell(["ls", "d1/d3/.snap"]).stdout.getvalue()
        self.assertEqual(snap_name1, "");

        self.mount_a.run_shell(["rm", "-rf", "d0", "d1"])
395 def test_multimds_hardlink(self
):
397 check if hardlink snapshot works in multimds setup
399 self
.fs
.set_allow_new_snaps(True);
400 self
.fs
.set_max_mds(2)
401 status
= self
.fs
.wait_for_daemons()
403 self
.mount_a
.run_shell_payload("mkdir -p {d0,d1}/empty")
405 self
.mount_a
.setfattr("d0", "ceph.dir.pin", "0")
406 self
.mount_a
.setfattr("d1", "ceph.dir.pin", "1")
407 self
._wait
_subtrees
([("/d0", 0), ("/d1", 1)], rank
=0, status
=status
)
409 self
.mount_a
.run_python(dedent("""
411 open(os.path.join("{path}", "d0/file1"), 'w').write("asdf")
412 open(os.path.join("{path}", "d0/file2"), 'w').write("asdf")
413 """.format(path
=self
.mount_a
.mountpoint
)
416 self
.mount_a
.run_shell(["ln", "d0/file1", "d1/file1"])
417 self
.mount_a
.run_shell(["ln", "d0/file2", "d1/file2"])
419 self
.mount_a
.run_shell(["mkdir", "d1/.snap/s1"])
421 self
.mount_a
.run_python(dedent("""
423 open(os.path.join("{path}", "d0/file1"), 'w').write("qwer")
424 """.format(path
=self
.mount_a
.mountpoint
)
427 self
.mount_a
.run_shell(["grep", "asdf", "d1/.snap/s1/file1"])
429 self
.mount_a
.run_shell(["rm", "-f", "d0/file2"])
430 self
.mount_a
.run_shell(["grep", "asdf", "d1/.snap/s1/file2"])
432 self
.mount_a
.run_shell(["rm", "-f", "d1/file2"])
433 self
.mount_a
.run_shell(["grep", "asdf", "d1/.snap/s1/file2"])
435 self
.mount_a
.run_shell(["rmdir", "d1/.snap/s1"])
436 self
.mount_a
.run_shell(["rm", "-rf", "d0", "d1"])
    class SnapLimitViolationException(Exception):
        """Raised when creating a snapshot beyond mds_max_snaps_per_dir fails."""

        # Class-level default; overwritten per instance in __init__.
        failed_snapshot_number = -1

        def __init__(self, num):
            # 1-based index of the snapshot whose creation failed.
            self.failed_snapshot_number = num
444 def get_snap_name(self
, dir_name
, sno
):
445 sname
= "{dir_name}/.snap/s_{sno}".format(dir_name
=dir_name
, sno
=sno
)
    def create_snap_dir(self, sname):
        # mkdir under .snap creates the snapshot; the shell call raises
        # CommandFailedError if the mkdir fails (e.g. snapshot limit reached,
        # as handled by create_dir_and_snaps).
        self.mount_a.run_shell(["mkdir", sname])
451 def delete_dir_and_snaps(self
, dir_name
, snaps
):
452 for sno
in range(1, snaps
+1, 1):
453 sname
= self
.get_snap_name(dir_name
, sno
)
454 self
.mount_a
.run_shell(["rmdir", sname
])
455 self
.mount_a
.run_shell(["rmdir", dir_name
])
457 def create_dir_and_snaps(self
, dir_name
, snaps
):
458 self
.mount_a
.run_shell(["mkdir", dir_name
])
460 for sno
in range(1, snaps
+1, 1):
461 sname
= self
.get_snap_name(dir_name
, sno
)
463 self
.create_snap_dir(sname
)
464 except CommandFailedError
as e
:
465 # failing at the last mkdir beyond the limit is expected
467 log
.info("failed while creating snap #{}: {}".format(sno
, repr(e
)))
468 raise TestSnapshots
.SnapLimitViolationException(sno
)
    def test_mds_max_snaps_per_dir_default_limit(self):
        """
        Test the newly introduced option named mds_max_snaps_per_dir
        Default snaps limit is 100
        Test if the default number of snapshot directories can be created
        """
        self.create_dir_and_snaps("accounts", int(self.mds_max_snaps_per_dir))
        self.delete_dir_and_snaps("accounts", int(self.mds_max_snaps_per_dir))
479 def test_mds_max_snaps_per_dir_with_increased_limit(self
):
481 Test the newly introudced option named mds_max_snaps_per_dir
482 First create 101 directories and ensure that the 101st directory
483 creation fails. Then increase the default by one and see if the
484 additional directory creation succeeds
486 # first test the default limit
487 new_limit
= int(self
.mds_max_snaps_per_dir
)
488 self
.fs
.rank_asok(['config', 'set', 'mds_max_snaps_per_dir', repr(new_limit
)])
490 self
.create_dir_and_snaps("accounts", new_limit
+ 1)
491 except TestSnapshots
.SnapLimitViolationException
as e
:
492 if e
.failed_snapshot_number
== (new_limit
+ 1):
494 # then increase the limit by one and test
495 new_limit
= new_limit
+ 1
496 self
.fs
.rank_asok(['config', 'set', 'mds_max_snaps_per_dir', repr(new_limit
)])
497 sname
= self
.get_snap_name("accounts", new_limit
)
498 self
.create_snap_dir(sname
)
499 self
.delete_dir_and_snaps("accounts", new_limit
)
501 def test_mds_max_snaps_per_dir_with_reduced_limit(self
):
503 Test the newly introudced option named mds_max_snaps_per_dir
504 First create 99 directories. Then reduce the limit to 98. Then try
505 creating another directory and ensure that additional directory
508 # first test the new limit
509 new_limit
= int(self
.mds_max_snaps_per_dir
) - 1
510 self
.create_dir_and_snaps("accounts", new_limit
)
511 sname
= self
.get_snap_name("accounts", new_limit
+ 1)
512 # then reduce the limit by one and test
513 new_limit
= new_limit
- 1
514 self
.fs
.rank_asok(['config', 'set', 'mds_max_snaps_per_dir', repr(new_limit
)])
516 self
.create_snap_dir(sname
)
517 except CommandFailedError
:
518 # after reducing limit we expect the new snapshot creation to fail
520 self
.delete_dir_and_snaps("accounts", new_limit
+ 1)