]> git.proxmox.com Git - ceph.git/blame - ceph/qa/tasks/cephfs/test_snapshots.py
import 15.2.0 Octopus source
[ceph.git] / ceph / qa / tasks / cephfs / test_snapshots.py
CommitLineData
9f95a23c 1import sys
11fdf7f2
TL
2import logging
3import signal
11fdf7f2
TL
4from textwrap import dedent
5from tasks.cephfs.fuse_mount import FuseMount
6from tasks.cephfs.cephfs_test_case import CephFSTestCase
7from teuthology.orchestra.run import CommandFailedError, Raw
11fdf7f2
TL
8
9log = logging.getLogger(__name__)
10
11MDS_RESTART_GRACE = 60
12
13class TestSnapshots(CephFSTestCase):
14 MDSS_REQUIRED = 3
9f95a23c 15 LOAD_SETTINGS = ["mds_max_snaps_per_dir"]
11fdf7f2
TL
16
17 def _check_subtree(self, rank, path, status=None):
18 got_subtrees = self.fs.rank_asok(["get", "subtrees"], rank=rank, status=status)
19 for s in got_subtrees:
20 if s['dir']['path'] == path and s['auth_first'] == rank:
21 return True
22 return False
23
24 def _get_snapclient_dump(self, rank=0, status=None):
25 return self.fs.rank_asok(["dump", "snaps"], rank=rank, status=status)
26
27 def _get_snapserver_dump(self, rank=0, status=None):
28 return self.fs.rank_asok(["dump", "snaps", "--server"], rank=rank, status=status)
29
30 def _get_last_created_snap(self, rank=0, status=None):
31 return int(self._get_snapserver_dump(rank,status=status)["last_created"])
32
33 def _get_last_destroyed_snap(self, rank=0, status=None):
34 return int(self._get_snapserver_dump(rank,status=status)["last_destroyed"])
35
36 def _get_pending_snap_update(self, rank=0, status=None):
37 return self._get_snapserver_dump(rank,status=status)["pending_update"]
38
39 def _get_pending_snap_destroy(self, rank=0, status=None):
40 return self._get_snapserver_dump(rank,status=status)["pending_destroy"]
41
42 def test_kill_mdstable(self):
43 """
44 check snaptable transcation
45 """
46 if not isinstance(self.mount_a, FuseMount):
9f95a23c 47 self.skipTest("Require FUSE client to forcibly kill mount")
11fdf7f2
TL
48
49 self.fs.set_allow_new_snaps(True);
50 self.fs.set_max_mds(2)
51 status = self.fs.wait_for_daemons()
52
53 grace = float(self.fs.get_config("mds_beacon_grace", service_type="mon"))
54
55 # setup subtrees
56 self.mount_a.run_shell(["mkdir", "-p", "d1/dir"])
57 self.mount_a.setfattr("d1", "ceph.dir.pin", "1")
58 self.wait_until_true(lambda: self._check_subtree(1, '/d1', status=status), timeout=30)
59
60 last_created = self._get_last_created_snap(rank=0,status=status)
61
62 # mds_kill_mdstable_at:
63 # 1: MDSTableServer::handle_prepare
64 # 2: MDSTableServer::_prepare_logged
65 # 5: MDSTableServer::handle_commit
66 # 6: MDSTableServer::_commit_logged
67 for i in [1,2,5,6]:
68 log.info("testing snapserver mds_kill_mdstable_at={0}".format(i))
69
70 status = self.fs.status()
71 rank0 = self.fs.get_rank(rank=0, status=status)
72 self.fs.rank_freeze(True, rank=0)
73 self.fs.rank_asok(['config', 'set', "mds_kill_mdstable_at", "{0}".format(i)], rank=0, status=status)
74 proc = self.mount_a.run_shell(["mkdir", "d1/dir/.snap/s1{0}".format(i)], wait=False)
75 self.wait_until_true(lambda: "laggy_since" in self.fs.get_rank(rank=0), timeout=grace*2);
76 self.delete_mds_coredump(rank0['name']);
77
78 self.fs.rank_fail(rank=0)
79 self.fs.mds_restart(rank0['name'])
80 self.wait_for_daemon_start([rank0['name']])
81 status = self.fs.wait_for_daemons()
82
83 proc.wait()
84 last_created += 1
85 self.wait_until_true(lambda: self._get_last_created_snap(rank=0) == last_created, timeout=30)
86
87 self.set_conf("mds", "mds_reconnect_timeout", "5")
88
89 self.mount_a.run_shell(["rmdir", Raw("d1/dir/.snap/*")])
90
91 # set mds_kill_mdstable_at, also kill snapclient
92 for i in [2,5,6]:
93 log.info("testing snapserver mds_kill_mdstable_at={0}, also kill snapclient".format(i))
94 status = self.fs.status()
95 last_created = self._get_last_created_snap(rank=0, status=status)
96
97 rank0 = self.fs.get_rank(rank=0, status=status)
98 rank1 = self.fs.get_rank(rank=1, status=status)
99 self.fs.rank_freeze(True, rank=0) # prevent failover...
100 self.fs.rank_freeze(True, rank=1) # prevent failover...
101 self.fs.rank_asok(['config', 'set', "mds_kill_mdstable_at", "{0}".format(i)], rank=0, status=status)
102 proc = self.mount_a.run_shell(["mkdir", "d1/dir/.snap/s2{0}".format(i)], wait=False)
103 self.wait_until_true(lambda: "laggy_since" in self.fs.get_rank(rank=0), timeout=grace*2);
104 self.delete_mds_coredump(rank0['name']);
105
106 self.fs.rank_signal(signal.SIGKILL, rank=1)
107
108 self.mount_a.kill()
109 self.mount_a.kill_cleanup()
110
111 self.fs.rank_fail(rank=0)
112 self.fs.mds_restart(rank0['name'])
113 self.wait_for_daemon_start([rank0['name']])
114
115 self.fs.wait_for_state('up:resolve', rank=0, timeout=MDS_RESTART_GRACE)
116 if i in [2,5]:
117 self.assertEqual(len(self._get_pending_snap_update(rank=0)), 1)
118 elif i == 6:
119 self.assertEqual(len(self._get_pending_snap_update(rank=0)), 0)
120 self.assertGreater(self._get_last_created_snap(rank=0), last_created)
121
122 self.fs.rank_fail(rank=1)
123 self.fs.mds_restart(rank1['name'])
124 self.wait_for_daemon_start([rank1['name']])
125 self.fs.wait_for_state('up:active', rank=0, timeout=MDS_RESTART_GRACE)
126
127 if i in [2,5]:
128 self.wait_until_true(lambda: len(self._get_pending_snap_update(rank=0)) == 0, timeout=30)
129 if i == 2:
130 self.assertEqual(self._get_last_created_snap(rank=0), last_created)
131 else:
132 self.assertGreater(self._get_last_created_snap(rank=0), last_created)
133
134 self.mount_a.mount()
135 self.mount_a.wait_until_mounted()
136
137 self.mount_a.run_shell(["rmdir", Raw("d1/dir/.snap/*")])
138
139 # mds_kill_mdstable_at:
140 # 3: MDSTableClient::handle_request (got agree)
141 # 4: MDSTableClient::commit
142 # 7: MDSTableClient::handle_request (got ack)
143 for i in [3,4,7]:
144 log.info("testing snapclient mds_kill_mdstable_at={0}".format(i))
145 last_created = self._get_last_created_snap(rank=0)
146
147 status = self.fs.status()
148 rank1 = self.fs.get_rank(rank=1, status=status)
149 self.fs.rank_freeze(True, rank=1) # prevent failover...
150 self.fs.rank_asok(['config', 'set', "mds_kill_mdstable_at", "{0}".format(i)], rank=1, status=status)
151 proc = self.mount_a.run_shell(["mkdir", "d1/dir/.snap/s3{0}".format(i)], wait=False)
152 self.wait_until_true(lambda: "laggy_since" in self.fs.get_rank(rank=1), timeout=grace*2);
153 self.delete_mds_coredump(rank1['name']);
154
155 self.mount_a.kill()
156 self.mount_a.kill_cleanup()
157
158 if i in [3,4]:
159 self.assertEqual(len(self._get_pending_snap_update(rank=0)), 1)
160 elif i == 7:
161 self.assertEqual(len(self._get_pending_snap_update(rank=0)), 0)
162 self.assertGreater(self._get_last_created_snap(rank=0), last_created)
163
164 self.fs.rank_fail(rank=1)
165 self.fs.mds_restart(rank1['name'])
166 self.wait_for_daemon_start([rank1['name']])
167 status = self.fs.wait_for_daemons(timeout=MDS_RESTART_GRACE)
168
169 if i in [3,4]:
170 self.wait_until_true(lambda: len(self._get_pending_snap_update(rank=0)) == 0, timeout=30)
171 if i == 3:
172 self.assertEqual(self._get_last_created_snap(rank=0), last_created)
173 else:
174 self.assertGreater(self._get_last_created_snap(rank=0), last_created)
175
176 self.mount_a.mount()
177 self.mount_a.wait_until_mounted()
178
179 self.mount_a.run_shell(["rmdir", Raw("d1/dir/.snap/*")])
180
181 # mds_kill_mdstable_at:
182 # 3: MDSTableClient::handle_request (got agree)
183 # 8: MDSTableServer::handle_rollback
184 log.info("testing snapclient mds_kill_mdstable_at=3, snapserver mds_kill_mdstable_at=8")
185 last_created = self._get_last_created_snap(rank=0)
186
187 status = self.fs.status()
188 rank0 = self.fs.get_rank(rank=0, status=status)
189 rank1 = self.fs.get_rank(rank=1, status=status)
190 self.fs.rank_freeze(True, rank=0)
191 self.fs.rank_freeze(True, rank=1)
9f95a23c
TL
192 self.fs.rank_asok(['config', 'set', "mds_kill_mdstable_at", "8"], rank=0, status=status)
193 self.fs.rank_asok(['config', 'set', "mds_kill_mdstable_at", "3"], rank=1, status=status)
194 proc = self.mount_a.run_shell(["mkdir", "d1/dir/.snap/s4"], wait=False)
11fdf7f2
TL
195 self.wait_until_true(lambda: "laggy_since" in self.fs.get_rank(rank=1), timeout=grace*2);
196 self.delete_mds_coredump(rank1['name']);
197
198 self.mount_a.kill()
199 self.mount_a.kill_cleanup()
200
201 self.assertEqual(len(self._get_pending_snap_update(rank=0)), 1)
202
203 self.fs.rank_fail(rank=1)
204 self.fs.mds_restart(rank1['name'])
205 self.wait_for_daemon_start([rank1['name']])
206
207 # rollback triggers assertion
208 self.wait_until_true(lambda: "laggy_since" in self.fs.get_rank(rank=0), timeout=grace*2);
209 self.delete_mds_coredump(rank0['name']);
210 self.fs.rank_fail(rank=0)
211 self.fs.mds_restart(rank0['name'])
212 self.wait_for_daemon_start([rank0['name']])
213 self.fs.wait_for_state('up:active', rank=0, timeout=MDS_RESTART_GRACE)
214
215 # mds.1 should re-send rollback message
216 self.wait_until_true(lambda: len(self._get_pending_snap_update(rank=0)) == 0, timeout=30)
217 self.assertEqual(self._get_last_created_snap(rank=0), last_created)
218
219 self.mount_a.mount()
220 self.mount_a.wait_until_mounted()
221
222 def test_snapclient_cache(self):
223 """
224 check if snapclient cache gets synced properly
225 """
226 self.fs.set_allow_new_snaps(True);
227 self.fs.set_max_mds(3)
228 status = self.fs.wait_for_daemons()
229
230 grace = float(self.fs.get_config("mds_beacon_grace", service_type="mon"))
231
232 self.mount_a.run_shell(["mkdir", "-p", "d0/d1/dir"])
233 self.mount_a.run_shell(["mkdir", "-p", "d0/d2/dir"])
234 self.mount_a.setfattr("d0", "ceph.dir.pin", "0")
235 self.mount_a.setfattr("d0/d1", "ceph.dir.pin", "1")
236 self.mount_a.setfattr("d0/d2", "ceph.dir.pin", "2")
237 self.wait_until_true(lambda: self._check_subtree(2, '/d0/d2', status=status), timeout=30)
238 self.wait_until_true(lambda: self._check_subtree(1, '/d0/d1', status=status), timeout=5)
239 self.wait_until_true(lambda: self._check_subtree(0, '/d0', status=status), timeout=5)
240
241 def _check_snapclient_cache(snaps_dump, cache_dump=None, rank=0):
242 if cache_dump is None:
243 cache_dump = self._get_snapclient_dump(rank=rank)
9f95a23c 244 for key, value in cache_dump.items():
11fdf7f2
TL
245 if value != snaps_dump[key]:
246 return False
247 return True;
248
249 # sync after mksnap
250 last_created = self._get_last_created_snap(rank=0)
251 self.mount_a.run_shell(["mkdir", "d0/d1/dir/.snap/s1", "d0/d1/dir/.snap/s2"])
252 self.wait_until_true(lambda: len(self._get_pending_snap_update(rank=0)) == 0, timeout=30)
253 self.assertGreater(self._get_last_created_snap(rank=0), last_created)
254
255 snaps_dump = self._get_snapserver_dump(rank=0)
256 self.assertTrue(_check_snapclient_cache(snaps_dump, rank=0));
257 self.assertTrue(_check_snapclient_cache(snaps_dump, rank=1));
258 self.assertTrue(_check_snapclient_cache(snaps_dump, rank=2));
259
260 # sync after rmsnap
261 last_destroyed = self._get_last_destroyed_snap(rank=0)
262 self.mount_a.run_shell(["rmdir", "d0/d1/dir/.snap/s1"])
263 self.wait_until_true(lambda: len(self._get_pending_snap_destroy(rank=0)) == 0, timeout=30)
264 self.assertGreater(self._get_last_destroyed_snap(rank=0), last_destroyed)
265
266 snaps_dump = self._get_snapserver_dump(rank=0)
267 self.assertTrue(_check_snapclient_cache(snaps_dump, rank=0));
268 self.assertTrue(_check_snapclient_cache(snaps_dump, rank=1));
269 self.assertTrue(_check_snapclient_cache(snaps_dump, rank=2));
270
271 # sync during mds recovers
272 self.fs.rank_fail(rank=2)
273 status = self.fs.wait_for_daemons(timeout=MDS_RESTART_GRACE)
274 self.assertTrue(_check_snapclient_cache(snaps_dump, rank=2));
275
276 self.fs.rank_fail(rank=0)
277 self.fs.rank_fail(rank=1)
278 status = self.fs.wait_for_daemons()
279 self.fs.wait_for_state('up:active', rank=0, timeout=MDS_RESTART_GRACE)
280 self.assertTrue(_check_snapclient_cache(snaps_dump, rank=0));
281 self.assertTrue(_check_snapclient_cache(snaps_dump, rank=1));
282 self.assertTrue(_check_snapclient_cache(snaps_dump, rank=2));
283
284 # kill at MDSTableClient::handle_notify_prep
285 status = self.fs.status()
286 rank2 = self.fs.get_rank(rank=2, status=status)
287 self.fs.rank_freeze(True, rank=2)
288 self.fs.rank_asok(['config', 'set', "mds_kill_mdstable_at", "9"], rank=2, status=status)
289 proc = self.mount_a.run_shell(["mkdir", "d0/d1/dir/.snap/s3"], wait=False)
290 self.wait_until_true(lambda: "laggy_since" in self.fs.get_rank(rank=2), timeout=grace*2);
291 self.delete_mds_coredump(rank2['name']);
292
293 # mksnap should wait for notify ack from mds.2
294 self.assertFalse(proc.finished);
295
296 # mksnap should proceed after mds.2 fails
297 self.fs.rank_fail(rank=2)
298 self.wait_until_true(lambda: proc.finished, timeout=30);
299
300 self.fs.mds_restart(rank2['name'])
301 self.wait_for_daemon_start([rank2['name']])
302 status = self.fs.wait_for_daemons(timeout=MDS_RESTART_GRACE)
303
304 self.mount_a.run_shell(["rmdir", Raw("d0/d1/dir/.snap/*")])
305
306 # kill at MDSTableClient::commit
307 # the recovering mds should sync all mds' cache when it enters resolve stage
308 self.set_conf("mds", "mds_reconnect_timeout", "5")
309 for i in range(1, 4):
310 status = self.fs.status()
311 rank2 = self.fs.get_rank(rank=2, status=status)
312 self.fs.rank_freeze(True, rank=2)
313 self.fs.rank_asok(['config', 'set', "mds_kill_mdstable_at", "4"], rank=2, status=status)
314 last_created = self._get_last_created_snap(rank=0)
315 proc = self.mount_a.run_shell(["mkdir", "d0/d2/dir/.snap/s{0}".format(i)], wait=False)
316 self.wait_until_true(lambda: "laggy_since" in self.fs.get_rank(rank=2), timeout=grace*2);
317 self.delete_mds_coredump(rank2['name']);
318
319 self.mount_a.kill()
320 self.mount_a.kill_cleanup()
321
322 self.assertEqual(len(self._get_pending_snap_update(rank=0)), 1)
323
324 if i in [2,4]:
325 self.fs.rank_fail(rank=0)
326 if i in [3,4]:
327 self.fs.rank_fail(rank=1)
328
329 self.fs.rank_fail(rank=2)
330 self.fs.mds_restart(rank2['name'])
331 self.wait_for_daemon_start([rank2['name']])
332 status = self.fs.wait_for_daemons(timeout=MDS_RESTART_GRACE)
333
334 rank0_cache = self._get_snapclient_dump(rank=0)
335 rank1_cache = self._get_snapclient_dump(rank=1)
336 rank2_cache = self._get_snapclient_dump(rank=2)
337
338 self.assertGreater(int(rank0_cache["last_created"]), last_created)
339 self.assertEqual(rank0_cache, rank1_cache);
340 self.assertEqual(rank0_cache, rank2_cache);
341
342 self.wait_until_true(lambda: len(self._get_pending_snap_update(rank=0)) == 0, timeout=30)
343
344 snaps_dump = self._get_snapserver_dump(rank=0)
345 self.assertEqual(snaps_dump["last_created"], rank0_cache["last_created"])
346 self.assertTrue(_check_snapclient_cache(snaps_dump, cache_dump=rank0_cache));
347
348 self.mount_a.mount()
349 self.mount_a.wait_until_mounted()
350
351 self.mount_a.run_shell(["rmdir", Raw("d0/d2/dir/.snap/*")])
352
353 def test_multimds_mksnap(self):
354 """
355 check if snapshot takes effect across authority subtrees
356 """
357 self.fs.set_allow_new_snaps(True);
358 self.fs.set_max_mds(2)
359 status = self.fs.wait_for_daemons()
360
361 self.mount_a.run_shell(["mkdir", "-p", "d0/d1"])
362 self.mount_a.setfattr("d0", "ceph.dir.pin", "0")
363 self.mount_a.setfattr("d0/d1", "ceph.dir.pin", "1")
364 self.wait_until_true(lambda: self._check_subtree(1, '/d0/d1', status=status), timeout=30)
365 self.wait_until_true(lambda: self._check_subtree(0, '/d0', status=status), timeout=5)
366
367 self.mount_a.write_test_pattern("d0/d1/file_a", 8 * 1024 * 1024)
368 self.mount_a.run_shell(["mkdir", "d0/.snap/s1"])
369 self.mount_a.run_shell(["rm", "-f", "d0/d1/file_a"])
370 self.mount_a.validate_test_pattern("d0/.snap/s1/d1/file_a", 8 * 1024 * 1024)
371
372 self.mount_a.run_shell(["rmdir", "d0/.snap/s1"])
373 self.mount_a.run_shell(["rm", "-rf", "d0"])
374
375 def test_multimds_past_parents(self):
376 """
377 check if past parents are properly recorded during across authority rename
378 """
379 self.fs.set_allow_new_snaps(True);
380 self.fs.set_max_mds(2)
381 status = self.fs.wait_for_daemons()
382
383 self.mount_a.run_shell(["mkdir", "d0", "d1"])
384 self.mount_a.setfattr("d0", "ceph.dir.pin", "0")
385 self.mount_a.setfattr("d1", "ceph.dir.pin", "1")
386 self.wait_until_true(lambda: self._check_subtree(1, '/d1', status=status), timeout=30)
387 self.wait_until_true(lambda: self._check_subtree(0, '/d0', status=status), timeout=5)
388
389 self.mount_a.run_shell(["mkdir", "d0/d3"])
390 self.mount_a.run_shell(["mkdir", "d0/.snap/s1"])
391 snap_name = self.mount_a.run_shell(["ls", "d0/d3/.snap"]).stdout.getvalue()
392
393 self.mount_a.run_shell(["mv", "d0/d3", "d1/d3"])
394 snap_name1 = self.mount_a.run_shell(["ls", "d1/d3/.snap"]).stdout.getvalue()
395 self.assertEqual(snap_name1, snap_name);
396
397 self.mount_a.run_shell(["rmdir", "d0/.snap/s1"])
398 snap_name1 = self.mount_a.run_shell(["ls", "d1/d3/.snap"]).stdout.getvalue()
399 self.assertEqual(snap_name1, "");
400
401 self.mount_a.run_shell(["rm", "-rf", "d0", "d1"])
402
403 def test_multimds_hardlink(self):
404 """
405 check if hardlink snapshot works in multimds setup
406 """
407 self.fs.set_allow_new_snaps(True);
408 self.fs.set_max_mds(2)
409 status = self.fs.wait_for_daemons()
410
411 self.mount_a.run_shell(["mkdir", "d0", "d1"])
412
413 self.mount_a.setfattr("d0", "ceph.dir.pin", "0")
414 self.mount_a.setfattr("d1", "ceph.dir.pin", "1")
415 self.wait_until_true(lambda: self._check_subtree(1, '/d1', status=status), timeout=30)
416 self.wait_until_true(lambda: self._check_subtree(0, '/d0', status=status), timeout=5)
417
418 self.mount_a.run_python(dedent("""
419 import os
420 open(os.path.join("{path}", "d0/file1"), 'w').write("asdf")
421 open(os.path.join("{path}", "d0/file2"), 'w').write("asdf")
422 """.format(path=self.mount_a.mountpoint)
423 ))
424
425 self.mount_a.run_shell(["ln", "d0/file1", "d1/file1"])
426 self.mount_a.run_shell(["ln", "d0/file2", "d1/file2"])
427
428 self.mount_a.run_shell(["mkdir", "d1/.snap/s1"])
429
430 self.mount_a.run_python(dedent("""
431 import os
432 open(os.path.join("{path}", "d0/file1"), 'w').write("qwer")
433 """.format(path=self.mount_a.mountpoint)
434 ))
435
436 self.mount_a.run_shell(["grep", "asdf", "d1/.snap/s1/file1"])
437
438 self.mount_a.run_shell(["rm", "-f", "d0/file2"])
439 self.mount_a.run_shell(["grep", "asdf", "d1/.snap/s1/file2"])
440
441 self.mount_a.run_shell(["rm", "-f", "d1/file2"])
442 self.mount_a.run_shell(["grep", "asdf", "d1/.snap/s1/file2"])
443
444 self.mount_a.run_shell(["rmdir", "d1/.snap/s1"])
445 self.mount_a.run_shell(["rm", "-rf", "d0", "d1"])
9f95a23c
TL
446
447 class SnapLimitViolationException(Exception):
448 failed_snapshot_number = -1
449
450 def __init__(self, num):
451 self.failed_snapshot_number = num
452
453 def get_snap_name(self, dir_name, sno):
454 sname = "{dir_name}/.snap/s_{sno}".format(dir_name=dir_name, sno=sno)
455 return sname
456
457 def create_snap_dir(self, sname):
458 self.mount_a.run_shell(["mkdir", sname])
459
460 def delete_dir_and_snaps(self, dir_name, snaps):
461 for sno in range(1, snaps+1, 1):
462 sname = self.get_snap_name(dir_name, sno)
463 self.mount_a.run_shell(["rmdir", sname])
464 self.mount_a.run_shell(["rmdir", dir_name])
465
466 def create_dir_and_snaps(self, dir_name, snaps):
467 self.mount_a.run_shell(["mkdir", dir_name])
468
469 for sno in range(1, snaps+1, 1):
470 sname = self.get_snap_name(dir_name, sno)
471 try:
472 self.create_snap_dir(sname)
473 except CommandFailedError as e:
474 # failing at the last mkdir beyond the limit is expected
475 if sno == snaps:
476 log.info("failed while creating snap #{}: {}".format(sno, repr(e)))
477 sys.exc_clear()
478 raise TestSnapshots.SnapLimitViolationException(sno)
479
480 def test_mds_max_snaps_per_dir_default_limit(self):
481 """
482 Test the newly introudced option named mds_max_snaps_per_dir
483 Default snaps limit is 100
484 Test if the default number of snapshot directories can be created
485 """
486 self.create_dir_and_snaps("accounts", int(self.mds_max_snaps_per_dir))
487 self.delete_dir_and_snaps("accounts", int(self.mds_max_snaps_per_dir))
488
489 def test_mds_max_snaps_per_dir_with_increased_limit(self):
490 """
491 Test the newly introudced option named mds_max_snaps_per_dir
492 First create 101 directories and ensure that the 101st directory
493 creation fails. Then increase the default by one and see if the
494 additional directory creation succeeds
495 """
496 # first test the default limit
497 new_limit = int(self.mds_max_snaps_per_dir)
498 self.fs.rank_asok(['config', 'set', 'mds_max_snaps_per_dir', repr(new_limit)])
499 try:
500 self.create_dir_and_snaps("accounts", new_limit + 1)
501 except TestSnapshots.SnapLimitViolationException as e:
502 if e.failed_snapshot_number == (new_limit + 1):
503 sys.exc_clear()
504 pass
505 # then increase the limit by one and test
506 new_limit = new_limit + 1
507 self.fs.rank_asok(['config', 'set', 'mds_max_snaps_per_dir', repr(new_limit)])
508 sname = self.get_snap_name("accounts", new_limit)
509 self.create_snap_dir(sname)
510 self.delete_dir_and_snaps("accounts", new_limit)
511
512 def test_mds_max_snaps_per_dir_with_reduced_limit(self):
513 """
514 Test the newly introudced option named mds_max_snaps_per_dir
515 First create 99 directories. Then reduce the limit to 98. Then try
516 creating another directory and ensure that additional directory
517 creation fails.
518 """
519 # first test the new limit
520 new_limit = int(self.mds_max_snaps_per_dir) - 1
521 self.create_dir_and_snaps("accounts", new_limit)
522 sname = self.get_snap_name("accounts", new_limit + 1)
523 # then reduce the limit by one and test
524 new_limit = new_limit - 1
525 self.fs.rank_asok(['config', 'set', 'mds_max_snaps_per_dir', repr(new_limit)])
526 try:
527 self.create_snap_dir(sname)
528 except CommandFailedError:
529 # after reducing limit we expect the new snapshot creation to fail
530 pass
531 self.delete_dir_and_snaps("accounts", new_limit + 1)