import time
import signal
import logging
import operator
from random import randint, choice

from tasks.cephfs.cephfs_test_case import CephFSTestCase
from teuthology.exceptions import CommandFailedError
from tasks.cephfs.fuse_mount import FuseMount

log = logging.getLogger(__name__)

class TestClusterAffinity(CephFSTestCase):
    CLIENTS_REQUIRED = 0
    MDSS_REQUIRED = 4

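    # Helper pattern for these affinity tests: build a "target" list describing
    # the expected attributes (join_fscid, state, ...) for each MDS, then poll
    # the FSMap until every daemon matches its target entry.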
    def _verify_join_fs(self, target, status=None):
        if status is None:
            status = self.fs.wait_for_daemons(timeout=30)
            log.debug("%s", status)
        target = sorted(target, key=operator.itemgetter('name'))
        log.info("target = %s", target)
        current = list(status.get_all())
        current = sorted(current, key=operator.itemgetter('name'))
        log.info("current = %s", current)
        self.assertEqual(len(current), len(target))
        for i in range(len(current)):
            for attr in target[i]:
                self.assertIn(attr, current[i])
                self.assertEqual(target[i][attr], current[i][attr])

    def _change_target_state(self, state, name, changes):
        for entity in state:
            if entity['name'] == name:
                for k, v in changes.items():
                    entity[k] = v
                return
        self.fail("no entity")

    def _verify_init(self):
        status = self.fs.status()
        log.info("status = {0}".format(status))
        target = [{'join_fscid': -1, 'name': info['name']} for info in status.get_all()]
        self._verify_join_fs(target, status=status)
        return (status, target)

    def _reach_target(self, target):
        def takeover():
            try:
                self._verify_join_fs(target)
                return True
            except AssertionError as e:
                log.debug("%s", e)
                return False
        self.wait_until_true(takeover, 30)

    def test_join_fs_runtime(self):
        """
        That setting mds_join_fs at runtime affects the cluster layout.
        """
        status, target = self._verify_init()
        standbys = list(status.get_standbys())
        self.config_set('mds.'+standbys[0]['name'], 'mds_join_fs', 'cephfs')
        self._change_target_state(target, standbys[0]['name'], {'join_fscid': self.fs.id, 'state': 'up:active'})
        self._reach_target(target)

    def test_join_fs_unset(self):
        """
        That unsetting mds_join_fs will cause failover if another high-affinity standby exists.
        """
        status, target = self._verify_init()
        standbys = list(status.get_standbys())
        names = (standbys[0]['name'], standbys[1]['name'])
        self.config_set('mds.'+names[0], 'mds_join_fs', 'cephfs')
        self.config_set('mds.'+names[1], 'mds_join_fs', 'cephfs')
        self._change_target_state(target, names[0], {'join_fscid': self.fs.id})
        self._change_target_state(target, names[1], {'join_fscid': self.fs.id})
        self._reach_target(target)
        status = self.fs.status()
        active = self.fs.get_active_names(status=status)[0]
        self.assertIn(active, names)
        self.config_rm('mds.'+active, 'mds_join_fs')
        self._change_target_state(target, active, {'join_fscid': -1})
        new_active = (set(names) - set((active,))).pop()
        self._change_target_state(target, new_active, {'state': 'up:active'})
        self._reach_target(target)

    def test_join_fs_drop(self):
        """
        That unsetting mds_join_fs will not cause failover if no high-affinity standby exists.
        """
        status, target = self._verify_init()
        standbys = list(status.get_standbys())
        active = standbys[0]['name']
        self.config_set('mds.'+active, 'mds_join_fs', 'cephfs')
        self._change_target_state(target, active, {'join_fscid': self.fs.id, 'state': 'up:active'})
        self._reach_target(target)
        self.config_rm('mds.'+active, 'mds_join_fs')
        self._change_target_state(target, active, {'join_fscid': -1})
        self._reach_target(target)

    def test_join_fs_vanilla(self):
        """
        That a vanilla standby is preferred over others with mds_join_fs set to another fs.
        """
        fs2 = self.mds_cluster.newfs(name="cephfs2")
        status, target = self._verify_init()
        active = self.fs.get_active_names(status=status)[0]
        standbys = [info['name'] for info in status.get_standbys()]
        victim = standbys.pop()
        # Set a bogus fs on the others
        for mds in standbys:
            self.config_set('mds.'+mds, 'mds_join_fs', 'cephfs2')
            self._change_target_state(target, mds, {'join_fscid': fs2.id})
        self.fs.rank_fail()
        self._change_target_state(target, victim, {'state': 'up:active'})
        self._reach_target(target)
        status = self.fs.status()
        active = self.fs.get_active_names(status=status)[0]
        self.assertEqual(active, victim)

    def test_join_fs_last_resort(self):
        """
        That a standby with mds_join_fs set to another fs is still used if necessary.
        """
        status, target = self._verify_init()
        standbys = [info['name'] for info in status.get_standbys()]
        for mds in standbys:
            self.config_set('mds.'+mds, 'mds_join_fs', 'cephfs2')
        fs2 = self.mds_cluster.newfs(name="cephfs2")
        for mds in standbys:
            self._change_target_state(target, mds, {'join_fscid': fs2.id})
        self.fs.rank_fail()
        status = self.fs.status()
        ranks = list(self.fs.get_ranks(status=status))
        self.assertEqual(len(ranks), 1)
        self.assertIn(ranks[0]['name'], standbys)
        # Note that we would expect the former active to reclaim its spot, but
        # we're not testing that here.

    def test_join_fs_steady(self):
        """
        That a sole MDS with mds_join_fs set will come back as active eventually even after failover.
        """
        status, target = self._verify_init()
        active = self.fs.get_active_names(status=status)[0]
        self.config_set('mds.'+active, 'mds_join_fs', 'cephfs')
        self._change_target_state(target, active, {'join_fscid': self.fs.id})
        self._reach_target(target)
        self.fs.rank_fail()
        self._reach_target(target)

    def test_join_fs_standby_replay(self):
        """
        That a standby-replay daemon with weak affinity is replaced by a stronger one.
        """
        status, target = self._verify_init()
        standbys = [info['name'] for info in status.get_standbys()]
        self.config_set('mds.'+standbys[0], 'mds_join_fs', 'cephfs')
        self._change_target_state(target, standbys[0], {'join_fscid': self.fs.id, 'state': 'up:active'})
        self._reach_target(target)
        self.fs.set_allow_standby_replay(True)
        status = self.fs.status()
        standbys = [info['name'] for info in status.get_standbys()]
        self.config_set('mds.'+standbys[0], 'mds_join_fs', 'cephfs')
        self._change_target_state(target, standbys[0], {'join_fscid': self.fs.id, 'state': 'up:standby-replay'})
        self._reach_target(target)

class TestClusterResize(CephFSTestCase):
    CLIENTS_REQUIRED = 0
    MDSS_REQUIRED = 3

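    # The grow()/shrink() helpers set max_mds and wait for the daemons to
    # settle, so each call below implicitly verifies the resize completed.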
    def test_grow(self):
        """
        That the MDS cluster grows after increasing max_mds.
        """

        # Need all my standbys up as well as the active daemons
        # self.wait_for_daemon_start() necessary?

        self.fs.grow(2)
        self.fs.grow(3)


    def test_shrink(self):
        """
        That the MDS cluster shrinks automatically after decreasing max_mds.
        """

        self.fs.grow(3)
        self.fs.shrink(1)

    def test_up_less_than_max(self):
        """
        That a health warning is generated when max_mds is greater than active count.
        """

        status = self.fs.status()
        mdss = [info['gid'] for info in status.get_all()]
        self.fs.set_max_mds(len(mdss)+1)
        self.wait_for_health("MDS_UP_LESS_THAN_MAX", 30)
        self.fs.shrink(2)
        self.wait_for_health_clear(30)

    def test_down_health(self):
        """
        That marking a FS down does not generate a health warning
        """

        self.fs.set_down()
        try:
            self.wait_for_health("", 30)
            raise RuntimeError("got health warning?")
        except RuntimeError as e:
            if "Timed out after" in str(e):
                pass
            else:
                raise

    def test_down_twice(self):
        """
        That marking a FS down twice does not wipe old_max_mds.
        """

        self.fs.grow(2)
        self.fs.set_down()
        self.fs.wait_for_daemons()
        self.fs.set_down(False)
        self.assertEqual(self.fs.get_var("max_mds"), 2)
        self.fs.wait_for_daemons(timeout=60)

    def test_down_grow(self):
        """
        That setting max_mds undoes down.
        """

        self.fs.set_down()
        self.fs.wait_for_daemons()
        self.fs.grow(2)
        self.fs.wait_for_daemons()

    def test_down(self):
        """
        That down setting toggles and sets max_mds appropriately.
        """

        self.fs.set_down()
        self.fs.wait_for_daemons()
        self.assertEqual(self.fs.get_var("max_mds"), 0)
        self.fs.set_down(False)
        self.assertEqual(self.fs.get_var("max_mds"), 1)
        self.fs.wait_for_daemons()
        self.assertEqual(self.fs.get_var("max_mds"), 1)

    def test_hole(self):
        """
        Test that a hole cannot be created in the FS ranks.
        """

        fscid = self.fs.id

        self.fs.grow(2)

        # Now add a delay which should slow down how quickly rank 1 stops
        self.config_set('mds', 'ms_inject_delay_max', '5.0')
        self.config_set('mds', 'ms_inject_delay_probability', '1.0')
        self.fs.set_max_mds(1)
        log.info("status = {0}".format(self.fs.status()))

        # Don't wait for rank 1 to stop
        self.fs.set_max_mds(3)
        log.info("status = {0}".format(self.fs.status()))

        # Now check that the mons didn't try to promote a standby to rank 2
        self.fs.set_max_mds(2)
        status = self.fs.status()
        try:
            status = self.fs.wait_for_daemons(timeout=90)
            ranks = set([info['rank'] for info in status.get_ranks(fscid)])
            self.assertEqual(ranks, set([0, 1]))
        finally:
            log.info("status = {0}".format(status))

    def test_thrash(self):
        """
        Test that thrashing max_mds does not fail.
        """

        max_mds = 2
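        # Cycle max_mds through 1..3 a hundred times without waiting for the
        # cluster to settle in between; the final wait checks it converges.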
        for i in range(0, 100):
            self.fs.set_max_mds(max_mds)
            max_mds = (max_mds+1)%3+1

        self.fs.wait_for_daemons(timeout=90)

class TestFailover(CephFSTestCase):
    CLIENTS_REQUIRED = 1
    MDSS_REQUIRED = 2

    def test_repeated_boot(self):
        """
        That multiple boot messages do not result in the MDS getting evicted.
        """

        interval = 10
        self.config_set("mon", "paxos_propose_interval", interval)

        mds = choice(list(self.fs.status().get_all()))

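        # The cluster log must not report the daemon as restarted: repeated
        # boot beacons should not get the MDS evicted and replaced.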
        with self.assert_cluster_log(f"daemon mds.{mds['name']} restarted", present=False):
            # Avoid a beacon to the monitors with down:dne by restarting:
            self.fs.mds_fail(mds_id=mds['name'])
            # `ceph mds fail` won't return until the FSMap is committed, double-check:
            self.assertIsNone(self.fs.status().get_mds_gid(mds['gid']))
            time.sleep(2) # for mds to restart and accept asok commands
            status1 = self.fs.mds_asok(['status'], mds_id=mds['name'])
            time.sleep(interval*1.5)
            status2 = self.fs.mds_asok(['status'], mds_id=mds['name'])
            self.assertEqual(status1['id'], status2['id'])

    def test_simple(self):
        """
        That when the active MDS is killed, a standby MDS is promoted into
        its rank after the grace period.

        This is just a simple unit test, the harder cases are covered
        in thrashing tests.
        """

        (original_active, ) = self.fs.get_active_names()
        original_standbys = self.mds_cluster.get_standby_daemons()

        # Kill the rank 0 daemon's physical process
        self.fs.mds_stop(original_active)

        # Wait until the monitor promotes his replacement
        def promoted():
            ranks = list(self.fs.get_ranks())
            return len(ranks) > 0 and ranks[0]['name'] in original_standbys

        log.info("Waiting for promotion of one of the original standbys {0}".format(
            original_standbys))
        self.wait_until_true(promoted, timeout=self.fs.beacon_timeout)

        # Start the original rank 0 daemon up again, see that he becomes a standby
        self.fs.mds_restart(original_active)
        self.wait_until_true(
            lambda: original_active in self.mds_cluster.get_standby_daemons(),
            timeout=60 # Approximately long enough for MDS to start and mon to notice
        )

    def test_client_abort(self):
        """
        That a client will respect fuse_require_active_mds and error out
        when the cluster appears to be unavailable.
        """

        if not isinstance(self.mount_a, FuseMount):
            self.skipTest("Requires FUSE client to inject client metadata")

        require_active = self.fs.get_config("fuse_require_active_mds", service_type="mon").lower() == "true"
        if not require_active:
            self.skipTest("fuse_require_active_mds is not set")

        # Check it's not laggy to begin with
        (original_active, ) = self.fs.get_active_names()
        self.assertNotIn("laggy_since", self.fs.status().get_mds(original_active))

        self.mounts[0].umount_wait()

        # Control: that we can mount and unmount normally while the cluster is healthy
        self.mounts[0].mount_wait()
        self.mounts[0].umount_wait()

        # Stop the daemon processes
        self.fs.mds_stop()

        # Wait for everyone to go laggy
        def laggy():
            mdsmap = self.fs.get_mds_map()
            for info in mdsmap['info'].values():
                if "laggy_since" not in info:
                    return False

            return True

        self.wait_until_true(laggy, self.fs.beacon_timeout)
        with self.assertRaises(CommandFailedError):
            self.mounts[0].mount_wait()

    def test_standby_count_wanted(self):
        """
        That cluster health warnings are generated when insufficient standbys are available.
        """

        # Need all my standbys up as well as the active daemons
        self.wait_for_daemon_start()

        standbys = self.mds_cluster.get_standby_daemons()
        self.assertGreaterEqual(len(standbys), 1)
        self.fs.mon_manager.raw_cluster_cmd('fs', 'set', self.fs.name, 'standby_count_wanted', str(len(standbys)))

        # Kill a standby and check for warning
        victim = standbys.pop()
        self.fs.mds_stop(victim)
        self.wait_for_health("MDS_INSUFFICIENT_STANDBY", self.fs.beacon_timeout)

        # restart the standby, see that he becomes a standby, check health clears
        self.fs.mds_restart(victim)
        self.wait_until_true(
            lambda: victim in self.mds_cluster.get_standby_daemons(),
            timeout=60 # Approximately long enough for MDS to start and mon to notice
        )
        self.wait_for_health_clear(timeout=30)

        # Set it one greater than standbys ever seen
        standbys = self.mds_cluster.get_standby_daemons()
        self.assertGreaterEqual(len(standbys), 1)
        self.fs.mon_manager.raw_cluster_cmd('fs', 'set', self.fs.name, 'standby_count_wanted', str(len(standbys)+1))
        self.wait_for_health("MDS_INSUFFICIENT_STANDBY", self.fs.beacon_timeout)

        # Set it to 0
        self.fs.mon_manager.raw_cluster_cmd('fs', 'set', self.fs.name, 'standby_count_wanted', '0')
        self.wait_for_health_clear(timeout=30)

    def test_discontinuous_mdsmap(self):
        """
        That a discontinuous mdsmap does not affect failover.
        See http://tracker.ceph.com/issues/24856.
        """
        self.fs.set_max_mds(2)
        status = self.fs.wait_for_daemons()

        self.mount_a.umount_wait()

        monc_timeout = float(self.fs.get_config("mon_client_ping_timeout", service_type="mds"))

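        # Freeze rank 0 so the mons cannot fail it, then SIGSTOP it so it goes
        # laggy; when it is resumed it will receive an mdsmap that skips epochs.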
        mds_0 = self.fs.get_rank(rank=0, status=status)
        self.fs.rank_freeze(True, rank=0) # prevent failover
        self.fs.rank_signal(signal.SIGSTOP, rank=0, status=status)
        self.wait_until_true(
            lambda: "laggy_since" in self.fs.get_rank(),
            timeout=self.fs.beacon_timeout
        )

        self.fs.rank_fail(rank=1)
        self.fs.wait_for_state('up:resolve', rank=1, timeout=30)

        # Make sure mds_0's monitor connection gets reset
        time.sleep(monc_timeout * 2)

        # Continue rank 0, it will get a discontinuous mdsmap
        self.fs.rank_signal(signal.SIGCONT, rank=0)
        self.wait_until_true(
            lambda: "laggy_since" not in self.fs.get_rank(rank=0),
            timeout=self.fs.beacon_timeout
        )

        # mds.b will be stuck at 'reconnect' state if snapserver gets confused
        # by discontinuous mdsmap
        self.fs.wait_for_state('up:active', rank=1, timeout=30)
        self.assertEqual(mds_0['gid'], self.fs.get_rank(rank=0)['gid'])
        self.fs.rank_freeze(False, rank=0)

    def test_connect_bootstrapping(self):
        self.config_set("mds", "mds_sleep_rank_change", 10000000.0)
        self.config_set("mds", "mds_connect_bootstrapping", True)
        self.fs.set_max_mds(2)
        self.fs.wait_for_daemons()
        self.fs.rank_fail(rank=0)
        # rank 0 will get stuck in up:resolve, see https://tracker.ceph.com/issues/53194
        self.fs.wait_for_daemons()


class TestStandbyReplay(CephFSTestCase):
    CLIENTS_REQUIRED = 0
    MDSS_REQUIRED = 4

    def _confirm_no_replay(self):
        status = self.fs.status()
        _ = len(list(status.get_standbys()))
        self.assertEqual(0, len(list(self.fs.get_replays(status=status))))
        return status

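    # Confirm that each active rank has at most one standby-replay follower
    # (exactly one when full=True), retrying briefly to allow promotion.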
    def _confirm_single_replay(self, full=True, status=None, retries=3):
        status = self.fs.wait_for_daemons(status=status)
        ranks = sorted(self.fs.get_mds_map(status=status)['in'])
        replays = list(self.fs.get_replays(status=status))
        checked_replays = set()
        for rank in ranks:
            has_replay = False
            for replay in replays:
                if replay['rank'] == rank:
                    self.assertFalse(has_replay)
                    has_replay = True
                    checked_replays.add(replay['gid'])
            if full and not has_replay:
                if retries <= 0:
                    raise RuntimeError("rank "+str(rank)+" has no standby-replay follower")
                else:
                    retries = retries-1
                    time.sleep(2)
        self.assertEqual(checked_replays, set(info['gid'] for info in replays))
        return status

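    # After a failure, the daemon taking over a rank should be its former
    # standby-replay if one existed; otherwise it must come from the standby
    # pool (or be a newly restarted daemon).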
    def _check_replay_takeover(self, status, rank=0):
        replay = self.fs.get_replay(rank=rank, status=status)
        new_status = self.fs.wait_for_daemons()
        new_active = self.fs.get_rank(rank=rank, status=new_status)
        if replay:
            self.assertEqual(replay['gid'], new_active['gid'])
        else:
            # double check takeover came from a standby (or some new daemon via restart)
            found = False
            for info in status.get_standbys():
                if info['gid'] == new_active['gid']:
                    found = True
                    break
            if not found:
                for info in status.get_all():
                    self.assertNotEqual(info['gid'], new_active['gid'])
        return new_status

    def test_standby_replay_singleton(self):
        """
        That only one MDS becomes standby-replay.
        """

        self._confirm_no_replay()
        self.fs.set_allow_standby_replay(True)
        time.sleep(30)
        self._confirm_single_replay()

    def test_standby_replay_damaged(self):
        """
        That a standby-replay daemon can cause the rank to go damaged correctly.
        """

        self._confirm_no_replay()
        self.config_set("mds", "mds_standby_replay_damaged", True)
        self.fs.set_allow_standby_replay(True)
        self.wait_until_true(
            lambda: len(self.fs.get_damaged()) > 0,
            timeout=30
        )
        status = self.fs.status()
        self.assertListEqual([], list(self.fs.get_ranks(status=status)))
        self.assertListEqual([0], self.fs.get_damaged(status=status))

    def test_standby_replay_disable(self):
        """
        That turning off allow_standby_replay fails all standby-replay daemons.
        """

        self._confirm_no_replay()
        self.fs.set_allow_standby_replay(True)
        time.sleep(30)
        self._confirm_single_replay()
        self.fs.set_allow_standby_replay(False)
        self._confirm_no_replay()

    def test_standby_replay_singleton_fail(self):
        """
        That failures don't violate singleton constraint.
        """

        self._confirm_no_replay()
        self.fs.set_allow_standby_replay(True)
        status = self._confirm_single_replay()

        for i in range(10):
            time.sleep(randint(1, 5))
            self.fs.rank_restart(status=status)
            status = self._check_replay_takeover(status)
            status = self._confirm_single_replay(status=status)

        for i in range(10):
            time.sleep(randint(1, 5))
            self.fs.rank_fail()
            status = self._check_replay_takeover(status)
            status = self._confirm_single_replay(status=status)

    def test_standby_replay_singleton_fail_multimds(self):
        """
        That failures don't violate singleton constraint with multiple actives.
        """

        status = self._confirm_no_replay()
        new_max_mds = randint(2, len(list(status.get_standbys())))
        self.fs.set_max_mds(new_max_mds)
        self.fs.wait_for_daemons() # wait for actives to come online!
        self.fs.set_allow_standby_replay(True)
        status = self._confirm_single_replay(full=False)

        for i in range(10):
            time.sleep(randint(1, 5))
            victim = randint(0, new_max_mds-1)
            self.fs.rank_restart(rank=victim, status=status)
            status = self._check_replay_takeover(status, rank=victim)
            status = self._confirm_single_replay(status=status, full=False)

        for i in range(10):
            time.sleep(randint(1, 5))
            victim = randint(0, new_max_mds-1)
            self.fs.rank_fail(rank=victim)
            status = self._check_replay_takeover(status, rank=victim)
            status = self._confirm_single_replay(status=status, full=False)

    def test_standby_replay_failure(self):
        """
        That the failure of a standby-replay daemon happens cleanly
        and doesn't interrupt anything else.
        """

        status = self._confirm_no_replay()
        self.fs.set_max_mds(1)
        self.fs.set_allow_standby_replay(True)
        status = self._confirm_single_replay()

        for i in range(10):
            time.sleep(randint(1, 5))
            victim = self.fs.get_replay(status=status)
            self.fs.mds_restart(mds_id=victim['name'])
            status = self._confirm_single_replay(status=status)

    def test_standby_replay_prepare_beacon(self):
        """
        That MDSMonitor::prepare_beacon handles standby-replay daemons
        correctly without removing the standby. (Note, usually a standby-replay
        beacon will just be replied to by MDSMonitor::preprocess_beacon.)
        """

        status = self._confirm_no_replay()
        self.fs.set_max_mds(1)
        self.fs.set_allow_standby_replay(True)
        status = self._confirm_single_replay()
        replays = list(status.get_replays(self.fs.id))
        self.assertEqual(len(replays), 1)
        self.config_set('mds.'+replays[0]['name'], 'mds_inject_health_dummy', True)
        time.sleep(10) # for something not to happen...
        status = self._confirm_single_replay()
        replays2 = list(status.get_replays(self.fs.id))
        self.assertEqual(replays[0]['gid'], replays2[0]['gid'])

    def test_rank_stopped(self):
        """
        That when a rank is STOPPED, standby replays for
        that rank get torn down.
        """

        status = self._confirm_no_replay()
        standby_count = len(list(status.get_standbys()))
        self.fs.set_max_mds(2)
        self.fs.set_allow_standby_replay(True)
        status = self._confirm_single_replay()

        self.fs.set_max_mds(1) # stop rank 1

        status = self._confirm_single_replay()
        self.assertEqual(standby_count, len(list(status.get_standbys())))


class TestMultiFilesystems(CephFSTestCase):
    CLIENTS_REQUIRED = 2
    MDSS_REQUIRED = 4

    # We'll create our own filesystems and start our own daemons
    REQUIRE_FILESYSTEM = False

    def setUp(self):
        super(TestMultiFilesystems, self).setUp()
        self.mds_cluster.mon_manager.raw_cluster_cmd("fs", "flag", "set",
            "enable_multiple", "true",
            "--yes-i-really-mean-it")

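    # Create two filesystems, restart the MDS daemons, and grant every client
    # rw caps on both data pools so either filesystem can be mounted.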
    def _setup_two(self):
        fs_a = self.mds_cluster.newfs(name="alpha")
        fs_b = self.mds_cluster.newfs(name="bravo")

        self.mds_cluster.mds_restart()

        # Wait for both filesystems to go healthy
        fs_a.wait_for_daemons()
        fs_b.wait_for_daemons()

        # Reconfigure client auth caps
        for mount in self.mounts:
            self.mds_cluster.mon_manager.raw_cluster_cmd_result(
                'auth', 'caps', "client.{0}".format(mount.client_id),
                'mds', 'allow',
                'mon', 'allow r',
                'osd', 'allow rw pool={0}, allow rw pool={1}'.format(
                    fs_a.get_data_pool_name(), fs_b.get_data_pool_name()))

        return fs_a, fs_b

    def test_clients(self):
        fs_a, fs_b = self._setup_two()

        # Mount a client on fs_a
        self.mount_a.mount_wait(cephfs_name=fs_a.name)
        self.mount_a.write_n_mb("pad.bin", 1)
        self.mount_a.write_n_mb("test.bin", 2)
        a_created_ino = self.mount_a.path_to_ino("test.bin")
        self.mount_a.create_files()

        # Mount a client on fs_b
        self.mount_b.mount_wait(cephfs_name=fs_b.name)
        self.mount_b.write_n_mb("test.bin", 1)
        b_created_ino = self.mount_b.path_to_ino("test.bin")
        self.mount_b.create_files()

        # Check that a non-default filesystem mount survives an MDS
        # failover (i.e. that map subscription is continuous, not
        # just the first time), reproduces #16022
        old_fs_b_mds = fs_b.get_active_names()[0]
        self.mds_cluster.mds_stop(old_fs_b_mds)
        self.mds_cluster.mds_fail(old_fs_b_mds)
        fs_b.wait_for_daemons()
        background = self.mount_b.write_background()
        # Raise exception if the write doesn't finish (i.e. if client
        # has not kept up with MDS failure)
        try:
            self.wait_until_true(lambda: background.finished, timeout=30)
        except RuntimeError:
            # The mount is stuck, we'll have to force it to fail cleanly
            background.stdin.close()
            self.mount_b.umount_wait(force=True)
            raise

        self.mount_a.umount_wait()
        self.mount_b.umount_wait()

        # See that the client's files went into the correct pool
        self.assertTrue(fs_a.data_objects_present(a_created_ino, 1024 * 1024))
        self.assertTrue(fs_b.data_objects_present(b_created_ino, 1024 * 1024))

    def test_standby(self):
        fs_a, fs_b = self._setup_two()

        # Assert that the remaining two MDS daemons are now standbys
        a_daemons = fs_a.get_active_names()
        b_daemons = fs_b.get_active_names()
        self.assertEqual(len(a_daemons), 1)
        self.assertEqual(len(b_daemons), 1)
        original_a = a_daemons[0]
        original_b = b_daemons[0]
        expect_standby_daemons = set(self.mds_cluster.mds_ids) - (set(a_daemons) | set(b_daemons))

        # Need all my standbys up as well as the active daemons
        self.wait_for_daemon_start()
        self.assertEqual(expect_standby_daemons, self.mds_cluster.get_standby_daemons())

        # Kill fs_a's active MDS, see a standby take over
        self.mds_cluster.mds_stop(original_a)
        self.mds_cluster.mon_manager.raw_cluster_cmd("mds", "fail", original_a)
        self.wait_until_equal(lambda: len(fs_a.get_active_names()), 1, 30,
                              reject_fn=lambda v: v > 1)
        # Assert that it's a *different* daemon that has now appeared in the map for fs_a
        self.assertNotEqual(fs_a.get_active_names()[0], original_a)

        # Kill fs_b's active MDS, see a standby take over
        self.mds_cluster.mds_stop(original_b)
        self.mds_cluster.mon_manager.raw_cluster_cmd("mds", "fail", original_b)
        self.wait_until_equal(lambda: len(fs_b.get_active_names()), 1, 30,
                              reject_fn=lambda v: v > 1)
        # Assert that it's a *different* daemon that has now appeared in the map for fs_b
        self.assertNotEqual(fs_b.get_active_names()[0], original_b)

        # Both of the original active daemons should be gone, and all standbys used up
        self.assertEqual(self.mds_cluster.get_standby_daemons(), set())

        # Restart the ones I killed, see them reappear as standbys
        self.mds_cluster.mds_restart(original_a)
        self.mds_cluster.mds_restart(original_b)
        self.wait_until_true(
            lambda: {original_a, original_b} == self.mds_cluster.get_standby_daemons(),
            timeout=30
        )

    def test_grow_shrink(self):
        # Usual setup...
        fs_a, fs_b = self._setup_two()

        # Increase max_mds on fs_b, see a standby take up the role
        fs_b.set_max_mds(2)
        self.wait_until_equal(lambda: len(fs_b.get_active_names()), 2, 30,
                              reject_fn=lambda v: v > 2 or v < 1)

        # Increase max_mds on fs_a, see a standby take up the role
        fs_a.set_max_mds(2)
        self.wait_until_equal(lambda: len(fs_a.get_active_names()), 2, 30,
                              reject_fn=lambda v: v > 2 or v < 1)

        # Shrink fs_b back to 1, see a daemon go back to standby
        fs_b.set_max_mds(1)
        self.wait_until_equal(lambda: len(fs_b.get_active_names()), 1, 30,
                              reject_fn=lambda v: v > 2 or v < 1)

        # Grow fs_a up to 3, see the former fs_b daemon join it.
        fs_a.set_max_mds(3)
        self.wait_until_equal(lambda: len(fs_a.get_active_names()), 3, 60,
                              reject_fn=lambda v: v > 3 or v < 2)