from io import StringIO

from tasks.cephfs.cephfs_test_case import CephFSTestCase
import json
import logging

log = logging.getLogger(__name__)

# Cluster log messages the MDS emits when a Mantle (Lua) balancer fails to
# load and when one is loaded successfully.
failure = "using old balancer; mantle failed for balancer="
success = "mantle balancer version changed: "

class TestMantle(CephFSTestCase):
    def start_mantle(self):
        # Run with two active MDS ranks and raise the MDS balancer debug level.
        self.wait_for_health_clear(timeout=30)
        self.fs.set_max_mds(2)
        self.wait_until_equal(lambda: len(self.fs.get_active_names()), 2, 30,
                              reject_fn=lambda v: v > 2 or v < 1)

        for m in self.fs.get_active_names():
            self.fs.mds_asok(['config', 'set', 'debug_objecter', '20'], mds_id=m)
            self.fs.mds_asok(['config', 'set', 'debug_ms', '0'], mds_id=m)
            self.fs.mds_asok(['config', 'set', 'debug_mds', '0'], mds_id=m)
            self.fs.mds_asok(['config', 'set', 'debug_mds_balancer', '5'], mds_id=m)

    def push_balancer(self, obj, lua_code, expect):
        # Point the filesystem at the balancer object, upload the Lua code to
        # RADOS, and expect the balancer load to fail with the given error.
        self.fs.mon_manager.raw_cluster_cmd_result('fs', 'set', self.fs.name, 'balancer', obj)
        self.fs.radosm(["put", obj, "-"], stdin=StringIO(lua_code))
        with self.assert_cluster_log(failure + obj + " " + expect):
            log.info("run a " + obj + " balancer that expects=" + expect)

    def test_version_empty(self):
        self.start_mantle()
        expect = " : (2) No such file or directory"

        ret = self.fs.mon_manager.raw_cluster_cmd_result('fs', 'set', self.fs.name, 'balancer')
        assert(ret == 22) # EINVAL

        self.fs.mon_manager.raw_cluster_cmd_result('fs', 'set', self.fs.name, 'balancer', " ")
        with self.assert_cluster_log(failure + " " + expect): pass

    def test_version_not_in_rados(self):
        self.start_mantle()
        expect = failure + "ghost.lua : (2) No such file or directory"
        self.fs.mon_manager.raw_cluster_cmd_result('fs', 'set', self.fs.name, 'balancer', "ghost.lua")
        with self.assert_cluster_log(expect): pass

    def test_balancer_invalid(self):
        self.start_mantle()
        expect = ": (22) Invalid argument"

        lua_code = "this is invalid lua code!"
        self.push_balancer("invalid.lua", lua_code, expect)

        lua_code = "BAL_LOG()"
        self.push_balancer("invalid_log.lua", lua_code, expect)

        lua_code = "BAL_LOG(0)"
        self.push_balancer("invalid_log_again.lua", lua_code, expect)

    def test_balancer_valid(self):
        self.start_mantle()
        # a minimal valid balancer: log something and return one load value
        # per active MDS rank (two here)
        lua_code = "BAL_LOG(0, \"test\")\nreturn {3, 4}"
        self.fs.mon_manager.raw_cluster_cmd_result('fs', 'set', self.fs.name, 'balancer', "valid.lua")
        self.fs.radosm(["put", "valid.lua", "-"], stdin=StringIO(lua_code))
        with self.assert_cluster_log(success + "valid.lua"):
            log.info("run a valid.lua balancer")

    def test_return_invalid(self):
        self.start_mantle()
        expect = ": (22) Invalid argument"

        # anything other than a table of numbers with one entry per active
        # MDS rank (two here) should be rejected with EINVAL
        lua_code = "return \"hello\""
        self.push_balancer("string.lua", lua_code, expect)

        lua_code = "return 3"
        self.push_balancer("number.lua", lua_code, expect)

        lua_code = "return {}"
        self.push_balancer("dict_empty.lua", lua_code, expect)

        lua_code = "return {\"this\", \"is\", \"a\", \"test\"}"
        self.push_balancer("dict_of_strings.lua", lua_code, expect)

        lua_code = "return {3, \"test\"}"
        self.push_balancer("dict_of_mixed.lua", lua_code, expect)

        lua_code = "return {3}"
        self.push_balancer("not_enough_numbers.lua", lua_code, expect)

        lua_code = "return {3, 4, 5, 6, 7, 8, 9}"
        self.push_balancer("too_many_numbers.lua", lua_code, expect)

    def test_dead_osd(self):
        self.start_mantle()
        expect = " : (110) Connection timed out"

        # kill the OSDs so that the balancer pull from RADOS times out
        osd_map = json.loads(self.fs.mon_manager.raw_cluster_cmd('osd', 'dump', '--format=json-pretty'))
        for i in range(0, len(osd_map['osds'])):
            self.fs.mon_manager.raw_cluster_cmd_result('osd', 'down', str(i))
            self.fs.mon_manager.raw_cluster_cmd_result('osd', 'out', str(i))

        # trigger a pull from RADOS
        self.fs.mon_manager.raw_cluster_cmd_result('fs', 'set', self.fs.name, 'balancer', "valid.lua")

        # make the timeout a little longer since dead OSDs spam ceph -w
        with self.assert_cluster_log(failure + "valid.lua" + expect, timeout=30):
            log.info("run a balancer that should timeout")

        # cleanup
        for i in range(0, len(osd_map['osds'])):
            self.fs.mon_manager.raw_cluster_cmd_result('osd', 'in', str(i))