from tasks.cephfs.cephfs_test_case import CephFSTestCase
import json
import logging

log = logging.getLogger(__name__)

failure = "using old balancer; mantle failed for balancer="
success = "mantle balancer version changed: "

class TestMantle(CephFSTestCase):
    def start_mantle(self):
        self.wait_for_health_clear(timeout=30)
        self.fs.set_allow_multimds(True)
        self.fs.set_max_mds(2)
        self.wait_until_equal(lambda: len(self.fs.get_active_names()), 2, 30,
                              reject_fn=lambda v: v > 2 or v < 1)

        for m in self.fs.get_active_names():
            self.fs.mds_asok(['config', 'set', 'debug_objecter', '20'], mds_id=m)
            self.fs.mds_asok(['config', 'set', 'debug_ms', '0'], mds_id=m)
            self.fs.mds_asok(['config', 'set', 'debug_mds', '0'], mds_id=m)
            self.fs.mds_asok(['config', 'set', 'debug_mds_balancer', '5'], mds_id=m)

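    # Helper: point the filesystem at a balancer object, upload the Lua code
    # into RADOS, and assert that the MDS logs the expected failure while
    # falling back to the default balancer.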
    def push_balancer(self, obj, lua_code, expect):
        self.fs.mon_manager.raw_cluster_cmd_result('fs', 'set', self.fs.name, 'balancer', obj)
        self.fs.rados(["put", obj, "-"], stdin_data=lua_code)
        with self.assert_cluster_log(failure + obj + " " + expect):
            log.info("run a " + obj + " balancer that expects=" + expect)

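    # Setting 'balancer' with no value at all must be rejected with EINVAL;
    # a whitespace-only name is accepted by the mon but fails to load with
    # ENOENT.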
    def test_version_empty(self):
        self.start_mantle()

        expect = " : (2) No such file or directory"

        ret = self.fs.mon_manager.raw_cluster_cmd_result('fs', 'set', self.fs.name, 'balancer')
        assert(ret == 22) # EINVAL

        self.fs.mon_manager.raw_cluster_cmd_result('fs', 'set', self.fs.name, 'balancer', " ")
        with self.assert_cluster_log(failure + " " + expect): pass

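    # A balancer name that was never written into RADOS should fail to load
    # with ENOENT.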
    def test_version_not_in_rados(self):
        self.start_mantle()
        expect = failure + "ghost.lua : (2) No such file or directory"
        self.fs.mon_manager.raw_cluster_cmd_result('fs', 'set', self.fs.name, 'balancer', "ghost.lua")
        with self.assert_cluster_log(expect): pass

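    # Lua that does not compile, or that misuses the BAL_LOG(level, msg)
    # binding, should be rejected with EINVAL.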
    def test_balancer_invalid(self):
        self.start_mantle()
        expect = ": (22) Invalid argument"

        lua_code = "this is invalid lua code!"
        self.push_balancer("invalid.lua", lua_code, expect)

        lua_code = "BAL_LOG()"
        self.push_balancer("invalid_log.lua", lua_code, expect)

        lua_code = "BAL_LOG(0)"
        self.push_balancer("invalid_log_again.lua", lua_code, expect)

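    # A well-formed balancer returns a table of load targets, one per active
    # MDS (two here); loading it should log the version change.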
    def test_balancer_valid(self):
        self.start_mantle()
        lua_code = "BAL_LOG(0, \"test\")\nreturn {3, 4}"
        self.fs.mon_manager.raw_cluster_cmd_result('fs', 'set', self.fs.name, 'balancer', "valid.lua")
        self.fs.rados(["put", "valid.lua", "-"], stdin_data=lua_code)
        with self.assert_cluster_log(success + "valid.lua"):
            log.info("run a valid.lua balancer")

    def test_return_invalid(self):
        self.start_mantle()
        expect = ": (22) Invalid argument"

        lua_code = "return \"hello\""
        self.push_balancer("string.lua", lua_code, expect)

        lua_code = "return 3"
        self.push_balancer("number.lua", lua_code, expect)

        lua_code = "return {}"
        self.push_balancer("dict_empty.lua", lua_code, expect)

        lua_code = "return {\"this\", \"is\", \"a\", \"test\"}"
        self.push_balancer("dict_of_strings.lua", lua_code, expect)

        lua_code = "return {3, \"test\"}"
        self.push_balancer("dict_of_mixed.lua", lua_code, expect)

        lua_code = "return {3}"
        self.push_balancer("not_enough_numbers.lua", lua_code, expect)

        lua_code = "return {3, 4, 5, 6, 7, 8, 9}"
        self.push_balancer("too_many_numbers.lua", lua_code, expect)

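    # With every OSD down and out, the MDS cannot fetch the balancer object
    # from RADOS, so loading it should fail with ETIMEDOUT.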
    def test_dead_osd(self):
        self.start_mantle()
        expect = " : (110) Connection timed out"

        # kill the OSDs so that the balancer pull from RADOS times out
        osd_map = json.loads(self.fs.mon_manager.raw_cluster_cmd('osd', 'dump', '--format=json-pretty'))
        for i in range(0, len(osd_map['osds'])):
            self.fs.mon_manager.raw_cluster_cmd_result('osd', 'down', str(i))
            self.fs.mon_manager.raw_cluster_cmd_result('osd', 'out', str(i))

        # trigger a pull from RADOS
        self.fs.mon_manager.raw_cluster_cmd_result('fs', 'set', self.fs.name, 'balancer', "valid.lua")

        # make the timeout a little longer since dead OSDs spam ceph -w
        with self.assert_cluster_log(failure + "valid.lua" + expect, timeout=30):
            log.info("run a balancer that should timeout")

        # bring the OSDs back in so the cluster can recover
        for i in range(0, len(osd_map['osds'])):
            self.fs.mon_manager.raw_cluster_cmd_result('osd', 'in', str(i))