#!/bin/bash
#
# Copyright (C) 2014 Cloudwatt <libre.licensing@cloudwatt.com>
# Copyright (C) 2014, 2015 Red Hat <contact@redhat.com>
#
# Author: Loic Dachary <loic@dachary.org>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Library Public License as published by
# the Free Software Foundation; either version 2, or (at your option)
# any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU Library Public License for more details.
#
# Pull in the build environment variables (CEPH_ROOT, ...) and the shared
# cluster test helpers (run_mon, run_osd, setup, teardown, expect_failure, ...).
source $(dirname $0)/../detect-build-env-vars.sh
source $CEPH_ROOT/qa/workunits/ceph-helpers.sh
# Driver: run every TEST_* function defined in this file (or only the ones
# given as extra arguments) against a fresh test cluster rooted in $dir,
# wrapping each one with setup/teardown from ceph-helpers.sh.
# NOTE(review): the function header and the closing brace were lost in the
# damaged copy; reconstructed to match the standard ceph-helpers pattern.
function run() {
    local dir=$1
    shift

    export CEPH_MON="127.0.0.1:7104" # git grep '\<7104\>' : there must be only one
    export CEPH_ARGS
    CEPH_ARGS+="--fsid=$(uuidgen) --auth-supported=none "
    CEPH_ARGS+="--mon-host=$CEPH_MON "

    # Default to all TEST_* functions currently defined in the shell.
    local funcs=${@:-$(set | ${SED} -n -e 's/^\(TEST_[0-9a-z_]*\) .*/\1/p')}
    for func in $funcs ; do
        setup $dir || return 1
        $func $dir || return 1
        teardown $dir || return 1
    done
}
# Verify "ceph osd crush rule create-simple": a new rule takes the given
# root with the given failure domain, duplicates are rejected with a clear
# message, and the rule can be removed again.
function TEST_crush_rule_create_simple() {
    local dir=$1
    run_mon $dir a || return 1
    # the stock replicated ruleset takes "default" and chooses over osds
    ceph --format xml osd crush rule dump replicated_ruleset | \
        egrep '<op>take</op><item>[^<]+</item><item_name>default</item_name>' | \
        grep '<op>choose_firstn</op><num>0</num><type>osd</type>' || return 1
    local ruleset=ruleset0
    local root=host1  # NOTE(review): declaration lost in damaged copy; reconstructed — verify
    ceph osd crush add-bucket $root host
    local failure_domain=osd
    ceph osd crush rule create-simple $ruleset $root $failure_domain || return 1
    # creating the same rule a second time must fail
    ceph osd crush rule create-simple $ruleset $root $failure_domain 2>&1 | \
        grep "$ruleset already exists" || return 1
    # the new rule takes $root and chooses over $failure_domain
    ceph --format xml osd crush rule dump $ruleset | \
        egrep '<op>take</op><item>[^<]+</item><item_name>'$root'</item_name>' | \
        grep '<op>choose_firstn</op><num>0</num><type>'$failure_domain'</type>' || return 1
    ceph osd crush rule rm $ruleset || return 1
}
# Verify "ceph osd crush rule dump": a single rule can be dumped by name
# (XML and JSON forms), dumping a non-existent rule fails, and the rule
# can be removed afterwards.
function TEST_crush_rule_dump() {
    local dir=$1
    run_mon $dir a || return 1
    local ruleset=ruleset1
    ceph osd crush rule create-erasure $ruleset || return 1
    # XML dump of a single rule contains its name
    expected="<rule_name>$ruleset</rule_name>"
    ceph --format xml osd crush rule dump $ruleset | grep $expected || return 1
    # JSON dump of all rules contains the rule as well
    expected='"rule_name": "'$ruleset'"'
    ceph osd crush rule dump | grep "$expected" || return 1
    # dumping an unknown rule must fail
    ! ceph osd crush rule dump non_existent_ruleset || return 1
    ceph osd crush rule rm $ruleset || return 1
}
# Verify "ceph osd crush rule rm": a created rule shows up in "rule ls"
# and disappears after removal.
function TEST_crush_rule_rm() {
    local ruleset=erasure2
    local dir=$1
    run_mon $dir a || return 1
    ceph osd crush rule create-erasure $ruleset default || return 1
    ceph osd crush rule ls | grep $ruleset || return 1
    ceph osd crush rule rm $ruleset || return 1
    ! ceph osd crush rule ls | grep $ruleset || return 1
}
# Verify "ceph osd crush rule create-erasure" with the default erasure-code
# profile (implicit and explicit), implicit re-creation of a removed default
# profile, and that a broken crushtool prevents pool creation.
function TEST_crush_rule_create_erasure() {
    local dir=$1
    run_mon $dir a || return 1
    # should have at least one OSD
    run_osd $dir 0 || return 1

    local ruleset=ruleset3
    #
    # create a new ruleset with the default profile, implicitly
    #
    ceph osd crush rule create-erasure $ruleset || return 1
    # a duplicate creation must fail
    ceph osd crush rule create-erasure $ruleset 2>&1 | \
        grep "$ruleset already exists" || return 1
    ceph --format xml osd crush rule dump $ruleset | \
        egrep '<op>take</op><item>[^<]+</item><item_name>default</item_name>' | \
        grep '<op>chooseleaf_indep</op><num>0</num><type>host</type>' || return 1
    ceph osd crush rule rm $ruleset || return 1
    ! ceph osd crush rule ls | grep $ruleset || return 1
    #
    # create a new ruleset with the default profile, explicitly
    #
    ceph osd crush rule create-erasure $ruleset default || return 1
    ceph osd crush rule ls | grep $ruleset || return 1
    ceph osd crush rule rm $ruleset || return 1
    ! ceph osd crush rule ls | grep $ruleset || return 1
    #
    # create a new ruleset and the default profile, implicitly
    #
    ceph osd erasure-code-profile rm default || return 1
    ! ceph osd erasure-code-profile ls | grep default || return 1
    ceph osd crush rule create-erasure $ruleset || return 1
    # flush the monitor log so the "profile set default" line is on disk
    CEPH_ARGS='' ceph --admin-daemon $dir/ceph-mon.a.asok log flush || return 1
    grep 'profile set default' $dir/mon.a.log || return 1
    ceph osd erasure-code-profile ls | grep default || return 1
    ceph osd crush rule rm $ruleset || return 1
    ! ceph osd crush rule ls | grep $ruleset || return 1
    #
    # verify that if the crushmap contains a bugous ruleset,
    # it will prevent the creation of a pool.
    #
    local crushtool_path_old=`ceph-conf --show-config-value crushtool`
    # make crushmap validation fail unconditionally
    ceph tell mon.\* injectargs --crushtool "false"

    expect_failure $dir "Error EINVAL" \
        ceph osd pool create mypool 1 1 erasure || return 1
}
# Assert that the crush rule named $1 reports the same "rule_id" and
# "ruleset" in "ceph osd crush rule dump" output.
# Returns 0 when they match, 1 otherwise.
function check_ruleset_id_match_rule_id() {
    local rule_name=$1  # NOTE(review): declaration lost in damaged copy; reconstructed
    rule_id=`ceph osd crush rule dump $rule_name | grep "\"rule_id\":" | awk -F ":|," '{print int($2)}'`
    ruleset_id=`ceph osd crush rule dump $rule_name | grep "\"ruleset\":"| awk -F ":|," '{print int($2)}'`
    test $ruleset_id = $rule_id || return 1
}
# Create two simple rules, then edit the decompiled crushmap so that
# rule_id != ruleset_id for the existing rules, and inject the result back.
# Relies on $root being set by the caller (shell dynamic scoping).
function generate_manipulated_rules() {
    local dir=$1
    ceph osd crush add-bucket $root host
    ceph osd crush rule create-simple test_rule1 $root osd firstn || return 1
    ceph osd crush rule create-simple test_rule2 $root osd firstn || return 1
    ceph osd getcrushmap -o $dir/original_map
    crushtool -d $dir/original_map -o $dir/decoded_original_map
    #manipulate the rulesets , to make the rule_id != ruleset_id
    ${SED} -i 's/ruleset 0/ruleset 3/' $dir/decoded_original_map
    ${SED} -i 's/ruleset 2/ruleset 0/' $dir/decoded_original_map
    ${SED} -i 's/ruleset 1/ruleset 2/' $dir/decoded_original_map

    crushtool -c $dir/decoded_original_map -o $dir/new_map
    ceph osd setcrushmap -i $dir/new_map

    ceph osd crush rule dump
}
# Verify that a rule created after the crushmap was manipulated (so that
# existing rules have rule_id != ruleset) still gets matching ids.
function TEST_crush_ruleset_match_rule_when_creating() {
    local dir=$1
    run_mon $dir a || return 1

    local root=host1  # NOTE(review): declaration lost in damaged copy; reconstructed — verify

    generate_manipulated_rules $dir

    ceph osd crush rule create-simple special_rule_simple $root osd firstn || return 1

    ceph osd crush rule dump
    #show special_rule_simple has same rule_id and ruleset_id
    check_ruleset_id_match_rule_id special_rule_simple || return 1
}
# Fill the crushmap with the maximum number of rulesets (ids 3..255) and
# verify that creating one more fails with ENOSPC.
function TEST_add_ruleset_failed() {
    local dir=$1
    run_mon $dir a || return 1

    local root=host1  # NOTE(review): declaration lost in damaged copy; reconstructed — verify

    ceph osd crush add-bucket $root host
    ceph osd crush rule create-simple test_rule1 $root osd firstn || return 1
    ceph osd crush rule create-simple test_rule2 $root osd firstn || return 1
    ceph osd getcrushmap > $dir/crushmap || return 1
    crushtool --decompile $dir/crushmap > $dir/crushmap.txt || return 1
    # append one rule per remaining ruleset id so the map is full
    # NOTE(review): the heredoc body was lost in the damaged copy and has been
    # reconstructed from the surviving "step choose" line — verify against git.
    for i in $(seq 3 255)
    do
        cat <<EOF
rule test_rule$i {
	ruleset $i
	type replicated
	min_size 1
	max_size 10
	step take $root
	step choose firstn 0 type osd
	step emit
}
EOF
    done >> $dir/crushmap.txt
    crushtool --compile $dir/crushmap.txt -o $dir/crushmap || return 1
    ceph osd setcrushmap -i $dir/crushmap || return 1
    # no ruleset id left: the next create-simple must fail with ENOSPC
    ceph osd crush rule create-simple test_rule_nospace $root osd firstn 2>&1 | grep "Error ENOSPC" || return 1
}
# Verify "ceph osd crush rename-bucket": renames an existing bucket,
# is idempotent, and fails with ENOENT for an unknown bucket.
function TEST_crush_rename_bucket() {
    local dir=$1
    run_mon $dir a || return 1

    ceph osd crush add-bucket host1 host
    ! ceph osd tree | grep host2 || return 1
    ceph osd crush rename-bucket host1 host2 || return 1
    ceph osd tree | grep host2 || return 1
    ceph osd crush rename-bucket host1 host2 || return 1 # idempotency
    ceph osd crush rename-bucket nonexistent something 2>&1 | grep "Error ENOENT" || return 1
}
# Verify that injecting an empty crushmap is rejected with EINVAL while
# OSDs exist that the map should contain.
function TEST_crush_reject_empty() {
    local dir=$1
    run_mon $dir a || return 1
    # should have at least one OSD
    run_osd $dir 0 || return 1

    local empty_map=$dir/empty_map
    :> $empty_map.txt  # NOTE(review): creation of the empty source file was lost in the damaged copy; reconstructed
    crushtool -c $empty_map.txt -o $empty_map.map || return 1
    expect_failure $dir "Error EINVAL" \
        ceph osd setcrushmap -i $empty_map.map || return 1
}
# Verify that "ceph osd crush tree --format=xml" output validates against
# the RELAX NG schema shipped with the tests.
function TEST_crush_tree() {
    local dir=$1
    run_mon $dir a || return 1

    ceph osd crush tree --format=xml | \
        $XMLSTARLET val -e -r $CEPH_ROOT/src/test/mon/osd-crush-tree.rng - || return 1
}
# NB: disable me if i am too time consuming
# Inject an empty crushmap into a 3-mon cluster, then repair the monstore
# with ceph-monstore-update-crush.sh and verify the buckets come back.
function TEST_crush_repair_faulty_crushmap() {
    local dir=$1
    local fsid=$(uuidgen)  # NOTE(review): $fsid is used below but its assignment was lost in the damaged copy; reconstructed
    MONA=127.0.0.1:7113 # git grep '\<7113\>' : there must be only one
    MONB=127.0.0.1:7114 # git grep '\<7114\>' : there must be only one
    MONC=127.0.0.1:7115 # git grep '\<7115\>' : there must be only one
    CEPH_ARGS_orig=$CEPH_ARGS
    CEPH_ARGS="--fsid=$fsid --auth-supported=none "
    CEPH_ARGS+="--mon-initial-members=a,b,c "
    CEPH_ARGS+="--mon-host=$MONA,$MONB,$MONC "
    run_mon $dir a --public-addr $MONA || return 1
    run_mon $dir b --public-addr $MONB || return 1
    run_mon $dir c --public-addr $MONC || return 1

    local empty_map=$dir/empty_map
    :> $empty_map.txt  # NOTE(review): reconstructed, see TEST_crush_reject_empty
    crushtool -c $empty_map.txt -o $empty_map.map || return 1

    local crushtool_path_old=`ceph-conf --show-config-value crushtool`
    # make crushmap validation a no-op so the empty map can be injected later
    ceph tell mon.\* injectargs --crushtool "true"

    # importing an empty crushmap should fail because the default rbd pool uses a rule
    ceph osd setcrushmap -i $empty_map.map 2>&1 | grep "Error EINVAL: the crush rule no" || return 1
    #remove the default pool rbd
    ceph osd pool delete rbd rbd --yes-i-really-really-mean-it || return 1
    #now it can be successful to set the empty crush map
    ceph osd setcrushmap -i $empty_map.map || return 1

    # should be an empty crush map without any buckets
    # NOTE(review): the tail of this retry loop was lost in the damaged copy;
    # reconstructed as wait-with-backoff until the dump shows no buckets.
    local emptied=false
    for delay in 1 2 4 8 16 32 64 128 256 ; do
        if ! test $(ceph osd crush dump --format=xml | \
                    $XMLSTARLET sel -t -m "//buckets/bucket" -v .) ; then
            emptied=true
            break
        fi
        sleep $delay
    done
    $emptied || return 1
    ceph osd crush dump --format=xml
    # bring them down, the "ceph" commands will try to hunt for other monitor in
    # vain, after mon.a is offline
    kill_daemons $dir || return 1
    # rewrite the monstore with the good crush map,
    $CEPH_ROOT/src/tools/ceph-monstore-update-crush.sh --rewrite $dir/a || return 1

    run_mon $dir a --public-addr $MONA || return 1
    run_mon $dir b --public-addr $MONB || return 1
    run_mon $dir c --public-addr $MONC || return 1
    # the buckets are back
    test $(ceph osd crush dump --format=xml | \
           $XMLSTARLET sel -t -m "//buckets/bucket" -v .) || return 1
    CEPH_ARGS=$CEPH_ARGS_orig
}
# Local Variables:
# compile-command: "cd ../.. ; make -j4 && test/mon/osd-crush.sh"
# End: