# Copyright (C) 2014 Cloudwatt <libre.licensing@cloudwatt.com>
# Copyright (C) 2014, 2015 Red Hat <contact@redhat.com>
#
# Author: Loic Dachary <loic@dachary.org>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Library Public License as published by
# the Free Software Foundation; either version 2, or (at your option)
# any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU Library Public License for more details.
# Pull in the build-environment variables (CEPH_ROOT, SED, XMLSTARLET, ...)
# and the shared test helpers (run_mon, run_osd, setup, teardown, ...).
source $(dirname $0)/../detect-build-env-vars.sh
source $CEPH_ROOT/qa/workunits/ceph-helpers.sh
# Test driver: point CEPH_ARGS at a throw-away single-mon endpoint, then run
# each requested TEST_* function (default: every TEST_* defined in this file)
# between setup/teardown of the test directory.
# NOTE(review): the "function run() {", "local dir=$1", "shift", "done" and
# closing "}" lines were lost in the mangled paste; they are restored here from
# the visible structure (the body uses $dir, "$@" and "return", which require a
# function scope) — confirm against the original file.
function run() {
    local dir=$1
    shift

    export CEPH_MON="127.0.0.1:7104" # git grep '\<7104\>' : there must be only one
    CEPH_ARGS+="--fsid=$(uuidgen) --auth-supported=none "
    CEPH_ARGS+="--mon-host=$CEPH_MON "

    # Default to every TEST_* function currently defined in the shell.
    local funcs=${@:-$(set | ${SED} -n -e 's/^\(TEST_[0-9a-z_]*\) .*/\1/p')}
    for func in $funcs ; do
        setup $dir || return 1
        $func $dir || return 1
        teardown $dir || return 1
    done
}
# Verify "ceph osd crush rule create-simple": the default replicated rule has
# the expected structure, a new simple rule can be created once (second attempt
# reports "already exists"), dumps with the requested root/failure domain, and
# can be removed.
function TEST_crush_rule_create_simple() {
    local dir=$1

    run_mon $dir a || return 1

    ceph --format xml osd crush rule dump replicated_rule | \
        egrep '<op>take</op><item>[^<]+</item><item_name>default</item_name>' | \
        grep '<op>choose_firstn</op><num>0</num><type>osd</type>' || return 1
    local ruleset=ruleset0
    local root=host1 # NOTE(review): assignment restored — $root is used below but its definition was lost in the paste; confirm
    ceph osd crush add-bucket $root host
    local failure_domain=osd
    ceph osd crush rule create-simple $ruleset $root $failure_domain || return 1
    # creating the same rule a second time must be reported as a duplicate
    ceph osd crush rule create-simple $ruleset $root $failure_domain 2>&1 | \
        grep "$ruleset already exists" || return 1
    ceph --format xml osd crush rule dump $ruleset | \
        egrep '<op>take</op><item>[^<]+</item><item_name>'$root'</item_name>' | \
        grep '<op>choose_firstn</op><num>0</num><type>'$failure_domain'</type>' || return 1
    ceph osd crush rule rm $ruleset || return 1
}
# Verify "ceph osd crush rule dump": dumping a named rule returns that rule,
# the full dump contains it exactly once, and dumping a non-existent rule
# fails.
function TEST_crush_rule_dump() {
    local dir=$1

    run_mon $dir a || return 1

    local ruleset=ruleset1
    ceph osd crush rule create-erasure $ruleset || return 1
    # the per-rule dump must carry the requested rule_name
    test $(ceph --format json osd crush rule dump $ruleset | \
           jq ".rule_name == \"$ruleset\"") == true || return 1
    # the full dump must contain the rule exactly once
    test $(ceph --format json osd crush rule dump | \
           jq "map(select(.rule_name == \"$ruleset\")) | length == 1") == true || return 1
    ! ceph osd crush rule dump non_existent_ruleset || return 1
    ceph osd crush rule rm $ruleset || return 1
}
# Verify "ceph osd crush rule rm": a created erasure rule shows up in
# "rule ls" and disappears after removal.
function TEST_crush_rule_rm() {
    local dir=$1
    local ruleset=erasure2

    run_mon $dir a || return 1

    ceph osd crush rule create-erasure $ruleset default || return 1
    ceph osd crush rule ls | grep $ruleset || return 1
    ceph osd crush rule rm $ruleset || return 1
    ! ceph osd crush rule ls | grep $ruleset || return 1
}
# Verify "ceph osd crush rule create-erasure" with the default erasure-code
# profile (implicit and explicit), including re-creation of the default
# profile when it has been removed, and verify that a crushmap rejected by
# the configured crushtool prevents pool creation.
function TEST_crush_rule_create_erasure() {
    local dir=$1

    run_mon $dir a || return 1
    # should have at least one OSD
    run_osd $dir 0 || return 1

    local ruleset=ruleset3
    #
    # create a new ruleset with the default profile, implicitly
    #
    ceph osd crush rule create-erasure $ruleset || return 1
    ceph osd crush rule create-erasure $ruleset 2>&1 | \
        grep "$ruleset already exists" || return 1
    ceph --format xml osd crush rule dump $ruleset | \
        egrep '<op>take</op><item>[^<]+</item><item_name>default</item_name>' | \
        grep '<op>chooseleaf_indep</op><num>0</num><type>host</type>' || return 1
    ceph osd crush rule rm $ruleset || return 1
    ! ceph osd crush rule ls | grep $ruleset || return 1
    #
    # create a new ruleset with the default profile, explicitly
    #
    ceph osd crush rule create-erasure $ruleset default || return 1
    ceph osd crush rule ls | grep $ruleset || return 1
    ceph osd crush rule rm $ruleset || return 1
    ! ceph osd crush rule ls | grep $ruleset || return 1
    #
    # create a new ruleset and the default profile, implicitly
    #
    ceph osd erasure-code-profile rm default || return 1
    ! ceph osd erasure-code-profile ls | grep default || return 1
    ceph osd crush rule create-erasure $ruleset || return 1
    # flush the mon log so the "profile set default" line is on disk
    CEPH_ARGS='' ceph --admin-daemon $dir/ceph-mon.a.asok log flush || return 1
    grep 'profile set default' $dir/mon.a.log || return 1
    ceph osd erasure-code-profile ls | grep default || return 1
    ceph osd crush rule rm $ruleset || return 1
    ! ceph osd crush rule ls | grep $ruleset || return 1
    #
    # verify that if the crushmap contains a bugous ruleset,
    # it will prevent the creation of a pool.
    #
    local crushtool_path_old=`ceph-conf --show-config-value crushtool`
    # make crushmap validation always fail
    ceph tell mon.\* injectargs --crushtool "false"

    expect_failure $dir "Error EINVAL" \
        ceph osd pool create mypool 1 1 erasure || return 1
    # NOTE(review): restore of the original crushtool path below was not
    # visible in the mangled paste; it is implied by the otherwise-unused
    # $crushtool_path_old capture above — confirm against the original file.
    ceph tell mon.\* injectargs --crushtool "$crushtool_path_old"
}
# Assert that the named crush rule has rule_id == ruleset, extracting both
# integers from the JSON-ish "ceph osd crush rule dump" output.
# Arguments: $1 - rule name
function check_ruleset_id_match_rule_id() {
    local rule_name=$1 # NOTE(review): assignment restored — $rule_name is used below but its definition was lost in the paste; confirm
    rule_id=`ceph osd crush rule dump $rule_name | grep "\"rule_id\":" | awk -F ":|," '{print int($2)}'`
    ruleset_id=`ceph osd crush rule dump $rule_name | grep "\"ruleset\":"| awk -F ":|," '{print int($2)}'`
    test $ruleset_id = $rule_id || return 1
}
# Build a crushmap in which rule_id != ruleset for the existing rules, by
# decompiling the map, permuting the ruleset numbers with sed, recompiling
# and injecting it back.
# Arguments: $1 - test directory
# NOTE(review): relies on $root being set by the caller (bash dynamic
# scoping) — confirm against the original file.
function generate_manipulated_rules() {
    local dir=$1
    ceph osd crush add-bucket $root host
    ceph osd crush rule create-simple test_rule1 $root osd firstn || return 1
    ceph osd crush rule create-simple test_rule2 $root osd firstn || return 1
    ceph osd getcrushmap -o $dir/original_map
    crushtool -d $dir/original_map -o $dir/decoded_original_map
    #manipulate the rulesets , to make the rule_id != ruleset_id
    ${SED} -i 's/ruleset 0/ruleset 3/' $dir/decoded_original_map
    ${SED} -i 's/ruleset 2/ruleset 0/' $dir/decoded_original_map
    ${SED} -i 's/ruleset 1/ruleset 2/' $dir/decoded_original_map

    crushtool -c $dir/decoded_original_map -o $dir/new_map
    ceph osd setcrushmap -i $dir/new_map

    ceph osd crush rule dump
}
# Verify that a rule created on top of a manipulated crushmap (where existing
# rules have rule_id != ruleset) still gets matching rule_id and ruleset ids.
function TEST_crush_ruleset_match_rule_when_creating() {
    local dir=$1

    run_mon $dir a || return 1

    local root=host1 # NOTE(review): assignment restored — $root is used below (and by generate_manipulated_rules) but its definition was lost in the paste; confirm
    generate_manipulated_rules $dir

    ceph osd crush rule create-simple special_rule_simple $root osd firstn || return 1

    ceph osd crush rule dump
    #show special_rule_simple has same rule_id and ruleset_id
    check_ruleset_id_match_rule_id special_rule_simple || return 1
}
# Fill the crushmap with rules for every remaining ruleset id (3..255), then
# verify that creating one more rule fails with ENOSPC.
function TEST_add_ruleset_failed() {
    local dir=$1

    run_mon $dir a || return 1

    local root=host1 # NOTE(review): assignment restored — $root is used below but its definition was lost in the paste; confirm
    ceph osd crush add-bucket $root host
    ceph osd crush rule create-simple test_rule1 $root osd firstn || return 1
    ceph osd crush rule create-simple test_rule2 $root osd firstn || return 1
    ceph osd getcrushmap > $dir/crushmap || return 1
    crushtool --decompile $dir/crushmap > $dir/crushmap.txt || return 1
    # NOTE(review): the heredoc body below was reconstructed — only the
    # "step choose firstn 0 type osd" line survived the mangled paste;
    # confirm the rule template against the original file.
    for i in $(seq 3 255)
        do
            cat <<EOF
rule test_rule$i {
	ruleset $i
	type replicated
	min_size 1
	max_size 10
	step take $root
	step choose firstn 0 type osd
	step emit
}
EOF
    done >> $dir/crushmap.txt
    crushtool --compile $dir/crushmap.txt -o $dir/crushmap || return 1
    ceph osd setcrushmap -i $dir/crushmap || return 1
    # all 256 ruleset ids are taken: one more rule must be rejected
    ceph osd crush rule create-simple test_rule_nospace $root osd firstn 2>&1 | \
        grep "Error ENOSPC" || return 1
}
# Verify "ceph osd crush rename-bucket": rename succeeds, is idempotent, and
# renaming a non-existent bucket fails with ENOENT.
function TEST_crush_rename_bucket() {
    local dir=$1

    run_mon $dir a || return 1

    ceph osd crush add-bucket host1 host
    ! ceph osd tree | grep host2 || return 1
    ceph osd crush rename-bucket host1 host2 || return 1
    ceph osd tree | grep host2 || return 1
    ceph osd crush rename-bucket host1 host2 || return 1 # idempotency
    ceph osd crush rename-bucket nonexistent something 2>&1 | \
        grep "Error ENOENT" || return 1
}
# Verify that injecting an empty (bucket-less) crushmap into a live cluster
# is rejected with EINVAL.
function TEST_crush_reject_empty() {
    local dir=$1

    run_mon $dir a || return 1
    # should have at least one OSD
    run_osd $dir 0 || return 1

    local empty_map=$dir/empty_map
    :> $empty_map.txt # NOTE(review): truncation restored — $empty_map.txt must exist before compiling but this line was lost in the paste; confirm
    crushtool -c $empty_map.txt -o $empty_map.map || return 1
    expect_failure $dir "Error EINVAL" \
        ceph osd setcrushmap -i $empty_map.map || return 1
}
# Verify that "ceph osd crush tree --format=xml" output validates against the
# RelaxNG schema shipped with the tests.
function TEST_crush_tree() {
    local dir=$1

    run_mon $dir a || return 1

    ceph osd crush tree --format=xml | \
        $XMLSTARLET val -e -r $CEPH_ROOT/src/test/mon/osd-crush-tree.rng - || return 1
}
# NB: disable me if i am too time consuming
# Inject an empty crushmap into a 3-mon cluster (after removing the pool that
# guards against it), then repair the monstore with
# ceph-monstore-update-crush.sh --rewrite and verify the buckets come back.
function TEST_crush_repair_faulty_crushmap() {
    local dir=$1
    local fsid=$(uuidgen) # NOTE(review): assignment restored — $fsid is used below but its definition was lost in the paste; confirm
    MONA=127.0.0.1:7113 # git grep '\<7113\>' : there must be only one
    MONB=127.0.0.1:7114 # git grep '\<7114\>' : there must be only one
    MONC=127.0.0.1:7115 # git grep '\<7115\>' : there must be only one
    CEPH_ARGS_orig=$CEPH_ARGS
    CEPH_ARGS="--fsid=$fsid --auth-supported=none "
    CEPH_ARGS+="--mon-initial-members=a,b,c "
    CEPH_ARGS+="--mon-host=$MONA,$MONB,$MONC "
    run_mon $dir a --public-addr $MONA || return 1
    run_mon $dir b --public-addr $MONB || return 1
    run_mon $dir c --public-addr $MONC || return 1

    local empty_map=$dir/empty_map
    :> $empty_map.txt # NOTE(review): truncation restored — this line was lost in the paste; confirm
    crushtool -c $empty_map.txt -o $empty_map.map || return 1

    local crushtool_path_old=`ceph-conf --show-config-value crushtool`
    # bypass crushmap validation so the empty map can be injected at all
    ceph tell mon.\* injectargs --crushtool "true"

    #import empty crushmap should failture.because the default pool rbd use the rule
    ceph osd setcrushmap -i $empty_map.map 2>&1|grep "Error EINVAL: the crush rule no"|| return 1
    #remove the default pool rbd
    ceph osd pool delete rbd rbd --yes-i-really-really-mean-it || return 1
    #now it can be successful to set the empty crush map
    ceph osd setcrushmap -i $empty_map.map || return 1

    # should be an empty crush map without any buckets
    # NOTE(review): the success flag / break / sleep lines of this retry loop
    # were lost in the paste and have been restored — confirm against the
    # original file.
    local success=false
    for delay in 1 2 4 8 16 32 64 128 256 ; do
        if test $(ceph osd crush dump --format=json | \
                      jq '.buckets | length == 0') == true ; then
            success=true
            break
        fi
        sleep $delay
    done
    if ! $success ; then
        ceph osd crush dump --format=json-pretty
        return 1
    fi

    # bring them down, the "ceph" commands will try to hunt for other monitor in
    # vain, after mon.a is offline
    kill_daemons $dir || return 1
    # rewrite the monstore with the good crush map,
    $CEPH_ROOT/src/tools/ceph-monstore-update-crush.sh --rewrite $dir/a || return 1

    run_mon $dir a --public-addr $MONA || return 1
    run_mon $dir b --public-addr $MONB || return 1
    run_mon $dir c --public-addr $MONC || return 1
    # the buckets are back
    test $(ceph osd crush dump --format=json | \
           jq '.buckets | length > 0') == true || return 1
    CEPH_ARGS=$CEPH_ARGS_orig
}
# compile-command: "cd ../.. ; make -j4 && test/mon/osd-crush.sh"